| Column | Type | Range / Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
b4631189d6d2e11cc9e3587fbf8d89019ec14498 | 6,856 | py | Python | data_service/api/data_api.py | statisticsnorway/microdata-data-service | d477b7b75589d4c977771122558c948c040a1106 | ["Apache-2.0"] | stars: null | issues: 7 (2021-10-08T13:40:33.000Z to 2022-02-04T10:37:55.000Z) | forks: null
import logging
import os
import io
from fastapi import APIRouter, Depends, Header
from fastapi.responses import FileResponse, StreamingResponse
from fastapi import HTTPException, status
import pyarrow as pa
import pyarrow.parquet as pq
from data_service.api.query_models import (
InputTimePeriodQuery, InputTimeQuery, InputFixedQuery
)
from data_service.config import config
from data_service.api.response_models import ErrorMessage
from data_service.config.config import get_settings
from data_service.config.dependencies import get_processor
from data_service.core.processor import Processor
from data_service.api.auth import authorize_user
data_router = APIRouter()
log = logging.getLogger(__name__)
@data_router.get("/data/resultSet", responses={
204: {}, 404: {"model": ErrorMessage}})
def retrieve_result_set(file_name: str,
authorization: str = Header(None),
settings: config.BaseSettings = Depends(get_settings)):
"""
Stream a generated result parquet file.
"""
log.info(
f"Entering /data/resultSet with request for file name: {file_name}"
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
file_path = (
f"{settings.RESULTSET_DIR}/{file_name}"
)
if not os.path.isfile(file_path):
log.warning(f"No file found for path: {file_path}")
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail='Result set not found'
)
else:
return FileResponse(
file_path, media_type='application/octet-stream'
)
@data_router.post("/data/event/generate-file",
responses={404: {"model": ErrorMessage}})
def create_result_file_event(input_query: InputTimePeriodQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type event,
and write result to file. Returns name of file in response.
"""
log.info(
f'Entering /data/event/generate-file with input query: {input_query}'
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_event_request(input_query)
resultset_file_name = processor.write_table(result_data)
log.info(f'File name for event result set: {resultset_file_name}')
return {
'filename': resultset_file_name,
}
@data_router.post("/data/status/generate-file",
responses={404: {"model": ErrorMessage}})
def create_result_file_status(input_query: InputTimeQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type status,
and write result to file. Returns name of file in response.
"""
log.info(
f'Entering /data/status/generate-file with input query: {input_query}'
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_status_request(input_query)
resultset_file_name = processor.write_table(result_data)
    log.info(f'File name for status result set: {resultset_file_name}')
return {
'filename': resultset_file_name,
}
@data_router.post("/data/fixed/generate-file",
responses={404: {"model": ErrorMessage}})
def create_file_result_fixed(input_query: InputFixedQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type fixed,
and write result to file. Returns name of file in response.
"""
log.info(
f'Entering /data/fixed/generate-file with input query: {input_query}'
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_fixed_request(input_query)
resultset_file_name = processor.write_table(result_data)
    log.info(f'File name for fixed result set: {resultset_file_name}')
return {
'filename': resultset_file_name,
}
@data_router.post("/data/event/stream",
responses={404: {"model": ErrorMessage}})
def stream_result_event(input_query: InputTimePeriodQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
    Create result set of data with temporality type event,
and stream result as response.
"""
log.info(f'Entering /data/event/stream with input query: {input_query}')
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_event_request(input_query)
buffer_stream = pa.BufferOutputStream()
pq.write_table(result_data, buffer_stream)
return StreamingResponse(
io.BytesIO(buffer_stream.getvalue().to_pybytes())
)
@data_router.post("/data/status/stream",
responses={404: {"model": ErrorMessage}})
def stream_result_status(input_query: InputTimeQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type status,
and stream result as response.
"""
log.info(f'Entering /data/status/stream with input query: {input_query}')
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_status_request(input_query)
buffer_stream = pa.BufferOutputStream()
pq.write_table(result_data, buffer_stream)
return StreamingResponse(
io.BytesIO(buffer_stream.getvalue().to_pybytes())
)
@data_router.post("/data/fixed/stream",
responses={404: {"model": ErrorMessage}})
def stream_result_fixed(input_query: InputFixedQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type fixed,
and stream result as response.
"""
log.info(f'Entering /data/fixed/stream with input query: {input_query}')
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_fixed_request(input_query)
buffer_stream = pa.BufferOutputStream()
pq.write_table(result_data, buffer_stream)
return StreamingResponse(
io.BytesIO(buffer_stream.getvalue().to_pybytes())
)
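# A client-side sketch (not part of this module; base_url, input_query and
# token are hypothetical) showing that the streamed bytes are a complete
# parquet file and can be read straight back into an Arrow table:
#
#   import io, requests
#   import pyarrow.parquet as pq
#   resp = requests.post(f"{base_url}/data/event/stream",
#                        json=input_query, headers={"Authorization": token})
#   table = pq.read_table(io.BytesIO(resp.content))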
| 35.708333 | 79 | 0.67605 | 0 | 0 | 0 | 0 | 6,125 | 0.893378 | 0 | 0 | 2,041 | 0.297695 |
b466221b457ff5136956aa01c99313e615519ec6 | 1,976 | py | Python | lib/parser/augur/Bonus.py | Innoviox/QuizDB | b26adf7134408e5b29ebc8b0b05601cbbf45667a | ["MIT"] | stars: null | issues: null | forks: null
from utils import sanitize
class Bonus:
def __init__(self, number, leadin="", texts=None, answers=None,
category="", subcategory="",
tournament="", round=""):
self.number = number
self.leadin = leadin
self.texts = texts
self.answers = answers
self.category = category
self.subcategory = subcategory
self.tournament = tournament
self.round = round
if texts is None:
self.texts = []
if answers is None:
self.answers = []
def has_content(self):
if len(self.texts) == 0 and len(self.answers) == 0:
return False
if len(self.texts) == 0 or len(self.answers) == 0:
print("Discrepancy in Bonus %d" % self.number)
return False
return self.texts[0].strip() != "" or self.answers[0].strip() != ""
def to_dict(self):
return {
"number": self.number,
"leadin": self.leadin,
"formatted_texts": self.texts,
"formatted_answers": self.answers,
"texts": [sanitize(t, valid_tags=[]) for t in self.texts],
"answers": [sanitize(a, valid_tags=[]) for a in self.answers],
"category": self.category,
"subcategory": self.subcategory,
"tournament": self.tournament,
"round": self.round
}
def __str__(self):
return str(self.to_dict())
def is_valid(self):
return (self.leadin.strip() != "" and
len(self.texts) == 3 and
len(self.answers) == 3 and
all(text.strip() != "" for text in self.texts) and
all(answer.strip() != "" for answer in self.answers))
def content(self):
text = self.leadin
for i in range(3):
if len(self.texts) > i:
text += " " + self.texts[i] + " ANSWER: " + self.answers[i]
return text
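# Usage sketch (hypothetical values, not part of the original file):
#
#   b = Bonus(1, leadin="For 10 points each...",
#             texts=["t1", "t2", "t3"], answers=["a1", "a2", "a3"])
#   assert b.is_valid()
#   record = b.to_dict()  # plain dict, ready for JSON serialization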
| 31.365079 | 75 | 0.522267 | 1,946 | 0.984818 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.085526 |
b466a703c7aeb3f1783bcb9b597f3efeb4eaec6e | 5,389 | py | Python | train_folds.py | wubinbai/argus-freesound | 662aa9570b31075fb1d9f18102bda89ed6bb0fc1 | ["MIT"] | stars: 1 (2019-12-12T02:37:57.000Z to 2019-12-12T02:37:57.000Z) | issues: null | forks: null
import json
import argparse
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset
from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset
from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb
from src.transforms import get_transforms
from src.argus_models import FreesoundModel
from src.utils import load_noisy_data, load_folds_data
from src import config
parser = argparse.ArgumentParser()
parser.add_argument('--experiment', required=True, type=str)
args = parser.parse_args()
BATCH_SIZE = 128
CROP_SIZE = 256
DATASET_SIZE = 128 * 256
NOISY_PROB = 0.01
CORR_NOISY_PROB = 0.42
MIXER_PROB = 0.8
WRAP_PAD_PROB = 0.5
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 8
SAVE_DIR = config.experiments_dir / args.experiment
PARAMS = {
'nn_module': ('AuxSkipAttention', {
'num_classes': len(config.classes),
'base_size': 64,
'dropout': 0.4,
'ratio': 16,
'kernel_size': 7,
'last_filters': 8,
'last_fc': 4
}),
'loss': ('OnlyNoisyLSoftLoss', {
'beta': 0.7,
'noisy_weight': 0.5,
'curated_weight': 0.5
}),
'optimizer': ('Adam', {'lr': 0.0009}),
'device': 'cuda',
'aux': {
'weights': [1.0, 0.4, 0.2, 0.1]
},
'amp': {
'opt_level': 'O2',
'keep_batchnorm_fp32': True,
'loss_scale': "dynamic"
}
}
def train_fold(save_dir, train_folds, val_folds,
folds_data, noisy_data, corrected_noisy_data):
train_transfrom = get_transforms(train=True,
size=CROP_SIZE,
wrap_pad_prob=WRAP_PAD_PROB,
resize_scale=(0.8, 1.0),
resize_ratio=(1.7, 2.3),
resize_prob=0.33,
spec_num_mask=2,
spec_freq_masking=0.15,
spec_time_masking=0.20,
spec_prob=0.5)
mixer = RandomMixer([
SigmoidConcatMixer(sigmoid_range=(3, 12)),
AddMixer(alpha_dist='uniform')
], p=[0.6, 0.4])
mixer = UseMixerWithProb(mixer, prob=MIXER_PROB)
curated_dataset = FreesoundDataset(folds_data, train_folds,
transform=train_transfrom,
mixer=mixer)
noisy_dataset = FreesoundNoisyDataset(noisy_data,
transform=train_transfrom,
mixer=mixer)
corr_noisy_dataset = FreesoundCorrectedNoisyDataset(corrected_noisy_data,
transform=train_transfrom,
mixer=mixer)
dataset_probs = [NOISY_PROB, CORR_NOISY_PROB, 1 - NOISY_PROB - CORR_NOISY_PROB]
print("Dataset probs", dataset_probs)
print("Dataset lens", len(noisy_dataset), len(corr_noisy_dataset), len(curated_dataset))
train_dataset = RandomDataset([noisy_dataset, corr_noisy_dataset, curated_dataset],
p=dataset_probs,
size=DATASET_SIZE)
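    # Note (added for clarity): each draw of RandomDataset picks one source
    # with the probabilities above, so an epoch of DATASET_SIZE samples is
    # expected to contain roughly 1% noisy, 42% corrected-noisy and 57%
    # curated examples (expected proportions, not exact counts).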
val_dataset = FreesoundDataset(folds_data, val_folds,
get_transforms(False, CROP_SIZE))
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
shuffle=True, drop_last=True,
num_workers=NUM_WORKERS)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE * 2,
shuffle=False, num_workers=NUM_WORKERS)
model = FreesoundModel(PARAMS)
callbacks = [
MonitorCheckpoint(save_dir, monitor='val_lwlrap', max_saves=1),
ReduceLROnPlateau(monitor='val_lwlrap', patience=6, factor=0.6, min_lr=1e-8),
EarlyStopping(monitor='val_lwlrap', patience=18),
LoggingToFile(save_dir / 'log.txt'),
]
model.fit(train_loader,
val_loader=val_loader,
max_epochs=700,
callbacks=callbacks,
metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
if not SAVE_DIR.exists():
SAVE_DIR.mkdir(parents=True, exist_ok=True)
else:
print(f"Folder {SAVE_DIR} already exists.")
with open(SAVE_DIR / 'source.py', 'w') as outfile:
outfile.write(open(__file__).read())
print("Model params", PARAMS)
with open(SAVE_DIR / 'params.json', 'w') as outfile:
json.dump(PARAMS, outfile)
folds_data = load_folds_data(use_corrections=CORRECTIONS)
noisy_data = load_noisy_data()
corrected_noisy_data = get_corrected_noisy_data()
for fold in config.folds:
val_folds = [fold]
train_folds = list(set(config.folds) - set(val_folds))
save_fold_dir = SAVE_DIR / f'fold_{fold}'
print(f"Val folds: {val_folds}, Train folds: {train_folds}")
print(f"Fold save dir {save_fold_dir}")
train_fold(save_fold_dir, train_folds, val_folds,
folds_data, noisy_data, corrected_noisy_data)
| 36.659864 | 92 | 0.594544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.109111 |
b467f6301bdf6a68e1a4f160e420835a55b52e4b | 202 | py | Python | source_code/day001/input-exercise.py | MKutka/100daysofcode | e69c207956a1b16bb861e5831f0d66e75f16b31e | ["MIT"] | stars: null | issues: null | forks: null
#Day 1.3 Exercise!!
#First way I thought to do it without help
name = input("What is your name? ")
print(len(name))
#Way I found to do it from searching google
print(len(input("What is your name? ")))
| 25.25 | 43 | 0.70297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.722772 |
b4695e99489bd38daaa6b9010e4ec8efec4ce4a7 | 3,524 | py | Python | pyvalidator/is_strong_password.py | theteladras/py.validator | 624ace7973552c8ac9353f48acbf96ec0ecc24a9 | ["MIT"] | stars: 15 (2021-11-01T14:14:56.000Z to 2022-03-17T11:52:29.000Z) | issues: 1 (2022-03-16T13:39:16.000Z to 2022-03-17T09:16:00.000Z) | forks: null
from typing import TypedDict
from .utils.Classes.String import String
from .utils.assert_string import assert_string
from .utils.merge import merge
class _IsStrongPasswordOptions(TypedDict):
min_length: int
min_uppercase: int
min_lowercase: int
min_numbers: int
min_symbols: int
return_score: bool
points_per_unique: int
points_per_repeat: float
points_for_containing_upper: int
points_for_containing_lower: int
points_for_containing_number: int
points_for_containing_symbol: int
class _Analysis(TypedDict):
length: int
unique_chars: int
uppercase_count: int
lowercase_count: int
number_count: int
symbol_count: int
default_options: _IsStrongPasswordOptions = {
"min_length": 8,
"min_uppercase": 1,
"min_lowercase": 1,
"min_numbers": 1,
"min_symbols": 1,
"return_score": False,
"points_per_unique": 1,
"points_per_repeat": 0.5,
"points_for_containing_lower": 10,
"points_for_containing_upper": 10,
"points_for_containing_number": 10,
"points_for_containing_symbol": 10,
}
def count_chars(pw: String):
result = {}
for char in pw:
if char in result:
            result[char] += 1
else:
result[char] = 1
return result
def analyze_password(pw: String) -> _Analysis:
upper_case_regex = r"^[A-Z]$"
lower_case_regex = r"^[a-z]$"
number_regex = r"^[0-9]$"
symbol_regex = r"^[-#!$@%^&*()_+|~=`{}\[\]:\";'<>?,./ ]$"
char_map = count_chars(pw)
analysis: _Analysis = {
"length": pw.length,
"unique_chars": len([*char_map]),
"uppercase_count": 0,
"lowercase_count": 0,
"number_count": 0,
"symbol_count": 0,
}
for char in [*char_map]:
char = String(char)
if char.match(upper_case_regex):
analysis["uppercase_count"] += char_map[char]
elif char.match(lower_case_regex):
analysis["lowercase_count"] += char_map[char]
elif char.match(number_regex):
analysis["number_count"] += char_map[char]
elif char.match(symbol_regex):
analysis["symbol_count"] += char_map[char]
return analysis
def score_password(analysis: _Analysis, options: _IsStrongPasswordOptions):
points = 0
points += analysis["unique_chars"] * options["points_per_unique"]
    # repeated characters score at the lower per-repeat rate
    points += (analysis["length"] - analysis["unique_chars"]) * options["points_per_repeat"]
if analysis["uppercase_count"] > 0:
points += options["points_for_containing_upper"]
if analysis["lowercase_count"] > 0:
points += options["points_for_containing_lower"]
if analysis["number_count"] > 0:
points += options["points_for_containing_number"]
if analysis["symbol_count"] > 0:
points += options["points_for_containing_symbol"]
return points
def is_strong_password(input: str, options: _IsStrongPasswordOptions = {}) -> bool:
input = assert_string(input)
options = merge(options, default_options)
analysis = analyze_password(input)
if options["return_score"]:
return score_password(analysis, options)
return (
analysis["length"] >= options["min_length"] and
analysis["uppercase_count"] >= options["min_uppercase"] and
analysis["lowercase_count"] >= options["min_lowercase"] and
analysis["number_count"] >= options["min_numbers"] and
analysis["symbol_count"] >= options["min_symbols"]
)
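# Usage sketch (hypothetical inputs, not part of the original module):
#
#   is_strong_password("Str0ng!pass")                   # -> True
#   is_strong_password("weak", {"return_score": True})  # -> numeric score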
| 29.864407 | 92 | 0.652667 | 538 | 0.152667 | 0 | 0 | 0 | 0 | 0 | 0 | 862 | 0.244608 |
b46a7333f1fb95d927bac95badb76d52d1539743 | 1,691 | py | Python | boilerplate_app/serializers.py | taher-systango/DjangoUnboxed | 808ab771a44564458b897b6ec854c08f43cccf2a | ["MIT"] | stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Python imports.
import logging
import datetime
import calendar
# Django imports.
from django.db import transaction
# Rest Framework imports.
from rest_framework import serializers
# Third Party Library imports
# local imports.
from boilerplate_app.models import User, Projects
class UserCreateSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
def validate(self, data, *args, **kwargs):
return super(UserCreateSerializer, self).validate(data, *args, **kwargs)
@transaction.atomic()
def create(self, validated_data):
# Register new users
user = super(UserCreateSerializer, self).create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class Meta:
model = User
fields = ('email', 'id', 'password', 'username', 'first_name', 'last_name', 'role')
extra_kwargs = {'password':{'write_only':True}}
class UserListSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name', 'email', 'role')
class ProjectsCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Projects
fields = ('project_name','user')
def create(self, validated_data):
user = User.objects.get(pk=validated_data.pop('user'))
return Projects.objects.create(**validated_data,user=user)
class ProjectsListSerializer(serializers.ModelSerializer):
class Meta:
model = Projects
        fields = ('id', 'project_name', 'user')
| 25.621212 | 91 | 0.686576 | 1,310 | 0.77469 | 0 | 0 | 254 | 0.150207 | 0 | 0 | 350 | 0.206978 |
b46ebc3b01df0741b7690606a0b55aac51c6693f | 237 | py | Python | wagtail/wagtailadmin/blocks.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | ["BSD-3-Clause"] | stars: 3 (2016-08-17T13:56:36.000Z to 2019-04-23T19:59:25.000Z) | issues: 11 (2016-08-05T15:43:06.000Z to 2016-12-16T13:32:23.000Z) | forks: 2 (2017-08-08T01:39:02.000Z to 2018-05-06T06:16:10.000Z)
from __future__ import absolute_import, unicode_literals
import warnings
from wagtail.wagtailcore.blocks import * # noqa
warnings.warn("wagtail.wagtailadmin.blocks has moved to wagtail.wagtailcore.blocks", UserWarning, stacklevel=2)
| 29.625 | 111 | 0.827004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.316456 |
b46ec93c5a888c6cb9e5eeda25bf96474de0d3f9 | 5,197 | py | Python | src/olympia/stats/management/commands/theme_update_counts_from_file.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | ["BSD-3-Clause"] | stars: null | issues: null | forks: null
import codecs
from datetime import datetime, timedelta
from optparse import make_option
from os import path, unlink
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import commonware.log
from olympia import amo
from olympia.addons.models import Addon, Persona
from olympia.stats.models import ThemeUpdateCount
from . import get_date_from_file, save_stats_to_file
log = commonware.log.getLogger('adi.themeupdatecount')
class Command(BaseCommand):
"""Process hive results stored in different files and store them in the db.
Usage:
./manage.py theme_update_counts_from_file <folder> --date=YYYY-MM-DD
    If no date is specified, the default is the day before.
    If no folder is specified, the default is `hive_results/<YYYY-MM-DD>/`.
This folder will be located in `<settings.NETAPP_STORAGE>/tmp`.
File processed:
- theme_update_counts.hive
Each file has the following cols:
- date
- addon id (if src is not "gp") or persona id
- src (if it's "gp" then it's an old request with the persona id)
- count
"""
help = __doc__
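    # A hypothetical input line (tab-separated) matching the columns above:
    #   2014-07-01    12345    gp    7
    # "gp" marks an old-style request, so 12345 is a persona id that is
    # mapped to its addon id via `persona_to_addon` before counting.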
option_list = BaseCommand.option_list + (
make_option('--date', action='store', type='string',
dest='date', help='Date in the YYYY-MM-DD format.'),
make_option('--separator', action='store', type='string', default='\t',
dest='separator', help='Field separator in file.'),
)
def handle(self, *args, **options):
start = datetime.now() # Measure the time it takes to run the script.
day = options['date']
if not day:
day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
folder = args[0] if args else 'hive_results'
folder = path.join(settings.TMP_PATH, folder, day)
sep = options['separator']
filepath = path.join(folder, 'theme_update_counts.hive')
# Make sure we're not trying to update with mismatched data.
if get_date_from_file(filepath, sep) != day:
raise CommandError('%s file contains data for another day' %
filepath)
# First, make sure we don't have any existing counts for the same day,
# or it would just increment again the same data.
ThemeUpdateCount.objects.filter(date=day).delete()
theme_update_counts = {}
# Preload a set containing the ids of all the persona Add-on objects
# that we care about. When looping, if we find an id that is not in
# that set, we'll reject it.
addons = set(Addon.objects.filter(type=amo.ADDON_PERSONA,
status=amo.STATUS_PUBLIC,
persona__isnull=False)
.values_list('id', flat=True))
# Preload all the Personas once and for all. This builds a dict where
# each key (the persona_id we get from the hive query) has the addon_id
# as value.
persona_to_addon = dict(Persona.objects.values_list('persona_id',
'addon_id'))
with codecs.open(filepath, encoding='utf8') as count_file:
for index, line in enumerate(count_file):
if index and (index % 1000000) == 0:
log.info('Processed %s lines' % index)
splitted = line[:-1].split(sep)
if len(splitted) != 4:
log.debug('Badly formatted row: %s' % line)
continue
day, id_, src, count = splitted
try:
id_, count = int(id_), int(count)
except ValueError: # Badly formatted? Drop.
continue
if src:
src = src.strip()
# If src is 'gp', it's an old request for the persona id.
if id_ not in persona_to_addon and src == 'gp':
continue # No such persona.
addon_id = persona_to_addon[id_] if src == 'gp' else id_
# Does this addon exist?
if addon_id not in addons:
continue
# Memoize the ThemeUpdateCount.
if addon_id in theme_update_counts:
tuc = theme_update_counts[addon_id]
else:
tuc = ThemeUpdateCount(addon_id=addon_id, date=day,
count=0)
theme_update_counts[addon_id] = tuc
# We can now fill the ThemeUpdateCount object.
tuc.count += count
# Create in bulk: this is much faster.
ThemeUpdateCount.objects.bulk_create(theme_update_counts.values(), 100)
for theme_update_count in theme_update_counts.values():
save_stats_to_file(theme_update_count)
log.info('Processed a total of %s lines' % (index + 1))
log.debug('Total processing time: %s' % (datetime.now() - start))
# Clean up file.
log.debug('Deleting {path}'.format(path=filepath))
unlink(filepath)
| 39.371212 | 79 | 0.583029 | 4,720 | 0.908216 | 0 | 0 | 0 | 0 | 0 | 0 | 1,820 | 0.350202 |
b470fed81935f91ffeef38325bad1336729439b2 | 663 | py | Python | KRR/Saved/Run 4/plot_all.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | ["MIT"] | stars: 2 (2018-10-10T09:32:34.000Z to 2019-03-28T08:42:31.000Z) | issues: null | forks: null
import numpy as np
import pickle
import matplotlib.pyplot as plt
import os
import fnmatch
folder = "GP/"
ktype = "lin/"
matrices=os.listdir(folder+ktype)
for matrix in matrices:
if fnmatch.fnmatch(matrix, '*_val_*'):
with open(folder+ktype+matrix, "rb") as pickleFile:
results = pickle.load(pickleFile)
        array = results[2]
# Enable interactive mode
plt.ion()
# Draw the grid lines
plt.grid(True)
plt.plot(results[1],results[2],label=matrix)
plt.xscale('symlog', linthreshx=20)
plt.legend(loc='upper left')
plt.show()
| 27.625 | 59 | 0.585219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.135747 |
b4710f59abe4e45a9ec483665b621401e554ee1a | 5,919 | py | Python | FATERUI/common/camera/mindvision/camera_mindvision.py | LynnChan706/Fater | dde8e3baaac7be8b0c1f8bee0da628f6e6f2b772 | ["MIT"] | stars: 4 (2018-12-07T02:17:26.000Z to 2020-12-03T05:32:23.000Z) | issues: null | forks: 1 (2021-12-30T12:14:52.000Z to 2021-12-30T12:14:52.000Z)
#!/usr/bin/env python2.7
# coding=utf-8
import logging
import traceback
import time
from FATERUI.common.camera.camera import Camera
from . import CameraMindVision
from FATERUI.common.camera.common_tools import *
import cv2
# from aoi.common.infraredcontrol import infraredcontrol
from time import sleep
import datetime
def get_formated_time(pstr='%Y%m%d_%H_%M_%S_'):
return datetime.datetime.now().strftime(pstr)+ str(datetime.datetime.now().microsecond)
class MindVision(Camera):
def __init__(self, camera_degree=0, is_trigger_mode=False):
Camera.__init__(self)
self.camera_name = 'MINDVISION'
self.camera_init_status = False
self.__is_trigger_mode = is_trigger_mode
self.__camera = CameraMindVision.CameraMindVision(_mode=0,
_single_mode=False,
_packetSize=9000,
_strobe_enable=False,
_trigger_delay=0.018,
_interPacketDelay=3000,
_debug=False,
_is_hardware_trigger=self.__is_trigger_mode,
)
self.camera_status = self.CAMERA_STATUS_UNCONNECTED
self.__mark_as_open = False
self.__flash_mode = True
self.__old_time = 0.0
self.__rotate_degree = camera_degree
# self.__control=infraredcontrol.infrared()
def get_camera_name(self):
return self.camera_name
def open(self):
return self.__camera.open()
def __del__(self):
if self.get_camera_status() == self.CAMERA_STATUS_CONNECTED:
self.__camera.release_camera()
self.__mark_as_open = False
del self.__camera
def close(self):
self.__camera.save_parmeter()
self.__camera.release_camera()
self.__mark_as_open = False
self.camera_status = self.CAMERA_STATUS_UNCONNECTED
return True
def __take_picture(self):
print('ccc take a picture mind')
if self.__mark_as_open:
self.__mark_as_open = False
self.__camera.open()
logging.getLogger('pointgrey_camera').info('camera info:%s' % self.get_camera_description())
img = self.__camera.get_image_in_numpy()
img2=cv2.flip(img,0)
timenow = get_formated_time()
pimgtest = './imgdata/orgimg/' + timenow + 'x.bmp'
        if img is not None:  # '!=' comparison is ambiguous for numpy arrays
cv2.imwrite(pimgtest,img)
return img2
def set_flash_mode(self, flash=True):
self.__flash_mode = flash
def take_picture(self, index=None):
img=None
try:
img = self.__take_picture()
if img is not None and img.size > 100:
if self.__is_trigger_mode:
if time.time() - self.__old_time < 1:
return None
self.__old_time = time.time()
if abs(self.__rotate_degree) > 0.0001:
img = transform_image(img, angle=self.__rotate_degree, keep_all=True)
elif img.size <= 100:
img = None
except Exception as err:
print('error ---',err)
# logging.getLogger('logger_system').exception(u'error in take_picture:%s' % traceback.print_exc())
img = None
return img
def get_camera_description(self):
desc = 'firmware:' + str(self.__camera.get_firmware_version()) +\
'\nid:' + str(self.get_camera_id())
return desc
def get_camera_id(self):
return self.__camera.get_camera_id()
def get_camera_status(self):
return self.camera_status
def get_frame_count(self):
return self.__camera.get_frame_count()
def get_error_frame_count(self):
return self.__camera.get_error_frame_count()
def get_frame_rate(self):
return self.__camera.get_frame_rate()
def get_parameter(self):
shutter_time = self.__camera.get_shutter()
wb_red = self.__camera.get_white_balance_red()
wb_green = self.__camera.get_white_balance_green()
wb_blue = self.__camera.get_white_balance_blue()
parameter = {'shutter': shutter_time,
'wb_red': wb_red,
'wb_green': wb_green,
'wb_blue': wb_blue,
'rotate': self.__rotate_degree}
return parameter
def set_parameter(self, **kwargs):
flag = True
shutter = kwargs.get('shutter', None)
if shutter is not None:
success = self.__camera.set_shutter(kwargs['shutter'])
if not success:
flag = False
wb_red = kwargs.get('wb_red', None)
if wb_red is not None:
success = self.__camera.set_wb_red(wb_red)
if not success:
flag = False
wb_green = kwargs.get('wb_green', None)
if wb_green is not None:
success = self.__camera.set_wb_green(wb_green)
if not success:
flag = False
wb_blue = kwargs.get('wb_blue', None)
if wb_blue is not None and wb_blue > 0:
success = self.__camera.set_wb_blue(wb_blue)
if not success:
flag = False
self.__rotate_degree = kwargs.get('rotate', self.__rotate_degree)
return flag
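    # Usage sketch (hypothetical values, not from the original file):
    #
    #   cam = MindVision(camera_degree=90)
    #   if cam.open():
    #       cam.set_parameter(shutter=20, wb_red=512, wb_green=512, wb_blue=512)
    #       frame = cam.take_picture()
    #       cam.close()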
def get_picture_info(self):
return 'Temperature:[%.2f]\t\tFrameRate:[%.2f]' % (self.get_camera_temperature(), self.get_frame_rate())
def get_camera_temperature(self):
return self.__camera.get_camera_temperature()
def __is_connect(self):
return self.__camera.is_connected()
| 35.656627 | 112 | 0.58101 | 5,451 | 0.920933 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.087346 |
b4723dbed92e900ca2787cc70a21f796a78c3f12 | 2,307 | py | Python | tests/conftest.py | arosen93/jobflow | fbd5868394c6f4f6b4f2e0ccf4b7ff7d21fe7258 | ["BSD-3-Clause-LBNL"] | stars: 10 (2021-11-13T07:43:27.000Z to 2022-03-14T11:05:15.000Z) | issues: 69 (2021-08-31T13:15:54.000Z to 2022-03-31T21:43:56.000Z) | forks: 5 (2021-10-17T03:52:57.000Z to 2022-03-31T00:17:20.000Z)
import pytest
@pytest.fixture(scope="session")
def test_data():
from pathlib import Path
module_dir = Path(__file__).resolve().parent
test_dir = module_dir / "test_data"
return test_dir.resolve()
@pytest.fixture(scope="session")
def database():
return "jobflow_test"
@pytest.fixture(scope="session")
def mongo_jobstore(database):
from maggma.stores import MongoStore
from jobflow import JobStore
store = JobStore(MongoStore(database, "outputs"))
store.connect()
return store
@pytest.fixture(scope="function")
def memory_jobstore():
from maggma.stores import MemoryStore
from jobflow import JobStore
store = JobStore(MemoryStore())
store.connect()
return store
@pytest.fixture(scope="function")
def memory_data_jobstore():
from maggma.stores import MemoryStore
from jobflow import JobStore
store = JobStore(MemoryStore(), additional_stores={"data": MemoryStore()})
store.connect()
return store
@pytest.fixture
def clean_dir():
import os
import shutil
import tempfile
old_cwd = os.getcwd()
newpath = tempfile.mkdtemp()
os.chdir(newpath)
yield
os.chdir(old_cwd)
shutil.rmtree(newpath)
@pytest.fixture(scope="session")
def debug_mode():
return False
@pytest.fixture(scope="session")
def lpad(database, debug_mode):
from fireworks import LaunchPad
lpad = LaunchPad(name=database)
lpad.reset("", require_password=False)
yield lpad
if not debug_mode:
lpad.reset("", require_password=False)
for coll in lpad.db.list_collection_names():
lpad.db[coll].drop()
@pytest.fixture
def no_pydot(monkeypatch):
import builtins
import_orig = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "pydot":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
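# Usage sketch (hypothetical test, not part of this conftest): a test that
# requests the `no_pydot` fixture sees `import pydot` fail, which lets the
# suite assert graceful degradation when the optional dependency is absent:
#
#   def test_graph_without_pydot(no_pydot):
#       with pytest.raises(ImportError):
#           import pydot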
@pytest.fixture
def no_matplotlib(monkeypatch):
import builtins
import_orig = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "matplotlib":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
| 20.236842 | 78 | 0.686606 | 0 | 0 | 530 | 0.229736 | 2,263 | 0.980928 | 0 | 0 | 152 | 0.065886 |
b472afbaafc9d44c866573f1e75db57304dcd9c2 | 2,142 | py | Python | src/utilities/download_file_from_zip.py | Bhaskers-Blu-Org2/arcticseals | 9e2629ca0ce7aadbe63118f39ff2da757d5dbc33 | ["MIT"] | stars: 16 (2019-05-21T20:08:01.000Z to 2021-11-23T22:34:00.000Z) | issues: 14 (2018-07-18T17:14:30.000Z to 2019-04-14T14:40:12.000Z) | forks: 10 (2019-05-10T20:56:47.000Z to 2022-01-26T13:16:40.000Z) via microsoft/arcticseals @ 8adf8fbae679866373b9b5f981ceaa6ae9226c6a
# This script lets you download a single file from a remote ZIP archive
# without downloading the whole ZIP file itself.
# The hosting server needs to support the HTTP range header for it to work
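# For illustration (hypothetical byte offsets): reading bytes 100-199 of the
# archive issues a request like
#   GET /myfile.zip HTTP/1.1
#   Range: bytes=100-199
# and a server that supports ranges answers "206 Partial Content" with only
# that slice, so the central directory and one member can be fetched without
# downloading the rest of the archive.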
import zipfile
import requests
import argparse
class HTTPIO(object):
def __init__(self, url):
self.url = url
r = requests.head(self.url)
self.size = int(r.headers['content-length'])
assert self.size > 0
self.offset = 0
def seek(self, offset, whence=0):
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.size + offset
else:
raise Exception('Unknown value for parameter whence')
def read(self, size = None):
if size is None:
r = requests.get(self.url,
headers={"range": "bytes={}-{}".format(self.offset, self.size - 1)},
stream=True)
else:
r = requests.get(self.url,
headers={"range": "bytes={}-{}".format(self.offset, min(self.size - 1, self.offset+size - 1))},
stream=True)
r.raise_for_status()
r.raw.decode_content = True
content = r.raw.read()
self.offset += len(content)
return content
def tell(self):
return self.offset
def download_file(zip_url, relative_path, output_file):
with zipfile.ZipFile(HTTPIO(zip_url)) as zz:
with open(output_file, 'wb') as f:
f.write(zz.read(relative_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('URL', type=str, help='URL to zip file, e.g. https://example.com/myfile.zip')
parser.add_argument('FILE_PATH', type=str, help='Path of the desired file in the ZIP file, e.g. myfolder/mydocument.docx')
parser.add_argument('OUTPUT_FILE', type=str, help='Local path to write the file to, e.g. /home/user/mydocument.docx')
args = parser.parse_args()
download_file(args.URL, args.FILE_PATH, args.OUTPUT_FILE)
| 38.945455 | 126 | 0.600373 | 1,189 | 0.555089 | 0 | 0 | 0 | 0 | 0 | 0 | 522 | 0.243697 |
b474450a8d01b6c6116bd09fee74ef2ac63927a9 | 5,928 | py | Python | cxphasing/CXFileReader.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | ["MIT"] | stars: 3 (2018-05-11T16:05:55.000Z to 2021-12-20T08:52:02.000Z) | issues: null | forks: 2 (2018-11-14T08:57:10.000Z to 2021-12-20T08:52:06.000Z)
import Image
import readMDA
import h5py
import os
import numpy
from mmpad_image import open_mmpad_tif
import numpy as np
import scipy as sp
import sys
import pickle  # needed by read_pickle below
#import libtiff  # needed by read_tif; left commented out as in the original
from cxparams import CXParams as CXP
class CXFileReader(object):
"""
file_reader
A generic and configurable file reader.
The file reader determines the file type from the extension.
For hierarchical data files a method for extracting the data must be specified.
Inputs
------
filename - the name of the file to read
h5_file_path - hdf5 files: a string describing the location of the data inside a hierarchical data format
mda_filepath - mda files: must specify whether to read a detector channel or positioner number.
For e.g. detector channel 5 mda_filepath='d5'
positioner number 2 mda_filepath='p2'
Outputs
-------
data - the 2 or 3D array read from the data file.
Example Usage:
    fr = CXFileReader()
    data = fr.openup('filename.h5', h5_file_path='/some/string')
    data = fr.openup('filename.mda', mda_file_path='d4')  # for detector channel 4
"""
def __init__(self, *args, **kwargs):
self.args = args
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def openup(self, filename, **kwargs):
if not os.path.isfile(filename):
CXP.log.error('{} is not a valid file'.format(filename))
sys.exit(1)
self.extension = filename.split('.')[-1].lower()
for key in kwargs.keys():
setattr(self, key, kwargs[key])
try:
action = {
'mda': self.read_mda,
'h5': self.read_h5,
'hdf5': self.read_h5,
'jpg': self.read_image,
'jpeg': self.read_image,
'png': self.read_image,
'tif': self.read_image,
'tiff': self.read_tif,
'npy': self.read_npy,
'npz': self.read_npz,
'dat': self.read_dat,
'pkl': self.read_pickle,
'mmpd': self.read_mmpad,
'pil': self.read_pilatus
}[self.extension]
except NameError:
CXP.log.error('Unknown file extension {}'.format(self.extension))
raise
return action(filename=filename)
def read_mda(self, filename=None):
if not filename:
filename = self.filename
source = self.mda_file_path[0].lower()
if source not in ['d', 'p']:
CXP.log.error("mda_file_path first character must be 'd' or 'p'")
raise
channel = self.mda_file_path[1]
        if not channel.isdigit():  # numpy has no isnumeric(); test the character itself
CXP.log.error("mda_file_path second character must be numeric.")
raise
try:
return readMDA.readMDA(filename)[2][source].data
except:
CXP.log.error('Could not extract array from mda file')
raise
def read_h5(self, filename=None, h5_file_path='/entry/instrument/detector/data'):
if not filename:
filename = self.filename
try:
h5_file_path = self.h5_file_path
except:
pass
try:
return h5py.File(filename)[h5_file_path].value
except:
CXP.log.error('Could not extract data from h5 file.')
raise
def read_image(self, filename=None):
if not filename:
filename = self.filename
try:
return sp.misc.fromimage(Image.open(filename))
except:
CXP.log.error('Unable to read data from {}'.format(filename))
raise
def read_npy(self, filename=None):
if not filename:
filename = self.filename
try:
return numpy.load(filename)
except IOError as e:
print e
CXP.log.error('Could not extract data from numpy file.')
raise
def read_npz(self, filename=None):
if not filename:
filename = self.filename
l=[]
try:
d= dict(numpy.load(filename))
# Return list in the right order
for i in range(len(d)):
l.append(d['arr_{:d}'.format(i)])
return l
except IOError:
CXP.log.error('Could not extract data from numpy file.')
raise
def read_dat(self, filename=None):
if not filename:
filename = self.filename
try:
return sp.fromfile(filename)
except:
CXP.log.error('Could not extract data from data file.')
raise
def read_pickle(self, filename=None):
if not filename:
filename = self.filename
try:
            return pickle.load(open(filename, 'rb'))  # pickle.load expects a file object
except:
CXP.log.error('Could not load data from pickle')
raise
def read_mmpad(self, filename=None):
if not filename:
filename = self.filename
try:
return open_mmpad_tif(filename)
except:
CXP.log.error('Could not load data from pickle')
raise
def read_pilatus(self, filename=None):
if not filename:
filename = self.filename
try:
return sp.misc.fromimage(Image.open(filename))[:-1,:-1]
except:
CXP.log.error('Unable to read data from {}'.format(filename))
raise
def read_tif(self, filename=None):
if not filename:
filename = self.filename
try:
return libtiff.TIFF.open(filename).read_image()
except:
CXP.log.error('Unable to read data from {}'.format(filename))
raise
| 29.20197 | 109 | 0.546896 | 5,720 | 0.964912 | 0 | 0 | 0 | 0 | 0 | 0 | 1,590 | 0.268219 |
b474f12be63300ba532e865b5aa405c132d39b80 | 1,881 | py | Python | homeassistant/components/notify/file.py | SKarthick5121995/karthickmaduraai | d5498dfb83e9bb57fff16db854ae3e15c1e15425 | ["MIT"] | stars: null | issues: null | forks: null
"""
Support for file notification.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.file/
"""
import logging
import os
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_TITLE, DOMAIN, BaseNotificationService)
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config):
"""Get the file notification service."""
if not validate_config({DOMAIN: config},
{DOMAIN: ['filename',
'timestamp']},
_LOGGER):
return None
filename = config['filename']
timestamp = config['timestamp']
return FileNotificationService(hass, filename, timestamp)
# pylint: disable=too-few-public-methods
class FileNotificationService(BaseNotificationService):
"""Implement the notification service for the File service."""
def __init__(self, hass, filename, add_timestamp):
"""Initialize the service."""
self.filepath = os.path.join(hass.config.config_dir, filename)
self.add_timestamp = add_timestamp
def send_message(self, message="", **kwargs):
"""Send a message to a file."""
with open(self.filepath, 'a') as file:
if os.stat(self.filepath).st_size == 0:
title = '{} notifications (Log started: {})\n{}\n'.format(
kwargs.get(ATTR_TITLE),
dt_util.strip_microseconds(dt_util.utcnow()),
'-' * 80)
file.write(title)
if self.add_timestamp == 1:
text = '{} {}\n'.format(dt_util.utcnow(), message)
file.write(text)
else:
text = '{}\n'.format(message)
file.write(text)
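        # Example of the resulting log file (hypothetical title and times),
        # with the timestamp option enabled:
        #
        #   Example notifications (Log started: 2016-01-01 00:00:00+00:00)
        #   ----------------------------------------------------------------
        #   2016-01-01 00:05:00+00:00 The sun is shining!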
| 33 | 74 | 0.607656 | 1,007 | 0.535354 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.251462 |
b47686e7b028cb8a2b59a0aee1c9616fb72be5a0 | 2,561 | py | Python | app/eSignature/views/eg035_scheduled_sending.py | docusign/eg-03-python-auth-code-grant | e92913e25f753fb6b52fc3da6bc4b76c49c75b37 | ["MIT"] | stars: 7 (2019-05-09T05:17:35.000Z to 2020-05-06T14:27:51.000Z) | issues: 1 (2019-06-25T23:06:34.000Z to 2019-06-25T23:06:34.000Z) | forks: 8 (2019-06-21T23:57:48.000Z to 2020-02-11T18:58:34.000Z)
""" Example 035: Scheduled sending and delayed routing """
from os import path
from docusign_esign.client.api_exception import ApiException
from flask import render_template, session, Blueprint, request
from ..examples.eg035_scheduled_sending import Eg035ScheduledSendingController
from ...docusign import authenticate
from ...ds_config import DS_CONFIG
from ...error_handlers import process_error
from ...consts import pattern
eg = "eg035" # reference (and url) for this example
eg035 = Blueprint("eg035", __name__)
def get_args():
"""Get request and session arguments"""
# More data validation would be a good idea here
# Strip anything other than characters listed
signer_email = pattern.sub("", request.form.get("signer_email"))
signer_name = pattern.sub("", request.form.get("signer_name"))
resume_date = request.form.get("resume_date")
envelope_args = {
"signer_email": signer_email,
"signer_name": signer_name,
"resume_date": resume_date,
"status": "sent",
}
args = {
"account_id": session["ds_account_id"],
"base_path": session["ds_base_path"],
"access_token": session["ds_access_token"],
"envelope_args": envelope_args
}
return args
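# Hypothetical form submission illustrating the arguments gathered above:
#   signer_email=jane.doe@example.com, signer_name=Jane Doe, resume_date=2022-12-01
# The envelope is created immediately with status "sent", but routing is
# scheduled to begin on the resume date.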
@eg035.route("/eg035", methods=["POST"])
@authenticate(eg=eg)
def sign_by_email():
"""
1. Get required arguments
2. Call the worker method
3. Render success response with envelopeId
"""
# 1. Get required arguments
args = get_args()
try:
# 1. Call the worker method
results = Eg035ScheduledSendingController.worker(args)
print(results)
except ApiException as err:
return process_error(err)
# 2. Render success response with envelopeId
return render_template(
"example_done.html",
title="Envelope sent",
h1="Envelope sent",
message=f"The envelope has been created and scheduled!<br/>Envelope ID: {results['envelope_id']}."
)
@eg035.route("/eg035", methods=["GET"])
@authenticate(eg=eg)
def get_view():
"""responds with the form for the example"""
return render_template(
"eg035_scheduled_sending.html",
title="Scheduled sending",
source_file="eg035_scheduled_sending.py",
source_url=DS_CONFIG["github_example_url"] + "eg035_scheduled_sending.py",
documentation=DS_CONFIG["documentation"] + eg,
show_doc=DS_CONFIG["documentation"],
signer_name=DS_CONFIG["signer_name"],
signer_email=DS_CONFIG["signer_email"]
)
| 30.855422 | 106 | 0.681765 | 0 | 0 | 0 | 0 | 1,298 | 0.506833 | 0 | 0 | 1,046 | 0.408434 |
b476e08084c58f13ed7fa0dc7c045384b16ce2fe | 584 | py | Python | Informatik1/Finals Prep/HS20/1 Warmup/tally.py | Queentaker/uzh | 35cccaf910b95d15db21be80c8567eb427202591 | ["MIT"] | stars: 8 (2021-11-21T10:02:08.000Z to 2022-03-15T21:02:02.000Z) | issues: null | forks: 3 (2021-11-19T18:52:56.000Z to 2022-02-27T15:45:59.000Z)
#-- THIS LINE SHOULD BE THE FIRST LINE OF YOUR SUBMISSION! --#
def tally(costs, discounts, rebate_factor):
cost = sum(costs)
discount = sum(discounts)
pre = (cost - discount) * rebate_factor
if pre < 0:
return 0
else:
return round(pre, 2)
#-- THIS LINE SHOULD BE THE LAST LINE OF YOUR SUBMISSION! ---#
### DO NOT SUBMIT THE FOLLOWING LINES!!! THESE ARE FOR LOCAL TESTING ONLY!
# ((10+24) - (3+4+3)) * 0.3
assert(tally([10,24], [3,4,3], 0.30) == 7.20)
# if the result would be negative, 0 is returned instead
assert(tally([10], [20], 0.1) == 0)
| 29.2 | 74 | 0.623288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.481164 |
b47710dd97089aa9a4dab16fd72a2e7d5d913aa8 | 2,450 | py | Python | linear_sequence_of_dominos/valid_sequence.py | bhpayne/domino_tile_floor | e5473081160e6f59c901fcc7563a984ef8c0a76d | ["Apache-2.0"] | stars: null | issues: 6 (2020-12-31T03:33:44.000Z to 2021-01-01T01:46:56.000Z) | forks: 1 (2018-03-15T19:30:11.000Z to 2018-03-15T19:30:11.000Z)
#!/usr/bin/env python3
"""
Given a set of dominos, construct a linear sequence
For example, if the set of dominos is
[ (0,0) (1,0), (1,1)]
then a valid linear sequence of length four would be
(0,0),(0,1),(1,1),(1,0)
In this script we first create a set of dominos to sample from.
Then every permutation of that set is tested to see whether
the sequence is a valid linear sequence.
If the sequence is invalid, a counter is incremented to
record at which pair the sequence first broke.
"""
# http://www.domino-games.com/domino-rules/double-six.html
import itertools
'''
list_of_dominos = [ (0,0), (1,0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
(1,1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1),
(2, 2), (3, 2), (4, 2), (5, 2), (6, 2),
(3, 3), (4, 3), (5, 3), (6, 3),
(4, 4), (5, 4), (6, 4),
(5, 5), (6, 5),
(6, 6)]
'''
list_of_dominos = [(0,0),(1,0),(2,0),(3,0),
(1,1),(2,1),(3,1),
(2,2),(3,2),
(3,3)]
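# A generic check equivalent to the hand-unrolled elif chain below
# (a sketch added for clarity; the original tests each pair explicitly):
def is_valid_sequence(seq):
    """True if every adjacent pair of dominos matches end to end."""
    return all(seq[i][1] == seq[i + 1][0] for i in range(len(seq) - 1))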
print("number of unique dominos in this set is",len(list_of_dominos))
# 28! = 3*10^29
#print(list_of_dominos)
broke_on={}
for indx in range(11):
broke_on[indx+1]=0
print("initialized data structure (nothing up my sleeve):",broke_on)
for this_perm in itertools.permutations(list_of_dominos):
if(this_perm[0][1] != this_perm[1][0]):
#print("broke on first pair")
broke_on[1] += 1
elif(this_perm[1][1] != this_perm[2][0]):
#print("broke on second pair")
broke_on[2] += 1
elif(this_perm[2][1] != this_perm[3][0]):
broke_on[3] += 1
elif(this_perm[3][1] != this_perm[4][0]):
broke_on[4] += 1
elif(this_perm[4][1] != this_perm[5][0]):
broke_on[5] += 1
elif(this_perm[5][1] != this_perm[6][0]):
broke_on[6] += 1
elif(this_perm[6][1] != this_perm[7][0]):
broke_on[7] += 1
elif(this_perm[7][1] != this_perm[8][0]):
broke_on[8] += 1
elif(this_perm[8][1] != this_perm[9][0]):
broke_on[9] += 1
elif(this_perm[9][1] != this_perm[10][0]):
broke_on[10] += 1
else:
print("made it to another pair")
print(this_perm)
break
print(broke_on)
| 33.108108 | 73 | 0.490204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,273 | 0.519592 |
b479184cf8fa1d0126f4741b48905617a0186570 | 3,418 | py | Python | Projects/Arena/old-version.py | hastysun/Python | b63921160fe13291dbfcbdcf821364e76d3164d5 | ["MIT"] | stars: 1 (2018-05-16T22:41:38.000Z to 2018-05-16T22:41:38.000Z) | issues: null | forks: null
## Unit 4 Project - Two Player Game
## Gavin Weiss - Computer Programming II
## The Elder Scrolls X
# A fan made 2 player game successor the The Elder Scrolls Series
# Two players start off in an arena
# Can choose starting items
# Can choose classes
## Libraries
import time # Self explanatory
import random # Self explanatory
import os # Used for Linux commands
import os, platform # For Linux intro
## Functions
def sleep(): # This function just automates what I usually do manually
time.sleep(0.1)
print("\n")
return
## Code
class Player1(object): # This is the class for Player 1
def __init__(self, name, health, attack, stamina, defense):
self.name = name # Player's name
self.health = health # Player's max health
self.attack = attack # Player's attack power, can be changed
self.stamina = stamina # How many attacks you can do
self.defense = defense # How much damage you take
def Stats(self):
sleep()
print(self.name + "'s currents stats are: ")
sleep()
print("Health = " + str(self.health))
print("Attack = " + str(self.attack))
print("Stamina = " + str(self.stamina))
print("Defense = " + str(self.defense))
sleep()
class Player2(object): # This is the class for Player 2
def __init__(self, name, health, attack, stamina, defense):
self.name = name
self.health = health
self.attack = attack
self.stamina = stamina
self.defense = defense
def Stats(self):
sleep()
print(self.name + "'s currents stats are: ")
sleep()
print("Health = " + str(self.health))
print("Attack = " + str(self.attack))
print("Stamina = " + str(self.stamina))
print("Defense = " + str(self.defense))
sleep()
def intro1(): # This is an intro for Linux
sleep()
os.system("figlet Elder Scrolls X")
sleep()
return
def intro2(): # Intro for anything else
sleep()
print("\n\t Elder Scrolls X")
sleep()
return
if platform.system() == "Linux":
intro1()
else:
intro2()
def CharCreation(): # Function to ask questions for class choosing
sleep()
print("=> What kind of class do you want?")
sleep()
print("> 1 - Knight")
#sleep()
print("> 2 - Thief")
#sleep()
print("> 3 - Lancer")
sleep()
return
sleep()
print("=> Player 1 : What is your name?")
name1 = input("> ") # "name1" is Player 1's name
sleep()
print("=> Player 1,")
CharCreation()
CharCreationChoice1 = input("> ")
if CharCreationChoice1 == ("1"): # Knight
player1 = Player1(name1, 200, 150, 50, 200)
if CharCreationChoice1 == ("2"): # Thief
player1 = Player1(name1, 100, 200, 100, 50)
if CharCreationChoice1 == ("3"): # Lancer
player1 = Player1(name1, 100, 100, 100, 100)
sleep()
player1.Stats() # Prints the stats for Player 1
sleep()
print("=> Player 2 : What is your name?")
name2 = input("> ") # "name2" is Player 2's name
CharCreation()
CharCreationChoice2 = input("> ")
if CharCreationChoice2 == ("1"): # Knight
player2 = Player2(name2, 200, 150, 50, 200)
if CharCreationChoice2 == ("2"): # Thief
player2 = Player2(name2, 100, 200, 100, 50)
if CharCreationChoice2 == ("3"): # Lancer
player2 = Player2(name2, 100, 100, 100, 100)
player2.Stats() # Prints Player 2's stats
| 21.098765 | 70 | 0.613517 | 1,293 | 0.378291 | 0 | 0 | 0 | 0 | 0 | 0 | 1,269 | 0.37127 |
b47a0c18a008deafead3e0074466d35091002570 | 6,689 | py | Python | chromium/tools/telemetry/telemetry/internal/image_processing/video.py | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | ["BSD-3-Clause"] | stars: 925 (2015-11-06T03:04:46.000Z to 2017-09-16T19:08:43.000Z) | issues: 29 (2015-11-09T17:37:28.000Z to 2017-08-16T17:50:11.000Z) | forks: 51 (2015-11-08T07:06:38.000Z to 2017-08-21T07:27:19.000Z)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
from catapult_base import cloud_storage
from telemetry.core import platform
from telemetry.util import image_util
from telemetry.util import rgba_color
HIGHLIGHT_ORANGE_FRAME = rgba_color.WEB_PAGE_TEST_ORANGE
class BoundingBoxNotFoundException(Exception):
pass
class Video(object):
"""Utilities for storing and interacting with the video capture."""
def __init__(self, video_file_obj):
assert video_file_obj.delete
assert not video_file_obj.close_called
self._video_file_obj = video_file_obj
self._tab_contents_bounding_box = None
def UploadToCloudStorage(self, bucket, target_path):
"""Uploads video file to cloud storage.
Args:
target_path: Path indicating where to store the file in cloud storage.
"""
cloud_storage.Insert(bucket, target_path, self._video_file_obj.name)
def GetVideoFrameIter(self):
"""Returns the iteration for processing the video capture.
This looks for the initial color flash in the first frame to establish the
tab content boundaries and then omits all frames displaying the flash.
Yields:
(time_ms, image) tuples representing each video keyframe. Only the first
      frame in a run of sequential duplicate bitmaps is typically included.
time_ms is milliseconds since navigationStart.
image may be a telemetry.core.Bitmap, or a numpy array depending on
whether numpy is installed.
"""
frame_generator = self._FramesFromMp4(self._video_file_obj.name)
# Flip through frames until we find the initial tab contents flash.
content_box = None
for _, bmp in frame_generator:
content_box = self._FindHighlightBoundingBox(
bmp, HIGHLIGHT_ORANGE_FRAME)
if content_box:
break
if not content_box:
raise BoundingBoxNotFoundException(
'Failed to identify tab contents in video capture.')
# Flip through frames until the flash goes away and emit that as frame 0.
timestamp = 0
for timestamp, bmp in frame_generator:
if not self._FindHighlightBoundingBox(bmp, HIGHLIGHT_ORANGE_FRAME):
yield 0, image_util.Crop(bmp, *content_box)
break
start_time = timestamp
for timestamp, bmp in frame_generator:
yield timestamp - start_time, image_util.Crop(bmp, *content_box)
def _FindHighlightBoundingBox(self, bmp, color, bounds_tolerance=8,
color_tolerance=8):
"""Returns the bounding box of the content highlight of the given color.
Raises:
      BoundingBoxNotFoundException if the highlight could not be found.
"""
content_box, pixel_count = image_util.GetBoundingBox(bmp, color,
tolerance=color_tolerance)
if not content_box:
return None
# We assume arbitrarily that tabs are all larger than 200x200. If this
# fails it either means that assumption has changed or something is
# awry with our bounding box calculation.
if content_box[2] < 200 or content_box[3] < 200:
raise BoundingBoxNotFoundException('Unexpectedly small tab contents.')
# TODO(tonyg): Can this threshold be increased?
if pixel_count < 0.9 * content_box[2] * content_box[3]:
raise BoundingBoxNotFoundException(
'Low count of pixels in tab contents matching expected color.')
# Since we allow some fuzziness in bounding box finding, we want to make
# sure that the bounds are always stable across a run. So we cache the
# first box, whatever it may be.
#
# This relies on the assumption that since Telemetry doesn't know how to
# resize the window, we should always get the same content box for a tab.
# If this assumption changes, this caching needs to be reworked.
if not self._tab_contents_bounding_box:
self._tab_contents_bounding_box = content_box
# Verify that there is only minor variation in the bounding box. If it's
# just a few pixels, we can assume it's due to compression artifacts.
for x, y in zip(self._tab_contents_bounding_box, content_box):
if abs(x - y) > bounds_tolerance:
# If this fails, it means either that either the above assumption has
# changed or something is awry with our bounding box calculation.
raise BoundingBoxNotFoundException(
'Unexpected change in tab contents box.')
return self._tab_contents_bounding_box
def _FramesFromMp4(self, mp4_file):
host_platform = platform.GetHostPlatform()
if not host_platform.CanLaunchApplication('avconv'):
host_platform.InstallApplication('avconv')
def GetDimensions(video):
proc = subprocess.Popen(['avconv', '-i', video], stderr=subprocess.PIPE)
dimensions = None
output = ''
for line in proc.stderr.readlines():
output += line
if 'Video:' in line:
dimensions = line.split(',')[2]
dimensions = map(int, dimensions.split()[0].split('x'))
break
proc.communicate()
assert dimensions, ('Failed to determine video dimensions. output=%s' %
output)
return dimensions
def GetFrameTimestampMs(stderr):
"""Returns the frame timestamp in integer milliseconds from the dump log.
The expected line format is:
' dts=1.715 pts=1.715\n'
We have to be careful to only read a single timestamp per call to avoid
deadlock because avconv interleaves its writes to stdout and stderr.
"""
while True:
line = ''
next_char = ''
while next_char != '\n':
next_char = stderr.read(1)
line += next_char
if 'pts=' in line:
return int(1000 * float(line.split('=')[-1]))
dimensions = GetDimensions(mp4_file)
frame_length = dimensions[0] * dimensions[1] * 3
frame_data = bytearray(frame_length)
# Use rawvideo so that we don't need any external library to parse frames.
proc = subprocess.Popen(['avconv', '-i', mp4_file, '-vcodec',
'rawvideo', '-pix_fmt', 'rgb24', '-dump',
'-loglevel', 'debug', '-f', 'rawvideo', '-'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
while True:
num_read = proc.stdout.readinto(frame_data)
if not num_read:
        return  # end of stream; raising StopIteration inside a generator is an error under PEP 479 (Python 3.7+)
assert num_read == len(frame_data), 'Unexpected frame size: %d' % num_read
yield (GetFrameTimestampMs(proc.stderr),
image_util.FromRGBPixels(dimensions[0], dimensions[1], frame_data))
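# Illustrative consumption of the frame iterator (a sketch; the temp-file setup
# and the process() callback are assumptions for the example, not part of this
# module):
#   video = Video(tempfile.NamedTemporaryFile(suffix='.mp4'))
#   for time_ms, frame in video.GetVideoFrameIter():
#       process(frame)  # each frame is already cropped to the detected tab contents box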
| 38.889535 | 80 | 0.687995 | 6,292 | 0.940649 | 3,609 | 0.539543 | 0 | 0 | 0 | 0 | 2,901 | 0.433697 |
b47a8507030b3a977cb62f1a1969dd088038ec0e | 2,545 | py | Python | homeassistant/components/renault/renault_coordinator.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | ["Apache-2.0"] | 5 | 2019-02-24T11:46:18.000Z | 2019-05-28T17:37:21.000Z | homeassistant/components/renault/renault_coordinator.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | ["Apache-2.0"] | 77 | 2020-07-16T16:43:09.000Z | 2022-03-31T06:14:37.000Z | homeassistant/components/renault/renault_coordinator.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | ["Apache-2.0"] | 7 | 2021-03-20T12:34:01.000Z | 2021-12-02T10:13:52.000Z |
"""Proxy to handle account communication with Renault servers."""
from __future__ import annotations
from collections.abc import Awaitable
from datetime import timedelta
import logging
from typing import Callable, TypeVar
from renault_api.kamereon.exceptions import (
AccessDeniedException,
KamereonResponseException,
NotSupportedException,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
T = TypeVar("T")
class RenaultDataUpdateCoordinator(DataUpdateCoordinator[T]):
"""Handle vehicle communication with Renault servers."""
def __init__(
self,
hass: HomeAssistant,
logger: logging.Logger,
*,
name: str,
update_interval: timedelta,
update_method: Callable[[], Awaitable[T]],
) -> None:
"""Initialise coordinator."""
super().__init__(
hass,
logger,
name=name,
update_interval=update_interval,
update_method=update_method,
)
self.access_denied = False
self.not_supported = False
async def _async_update_data(self) -> T:
"""Fetch the latest data from the source."""
if self.update_method is None:
raise NotImplementedError("Update method not implemented")
try:
return await self.update_method()
except AccessDeniedException as err:
# Disable because the account is not allowed to access this Renault endpoint.
self.update_interval = None
self.access_denied = True
raise UpdateFailed(f"This endpoint is denied: {err}") from err
except NotSupportedException as err:
# Disable because the vehicle does not support this Renault endpoint.
self.update_interval = None
self.not_supported = True
raise UpdateFailed(f"This endpoint is not supported: {err}") from err
except KamereonResponseException as err:
# Other Renault errors.
raise UpdateFailed(f"Error communicating with API: {err}") from err
async def async_config_entry_first_refresh(self) -> None:
"""Refresh data for the first time when a config entry is setup.
Contrary to base implementation, we are not raising ConfigEntryNotReady
but only updating the `access_denied` and `not_supported` flags.
"""
await self._async_refresh(log_failures=False, raise_on_auth_failed=True)
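# Minimal usage sketch (illustrative; the vehicle callable and the 5-minute
# interval below are assumptions, not values defined in this module):
#   coordinator = RenaultDataUpdateCoordinator(
#       hass, logger, name="battery",
#       update_interval=timedelta(minutes=5),
#       update_method=vehicle.get_battery_status)
#   await coordinator.async_config_entry_first_refresh()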
| 34.863014 | 89 | 0.673477 | 2,032 | 0.798428 | 0 | 0 | 0 | 0 | 1,381 | 0.542633 | 738 | 0.28998 |
b47abe86e6343865f80d317d811f2885ba219aec | 122 | py | Python | Pacote Dowload/pythonProject/aula020.py | J297-hub/exercicios-de-python | cde355f9aeb43abce7890cd9879646bfe768190e | ["MIT"] | null | null | null | Pacote Dowload/pythonProject/aula020.py | J297-hub/exercicios-de-python | cde355f9aeb43abce7890cd9879646bfe768190e | ["MIT"] | null | null | null | Pacote Dowload/pythonProject/aula020.py | J297-hub/exercicios-de-python | cde355f9aeb43abce7890cd9879646bfe768190e | ["MIT"] | null | null | null |
def soma(a, b):
    print(f'A = {a} and B = {b}')
    s = a + b
    print(f'The sum A + B = {s}')
# Main program
soma(4,5)
| 13.555556 | 31 | 0.491803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.483607 |
b47bec3668d864394eed49330fbd6e060a5d6f09 | 2,270 | py | Python | docs/script/CLI_docker_image_uri_script.py | ai4eu/on-boarding | 6de8c323bb5f2d18612ba807b0caa8e5954bf8a8 | ["Apache-2.0"] | null | null | null | docs/script/CLI_docker_image_uri_script.py | ai4eu/on-boarding | 6de8c323bb5f2d18612ba807b0caa8e5954bf8a8 | ["Apache-2.0"] | 1 | 2021-06-15T10:45:35.000Z | 2021-06-15T10:45:35.000Z | docs/script/CLI_docker_image_uri_script.py | ai4eu/on-boarding | 6de8c323bb5f2d18612ba807b0caa8e5954bf8a8 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# ===================================================================================
# Copyright (C) 2019 Fraunhofer Gesellschaft. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by Fraunhofer Gesellschaft
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END==========================================================
"""
Provides an example of Docker URI cli on-boarding
"""
import requests
import os
import json
# properties of the model
model_name = "my-model-1"
dockerImageURI = "cicd.ai4eu-dev.eu:7444/myimages/onboardingtest:v3" #Docker image URI looks like: example.com:port/image-tag:version
license_file = "./license-1.0.0.json"
protobuf_file = "./model.proto"
# setup parameters
host = os.environ['ACUMOS_HOST'] # FQHN like aiexp-preprod.ai4europe.eu
token = os.environ['ACUMOS_TOKEN'] # format is 'acumos_username:API_TOKEN'
advanced_api = "https://" + host + ":443/onboarding-app/v2/advancedModel"
files= {'license': ('license.json', open(license_file, 'rb'), 'application.json'),
'protobuf': ('model.proto', open(protobuf_file, 'rb'), 'text/plain')}
headers = {"Accept": "application/json",
"modelname": model_name,
"Authorization": token,
"dockerFileURL": dockerImageURI,
'isCreateMicroservice': 'false'}
#send request
response = requests.post(advanced_api, files=files, headers=headers)
#check response
if response.status_code == 201:
body = json.loads(response.text)
solution_id = body['result']['solutionId']
print("Docker uri is pushed successfully on {" + host + "}, response is: ", response.status_code, " - solutionId: ", solution_id)
else:
print("Docker uri is not pushed on {" + host + "}, response is: ", response.status_code)
| 43.653846 | 133 | 0.640529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,617 | 0.712335 |
b47d1e8ef224c2f61bfc09b6164ea7a99d634c35 | 1,169 | py | Python | zeus/networks/pytorch/backbones/getter.py | shaido987/vega | 14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6 | ["MIT"] | 1 | 2021-05-08T07:47:44.000Z | 2021-05-08T07:47:44.000Z | zeus/networks/pytorch/backbones/getter.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | ["MIT"] | null | null | null | zeus/networks/pytorch/backbones/getter.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ResNetVariant for Detection."""
from zeus.common import ClassType, ClassFactory
from zeus.modules.connections.connections import MultiOutputGetter
@ClassFactory.register(ClassType.NETWORK)
class BackboneGetter(MultiOutputGetter):
"""Backbone Getter form torchvision ResNet."""
def __init__(self, backbone_name, layer_names=None, **kwargs):
backbone = ClassFactory.get_cls(ClassType.NETWORK, backbone_name)
backbone = backbone(**kwargs) if kwargs else backbone()
if hasattr(backbone, "layers_name"):
layer_names = backbone.layers_name()
layer_names = layer_names or ['layer1', 'layer2', 'layer3', 'layer4']
super(BackboneGetter, self).__init__(backbone, layer_names)
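# Usage sketch (illustrative; 'ResNet' stands for any backbone class already
# registered under ClassType.NETWORK in this framework):
#   getter = BackboneGetter('ResNet')
#   feature_maps = getter(images)  # one output per name in layer_names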
| 43.296296 | 77 | 0.739093 | 537 | 0.459367 | 0 | 0 | 579 | 0.495295 | 0 | 0 | 552 | 0.472198 |
b47e174f27a9621be97952b2420f06f45e7e9776 | 836 | py | Python | pure_ee/lista.py | geosconsulting/gee_wapor | c3c451fcb21664172a74647fe5d9e56f312aa1df | ["Apache-1.1"] | 2 | 2017-11-30T18:45:59.000Z | 2018-04-08T16:47:43.000Z | pure_ee/lista.py | geosconsulting/gee_wapor | c3c451fcb21664172a74647fe5d9e56f312aa1df | ["Apache-1.1"] | null | null | null | pure_ee/lista.py | geosconsulting/gee_wapor | c3c451fcb21664172a74647fe5d9e56f312aa1df | ["Apache-1.1"] | 1 | 2021-09-09T06:03:44.000Z | 2021-09-09T06:03:44.000Z |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 20 08:40:22 2017
@author: fabio
"""
import ee
import ee.mapclient
ee.Initialize()
collection = ee.ImageCollection('MODIS/MCD43A4_NDVI')
lista = collection.toList(10)
#print lista.getInfo()
image = ee.Image('LC8_L1T/LC81910312016217LGN00')
#print image.getInfo()
bandNames = image.bandNames()
print('Band Names: ', bandNames.getInfo())
b1scale = image.select('B1').projection().nominalScale()
print('Band 1 scale: ', b1scale.getInfo())
b8scale = image.select('B8').projection().nominalScale()
print('Band 8 scale: ', b8scale.getInfo())
ndvi = image.normalizedDifference(['B5', 'B4'])
ee.mapclient.addToMap(ndvi,
{'min' : -1,
"max": 1},
"NDVI")
ee.mapclient.centerMap(12.3536,41.7686,9)
| 19.904762 | 56 | 0.643541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.33134 |
b47e5c3d3423860e078e6b322a1719db193870cb | 3,107 | py | Python | pkg/tests/helpers_test.py | hborawski/rules_pkg | 8d542763a3959db79175404758f46c7f3f385fa5 | ["Apache-2.0"] | null | null | null | pkg/tests/helpers_test.py | hborawski/rules_pkg | 8d542763a3959db79175404758f46c7f3f385fa5 | ["Apache-2.0"] | null | null | null | pkg/tests/helpers_test.py | hborawski/rules_pkg | 8d542763a3959db79175404758f46c7f3f385fa5 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from private import helpers
class GetFlagValueTestCase(unittest.TestCase):
def testNonStripped(self):
self.assertEqual(helpers.GetFlagValue('value ', strip=False), 'value ')
def testStripped(self):
self.assertEqual(helpers.GetFlagValue('value ', strip=True), 'value')
def testNonStripped_fromFile(self):
with tempfile.TemporaryDirectory() as temp_d:
argfile_path = os.path.join(temp_d, 'argfile')
with open(argfile_path, 'wb') as f:
f.write(b'value ')
self.assertEqual(
helpers.GetFlagValue('@'+argfile_path, strip=False), 'value ')
def testStripped_fromFile(self):
with tempfile.TemporaryDirectory() as temp_d:
argfile_path = os.path.join(temp_d, 'argfile')
with open(argfile_path, 'wb') as f:
f.write(b'value ')
self.assertEqual(
helpers.GetFlagValue('@'+argfile_path, strip=True), 'value')
class SplitNameValuePairAtSeparatorTestCase(unittest.TestCase):
def testNoSep(self):
key, val = helpers.SplitNameValuePairAtSeparator('abc', '=')
self.assertEqual(key, 'abc')
self.assertEqual(val, '')
def testNoSepWithEscape(self):
key, val = helpers.SplitNameValuePairAtSeparator('a\\=bc', '=')
self.assertEqual(key, 'a=bc')
self.assertEqual(val, '')
def testNoSepWithDanglingEscape(self):
key, val = helpers.SplitNameValuePairAtSeparator('abc\\', '=')
self.assertEqual(key, 'abc')
self.assertEqual(val, '')
def testHappyCase(self):
key, val = helpers.SplitNameValuePairAtSeparator('abc=xyz', '=')
self.assertEqual(key, 'abc')
self.assertEqual(val, 'xyz')
def testHappyCaseWithEscapes(self):
key, val = helpers.SplitNameValuePairAtSeparator('a\\=\\=b\\=c=xyz', '=')
self.assertEqual(key, 'a==b=c')
self.assertEqual(val, 'xyz')
def testStopsAtFirstSep(self):
key, val = helpers.SplitNameValuePairAtSeparator('a=b=c', '=')
self.assertEqual(key, 'a')
self.assertEqual(val, 'b=c')
def testDoesntUnescapeVal(self):
key, val = helpers.SplitNameValuePairAtSeparator('abc=x\\=yz\\', '=')
self.assertEqual(key, 'abc')
# the val doesn't get unescaped at all
self.assertEqual(val, 'x\\=yz\\')
def testUnescapesNonsepCharsToo(self):
key, val = helpers.SplitNameValuePairAtSeparator('na\\xffme=value', '=')
# this behaviour is surprising
self.assertEqual(key, 'naxffme')
self.assertEqual(val, 'value')
if __name__ == '__main__':
unittest.main()
| 33.408602 | 77 | 0.700032 | 2,381 | 0.766334 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.307692 |
b47f33a5bfd7dd5f1e09089984f041a42647c888 | 177 | py | Python | atendimento/admin.py | alantinoco/django-crmsmart | f8bd3404e0dfdf4a2976ec8bbdaee27a012f9981 | ["MIT"] | null | null | null | atendimento/admin.py | alantinoco/django-crmsmart | f8bd3404e0dfdf4a2976ec8bbdaee27a012f9981 | ["MIT"] | null | null | null | atendimento/admin.py | alantinoco/django-crmsmart | f8bd3404e0dfdf4a2976ec8bbdaee27a012f9981 | ["MIT"] | null | null | null |
from django.contrib import admin
from .models import Contato, Venda, FormaPagamento
admin.site.register(Contato)
admin.site.register(Venda)
admin.site.register(FormaPagamento)
| 25.285714 | 50 | 0.830508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b47fefb3dba103290fb50268daa1c3e2fc5aec99 | 11,967 | py | Python | pome/models/transaction.py | pome-gr/pome | c6f4f9842cb7047dbd4e739e3be9b049a1887c58 | ["MIT"] | 3 | 2021-09-10T09:49:21.000Z | 2021-12-14T12:44:53.000Z | pome/models/transaction.py | pome-gr/pome | c6f4f9842cb7047dbd4e739e3be9b049a1887c58 | ["MIT"] | 2 | 2021-09-10T11:17:51.000Z | 2021-09-11T20:43:30.000Z | pome/models/transaction.py | pome-gr/pome | c6f4f9842cb7047dbd4e739e3be9b049a1887c58 | ["MIT"] | null | null | null |
import os
import re
import urllib
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Union
from money.currency import Currency
from money.money import Money
from werkzeug.utils import secure_filename
from pome import g
from pome.models.encoder import PomeEncodable
RECORDED_TX_FOLDER_NAME = os.path.join("transactions", "recorded")
class Amount(PomeEncodable):
def __init__(self, currency_code: str, raw_amount_in_main_currency: str):
# Putting this there to avoid circular imports
from pome.currency import DECIMAL_PRECISION_FOR_CURRENCY
amount_regex = re.compile(
"^[0-9]*(\.[0-9]{0," + str(DECIMAL_PRECISION_FOR_CURRENCY) + "})?$"
)
if not bool(amount_regex.fullmatch(raw_amount_in_main_currency)):
raise ValueError(
f"Invalid payload amount {raw_amount_in_main_currency}. Decimal separator is '.' and maximum number of decimals allowed is set by the currency (EUR and USD are 2 decimals)."
)
self.raw_amount_in_main_currency: str = raw_amount_in_main_currency
self.currency_code: str = currency_code
def amount(self, formatted=False) -> Union[Money, str]:
to_ret = Money(self.raw_amount_in_main_currency, Currency(self.currency_code))
if not formatted:
return to_ret
return to_ret.format(g.company.locale)
@classmethod
def from_payload(cls, payload: str):
try:
return cls(g.company.accounts_currency_code, payload)
except ValueError as e:
raise e
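# Illustrative behaviour of Amount (a sketch; assumes a company configured with
# a 2-decimal accounts currency such as EUR):
#   Amount("EUR", "12.50").amount(formatted=True)  # e.g. '12,50 €' depending on locale
#   Amount("EUR", "12.505")                        # raises ValueError: too many decimals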
class TransactionAttachmentOnDisk(PomeEncodable):
def __init__(self, filename: str, filepath: str):
self.filename = filename
self.filepath = filepath
class TransactionAttachmentPayload(PomeEncodable):
def __init__(self, filename: str, b64_content: str):
self.filename = filename
self.b64_content = b64_content
def save_on_disk(self, tx_path: str) -> TransactionAttachmentOnDisk:
filepath = os.path.join(tx_path, self.filename)
response = urllib.request.urlopen(self.b64_content)
with open(filepath, "wb") as f:
f.write(response.file.read())
return TransactionAttachmentOnDisk(self.filename, filepath)
@classmethod
def from_payload(cls, payload):
try:
if not "filename" in payload:
raise ValueError("Field `filename` was not set in attached file.")
if not "b64_content" in payload:
raise ValueError("Filed `b64_content` was not set in attached file.")
return cls(secure_filename(payload["filename"]), payload["b64_content"])
except ValueError as e:
raise e
class TransactionLine(PomeEncodable):
def __init__(self, account_dr_code: str, account_cr_code: str, amount: Amount):
self.account_dr_code: str = account_dr_code
self.account_cr_code: str = account_cr_code
self.amount: Amount = amount
if not g.accounts_chart.is_valid_account_code(self.account_dr_code):
raise ValueError(f"Invalid dr account code {self.account_dr_code }")
if not g.accounts_chart.is_valid_account_code(self.account_cr_code):
raise ValueError(f"Invalid cr account code {self.account_cr_code}")
def _post_load_json(self):
self.amount = Amount.from_json_dict(self.amount)
@classmethod
def from_payload(cls, payload):
try:
if type(payload) != dict:
raise ValueError(f"Invalid transaction line {payload}.")
if "account_dr" not in payload:
raise ValueError(f"Field `account_dr` was not set in {payload}.")
if "account_cr" not in payload:
raise ValueError(f"Field `account_cr` was not set in {payload}.")
if "raw_amount_in_main_currency" not in payload:
raise ValueError(
f"Field `raw_amount_in_main_currency` was not set in {payload}."
)
return cls(
str(payload["account_dr"]),
str(payload["account_cr"]),
Amount.from_payload(payload["raw_amount_in_main_currency"]),
)
except ValueError as e:
raise e
class Transaction(PomeEncodable):
"""Stores all the metadata associated to a transaction."""
default_filename = "tx.json"
def __init__(
self,
date: Union[None, str],
lines: List[TransactionLine],
attachments: Union[
List[TransactionAttachmentOnDisk], List[TransactionAttachmentPayload]
],
narrative: str = "",
comments: str = "",
date_recorded: Union[None, str] = None,
id: Union[None, str] = None,
):
self.date: Union[None, str] = date
self.lines: List[TransactionLine] = lines
self.attachments: Union[
List[TransactionAttachmentOnDisk], List[TransactionAttachmentPayload]
] = attachments
self.date_recorded: Union[None, str] = date_recorded
self.narrative: str = narrative
self.comments: str = comments
self.id: Union[None, str] = id
if not self.validate_date(self.date):
raise ValueError(
f"Invalid date {self.date}. A valid date is yyyy-mm-dd, for instance 2021-08-30."
)
if not self.validate_date(self.date_recorded, True):
raise ValueError(
f"Invalid record date {self.date_recorded}. A valid date record date is ISO8601, for instance 2008-08-30T01:45:36.123Z."
)
@classmethod
def get_transactions_id_sorted_by_date_recorded(cls, transactions):
return [
tx.id
for tx in sorted(list(transactions.values()), key=lambda x: x.date_recorded)
]
@classmethod
def order_recorded(cls, transactions):
sorted_transactions = cls.get_transactions_id_sorted_by_date_recorded(
transactions
)
def f(tx_id):
return sorted_transactions.index(tx_id) + 1
return f
def _post_load_json(self):
self.lines = list(map(TransactionLine.from_json_dict, self.lines))
self.attachments = list(
map(TransactionAttachmentOnDisk.from_json_dict, self.attachments)
)
def total_amount(self, formatted=False) -> Union[Money, str]:
to_return = Money("0", Currency(g.company.accounts_currency_code))
for line in self.lines:
to_return += line.amount.amount()
if not formatted:
return to_return
return to_return.format(g.company.locale)
@classmethod
def fetch_all_recorded_transactions(cls) -> Dict[str, "Transaction"]:
to_return = {}
try:
for tx_folder in os.listdir(RECORDED_TX_FOLDER_NAME):
tx_file = os.path.join(
RECORDED_TX_FOLDER_NAME, tx_folder, cls.default_filename
)
if not os.path.exists(tx_file):
continue
to_return[tx_folder] = cls.from_json_file(tx_file)
if tx_folder != to_return[tx_folder].id:
raise ValueError(
f"Transaction id `{to_return[tx_folder].id}` stored in `{tx_file}` does not match folder name {tx_folder}`"
)
except FileNotFoundError as e:
return {}
return to_return
def commit_message(self) -> str:
to_return = self.date + "\n"
to_return += "=" * len(self.date) + "\n"
to_return += "lines:\n"
for line in self.lines:
to_return += " " + (
"DR "
+ g.accounts_chart.account_codes[line.account_dr_code].pretty_name()
+ "\n"
+ "\tCR "
+ g.accounts_chart.account_codes[line.account_cr_code].pretty_name()
+ "\n"
+ " "
+ line.amount.amount().format(g.company.locale)
+ "\n\n"
)
if self.narrative != "":
to_return += "narrative:" + "\n"
to_return += " " + self.narrative + "\n"
if self.comments != "":
to_return += "\n" + "comments:" + "\n"
to_return += " " + self.comments + "\n"
if len(self.attachments) != 0:
to_return += "\n" + "attachments:" + "\n"
for file in self.attachments:
to_return += f" - {file.filepath}\n"
return to_return
def assign_suitable_id(self) -> Union[None, str]:
if self.id is not None:
return self.id
if self.date is None:
return None
self.id = self.date
i = 1
while os.path.exists(self.get_tx_path()):
self.id = self.date + f"_{i}"
i += 1
return self.id
def get_tx_path(self, absolute: bool = False) -> Union[None, str]:
if self.id is None:
return None
if not absolute:
return os.path.join(RECORDED_TX_FOLDER_NAME, self.id)
else:
return os.path.join(os.getcwd(), RECORDED_TX_FOLDER_NAME, self.id)
def save_on_disk(self):
if self.get_tx_path() is None:
return
Path(self.get_tx_path()).mkdir(parents=True, exist_ok=True)
with open(os.path.join(self.get_tx_path(), self.default_filename), "w") as f:
for i in range(len(self.attachments)):
if isinstance(self.attachments[i], TransactionAttachmentPayload):
self.attachments[i] = self.attachments[i].save_on_disk(
self.get_tx_path()
)
f.write(self.to_json())
regex_date = re.compile("^\d{4}\-(0[1-9]|1[012])\-(0[1-9]|[12][0-9]|3[01])$")
regex_ISO8601 = re.compile(
"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$"
)
@classmethod
def validate_date(cls, date_str, ISO8601=False):
p = cls.regex_date if not ISO8601 else cls.regex_ISO8601
return bool(p.fullmatch(date_str))
@classmethod
def from_payload(cls, json_payload):
try:
if not "date" in json_payload:
raise ValueError(f"Field `date` was not set. Format is yyyy-mm-dd.")
date = json_payload["date"]
if not "lines" in json_payload:
raise ValueError("No transaction lines specified.")
lines = []
for line in json_payload["lines"]:
try:
tx_line = TransactionLine.from_payload(line)
lines.append(tx_line)
except ValueError as e:
raise e
narrative = ""
if "narrative" in json_payload:
narrative = str(json_payload["narrative"])
comments = ""
if "comments" in json_payload:
comments = str(json_payload["comments"])
file_list = []
if "files" in json_payload:
if type(json_payload["files"]) != list:
raise ValueError("Invalid file payload.")
for file in json_payload["files"]:
file_list.append(TransactionAttachmentPayload.from_payload(file))
date_recorded = datetime.utcnow().isoformat() + "+00:00"
if "date_recorded" in json_payload:
date_recorded = json_payload["date_recorded"]
toReturn = cls(
date,
lines,
file_list,
narrative,
comments,
date_recorded=date_recorded,
)
return toReturn
except ValueError as e:
raise (e)
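# Illustrative end-to-end flow (a sketch; assumes a request context where
# g.company and g.accounts_chart are initialised):
#   tx = Transaction.from_payload(payload_dict)
#   tx.assign_suitable_id()   # e.g. '2021-08-30', or '2021-08-30_1' if taken
#   tx.save_on_disk()         # writes transactions/recorded/<id>/tx.json + attachments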
| 37.164596 | 189 | 0.585694 | 11,575 | 0.967243 | 0 | 0 | 4,640 | 0.387733 | 0 | 0 | 1,838 | 0.153589 |
b4823115f908163488e925d3fd26ab3097c5417b | 4,350 | py | Python | site/external/moya.logins/py/oauth1.py | moyaproject/moya-techblog | 4f7d606b22773db40850b742945e83e328c63bb7 | ["MIT"] | 31 | 2015-11-19T04:47:05.000Z | 2021-12-29T19:10:58.000Z | site/external/moya.logins/py/oauth1.py | moyaproject/moya-techblog | 4f7d606b22773db40850b742945e83e328c63bb7 | ["MIT"] | 1 | 2015-11-19T14:28:52.000Z | 2015-11-24T00:14:25.000Z | site/external/moya.logins/py/oauth1.py | moyaproject/moya-techblog | 4f7d606b22773db40850b742945e83e328c63bb7 | ["MIT"] | 1 | 2021-08-10T15:09:01.000Z | 2021-08-10T15:09:01.000Z |
from __future__ import unicode_literals
from __future__ import print_function
import moya
from moya.compat import text_type
from requests_oauthlib import OAuth1Session
def get_credentials(provider, credentials):
client_id = credentials.client_id or provider.get('client_id', None)
client_secret = credentials.client_secret or provider.get('client_secret', None)
return client_id, client_secret
@moya.expose.macro('get_oauth_resource_owner')
def get_oauth_resource_owner(app, provider, credentials):
client_id, client_secret = get_credentials(provider, credentials)
oauth = OAuth1Session(client_id, client_secret=client_secret)
request_token_url = provider['request_token_url']
response = oauth.fetch_request_token(request_token_url)
resource_owner_key = response.get('oauth_token')
resource_owner_secret = response.get('oauth_token_secret')
result = {
"key": resource_owner_key,
"secret": resource_owner_secret
}
return result
@moya.expose.macro('get_oauth_authorize_url')
def get_oauth_authorize_url(app, provider, credentials):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
oauth = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret)
authorization_url = oauth.authorization_url(provider['authorization_base_url'])
return authorization_url
@moya.expose.macro('get_oauth_access_token')
def get_oauth_access_token(app, provider, credentials, verifier):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
oauth = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
access_token_url = provider['access_token_url']
oauth_tokens = oauth.fetch_access_token(access_token_url)
return oauth_tokens
@moya.expose.macro('get_oauth_profile')
def get_oauth_profile(app, provider, credentials, verifier):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
resources = provider.get('resources', {})
session = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
access_token_url = provider['access_token_url']
try:
oauth_tokens = session.fetch_access_token(access_token_url)
except Exception as e:
app.throw('moya.logins.access-fail',
text_type(e))
info = {}
for scope, scope_url in sorted(resources.items()):
try:
response = session.get(scope_url)
except Exception as e:
app.throw('moya.logins.get-scope-fail',
text_type(e),
diagnosis="There may be a connectivity issue getting scope information.",
scope=scope,
scope_url=scope_url)
try:
info[scope] = scope_data = response.json()
#if(context['.debug']):
# context['.console'].obj(context, scope_data)
except:
pass
provider_profile = provider.get('profile', {})
profile = {}
context['_oauth_info'] = info
with context.frame('_oauth_info'):
for k, v in provider_profile.items():
try:
profile[k] = context.eval(v)
except:
pass
return {'profile': profile, 'info': info}
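# The macros above follow the usual three-legged OAuth1 sequence (a sketch of
# the intended call order; the calling templates drive the actual flow):
#   1. get_oauth_resource_owner  -> request token, stored under .session.oauth1
#   2. get_oauth_authorize_url   -> URL the user is redirected to
#   3. get_oauth_access_token    -> exchanges the verifier for an access token
#   4. get_oauth_profile         -> fetches each configured resource scope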
| 38.157895 | 95 | 0.672184 | 0 | 0 | 0 | 0 | 3,928 | 0.902989 | 0 | 0 | 718 | 0.165057 |
b482963dac3d21247c020b102c3b4900ec667eba | 6,599 | py | Python | cgp.py | BakudanKame/CGPCatAndRat | 29a57b6bc5d00ef11acfe78afa4ff2b418d0b971 | ["MIT"] | null | null | null | cgp.py | BakudanKame/CGPCatAndRat | 29a57b6bc5d00ef11acfe78afa4ff2b418d0b971 | ["MIT"] | 1 | 2021-03-05T17:37:44.000Z | 2021-03-05T17:37:44.000Z | cgp.py | BakudanKame/CGPCatAndRat | 29a57b6bc5d00ef11acfe78afa4ff2b418d0b971 | ["MIT"] | 1 | 2021-03-04T19:47:42.000Z | 2021-03-04T19:47:42.000Z |
"""
Cartesian genetic programming
"""
import operator as op
import random
import copy
import math
from settings import VERBOSE, N_COLS, LEVEL_BACK
class Function:
"""
A general function
"""
def __init__(self, f, arity, name=None):
self.f = f
self.arity = arity
self.name = f.__name__ if name is None else name
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
class Node:
"""
A node in CGP graph
"""
def __init__(self, max_arity):
"""
Initialize this node randomly
"""
self.i_func = None
self.i_inputs = [None] * max_arity
self.weights = [None] * max_arity
self.i_output = None
self.output = None
self.active = False
class Individual:
"""
An individual (chromosome, genotype, etc.) in evolution
"""
function_set = None
weight_range = [-1, 1]
max_arity = 3
n_inputs = 3
n_outputs = 1
n_cols = N_COLS
level_back = LEVEL_BACK
def __init__(self):
self.nodes = []
for pos in range(self.n_cols):
self.nodes.append(self._create_random_node(pos))
for i in range(1, self.n_outputs + 1):
self.nodes[-i].active = True
self.fitness = None
self._active_determined = False
def _create_random_node(self, pos):
node = Node(self.max_arity)
node.i_func = random.randint(0, len(self.function_set) - 1)
for i in range(self.function_set[node.i_func].arity):
node.i_inputs[i] = random.randint(max(pos - self.level_back, -self.n_inputs), pos - 1)
node.weights[i] = random.uniform(self.weight_range[0], self.weight_range[1])
node.i_output = pos
return node
def _determine_active_nodes(self):
"""
Determine which nodes in the CGP graph are active
"""
# check each node in reverse order
n_active = 0
for node in reversed(self.nodes):
if node.active:
n_active += 1
for i in range(self.function_set[node.i_func].arity):
i_input = node.i_inputs[i]
if i_input >= 0: # a node (not an input)
self.nodes[i_input].active = True
if VERBOSE:
print("# active genes: ", n_active)
def eval(self, *args):
"""
Given inputs, evaluate the output of this CGP individual.
:return the final output value
"""
if not self._active_determined:
self._determine_active_nodes()
self._active_determined = True
# forward pass: evaluate
for node in self.nodes:
if node.active:
inputs = []
for i in range(self.function_set[node.i_func].arity):
i_input = node.i_inputs[i]
w = node.weights[i]
if i_input < 0:
inputs.append(args[-i_input - 1] * w)
else:
inputs.append(self.nodes[i_input].output * w)
node.output = self.function_set[node.i_func](*inputs)
return self.nodes[-1].output
def mutate(self, mut_rate=0.01):
"""
Mutate this individual. Each gene is varied with probability *mut_rate*.
:param mut_rate: mutation probability
:return a child after mutation
"""
child = copy.deepcopy(self)
for pos, node in enumerate(child.nodes):
# mutate the function gene
if random.random() < mut_rate:
node.i_func = random.choice(range(len(self.function_set)))
# mutate the input genes (connection genes)
arity = self.function_set[node.i_func].arity
for i in range(arity):
if node.i_inputs[i] is None or random.random() < mut_rate: # if the mutated function requires more arguments, then the last ones are None
node.i_inputs[i] = random.randint(max(pos - self.level_back, -self.n_inputs), pos - 1)
if node.weights[i] is None or random.random() < mut_rate:
node.weights[i] = random.uniform(self.weight_range[0], self.weight_range[1])
# initially an individual is not active except the last output node
node.active = False
for i in range(1, self.n_outputs + 1):
child.nodes[-i].active = True
child.fitness = None
child._active_determined = False
return child
def save(self):
file_object = open(r"SavedBrain", 'w+')
for pos, node in enumerate(self.nodes):
file_object.write(str(node.i_func))
file_object.write("\n")
file_object.write("\n")
if not self._active_determined:
self._determine_active_nodes()
self._active_determined = True
for pos, node in enumerate(self.nodes):
if node.active:
file_object.write(str(node.i_func))
file_object.write("\n")
file_object.write("\n")
activeNodes = []
for node in self.nodes:
if node.active:
activeNodes.append(node)
file_object.write(str(self.function_set[node.i_func].f))
file_object.close()
# function set
def protected_div(a, b):
if abs(b) < 1e-6:
return a
return a / b
fs = [Function(op.add, 2), Function(op.sub, 2), Function(op.mul, 2), Function(protected_div, 2), Function(op.neg, 1), Function(math.cos, 1), Function(math.sin, 1), Function(math.tan, 1), Function(math.atan2, 2)]
Individual.function_set = fs
Individual.max_arity = max(f.arity for f in fs)
def evolve(pop, mut_rate, mu, lambda_):
"""
Evolve the population *pop* using the mu + lambda evolutionary strategy
:param pop: a list of individuals, whose size is mu + lambda. The first mu ones are previous parents.
:param mut_rate: mutation rate
:return: a new generation of individuals of the same size
"""
pop = sorted(pop, key=lambda ind: ind.fitness) # stable sorting
parents = pop[-mu:]
# generate lambda new children via mutation
offspring = []
for _ in range(lambda_):
parent = random.choice(parents)
offspring.append(parent.mutate(mut_rate))
return parents + offspring
def create_population(n):
"""
Create a random population composed of n individuals.
"""
return [Individual() for _ in range(n)]
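# Illustrative (mu + lambda) loop (a sketch; MU, LAMBDA, N_GEN, MUT_RATE and the
# evaluate() fitness function are assumptions supplied by the calling script):
#   pop = create_population(MU + LAMBDA)
#   for _ in range(N_GEN):
#       for ind in pop:
#           ind.fitness = evaluate(ind)      # problem-specific, user-supplied
#       pop = evolve(pop, MUT_RATE, MU, LAMBDA)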
| 31.879227 | 211 | 0.582513 | 5,245 | 0.794817 | 0 | 0 | 0 | 0 | 0 | 0 | 1,384 | 0.209729 |
b48444fb43f7b50ccaf1395f967f3216e0c9b14c | 824 | py | Python | historia/pops/logic/refiner.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | ["MIT"] | 6 | 2016-04-26T18:39:36.000Z | 2021-09-01T09:13:38.000Z | historia/pops/logic/refiner.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | ["MIT"] | null | null | null | historia/pops/logic/refiner.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | ["MIT"] | 4 | 2016-04-10T23:47:23.000Z | 2021-08-15T11:40:28.000Z |
from historia.pops.logic.logic_base import LogicBase
from historia.economy.enums.resource import Good
class RefinerLogic(LogicBase):
def perform(self):
bread = self.get_good(Good.bread)
tools = self.get_good(Good.tools)
iron_ore = self.get_good(Good.iron_ore)
if bread is None or iron_ore is None:
# fine $2 for being idle
self.charge_idle_money()
elif tools is not None:
# convert iron_ore to iron
self.produce(Good.iron, 2)
self.consume(Good.iron_ore, 1)
self.consume(Good.bread, 1)
self.consume(Good.tools, 1, 0.1)
else:
# convert iron_ore to iron
self.produce(Good.iron, 1)
self.consume(Good.iron_ore, 1)
self.consume(Good.bread, 1)
| 32.96 | 52 | 0.601942 | 720 | 0.873786 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.092233 |
b484806f07273e81bf7d1386a37c1b227189ad08 | 2,386 | py | Python | image_processing/manual_features/extract-features.py | ColoredInsaneAsylums/PrivacySensitiveTranscription | 07cf8f8da222b606988e7b4dee6a6bffdd0b6fe6 | ["Apache-2.0"] | null | null | null | image_processing/manual_features/extract-features.py | ColoredInsaneAsylums/PrivacySensitiveTranscription | 07cf8f8da222b606988e7b4dee6a6bffdd0b6fe6 | ["Apache-2.0"] | 7 | 2021-03-18T20:23:36.000Z | 2022-03-11T23:16:18.000Z | image_processing/manual_features/extract-features.py | ColoredInsaneAsylums/Clustering-PrivacySensitiveTranscription | 07cf8f8da222b606988e7b4dee6a6bffdd0b6fe6 | ["Apache-2.0"] | 2 | 2017-11-29T21:36:31.000Z | 2018-10-13T01:45:03.000Z |
import argparse
import cv2
import numpy as np
import os
import _pickle as pickle
from descriptors import HOG
#from skimage.morphology import skeletonize
# run image filtering and HOG feature extraction
def main(im_path, desc_name):
print('[INFO] Preparing to extract features for images in \'' + im_path + '\'')
# track HOG feature vectors and corresponding images
features = {}
# image dimensions
width = 128
height = 64
# feature descriptor
print('[INFO] Using the ' + desc_name.upper() + ' feature descriptor')
if desc_name == 'hog':
descriptor = HOG()
# evaluate image files
print('[INFO] Processing images and computing features')
for filename in os.listdir(im_path):
if not filename.endswith('.jpg'):
continue
        im = cv2.imread(im_path + filename, cv2.IMREAD_GRAYSCALE)  # imread expects an IMREAD_* flag; cv2.COLOR_BGR2GRAY is a cvtColor code
# resize image
im = cv2.resize(im, (width,height))
# binarize using Otsu's method
im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
im[im == 255] = 1
# thin using Zhang and Suen's method
#im = skeletonize(im)
#im = im.astype(np.uint8)
# compute features
v = descriptor.compute(im)
features[filename] = v
# save data
print('[INFO] Saving features and corresponding image name to \'features/' + desc_name + '_features.pickle\'')
with open('./features/' + desc_name + '_features.pickle', 'wb') as handle:
pickle.dump(features, handle)
if __name__ == '__main__':
# require image directory and name of descriptor to use
parser = argparse.ArgumentParser(description='Extract image feature vectors using feature descriptors')
parser.add_argument('-p', '--path', required=True,
nargs='?', action='store', const='./images/',
type=str, dest='im_path',
help='The filepath of the image directory')
parser.add_argument('-d', '--descriptor', required=True,
choices=['hog'],
nargs='?', action='store', const='hog',
type=str, dest='desc_name',
help='The name of the descriptor to use')
args = vars(parser.parse_args())
im_path = args['im_path']
desc_name = args['desc_name']
main(im_path, desc_name)
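# Example invocation (illustrative):
#   python extract-features.py --path ./images/ --descriptor hog
# Output: features/hog_features.pickle mapping each .jpg filename to its HOG vector.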
| 32.684932 | 114 | 0.611065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 945 | 0.39606 |
b4854019f4acb3fc387347bdc98278a5e0867d2a | 363 | py | Python | src/exco/extractor_spec/spec_source.py | thegangtechnology/excel_comment_orm | b38156b406ccb3ce87737b8ed049bbf3b8a39050 | ["MIT"] | 2 | 2020-11-10T04:53:07.000Z | 2020-11-12T03:53:46.000Z | src/exco/extractor_spec/spec_source.py | thegangtechnology/excel_comment_orm | b38156b406ccb3ce87737b8ed049bbf3b8a39050 | ["MIT"] | 50 | 2020-11-09T06:30:31.000Z | 2022-01-06T05:00:50.000Z | src/exco/extractor_spec/spec_source.py | thegangtechnology/excel_comment_orm | b38156b406ccb3ce87737b8ed049bbf3b8a39050 | ["MIT"] | null | null | null |
import abc
class SpecSource(abc.ABC):
@abc.abstractmethod
def describe(self) -> str:
"""
Returns:
str to print in case there is an error constructing extractor for tracing back
"""
raise NotImplementedError()
class UnknownSource(SpecSource):
def describe(self) -> str:
return 'Unknown Source'
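# Illustrative concrete source (a sketch, not part of this module):
#   class FileSource(SpecSource):
#       def __init__(self, path: str):
#           self.path = path
#       def describe(self) -> str:
#           return f'spec loaded from {self.path}'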
| 19.105263 | 90 | 0.62259 | 346 | 0.953168 | 0 | 0 | 219 | 0.603306 | 0 | 0 | 140 | 0.385675 |
b485f966490bf3490f3dba52ae21698a1d35eef4 | 265 | py | Python | app/admin.py | CS-Hunt/Get-Placed | f79f79f2dd37510405a24578b3a91acea00f9244 | ["MIT"] | 14 | 2021-08-28T04:05:55.000Z | 2022-02-20T07:03:16.000Z | app/admin.py | CS-Hunt/Get-Placed | f79f79f2dd37510405a24578b3a91acea00f9244 | ["MIT"] | null | null | null | app/admin.py | CS-Hunt/Get-Placed | f79f79f2dd37510405a24578b3a91acea00f9244 | ["MIT"] | 9 | 2021-08-28T04:06:03.000Z | 2021-09-26T16:45:28.000Z |
from django.contrib import admin
from .models import Placement_Company_Detail, Profile, StudentBlogModel, ResorcesModel
admin.site.register(Placement_Company_Detail)
admin.site.register(Profile)
admin.site.register(StudentBlogModel)
admin.site.register(ResorcesModel) | 37.857143 | 83 | 0.875472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4860c44ee18b6902dd90728a523b3075912de35 | 3,730 | py | Python | data/mapping.py | wby1905/Graph-Transformer-SSPR | 4d9f2d3b950cde7e7b955247f58c0427f1dda6a1 | ["MIT"] | 2 | 2022-01-19T12:05:08.000Z | 2022-03-02T09:43:50.000Z | data/mapping.py | wby1905/Graph-Transformer | 4d9f2d3b950cde7e7b955247f58c0427f1dda6a1 | ["MIT"] | null | null | null | data/mapping.py | wby1905/Graph-Transformer | 4d9f2d3b950cde7e7b955247f58c0427f1dda6a1 | ["MIT"] | 2 | 2021-06-10T03:39:21.000Z | 2022-01-19T12:05:01.000Z |
import torch as t
import torch_geometric.utils as utils
def qw_score(graph):
"""
    qw_score is not implemented yet; node degree is used as the score instead.
:param graph:
"""
score = utils.degree(graph.edge_index[0])
return score.sort()
def pre_processing(graph, m, score, trees):
score, indices = score
indices.squeeze_()
old_edges = graph.edge_index
trees[-1] = [-1] * m
def graft(root):
"""
        Find the 2-hop node with the highest score and connect it to the source node.
        Slightly different from the paper: once a 2-hop node is added, it is treated as a 1-hop node.
        :param root: source node (degree less than m)
"""
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index)
if nodes_1_hop.shape[0] > m:
return
nodes_2_hop, _, _, _ = utils.k_hop_subgraph(root, 2, graph.edge_index)
ma = 0
for node in nodes_2_hop:
if node not in nodes_1_hop:
node = int(node.item())
idx = t.nonzero(indices == node, as_tuple=False).item()
ma = max(ma, idx)
new_edge = t.tensor([[indices[ma], root], [root, indices[ma]]])
degree[root] += 1
graph.edge_index = t.cat((graph.edge_index, new_edge), dim=1)
if degree[root] < m:
graft(root)
elif degree[root] == m:
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index)
trees[root] = ([i.item() for i in nodes_1_hop if i != root])
graph.edge_index = old_edges
def prune(root):
"""
        Find the 1-hop node with the lowest score and delete the connecting edge.
        The graph is assumed to be a simple graph.
        :param root: source node
"""
nodes_1_hop, _, _, mask = utils.k_hop_subgraph(root, 1, graph.edge_index)
if nodes_1_hop.shape[0] == m + 1:
return
mi = graph.num_nodes + 1
for node in nodes_1_hop:
if node != root:
node = int(node.item())
idx = t.nonzero(indices == node, as_tuple=False).item()
mi = min(idx, mi)
mask = mask.nonzero(as_tuple=False)
edges = graph.edge_index
l, r = 0, 0
for i in mask:
i = i.item()
if edges[0][i] == indices[mi] and edges[1][i] == root:
l = i
elif edges[1][i] == indices[mi] and edges[0][i] == root:
r = i
l, r = sorted([l, r])
graph.edge_index = t.cat((edges[:, :l], edges[:, l + 1:r], edges[:, r + 1:]), dim=1)
degree[root] -= 1
if degree[root] > m:
prune(root)
elif degree[root] == m:
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index)
trees[root] = ([i.item() for i in nodes_1_hop if i != root])
graph.edge_index = old_edges
degree = utils.degree(graph.edge_index[0])
for node, d in enumerate(degree):
tmp = degree[node]
if d > m:
prune(node)
elif d < m:
graft(node)
else:
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(node, 1, graph.edge_index)
trees[node] = ([i.item() for i in nodes_1_hop if i != node])
degree[node] = tmp
for tree in trees:
while len(trees[tree]) < m:
trees[tree].append(-1)
    # pad every subtree with dummy nodes (-1) up to m children; this also covers isolated nodes
graph.edge_index = old_edges
return trees
def construct_node_tree(graph, node, trees, opt):
"""
    Build the K-level, m-ary tree rooted at the target node.
:param graph:
:param node:
:param opt:
"""
m = opt.m
K = opt.K
tree = [node]
now = 0
for i in range(K - 1):
for j in range(m ** i):
root = tree[now]
tree += trees[root]
now += 1
zero = t.zeros(graph.x[-1].shape)
x = graph.x
graph.x = t.cat([graph.x, zero[None, :]], dim=0)
tree = graph.x[tree]
graph.x = x
return tree
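# Illustrative pipeline (a sketch; `opt` is assumed to expose integers m and K):
#   score = qw_score(graph)                          # (sorted scores, node indices)
#   trees = pre_processing(graph, opt.m, score, {})
#   node_tree = construct_node_tree(graph, node_id, trees, opt)
#   # node_tree holds 1 + m + ... + m**(K-1) rows of node features; -1 entries map to the zero dummy row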
| 30.826446 | 92 | 0.521984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.142314 |
b48654f076f939090f06530d0e1ff49e0fb32d65 | 5,463 | py | Python | .archived/snakecode/0460.py | gearbird/calgo | ab48357100de2a5ea47fda2d9f01ced6dc73fa79 | ["MIT"] | 4 | 2022-01-13T03:39:01.000Z | 2022-03-15T03:16:33.000Z | .archived/snakecode/0460.py | gearbird/calgo | ab48357100de2a5ea47fda2d9f01ced6dc73fa79 | ["MIT"] | null | null | null | .archived/snakecode/0460.py | gearbird/calgo | ab48357100de2a5ea47fda2d9f01ced6dc73fa79 | ["MIT"] | 1 | 2021-12-09T12:33:07.000Z | 2021-12-09T12:33:07.000Z |
from typing import Optional, Any
class Node:
def __init__(self, key: int = 0, val: int = 0):
self.key: int = key
self.val: int = val
self.freq: int = 0
self.pre: Optional[Node] = None
self.next: Optional[Node] = None
class DLList:
def __init__(self):
self.size = 0
self._guard = Node()
self._guard.pre = self._guard.next = self._guard
def headify(self, node: Optional[Node]):
assert node
node.pre = self._guard
node.next = self._guard.next
assert node.next is not None
node.next.pre = node
self._guard.next = node
self.size += 1
def pop(self, node: Optional[Node] = None):
if self.size == 0:
return
if not node:
node = self._guard.pre
assert node and node.next and node.pre
node.pre.next = node.next
node.next.pre = node.pre
node.pre, node.next = None, None
self.size -= 1
return node
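# DLList is a circular doubly linked list around a sentinel guard: headify()
# inserts at the head (most recently used) and pop() with no argument removes
# the tail (least recently used), so each frequency group keeps LRU order for
# free. Sketch:
#   dll = DLList(); dll.headify(a); dll.headify(b)   # order: b, a
#   dll.pop()                                        # removes a (the tail)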
class LFUCache:
def __init__(self, capacity: int):
self._groups: dict[int, DLList] = {}
self._nodes: dict[int, Node] = {}
self._cap = capacity
self._size = 0
self._minFreq = 1
def get(self, key: int) -> int:
'''Get Node, update Cache and Node status'''
if key not in self._nodes:
return -1
node = self._nodes[key]
self._update(node)
return node.val
def put(self, key: int, value: int):
'''
        If it's an existing Node, update Cache and Node.\n
        If it's a new Node and there's space, update Cache.\n
        If it's a new Node but no space, kick out one Node, update Cache
'''
if self._cap == 0:
return
if key in self._nodes:
node = self._nodes[key]
node.val = value
else:
node = Node(key, value)
if self._size < self._cap:
self._addNew(node)
else:
self._addNew(node, kick=True)
self._update(node)
def _update(self, node: Node):
        '''Given a Node in cache, update its freq and cache status'''
group = self._groups[node.freq]
group.pop(node)
groupNext = self._getGroup(node.freq+1)
groupNext.headify(node)
if node.freq == self._minFreq and group.size == 0:
self._minFreq = node.freq + 1
node.freq += 1
def _addNew(self, node: Node, kick: bool = False):
'''
        Simply kick out the least frequently used Node from its frequency group and the node map,\n
        then insert the new Node in its place and reset the cache's min frequency
'''
if self._cap == 0:
return
if kick:
group = self._getGroup(self._minFreq)
badNode = group.pop()
assert badNode
self._nodes.pop(badNode.key)
self._size -= 1
self._minFreq = node.freq = 0
self._getGroup(node.freq).headify(node)
self._nodes[node.key] = node
self._size += 1
def _getGroup(self, freq: int) -> DLList:
return self._groups.setdefault(freq, DLList())
def test(actions: list[str], val: list[list[int]]):
cache: Optional[LFUCache] = None
result: list[Optional[Any]] = []
for i, v in zip(actions, val):
if i == 'LFUCache':
cache = LFUCache(v[0])
result.append(None)
elif i == 'put':
assert cache
result.append((i, v, cache.put(v[0], v[1])))
elif i == 'get':
assert cache
result.append((i, v, cache.get(v[0])))
print(result)
if __name__ == '__main__':
actions = ["LFUCache","put","put","get","put","get","get","put","get","get","get"]
values = [[2],[1,1],[2,2],[1],[3,3],[2],[3],[4,4],[1],[3],[4]]
test(actions, values)
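    # For this run (LeetCode 460, example 1) the gets should return, in order:
    # get(1)->1, get(2)->-1, get(3)->3, get(1)->-1 (evicted), get(3)->3, get(4)->4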
# actions2 = ["LFUCache","put","put","get","get","get","put","put","get","get","get","get"]
# values2 = [[3],[2,2],[1,1],[2],[1],[2],[3,3],[4,4],[3],[2],[1],[4]]
# test(actions2, values2)
# actions3 = ["LFUCache","put","put","put","put","put","get","put","get","get","put","get","put","put","put","get","put","get","get","get","get","put","put","get","get","get","put","put","get","put","get","put","get","get","get","put","put","put","get","put","get","get","put","put","get","put","put","put","put","get","put","put","get","put","put","get","put","put","put","put","put","get","put","put","get","put","get","get","get","put","get","get","put","put","put","put","get","put","put","put","put","get","get","get","put","put","put","get","put","put","put","get","put","put","put","get","get","get","put","put","put","put","get","put","put","put","put","put","put","put"]
# values3 = [[10],[10,13],[3,17],[6,11],[10,5],[9,10],[13],[2,19],[2],[3],[5,25],[8],[9,22],[5,5],[1,30],[11],[9,12],[7],[5],[8],[9],[4,30],[9,3],[9],[10],[10],[6,14],[3,1],[3],[10,11],[8],[2,14],[1],[5],[4],[11,4],[12,24],[5,18],[13],[7,23],[8],[12],[3,27],[2,12],[5],[2,9],[13,4],[8,18],[1,7],[6],[9,29],[8,21],[5],[6,30],[1,12],[10],[4,15],[7,22],[11,26],[8,17],[9,29],[5],[3,4],[11,30],[12],[4,29],[3],[9],[6],[3,4],[1],[10],[3,29],[10,28],[1,20],[11,13],[3],[3,12],[3,8],[10,9],[3,26],[8],[7],[5],[13,17],[2,27],[11,15],[12],[9,19],[2,15],[3,16],[1],[12,17],[9,1],[6,19],[4],[5],[5],[8,1],[11,7],[5,2],[9,28],[1],[2,2],[7,4],[4,22],[7,24],[9,26],[13,28],[11,26]]
# test(actions3, values3) | 41.386364 | 683 | 0.513271 | 3,145 | 0.575691 | 0 | 0 | 0 | 0 | 0 | 0 | 2,131 | 0.390079 |
b48720b38e6ef7c7ce6bd71cd8a1fc79b8ad2a3a | 3,263 | py | Python | scripts/sha3.py | cidox479/ecc | da4091ff675d0fc757dc7d19bcdd4474a1388011 | ["BSD-2-Clause"] | null | null | null | scripts/sha3.py | cidox479/ecc | da4091ff675d0fc757dc7d19bcdd4474a1388011 | ["BSD-2-Clause"] | null | null | null | scripts/sha3.py | cidox479/ecc | da4091ff675d0fc757dc7d19bcdd4474a1388011 | ["BSD-2-Clause"] | 1 | 2020-09-28T03:06:38.000Z | 2020-09-28T03:06:38.000Z |
#/*
# * Copyright (C) 2017 - This file is part of libecc project
# *
# * Authors:
# * Ryad BENADJILA <[email protected]>
# * Arnaud EBALARD <[email protected]>
# * Jean-Pierre FLORI <[email protected]>
# *
# * Contributors:
# * Nicolas VIVET <[email protected]>
# * Karim KHALFALLAH <[email protected]>
# *
# * This software is licensed under a dual BSD and GPL v2 license.
# * See LICENSE file at the root folder of the project.
# */
import struct
keccak_rc = [
0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000,
0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003,
0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A,
0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
]
keccak_rot = [
[ 0, 36, 3, 41, 18 ],
[ 1, 44, 10, 45, 2 ],
[ 62, 6, 43, 15, 61 ],
[ 28, 55, 25, 21, 56 ],
[ 27, 20, 39, 8, 14 ],
]
# Keccak function
def keccak_rotl(x, l):
return (((x << l) ^ (x >> (64 - l))) & (2**64-1))
def keccakround(bytestate, rc):
# Import little endian state
state = [0] * 25
for i in range(0, 25):
(state[i],) = struct.unpack('<Q', ''.join(bytestate[(8*i):(8*i)+8]))
# Proceed with the KECCAK core
bcd = [0] * 25
# Theta
for i in range(0, 5):
bcd[i] = state[i] ^ state[i + (5*1)] ^ state[i + (5*2)] ^ state[i + (5*3)] ^ state[i + (5*4)]
for i in range(0, 5):
tmp = bcd[(i+4)%5] ^ keccak_rotl(bcd[(i+1)%5], 1)
for j in range(0, 5):
state[i + (5 * j)] = state[i + (5 * j)] ^ tmp
# Rho and Pi
for i in range(0, 5):
for j in range(0, 5):
bcd[j + (5*(((2*i)+(3*j)) % 5))] = keccak_rotl(state[i + (5*j)], keccak_rot[i][j])
# Chi
for i in range(0, 5):
for j in range(0, 5):
state[i + (5*j)] = bcd[i + (5*j)] ^ (~bcd[((i+1)%5) + (5*j)] & bcd[((i+2)%5) + (5*j)])
# Iota
state[0] = state[0] ^ keccak_rc[rc]
# Pack the output state
output = [0] * (25 * 8)
for i in range(0, 25):
        output[(8*i):(8*i)+8] = struct.pack('<Q', state[i])  # write all 8 packed bytes; the original +1 slice grew the list every round
return output
def keccakf(bytestate):
for rnd in range(0, 24):
bytestate = keccakround(bytestate, rnd)
return bytestate
# SHA-3 context class
class Sha3_ctx(object):
def __init__(self, digest_size):
self.digest_size = digest_size / 8
self.block_size = (25*8) - (2 * (digest_size / 8))
self.idx = 0
self.state = [chr(0)] * (25 * 8)
def digest_size(self):
return self.digest_size
def block_size(self):
return self.block_size
def update(self, message):
for i in range(0, len(message)):
self.state[self.idx] = chr(ord(self.state[self.idx]) ^ ord(message[i]))
self.idx = self.idx + 1
if (self.idx == self.block_size):
self.state = keccakf(self.state)
self.idx = 0
def digest(self):
self.state[self.idx] = chr(ord(self.state[self.idx]) ^ 0x06)
self.state[self.block_size - 1] = chr(ord(self.state[self.block_size - 1]) ^ 0x80)
self.state = keccakf(self.state)
return ''.join(self.state[:self.digest_size])
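# Usage sketch (illustrative; this script targets Python 2, and digest() returns
# the raw digest bytes as a str):
#   ctx = Sha3_ctx(256)   # SHA3-256: 32-byte digest, 136-byte rate
#   ctx.update("abc")
#   digest = ctx.digest()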
| 32.63 | 97 | 0.62274 | 801 | 0.24548 | 0 | 0 | 0 | 0 | 0 | 0 | 657 | 0.201348 |
b48883d3a21bfcf226b4a43dc5ce8c081b237e69 | 3,332 | py | Python | lps/seeds.py | fernandoleira/lps-platform | 0d2ac2465c27444e184bbe5357553607b37790da | ["MIT"] | null | null | null | lps/seeds.py | fernandoleira/lps-platform | 0d2ac2465c27444e184bbe5357553607b37790da | ["MIT"] | null | null | null | lps/seeds.py | fernandoleira/lps-platform | 0d2ac2465c27444e184bbe5357553607b37790da | ["MIT"] | null | null | null |
import csv
from pathlib import Path
from datetime import datetime
from lps.models import *
from lps.schemas import *
SEED_FOLDER_PATH = Path("db/seeds/")
def import_from_csv(csv_filename):
with open(SEED_FOLDER_PATH / csv_filename) as csv_file:
csv_read = csv.DictReader(csv_file, delimiter=',')
return list(csv_read)
def export_to_csv(model_dict, csv_filename="out.csv"):
if len(model_dict) > 0:
        with open(SEED_FOLDER_PATH / csv_filename, "w") as csv_file:
            csv_file.write(",".join(model_dict[0].keys()) + '\n')
            for row in model_dict:
                csv_file.write(",".join(str(elm) for elm in row.values()) + '\n')
return True
else:
return False
def seed_database(db):
# Users
seed_data = import_from_csv("users.csv")
for obj in seed_data:
seed = User(obj["username"], obj["email"], obj["phone_number"], obj["password"], is_admin=bool(obj["is_admin"]), is_super=bool(obj["is_super"]), user_id=obj['user_id'])
db.session.add(seed)
print(seed)
db.session.commit()
print()
# Api Key
seed_data = import_from_csv("api_keys.csv")
for obj in seed_data:
seed = ApiKey(obj["user_id"], api_key=obj["api_key"])
db.session.add(seed)
print(seed)
db.session.commit()
print()
# Units
seed_data = import_from_csv("units.csv")
for obj in seed_data:
seed = Unit(obj["name"], obj["user_id"], bool(obj["alert_mail"]), bool(obj["alert_sms"]), unit_id=obj["unit_id"])
db.session.add(seed)
print(seed)
db.session.commit()
print()
# Locator Points
seed_data = import_from_csv("points.csv")
for obj in seed_data:
seed = LocatorPoint(obj["title"], obj["description"], obj["point_type"], float(obj['lat']), float(obj['lon']), obj['unit_id'], point_id=obj['point_id'])
db.session.add(seed)
print(seed)
db.session.commit()
def export_seed():
# Units
units_q = Unit.query.all()
units = UnitSchema(many=True).dump(units_q)
export_check = export_to_csv(units, "units.csv")
if export_check:
print("--> Units export has been completed to 'units.csv'")
else:
print("--> An error has occurred exporting Units")
# Locator Points
points_q = LocatorPoint.query.all()
points = LocatorPointSchema(many=True).dump(points_q)
export_check = export_to_csv(points, "points.csv")
if export_check:
print("--> Locator Points export has been completed to 'points.csv'")
else:
print("--> An error has occurred exporting Locator Points")
# Users
users_q = User.query.all()
users = UserSchema(many=True).dump(users_q)
export_check = export_to_csv(users, "users.csv")
if export_check:
print("--> Users export has been completed to 'users.csv'")
else:
print("--> An error has occurred exporting Users")
# Api Keys
api_keys_q = ApiKey.query.all()
api_keys = ApiKeySchema(many=True).dump(api_keys_q)
export_check = export_to_csv(api_keys, "api_keys.csv")
if export_check:
print("--> Api Keys export has been completed to 'api_keys.csv'")
else:
print("--> An error has occurred exporting Api Keys")
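# --- Usage sketch (hypothetical; assumes an app-configured SQLAlchemy `db`
# session and the CSV fixtures present under db/seeds/) ---
#   from lps import db
#   seed_database(db)   # loads users -> api keys -> units -> locator points
#   export_seed()       # dumps the four tables back to their seed CSVs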
| 30.851852 | 176 | 0.635054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.245798 |
b48ab31d2b65a280be63bfbc1d367523dc945d6a | 141 | py | Python | Python/Least_Common_Multiple_for_large_numbers.py | DeathcallXD/DS-Algo-Point | 70bc4b98fa6648cdcb2e65bccaa8b30298f14d87 | [
"MIT"
]
| null | null | null | Python/Least_Common_Multiple_for_large_numbers.py | DeathcallXD/DS-Algo-Point | 70bc4b98fa6648cdcb2e65bccaa8b30298f14d87 | [
"MIT"
]
| null | null | null | Python/Least_Common_Multiple_for_large_numbers.py | DeathcallXD/DS-Algo-Point | 70bc4b98fa6648cdcb2e65bccaa8b30298f14d87 | [
"MIT"
]
| null | null | null | def GCD(a,b):
if b == 0:
return a
else:
return GCD(b, a%b)
a = int(input())
b = int(input())
print(a*b//(GCD(a,b)))
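# Worked example: a=12, b=18 -> GCD(12, 18) = 6, so LCM = 12*18//6 = 36.
# Floor division (//) keeps the result exact for arbitrarily large integers.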
| 14.1 | 26 | 0.460993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b48b0aa9e22f7e6f7d8092e34b71578abf6b8004 | 2,121 | py | Python | matgendb/builders/examples/maxvalue_builder.py | Tinaatucsd/pymatgen-db | e1fc46b7df8bea8455e7290ae3eea6fd297a09f3 | [
"MIT"
]
| null | null | null | matgendb/builders/examples/maxvalue_builder.py | Tinaatucsd/pymatgen-db | e1fc46b7df8bea8455e7290ae3eea6fd297a09f3 | [
"MIT"
]
| null | null | null | matgendb/builders/examples/maxvalue_builder.py | Tinaatucsd/pymatgen-db | e1fc46b7df8bea8455e7290ae3eea6fd297a09f3 | [
"MIT"
]
| null | null | null | """
Build a derived collection with the maximum
value from each 'group' defined in the source
collection.
"""
__author__ = 'Dan Gunter <[email protected]>'
__date__ = '5/21/14'
from matgendb.builders import core
from matgendb.builders import util
from matgendb.query_engine import QueryEngine
_log = util.get_builder_log("incr")
class MaxValueBuilder(core.Builder):
"""Example of incremental builder that requires
some custom logic for incremental case.
"""
def get_items(self, source=None, target=None):
"""Get all records from source collection to add to target.
:param source: Input collection
:type source: QueryEngine
:param target: Output collection
:type target: QueryEngine
"""
self._groups = self.shared_dict()
self._target_coll = target.collection
self._src = source
return source.query()
def process_item(self, item):
"""Calculate new maximum value for each group,
for "new" items only.
"""
group, value = item['group'], item['value']
if group in self._groups:
cur_val = self._groups[group]
self._groups[group] = max(cur_val, value)
else:
# New group. Could fetch old max. from target collection,
# but for the sake of illustration recalculate it from
# the source collection.
self._src.tracking = False # examine entire collection
new_max = value
for rec in self._src.query(criteria={'group': group},
properties=['value']):
new_max = max(new_max, rec['value'])
self._src.tracking = True # back to incremental mode
# calculate new max
self._groups[group] = new_max
def finalize(self, errs):
"""Update target collection with calculated maximum values.
"""
for group, value in self._groups.items():
doc = {'group': group, 'value': value}
self._target_coll.update({'group': group}, doc, upsert=True)
return True | 35.949153 | 72 | 0.614804 | 1,790 | 0.843942 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.422914 |
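# --- Usage sketch (hypothetical; exercises only the hooks defined above) ---
#   builder = MaxValueBuilder()
#   for item in builder.get_items(source=src_qe, target=tgt_qe):
#       builder.process_item(item)   # source docs look like {"group": g, "value": v}
#   builder.finalize(errs=[])        # upserts one maximum per group into the target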
b48c5e302c25178ab826b1d7d13350ce7b179b8d | 184 | py | Python | dvc/dependency/ssh.py | yfarjoun/dvc | eaca7dc80c765dd3a8dbe4c8fb3b206656bbc5e2 | [
"Apache-2.0"
]
| 2 | 2021-09-22T15:31:46.000Z | 2021-11-17T10:40:07.000Z | dvc/dependency/ssh.py | yfarjoun/dvc | eaca7dc80c765dd3a8dbe4c8fb3b206656bbc5e2 | [
"Apache-2.0"
]
| null | null | null | dvc/dependency/ssh.py | yfarjoun/dvc | eaca7dc80c765dd3a8dbe4c8fb3b206656bbc5e2 | [
"Apache-2.0"
]
| 1 | 2019-09-02T00:29:40.000Z | 2019-09-02T00:29:40.000Z | from __future__ import unicode_literals
from dvc.output.ssh import OutputSSH
from dvc.dependency.base import DependencyBase
class DependencySSH(DependencyBase, OutputSSH):
pass
| 20.444444 | 47 | 0.831522 | 56 | 0.304348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b48eb31a18679e3dd6c7f6eeeaf59912e0ae7a93 | 1,775 | py | Python | multiscaleloss.py | praveeenbadimala/flow_unsupervised | 07385fd45e9213c06acacfd891e116f07993575e | [
"MIT"
]
| null | null | null | multiscaleloss.py | praveeenbadimala/flow_unsupervised | 07385fd45e9213c06acacfd891e116f07993575e | [
"MIT"
]
| null | null | null | multiscaleloss.py | praveeenbadimala/flow_unsupervised | 07385fd45e9213c06acacfd891e116f07993575e | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
import optflow.compute_tvl1_energy as compute_tvl1_energy
def EPE(input_flow, target_flow, sparse=False, mean=True):
EPE_map = torch.norm(target_flow-input_flow,2,1)
if sparse:
        # invalid target flow is encoded as all-zero vectors; keep pixels
        # where at least one flow component is non-zero
        valid = (target_flow[:, 0] != 0) | (target_flow[:, 1] != 0)
        EPE_map = EPE_map[valid]
if mean:
return EPE_map.mean()
else:
return EPE_map.sum()
def multiscale_energy_loss(network_output_energy, target_flow,img1,img2, weights=None, sparse=False):
def one_scale_mod(output, target, sparse,img1,img2):
b, _, h, w = target.size()
down_sample_img1 =nn.functional.adaptive_avg_pool2d(img1, (h, w))
down_sample_img2 = nn.functional.adaptive_avg_pool2d(img2, (h, w))
target_energy = compute_tvl1_energy.compute_tvl1_energy_optimized_batch(down_sample_img1,
down_sample_img2,
target)
l1_loss = (output - target_energy).abs().sum() / target_energy.size(0)
return l1_loss
if type(network_output_energy) not in [tuple, list]:
network_output_energy = [network_output_energy]
if weights is None:
weights = [0.46,0.23,0.23,0.46] # more preference for starting layers
assert(len(weights) == len(network_output_energy))
loss = 0
flow_index = 0
for output, weight in zip(network_output_energy, weights):
loss += weight * one_scale_mod(output, target_flow[flow_index], sparse,img1,img2)
flow_index = flow_index + 1
return loss
def realEPE(output, target, sparse=False):
b, _, h, w = target.size()
    upsampled_output = nn.functional.interpolate(output, size=(h, w), mode='bilinear', align_corners=False)
return EPE(upsampled_output, target, sparse, mean=True) | 41.27907 | 101 | 0.652958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.026479 |
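# --- Usage sketch for the helpers above (hypothetical shapes) ---
#   import torch
#   flow_pred   = torch.randn(4, 2, 64, 64)     # (B, 2, H, W) network output
#   flow_target = torch.randn(4, 2, 256, 256)   # full-resolution ground truth
#   err = realEPE(flow_pred, flow_target)       # upsamples pred, then mean EPE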
81e7a0792251a7a66030d5e6a745a97158b8e55f | 8,070 | py | Python | object_detector/src/object_detector/object_detector.py | Ajapaik/ml-2021-ajapaik | 31e318f10329405237a5773a1d963b3ab867fa02 | [
"Apache-2.0"
]
| null | null | null | object_detector/src/object_detector/object_detector.py | Ajapaik/ml-2021-ajapaik | 31e318f10329405237a5773a1d963b3ab867fa02 | [
"Apache-2.0"
]
| 9 | 2021-11-12T16:54:24.000Z | 2021-12-12T14:13:49.000Z | object_detector/src/object_detector/object_detector.py | iharsuvorau/ml-2021-ajapaik | 31e318f10329405237a5773a1d963b3ab867fa02 | [
"Apache-2.0"
]
| 1 | 2022-02-24T21:23:06.000Z | 2022-02-24T21:23:06.000Z | import numpy as np
import time
import cv2
import argparse
import sys
import os
import glob
import json
from pathlib import Path
class ObjectDetector:
    @staticmethod
    def file_exist(file_names_list: list) -> bool:
if all(list(map(os.path.isfile,file_names_list))):
return True
else:
print("Please check one of the Config Files does not exist")
return False
# if coco.names, yolov4.{cfg,weights} are relative to cli-tool
# no need to pass them as cli options
def set_default_config():
LABELS_FILE='coco.names'
CONFIG_FILE='yolov4.cfg'
WEIGHTS_FILE='yolov4.weights'
CONFIDENCE_THRESHOLD=0.25
file_names_list = [LABELS_FILE,CONFIG_FILE,WEIGHTS_FILE]
# Check if the provided file paths exists
if ObjectDetector.file_exist(file_names_list=file_names_list):
return LABELS_FILE,CONFIG_FILE,WEIGHTS_FILE
# load all files matching ext from im_dir
    @staticmethod
    def load_image_files(imdir=""):
        # defaults to "current" dir
        # various extensions of files that can be fetched
        ext = ['png', 'jpg', 'gif', 'jpeg']
        files = []
        for e in ext:
            # os.path.join handles imdir with or without a trailing separator
            files.extend(glob.glob(os.path.join(imdir, '*.' + e)))
return files
    @staticmethod
    def save_to_json(out_dir, file_name, data):
        full_file_path = os.path.join(out_dir, file_name + ".json")
with open(full_file_path, 'w') as f:
json.dump(data, f, ensure_ascii=False)
print(f" Saved {full_file_path} with bounding box cordinates.")
    @staticmethod
    def object_detection(image_path, output_dir, label_path=None, config_path=None, weight_path=None, threshold=0.25):
        INPUT_FILE = image_path
        CONFIDENCE_THRESHOLD = threshold
        # fall back to the bundled defaults for any model file not supplied
        if None in (label_path, config_path, weight_path):
            LABELS_FILE, CONFIG_FILE, WEIGHTS_FILE = ObjectDetector.set_default_config()
        LABELS_FILE = label_path if label_path is not None else LABELS_FILE
        CONFIG_FILE = config_path if config_path is not None else CONFIG_FILE
        WEIGHTS_FILE = weight_path if weight_path is not None else WEIGHTS_FILE
LABELS = open(LABELS_FILE).read().strip().split("\n")
np.random.seed(4)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
net = cv2.dnn.readNetFromDarknet(CONFIG_FILE, WEIGHTS_FILE)
image = cv2.imread(INPUT_FILE)
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
#Maybe output some time Metrics?
#print(" took {:.6f} seconds".format(end - start))
# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONFIDENCE_THRESHOLD:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
                    # use the center (x, y)-coordinates to derive the
                    # top-left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD,
CONFIDENCE_THRESHOLD)
        # record how many objects were detected
data = [{"detection_count": len(idxs), "file_name": image_path, "confidence_threshold": CONFIDENCE_THRESHOLD}]
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
data.append({"label":LABELS[classIDs[i]], "confidence":"{:.2f}".format(confidences[i]*100), "left_x":x, "top_y":y,"width":w,"height":h})
        # save results to <image_path>.json
# Maybe a flag to save results of a batch to same json?
_file_name = Path(image_path).name
# safe file to json
ObjectDetector.save_to_json(output_dir,_file_name,data)
def main():
parser = argparse.ArgumentParser(
description="Script to Object Detection with Yolo4."
)
parser.add_argument(
"--config_path", "-c", dest="config_path",
default="yolov4.cfg", help="Path to yolov4.cfg"
)
parser.add_argument(
"--weight_path", "-w", dest="weight_path",
default="yolov4.weights", help="Path to yolov4.weights."
)
parser.add_argument(
"--label_path", "-l", dest="label_path",
default="coco.names", help="Path to coco.names."
)
parser.add_argument(
"--image_path", "-i", dest="image_path",
default=None, help="Path to Image file. Leaving Blank Searches the current directory"
)
parser.add_argument(
"--threshold", "-t", dest="threshold",
default=float(0.25), help="Detection Confidence Threshold to apply"
)
parser.add_argument(
"--image-dir", "-d", dest="input_dir",
default=os.getcwd(), help="Directory containing image file"
)
parser.add_argument(
"--output-dir", "-o", dest="output_dir",
default=os.getcwd(), help="Directory where output should be stored"
)
args = parser.parse_args()
if args.image_path is None:
print(f"--image_path not provided, searching {args.input_dir} for image files...")
image_files = ObjectDetector.load_image_files(args.input_dir)
if len(image_files) <=0:
print("No Image file(s) found")
for i,image in enumerate(image_files,1):
print(f"Running Object Detection on {i} of {len(image_files)} images")
ObjectDetector.object_detection(
image,
args.output_dir,
args.label_path,
args.config_path,
args.weight_path,
float(args.threshold)
)
else:
ObjectDetector.object_detection(
args.image_path,
args.output_dir,
args.label_path,
args.config_path,
args.weight_path,
float(args.threshold)
)
if __name__ == "__main__":
main()
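# --- Usage sketch (hypothetical paths; mirrors the argparse flags above) ---
# Batch mode - detect objects in every image under ./images, writing one
# <image-name>.json per file into ./out:
#   python object_detector.py -d ./images/ -o ./out/ -t 0.4
# Single-image mode with explicit model files:
#   python object_detector.py -i photo.jpg -l coco.names -c yolov4.cfg -w yolov4.weights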
| 37.887324 | 153 | 0.580421 | 5,610 | 0.695167 | 0 | 0 | 0 | 0 | 0 | 0 | 2,491 | 0.308674 |
81e7bcf77b3d24a119c0b31470b009787721b442 | 15,921 | py | Python | pipeline/tests/engine/core/data/test_api.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
]
| 2 | 2021-07-28T01:48:31.000Z | 2021-11-17T11:02:26.000Z | pipeline/tests/engine/core/data/test_api.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
]
| null | null | null | pipeline/tests/engine/core/data/test_api.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import sys
from django.test import TestCase
from django.utils.module_loading import import_string
from pipeline.tests.mock import * # noqa
from pipeline.tests.mock_settings import * # noqa
class EngineDataAPITestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mock_settings = MagicMock()
cls.settings_patch = patch(ENGINE_DATA_API_SETTINGS, cls.mock_settings)
cls.import_backend_patch = patch(ENGINE_DATA_API_IMPORT_BACKEND, MagicMock())
cls.settings_patch.start()
cls.import_backend_patch.start()
cls.api = import_string("pipeline.engine.core.data.api")
cls.write_methods = ["set_object", "del_object", "expire_cache"]
cls.read_methods = ["get_object", "cache_for"]
cls.method_params = {
"set_object": ["key", "obj"],
"del_object": ["key"],
"expire_cache": ["key", "obj", "expires"],
"cache_for": ["key"],
"get_object": ["key"],
}
@classmethod
def tearDownClass(cls):
cls.settings_patch.stop()
cls.import_backend_patch.stop()
def setUp(self):
self.backend = MagicMock()
self.candidate_backend = MagicMock()
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = False
def test_write__without_candidate(self):
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_write__without_candidate test\n".format(method)
)
def test_write__without_candiate_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_write__without_candiate_raise_err test\n".format(method)
)
def test_write__with_candidate(self):
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write("{} pass test_write__with_candidate test\n".format(method))
def test_write__with_candidate_main_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_main_raise_err test\n".format(
method
)
)
def test_write__with_candidate_raise_err(self):
for method in self.write_methods:
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_raise_err test\n".format(method)
)
def test_write__with_candidate_both_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_both_raise_err test\n".format(
method
)
)
def test_write__with_auto_expire(self):
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = True
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE_SECONDS = 30
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
if method == "set_object":
getattr(self.backend, "expire_cache").assert_called_once_with(
*self.method_params[method], expires=30
)
self.backend.expire_cache.reset_mock()
else:
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_both_raise_err test\n".format(
method
)
)
def test_read__without_candidate(self):
for method in self.read_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__without_candidate test\n".format(method)
)
def test_read__without_candidate_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__without_candidate_raise_err test\n".format(method)
)
def test_read__with_candidate_not_use(self):
for method in self.read_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__with_candidate_not_use test\n".format(method)
)
def test_read__with_candidate_use(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(return_value=None))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_use test\n".format(method)
)
def test_read__with_candidate_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(return_value=None))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_err test\n".format(method)
)
def test_read__with_candidate_main_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_main_raise_err test\n".format(method)
)
def test_read__with_candidate_both_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_both_raise_err test\n".format(method)
)
def test_set_schedule_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.api.set_schedule_data("key", "data")
self.backend.set_object.assert_called_once_with(
"key_schedule_parent_data", "data"
)
self.candidate_backend.set_object.assert_called_once_with(
"key_schedule_parent_data", "data"
)
def test_delete_parent_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.api.delete_parent_data("key")
self.backend.del_object.assert_called_once_with(
"key_schedule_parent_data"
)
self.candidate_backend.del_object.assert_called_once_with(
"key_schedule_parent_data"
)
def test_get_schedule_parent_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = self.api.get_schedule_parent_data("key")
self.assertIsNotNone(data)
self.backend.get_object.assert_called_once_with(
"key_schedule_parent_data"
)
self.candidate_backend.get_object.assert_not_called()
| 43.381471 | 115 | 0.592174 | 14,992 | 0.940586 | 0 | 0 | 868 | 0.054458 | 0 | 0 | 1,873 | 0.117511 |
81e7fbeb1aa79861faba9578ed1ed05e2c7b8c74 | 5,669 | py | Python | pactools/mne_api.py | mathurinm/pactools | 2ad08061c69378368137a26e0519ce3ce6e5c7bd | [
"BSD-3-Clause"
]
| null | null | null | pactools/mne_api.py | mathurinm/pactools | 2ad08061c69378368137a26e0519ce3ce6e5c7bd | [
"BSD-3-Clause"
]
| null | null | null | pactools/mne_api.py | mathurinm/pactools | 2ad08061c69378368137a26e0519ce3ce6e5c7bd | [
"BSD-3-Clause"
]
| null | null | null | import numpy as np
def _check_mne(name):
    """Helper to check that MNE-Python is installed"""
try:
import mne
except ImportError:
raise ImportError('Please install MNE-python to use %s.' % name)
return mne
def raw_to_mask(raw, ixs, events=None, tmin=None, tmax=None):
"""
A function to transform MNE data into pactools input signals.
It select the one channel on which you to estimate PAC, or two channels
for cross-channel PAC. It also returns a mask generator, that mask the
data outside a given window around an event. The mask generator returns
a number of masks equal to the number of events times the number of
windows (i.e. the number of pairs (tmin, tmax)).
Warning: events is stored in indices, tmin and tmax are stored in seconds.
Parameters
----------
raw : an instance of Raw, containing data of shape (n_channels, n_times)
The data used to calculate PAC
ixs : int or couple of int
The indices for the low/high frequency channels. If only one is given,
the same channel is used for both low_sig and high_sig.
events : array, shape (n_events, 3) | array, shape (n_events,) | None
MNE events array. To be supplied if data is 2D and output should be
split by events. In this case, `tmin` and `tmax` must be provided. If
`ndim == 1`, it is assumed to be event indices, and all events will be
grouped together. Otherwise, events will be grouped along the third
dimension.
tmin : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the start time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include before
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `min(raw.times)`.
tmax : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the stop time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include after
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `max(raw.times)`.
Attributes
----------
low_sig : array, shape (1, n_points)
Input data for the phase signal
high_sig : array or None, shape (1, n_points)
Input data for the amplitude signal.
If None, we use low_sig for both signals.
mask : MaskIterator instance
Object that behaves like a list of mask, without storing them all.
The PAC will only be evaluated where the mask is False.
Examples
--------
>>> from pactools import raw_to_mask
>>> low_sig, high_sig, mask = raw_to_mask(raw, ixs, events, tmin, tmax)
>>> n_masks = len(mask)
>>> for one_mask in mask:
... pass
"""
mne = _check_mne('raw_to_mask')
if not isinstance(raw, mne.io.BaseRaw):
raise ValueError('Must supply Raw as input')
ixs = np.atleast_1d(ixs)
fs = raw.info['sfreq']
data = raw[:][0]
n_channels, n_points = data.shape
low_sig = data[ixs[0]][None, :]
if ixs.shape[0] > 1:
high_sig = data[ixs[1]][None, :]
else:
high_sig = None
mask = MaskIterator(events, tmin, tmax, n_points, fs)
return low_sig, high_sig, mask
class MaskIterator(object):
"""Iterator that creates the masks one at a time.
Examples
--------
>>> from pactools import MaskIterator
>>> all_masks = MaskIterator(events, tmin, tmax, n_points, fs)
>>> n_masks = len(all_masks)
>>> for one_mask in all_masks:
... pass
"""
def __init__(self, events, tmin, tmax, n_points, fs):
self.events = events
self.tmin = tmin
self.tmax = tmax
self.n_points = n_points
self.fs = float(fs)
self._init()
def _init(self):
self.tmin = np.atleast_1d(self.tmin)
self.tmax = np.atleast_1d(self.tmax)
if len(self.tmin) != len(self.tmax):
raise ValueError('tmin and tmax have differing lengths')
n_windows = len(self.tmin)
if self.events is None:
self.events = np.array([0.])
n_events = 1
if self.events.ndim == 1:
n_events = 1 # number of different event kinds
else:
n_events = np.unique(self.events[:, -1]).shape[0]
self._n_iter = n_windows * n_events
def __iter__(self):
return self.next()
def __len__(self):
return self._n_iter
def next(self):
if self.events.ndim == 1:
event_names = [None, ]
else:
event_names = np.unique(self.events[:, -1])
mask = np.empty((1, self.n_points), dtype=bool)
for event_name in event_names:
if self.events.ndim == 1:
# select all the events since their kind is not specified
these_events = self.events
else:
# select the event indices of one kind of event
these_events = self.events[self.events[:, -1] == event_name, 0]
for tmin, tmax in zip(self.tmin, self.tmax):
mask.fill(True) # it masks everything
for event in these_events:
start, stop = None, None
if tmin is not None:
start = int(event + tmin * self.fs)
if tmax is not None:
stop = int(event + tmax * self.fs)
mask[:, start:stop] = False
yield mask
| 34.150602 | 79 | 0.599929 | 2,263 | 0.399189 | 1,046 | 0.184512 | 0 | 0 | 0 | 0 | 3,222 | 0.568354 |
81e833e9fa8dc0d32e6fa4d7181d790ea3b16866 | 508 | py | Python | 1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
]
| 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
]
| 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
]
| 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | import collections
from itertools import combinations
from collections import Counter
class Solution:
def mostVisitedPattern(self, username: List[str], timestamp: List[int], website: List[str]) -> List[str]:
visit = collections.defaultdict(list)
for t, u, w in sorted(zip(timestamp, username, website)):
visit[u].append(w)
table = sum([Counter(set(combinations(w, 3))) for w in visit.values()], Counter())
return list(min(table, key=lambda k: (-table[k], k)))
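# Worked example (problem statement, example 1):
#   username  = ["joe","joe","joe","james","james","james","james","mary","mary","mary"]
#   timestamp = [1,2,3,4,5,6,7,8,9,10]
#   website   = ["home","about","career","home","cart","maps","home","home","about","career"]
#   Solution().mostVisitedPattern(username, timestamp, website)
#   -> ["home", "about", "career"]   # the 3-sequence shared by the most users (joe and mary)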
| 46.181818 | 109 | 0.673228 | 421 | 0.82874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
81eb1d6d6cc6e952fd1154195a1b71aa5f698462 | 870 | py | Python | src/tarot/magicEight.py | tjweldon/St_Germain | ab4114e71733ac4fc8cd835f8d1107340de5852f | [
"MIT"
]
| null | null | null | src/tarot/magicEight.py | tjweldon/St_Germain | ab4114e71733ac4fc8cd835f8d1107340de5852f | [
"MIT"
]
| null | null | null | src/tarot/magicEight.py | tjweldon/St_Germain | ab4114e71733ac4fc8cd835f8d1107340de5852f | [
"MIT"
]
| null | null | null | import random
async def magicEightBall(ctx, message=True):
if message:
outlooks = [
"As I see it, yes.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don’t count on it.",
"It is certain.",
"It is decidedly so.",
"Most likely.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Outlook good.",
"Reply hazy, try again.",
"Signs point to yes.",
"Very doubtful.",
"Without a doubt.",
"Yes.",
"Yes – definitely.",
"You may rely on it.",
]
        await ctx.send('Magic 8: ' + random.choice(outlooks))
| 29 | 57 | 0.452874 | 0 | 0 | 0 | 0 | 0 | 0 | 857 | 0.980549 | 397 | 0.454233 |
81eb4b0e294989b02c9358c7a2349765725c6844 | 970 | py | Python | app/mod_check/MySQL.py | RITC3/Hermes | 7df5cf1cbeaca949918ace9278b2d5c1138d4eac | [
"MIT"
]
| 2 | 2018-03-06T03:39:00.000Z | 2018-03-06T04:31:39.000Z | app/mod_check/MySQL.py | RITC3/Hermes | 7df5cf1cbeaca949918ace9278b2d5c1138d4eac | [
"MIT"
]
| 15 | 2018-01-01T20:55:22.000Z | 2018-06-09T21:37:39.000Z | app/mod_check/MySQL.py | RITC3/Hermes | 7df5cf1cbeaca949918ace9278b2d5c1138d4eac | [
"MIT"
]
| null | null | null | import pymysql.cursors
from ..mod_check import app
@app.task
def check(host, port, username, password, db):
result = None
connection = None
try:
connection = pymysql.connect(host=host,
port=port,
user=username,
password=password,
db=db,
charset='utf8mb4',
autocommit=True,
cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
cursor.execute('SELECT @@version AS version')
res = cursor.fetchone()
if isinstance(res, dict):
result = res.get('version', None)
except pymysql.Error:
result = False
finally:
if connection is not None:
connection.close()
return result
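# --- Usage sketch (hypothetical credentials) ---
# Called directly (bypassing the task queue), `check` returns the server
# version string on success and False when the connection or query fails:
#   version = check("127.0.0.1", 3306, "monitor", "s3cret", "mysql")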
| 29.393939 | 76 | 0.464948 | 0 | 0 | 0 | 0 | 916 | 0.94433 | 0 | 0 | 47 | 0.048454 |
81eb7f6fef6cc7597c7451eea8083a954d98940b | 2,087 | py | Python | src/adafruit-circuitpython-bundle-4.x-mpy-20190713/examples/hue_simpletest.py | mbaaba/solar_panel | 42059d8c61320494ad1298065dbc50cd9b3bd51e | [
"MIT"
]
| 1 | 2020-04-13T16:10:53.000Z | 2020-04-13T16:10:53.000Z | infra/libs-400rc2-20190512/examples/hue_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
]
| null | null | null | infra/libs-400rc2-20190512/examples/hue_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
]
| null | null | null | import time
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import neopixel
# Import Philips Hue Bridge
from adafruit_hue import Bridge
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi and API secrets are kept in secrets.py, please add them there!")
raise
# ESP32 SPI
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Attempt to load bridge username and IP address from secrets.py
try:
username = secrets['hue_username']
bridge_ip = secrets['bridge_ip']
my_bridge = Bridge(wifi, bridge_ip, username)
except Exception:
# Perform first-time bridge setup
my_bridge = Bridge(wifi)
ip = my_bridge.discover_bridge()
username = my_bridge.register_username()
print('ADD THESE VALUES TO SECRETS.PY: \
\n\t"bridge_ip":"{0}", \
\n\t"hue_username":"{1}"'.format(ip, username))
raise
# Enumerate all lights on the bridge
my_bridge.get_lights()
# Turn on the light
my_bridge.set_light(1, on=True)
# Convert RGB colors to Hue-compatible HSB colors
hsb_y = my_bridge.rgb_to_hsb([255, 255, 0])
hsb_b = my_bridge.rgb_to_hsb([0, 0, 255])
hsb_w = my_bridge.rgb_to_hsb([255, 255, 255])
hsb_colors = [hsb_y, hsb_b, hsb_w]
# Set the light to Python colors!
for color in hsb_colors:
my_bridge.set_light(1, hue=int(color[0]), sat=int(color[1]), bri=int(color[2]))
time.sleep(5)
# Set a predefined scene
# my_bridge.set_group(1, scene='AB34EF5')
# Turn off the light
my_bridge.set_light(1, on=False)
| 32.609375 | 84 | 0.712985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.310014 |
81ebc1e0f8c3dd6fa15e9aec38b6811ca3958a6e | 2,081 | py | Python | examples/python-echo/src/echo.py | mdelete/kore | 481bb133a9307614dc7da6ca59453ef154696ce1 | [
"0BSD"
]
| null | null | null | examples/python-echo/src/echo.py | mdelete/kore | 481bb133a9307614dc7da6ca59453ef154696ce1 | [
"0BSD"
]
| null | null | null | examples/python-echo/src/echo.py | mdelete/kore | 481bb133a9307614dc7da6ca59453ef154696ce1 | [
"0BSD"
]
| null | null | null | #
# Copyright (c) 2013-2018 Joris Vink <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import kore
import socket
class EchoServer:
# Setup socket + wrap it inside of a kore socket so we can use it.
def __init__(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.bind(("127.0.0.1", 6969))
sock.listen()
self.conn = kore.socket_wrap(sock)
# Wait for a new client to connect, then create a new task
    # that calls handle_client with the connected client as
# the argument.
async def run(self):
while True:
try:
client = await self.conn.accept()
kore.task_create(self.handle_client(client))
client = None
except Exception as e:
kore.fatal("exception %s" % e)
# Each client will run as this co-routine.
async def handle_client(self, client):
while True:
try:
data = await client.recv(1024)
if data is None:
break
await client.send(data)
except Exception as e:
print("client got exception %s" % e)
client.close()
# Setup the server object.
server = EchoServer()
# Create a task that will execute inside of Kore as a co-routine.
kore.task_create(server.run())
| 35.271186 | 74 | 0.655454 | 1,128 | 0.542047 | 0 | 0 | 0 | 0 | 612 | 0.294089 | 1,138 | 0.546852 |
81ec64fdd381f8390bb7b57ec0c4cafc99c38ae3 | 77 | py | Python | src/token/__init__.py | mingz2013/py.script | c7f637f41949d4992e11bb0f694d2dc6aa7bf112 | [
"Apache-2.0"
]
| 1 | 2019-10-23T13:15:59.000Z | 2019-10-23T13:15:59.000Z | src/token/__init__.py | mingz2013/py.cond | c98bc81e2216136bf158177355a8fcc36e1d83a9 | [
"Apache-2.0"
]
| null | null | null | src/token/__init__.py | mingz2013/py.cond | c98bc81e2216136bf158177355a8fcc36e1d83a9 | [
"Apache-2.0"
]
| null | null | null | # -*- coding:utf-8 -*-
"""
"""
__date__ = "14/12/2017"
__author__ = "zhaojm"
| 12.833333 | 23 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.636364 |
81eca3ec2c7fb9bdc184a8e1b344b38081bdf451 | 3,710 | py | Python | pesto-cli/pesto/ws/service/process.py | CS-SI/pesto | 654a961d1064049578050d4c96e6f68f6a6dd770 | [
"Apache-2.0"
]
| 25 | 2020-05-19T16:22:52.000Z | 2022-01-06T13:31:19.000Z | pesto-cli/pesto/ws/service/process.py | CS-SI/pesto | 654a961d1064049578050d4c96e6f68f6a6dd770 | [
"Apache-2.0"
]
| 5 | 2020-10-12T09:30:20.000Z | 2021-12-13T12:49:06.000Z | pesto-cli/pesto/ws/service/process.py | CS-SI/pesto | 654a961d1064049578050d4c96e6f68f6a6dd770 | [
"Apache-2.0"
]
| 5 | 2020-06-19T16:05:13.000Z | 2021-03-11T11:51:19.000Z | import asyncio
import logging
import traceback
import uuid
from typing import Optional, Tuple, Any, Callable
from pesto.ws.core.payload_parser import PayloadParser, PestoConfig
from pesto.ws.core.pesto_feature import PestoFeatures
from pesto.ws.core.utils import load_class, async_exec
from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper
from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI
from pesto.ws.features.payload_converter import PayloadConverter
from pesto.ws.features.payload_debug import PayloadDebug
from pesto.ws.features.response_serializer import ResponseSerializer
from pesto.ws.features.schema_validation import SchemaValidation
from pesto.ws.features.stateful_response import StatefulResponse
from pesto.ws.features.stateless_response import StatelessResponse
from pesto.ws.service.describe import DescribeService
from pesto.ws.service.job_result import ResultType
log = logging.getLogger(__name__)
class ProcessService:
PROCESS_CLASS_NAME = 'algorithm.process.Process'
_algorithm: Optional[Callable] = None
_describe = None
@staticmethod
def init():
if ProcessService._algorithm is not None:
raise ValueError('Process Service already loaded !')
try:
log.info('ProcessService.init() ...')
ProcessService._algorithm = load_class(ProcessService.PROCESS_CLASS_NAME)()
if hasattr(ProcessService._algorithm, 'on_start'):
log.info('ProcessService.on_start() ...')
ProcessService._algorithm.on_start()
log.info('ProcessService.on_start() ... Done !')
log.info('ProcessService.init() ... Done !')
        except Exception:
traceback.print_exc()
log.warning('Algorithm {}.on_start() failure !'.format(ProcessService.PROCESS_CLASS_NAME))
def __init__(self, url_root: str):
self.url_root = url_root
@property
def service_description(self):
if ProcessService._describe is None:
ProcessService._describe = DescribeService(self.url_root).compute_describe()
return ProcessService._describe
def process(self, payload: dict) -> dict:
config = PayloadParser.parse(payload)
image_roi: Optional[ImageROI] = config.get(PestoConfig.roi) # if no ROI: None
active_roi: ImageROI = image_roi or DummyImageROI() # bypass compute crop info and remove margins in pipeline
job_id = str(uuid.uuid4().time_low)
is_stateful = self.service_description['asynchronous'] is True
input_schema = self.service_description['input']
output_schema = self.service_description['output']
common_pipeline = filter(None, [
SchemaValidation(schema=input_schema),
active_roi.compute_crop_infos(),
PayloadConverter(image_roi=image_roi, schema=input_schema),
PayloadDebug(schema=input_schema),
AlgorithmWrapper(ProcessService._algorithm),
active_roi.remove_margin(),
ResponseSerializer(schema=output_schema, job_id=job_id),
])
if is_stateful:
pipeline = [
*common_pipeline,
StatefulResponse(self.url_root, job_id)
]
else:
pipeline = [
*common_pipeline,
StatelessResponse(self.url_root, job_id, output_schema)
]
return PestoFeatures(pipeline).process(payload)
async def async_process(self, request_payload: dict) -> Tuple[Any, ResultType]:
return await asyncio.wait_for(
async_exec(lambda: self.process(request_payload)),
timeout=None
)
| 37.857143 | 118 | 0.692453 | 2,747 | 0.740431 | 0 | 0 | 965 | 0.260108 | 216 | 0.058221 | 339 | 0.091375 |
81edcd97dcc8187e9a6274632556bdf350c16b63 | 1,858 | py | Python | migration_runner/helpers.py | beveradb/ecs-digital-interview-test | 7e2d18005263424be016281d3f3f344af4536971 | [
"MIT"
]
| null | null | null | migration_runner/helpers.py | beveradb/ecs-digital-interview-test | 7e2d18005263424be016281d3f3f344af4536971 | [
"MIT"
]
| 49 | 2019-02-14T01:52:14.000Z | 2021-11-15T17:48:17.000Z | migration_runner/helpers.py | beveradb/python-sql-migration-runner | 7e2d18005263424be016281d3f3f344af4536971 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import logging
import os
import re
import sys
class Helpers:
def __init__(self, logger=None):
if logger is None:
self.logger = logging.getLogger(__name__)
else:
self.logger = logger
@staticmethod
def extract_sequence_num(filename):
sequence_num = re.search(
'([0-9]+)[^0-9].+',
os.path.basename(filename)
).group(1)
return int(sequence_num)
def append_migration(self, migrations, filename):
try:
migrations.append((self.extract_sequence_num(filename), filename))
except AttributeError:
self.logger.error("Invalid filename found: {}".format(filename))
sys.exit(1)
def find_migrations(self, sql_directory):
migrations = []
for filename in os.listdir(sql_directory):
if filename.endswith(".sql"):
self.append_migration(
migrations,
str(os.path.join(sql_directory, filename))
)
return migrations
@staticmethod
def sort_migrations(migrations):
if (
all(isinstance(tup, tuple) for tup in migrations) and
all(isinstance(tup[0], int) for tup in migrations) and
all(isinstance(tup[1], str) for tup in migrations)
):
migrations.sort(key=lambda tup: tup[0])
else:
raise TypeError(
"Migrations list did not contain only tuple(int, str)")
def populate_migrations(self, sql_directory):
migrations = self.find_migrations(sql_directory)
self.sort_migrations(migrations)
return migrations
@staticmethod
def get_unprocessed_migrations(db_version, migrations):
return [tup for tup in migrations if tup[0] > int(db_version)]
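# --- Worked example (hypothetical file names) ---
# For a directory containing ["2-add_index.sql", "10-seed.sql", "1-create.sql"],
# populate_migrations(...) returns the numerically sorted list
#   [(1, '.../1-create.sql'), (2, '.../2-add_index.sql'), (10, '.../10-seed.sql')]
# and get_unprocessed_migrations(2, migrations) keeps only the (10, ...) entry.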
| 30.459016 | 78 | 0.599031 | 1,785 | 0.96071 | 0 | 0 | 792 | 0.426265 | 0 | 0 | 129 | 0.069429 |
81ee84f6b82809aa8e7f47a4b2060161548b3aab | 1,353 | py | Python | sitri/providers/contrib/ini.py | Elastoo-Team/sitri | d5470d9a37d3c944c0976793fce80a630e5625b1 | [
"MIT"
]
| 11 | 2020-12-16T07:00:29.000Z | 2021-05-25T16:24:50.000Z | sitri/providers/contrib/ini.py | Elastoo-Team/sitri | d5470d9a37d3c944c0976793fce80a630e5625b1 | [
"MIT"
]
| 1 | 2021-06-30T05:42:46.000Z | 2021-09-03T11:45:56.000Z | sitri/providers/contrib/ini.py | Elastoo-Team/sitri | d5470d9a37d3c944c0976793fce80a630e5625b1 | [
"MIT"
]
| null | null | null | import configparser
import os
import typing
from sitri.providers.base import ConfigProvider
class IniConfigProvider(ConfigProvider):
"""Config provider for Initialization file (Ini)."""
provider_code = "ini"
def __init__(
self,
ini_path: str = "./config.ini",
):
"""
:param ini_path: path to ini file
"""
self.configparser = configparser.ConfigParser()
with open(os.path.abspath(ini_path)) as f:
self.configparser.read_file(f)
self._sections = None
@property
def sections(self):
if not self._sections:
self._sections = list(self.configparser.keys())
return self._sections
def get(self, key: str, section: str, **kwargs) -> typing.Optional[typing.Any]: # type: ignore
"""Get value from ini file.
:param key: key or path for search
:param section: section of ini file
"""
if section not in self.sections:
return None
return self.configparser[section].get(key)
def keys(self, section: str, **kwargs) -> typing.List[str]: # type: ignore
"""Get keys of section.
:param section: section of ini file
"""
if section not in self.sections:
return []
return list(self.configparser[section].keys())
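# --- Usage sketch (hypothetical config.ini contents) ---
# Given ./config.ini:
#   [db]
#   host = localhost
#   port = 5432
# the provider behaves as follows:
#   provider = IniConfigProvider("./config.ini")
#   provider.get("host", section="db")    # -> "localhost" (values are strings)
#   provider.keys(section="db")           # -> ["host", "port"]
#   provider.get("host", section="nope")  # -> None for an unknown section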
| 24.6 | 99 | 0.602365 | 1,257 | 0.929047 | 0 | 0 | 155 | 0.11456 | 0 | 0 | 364 | 0.269032 |
81efee60c11f653a60ba95f712d8bc50076b39ce | 9,512 | py | Python | xpanse/api/assets/v2/ip_range.py | PaloAltoNetworks/cortex-xpanse-python-sdk | 532d3fdb031a0e5943cc492222299f94ad93d030 | [
"0BSD"
]
| 3 | 2021-09-02T16:05:04.000Z | 2021-09-10T01:10:48.000Z | xpanse/api/assets/v2/ip_range.py | PaloAltoNetworks/cortex-xpanse-python-sdk | 532d3fdb031a0e5943cc492222299f94ad93d030 | [
"0BSD"
]
| 7 | 2021-08-31T17:43:33.000Z | 2021-12-01T00:36:48.000Z | xpanse/api/assets/v2/ip_range.py | PaloAltoNetworks/cortex-xpanse-python-sdk | 532d3fdb031a0e5943cc492222299f94ad93d030 | [
"0BSD"
]
| null | null | null | from typing import Any, Dict, List
from xpanse.const import V2_PREFIX
from xpanse.endpoint import ExEndpoint
from xpanse.iterator import ExResultIterator
class IpRangeEndpoint(ExEndpoint):
"""
Part of the Assets v2 API for handling IP Ranges.
See: https://api.expander.expanse.co/api/v1/docs/
"""
def list(self, **kwargs: Any) -> ExResultIterator:
"""
Returns the list of IP Ranges. Arguments should be passed as keyword args using
the names below.
Args:
limit (int, optional):
Returns at most this many results in a single api call.
Default is 100, max is 10000.
offset (int, optional):
Returns results starting at this offset.
Default is 0.
sort (str, optional):
Comma-separated string; orders results by the given fields. If the field name is
prefixed by a -, then the ordering will be descending for that field.
Use a dotted notation to order by fields that are nested.
business_units (str, optional):
Comma-separated string; Returns only results whose Business Unit's ID falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
Also, cannot be used with the business-unit-names parameter.
business_unit_names (str, optional):
Comma-separated string; Returns only results whose Business Unit's name falls in the provided list.
NOTE: If omitted, API will return results for all Business Units the user has permissions to view.
Also, cannot be used with the business-units parameter.
inet (str, optional):
Search for given IP/CIDR block using a single IP (d.d.d.d), a dashed IP range (d.d.d.d-d.d.d.d),
a CIDR block (d.d.d.d/m), a partial CIDR (d.d.), or a wildcard (d.d.*.d).
Returns only results whose [startAddress, endAddress] range overlap with the given IP Address or CIDR.
tags (str, optional):
                Comma-separated string; Returns only results that are associated with the provided Tag IDs.
                Cannot be used with the tag-names parameter.
            tag_names (str, optional):
                Comma-separated string; Returns only results that are associated with the provided Tag names.
Cannot be used with the tags parameter.
include (str, optional):
Comma-separated string; Include the provided fields as part of the serialized result. Allowed values are
`annotations`, `severityCounts`, `attributionReasons`, `relatedRegistrationInformation`, `certDetails`, and `locationInformation`
Returns:
:obj:`ExResultIterator`:
An iterator containing all of the ip_range results. Results can be iterated
or called by page using `<iterator>.next()`.
Examples:
>>> # Return all ip ranges and print each range:
>>> for res in client.assets.ip_range.v2.list():
... for ip_r in res:
... print(ip_r)
"""
return ExResultIterator(self._api, f"{V2_PREFIX}/ip-range", kwargs)
def get(self, id: str, **kwargs: Any) -> Dict[str, Any]:
"""
Returns the details for a given IP Range. Arguments should be passed as keyword args using
the names below.
Args:
id (str):
ID for the ip-range. Should be a UUID.
include (str, optional):
Comma-separated string; Include the provided fields as part of the serialized result.
Returns:
:obj:`dict`:
A dictionary containing all of the details about an IP Range.
Examples:
>>> # Return IP Range with severity counts
>>> my_range = client.assets.ip_range.v2.get(<id>, include="severityCounts")
"""
return self._api.get(f"{V2_PREFIX}/ip-range/{id}", params=kwargs).json()
def create(
self, startAddress: str, endAddress: str, parentId: str, **kwargs: Any
) -> Dict[str, Any]:
"""
Creates a new custom IP Range.
NOTE: A validation error will be returned if the start and end addresses of the custom range do not fit within a top level range defined by Xpanse.
Args:
startAddress (str):
Start address of custom ip-range.
endAddress (str):
End address of custom ip-range.
parentId (str):
Id of parent ip-range.
tags (list, optional):
A list of tag annotation names.
additionalNotes (str, optional):
Any additional notes about the custom ip-range.
pointOfContactIds (list, optional):
A lost of point-of-contact annotation ids.
Returns:
:obj:`dict`:
A dictionary containing all of the details about the newly created, custom IP Range.
Examples:
>>> # Create a new ip-range under a parent range
>>> new_range = client.assets.ip_range.v2.create("12.175.114.120", "12.175.114.121", "43a5a569-27b0-39b5-98f4-22b9885546d7", additionalNotes="Business Unit X - Marketing website hosts")
"""
payload: Dict[str, Any] = {
"startAddress": startAddress,
"endAddress": endAddress,
"parentId": parentId,
"annotations": {},
}
if "tags" in kwargs:
payload["annotations"]["tags"] = kwargs["tags"]
if "additionalNotes" in kwargs:
payload["annotations"]["additionalNotes"] = kwargs["additionalNotes"]
if "pointOfContactIds" in kwargs:
payload["annotations"]["pointOfContactIds"] = kwargs["pointOfContactIds"]
return self._api.post(f"{V2_PREFIX}/ip-range", json=payload).json()
def delete(self, id: str) -> bool:
"""
Delete the given IP Range, and all connections to other data.
NOTE: This will only work for user-defined IP Ranges.
Args:
id (str):
ID for the ip-range. Should be a UUID.
Returns:
:obj:`boolean`:
`True` if the range was successfully deleted, otherwise `False`.
Examples:
>>> # Deletes a user defined range
>>> client.assets.ip_range.v2.delete("43a5a569-27b0-39b5-98f4-22b9885546d7")
"""
        return self._api.delete(f"{V2_PREFIX}/ip-range/{id}").status_code == 204
def update(self, id: str, **kwargs: Any) -> Dict[str, Any]:
"""
Allows the partial update of the given IP Range.
Args:
id (str):
ID for the ip-range. Should be a UUID.
startAddress (str, optional):
Start address of custom ip-range.
endAddress (str, optional):
End address of custom ip-range.
parentId (str, optional):
Id of parent ip-range.
tags (list, optional):
A list of tag annotation ids.
additionalNotes (str, optional):
Any additional notes about the custom ip-range.
pointOfContactIds (list, optional):
A lost of point-of-contact annotation ids.
Returns:
:obj:`dict`:
A dictionary containing all of the details about the updated, custom IP Range.
Examples:
>>> # Update an ip-range under a parent range
>>> new_range = client.assets.ip_range.v2.update("43a5a569-27b0-39b5-98f4-22b9885546d7", additionalNotes="Business Unit X - Development Environment")
"""
payload = {}
if "startAddress" in kwargs:
payload["startAddress"] = kwargs["startAddress"]
if "endAddress" in kwargs:
payload["endAddress"] = kwargs["endAddress"]
if "parentId" in kwargs:
payload["parentId"] = kwargs["parentId"]
if any(
arg in ("tags", "additionalNotes", "pointOfContactIds") for arg in kwargs
):
payload["annotations"] = {}
if "tags" in kwargs:
payload["annotations"]["tags"] = kwargs["tags"]
if "additionalNotes" in kwargs:
payload["annotations"]["additionalNotes"] = kwargs["additionalNotes"]
if "pointOfContactIds" in kwargs:
payload["annotations"]["pointOfContactIds"] = kwargs["pointOfContactIds"]
return self._api.patch(f"{V2_PREFIX}/ip-range/{id}", json=payload).json()
def tag(self, ranges: List[str], tags: List[str]) -> bool:
"""
Adds the provided tags to all of the specified ip ranges.
If any of the provided tags do not exist, they will be created.
Args:
ranges (list):
A list of ip-range IDs. Should be UUIDs.
tags (list):
A list of tag annotation names to add to an ip-range.
Returns:
:obj:`boolean`:
`True` if the ranges were tagged successfully, otherwise `False`.
"""
payload = {"ipRangeIds": ranges, "tags": tags}
return self._api.post(f"{V2_PREFIX}/ip-range/tag", json=payload)
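    # Usage sketch (hypothetical IDs; assumes a configured `client`, as in the
    # docstring examples above -- the "id" response key is an assumption):
    #   new_range = client.assets.ip_range.v2.create(
    #       "12.175.114.120", "12.175.114.121",
    #       "43a5a569-27b0-39b5-98f4-22b9885546d7", tags=["lab"])
    #   client.assets.ip_range.v2.tag([new_range["id"]], ["reviewed"])
    #   client.assets.ip_range.v2.delete(new_range["id"])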
| 43.633028 | 197 | 0.58505 | 9,354 | 0.983389 | 0 | 0 | 0 | 0 | 0 | 0 | 7,658 | 0.805088 |
81f0008b1bc4b02d44062640b174c7fe2264f9eb | 8,337 | py | Python | polus-color-pyramid-builder-plugin/src/main.py | blowekamp/polus-plugins | 87f9c36647b4cf95cf107cfede3a5a1d749415a5 | [
"MIT"
]
| null | null | null | polus-color-pyramid-builder-plugin/src/main.py | blowekamp/polus-plugins | 87f9c36647b4cf95cf107cfede3a5a1d749415a5 | [
"MIT"
]
| 1 | 2020-08-02T23:20:46.000Z | 2020-08-02T23:20:46.000Z | polus-color-pyramid-builder-plugin/src/main.py | gauharbains/polus-plugins | 5e4d1e33bb61d7619d3a76fb7c115d475628a909 | [
"MIT"
]
| null | null | null | from bfio import BioReader
import argparse, logging
import numpy as np
from pathlib import Path
import filepattern, multiprocessing, utils
from concurrent.futures import ThreadPoolExecutor
COLORS = ['red',
'green',
'blue',
'yellow',
'magenta',
'cyan',
'gray']
def get_number(s):
""" Check that s is number
If s is a number, first attempt to convert it to an int.
If integer conversion fails, attempt to convert it to a float.
If float conversion fails, return None.
Inputs:
s - An input string or number
Outputs:
value - Either float, int or None
"""
try:
return [int(si) for si in s.split('-')]
except ValueError:
try:
return [float(si) for si in s.split('-')]
except ValueError:
return None
def get_bounds(br,lower_bound,upper_bound):
""" Calculate upper and lower pixel values for image rescaling
This method calculates the upper and lower percentiles
for a given image. The lower_bound and upper_bound must
be floats in the range 0-1, where 0.01 indicates 1%. The
values returned are pixel intensity values.
Images are read in tiles to prevent images from being
completely read into memory. This permits calculation
of percentiles on images that are larger than memory.
Args:
br (bfio.BioReader): A BioReader object to access a tiled tiff
lower_bound (float): Lower bound percentile, must be in 0.00-1.00
upper_bound (float): Upper bound percentile, must be in 0.00-1.00
Returns:
[list]: List of upper and lower bound values in pixel intensity units.
"""
# TODO: Replace pixel buffer with histogram/fingerprint to handle
# larger images and/or larger percentile values
# Make sure the inputs are properly formatted
assert isinstance(lower_bound,float) and isinstance(upper_bound,float)
assert lower_bound >= 0 and lower_bound <= 1.0
assert upper_bound >= 0 and upper_bound <= 1.0
# Get the image size in pixels
image_size = br.num_x() * br.num_y()
# Get number of pixels needed to get percentile information
upper_bound_size = int(image_size * (1-upper_bound))
lower_bound_size = int(image_size * lower_bound)
# Create the pixel buffer
dtype = br.read_metadata().image().Pixels.get_PixelType()
upper_bound_vals = np.zeros((2*upper_bound_size,),dtype=dtype)
lower_bound_vals = np.full((2*lower_bound_size,),np.iinfo(dtype).max,dtype=dtype)
# Load image tiles and sort pixels
    for x in range(0, br.num_x(), 1024):
        # Load the next strip (full height, up to 1024 columns wide)
        tile = br.read_image(X=[x, min([x+1024, br.num_x()])], Z=[0, 1])
        # Sort the non-zero values
        tile_sorted = np.sort(tile[tile.nonzero()], axis=None)
        # Store the upper and lower bound pixel values
        temp = tile_sorted[-upper_bound_size:]
        upper_bound_vals[:temp.size] = temp
        temp = tile_sorted[:lower_bound_size]
        lower_bound_vals[-temp.size:] = temp
        # Re-sort inside the loop so the running extremes survive the next
        # iteration's overwrite; sorting only after the loop would keep just
        # the last tile's values
        upper_bound_vals = np.sort(upper_bound_vals, axis=None)
        lower_bound_vals = np.sort(lower_bound_vals, axis=None)
    return [lower_bound_vals[lower_bound_size], upper_bound_vals[-upper_bound_size]]
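# Usage sketch (hypothetical path): compute 1%/99% rescaling bounds for a
# tiled OME-TIFF without reading the whole image into memory:
#   br = BioReader('/data/image.ome.tif')
#   lower, upper = get_bounds(br, 0.01, 0.99)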
if __name__=="__main__":
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
''' Argument parsing '''
logger.info("Parsing arguments...")
parser = argparse.ArgumentParser(prog='main', description='Builds a DeepZoom color pyramid.')
# Input arguments
parser.add_argument('--filePattern', dest='filePattern', type=str,
help='Filename pattern used to separate data', required=True)
parser.add_argument('--inpDir', dest='inpDir', type=str,
help='Input image collection to be processed by this plugin', required=True)
parser.add_argument('--layout', dest='layout', type=str,
help='Color ordering (e.g. 1,11,,,,5,6)', required=True)
parser.add_argument('--bounds', dest='bounds', type=str,
help='Set bounds (should be float-float, int-int, or blank, e.g. 0.01-0.99,0-16000,,,,,)', required=False)
# Output arguments
parser.add_argument('--outDir', dest='outDir', type=str,
help='Output pyramid path.', required=True)
# Parse the arguments
args = parser.parse_args()
filePattern = args.filePattern
logger.info('filePattern = {}'.format(filePattern))
inpDir = args.inpDir
    if Path(args.inpDir).joinpath('images').is_dir():
        # switch to images folder if present
        inpDir = str(Path(args.inpDir).joinpath('images').absolute())
logger.info('inpDir = {}'.format(inpDir))
layout = args.layout
logger.info('layout = {}'.format(layout))
bounds = args.bounds
logger.info('bounds = {}'.format(bounds))
outDir = args.outDir
logger.info('outDir = {}'.format(outDir))
outDir = Path(outDir)
# Parse the layout
    layout = [None if l == '' else int(l) for l in layout.split(',')]
    if len(layout) > 7:
        layout = layout[:7]
    # Parse the bounds
    if bounds is not None:
        bounds = [[None] if l == '' else get_number(l) for l in bounds.split(',')]
        bounds = bounds[:len(layout)]
    else:
        bounds = [[None] for _ in layout]
# Parse files
fp = filepattern.FilePattern(inpDir,filePattern)
count = 0
for files in fp.iterate(group_by='c'):
outDirFrame = outDir.joinpath('{}_files'.format(count))
outDirFrame.mkdir()
count += 1
bioreaders = []
threads = []
with ThreadPoolExecutor(max([multiprocessing.cpu_count()//2,2])) as executor:
            for i, l in enumerate(layout):
                if l is None:
                    bioreaders.append(None)
                    continue
                f_path = [f for f in files if f['c'] == l]
                if len(f_path) == 0:
                    # No file for this channel: keep bioreaders aligned with
                    # the layout and treat the channel as empty
                    bioreaders.append(None)
                    layout[i] = None
                    continue
                f_path = f_path[0]['file']
bioreaders.append(BioReader(f_path,max_workers=multiprocessing.cpu_count()))
                if layout[i] is not None:
if isinstance(bounds[i][0],float):
logger.info('{}: Getting percentile bounds {}...'.format(Path(bioreaders[-1]._file_path).name,
bounds[i]))
threads.append(executor.submit(get_bounds,bioreaders[-1],bounds[i][0],bounds[i][1]))
elif isinstance(bounds[i][0],int):
bioreaders[-1].bounds = bounds[i]
else:
bioreaders[-1].bounds = [0,np.iinfo(bioreaders[-1].read_metadata().image().Pixels.get_PixelType()).max]
        for i in reversed(range(len(layout))):
            if layout[i] is None:
                continue
            if isinstance(bounds[i][0], int):
                logger.info('Color {}: {} (rescaling to {})'.format(COLORS[i],
                                                                    Path(Path(bioreaders[i]._file_path).name).name,
                                                                    bioreaders[i].bounds))
                continue
bioreaders[i].bounds = threads.pop().result()
logger.info('Color {}: {} (rescaling to {})'.format(COLORS[i],
Path(Path(bioreaders[i]._file_path).name).name,
bioreaders[i].bounds))
for br in bioreaders:
            if br is not None:
br_meta = br
file_info = utils.dzi_file(br_meta,outDirFrame,0)
encoder = utils.DeepZoomChunkEncoder(file_info)
file_writer = utils.DeepZoomWriter(outDirFrame)
utils._get_higher_res(0,bioreaders,file_writer,encoder)
| 41.272277 | 130 | 0.578026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,500 | 0.299868 |
81f05d1e9fcd11eb2b271dc4be5bcb49f68b18a0 | 1,175 | py | Python | kinetics/reaction_classes/general_rate_Law.py | wlawler45/kinetics | dd763ac27d7c0df8a9429419e911a22760ba5c5a | [
"MIT"
]
| 13 | 2020-05-30T21:39:01.000Z | 2022-01-30T22:52:36.000Z | kinetics/reaction_classes/general_rate_Law.py | wlawler45/kinetics | dd763ac27d7c0df8a9429419e911a22760ba5c5a | [
"MIT"
]
| 7 | 2019-06-15T13:16:31.000Z | 2021-12-09T11:16:53.000Z | kinetics/reaction_classes/general_rate_Law.py | willfinnigan/Kinetics | a607096f051c7721a7f2eac73a875366eaefef15 | [
"MIT"
]
| 4 | 2019-09-23T09:46:11.000Z | 2022-03-18T20:30:03.000Z | from kinetics.reaction_classes.reaction_base_class import Reaction
class Generic(Reaction):
"""
This Reaction class allows you to specify your own rate equation.
Enter the parameter names in params, and the substrate names used in the reaction in species.
Type the rate equation as a string in rate_equation, using these same names.
Enter the substrates used up, and the products made in the reaction as normal.
"""
def __init__(self,
params=[], species=[],
rate_equation='',
substrates=[], products=[]):
super().__init__()
self.reaction_substrate_names = species
self.parameter_names=params
self.rate_equation = rate_equation
self.substrates = substrates
self.products = products
    def calculate_rate(self, substrates, parameters):
        # Build an explicit namespace for eval(); mutating the dict returned
        # by locals() inside a function is unreliable in CPython
        namespace = dict(zip(self.reaction_substrate_names, substrates))
        namespace.update(zip(self.parameter_names, parameters))
        rate = eval(self.rate_equation, globals(), namespace)
        return rate | 33.571429 | 97 | 0.655319 | 1,107 | 0.942128 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.293617
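# Usage sketch (assumed values, not from the package docs): a first-order
# rate law v = k * S evaluated at S = 2.0 with k = 0.5:
#   r = Generic(params=['k'], species=['S'], rate_equation='k * S',
#               substrates=['S'], products=['P'])
#   r.calculate_rate([2.0], [0.5])  # -> 1.0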
81f0785b99135f37da39fd3dcab4612074c70aed | 718 | py | Python | examples/03-interception/api.py | nomadsinteractive/migi | 7ebcd0d362f642b889c309c20618ff36f1448c28 | [
"Apache-2.0"
]
| 3 | 2022-01-09T10:09:55.000Z | 2022-01-11T03:37:55.000Z | examples/03-interception/api.py | nomadsinteractive/migi | 7ebcd0d362f642b889c309c20618ff36f1448c28 | [
"Apache-2.0"
]
| null | null | null | examples/03-interception/api.py | nomadsinteractive/migi | 7ebcd0d362f642b889c309c20618ff36f1448c28 | [
"Apache-2.0"
]
| null | null | null | from ctypes import *
from migi.decorators import stdcall
@stdcall('MessageBoxW', 'User32.dll', interceptable=True)
def _native_message_box_w(hwnd: c_void_p, content: c_wchar_p, title: c_wchar_p, flags: c_uint32) -> c_int32:
if wstring_at(content) == "I'm in":
return _native_message_box_w.call_original(hwnd, create_unicode_buffer("We're in"), title, flags)
return _native_message_box_w.call_original(hwnd, content, title, flags)
def message_box(content: str, title: str, flags: int = 0) -> c_int32:
return _native_message_box_w(None, create_unicode_buffer(content), create_unicode_buffer(title), flags)
def restore():
_native_message_box_w.restore()
_native_message_box_w.intercept()
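# Usage sketch: with the hook installed by intercept() above, a matching
# message is rewritten before the original MessageBoxW runs:
#   message_box("I'm in", "demo")  # the box displays "We're in"
#   restore()                      # removes the interception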
| 32.636364 | 108 | 0.763231 | 0 | 0 | 0 | 0 | 388 | 0.54039 | 0 | 0 | 43 | 0.059889 |
81f105c666f6335423e163de23faa2cc06bc611a | 1,002 | py | Python | adapters/heiman/HS1RC.py | russdan/domoticz-zigbee2mqtt-plugin | d47895eab44bc87fc19ce151698d2afe9554fadc | [
"MIT"
]
| 146 | 2018-09-19T11:38:48.000Z | 2022-03-21T11:54:12.000Z | adapters/heiman/HS1RC.py | russdan/domoticz-zigbee2mqtt-plugin | d47895eab44bc87fc19ce151698d2afe9554fadc | [
"MIT"
]
| 783 | 2018-09-28T17:07:14.000Z | 2022-03-31T10:18:27.000Z | adapters/heiman/HS1RC.py | russdan/domoticz-zigbee2mqtt-plugin | d47895eab44bc87fc19ce151698d2afe9554fadc | [
"MIT"
]
| 147 | 2018-09-25T18:39:51.000Z | 2022-03-01T19:31:27.000Z | from adapters.adapter_with_battery import AdapterWithBattery
from devices.switch.selector_switch import SelectorSwitch
class HeimanAlarmRemoteAdapter(AdapterWithBattery):
def __init__(self):
super().__init__()
self.switch = SelectorSwitch('Remote', 'action')
self.switch.add_level('Off', None)
self.switch.add_level('Arm all zones', 'arm_all_zones')
self.switch.add_level('Arm partial zones', 'arm_partial_zones')
self.switch.add_level('Disarm', 'disarm')
self.switch.add_level('Emergency', 'emergency')
self.switch.set_selector_style(SelectorSwitch.SELECTOR_TYPE_MENU)
self.switch.disable_value_check_on_update()
self.devices.append(self.switch)
def convert_message(self, message):
message = super().convert_message(message)
return message
def handleCommand(self, alias, device, device_data, command, level, color):
self.switch.handle_command(device_data, command, level, color)
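    # Sketch (assumed payload shape): an incoming zigbee2mqtt message such as
    #   {"action": "arm_all_zones", "battery": 95}
    # is routed via the 'action' alias to the SelectorSwitch declared above,
    # selecting its 'Arm all zones' level.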
| 38.538462 | 79 | 0.718563 | 879 | 0.877246 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.126747 |
81f14765ab4db0efd7fd86ef77889b6138ae6170 | 1,236 | py | Python | tennis_model_scraper/tennis_model_scraper/spiders/tennis_data_co_uk_spider.py | DrAndrey/tennis_model | f058becf41747111818599132b43b6147347a96c | [
"Apache-2.0"
]
| null | null | null | tennis_model_scraper/tennis_model_scraper/spiders/tennis_data_co_uk_spider.py | DrAndrey/tennis_model | f058becf41747111818599132b43b6147347a96c | [
"Apache-2.0"
]
| null | null | null | tennis_model_scraper/tennis_model_scraper/spiders/tennis_data_co_uk_spider.py | DrAndrey/tennis_model | f058becf41747111818599132b43b6147347a96c | [
"Apache-2.0"
]
| 1 | 2019-01-21T14:59:34.000Z | 2019-01-21T14:59:34.000Z | # -*- coding: utf-8 -*-
"""
"""
import scrapy
from tennis_model.tennis_model_scraper.tennis_model_scraper import items
class TennisDataCoUkSpider(scrapy.Spider):
name = "tennis_data_co_uk"
allowed_domains = ["www.tennis-data.co.uk"]
start_urls = ["http://www.tennis-data.co.uk/alldata.php"]
custom_settings = {'ITEM_PIPELINES': {'tennis_model_scraper.pipelines.TennisDataCoUkPipeline': 1}}
def _correct_ext(self, link):
if ".zip" in link:
return link
elif "zip" in link:
return ".zip".join(link.split("zip"))
else:
raise Exception("Unknown file extension from url - {0}. 'zip' is expected".format(link))
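    # e.g. _correct_ext("2011zip") -> "2011.zip"; links that already contain
    # ".zip" are returned unchanged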
def parse(self, response):
archive_links = response.xpath("/html/body/table[5]/tr[2]/td[3]/a/@href")
for link in archive_links:
short_file_url = self._correct_ext(link.extract())
is_man_archives = 'w' not in short_file_url.split("/")[0]
if is_man_archives:
full_file_url = response.urljoin(short_file_url)
item = items.TennisDataCoUkItem()
item["file_urls"] = [full_file_url]
yield item
if __name__ == '__main__':
pass
| 31.692308 | 102 | 0.622977 | 1,073 | 0.868123 | 502 | 0.406149 | 0 | 0 | 0 | 0 | 334 | 0.270227 |
81f14ce563d81df9f3673772cac00a024e237a10 | 160 | py | Python | aoc20211219b.py | BarnabyShearer/aoc | 4feb66c668b068f0f42ad99b916e80732eba5a2d | [
"MIT"
]
| null | null | null | aoc20211219b.py | BarnabyShearer/aoc | 4feb66c668b068f0f42ad99b916e80732eba5a2d | [
"MIT"
]
| null | null | null | aoc20211219b.py | BarnabyShearer/aoc | 4feb66c668b068f0f42ad99b916e80732eba5a2d | [
"MIT"
]
| null | null | null | from aoc20211219a import *
def aoc(data):
sensors, _ = slam(parse(data))
return max(sum(abs(x) for x in sub(a, b)) for a in sensors for b in sensors)
| 22.857143 | 80 | 0.6625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
81f32b032af68bf0f0fedb0195eda42a9e169769 | 2,739 | py | Python | pyhmy/rpc/request.py | difengJ/pyhmy | c0bdb7f53b56bf8e9f53d60da6fec61415cebf3f | [
"MIT"
]
| 37 | 2020-05-24T20:09:33.000Z | 2022-03-28T00:59:00.000Z | pyhmy/rpc/request.py | economize/pyhmy | 668d7ef756e4fefe4621f435bb9a3e37cbeb82d9 | [
"MIT"
]
| 12 | 2020-06-09T02:58:54.000Z | 2022-03-20T15:33:55.000Z | pyhmy/rpc/request.py | economize/pyhmy | 668d7ef756e4fefe4621f435bb9a3e37cbeb82d9 | [
"MIT"
]
| 30 | 2020-03-14T03:58:24.000Z | 2022-02-19T18:19:00.000Z | import json
import requests
from .exceptions import (
RequestsError,
RequestsTimeoutError,
RPCError
)
_default_endpoint = 'http://localhost:9500'
_default_timeout = 30
def base_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> str:
"""
Basic RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
    bytes
        Raw response body from the request
Raises
------
TypeError
If params is not a list or None
RequestsTimeoutError
If request timed out
RequestsError
        If another request error occurred
"""
if params is None:
params = []
elif not isinstance(params, list):
raise TypeError(f'invalid type {params.__class__}')
try:
payload = {
"id": "1",
"jsonrpc": "2.0",
"method": method,
"params": params
}
headers = {
'Content-Type': 'application/json'
}
resp = requests.request('POST', endpoint, headers=headers, data=json.dumps(payload),
timeout=timeout, allow_redirects=True)
return resp.content
except requests.exceptions.Timeout as err:
raise RequestsTimeoutError(endpoint) from err
except requests.exceptions.RequestException as err:
raise RequestsError(endpoint) from err
def rpc_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
RPC request
Parameters
    ----------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict
Returns dictionary representation of RPC response
Example format:
{
"jsonrpc": "2.0",
"id": 1,
"result": ...
}
Raises
------
RPCError
If RPC response returned a blockchain error
See Also
--------
base_request
"""
raw_resp = base_request(method, params, endpoint, timeout)
try:
resp = json.loads(raw_resp)
if 'error' in resp:
raise RPCError(method, endpoint, str(resp['error']))
return resp
except json.decoder.JSONDecodeError as err:
raise RPCError(method, endpoint, raw_resp) from err
# TODO: Add GET requests
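# Usage sketch (assumes a Harmony node listening on the default endpoint):
#   block_number = rpc_request('hmy_blockNumber')['result']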
| 23.410256 | 99 | 0.591457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,386 | 0.506024 |
81f3a40d44b3887a0748b1060fddbf244a3d66a8 | 3,430 | py | Python | gaze_api/scripts/vidPub.py | ajdroid/tobii_ros | b103cb772604458d9822911494af75057902ebb1 | [
"Unlicense"
]
| null | null | null | gaze_api/scripts/vidPub.py | ajdroid/tobii_ros | b103cb772604458d9822911494af75057902ebb1 | [
"Unlicense"
]
| null | null | null | gaze_api/scripts/vidPub.py | ajdroid/tobii_ros | b103cb772604458d9822911494af75057902ebb1 | [
"Unlicense"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys  # needed for sys.exit() in the shutdown handler below
import time
import socket
import threading
import rospy
from publisher import *
import cv2
import imagezmq
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
def parse_sent_msg(msg):
ctr, frame_time = msg.split()
frame_time = float(frame_time)
return frame_time, ctr
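# e.g. parse_sent_msg("42 1618033988.75") -> (1618033988.75, "42")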
# setup socket to python3 video streamer
# Helper class implementing an IO daemon thread for imgzmq recv
class VideoStreamSubscriber:
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
if port == 0: # ipc operation rather than tcp
self.receiver_address = "ipc://{}".format(self.hostname)
else:
self.receiver_address = "tcp://{}:{}".format(self.hostname, self.port)
self._stop = False
self._data_ready = threading.Event()
self._thread = threading.Thread(target=self._run, args=())
self._thread.daemon = True
self._thread.start()
def receive(self, timeout=30.0):
flag = self._data_ready.wait(timeout=timeout)
if not flag:
            raise TimeoutError(
                "Timeout while reading from subscriber {}".format(self.receiver_address))
self._data_ready.clear()
return self._data
def _run(self):
receiver = imagezmq.ImageHub(self.receiver_address, REQ_REP=False)
while not self._stop:
self._data = receiver.recv_jpg()
# self._data = receiver.recv_image()
self._data_ready.set()
receiver.close()
def close(self):
self._stop = True
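# Usage sketch (address/port assumed): pull a single frame from a publisher:
#   sub = VideoStreamSubscriber("127.0.0.1", 5555)
#   msg, jpg_buffer = sub.receive()
#   sub.close()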
# Receive from broadcast
# There are 2 hostname styles; comment out the one you don't need
hostname = "127.0.0.1" # Use to receive from localhost
# hostname = "192.168.86.38" # Use to receive from other computer
if __name__ == '__main__':
try:
# parser = argparse.ArgumentParser()
# parser.add_argument('--gp', action="store_true", help="Option to publish gaze position (2D) data")
# args = parser.parse_args(rospy.myargv()[1:])
'''
Initiate the Video Stream Subscription over Image ZMQ
'''
imgzmq_port = 5555
hostname = "/tmp/tobiiVid"; imgzmq_port = 0
receiver = VideoStreamSubscriber(hostname, imgzmq_port)
'''
Create publisher
'''
# Default publish the 3D gaze position data
vidpub = rospy.Publisher("tobii_video", Image, queue_size=10)
bridge = CvBridge()
rospy.init_node('tobii_image_sender', anonymous=True)
while not rospy.is_shutdown():
# get from py3
sent_msg_string, frame = receiver.receive()
            # _run() receives JPEG buffers via recv_jpg(), so decode them here;
            # the raw frombuffer/reshape path only applies with recv_image()
            # and uncompressed 1920x1080 BGR frames
            image = cv2.imdecode(np.frombuffer(frame, dtype='uint8'), -1)
print(image.shape, sent_msg_string)
# Parse sent message to convert to ros formats
frametime, counter = parse_sent_msg(sent_msg_string)
# publish to ROS
im_ros = bridge.cv2_to_imgmsg(image, "bgr8")
im_ros.header.stamp = rospy.Time.from_sec(frametime)
im_ros.header.frame_id = str(counter)
vidpub.publish(im_ros)
except (rospy.ROSInterruptException, KeyboardInterrupt, SystemExit):
sys.exit(0)
| 32.666667 | 108 | 0.634111 | 1,187 | 0.346064 | 0 | 0 | 0 | 0 | 0 | 0 | 981 | 0.286006 |
81f5675fd7ed78cac4c0fcf799fa59f89177494a | 3,179 | py | Python | LAImapping_SNAP/snappy_backscatterLAI.py | dipankar05/aws4agrisar | f3307f0a90668f4586baf61ac4a322be4d32ea6a | [
"MIT"
]
| 5 | 2021-08-20T05:21:02.000Z | 2021-08-22T09:43:54.000Z | Chapter05/Sec56/LAImapping_SNAP/snappy_backscatterLAI.py | dipankar05/springer-cropradar | b1f2fddab1bb60adafd8c9aba6c34538eb356988 | [
"MIT"
]
| null | null | null | Chapter05/Sec56/LAImapping_SNAP/snappy_backscatterLAI.py | dipankar05/springer-cropradar | b1f2fddab1bb60adafd8c9aba6c34538eb356988 | [
"MIT"
]
| 1 | 2021-07-06T03:18:26.000Z | 2021-07-06T03:18:26.000Z | import sys
import numpy
import numpy as np
from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils
from snappy import FlagCoding
##############
import csv
###############MSVR
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
########################
if len(sys.argv) != 2:
print("usage: %s <file>" % sys.argv[0])
sys.exit(1)
file = sys.argv[1]
print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()
print("Product: %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time: " + str(product.getStartTime()))
print("End time: " + str(product.getEndTime()))
print("Bands: %s" % (list(band_names)))
##---------------------------------------------------------------------------------
with open('rice_LUT.csv','r') as dest_f:
data_iter = csv.reader(dest_f,
delimiter = ',',
quotechar = '"')
data = [data for data in data_iter]
data_array = np.asarray(data, dtype = np.float32)
VV = data_array[:,1]
VH = data_array[:,2]
PAI = data_array[:,0]
X=np.column_stack((VV,VH))
Y = PAI
#SVR training
pipeline = make_pipeline(StandardScaler(),
SVR(kernel='rbf', epsilon=0.105, C=250, gamma = 2.8),
)
SVRmodel=pipeline.fit(X,Y)
# Predict for validation data
valX = X
y_out = pipeline.predict(valX)
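# Sketch: the fitted pipeline maps (VV, VH) backscatter pairs to PAI, e.g.
#   pipeline.predict(np.array([[-8.5, -14.2]]))  # dB values are illustrative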
##---------------------------------------------------------------------------------
bandc11 = product.getBand('C11')
bandc22 = product.getBand('C22')
laiProduct = Product('LAI', 'LAI', width, height)
laiBand = laiProduct.addBand('lai', ProductData.TYPE_FLOAT32)
laiFlagsBand = laiProduct.addBand('lai_flags', ProductData.TYPE_UINT8)
writer = ProductIO.getProductWriter('BEAM-DIMAP')
ProductUtils.copyGeoCoding(product, laiProduct)
ProductUtils.copyMetadata(product, laiProduct)
ProductUtils.copyTiePointGrids(product, laiProduct)
laiFlagCoding = FlagCoding('lai_flags')
laiFlagCoding.addFlag("LAI_LOW", 1, "LAI below 0")
laiFlagCoding.addFlag("LAI_HIGH", 2, "LAI above 5")
group = laiProduct.getFlagCodingGroup()
group.add(laiFlagCoding)
laiFlagsBand.setSampleCoding(laiFlagCoding)
laiProduct.setProductWriter(writer)
laiProduct.writeHeader('LAImap_output.dim')
c11 = numpy.zeros(width, dtype=numpy.float32)
c22 = numpy.zeros(width, dtype=numpy.float32)
print("Writing...")
for y in range(height):
print("processing line ", y, " of ", height)
c11 = bandc11.readPixels(0, y, width, 1, c11)
c22 = bandc22.readPixels(0, y, width, 1, c22)
    Z = np.column_stack((c11, c22))
    lai = pipeline.predict(Z)
laiBand.writePixels(0, y, width, 1, lai)
laiLow = lai < 0.0
laiHigh = lai > 5.0
laiFlags = numpy.array(laiLow + 2 * laiHigh, dtype=numpy.int32)
laiFlagsBand.writePixels(0, y, width, 1, laiFlags)
laiProduct.closeIO()
print("Done.") | 28.383929 | 83 | 0.654608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.199434 |
81f67189d5d5520037d53dd853b5b30e2b4a3514 | 23,097 | py | Python | test_fiona_issue383.py | thomasaarholt/fiona-wheels | 50ff76691ea4286052d552ffb67d83894742b00a | [
"MIT"
]
| null | null | null | test_fiona_issue383.py | thomasaarholt/fiona-wheels | 50ff76691ea4286052d552ffb67d83894742b00a | [
"MIT"
]
| null | null | null | test_fiona_issue383.py | thomasaarholt/fiona-wheels | 50ff76691ea4286052d552ffb67d83894742b00a | [
"MIT"
]
| null | null | null | import fiona
d = {
"type": "Feature",
"id": "0",
"properties": {
"ADMINFORES": "99081600010343",
"REGION": "08",
"FORESTNUMB": "16",
"FORESTORGC": "0816",
"FORESTNAME": "El Yunque National Forest",
"GIS_ACRES": 55829.81,
"SHAPE_AREA": 0.0193062316937,
"SHAPE_LEN": 0.754287568301,
},
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[
[
[-65.73293016000002, 18.33284838999998],
[-65.73293445000002, 18.331367639999996],
[-65.73189660000003, 18.331369719999998],
[-65.73040952000002, 18.33137273],
[-65.72620770999998, 18.33138113000001],
[-65.72303074000001, 18.331387389999975],
[-65.71763471000003, 18.331393549999973],
[-65.71717587, 18.331394069999988],
[-65.71297922999997, 18.331403290000026],
[-65.71248787000002, 18.33140437999998],
[-65.70898332000002, 18.33141236],
[-65.70846269999998, 18.331413540000028],
[-65.70470655999998, 18.331422009999983],
[-65.70340513999997, 18.33142491000001],
[-65.70268779000003, 18.331419400000016],
[-65.70098910000002, 18.33140635000001],
[-65.69978839999999, 18.33139711000001],
[-65.69977925, 18.32948927000001],
[-65.69976860000003, 18.32723274],
[-65.69976336000002, 18.326155840000013],
[-65.69975882, 18.32519180999998],
[-65.69975420999998, 18.324281380000002],
[-65.69975116, 18.323670390000018],
[-65.69974878, 18.323214399999983],
[-65.69972460999998, 18.317907339999977],
[-65.69972661000003, 18.31559458999999],
[-65.69972832000002, 18.314692869999988],
[-65.69972934999998, 18.312400700000012],
[-65.69973214999999, 18.309193600000015],
[-65.69973189000001, 18.308128119999992],
[-65.69971594999998, 18.304170699999986],
[-65.69971009, 18.302713270000027],
[-65.69969680999998, 18.29942688],
[-65.69968705999997, 18.297028839999996],
[-65.69968439000002, 18.294420890000026],
[-65.69968401, 18.294158770000024],
[-65.69968397000002, 18.29406161000003],
[-65.69968146999997, 18.29031968999999],
[-65.69967542, 18.286261500000023],
[-65.6996757, 18.286123120000013],
[-65.69967338999999, 18.284205750000012],
[-65.69967251000003, 18.283497660000023],
[-65.69967014000002, 18.281735219999973],
[-65.69967000000003, 18.28134633000002],
[-65.69994827, 18.28134559],
[-65.70099542999998, 18.28134276999998],
[-65.70358926, 18.28133575999999],
[-65.70616948000003, 18.281328770000016],
[-65.70911901, 18.28132070999999],
[-65.70971071999998, 18.28131909000001],
[-65.71624101999998, 18.28131652000002],
[-65.71624542, 18.276418089999993],
[-65.71624548, 18.27636744],
[-65.71624578000001, 18.275968209999974],
[-65.71624845000002, 18.27300660999998],
[-65.71624307000002, 18.271180739999977],
[-65.71623899999997, 18.26979332000002],
[-65.71623254999997, 18.267581380000024],
[-65.71623254999997, 18.267578500000013],
[-65.71623402, 18.267040029999976],
[-65.71623762000002, 18.265657929999975],
[-65.71623955000001, 18.26496930000002],
[-65.71624981999997, 18.260115170000006],
[-65.71625891999997, 18.257678180000028],
[-65.71625689000001, 18.25766888999999],
[-65.71628033000002, 18.252014929999973],
[-65.71628700000002, 18.250603020000028],
[-65.71629617000002, 18.248364939999988],
[-65.71629643, 18.248011659999975],
[-65.71974196999997, 18.248007089999987],
[-65.72038055000002, 18.24800706000002],
[-65.72076942000001, 18.24800829999998],
[-65.72464429000001, 18.248011910000002],
[-65.72465315, 18.248011519999977],
[-65.72509256000001, 18.24801222000002],
[-65.72707300000002, 18.24801083],
[-65.73231042999998, 18.2480104],
[-65.73397174000002, 18.248009190000005],
[-65.73705114, 18.248008589999984],
[-65.73750502000001, 18.248008190000007],
[-65.73889711999999, 18.24800842000002],
[-65.73978022, 18.248008830000003],
[-65.74408667, 18.248010669999985],
[-65.74502591999999, 18.248009980000006],
[-65.74623288999999, 18.248009120000006],
[-65.74772324000003, 18.248009149999973],
[-65.74924592000002, 18.248014580000017],
[-65.74961603999998, 18.248013990000004],
[-65.74961524000003, 18.244120570000007],
[-65.74961268999999, 18.243257019999987],
[-65.74961502999997, 18.235669789999974],
[-65.74961267999998, 18.235211540000023],
[-65.74961048, 18.234789499999977],
[-65.74961128000001, 18.231243000000006],
[-65.75090724, 18.231235679999998],
[-65.75247086000002, 18.231236500000023],
[-65.75309636999998, 18.231236850000016],
[-65.75896512000003, 18.231239829999993],
[-65.76053288000003, 18.231240590000027],
[-65.76145975999998, 18.231241049999994],
[-65.76266423999999, 18.23124161999999],
[-65.76402088999998, 18.231242259999988],
[-65.76422652999997, 18.231242339999994],
[-65.76459129, 18.231242520000023],
[-65.76506522, 18.231243529999972],
[-65.76575971, 18.231245],
[-65.77265518000002, 18.231259480000006],
[-65.77609515, 18.23126751000001],
[-65.77853763000002, 18.231273129999977],
[-65.78301661, 18.231283440000027],
[-65.78536026, 18.231288749999976],
[-65.78565572000002, 18.231289430000004],
[-65.78587555000001, 18.23129019999999],
[-65.78745778000001, 18.23129352000001],
[-65.79147775000001, 18.231303949999983],
[-65.80175496999999, 18.23133021000001],
[-65.80328739999999, 18.23133408000001],
[-65.80925552999997, 18.23135074999999],
[-65.81185003000002, 18.231357919999994],
[-65.81302187, 18.231352949999973],
[-65.81574820999998, 18.23134140000002],
[-65.81705820000002, 18.231335829999978],
[-65.81733358000002, 18.231334670000024],
[-65.82028713, 18.231322050000017],
[-65.82052381, 18.23132104000001],
[-65.82337763999999, 18.23130882999999],
[-65.82649563000001, 18.231295439999997],
[-65.82811142999998, 18.231288459999973],
[-65.83293057999998, 18.23127384999998],
[-65.83292964999998, 18.231761140000003],
[-65.83293025, 18.234220730000004],
[-65.83292996, 18.23624890000002],
[-65.83292955000002, 18.239821380000024],
[-65.83292905000002, 18.244286690000024],
[-65.83292845, 18.244807849999972],
[-65.83292886999999, 18.245117160000007],
[-65.83292883000001, 18.24573097000001],
[-65.83292870999998, 18.247063589999982],
[-65.83292857999999, 18.248008060000018],
[-65.83315374, 18.248008760000005],
[-65.83325909000001, 18.248009089999982],
[-65.83590992, 18.248030509999978],
[-65.84442614, 18.248036909999996],
[-65.84617400000002, 18.248038199999996],
[-65.84807433999998, 18.24803958000001],
[-65.84813063000001, 18.248039609999978],
[-65.84903366999998, 18.248040240000023],
[-65.85197088000001, 18.24804229],
[-65.85535651999999, 18.24804193],
[-65.85613706999999, 18.248041839999985],
[-65.85719701, 18.248041699999987],
[-65.8638446, 18.24804075999998],
[-65.86544515000003, 18.24804051000001],
[-65.87069150999997, 18.248039570000003],
[-65.87385301, 18.248038310000027],
[-65.87461352999998, 18.248020329999974],
[-65.87817146999998, 18.248007959999995],
[-65.88441703000001, 18.24800984000001],
[-65.89088908999997, 18.248012580000022],
[-65.89899125, 18.248013500000013],
[-65.89925985999997, 18.24801395999998],
[-65.90513017, 18.248014790000013],
[-65.90874113000001, 18.248012710000012],
[-65.91595359000002, 18.248011819999988],
[-65.91629429, 18.248011819999988],
[-65.9162887, 18.250010359999976],
[-65.9162852, 18.25164811000002],
[-65.91628292000001, 18.25191947000002],
[-65.91627997, 18.253774229999976],
[-65.91627848000002, 18.25477933000002],
[-65.91627578999999, 18.255991100000017],
[-65.91626445999998, 18.261137089999977],
[-65.91625448000002, 18.26512563],
[-65.91625524, 18.26536785000002],
[-65.91625922999998, 18.266019389999997],
[-65.91632637999999, 18.266198929999973],
[-65.91632625, 18.266542049999998],
[-65.91631202000002, 18.267959780000012],
[-65.91631167000003, 18.267977850000022],
[-65.91630744000003, 18.268755800000008],
[-65.91630715999997, 18.268808560000025],
[-65.91625932, 18.270663520000028],
[-65.91625911, 18.270671989999983],
[-65.91625876, 18.270887870000024],
[-65.91625875, 18.27455298000001],
[-65.91625871999997, 18.274613149999993],
[-65.91625811, 18.279979179999998],
[-65.91626000000002, 18.280340190000004],
[-65.91625800000003, 18.281121770000027],
[-65.91625804, 18.281356930000015],
[-65.91618933000001, 18.281356570000014],
[-65.91500064000002, 18.281350369999984],
[-65.91296770999998, 18.281339800000012],
[-65.91253340999998, 18.281337529999973],
[-65.91229578999997, 18.281336280000005],
[-65.90998387000002, 18.281324219999988],
[-65.90871597, 18.281318759999976],
[-65.90216367, 18.28129032999999],
[-65.90111256, 18.281285760000003],
[-65.89913740999998, 18.28127711000002],
[-65.89885119000002, 18.28127286],
[-65.89237293000002, 18.281247450000023],
[-65.89048616000002, 18.281239140000025],
[-65.88711766, 18.28122424999998],
[-65.88599235999999, 18.281219249999992],
[-65.88291291000002, 18.28120555999999],
[-65.88291178999998, 18.28584490999998],
[-65.88291048999997, 18.291010749999998],
[-65.88290905000002, 18.29165870999998],
[-65.88291565999998, 18.302684020000015],
[-65.88291612, 18.303763930000002],
[-65.88291874999999, 18.31314200999998],
[-65.88292098, 18.314737100000002],
[-65.88292178, 18.316319510000028],
[-65.88292336, 18.320099939999977],
[-65.88292583999998, 18.325711160000026],
[-65.88292658, 18.32707603],
[-65.88292819999998, 18.330798640000012],
[-65.88292837, 18.331260059999977],
[-65.88087401000001, 18.331255440000007],
[-65.87894735999998, 18.331251090000023],
[-65.87603802000001, 18.33124448000001],
[-65.87461601000001, 18.33124122999999],
[-65.86804993999999, 18.331420340000022],
[-65.86763531000003, 18.331420009999988],
[-65.86672666999999, 18.33141931],
[-65.86648867999997, 18.331419100000005],
[-65.86635653000002, 18.331419170000004],
[-65.86273363999999, 18.331421009999985],
[-65.85793086000001, 18.331423389999998],
[-65.85789242999999, 18.33142171999998],
[-65.85542400000003, 18.331424019999986],
[-65.85350249999999, 18.331425749999994],
[-65.84982063000001, 18.33142908000002],
[-65.84969439000002, 18.331429189999994],
[-65.84969428, 18.331550279999988],
[-65.84969804000002, 18.33796344000001],
[-65.84969840999997, 18.338737999999978],
[-65.8497021, 18.345083629999976],
[-65.84970268000001, 18.346151969999994],
[-65.84970370000002, 18.34806388999999],
[-65.84281220000003, 18.348051429999998],
[-65.83631126, 18.348039400000005],
[-65.83572038, 18.348038309999993],
[-65.82972193, 18.348027020000018],
[-65.82915395999999, 18.348025940000014],
[-65.82799924, 18.34802375999999],
[-65.82479099, 18.34801637999999],
[-65.82399432, 18.34801453],
[-65.82321229000001, 18.348012719999986],
[-65.82141923, 18.348008540000023],
[-65.82131368, 18.34800831000001],
[-65.81955477000002, 18.348004189999983],
[-65.81593006999998, 18.347995690000005],
[-65.81524768000003, 18.347994099999994],
[-65.81430688, 18.347991850000028],
[-65.81409592, 18.34799134000002],
[-65.81219464999998, 18.347986839999976],
[-65.81037927, 18.347982520000016],
[-65.80875237999999, 18.347978650000016],
[-65.80848982999998, 18.34797801000002],
[-65.80829098999999, 18.347977609999987],
[-65.80772302000003, 18.347976930000016],
[-65.80733909999998, 18.34797567999999],
[-65.80353065000003, 18.347967859999983],
[-65.80071562, 18.347962040000027],
[-65.79902959999998, 18.34795853999998],
[-65.79798546, 18.34795637000002],
[-65.79009180999998, 18.347941110000022],
[-65.78932427000001, 18.347939639999993],
[-65.78840032, 18.347937820000027],
[-65.78753816, 18.347936129999994],
[-65.78601164000003, 18.347933119999993],
[-65.78038322999998, 18.347921919999976],
[-65.77934201, 18.347919479999973],
[-65.77871169000002, 18.347918520000007],
[-65.77776547000002, 18.347916520000012],
[-65.77676473999998, 18.347914670000023],
[-65.77662666999998, 18.347914370000012],
[-65.77532722000001, 18.347911739999972],
[-65.77499889, 18.347911039999985],
[-65.77385053, 18.347908700000005],
[-65.77354066999999, 18.34790806000001],
[-65.76955748, 18.347899840000025],
[-65.76888499, 18.347898439999994],
[-65.76835487, 18.347897349999982],
[-65.76683013000002, 18.34789416000001],
[-65.76222604999998, 18.347884490000013],
[-65.75909141, 18.347877840000024],
[-65.75869390000003, 18.347874339999976],
[-65.75078702000002, 18.34780397999998],
[-65.74961532999998, 18.347793539999998],
[-65.74804139999998, 18.347743690000016],
[-65.74783091, 18.347737010000003],
[-65.74728348000002, 18.347736259999976],
[-65.74297489999998, 18.347730169999977],
[-65.74044021999998, 18.347710549999988],
[-65.73974084000002, 18.347705140000016],
[-65.73561567000002, 18.34767314999999],
[-65.73484725999998, 18.347665380000024],
[-65.73302854000002, 18.347646950000012],
[-65.73294028999999, 18.347646069999996],
[-65.73293561999998, 18.346632310000018],
[-65.73292482, 18.344269059999988],
[-65.73292071999998, 18.343373789999987],
[-65.73291719000002, 18.34259155000001],
[-65.73290365999998, 18.339655180000022],
[-65.73291784000003, 18.337885169999993],
[-65.73292518, 18.334980180000002],
[-65.73292579000002, 18.334753429999978],
[-65.73293016000002, 18.33284838999998],
]
],
[
[
[-66.16262245000001, 18.051031109999997],
[-66.16184043999999, 18.049737929999992],
[-66.1619091, 18.04731941],
[-66.16514587, 18.04502678],
[-66.16511536000002, 18.044198989999984],
[-66.16511725999999, 18.043462750000003],
[-66.16511725999999, 18.043279649999988],
[-66.16594887000002, 18.04355812],
[-66.16832161000002, 18.041448590000016],
[-66.16813087000003, 18.040346150000005],
[-66.16640091, 18.04031180999999],
[-66.16698073999999, 18.03862952999998],
[-66.16720580999998, 18.037527080000018],
[-66.16765975999999, 18.033853529999988],
[-66.16861915999999, 18.034097669999994],
[-66.16942024000002, 18.033731460000013],
[-66.16954613000001, 18.03507804999998],
[-66.16970443999998, 18.036489490000008],
[-66.16989517000002, 18.037008290000017],
[-66.17005347999998, 18.038480760000027],
[-66.17072487000002, 18.03927802999999],
[-66.17091750999998, 18.039522169999998],
[-66.17117309999998, 18.039552689999994],
[-66.17162131999999, 18.039552689999994],
[-66.17216492, 18.039308549999987],
[-66.17245293000002, 18.039155960000016],
[-66.17293358, 18.039094920000025],
[-66.17320251000001, 18.039094920000025],
[-66.17344666000002, 18.039094920000025],
[-66.17376709000001, 18.03928185000001],
[-66.17305756000002, 18.042036059999987],
[-66.17280005999999, 18.04304695000002],
[-66.17234993, 18.044912339999996],
[-66.17170142999998, 18.050027849999992],
[-66.17182922, 18.050394059999974],
[-66.17035484000002, 18.051618580000024],
[-66.16718483, 18.05198096999999],
[-66.16692733999997, 18.051458360000026],
[-66.16661072, 18.050817489999986],
[-66.16660117999999, 18.050874710000016],
[-66.16659355000002, 18.05092811999998],
[-66.16641808000003, 18.052057269999978],
[-66.16641426000001, 18.052072529999975],
[-66.16576958000002, 18.05623436000002],
[-66.16262245000001, 18.051031109999997],
]
],
[
[
[-66.53508758999999, 18.392507550000005],
[-66.53519820999998, 18.391786579999973],
[-66.53970336999998, 18.392427440000006],
[-66.53828812, 18.397306440000023],
[-66.53822708000001, 18.39755821],
[-66.53777313, 18.398542399999997],
[-66.53761481999999, 18.400304790000007],
[-66.53463554000001, 18.40027046],
[-66.53440475000002, 18.399271010000007],
[-66.53497124, 18.39718819000001],
[-66.53505897999997, 18.396612170000026],
[-66.53450774999999, 18.395158770000023],
[-66.53466796999999, 18.394887919999974],
[-66.53466796999999, 18.39454841999998],
[-66.53477286999998, 18.394208909999975],
[-66.53480911000003, 18.393922809999992],
[-66.53482628, 18.39348030000002],
[-66.5349865, 18.393175129999975],
[-66.53508758999999, 18.392507550000005],
]
],
],
},
}
from shapely.geometry import shape
print(shape(d["geometry"]))
| 55.789855 | 61 | 0.50972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.010131 |
81f80e27b98817c9ba8d1f9fbc633ce2a51ca059 | 850 | py | Python | pre_commit_hooks/forbid_crlf.py | henryiii/pre-commit-hooks | 93b709d9ca27610518f5435fa9a51d98f7ada18d | [
"MIT"
]
| 62 | 2015-07-26T06:20:40.000Z | 2022-03-26T21:05:59.000Z | pre_commit_hooks/forbid_crlf.py | henryiii/pre-commit-hooks | 93b709d9ca27610518f5435fa9a51d98f7ada18d | [
"MIT"
]
| 25 | 2015-07-24T08:34:52.000Z | 2022-03-10T20:22:45.000Z | pre_commit_hooks/forbid_crlf.py | henryiii/pre-commit-hooks | 93b709d9ca27610518f5435fa9a51d98f7ada18d | [
"MIT"
]
| 30 | 2015-07-18T17:40:25.000Z | 2022-02-10T19:55:53.000Z | from __future__ import print_function
import argparse, sys
from .utils import is_textfile
def contains_crlf(filename):
with open(filename, mode='rb') as file_checked:
for line in file_checked.readlines():
if line.endswith(b'\r\n'):
return True
return False
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='filenames to check')
args = parser.parse_args(argv)
text_files = [f for f in args.filenames if is_textfile(f)]
files_with_crlf = [f for f in text_files if contains_crlf(f)]
return_code = 0
for file_with_crlf in files_with_crlf:
print('CRLF end-lines detected in file: {0}'.format(file_with_crlf))
return_code = 1
return return_code
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 32.692308 | 76 | 0.682353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.109412 |
81f8404fe9a704d334f1bcd998cf63672186b71f | 55 | py | Python | backend/code/start.py | socek/iep | 793e35ca5304eef7b7dacb5dd8d486622f497759 | [
"Apache-2.0"
]
| null | null | null | backend/code/start.py | socek/iep | 793e35ca5304eef7b7dacb5dd8d486622f497759 | [
"Apache-2.0"
]
| null | null | null | backend/code/start.py | socek/iep | 793e35ca5304eef7b7dacb5dd8d486622f497759 | [
"Apache-2.0"
]
| null | null | null | if __name__ == "__main__":
print("Nothing yet...")
| 18.333333 | 27 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.472727 |
81f8ba9bf744d6a257f9ade3faa2ce783ae335eb | 2,317 | py | Python | vantage6/server/model/organization.py | jaspersnel/vantage6-server | 88ad40d23cc36eaba57c170929f7ccdd0011720a | [
"Apache-2.0"
]
| 2 | 2020-10-19T08:59:08.000Z | 2022-03-07T10:30:21.000Z | vantage6/server/model/organization.py | jaspersnel/vantage6-server | 88ad40d23cc36eaba57c170929f7ccdd0011720a | [
"Apache-2.0"
]
| 67 | 2020-04-15T09:43:31.000Z | 2022-03-18T08:29:17.000Z | vantage6/server/model/organization.py | jaspersnel/vantage6-server | 88ad40d23cc36eaba57c170929f7ccdd0011720a | [
"Apache-2.0"
]
| 2 | 2021-01-21T15:09:26.000Z | 2021-04-19T14:58:10.000Z | import base64
from sqlalchemy import Column, String, LargeBinary
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.exc import NoResultFound
from vantage6.common.globals import STRING_ENCODING
from .base import Base, Database
class Organization(Base):
"""A legal entity.
An organization plays a central role in managing distributed tasks. Each
Organization contains a public key which other organizations can use to
send encrypted messages that only this organization can read.
"""
# fields
name = Column(String)
domain = Column(String)
address1 = Column(String)
address2 = Column(String)
zipcode = Column(String)
country = Column(String)
_public_key = Column(LargeBinary)
# relations
collaborations = relationship("Collaboration", secondary="Member",
back_populates="organizations")
results = relationship("Result", back_populates="organization")
nodes = relationship("Node", back_populates="organization")
users = relationship("User", back_populates="organization")
created_tasks = relationship("Task", back_populates="initiator")
roles = relationship("Role", back_populates="organization")
@classmethod
def get_by_name(cls, name):
session = Database().Session
try:
return session.query(cls).filter_by(name=name).first()
except NoResultFound:
return None
@hybrid_property
def public_key(self):
if self._public_key:
# TODO this should be fixed properly
try:
return base64.b64decode(self._public_key)\
.decode(STRING_ENCODING)
except Exception:
return ""
else:
return ""
@public_key.setter
def public_key(self, public_key_b64):
"""Assumes that the public key is in b64-encoded."""
self._public_key = base64.b64decode(
public_key_b64.encode(STRING_ENCODING)
)
def __repr__(self):
number_of_users = len(self.users)
return (
"<Organization "
f"name:{self.name}, "
f"domain:{self.domain}, "
f"users:{number_of_users}"
">"
)
| 30.893333 | 76 | 0.643073 | 2,026 | 0.874407 | 0 | 0 | 775 | 0.334484 | 0 | 0 | 585 | 0.252482 |
81f8d698a3ddfe36ef13f1113078ded3a3fb3cf5 | 865 | py | Python | checkov/terraform/checks/resource/aws/EKSSecretsEncryption.py | cclauss/checkov | 60a385fcaff1499cf00c2d0018575fe5ab71f556 | [
"Apache-2.0"
]
| 1 | 2021-01-26T12:46:32.000Z | 2021-01-26T12:46:32.000Z | checkov/terraform/checks/resource/aws/EKSSecretsEncryption.py | cclauss/checkov | 60a385fcaff1499cf00c2d0018575fe5ab71f556 | [
"Apache-2.0"
]
| 1 | 2021-06-02T02:53:31.000Z | 2021-06-02T02:53:31.000Z | checkov/terraform/checks/resource/aws/EKSSecretsEncryption.py | cclauss/checkov | 60a385fcaff1499cf00c2d0018575fe5ab71f556 | [
"Apache-2.0"
]
| null | null | null | from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class EKSSecretsEncryption(BaseResourceCheck):
def __init__(self):
name = "Ensure EKS Cluster has Secrets Encryption Enabled"
id = "CKV_AWS_58"
supported_resources = ['aws_eks_cluster']
categories = [CheckCategories.KUBERNETES]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
if "encryption_config" in conf.keys() and "resources" in conf["encryption_config"][0] and \
"secrets" in conf["encryption_config"][0]["resources"][0]:
return CheckResult.PASSED
else:
return CheckResult.FAILED
check = EKSSecretsEncryption()
| 39.318182 | 106 | 0.713295 | 676 | 0.781503 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.19422 |
81f9249901387ee78233d110ebdfbf78c9c6a8d9 | 46,176 | py | Python | analysis/models/nodes/analysis_node.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
]
| 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | analysis/models/nodes/analysis_node.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
]
| 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | analysis/models/nodes/analysis_node.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
]
| null | null | null | """ AnalysisNode is the base class that all analysis nodes inherit from. """
import logging
import operator
from functools import reduce
from random import random
from time import time
from typing import Tuple, Sequence, List, Dict, Optional
from celery.canvas import Signature
from django.conf import settings
from django.core.cache import cache
from django.db import connection, models
from django.db.models import Value, IntegerField
from django.db.models.aggregates import Count
from django.db.models.deletion import CASCADE, SET_NULL
from django.db.models.query_utils import Q
from django.dispatch import receiver
from django.utils import timezone
from django_dag.models import node_factory, edge_factory
from django_extensions.db.models import TimeStampedModel
from lazy import lazy
from model_utils.managers import InheritanceManager
from analysis.exceptions import NonFatalNodeError, NodeParentErrorsException, NodeConfigurationException, \
NodeParentNotReadyException, NodeNotFoundException, NodeOutOfDateException
from analysis.models.enums import GroupOperation, NodeStatus, NodeColors, NodeErrorSource, AnalysisTemplateType
from analysis.models.models_analysis import Analysis
from analysis.models.nodes.node_counts import get_extra_filters_q, get_node_counts_and_labels_dict
from annotation.annotation_version_querysets import get_variant_queryset_for_annotation_version
from classification.models import Classification, post_delete
from library.database_utils import queryset_to_sql
from library.django_utils import thread_safe_unique_together_get_or_create
from library.log_utils import report_event
from library.utils import format_percent
from snpdb.models import BuiltInFilters, Sample, Variant, VCFFilter, Wiki, Cohort, VariantCollection, \
ProcessingStatus, GenomeBuild, AlleleSource
from snpdb.variant_collection import write_sql_to_variant_collection
from variantgrid.celery import app
def _default_position():
return 10 + random() * 50
class AnalysisNode(node_factory('AnalysisEdge', base_model=TimeStampedModel)):
model = Variant
objects = InheritanceManager()
analysis = models.ForeignKey(Analysis, on_delete=CASCADE)
name = models.TextField(blank=True)
x = models.IntegerField(default=_default_position)
y = models.IntegerField(default=_default_position)
version = models.IntegerField(default=0) # Queryset version
appearance_version = models.IntegerField(default=0)
auto_node_name = models.BooleanField(default=True)
output_node = models.BooleanField(default=False)
hide_node_and_descendants_upon_template_configuration_error = models.BooleanField(default=False)
ready = models.BooleanField(default=True)
valid = models.BooleanField(default=False)
visible = models.BooleanField(default=True)
count = models.IntegerField(null=True, default=None)
errors = models.TextField(null=True)
shadow_color = models.TextField(null=True)
load_seconds = models.FloatField(null=True)
parents_should_cache = models.BooleanField(default=False) # Node suggests parents use a cache
# This is set to node/version you cloned - cleared upon modification
cloned_from = models.ForeignKey('NodeVersion', null=True, on_delete=SET_NULL)
status = models.CharField(max_length=1, choices=NodeStatus.choices, default=NodeStatus.DIRTY)
PARENT_CAP_NOT_SET = -1
min_inputs = 1
max_inputs = 1
uses_parent_queryset = True
disabled = False
UPDATE_TASK = "analysis.tasks.node_update_tasks.update_node_task"
NODE_CACHE_TASK = "analysis.tasks.node_update_tasks.node_cache_task"
WAIT_FOR_CACHE_TASK = "analysis.tasks.node_update_tasks.wait_for_cache_task"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.appearance_dirty = False
self.ancestor_input_samples_changed = False
self.parents_changed = False
self.queryset_dirty = False
self.update_children = True
def get_subclass(self):
""" Returns the node loaded as a subclass """
return AnalysisNode.objects.get_subclass(pk=self.pk)
def check_still_valid(self):
""" Checks that the node is still there and has the version we expect - or throw exception """
version_qs = AnalysisNode.objects.filter(pk=self.pk).values_list("version", flat=True)
if version_qs:
db_version = version_qs[0]
if db_version > self.version:
raise NodeOutOfDateException()
else:
raise NodeNotFoundException(self.pk)
def _get_cohorts_and_sample_visibility_for_node(self) -> Tuple[Sequence[Cohort], Dict]:
""" Visibility = can see on grid """
return [], {}
@staticmethod
def _get_visible_samples_from_cohort(cohorts, visibility):
samples = set()
for c in cohorts:
for s in c.get_samples():
if visibility.get(s):
samples.add(s)
return sorted(samples)
def _get_model_queryset(self):
self.analysis.check_valid()
return get_variant_queryset_for_annotation_version(self.analysis.annotation_version)
def get_cohorts_and_sample_visibility(self, sort=True) -> Tuple[Sequence[Cohort], Dict]:
""" Returns all node + ancestor cohorts (and visibilities of their samples)
The underlying data for all samples/cohorts/sub-cohorts/trios/pedigrees is Cohorts, so need to know which
to retrieve from DB (and what sample info to extract from packed columns) to filter + show on grid """
cohorts, visibility = self._get_cohorts_and_sample_visibility_for_node()
cohorts = set(cohorts)
if self.has_input():
parents, _ = self.get_parent_subclasses_and_errors()
for parent in parents:
c, v = parent.get_cohorts_and_sample_visibility(sort=False)
cohorts.update(c)
visibility.update(v)
# May have sub-cohorts, so get unique base cohorts
cohorts = {c.get_base_cohort() for c in cohorts}
if sort:
cohorts = sorted(cohorts)
return cohorts, visibility
def get_sample_ids(self) -> List[Sample]:
return [s.pk for s in self.get_samples()]
def get_samples_from_node_only_not_ancestors(self):
cohorts, visibility = self._get_cohorts_and_sample_visibility_for_node()
return self._get_visible_samples_from_cohort(cohorts, visibility)
def _get_proband_sample_for_node(self) -> Optional[Sample]:
""" Sample of the object of a study, if known """
return None
def get_proband_sample(self) -> Optional[Sample]:
""" Sample of the object of a study if known """
proband_samples = set()
if proband_sample := self._get_proband_sample_for_node():
proband_samples.add(proband_sample)
if self.has_input():
parents, _ = self.get_parent_subclasses_and_errors()
for parent in parents:
if parent_proband_sample := parent.get_proband_sample():
proband_samples.add(parent_proband_sample)
proband_sample = None
if len(proband_samples) == 1: # If ambiguous, then just give up
proband_sample = proband_samples.pop()
return proband_sample
def get_samples(self) -> List[Sample]:
""" Return all ancestor samples for a node"""
cohorts, visibility = self.get_cohorts_and_sample_visibility(sort=False)
return self._get_visible_samples_from_cohort(cohorts, visibility)
def get_bams_dict(self):
bams_dict = {}
for sample in self.get_samples():
if sample.bam_file_path:
bams_dict[sample.pk] = sample.bam_file_path
return bams_dict
def get_connection_data(self, parent):
""" Return dict of source_id/target_id for sending as JSON """
return {"source_id": parent.get_css_id(),
"target_id": self.get_css_id()}
def get_rendering_args(self):
return {}
def get_css_id(self):
if self.pk:
css_id = f"analysis-node-{self.pk}"
else:
css_id = None
return css_id
def get_update_task(self):
return Signature(self.UPDATE_TASK, args=(self.pk, self.version), immutable=True)
def get_cache_task_args_objs_set(self, force_cache=False):
""" returns Celery tasks which are called in node_utils.get_analysis_update_task before children are loaded
Uses tasks not signatures so they are hashable in a set to be able to remove dupes """
task_args_objs_set = set()
if self.is_valid() and (force_cache or self.use_cache):
if parent := self.get_unmodified_single_parent_node():
return parent.get_cache_task_args_objs_set(force_cache=force_cache)
node_cache, created = NodeCache.get_or_create_for_node(self)
if created:
task_args_objs_set.add((self.NODE_CACHE_TASK, (self.pk, self.version), node_cache))
else:
# Cache has been launched already, we just need to make sure it's ready, so launch a task
# waiting on it, to be used as a dependency
task_args_objs_set.add((self.WAIT_FOR_CACHE_TASK, (node_cache.pk, ), node_cache))
return task_args_objs_set
def get_parent_subclasses_and_errors(self):
qs = AnalysisNode.objects.filter(children=self.id, children__isnull=False)
parents = list(qs.select_subclasses())
num_parents = len(parents)
errors = []
if self.min_inputs != AnalysisNode.PARENT_CAP_NOT_SET and num_parents < self.min_inputs:
errors.append((NodeErrorSource.CONFIGURATION, f"{num_parents} parents < minimum of {self.min_inputs}"))
elif self.max_inputs != AnalysisNode.PARENT_CAP_NOT_SET and num_parents > self.max_inputs:
errors.append((NodeErrorSource.CONFIGURATION, f"{num_parents} parents > maximum of {self.max_inputs}"))
for parent in parents:
if NodeStatus.is_error(parent.status):
errors.append((NodeErrorSource.PARENT, "Parent has errors"))
break
return parents, errors
def get_parent_subclasses(self):
""" Gets parents, throws an Exception if any errors """
parents, errors = self.get_parent_subclasses_and_errors()
if errors:
AnalysisNode.throw_errors_exception(errors)
return parents
def get_non_empty_parents(self, require_parents_ready=True):
""" Returns non-empty (count > 0) parents.
If require_parents_ready=True, die if parents not ready
Otherwise, return them as we don't know if they're empty or not """
non_empty_parents = []
for p in self.get_parent_subclasses():
if p.is_ready():
if p.count == 0:
continue
elif require_parents_ready:
raise NodeParentNotReadyException(f"Parent {p} is not ready!")
non_empty_parents.append(p)
return non_empty_parents
def get_single_parent(self):
if self.min_inputs != 1:
msg = "get_single_parent() should only be called for single parent nodes"
raise ValueError(msg)
parents, errors = self.get_parent_subclasses_and_errors()
if errors:
errors = AnalysisNode.flatten_errors(errors)
msg = "Parent had errors: " + ', '.join(errors)
raise NonFatalNodeError(msg)
num_parents = len(parents)
if num_parents != 1:
msg = f"get_single_parent() called for node with {num_parents} parents"
raise ValueError(msg)
return parents[0]
def get_single_parent_q(self):
parent = self.get_single_parent()
if parent.is_ready():
if parent.count == 0:
q = self.q_none()
else:
q = parent.get_q()
else:
# This should never happen...
raise ValueError("get_single_parent_q called when single parent not ready!!!")
return q
def _get_annotation_kwargs_for_node(self) -> Dict:
""" Override this method per-node.
Any key/values in here MUST be consistent - as annotation_kwargs from multiple
nodes may be combined in the MergeNode
"""
annotation_kwargs = {}
if self.node_cache:
annotation_kwargs.update(self.node_cache.variant_collection.get_annotation_kwargs())
return annotation_kwargs
def get_annotation_kwargs(self) -> Dict:
""" Passed to Variant QuerySet annotate()
Can be used w/FilteredRelation to force a join to a partition, in which case you need to use
the alias given in annotate. @see https://github.com/SACGF/variantgrid/wiki/Data-Partitioning """
a_kwargs = {}
# Only apply parent annotation kwargs if you actually use their queryset
if self.has_input() and self.uses_parent_queryset:
for parent in self.get_non_empty_parents():
a_kwargs.update(parent.get_annotation_kwargs())
a_kwargs.update(self._get_annotation_kwargs_for_node())
return a_kwargs
@property
def queryset_requires_distinct(self):
if self._queryset_requires_distinct():
return True
if self.has_input() and self.uses_parent_queryset:
for parent in self.get_non_empty_parents():
if parent.queryset_requires_distinct:
return True
return False
def _queryset_requires_distinct(self):
""" Override if you need this - don't do by default as it's slow """
return False
@staticmethod
def q_all():
return Q(pk__isnull=False)
@staticmethod
def q_none():
return ~AnalysisNode.q_all()
def _get_cache_key(self) -> str:
nv = NodeVersion.get(self)
return str(nv.pk)
def get_q(self, disable_cache=False):
""" A Django Q object representing the Variant filters for this node.
This is the method to override in subclasses - not get_queryset() as:
Chains of filters to a reverse foreign key relationship causes
Multiple joins, so use Q objects which are combined at the end
qs = qs.filter(table_1__val=1)
qs = qs.filter(table_2__val=2)
This is not necessarily equal to:
qs.filter(table_1__val=1, table_2__val=2)
@see https://docs.djangoproject.com/en/2/topics/db/queries/#spanning-multi-valued-relationships
"""
# We need this for node counts, and doing a grid query (each page) - and it can take a few secs to generate
# for some nodes (Comp HET / pheno) so cache it
cache_key = self._get_cache_key() + f"q_cache={disable_cache}"
q: Optional[Q] = None
if settings.ANALYSIS_NODE_CACHE_Q: # Disable for unit tests
q = cache.get(cache_key)
if q is None:
if disable_cache is False:
if cache_q := self._get_node_cache_q():
return cache_q
if self.has_input():
q = self.get_parent_q()
if self.modifies_parents():
if node_q := self._get_node_q():
q &= node_q
else:
q = self.q_all()
if node_q := self._get_node_q():
q = node_q
cache.set(cache_key, q)
return q
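    # Illustrative note (not in the original code): why Q objects are combined
    # rather than chaining filter() calls. Model names below are hypothetical.
    # Across a multi-valued (reverse FK) relation,
    #     Blog.objects.filter(entry__year=2008).filter(entry__title__contains="x")
    # matches blogs with *some* 2008 entry and *some* matching-title entry
    # (two separate joins), whereas
    #     Blog.objects.filter(entry__year=2008, entry__title__contains="x")
    # requires a single entry satisfying both conditions. Building one Q per
    # node and combining them keeps each node's conditions in one filter() call.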
def get_parent_q(self):
if self.min_inputs == 1:
return self.get_single_parent_q()
raise NotImplementedError("You need to implement a non-default 'get_parent_q' if you have more than 1 parent")
@property
def use_cache(self):
""" At the moment we only cache when a child requests it """
return AnalysisEdge.objects.filter(parent=self, child__parents_should_cache=True).exists()
def write_cache(self, variant_collection: VariantCollection):
qs = self.get_queryset(disable_cache=True)
qs = qs.annotate(variant_collection_id=Value(variant_collection.pk, output_field=IntegerField()))
sql = queryset_to_sql(qs.values_list('pk', 'variant_collection_id'))
write_sql_to_variant_collection(variant_collection, sql)
@lazy
def node_version(self):
return NodeVersion.get(self)
@lazy
def node_cache(self) -> Optional['NodeCache']:
if parent := self.get_unmodified_single_parent_node():
return parent.node_cache
return NodeCache.objects.filter(node_version=self.node_version,
variant_collection__status=ProcessingStatus.SUCCESS).first()
def _get_node_cache_q(self) -> Optional[Q]:
q = None
if self.node_cache:
q = self.node_cache.variant_collection.get_q()
return q
def _get_node_q(self) -> Optional[Q]:
raise NotImplementedError()
def _get_unfiltered_queryset(self, **extra_annotation_kwargs):
""" Unfiltered means before the get_q() is applied
extra_annotation_kwargs is applied AFTER node's annotation kwargs
"""
qs = self._get_model_queryset()
a_kwargs = self.get_annotation_kwargs()
a_kwargs.update(extra_annotation_kwargs)
if a_kwargs:
# Clear ordering, @see
# https://docs.djangoproject.com/en/3.0/topics/db/aggregation/#interaction-with-default-ordering-or-order-by
qs = qs.annotate(**a_kwargs).order_by()
return qs
def get_queryset(self, extra_filters_q=None, extra_annotation_kwargs=None,
inner_query_distinct=False, disable_cache=False):
if extra_annotation_kwargs is None:
extra_annotation_kwargs = {}
qs = self._get_unfiltered_queryset(**extra_annotation_kwargs)
q = self.get_q(disable_cache=disable_cache)
if extra_filters_q:
q &= extra_filters_q
filtered_qs = qs.filter(q)
if self.queryset_requires_distinct:
if inner_query_distinct:
qs = qs.filter(pk__in=filtered_qs.values_list("pk", flat=True))
else:
qs = filtered_qs.distinct()
else:
qs = filtered_qs
return qs
def get_extra_grid_config(self):
return {}
def get_class_name(self):
return self.__class__.__name__
def get_identifier(self):
return f"{self.get_class_name()}-{self.pk}"
def get_css_classes(self):
""" returns list of css classes - set on "node > .node-overlay" on node appearance update """
css_classes = []
if self.output_node:
css_classes.append("output-node")
if self.analysis.template_type == AnalysisTemplateType.TEMPLATE and self.analysisvariable_set.exists():
css_classes.append("variable-node")
return css_classes
def get_input_count(self):
parents = self.get_non_empty_parents()
return sum([p.get_output_count() for p in parents])
def get_output_count(self):
        # TODO: Move the "if not modifies_parents" code in here.
if self.count is not None:
return self.count
count = self.get_queryset().count()
self.count = count
self.save()
return count
def _get_method_summary(self):
raise NotImplementedError()
def get_method_summary(self):
errors = self.get_errors(flat=True)
if not errors:
html_summary = self._get_method_summary()
else:
html_summary = "<b>incorrectly configured</b><ul>"
for error in errors:
html_summary += f"<li>{error}</li>"
html_summary += "</ul>"
return html_summary
def get_node_name(self):
""" Automatic node name """
raise NotImplementedError(f"Node Class: {self.get_class_name()}")
@staticmethod
def get_help_text() -> str:
raise NotImplementedError()
@staticmethod
def get_node_class_label():
""" Used in create node dropdown """
raise NotImplementedError()
def _get_genome_build_errors(self, field_name, field_genome_build: GenomeBuild) -> List:
""" Used to quickly add errors about genome build mismatches
            This only happens in templates (a template was run on a sample with a different build than the hardcoded data).
In normal analyses, autocomplete restrictions should not allow you to configure data from other builds """
errors = []
if field_genome_build != self.analysis.genome_build:
msg = f"{field_name} genome build: {field_genome_build} different from analysis build: {self.analysis.genome_build}"
errors.append(msg)
return errors
def _get_configuration_errors(self) -> List:
return []
def get_parents_and_errors(self):
""" Returns error array, includes any min/max parent error and node config error """
if self.has_input():
return self.get_parent_subclasses_and_errors()
return [], []
def get_errors(self, include_parent_errors=True, flat=False):
""" returns a tuple of (NodeError, str) unless flat=True where it's only string """
errors = []
for analysis_error in self.analysis.get_errors():
errors.append((NodeErrorSource.ANALYSIS, analysis_error))
_, parent_errors = self.get_parents_and_errors()
if include_parent_errors:
errors.extend(parent_errors)
if self.errors:
errors.append((NodeErrorSource.INTERNAL_ERROR, self.errors))
errors.extend((NodeErrorSource.CONFIGURATION, ce) for ce in self._get_configuration_errors())
if flat:
errors = AnalysisNode.flatten_errors(errors)
return errors
@staticmethod
def flatten_errors(errors):
return [f"{NodeErrorSource(nes).label}: {error}" for nes, error in errors]
@staticmethod
def get_status_from_errors(errors):
ERROR_STATUS = {
NodeErrorSource.INTERNAL_ERROR: NodeStatus.ERROR,
NodeErrorSource.ANALYSIS: NodeStatus.ERROR_WITH_PARENT,
NodeErrorSource.PARENT: NodeStatus.ERROR_WITH_PARENT,
NodeErrorSource.CONFIGURATION: NodeStatus.ERROR_CONFIGURATION,
}
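        # Dict insertion order doubles as severity priority: the loop below
        # returns the status for the first matching source, so internal errors
        # take precedence, then analysis/parent errors, then configuration
        # errors (relies on Python 3.7+ ordered dicts).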
if not errors:
raise ValueError("Passed in empty errors!")
error_sources = {s for s, _ in errors}
for source, status in ERROR_STATUS.items():
if source in error_sources:
return status
raise ValueError("No error source found")
@staticmethod
def throw_errors_exception(errors):
ERROR_EXCEPTIONS = {
NodeErrorSource.INTERNAL_ERROR: ValueError,
NodeErrorSource.ANALYSIS: NonFatalNodeError,
NodeErrorSource.PARENT: NodeParentErrorsException,
NodeErrorSource.CONFIGURATION: NodeConfigurationException,
}
if not errors:
raise ValueError("Passed in empty errors!")
error_sources = {s for s, _ in errors}
for source, exception_klass in ERROR_EXCEPTIONS.items():
if source in error_sources:
raise exception_klass()
raise ValueError("No error source found")
def inherits_parent_columns(self):
return self.min_inputs == 1 and self.max_inputs == 1
def _get_node_extra_columns(self):
return []
def _get_inherited_columns(self):
extra_columns = []
if self.inherits_parent_columns():
parent = self.get_single_parent()
extra_columns.extend(parent.get_extra_columns())
return extra_columns
def get_extra_columns(self):
cache_key = self._get_cache_key() + "_extra_columns"
extra_columns = cache.get(cache_key)
if extra_columns is None:
extra_columns = []
if self.is_valid():
extra_columns.extend(self._get_inherited_columns())
# Only add columns that are unique, as otherwise filters get added twice.
node_extra_columns = self._get_node_extra_columns()
for col in node_extra_columns:
if col not in extra_columns:
extra_columns.append(col)
cache.set(cache_key, extra_columns)
return extra_columns
def _get_node_extra_colmodel_overrides(self):
""" Subclasses should override to add colmodel overrides for JQGrid """
return {}
def _get_inherited_colmodel_overrides(self):
extra_overrides = {}
if self.inherits_parent_columns():
parent = self.get_single_parent()
extra_overrides.update(parent.get_extra_colmodel_overrides())
return extra_overrides
def get_extra_colmodel_overrides(self):
""" For JQGrid - subclasses should override _get_node_extra_colmodel_overrides """
extra_overrides = {}
if self.is_valid() and self.uses_parent_queryset:
extra_overrides.update(self._get_inherited_colmodel_overrides())
extra_overrides.update(self._get_node_extra_colmodel_overrides())
return extra_overrides
def get_node_classification(self):
if self.is_source():
classification = "source"
else:
classification = "filter"
return classification
def has_input(self):
return self.max_inputs != 0
def is_source(self):
return self.has_input() is False
def is_valid(self):
return not self.get_errors()
def is_ready(self):
return NodeStatus.is_ready(self.status)
def bump_version(self):
if self.version > 0:
DELETE_CACHE_TASK = "analysis.tasks.node_update_tasks.delete_old_node_versions"
app.send_task(DELETE_CACHE_TASK, args=(self.pk, self.version))
self.version += 1
self.status = NodeStatus.DIRTY
self.count = None
self.errors = None
self.cloned_from = None
def modifies_parents(self):
""" Can overwrite and set to False to use parent counts """
return True
def get_unmodified_single_parent_node(self) -> Optional['AnalysisNode']:
""" If a node doesn't modify single parent - can use that in some places to re-use cache """
if self.is_valid() and self.has_input() and not self.modifies_parents():
try:
return self.get_single_parent()
except ValueError:
pass
return None
def _get_cached_label_count(self, label) -> Optional[int]:
""" Override for optimisation.
Returning None means we need to run the SQL to get the count """
try:
if self.cloned_from:
# If cloned (and we or original haven't changed) - use those counts
try:
node_count = NodeCount.load_for_node_version(self.cloned_from, label)
return node_count.count
except NodeCount.DoesNotExist:
# Should only ever happen if original bumped version since we were loaded
# otherwise should have cascade set cloned_from to NULL
pass
if self.has_input():
parent_non_zero_label_counts = []
for parent in self.get_non_empty_parents():
if parent.count != 0: # count=0 has 0 for all labels
parent_node_count = NodeCount.load_for_node(parent, label)
if parent_node_count.count != 0:
parent_non_zero_label_counts.append(parent_node_count.count)
if not parent_non_zero_label_counts:
# logging.info("all parents had 0 %s counts", label)
return 0
if not self.modifies_parents():
if len(parent_non_zero_label_counts) == 1:
# logging.info("Single parent, no modification, using that")
return parent_non_zero_label_counts[0]
except NodeCount.DoesNotExist:
pass
except Exception as e:
logging.warning("Trouble getting cached %s count: %s", label, e)
return None
def get_grid_node_id_and_version(self):
""" Uses parent node_id/version if possible to re-use cache """
node_id = self.pk
version = self.version
if self.cloned_from:
node_id = self.cloned_from.node_id
version = self.cloned_from.version
if parent := self.get_unmodified_single_parent_node():
node_id, version = parent.get_grid_node_id_and_version()
return node_id, version
def node_counts(self):
""" This is inside Celery task """
self.count = None
counts_to_get = {BuiltInFilters.TOTAL}
counts_to_get.update([i[0] for i in self.analysis.get_node_count_types()])
label_counts = {}
for label in counts_to_get:
label_count = self._get_cached_label_count(label)
if label_count is not None:
label_counts[label] = label_count
counts_to_get -= set(label_counts)
logging.debug("%s cached counts: %s", self, label_counts)
if counts_to_get:
logging.debug("%s needs DB request for %s", self, counts_to_get)
retrieved_label_counts = get_node_counts_and_labels_dict(self)
label_counts.update(retrieved_label_counts)
node_version = NodeVersion.get(self)
for label, count in label_counts.items():
NodeCount.objects.create(node_version=node_version, label=label, count=count)
return NodeStatus.READY, label_counts[BuiltInFilters.TOTAL]
def _load(self):
""" Override to do anything interesting """
pass
def load(self):
""" load is called after parents are run """
# logging.debug("node %d (%d) load()", self.id, self.version)
start = time()
self._load() # Do before counts in case it affects anything
status, count = self.node_counts()
load_seconds = time() - start
self.update(status=status, count=count, load_seconds=load_seconds)
def add_parent(self, parent, *args, **kwargs):
if not parent.visible:
raise NonFatalNodeError("Not connecting children to invisible nodes!")
existing_connect = parent.children.through.objects.filter(parent=parent, child=self)
if not existing_connect.exists():
super().add_parent(parent)
self.parents_changed = True
else:
logging.error("Node(pk=%d).add_parent(pk=%d) already exists!", self.pk, parent.pk)
def remove_parent(self, parent):
""" disconnects parent by deleting edge """
# Ok to have multiple, just delete first
edge = parent.children.through.objects.filter(parent=parent, child=self).first()
if edge: # could be some kind of race condition?
edge.delete()
self.parents_changed = True
def handle_ancestor_input_samples_changed(self):
pass
def update(self, **kwargs):
""" Updates Node if self.version matches DB - otherwise throws NodeOutOfDateException """
self_qs = AnalysisNode.objects.filter(pk=self.pk, version=self.version)
updated = self_qs.update(**kwargs)
if not updated:
raise NodeOutOfDateException()
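    # Hedged usage sketch: update() gives optimistic locking via the version
    # column. A task would typically do something like:
    #     try:
    #         node.update(status=NodeStatus.READY, count=count)
    #     except NodeOutOfDateException:
    #         pass  # node was bumped concurrently; this result is stale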
def save(self, **kwargs):
""" To avoid race conditions, don't use save() in a celery task (unless running in scheduling_single_worker)
instead use update() method above """
# logging.debug("save: pk=%s kwargs=%s", self.pk, str(kwargs))
super_save = super().save
if self.parents_changed or self.ancestor_input_samples_changed:
self.handle_ancestor_input_samples_changed()
if self.auto_node_name:
self.name = self.get_node_name()
# TODO: This causes lots of DB queries... should we change this?
self.valid = self.is_valid()
if not self.valid:
self.shadow_color = NodeColors.ERROR
self.appearance_dirty = True
elif self.shadow_color == NodeColors.ERROR: # Need to allow nodes to set to warning
self.shadow_color = NodeColors.VALID
self.appearance_dirty = True
if self.appearance_dirty:
self.appearance_version += 1
if self.parents_changed or self.queryset_dirty:
self.bump_version()
super_save(**kwargs)
if self.update_children:
            # We also need to bump if the node has its own sample - as in templates, we set fields in toposort order
# So we could go from having multiple proband samples to only one later (thus can set descendants)
for kid in self.children.select_subclasses():
kid.ancestor_input_samples_changed = self.is_source() or self.ancestor_input_samples_changed or \
self.get_samples_from_node_only_not_ancestors()
kid.appearance_dirty = False
kid.queryset_dirty = True
kid.save() # Will bump versions
else:
super_save(**kwargs)
# Make sure this always exists
NodeVersion.objects.get_or_create(node=self, version=self.version)
# Modify our analyses last updated time
Analysis.objects.filter(pk=self.analysis.pk).update(modified=timezone.now())
def set_node_task_and_status(self, celery_task, status):
cursor = connection.cursor()
db_pid = cursor.db.connection.get_backend_pid()
self.update(status=status)
NodeTask.objects.filter(node=self, version=self.version).update(celery_task=celery_task, db_pid=db_pid)
def adjust_cloned_parents(self, old_new_map):
""" If you need to do something with old/new parents """
pass
def save_clone(self):
node_id = self.pk
try:
# Have sometimes had race condition where we try to clone a node that has been updated
# In that case we'll just miss out on the cache
original_node_version = NodeVersion.get(self)
except NodeVersion.DoesNotExist:
original_node_version = None
copy = self
# Have to set both id/pk to None when using model inheritance
copy.id = None
copy.pk = None
copy.version = 1 # 0 is for those being constructed in analysis templates
# Store cloned_from so we can use original's NodeCounts
copy.cloned_from = original_node_version
copy.save()
for npf in NodeVCFFilter.objects.filter(node_id=node_id):
npf.pk = None
npf.node = copy
npf.save()
naff = NodeAlleleFrequencyFilter.objects.filter(node_id=node_id).first() # 1-to-1
if naff:
af_frequency_ranges = list(naff.nodeallelefrequencyrange_set.all().values_list("min", "max"))
# Use existing if already created for node (eg AlleleFrequencyNode always makes one)
copy_naff, created = NodeAlleleFrequencyFilter.objects.get_or_create(node=copy)
if not created:
# Wipe out defaults to clear way for clone
copy_naff.nodeallelefrequencyrange_set.all().delete()
copy_naff.group_operation = naff.group_operation
copy_naff.save()
for min_value, max_value in af_frequency_ranges:
copy_naff.nodeallelefrequencyrange_set.create(min=min_value, max=max_value)
return copy
def __str__(self):
return self.name
@classmethod
def depth_first(cls, node):
parents = node.get_parent_subclasses()
        nodes = []
        for p in parents:
            nodes.extend(cls.depth_first(p))
        nodes.append(node)
        return nodes
class AnalysisEdge(edge_factory(AnalysisNode, concrete=False)):
pass
class NodeTask(TimeStampedModel):
""" Used to track/lock celery update tasks for nodes (uses DB constraints to ensure 1 per node/version) """
node = models.ForeignKey(AnalysisNode, on_delete=CASCADE)
version = models.IntegerField(null=False)
analysis_update_uuid = models.UUIDField()
celery_task = models.CharField(max_length=36, null=True)
db_pid = models.IntegerField(null=True)
class Meta:
unique_together = ("node", "version")
def __str__(self):
return f"NodeTask: {self.analysis_update_uuid} - {self.node.pk}/{self.version}"
class NodeWiki(Wiki):
node = models.OneToOneField(AnalysisNode, on_delete=CASCADE)
def _get_restricted_object(self):
return self.node.analysis
class AnalysisNodeAlleleSource(AlleleSource):
""" Used to link a nodes variants to alleleles and liftover to other builds """
node = models.ForeignKey(AnalysisNode, null=True, on_delete=SET_NULL)
def get_genome_build(self):
if self.node:
genome_build = self.node.analysis.genome_build
else:
genome_build = None
return genome_build
def get_variant_qs(self):
if self.node:
qs = self.node.get_subclass().get_queryset()
else:
qs = Variant.objects.none()
return qs
def liftover_complete(self, genome_build: GenomeBuild):
report_event('Completed AnalysisNode liftover',
extra_data={'node_id': self.node_id, 'allele_count': self.get_allele_qs().count()})
class NodeVersion(models.Model):
""" This will be deleted once a node updates, so make all version specific caches cascade delete from this """
node = models.ForeignKey(AnalysisNode, on_delete=CASCADE)
version = models.IntegerField(null=False)
class Meta:
unique_together = ("node", "version")
@staticmethod
def get(node: AnalysisNode):
try:
return NodeVersion.objects.get(node=node, version=node.version)
except NodeVersion.DoesNotExist:
node.check_still_valid()
raise
def __str__(self):
return f"{self.node.pk} (v{self.version})"
class NodeCache(models.Model):
node_version = models.OneToOneField(NodeVersion, on_delete=CASCADE)
variant_collection = models.OneToOneField(VariantCollection, on_delete=CASCADE)
@staticmethod
def get_or_create_for_node(node: AnalysisNode) -> Tuple['NodeCache', bool]:
variant_collection = VariantCollection.objects.create(name=f"NodeCache {node.node_version}")
defaults = {"variant_collection": variant_collection}
node_cache, created = thread_safe_unique_together_get_or_create(NodeCache, node_version=node.node_version,
defaults=defaults)
if not created:
variant_collection.delete()
return node_cache, created
def __str__(self):
return f"NodeCache {self.node_version}: {self.variant_collection.get_status_display()}"
@receiver(post_delete, sender=NodeCache)
def post_delete_node_cache(sender, instance, **kwargs): # pylint: disable=unused-argument
""" This can sometimes be called multiple times - if node updated again before previous updates
delete_old_node_versions is finished """
try:
if instance.variant_collection:
instance.variant_collection.delete_related_objects()
instance.variant_collection.delete()
except VariantCollection.DoesNotExist:
# Deleted already
pass
class NodeCount(models.Model):
node_version = models.ForeignKey(NodeVersion, on_delete=CASCADE)
label = models.CharField(max_length=100)
count = models.IntegerField(null=False)
class Meta:
unique_together = ("node_version", "label")
@staticmethod
def load_for_node_version(node_version: NodeVersion, label: str) -> 'NodeCount':
return NodeCount.objects.get(node_version=node_version, label=label)
@staticmethod
def load_for_node(node: AnalysisNode, label: str) -> 'NodeCount':
return NodeCount.load_for_node_version(NodeVersion.get(node), label=label)
def __str__(self):
return f"NodeCount({self.node_version}, {self.label}) = {self.count}"
class NodeColumnSummaryCacheCollection(models.Model):
node_version = models.ForeignKey(NodeVersion, on_delete=CASCADE)
variant_column = models.TextField(null=False)
extra_filters = models.TextField(null=False)
@staticmethod
def get_counts_for_node(node, variant_column, extra_filters):
node_version = NodeVersion.get(node)
ncscc, created = NodeColumnSummaryCacheCollection.objects.get_or_create(node_version=node_version,
variant_column=variant_column,
extra_filters=extra_filters)
if created:
extra_filters_q = get_extra_filters_q(node.analysis.user, node.analysis.genome_build, extra_filters)
queryset = node.get_queryset(extra_filters_q)
count_qs = queryset.values_list(variant_column).distinct().annotate(Count('id'))
data_list = []
for value, count in count_qs:
data = NodeColumnSummaryData(collection=ncscc,
value=value,
count=count)
data_list.append(data)
if data_list:
NodeColumnSummaryData.objects.bulk_create(data_list)
else:
data_list = ncscc.nodecolumnsummarydata_set.all()
counts = {}
for ncsd in data_list:
counts[ncsd.value] = ncsd.count
return counts
class NodeColumnSummaryData(models.Model):
collection = models.ForeignKey(NodeColumnSummaryCacheCollection, on_delete=CASCADE)
value = models.TextField(null=True)
count = models.IntegerField(null=False)
class NodeVCFFilter(models.Model):
""" If these exist, they mean use that filter """
node = models.ForeignKey(AnalysisNode, on_delete=CASCADE)
vcf_filter = models.ForeignKey(VCFFilter, on_delete=CASCADE, null=True) # null = 'PASS'
@staticmethod
def filter_for_node(node, vcf):
""" returns vfc but also where vcf_filter is NULL (for pass) """
q_vcf_filter = Q(vcf_filter__isnull=True) | Q(vcf_filter__vcf=vcf)
return NodeVCFFilter.objects.filter(q_vcf_filter, node=node)
class NodeAlleleFrequencyFilter(models.Model):
""" Used for various nodes """
node = models.OneToOneField(AnalysisNode, on_delete=CASCADE)
group_operation = models.CharField(max_length=1, choices=GroupOperation.choices, default=GroupOperation.ANY)
def get_q(self, allele_frequency_path: str, allele_frequency_percent: bool):
af_q = None
try:
filters = []
for af_range in self.nodeallelefrequencyrange_set.all():
# Only apply filter if restricted range.
# Missing value (historical data) == -1 so those will come through
and_filters = []
if af_range.min > 0:
min_value = af_range.min
if allele_frequency_percent:
min_value *= 100.0
and_filters.append(Q(**{allele_frequency_path + "__gte": min_value}))
if af_range.max < 1:
max_value = af_range.max
if allele_frequency_percent:
max_value *= 100.0
and_filters.append(Q(**{allele_frequency_path + "__lte": max_value}))
if and_filters:
and_q = reduce(operator.and_, and_filters)
filters.append(and_q)
if filters:
group_op = GroupOperation.get_operation(self.group_operation)
af_q = reduce(group_op, filters)
except NodeAlleleFrequencyFilter.DoesNotExist:
pass
return af_q
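    # Hedged sketch: with ranges [(0, 0.01), (0.99, 1)] and the default ANY
    # group operation, get_q(path, False) yields roughly
    #     Q(**{path + "__lte": 0.01}) | Q(**{path + "__gte": 0.99})
    # i.e. "rare OR near-fixed" variants; the path argument is illustrative.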
@staticmethod
def get_sample_q(node: AnalysisNode, sample: Sample) -> Optional[Q]:
af_q = None
if sample:
try:
allele_frequency_path = sample.get_cohort_genotype_field("allele_frequency")
allele_frequency_percent = sample.vcf.allele_frequency_percent
af_q = node.nodeallelefrequencyfilter.get_q(allele_frequency_path, allele_frequency_percent)
except NodeAlleleFrequencyFilter.DoesNotExist:
pass
return af_q
def get_description(self):
# TODO: do this properly with group operators etc
af_ranges = list(self.nodeallelefrequencyrange_set.all())
if len(af_ranges) == 1:
description = str(af_ranges[0])
else:
description = f"{self.get_group_operation_display()} of {len(af_ranges)} filters"
return description
class NodeAlleleFrequencyRange(models.Model):
MIN_VALUE = 0
MAX_VALUE = 1
filter = models.ForeignKey(NodeAlleleFrequencyFilter, on_delete=CASCADE)
min = models.FloatField(null=False)
max = models.FloatField(null=False)
def __str__(self):
has_min = self.min is not None and self.min > self.MIN_VALUE
has_max = self.max is not None and self.max < self.MAX_VALUE
min_perc = format_percent(self.min, is_unit=True)
max_perc = format_percent(self.max, is_unit=True)
if has_min and has_max:
return f"{min_perc} - {max_perc}"
if has_min:
return f">={min_perc}"
if has_max:
return f"<={max_perc}"
return ""
class AnalysisClassification(models.Model):
analysis = models.ForeignKey(Analysis, on_delete=CASCADE)
classification = models.ForeignKey(Classification, on_delete=CASCADE)
| 40.293194 | 128 | 0.652027 | 43,631 | 0.944885 | 0 | 0 | 6,973 | 0.151009 | 0 | 0 | 9,247 | 0.200256 |
81f9aae1477fdf2636841fe260c82468ac2cde52 | 2,077 | py | Python | lbrc_flask/standard_views.py | LCBRU/lbrc_flask_ui | 35dea4581bbd92e9b53134342052b2ce02f903d4 | ["MIT"] | null | null | null | lbrc_flask/standard_views.py | LCBRU/lbrc_flask_ui | 35dea4581bbd92e9b53134342052b2ce02f903d4 | ["MIT"] | null | null | null | lbrc_flask/standard_views.py | LCBRU/lbrc_flask_ui | 35dea4581bbd92e9b53134342052b2ce02f903d4 | ["MIT"] | null | null | null |
import os
import traceback
from flask import render_template, send_from_directory, current_app, g
from .emailing import email
def init_standard_views(app):
@app.route("/favicon.ico")
def favicon():
return send_from_directory(
os.path.join(app.root_path, "static"),
"favicon.ico",
mimetype="image/vnd.microsoft.icon",
)
    @app.errorhandler(400)
    def bad_request_page(exception):
        """Catch internal 400 errors, display
        a nice error page and log the error.
        """
        return render_template("lbrc_flask/404.html"), 400
    @app.errorhandler(401)
    def unauthorized_page(exception):
        """Catch internal 401 errors, display
        a nice error page and log the error.
        """
        return render_template("lbrc_flask/404.html"), 401
    @app.errorhandler(403)
    def forbidden_page(exception):
        """Catch internal 403 errors, display
        a nice error page and log the error.
        """
        return render_template("lbrc_flask/404.html"), 403
@app.errorhandler(404)
def missing_page(exception):
"""Catch internal 404 errors, display
a nice error page and log the error.
"""
return render_template("lbrc_flask/404.html"), 404
@app.errorhandler(500)
@app.errorhandler(Exception)
def internal_error(exception):
"""Catch internal exceptions and 500 errors, display
a nice error page and log the error.
"""
if 'lbrc_flask_title' in g:
app_name = g.lbrc_flask_title
else:
app_name = 'Application'
print(traceback.format_exc())
app.logger.error(traceback.format_exc())
email(
subject="{} {} Error".format(current_app.config["ORGANISATION_NAME"], app_name),
message=traceback.format_exc(),
recipients=[current_app.config["ADMIN_EMAIL_ADDRESS"]],
)
return render_template("lbrc_flask/500.html"), 500
| 32.968254 | 93 | 0.604718 | 0 | 0 | 0 | 0 | 1,866 | 0.898411 | 0 | 0 | 765 | 0.36832 |
81fb080ba80dc6a4245823877af0ad4179c7b39d | 87 | py | Python | tests/data/program_analysis/PyAST2CAST/import/test_import_3.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | ["Apache-2.0"] | 17 | 2018-12-19T16:32:38.000Z | 2021-10-05T07:58:15.000Z | tests/data/program_analysis/PyAST2CAST/import/test_import_3.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | ["Apache-2.0"] | 183 | 2018-12-20T17:03:01.000Z | 2022-02-23T22:21:42.000Z | tests/data/program_analysis/PyAST2CAST/import/test_import_3.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | ["Apache-2.0"] | 5 | 2019-01-04T22:37:49.000Z | 2022-01-19T17:34:16.000Z |
# 'from ... import ...' statement
from sys import exit
def main():
exit(0)
main()
| 12.428571 | 33 | 0.597701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.37931 |
81fb14b3776d226412fc52409bbe9c75a0f071a1 | 1,455 | py | Python | postmanparser/form_parameter.py | appknox/postmanparser | bef9581d1c81e111fe635e7ea851333e7b2261e5 | ["Apache-2.0"] | 5 | 2021-06-25T10:14:40.000Z | 2022-03-31T20:52:51.000Z | postmanparser/form_parameter.py | appknox/postmanparser | bef9581d1c81e111fe635e7ea851333e7b2261e5 | ["Apache-2.0"] | 4 | 2021-06-07T17:05:27.000Z | 2021-09-07T13:26:20.000Z | postmanparser/form_parameter.py | appknox/postmanparser | bef9581d1c81e111fe635e7ea851333e7b2261e5 | ["Apache-2.0"] | 2 | 2021-09-06T15:41:50.000Z | 2021-11-10T14:22:46.000Z |
from dataclasses import dataclass
from typing import List
from typing import Union
from postmanparser.description import Description
from postmanparser.exceptions import InvalidObjectException
from postmanparser.exceptions import MissingRequiredFieldException
@dataclass
class FormParameter:
key: str
value: str = ""
src: Union[List, str, None] = None
disabled: bool = False
form_param_type: str = ""
content_type: str = "" # should override content-type in header
description: Union[Description, None, str] = None
@classmethod
def parse(cls, data: dict):
key = data.get("key")
if key is None:
raise MissingRequiredFieldException(
"'formparameter' object should have 'key' property"
)
value = data.get("value", "")
src = data.get("src")
if value and src is not None:
raise InvalidObjectException(
"'formparamter' object can eiher have src or value and not both."
)
description = data.get("description")
if isinstance(description, dict):
description = Description.parse(description)
return cls(
key,
value=value,
src=src,
disabled=data.get("disabled", False),
form_param_type=data.get("type", ""),
content_type=data.get("contentType", ""),
description=description,
)
| 32.333333 | 81 | 0.620619 | 1,180 | 0.810997 | 0 | 0 | 1,191 | 0.818557 | 0 | 0 | 227 | 0.156014 |
81fd3d016f2f7329e2389892dcfbd3f365d1769d | 844 | py | Python | pip-check.py | Urucas/pip-check | 777d8208bb89f566b95885a6711c773580a9c80f | ["MIT"] | null | null | null | pip-check.py | Urucas/pip-check | 777d8208bb89f566b95885a6711c773580a9c80f | ["MIT"] | null | null | null | pip-check.py | Urucas/pip-check | 777d8208bb89f566b95885a6711c773580a9c80f | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import pip
import os
import sys
def err(msg):
print "\033[31m✗ \033[0m%s" % msg
def ok(msg):
print "\033[32m✓ \033[0m%s" % msg
def main():
cwd = os.getcwd()
json_file = os.path.join(cwd, 'dependencies.json')
if os.path.isfile(json_file) == False:
err("dependencies.json not found in current folder")
sys.exit(1)
with open(json_file) as data_file:
data = json.load(data_file)
dependencies = data["dependencies"]
for lib in dependencies:
command = pip.commands.install.InstallCommand()
opts, args = command.parser.parse_args()
requirements_set = command.run(opts, [lib])
requirements_set.install(opts)
ok("Successfuly installed mising dependencies")
if __name__ == "__main__":
main()
| 23.444444 | 60 | 0.640995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.262972 |
81fda8338c93f1b3783533f708ac76a777187c33 | 36,148 | py | Python | hxl/scripts.py | HXLStandard/libhxl-python | 8d32d32c8165ec2ef4f7d970c30ca037fcc46b35 | ["Unlicense"] | 30 | 2015-06-02T22:06:22.000Z | 2020-07-30T08:43:40.000Z | hxl/scripts.py | HXLStandard/libhxl-python | 8d32d32c8165ec2ef4f7d970c30ca037fcc46b35 | ["Unlicense"] | 262 | 2015-01-27T16:43:28.000Z | 2022-03-28T15:33:53.000Z | hxl/scripts.py | HXLStandard/libhxl-python | 8d32d32c8165ec2ef4f7d970c30ca037fcc46b35 | ["Unlicense"] | 9 | 2015-01-20T06:06:28.000Z | 2020-07-13T23:54:03.000Z |
"""
Console scripts
David Megginson
April 2015
This is a big, ugly module to support the libhxl
console scripts, including (mainly) argument parsing.
License: Public Domain
Documentation: https://github.com/HXLStandard/libhxl-python/wiki
"""
from __future__ import print_function
import argparse, json, logging, os, re, requests, sys
# Do not import hxl, to avoid circular imports
import hxl.converters, hxl.filters, hxl.io
logger = logging.getLogger(__name__)
# In Python2, sys.stdin is a byte stream; in Python3, it's a text stream
STDIN = sys.stdin.buffer
# Posix exit codes
EXIT_OK = 0
EXIT_ERROR = 1
EXIT_SYNTAX = 2
#
# Console script entry points
#
def hxladd():
"""Console script for hxladd."""
run_script(hxladd_main)
def hxlappend():
"""Console script for hxlappend."""
run_script(hxlappend_main)
def hxlclean():
"""Console script for hxlclean"""
run_script(hxlclean_main)
def hxlcount():
"""Console script for hxlcount."""
run_script(hxlcount_main)
def hxlcut():
"""Console script for hxlcut."""
run_script(hxlcut_main)
def hxldedup():
"""Console script for hxldedup."""
run_script(hxldedup_main)
def hxlhash():
"""Console script for hxlhash."""
run_script(hxlhash_main)
def hxlmerge():
"""Console script for hxlmerge."""
run_script(hxlmerge_main)
def hxlrename():
"""Console script for hxlrename."""
run_script(hxlrename_main)
def hxlreplace():
"""Console script for hxlreplace."""
run_script(hxlreplace_main)
def hxlfill():
"""Console script for hxlreplace."""
run_script(hxlfill_main)
def hxlexpand():
"""Console script for hxlexpand."""
run_script(hxlexpand_main)
def hxlexplode():
"""Console script for hxlexplode."""
run_script(hxlexplode_main)
def hxlimplode():
"""Console script for hxlimplode."""
run_script(hxlimplode_main)
def hxlselect():
"""Console script for hxlselect."""
run_script(hxlselect_main)
def hxlsort():
"""Console script for hxlsort."""
run_script(hxlsort_main)
def hxlspec():
"""Console script for hxlspec."""
run_script(hxlspec_main)
def hxltag():
"""Console script for hxltag."""
run_script(hxltag_main)
def hxlvalidate():
"""Console script for hxlvalidate."""
run_script(hxlvalidate_main)
#
# Main scripts for command-line tools.
#
def hxladd_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxladd with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Add new columns with constant values to a HXL dataset.')
parser.add_argument(
'-s',
'--spec',
help='Constant value to add to each row (may repeat option)',
metavar='header#<tag>=<value>',
action='append',
required=True
)
parser.add_argument(
'-b',
'--before',
help='Add new columns before existing ones rather than after them.',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.AddColumnsFilter(source, specs=args.spec, before=args.before)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
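# Hedged CLI sketch (file names illustrative): add a constant country column
# before the existing ones:
#     hxladd -s "Country name#country=Kenya" -b data.csv > data-with-country.csv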
def hxlappend_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlappend with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Concatenate two HXL datasets')
# repeatable argument
parser.add_argument(
'-a',
'--append',
help='HXL file to append (may repeat option).',
metavar='file_or_url',
action='append',
default=[]
)
parser.add_argument(
'-l',
'--list',
help='URL or filename of list of URLs (may repeat option). Will appear after sources in -a options.',
action='append',
default=[]
)
parser.add_argument(
'-x',
'--exclude-extra-columns',
        help='Do not add extra columns not in the original dataset.',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'From --append datasets, include only rows matching at least one query.')
args = parser.parse_args(args)
do_common_args(args)
append_sources = []
for append_source in args.append:
append_sources.append(hxl.data(append_source, True))
for list_source in args.list:
for append_source in hxl.filters.AppendFilter.parse_external_source_list(hxl.data(list_source, True)):
append_sources.append(hxl.data(append_source, True))
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.AppendFilter(
source,
append_sources=append_sources,
add_columns=(not args.exclude_extra_columns),
queries=args.query
)
hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
return EXIT_OK
def hxlclean_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlclean with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Clean data in a HXL file.')
parser.add_argument(
'-w',
'--whitespace',
help='Comma-separated list of tag patterns for whitespace normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-u',
'--upper',
help='Comma-separated list of tag patterns for uppercase conversion.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-l',
'--lower',
help='Comma-separated list of tag patterns for lowercase conversion.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-d',
'--date',
help='Comma-separated list of tag patterns for date normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'--date-format',
help='Date formatting string in strftime format (defaults to %%Y-%%m-%%d).',
default=None,
metavar='format',
)
parser.add_argument(
'-n',
'--number',
        help='Comma-separated list of tag patterns for number normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'--number-format',
help='Number formatting string in printf format (without leading %%).',
default=None,
metavar='format',
)
parser.add_argument(
'--latlon',
help='Comma-separated list of tag patterns for lat/lon normalisation.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-p',
'--purge',
help='Purge unparseable dates, numbers, and lat/lon during cleaning.',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'Clean only rows matching at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.CleanDataFilter(
source, whitespace=args.whitespace, upper=args.upper, lower=args.lower,
date=args.date, date_format=args.date_format, number=args.number, number_format=args.number_format,
latlon=args.latlon, purge=args.purge, queries=args.query
)
hxl.io.write_hxl(output.output, filter, show_headers=not args.remove_headers, show_tags=not args.strip_tags)
return EXIT_OK
def hxlcount_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlcount with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
# Command-line arguments
parser = make_args('Generate aggregate counts for a HXL dataset')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of column tags to count.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list,
default='loc,org,sector,adm1,adm2,adm3'
)
parser.add_argument(
'-a',
'--aggregator',
help='Aggregator statement',
metavar='statement',
action='append',
type=hxl.filters.Aggregator.parse,
default=[]
)
add_queries_arg(parser, 'Count only rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.CountFilter(source, patterns=args.tags, aggregators=args.aggregator, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
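# Hedged CLI sketch (file names illustrative): count unique org/sector
# combinations, writing HXLated counts to standard output:
#     hxlcount -t org,sector data.csv > counts.csv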
def hxlcut_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
parser = make_args('Cut columns from a HXL dataset.')
parser.add_argument(
'-i',
'--include',
help='Comma-separated list of column tags to include',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-x',
'--exclude',
help='Comma-separated list of column tags to exclude',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-s',
'--skip-untagged',
help="Skip columns without HXL hashtags",
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ColumnFilter(source, args.include, args.exclude, args.skip_untagged)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxldedup_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
parser = make_args('Remove duplicate rows from a HXL dataset.')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of column tags to use for deduplication (by default, use all values).',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
add_queries_arg(parser, 'Leave rows alone if they don\'t match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.DeduplicationFilter(source, args.tags, args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlhash_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
parser = make_args(
'Generate an MD5 hash for a HXL dataset (or just its header rows).',
hxl_output=False
)
parser.add_argument(
'-H',
'--headers-only',
help='Hash only the header and hashtag rows.',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source:
if args.headers_only:
print(source.columns_hash)
else:
print(source.data_hash)
return EXIT_OK
def hxlmerge_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlmerge with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Merge part of one HXL dataset into another.')
parser.add_argument(
'-m',
'--merge',
help='HXL file to write (if omitted, use standard output).',
metavar='filename',
required=True
)
parser.add_argument(
'-k',
'--keys',
help='HXL tag(s) to use as a shared key.',
metavar='tag,tag...',
required=True,
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of column tags to include from the merge dataset.',
metavar='tag,tag...',
required=True,
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-r',
'--replace',
help='Replace empty values in existing columns (when available) instead of adding new ones.',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-O',
'--overwrite',
help='Used with --replace, overwrite existing values.',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'Merged data only from rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output, hxl.io.data(args.merge, True) if args.merge else None as merge_source:
filter = hxl.filters.MergeDataFilter(
source, merge_source=merge_source,
keys=args.keys, tags=args.tags, replace=args.replace, overwrite=args.overwrite,
queries=args.query
)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlrename_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlrename with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Rename and retag columns in a HXL dataset')
parser.add_argument(
'-r',
'--rename',
help='Rename an old tag to a new one, with an optional new text header (may repeat option).',
action='append',
metavar='#?<original_tag>:<Text header>?#?<new_tag>',
default=[],
type=hxl.filters.RenameFilter.parse_rename
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.RenameFilter(source, args.rename)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlreplace_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlreplace with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Replace strings in a HXL dataset')
inline_group = parser.add_argument_group('Inline replacement')
map_group = parser.add_argument_group('External substitution map')
inline_group.add_argument(
'-p',
'--pattern',
help='String or regular expression to search for',
nargs='?'
)
inline_group.add_argument(
'-s',
'--substitution',
help='Replacement string',
nargs='?'
)
inline_group.add_argument(
'-t',
'--tags',
help='Tag patterns to match',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
inline_group.add_argument(
'-r',
'--regex',
help='Use a regular expression instead of a string',
action='store_const',
const=True,
default=False
)
map_group.add_argument(
'-m',
'--map',
help='Filename or URL of a mapping table using the tags #x_pattern (required), #x_substitution (required), #x_tag (optional), and #x_regex (optional), corresponding to the inline options above, for multiple substitutions.',
metavar='PATH',
nargs='?'
)
add_queries_arg(parser, 'Replace only in rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
if args.map:
replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.io.data(args.map, True))
else:
replacements = []
if args.pattern:
for tag in args.tags:
replacements.append(hxl.filters.ReplaceDataFilter.Replacement(args.pattern, args.substitution, tag, args.regex))
filter = hxl.filters.ReplaceDataFilter(source, replacements, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
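# Hedged CLI sketch (values illustrative): blank out a placeholder value in
# the #status column:
#     hxlreplace -p "TBD" -s "" -t status data.csv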
def hxlfill_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlfill with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Fill empty cells in a HXL dataset')
parser.add_argument(
'-t',
'--tag',
help='Fill empty cells only in matching columns (default: fill in all)',
metavar='tagpattern,...',
type=hxl.model.TagPattern.parse,
)
add_queries_arg(parser, 'Fill only in rows that match at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.FillDataFilter(source, pattern=args.tag, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlexpand_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlexpand with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Expand lists in cells by repeating rows')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of tag patterns for columns with lists to expand',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list,
nargs="?"
)
parser.add_argument(
"-s",
'--separator',
help='string separating list items (defaults to "|")',
metavar='string',
default="|"
)
parser.add_argument(
"-c",
'--correlate',
help='correlate list values instead of producing a cartesian product',
action='store_const',
const=True,
default=False
)
add_queries_arg(parser, 'Limit list expansion to rows matching at least one query.')
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ExpandListsFilter(source, patterns=args.tags, separator=args.separator, correlate=args.correlate, queries=args.query)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlexplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlexplode with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Explode a wide dataset into a long dataset')
parser.add_argument(
'-H',
'--header-att',
help='attribute to add to the label column (defaults to "label")',
metavar='att',
default="label"
)
parser.add_argument(
'-V',
'--value-att',
help='attribute to add to the value column (defaults to "value")',
        metavar='att',
default="value"
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ExplodeFilter(source, header_attribute=args.header_att, value_attribute=args.value_att)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlimplode_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
    Run hxlimplode with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Implode a long dataset into a wide dataset.')
parser.add_argument(
'-L',
'--label',
help='HXL tag pattern for the label column',
metavar='tagpattern',
required=True,
type=hxl.model.TagPattern.parse,
)
parser.add_argument(
'-V',
'--value',
help='HXL tag pattern for the value column',
metavar='tagpattern',
required=True,
type=hxl.model.TagPattern.parse,
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.ImplodeFilter(source, label_pattern=args.label, value_pattern=args.value)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlselect_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlselect with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
# Command-line arguments
parser = make_args('Filter rows in a HXL dataset.')
parser.add_argument(
'-q',
'--query',
help='Query expression for selecting rows (may repeat option for logical OR). <op> may be =, !=, <, <=, >, >=, ~, or !~',
action='append',
metavar='<tagspec><op><value>',
required=True
)
parser.add_argument(
'-r',
'--reverse',
help='Show only lines *not* matching criteria',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.RowFilter(source, queries=args.query, reverse=args.reverse)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
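# Hedged CLI sketch (values illustrative): keep only WASH-sector rows; add -r
# to invert the match:
#     hxlselect -q "#sector=WASH" data.csv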
def hxlsort_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
    Run hxlsort with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Sort a HXL dataset.')
parser.add_argument(
'-t',
'--tags',
help='Comma-separated list of tags to for columns to use as sort keys.',
metavar='tag,tag...',
type=hxl.model.TagPattern.parse_list
)
parser.add_argument(
'-r',
'--reverse',
help='Flag to reverse sort order.',
action='store_const',
const=True,
default=False
)
args = parser.parse_args(args)
do_common_args(args)
with make_source(args, stdin) as source, make_output(args, stdout) as output:
filter = hxl.filters.SortFilter(source, args.tags, args.reverse)
hxl.io.write_hxl(output.output, filter, show_tags=not args.strip_tags)
return EXIT_OK
def hxlspec_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
""" Run hxlspec with command-line arguments.
Args:
args (list): a list of command-line arguments
stdin (io.IOBase): alternative standard input (mainly for testing)
stdout (io.IOBase): alternative standard output (mainly for testing)
stderr (io.IOBase): alternative standard error (mainly for testing)
"""
def get_json (url_or_filename):
if not url_or_filename:
return json.load(stdin)
if re.match(r'^(?:https?|s?ftp)://', url_or_filename.lower()):
headers = make_headers(args)
response = requests.get(url_or_filename, verify=(not args.ignore_certs), headers=headers)
response.raise_for_status()
return response.json()
else:
with open(url_or_filename, "r") as input:
return json.load(input)
parser = make_args('Process a HXL JSON spec')
args = parser.parse_args(args)
do_common_args(args)
spec = get_json(args.infile)
source = hxl.io.from_spec(spec, allow_local_ok=True)
with make_output(args, stdout) as output:
        hxl.io.write_hxl(output.output, source, show_tags=not args.strip_tags)
    return EXIT_OK
def hxltag_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxltag with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Add HXL tags to a raw CSV file.')
parser.add_argument(
'-a',
'--match-all',
help='Match the entire header text (not just a substring)',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-m',
'--map',
help='Mapping expression',
required=True,
action='append',
metavar='Header Text#tag',
type=hxl.converters.Tagger.parse_spec
)
parser.add_argument(
'-d',
'--default-tag',
help='Default tag for non-matching columns',
metavar='#tag',
type=hxl.model.Column.parse
)
args = parser.parse_args(args)
do_common_args(args)
with make_input(args, stdin) as input, make_output(args, stdout) as output:
tagger = hxl.converters.Tagger(input, args.map, default_tag=args.default_tag, match_all=args.match_all)
hxl.io.write_hxl(output.output, hxl.io.data(tagger), show_tags=not args.strip_tags)
return EXIT_OK
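# --- Illustrative sketch (editor's addition). The mapping strings are
# hypothetical; the required shape is 'Header Text#tag', which
# hxl.converters.Tagger.parse_spec turns into a (header, tag) pair.
def _example_hxltag_usage():
    return hxltag_main(['--map', 'Organisation#org',
                        '--map', 'Cluster#sector',
                        'raw.csv', 'tagged.csv'])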
def hxlvalidate_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
Run hxlvalidate with command-line arguments.
@param args A list of arguments, excluding the script name
@param stdin Standard input for the script
@param stdout Standard output for the script
@param stderr Standard error for the script
"""
parser = make_args('Validate a HXL dataset.')
parser.add_argument(
'-s',
'--schema',
help='Schema file for validating the HXL dataset (if omitted, use the default core schema).',
metavar='schema',
default=None
)
parser.add_argument(
'-a',
'--all',
help='Include all rows in the output, including those without errors',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'-e',
'--error-level',
        help='Minimum error level to show (defaults to "info")',
choices=['info', 'warning', 'error'],
metavar='info|warning|error',
default='info'
)
args = parser.parse_args(args)
do_common_args(args)
with make_input(args, stdin) as input, make_output(args, stdout) as output:
class Counter:
infos = 0
warnings = 0
errors = 0
def callback(e):
"""Show a validation error message."""
if e.rule.severity == 'info':
if args.error_level != 'info':
return
Counter.infos += 1
elif e.rule.severity == 'warning':
if args.error_level == 'error':
return
Counter.warnings += 1
else:
Counter.errors += 1
message = '[{}] '.format(e.rule.severity)
if e.row:
if e.rule:
message += "{},{}: ".format(e.row.row_number + 1, e.rule.tag_pattern)
else:
message += "{}: ".format(e.row.row_number + 1)
elif e.rule:
message += "<dataset>,{}: ".format(e.rule.tag_pattern)
else:
message += "<dataset>: "
if e.value:
message += '"{}" '.format(e.value)
if e.message:
message += e.message
message += "\n"
output.write(message)
output.write("Validating {} with schema {} ...\n".format(args.infile or "<standard input>", args.schema or "<default>"))
source = hxl.io.data(input)
if args.schema:
with make_input(args, None, args.schema) as schema_input:
schema = hxl.schema(schema_input, callback=callback)
else:
schema = hxl.schema(callback=callback)
schema.validate(source)
if args.error_level == 'info':
output.write("{:,} error(s), {:,} warnings, {:,} suggestions\n".format(Counter.errors, Counter.warnings, Counter.infos))
elif args.error_level == 'warning':
output.write("{:,} error(s), {:,} warnings\n".format(Counter.errors, Counter.warnings))
else:
output.write("{:,} error(s)\n".format(Counter.errors))
if Counter.errors > 0:
output.write("Validation failed.\n")
return EXIT_ERROR
else:
output.write("Validation succeeded.\n")
return EXIT_OK
#
# Utility functions
#
def run_script(func):
"""Try running a command-line script, with exception handling."""
try:
sys.exit(func(sys.argv[1:], STDIN, sys.stdout))
except KeyboardInterrupt:
logger.error("Interrupted")
sys.exit(EXIT_ERROR)
def make_args(description, hxl_output=True):
"""Set up parser with default arguments.
@param description: usage description to show
@param hxl_output: if True (default), include options for HXL output.
@returns: an argument parser, partly set up.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'infile',
help='HXL file to read (if omitted, use standard input).',
nargs='?'
)
if hxl_output:
parser.add_argument(
'outfile',
help='HXL file to write (if omitted, use standard output).',
nargs='?'
)
parser.add_argument(
'--sheet',
help='Select sheet from a workbook (1 is first sheet)',
metavar='number',
type=int,
nargs='?'
)
parser.add_argument(
'--selector',
help='JSONPath expression for starting point in JSON input',
metavar='path',
nargs='?'
)
parser.add_argument(
'--http-header',
help='Custom HTTP header to send with request',
metavar='header',
action='append'
)
if hxl_output:
parser.add_argument(
'--remove-headers',
help='Strip text headers from the CSV output',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'--strip-tags',
help='Strip HXL tags from the CSV output',
action='store_const',
const=True,
default=False
)
parser.add_argument(
"--ignore-certs",
help="Don't verify SSL connections (useful for self-signed)",
action='store_const',
const=True,
default=False
)
parser.add_argument(
'--log',
help='Set minimum logging level',
metavar='debug|info|warning|error|critical|none',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='error'
)
return parser
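# --- Illustrative sketch (editor's addition). Every script funnels through
# make_args(), so they all share the positional infile/outfile arguments and
# the --sheet/--selector/--http-header/--log options. File names are hypothetical.
def _example_make_args_usage():
    parser = make_args('demo')
    return parser.parse_args(['in.csv', 'out.csv', '--log', 'info'])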
def add_queries_arg(parser, help='Apply only to rows matching at least one query.'):
parser.add_argument(
'-q',
'--query',
help=help,
metavar='<tagspec><op><value>',
action='append'
)
return parser
def do_common_args(args):
"""Process standard args"""
logging.basicConfig(format='%(levelname)s (%(name)s): %(message)s', level=args.log.upper())
def make_source(args, stdin=STDIN):
"""Create a HXL input source."""
# construct the input object
input = make_input(args, stdin)
return hxl.io.data(input)
def make_input(args, stdin=sys.stdin, url_or_filename=None):
"""Create an input object"""
if url_or_filename is None:
url_or_filename = args.infile
# sheet index
sheet_index = args.sheet
if sheet_index is not None:
sheet_index -= 1
# JSONPath selector
selector = args.selector
http_headers = make_headers(args)
return hxl.io.make_input(
url_or_filename or stdin,
sheet_index=sheet_index,
selector=selector,
allow_local=True,
http_headers=http_headers,
verify_ssl=(not args.ignore_certs)
)
def make_output(args, stdout=sys.stdout):
"""Create an output stream."""
if args.outfile:
return FileOutput(args.outfile)
else:
return StreamOutput(stdout)
def make_headers(args):
# get custom headers
header_strings = []
header = os.environ.get("HXL_HTTP_HEADER")
if header is not None:
header_strings.append(header)
if args.http_header is not None:
header_strings += args.http_header
http_headers = {}
for header in header_strings:
parts = header.partition(':')
http_headers[parts[0].strip()] = parts[2].strip()
return http_headers
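# --- Illustrative sketch (editor's addition). make_headers() merges one header
# string from the HXL_HTTP_HEADER environment variable with any --http-header
# options, splitting each on the first ':' into a name/value pair.
def _example_make_headers():
    class _Args(object):  # minimal stand-in for the argparse namespace
        http_header = ['Authorization: Bearer abc123']  # made-up token
    return make_headers(_Args())  # -> {'Authorization': 'Bearer abc123'} (plus any env header)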
class FileOutput(object):
def __init__(self, filename):
self.output = open(filename, 'w')
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.output.close()
    def write(self, s):  # parity with StreamOutput; hxlvalidate_main() calls output.write()
        self.output.write(s)
class StreamOutput(object):
def __init__(self, output):
self.output = output
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
pass
def write(self, s):
self.output.write(s)
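# --- Illustrative sketch (editor's addition). Both wrappers are context
# managers exposing .output and .write(), so callers need not care whether an
# output file name was supplied. 'out.csv' is a hypothetical path.
def _example_output_usage():
    with FileOutput('out.csv') as out:
        out.write('#adm1,#affected\n')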
| 30.478921 | 231 | 0.628389 | 560 | 0.015492 | 0 | 0 | 0 | 0 | 0 | 0 | 12,894 | 0.3567 |
81fdb0e1136255e877c9ae2c151c33d3b0b0ee1d | 338 | py | Python | 1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
]
| null | null | null | 1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
]
| null | null | null | 1801-1900/1807.evaluate-thebracket-pairs-of-a-string.py | guangxu-li/leetcode-in-python | 8a5a373b32351500342705c141591a1a8f5f1cb1 | [
"MIT"
]
| null | null | null | #
# @lc app=leetcode id=1807 lang=python3
#
# [1807] Evaluate the Bracket Pairs of a String
#
# @lc code=start
import re
class Solution:
def evaluate(self, s: str, knowledge: list[list[str]]) -> str:
mapping = dict(knowledge)
return re.sub(r"\((\w+?)\)", lambda m: mapping.get(m.group(1), "?"), s)
# @lc code=end
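# Illustrative check (editor's addition), based on the problem's first example
# as I recall it - every "(key)" is replaced by its mapped value and unknown
# keys become "?":
#   Solution().evaluate("(name)is(age)yearsold", [["name", "bob"], ["age", "two"]])
#   -> "bobistwoyearsold"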
| 18.777778 | 79 | 0.612426 | 196 | 0.579882 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.399408 |
81fdf45abe6a280ed6357190764ee304c85901c1 | 787 | py | Python | Math Functions/Uncategorized/Herons formula.py | adrikagupta/Must-Know-Programming-Codes | d403428bb9e619b855bde1ae9f46f41a2952b4fa | [
"MIT"
]
| 13 | 2017-10-11T09:03:48.000Z | 2020-06-09T16:00:50.000Z | Math Functions/Uncategorized/Herons formula.py | adrikagupta/Must-Know-Programming-Codes | d403428bb9e619b855bde1ae9f46f41a2952b4fa | [
"MIT"
]
| 4 | 2017-10-15T06:23:10.000Z | 2017-10-22T08:22:49.000Z | Math Functions/Uncategorized/Herons formula.py | adrikagupta/Must-Know-Programming-Codes | d403428bb9e619b855bde1ae9f46f41a2952b4fa | [
"MIT"
]
| 23 | 2017-10-14T05:22:33.000Z | 2019-10-30T19:35:42.000Z | #Heron's formula#
import math
unit_of_measurement = "cm"
side1 = int(input("Enter the length of side A in cm: "))
side2 = int(input("Enter the length of side B in cm: "))
side3 = int(input("Enter the length of side C in cm: "))
# Expanded form of Heron's formula: 16*A^2 = 4*(a^2*b^2 + a^2*c^2 + b^2*c^2) - (a^2 + b^2 + c^2)^2
bracket1 = (side1 ** 2) * (side2 ** 2) + (side1 ** 2) * (side3 ** 2) + (side2 ** 2) * (side3 ** 2)
bracket2 = (side1 ** 2) + (side2 ** 2) + (side3 ** 2)
function_bracket1 = 4 * bracket1
function_bracket2 = bracket2 ** 2
both_brackets = function_bracket1 - function_bracket2
result1 = math.sqrt(both_brackets)
area_of_triangle = result1 / 4
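# Cross-check (editor's addition, left commented out so the script's output is
# unchanged): the classic Heron form s = (a+b+c)/2, A = sqrt(s(s-a)(s-b)(s-c))
# gives the same area, since 16*A^2 = 4*(a^2*b^2 + a^2*c^2 + b^2*c^2) - (a^2 + b^2 + c^2)^2.
# s = (side1 + side2 + side3) / 2
# assert math.isclose(area_of_triangle, math.sqrt(s * (s - side1) * (s - side2) * (s - side3)))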
print("Side A", "=", side1, sep="")
print("Side B", "=", side2, sep="")
print("Side C", "=", side3, sep="")
print()
print("Calculated using Heron's Formula")
print()
print("Area of triangle"), print(area_of_triangle, unit_of_measurement, "2", sep="")
| 29.148148 | 84 | 0.672173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.285896 |
81fe4dff84aff0d592f6a1cba826b336e24d2573 | 4,605 | py | Python | viewer/bitmap_from_array.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
]
| 2 | 2021-03-17T11:25:46.000Z | 2021-11-18T04:20:54.000Z | viewer/bitmap_from_array.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
]
| null | null | null | viewer/bitmap_from_array.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
]
| null | null | null | from __future__ import absolute_import, division, print_function
import numpy as np
import wx
from dials.array_family import flex
from dials_viewer_ext import rgb_img
class wxbmp_from_np_array(object):
def __init__(
self, lst_data_in, show_nums=True, palette="black2white", lst_data_mask_in=None
):
self.wx_bmp_arr = rgb_img()
if lst_data_in is None and lst_data_mask_in is None:
self._ini_wx_bmp_lst = None
else:
self._ini_wx_bmp_lst = []
for lst_pos in range(len(lst_data_in)):
data_3d_in = lst_data_in[lst_pos]
xmax = data_3d_in.shape[1]
ymax = data_3d_in.shape[2]
# remember to put here some assertion to check that
# both arrays have the same shape
if lst_data_mask_in is not None:
data_3d_in_mask = lst_data_mask_in[lst_pos]
self.vl_max = float(np.amax(data_3d_in))
self.vl_min = float(np.amin(data_3d_in))
tmp_data2d = np.zeros((xmax, ymax), "double")
tmp_data2d_mask = np.zeros((xmax, ymax), "double")
z_dp = data_3d_in.shape[0]
single_block_lst_01 = []
for z in range(z_dp):
# print "z =", z
tmp_data2d[:, :] = data_3d_in[z : z + 1, :, :]
if lst_data_mask_in is not None:
tmp_data2d_mask[:, :] = data_3d_in_mask[z : z + 1, :, :]
else:
tmp_data2d_mask = None
                    data_single_img = self._wx_img_w_cpp(
                        tmp_data2d, show_nums, palette, tmp_data2d_mask
                    )
                    single_block_lst_01.append(data_single_img)
self._ini_wx_bmp_lst.append(single_block_lst_01)
def bmp_lst_scaled(self, scale=1.0):
if self._ini_wx_bmp_lst is None:
NewW = 350
wx_image = wx.Image(NewW, NewW)
wxBitmap = wx_image.ConvertToBitmap()
dc = wx.MemoryDC(wxBitmap)
text = "No Shoebox data"
w, h = dc.GetSize()
tw, th = dc.GetTextExtent(text)
dc.Clear()
dc.DrawText(text, (w - tw) / 2, (h - th) / 2) # display text in center
dc.SelectObject(wxBitmap)
del dc
wx_bmp_lst = [[wxBitmap]]
else:
wx_bmp_lst = []
for data_3d in self._ini_wx_bmp_lst:
single_block_lst = []
                for single_img_data in data_3d:
                    single_block_lst.append(self._wx_bmp_scaled(single_img_data, scale))
wx_bmp_lst.append(single_block_lst)
return wx_bmp_lst
def _wx_img_w_cpp(self, np_2d_tmp, show_nums, palette, np_2d_mask=None):
xmax = np_2d_tmp.shape[1]
ymax = np_2d_tmp.shape[0]
if np_2d_mask is None:
np_2d_mask = np.zeros((ymax, xmax), "double")
transposed_data = np.zeros((ymax, xmax), "double")
transposed_mask = np.zeros((ymax, xmax), "double")
transposed_data[:, :] = np_2d_tmp
transposed_mask[:, :] = np_2d_mask
flex_data_in = flex.double(transposed_data)
flex_mask_in = flex.double(transposed_mask)
if palette == "black2white":
palette_num = 1
elif palette == "white2black":
palette_num = 2
elif palette == "hot ascend":
palette_num = 3
else: # assuming "hot descend"
palette_num = 4
img_array_tmp = self.wx_bmp_arr.gen_bmp(
flex_data_in, flex_mask_in, show_nums, palette_num
)
np_img_array = img_array_tmp.as_numpy_array()
height = np.size(np_img_array[:, 0:1, 0:1])
width = np.size(np_img_array[0:1, :, 0:1])
img_array = np.empty((height, width, 3), "uint8")
img_array[:, :, :] = np_img_array[:, :, :]
self._wx_image = wx.Image(width, height)
        self._wx_image.SetData(img_array.tobytes())
data_to_become_bmp = (self._wx_image, width, height)
return data_to_become_bmp
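    # Illustrative note (editor's addition): the tuple built above is
    # (wx.Image, width, height); _wx_bmp_scaled() below unpacks it. A minimal
    # stand-alone round trip, assuming a running wx.App, would be:
    #   arr = np.zeros((10, 10, 3), "uint8")
    #   img = wx.Image(10, 10)
    #   img.SetData(arr.tobytes())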
def _wx_bmp_scaled(self, data_to_become_bmp, scale):
to_become_bmp = data_to_become_bmp[0]
width = data_to_become_bmp[1]
height = data_to_become_bmp[2]
NewW = int(width * scale)
NewH = int(height * scale)
to_become_bmp = to_become_bmp.Scale(NewW, NewH, wx.IMAGE_QUALITY_NORMAL)
wxBitmap = to_become_bmp.ConvertToBitmap()
return wxBitmap
| 33.369565 | 87 | 0.569381 | 4,433 | 0.962649 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.057112 |
81fea7350bf3a22df6647f4ff0e42232c0fd7743 | 191 | py | Python | spinesTS/utils/_validation.py | BirchKwok/spinesTS | b88ec333f41f58979e0570177d1fdc364d976056 | [
"Apache-2.0"
]
| 2 | 2021-08-15T09:29:37.000Z | 2022-03-10T13:56:13.000Z | spinesTS/utils/_validation.py | BirchKwok/spinesTS | b88ec333f41f58979e0570177d1fdc364d976056 | [
"Apache-2.0"
]
| null | null | null | spinesTS/utils/_validation.py | BirchKwok/spinesTS | b88ec333f41f58979e0570177d1fdc364d976056 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
def check_x_y(x, y):
    """Validate that x and y are numpy arrays of equal length and supported dimensionality."""
    assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray), "x and y must be numpy arrays"
    assert np.ndim(x) <= 3 and np.ndim(y) <= 2, "x must be at most 3-D and y at most 2-D"
| 23.875 | 67 | 0.612565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
81feb43ec495c8683dc8e553db15a96568c7f33c | 34,772 | py | Python | sphinxsharp-pro/sphinxsharp.py | madTeddy/sphinxsharp-pro | e7a164214113cef33bca96e9fbbab3feafe16823 | [
"MIT"
]
| 2 | 2019-04-22T12:59:26.000Z | 2021-07-30T21:32:44.000Z | sphinxsharp-pro/sphinxsharp.py | madTeddy/sphinxsharp-pro | e7a164214113cef33bca96e9fbbab3feafe16823 | [
"MIT"
]
| null | null | null | sphinxsharp-pro/sphinxsharp.py | madTeddy/sphinxsharp-pro | e7a164214113cef33bca96e9fbbab3feafe16823 | [
"MIT"
]
| null | null | null | """
CSharp (С#) domain for sphinx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sphinxsharp Pro (with custom styling)
:copyright: Copyright 2021 by MadTeddy
"""
import re
import warnings
from os import path
from collections import defaultdict, namedtuple
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from sphinx.locale import get_translation
from sphinx.domains import Domain, Index, ObjType
from sphinx.roles import XRefRole
from sphinx.directives import ObjectDescription
from sphinx.util.docfields import DocFieldTransformer
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
from sphinx.util.fileutil import copy_asset
MODIFIERS = ('public', 'private', 'protected', 'internal',
'static', 'sealed', 'abstract', 'const', 'partial',
'readonly', 'virtual', 'extern', 'new', 'override',
'unsafe', 'async', 'event', 'delegate')
VALUE_KEYWORDS = ('char', 'ulong', 'byte', 'decimal',
'double', 'bool', 'int', 'null', 'sbyte',
'float', 'long', 'object', 'short', 'string',
'uint', 'ushort', 'void')
PARAM_MODIFIERS = ('ref', 'out', 'params')
MODIFIERS_RE = '|'.join(MODIFIERS)
PARAM_MODIFIERS_RE = '|'.join(PARAM_MODIFIERS)
TYPE_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?(\w+)\s([\w\.]+)(?:<(.+)>)?(?:\s?\:\s?(.+))?$')
REF_TYPE_RE = re.compile(r'^(?:(new)\s+)?([\w\.]+)\s*(?:<(.+)>)*(\[\])*\s?(?:\((.*)\))?$')
METHOD_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?([^\s=\(\)]+\s+)?([^\s=\(\)]+)\s?(?:\<(.+)\>)?\s?(?:\((.+)*\))$')
PARAM_SIG_RE = re.compile(r'^(?:(?:(' + PARAM_MODIFIERS_RE + r')\s)*)?([^=]+)\s+([^=]+)\s*(?:=\s?(.+))?$')
VAR_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^=]+)\s+([^\s=]+)\s*(?:=\s*(.+))?$')
PROP_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?(.+)\s+([^\s]+)\s*(?:{(\s*get;\s*)?((?:'
+ MODIFIERS_RE + r')?\s*set;\s*)?})$')
ENUM_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(?:enum)\s?(\w+)$')
_ = get_translation('sphinxsharp')
class CSharpObject(ObjectDescription):
PARENT_ATTR_NAME = 'sphinxsharp:parent'
PARENT_TYPE_NAME = 'sphinxsharp:type'
ParentType = namedtuple('ParentType', ['parent', 'name', 'type', 'override'])
option_spec = {
'noindex': directives.flag
}
def __init__(self, *args, **kwargs):
super(CSharpObject, self).__init__(*args, **kwargs)
self.parentname_set = None
self.parentname_saved = None
def run(self):
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
self.indexnode = addnodes.index(entries=[])
node = addnodes.desc()
node.document = self.state.document
node['domain'] = self.domain
node['classes'].append('csharp')
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
self.names = []
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
beforesignode = CSNodes.EmptyNode()
node.append(beforesignode)
signode = addnodes.desc_signature(sig, '')
signode['first'] = False
node.append(signode)
self.before_sig(beforesignode)
try:
name = self.handle_signature(sig, signode)
except ValueError:
signode.clear()
signode += addnodes.desc_name(sig, sig)
continue
if name not in self.names:
self.names.append(name)
if not noindex:
self.add_target_and_index(name, sig, signode)
aftersignode = CSNodes.EmptyNode()
node.append(aftersignode)
self.after_sig(aftersignode)
contentnode = addnodes.desc_content()
node.append(contentnode)
self.before_content_node(contentnode)
if self.names:
self.env.temp_data['object'] = self.names[0]
self.before_content()
self.state.nested_parse(self.content, self.content_offset, contentnode)
self.after_content_node(contentnode)
DocFieldTransformer(self).transform_all(contentnode)
self.env.temp_data['object'] = None
self.after_content()
return [self.indexnode, node]
def before_sig(self, signode):
"""
Called before main ``signode`` appends
"""
pass
def after_sig(self, signode):
"""
Called after main ``signode`` appends
"""
pass
def before_content_node(self, node):
"""
Get ``contentnode`` before main content will append
"""
pass
def after_content_node(self, node):
"""
Get ``contentnode`` after main content was appended
"""
pass
def before_content(self):
obj = self.env.temp_data['object']
if obj:
self.parentname_set = True
self.parentname_saved = self.env.ref_context.get(self.PARENT_ATTR_NAME)
self.env.ref_context[self.PARENT_ATTR_NAME] = obj
else:
self.parentname_set = False
def after_content(self):
if self.parentname_set:
self.env.ref_context[self.PARENT_ATTR_NAME] = self.parentname_saved
def has_parent(self):
return self._check_parent(self.PARENT_ATTR_NAME)
def has_parent_type(self):
return self._check_parent(self.PARENT_TYPE_NAME)
def _check_parent(self, attr):
return attr in self.env.ref_context and \
self.env.ref_context[attr] is not None
def get_parent(self):
return self.env.ref_context.get(self.PARENT_ATTR_NAME)
def get_type_parent(self):
return self.env.ref_context.get(self.PARENT_TYPE_NAME)
def get_index_text(self, sig, name, typ):
raise NotImplementedError('Must be implemented in subclass')
def parse_signature(self, sig):
raise NotImplementedError('Must be implemented in subclass')
def add_target_and_index(self, name, sig, signode):
objname, objtype = self.get_obj_name(sig)
type_parent = self.get_type_parent() if self.has_parent_type() else None
if self.objtype != 'type' and type_parent:
self.env.ref_context[self.PARENT_ATTR_NAME] = '{}{}'.format(type_parent.parent + '.' \
if type_parent.parent else '',
type_parent.name)
name = self.get_fullname(objname)
self.names.clear()
self.names.append(name)
anchor = '{}-{}'.format(self.objtype, name)
if anchor not in self.state.document.ids:
signode['names'].append(anchor)
signode['ids'].append(anchor)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['sphinxsharp']['objects']
key = (self.objtype, name)
if key in objects:
warnings.warn('duplicate description of {}, other instance in {}'.format(
key, self.env.doc2path(objects[key][0])), Warning)
objects[key] = (self.env.docname, 'delegate' if self.objtype == 'method' else objtype)
index_text = self.get_index_text(sig, objname, objtype)
if index_text:
parent = self.get_parent() if self.has_parent() else None
if type_parent and type_parent.override and type_parent.name != objname:
type_parent = self.ParentType(parent=type_parent.parent, name=type_parent.name, type=type_parent.type,
override=None)
index_format = '{parent} (C# {namespace});{text}' \
if (type_parent and type_parent.parent and (type_parent.name == objname and self.objtype == 'type') \
and not type_parent.override) or (parent and not type_parent) \
else '{name} (C# {type} {in_text} {parent});{text}' if type_parent and type_parent.name else '{text}'
self.indexnode['entries'].append(('single', index_format.format(
parent=type_parent.parent if type_parent else parent if parent else '',
namespace=_('namespace'),
text=index_text,
name=type_parent.override if type_parent and type_parent.override \
else type_parent.name if type_parent else '',
type=_(type_parent.type) if type_parent else '',
in_text=_('in')
), anchor, None, None))
def get_fullname(self, name):
fullname = '{parent}{name}'.format(
parent=self.get_parent() + '.' if self.has_parent() else '', name=name)
return fullname
def get_obj_name(self, sig):
raise NotImplementedError('Must be implemented in subclass')
def append_ref_signature(self, typname, signode, append_generic=True):
match = REF_TYPE_RE.match(typname.strip())
if not match:
raise Exception('Invalid reference type signature. Got: {}'.format(typname))
is_new, name, generic, is_array, constr = match.groups()
tnode = addnodes.desc_type()
if is_new:
tnode += CSNodes.Keyword(text='new')
tnode += CSNodes.TextNode(text=' ')
types = name.split('.')
explicit_path = []
i = 1
for t in types:
styp = t.strip()
refnode = None
if styp not in VALUE_KEYWORDS:
explicit_path.append(styp)
refnode = addnodes.pending_xref('', refdomain='sphinxsharp', reftype=None,
reftarget=styp, modname=None, classname=None)
if not self.has_parent():
refnode[self.PARENT_ATTR_NAME] = None
else:
refnode[self.PARENT_ATTR_NAME] = self.get_parent()
if len(explicit_path) > 1:
target_path = '.'.join(explicit_path[:-1])
type_par = self.get_type_parent() if self.has_parent_type() else None
refnode[self.PARENT_ATTR_NAME] = (type_par.parent + '.' \
if type_par and type_par.parent \
else '') + target_path
refnode += CSNodes.UnknownType(typ=None, text=styp)
else:
refnode = CSNodes.Keyword(text=styp)
tnode += refnode
if i < len(types):
tnode += CSNodes.TextNode(text='.')
i += 1
if append_generic and generic:
gnode = CSNodes.EmptyNode()
gnode += CSNodes.TextNode(text='<')
gen_groups = split_sig(generic)
i = 1
for g in gen_groups:
self.append_ref_signature(g, gnode, append_generic)
if i < len(gen_groups):
gnode += CSNodes.TextNode(text=', ')
i += 1
gnode += CSNodes.TextNode(text='>')
tnode += gnode
if is_array:
tnode += CSNodes.TextNode(text='[]')
if constr is not None:
tnode += CSNodes.TextNode(text='()')
signode += tnode
def append_generic(self, generic, signode):
gnode = CSNodes.EmptyNode()
gnode += CSNodes.TextNode(text='<')
generics = generic.split(',')
i = 1
for g in generics:
gnode += CSNodes.Generic(text=g)
if i < len(generics):
gnode += CSNodes.TextNode(text=', ')
i += 1
gnode += CSNodes.TextNode(text='>')
signode += gnode
class CSharpType(CSharpObject):
option_spec = {
**CSharpObject.option_spec,
'nonamespace': directives.flag,
'parent': directives.unchanged
}
def before_sig(self, signode):
if 'nonamespace' not in self.options and self.has_parent():
signode += CSNodes.Description(title=_('namespace'), desc=self.get_parent())
def handle_signature(self, sig, signode):
mod, typ, name, generic, inherits = self.parse_signature(sig)
tnode = CSNodes.EmptyNode()
tnode += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
tnode += CSNodes.TextNode(text=' ')
tnode += CSNodes.Keyword(text='{}'.format(typ))
tnode += CSNodes.TextNode(text=' ')
tnode += CSNodes.UnknownType(typ=typ, text=name)
if generic:
self.append_generic(generic, tnode)
if inherits:
inherits_node = CSNodes.EmptyNode()
inherits_node += CSNodes.TextNode(text=' : ')
inherit_types = split_sig(inherits)
i = 1
for t in inherit_types:
self.append_ref_signature(t, inherits_node)
if i < len(inherit_types):
inherits_node += CSNodes.TextNode(text=', ')
i += 1
tnode += inherits_node
signode += tnode
opt_parent = self.options['parent'] if 'parent' in self.options else None
form = '{}.{}' if self.has_parent() and opt_parent else '{}{}'
parent = form.format(self.get_parent() if self.has_parent() else '', opt_parent if opt_parent else '')
self.env.ref_context[CSharpObject.PARENT_TYPE_NAME] = self.ParentType(
parent=parent, name=name, type=typ, override=opt_parent)
if opt_parent:
self.env.ref_context[self.PARENT_ATTR_NAME] = parent
return self.get_fullname(name)
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})'.format(name, _(typ))
return rname
def parse_signature(self, sig):
match = TYPE_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid type signature. Got: {}'.format(sig))
mod, typ, names, generic, inherits = match.groups()
return mod, typ.strip(), names, generic, inherits
def get_obj_name(self, sig):
_, typ, name, _, _ = self.parse_signature(sig)
return name, typ
class CSharpEnum(CSharpObject):
option_spec = {**CSharpObject.option_spec, 'values': directives.unchanged_required,
**dict(zip([('val(' + str(i) + ')') for i in range(1, 21)],
[directives.unchanged] * 20))}
def handle_signature(self, sig, signode):
mod, name = self.parse_signature(sig)
node = CSNodes.EmptyNode()
if mod:
node += CSNodes.Modificator(text='{}'.format(mod.strip()))
node += CSNodes.TextNode(text=' ')
node += CSNodes.Keyword(text='enum')
node += CSNodes.TextNode(text=' ')
node += CSNodes.Enum(text='{}'.format(name.strip()))
signode += node
return self.get_fullname(name)
def after_content_node(self, node):
options = self.options['values'].split()
node += CSNodes.Description(title=_('values').title(), desc=', '.join(options))
options_values = list(value for key, value in self.options.items() \
if key not in ('noindex', 'values') and value)
if not options_values:
return
i = 0
for vname in options:
if i < len(options_values):
node += CSNodes.Description(title=vname, desc=options_values[i])
i += 1
def parse_signature(self, sig):
match = ENUM_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid enum signature. Got: {}'.format(sig))
mod, name = match.groups()
return mod, name.strip()
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})'.format(name, _('enum'))
return rname
def get_obj_name(self, sig):
_, name = self.parse_signature(sig)
return name, 'enum'
class CSharpVariable(CSharpObject):
_default = ''
def handle_signature(self, sig, signode):
mod, typ, name, self._default = self.parse_signature(sig)
node = CSNodes.EmptyNode()
node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
node += CSNodes.TextNode(text=' ')
self.append_ref_signature(typ, node)
node += CSNodes.TextNode(text=' ')
node += CSNodes.VariableName(text='{}'.format(name))
signode += node
return self.get_fullname(name)
def before_content_node(self, node):
if self._default:
node += CSNodes.Description(title=_('value').title(), desc=self._default)
def parse_signature(self, sig):
match = VAR_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid variable signature. Got: {}'.format(sig))
mod, typ, name, default = match.groups()
return mod, typ.strip(), name.strip(), default
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})->{}'.format(name, _('variable'), typ)
return rname
def get_obj_name(self, sig):
_, typ, name, _ = self.parse_signature(sig)
return name, typ
class CSharpProperty(CSharpObject):
def handle_signature(self, sig, signode):
mod, typ, name, getter, setter = self.parse_signature(sig)
node = CSNodes.EmptyNode()
node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
node += CSNodes.TextNode(text=' ')
self.append_ref_signature(typ, node)
node += CSNodes.TextNode(text=' ')
node += CSNodes.MethodName(text='{}'.format(name))
node += CSNodes.TextNode(text=' { ')
accessors = []
if getter:
accessors.append('get;')
if setter:
accessors.append(setter.strip())
node += CSNodes.Modificator(text=' '.join(accessors))
node += CSNodes.TextNode(text=' } ')
signode += node
return self.get_fullname(name)
def parse_signature(self, sig):
match = PROP_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid property signature. Got: {}'.format(sig))
mod, typ, name, getter, setter = match.groups()
return mod, typ.strip(), name.strip(), getter, setter
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})->{}'.format(name, _('property'), typ)
return rname
def get_obj_name(self, sig):
_, typ, name, _, _ = self.parse_signature(sig)
return name, typ
class CSharpMethod(CSharpObject):
option_spec = {**CSharpObject.option_spec,
'returns': directives.unchanged,
**dict(zip([('param(' + str(i) + ')') for i in range(1, 8)],
[directives.unchanged] * 7))}
_params_list = ()
def handle_signature(self, sig, signode):
mod, typ, name, generic, params = self.parse_signature(sig)
node = CSNodes.EmptyNode()
node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
node += CSNodes.TextNode(text=' ')
self.append_ref_signature(typ if typ else name, node)
if typ:
node += CSNodes.TextNode(text=' ')
node += CSNodes.MethodName(text='{}'.format(name))
if generic:
self.append_generic(generic, node)
param_node = CSNodes.EmptyNode()
param_node += CSNodes.TextNode(text='(')
if params:
self._params_list = self._get_params(params)
i = 1
for (pmod, ptyp, pname, pvalue) in self._params_list:
pnode = CSNodes.EmptyNode()
if pmod:
pnode += CSNodes.Keyword(text='{}'.format(pmod))
pnode += CSNodes.TextNode(text=' ')
self.append_ref_signature(ptyp, pnode)
pnode += CSNodes.TextNode(text=' ')
pnode += CSNodes.TextNode(text='{}'.format(pname))
if pvalue:
pnode += CSNodes.TextNode(text=' = ')
self.append_ref_signature(pvalue, pnode)
param_node += pnode
if i < len(self._params_list):
param_node += CSNodes.TextNode(text=', ')
i += 1
param_node += CSNodes.TextNode(text=')')
node += param_node
signode += node
return self.get_fullname(name)
def before_content_node(self, node):
if 'returns' in self.options:
node += CSNodes.Description(title=_('returns').title(), desc=self.options['returns'])
def after_content_node(self, node):
options_values = list(value for key, value in self.options.items() if key != 'noindex')
i = 0
        for (_pmod, _ptyp, pname, _pval) in self._params_list:  # don't rebind the _() translator
if i < len(options_values):
node += CSNodes.Description(title=pname, desc=options_values[i], lower=True)
i += 1
def after_content(self):
super().after_content()
if self._params_list is not None and len(self._params_list) > 0:
del self._params_list
def parse_signature(self, sig):
match = METHOD_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid method signature. Got: {}'.format(sig))
mod, typ, name, generic, params = match.groups()
return mod, typ, name.strip(), generic, params
@staticmethod
def parse_param_signature(sig):
match = PARAM_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid parameter signature. Got: {}'.format(sig))
mod, typ, name, value = match.groups()
return mod, typ.strip(), name.strip(), value
def _get_params(self, params):
if not params:
return None
result = []
params_group = split_sig(params)
for param in params_group:
pmod, ptyp, pname, pvalue = self.parse_param_signature(param)
result.append((pmod, ptyp, pname, pvalue))
return result
def get_index_text(self, sig, name, typ):
params_text = ''
if self._params_list:
names = [pname
for _, _, pname, _
in self._params_list]
params_text = '({})'.format(', '.join(names))
if typ:
rname = '{}{} (C# {})->{}'.format(name, params_text, _('method'), typ)
else:
rname = '{}{} (C# {})->{}'.format(name, params_text, _('constructor'), name)
return rname
def get_obj_name(self, sig):
_, typ, name, _, _ = self.parse_signature(sig)
return name, typ
class CSharpNamespace(Directive):
required_arguments = 1
def run(self):
env = self.state.document.settings.env
namespace = self.arguments[0].strip()
if namespace is None:
env.ref_context.pop(CSharpObject.PARENT_ATTR_NAME, None)
else:
env.ref_context[CSharpObject.PARENT_ATTR_NAME] = namespace
return []
class CSharpEndType(Directive):
required_arguments = 0
def run(self):
env = self.state.document.settings.env
if CSharpObject.PARENT_TYPE_NAME in env.ref_context:
env.ref_context.pop(CSharpObject.PARENT_TYPE_NAME, None)
return []
class CSharpXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode[CSharpObject.PARENT_ATTR_NAME] = env.ref_context.get(
CSharpObject.PARENT_ATTR_NAME)
return super(CSharpXRefRole, self).process_link(env, refnode,
has_explicit_title, title, target)
class CSharpIndex(Index):
name = 'csharp'
localname = 'CSharp Index'
shortname = 'CSharp'
def generate(self, docnames=None):
content = defaultdict(list)
objects = self.domain.get_objects()
objects = sorted(objects, key=lambda obj: obj[0])
for name, dispname, objtype, docname, anchor, _ in objects:
content[dispname.split('.')[-1][0].lower()].append(
(dispname, 0, docname, anchor, docname, '', objtype))
content = sorted(content.items())
return content, True
class CSharpDomain(Domain):
name = 'sphinxsharp'
label = 'C#'
roles = {
'type': CSharpXRefRole(),
'var': CSharpXRefRole(),
'prop': CSharpXRefRole(),
'meth': CSharpXRefRole(),
'enum': CSharpXRefRole()
}
object_types = {
'type': ObjType(_('type'), 'type', 'obj'),
'variable': ObjType(_('variable'), 'var', 'obj'),
'property': ObjType(_('property'), 'prop', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'enum': ObjType(_('enum'), 'enum', 'obj')
}
directives = {
'namespace': CSharpNamespace,
'end-type': CSharpEndType,
'type': CSharpType,
'variable': CSharpVariable,
'property': CSharpProperty,
'method': CSharpMethod,
'enum': CSharpEnum
}
indices = {
CSharpIndex
}
initial_data = {
'objects': {} # (objtype, name) -> (docname, objtype(class, struct etc.))
}
def clear_doc(self, docname):
for (objtype, name), (doc, _) in self.data['objects'].copy().items():
if doc == docname:
del self.data['objects'][(objtype, name)]
def get_objects(self):
for (objtype, name), (docname, _) in self.data['objects'].items():
yield (name, name, objtype, docname, '{}-{}'.format(objtype, name), 0)
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
targets = get_targets(target, node)
objects = self.data['objects']
roletypes = self.objtypes_for_role(typ)
types = ('type', 'enum', 'method') if typ is None else roletypes
for t in targets:
for objtyp in types:
key = (objtyp, t)
if key in objects:
obj = objects[key]
if typ is not None:
role = self.role_for_objtype(objtyp)
node['reftype'] = role
else:
contnode = CSNodes.UnknownType(typ=obj[1], text=target)
return make_refnode(builder, fromdocname, obj[0],
'{}-{}'.format(objtyp, t), contnode,
'{} {}'.format(obj[1], t))
if typ is None:
contnode = CSNodes.UnknownType(text=target)
return None
def merge_domaindata(self, docnames, otherdata):
for (objtype, name), (docname, typ) in otherdata['objects'].items():
if docname in docnames:
self.data['objects'][(objtype, name)] = (docname, typ)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
for typ in self.roles:
xref = self.resolve_xref(env, fromdocname, builder, typ,
target, node, contnode)
if xref:
return [('sphinxsharp:{}'.format(typ), xref)]
return []
class CSNodes:
_TYPES = ('class', 'struct', 'interface', 'enum', 'delegate')
class BaseNode(nodes.Element):
def __init__(self, rawsource='', *children, **attributes):
super().__init__(rawsource, *children, **attributes)
@staticmethod
def visit_html(self, node):
self.body.append(self.starttag(node, 'div'))
@staticmethod
def depart_html(self, node):
self.body.append('</div>')
class EmptyNode(BaseNode):
def __init__(self, rawsource='', *children, **attributes):
super().__init__(rawsource, *children, **attributes)
@staticmethod
def visit_html(self, node): pass
@staticmethod
def depart_html(self, node): pass
class InlineText(BaseNode):
def __init__(self, rawsource, type_class, text, *children, **attributes):
super().__init__(rawsource, *children, **attributes)
if type_class is None:
return
self['classes'].append(type_class)
if text:
self.append(nodes.raw(text=text, format='html'))
@staticmethod
def visit_html(self, node):
self.body.append(self.starttag(node, 'span').replace('\n', ''))
@staticmethod
def depart_html(self, node):
self.body.append('</span>')
class Description(BaseNode):
def __init__(self, rawsource='', title='', desc='', *children, **attributes):
super().__init__(rawsource, *children, **attributes)
self['classes'].append('desc')
if title and desc:
if 'lower' not in attributes:
title = title[0].upper() + title[1:]
node = nodes.raw(
text='<strong class="first">{}:</strong><span class="last">{}</span>'.format(title, desc),
format='html')
self.append(node)
else:
raise Exception('Title and description must be assigned.')
class Modificator(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'mod', text, *children, **attributes)
class UnknownType(InlineText):
def __init__(self, rawsource='', typ='', text='', *children, **attributes):
objclass = typ
if not text:
super().__init__(rawsource, None, text, *children, **attributes)
return
if typ not in CSNodes._TYPES:
objclass = 'kw'
if typ not in VALUE_KEYWORDS:
objclass = 'unknown'
super().__init__(rawsource, objclass, text, *children, **attributes)
class TextNode(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'text', text, *children, **attributes)
class MethodName(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'meth-name', text, *children, **attributes)
class VariableName(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'var-name', text, *children, **attributes)
class Keyword(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'kw', text, *children, **attributes)
class Enum(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'enum', text, *children, **attributes)
class Generic(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'generic', text, *children, **attributes)
@staticmethod
def add_nodes(app):
app.add_node(CSNodes.Description,
html=(CSNodes.Description.visit_html, CSNodes.Description.depart_html))
app.add_node(CSNodes.Modificator,
html=(CSNodes.Modificator.visit_html, CSNodes.Modificator.depart_html))
app.add_node(CSNodes.UnknownType,
html=(CSNodes.UnknownType.visit_html, CSNodes.UnknownType.depart_html))
app.add_node(CSNodes.TextNode,
html=(CSNodes.TextNode.visit_html, CSNodes.TextNode.depart_html))
app.add_node(CSNodes.Enum,
html=(CSNodes.Enum.visit_html, CSNodes.Enum.depart_html))
app.add_node(CSNodes.Keyword,
html=(CSNodes.Keyword.visit_html, CSNodes.Keyword.depart_html))
app.add_node(CSNodes.MethodName,
html=(CSNodes.MethodName.visit_html, CSNodes.MethodName.depart_html))
app.add_node(CSNodes.VariableName,
html=(CSNodes.VariableName.visit_html, CSNodes.VariableName.depart_html))
app.add_node(CSNodes.BaseNode,
html=(CSNodes.BaseNode.visit_html, CSNodes.BaseNode.depart_html))
app.add_node(CSNodes.EmptyNode,
html=(CSNodes.EmptyNode.visit_html, CSNodes.EmptyNode.depart_html))
app.add_node(CSNodes.Generic,
html=(CSNodes.Generic.visit_html, CSNodes.Generic.depart_html))
def split_sig(params):
if not params:
return None
result = []
current = ''
level = 0
for char in params:
if char in ('<', '{', '['):
level += 1
elif char in ('>', '}', ']'):
level -= 1
if char != ',' or level > 0:
current += char
elif char == ',' and level == 0:
result.append(current)
current = ''
if current.strip() != '':
result.append(current)
return result
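# Illustrative behaviour (editor's addition): split_sig() splits on commas only
# at nesting depth 0, so generic arguments stay intact, e.g.
#   split_sig('int a, Dict<string, int> b')  ->  ['int a', ' Dict<string, int> b']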
def get_targets(target, node):
targets = [target]
if node[CSharpObject.PARENT_ATTR_NAME] is not None:
parts = node[CSharpObject.PARENT_ATTR_NAME].split('.')
while parts:
targets.append('{}.{}'.format('.'.join(parts), target))
parts = parts[:-1]
return targets
def copy_asset_files(app, exc):
package_dir = path.abspath(path.dirname(__file__))
asset_files = [path.join(package_dir, '_static/css/sphinxsharp.css')]
if exc is None: # build succeeded
for asset_path in asset_files:
copy_asset(asset_path, path.join(app.outdir, '_static'))
def setup(app):
app.connect('build-finished', copy_asset_files)
package_dir = path.abspath(path.dirname(__file__))
app.add_domain(CSharpDomain)
app.add_css_file('sphinxsharp.css')
override_file = path.join(app.confdir, '_static/sphinxsharp-override.css')
if path.exists(override_file):
app.add_css_file('sphinxsharp-override.css')
CSNodes.add_nodes(app)
locale_dir = path.join(package_dir, 'locales')
app.add_message_catalog('sphinxsharp', locale_dir)
return {
'version': '1.0.2',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 38.085433 | 118 | 0.569884 | 30,805 | 0.885888 | 180 | 0.005176 | 2,271 | 0.065309 | 0 | 0 | 3,684 | 0.105944 |
81ff4f468611ece2f0ec909a6f48f5be0e5338fb | 404 | py | Python | articles/migrations/0003_article_published_at.py | mosalaheg/django3.2 | 551ecd0c8f633bcd9c37a95688e7bed958c0b91c | [
"MIT"
]
| null | null | null | articles/migrations/0003_article_published_at.py | mosalaheg/django3.2 | 551ecd0c8f633bcd9c37a95688e7bed958c0b91c | [
"MIT"
]
| null | null | null | articles/migrations/0003_article_published_at.py | mosalaheg/django3.2 | 551ecd0c8f633bcd9c37a95688e7bed958c0b91c | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.7 on 2021-10-02 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0002_auto_20211002_1019'),
]
operations = [
migrations.AddField(
model_name='article',
name='published_at',
field=models.DateTimeField(blank=True, null=True),
),
]
| 21.263158 | 62 | 0.608911 | 311 | 0.769802 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.259901 |
81ff80a60fb6d3f51394a723adeda192add9c640 | 5,622 | py | Python | kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
]
| 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
]
| 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
]
| 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | from KratosMultiphysics import ParallelEnvironment, IsDistributedRun
if IsDistributedRun():
from KratosMultiphysics.mpi import DataCommunicatorFactory
import KratosMultiphysics.KratosUnittest as UnitTest
import math
class TestDataCommunicatorFactory(UnitTest.TestCase):
def setUp(self):
self.registered_comms = []
self.default_data_communicator = ParallelEnvironment.GetDefaultDataCommunicator()
self.original_default = ParallelEnvironment.GetDefaultDataCommunicatorName()
def tearDown(self):
if len(self.registered_comms) > 0:
ParallelEnvironment.SetDefaultDataCommunicator(self.original_default)
for comm_name in self.registered_comms:
ParallelEnvironment.UnregisterDataCommunicator(comm_name)
def markForCleanUp(self,comm_name):
self.registered_comms.append(comm_name)
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorDuplication(self):
duplicate_comm = DataCommunicatorFactory.DuplicateAndRegister(self.default_data_communicator, "Duplicate")
self.markForCleanUp("Duplicate") # to clean up during tearDown
self.assertEqual(duplicate_comm.Rank(), self.default_data_communicator.Rank())
self.assertEqual(duplicate_comm.Size(), self.default_data_communicator.Size())
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorSplit(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
split_comm = DataCommunicatorFactory.SplitAndRegister(self.default_data_communicator, rank % 2, 0, "EvenOdd")
self.markForCleanUp("EvenOdd") # to clean up during tearDown
expected_rank = rank // 2
if rank % 2 == 0:
expected_size = math.ceil(size/2)
else:
expected_size = math.floor(size/2)
self.assertEqual(split_comm.Rank(), expected_rank)
self.assertEqual(split_comm.Size(), expected_size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 1, "Test requires at least two ranks.")
def testDataCommunicatorCreateFromRange(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
ranks = [i for i in range(1,size)]
range_comm = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, ranks, "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
if rank == 0:
self.assertTrue(range_comm.IsNullOnThisRank())
self.assertFalse(range_comm.IsDefinedOnThisRank())
else:
self.assertEqual(range_comm.Rank(), rank-1)
self.assertEqual(range_comm.Size(), size-1)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateUnion(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
# Create union communicator (should contain all ranks)
union_comm = DataCommunicatorFactory.CreateUnionAndRegister(all_except_first, all_except_last, self.default_data_communicator, "Union")
self.markForCleanUp("Union") # to clean up during tearDown
self.assertFalse(union_comm.IsNullOnThisRank())
self.assertEqual(union_comm.Rank(), rank)
self.assertEqual(union_comm.Size(), size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateIntersection(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
intersection_comm = DataCommunicatorFactory.CreateIntersectionAndRegister(
all_except_first, all_except_last, self.default_data_communicator, "Intersection")
self.markForCleanUp("Intersection") # to clean up during tearDown
if rank == 0 or rank == size - 1:
# The first and last ranks do not participate in the intersection communicator
self.assertTrue(intersection_comm.IsNullOnThisRank())
else:
self.assertEqual(intersection_comm.Rank(), rank - 1 )
self.assertEqual(intersection_comm.Size(), size - 2 )
if __name__ == "__main__":
UnitTest.main()
| 52.055556 | 155 | 0.730523 | 5,351 | 0.951797 | 0 | 0 | 4,671 | 0.830843 | 0 | 0 | 962 | 0.171113 |
81ffc4260214e21a8fbb8d247a68944ab547969b | 643 | py | Python | example/usage/example_kate.py | vodka2/vkaudiotoken-python | 5720e4cf77f5e1b20c3bf57f3df0717638a539e0 | [
"MIT"
]
| 32 | 2020-07-21T18:32:59.000Z | 2022-03-20T21:16:11.000Z | example/usage/example_kate.py | vodka2/vkaudiotoken-python | 5720e4cf77f5e1b20c3bf57f3df0717638a539e0 | [
"MIT"
]
| 1 | 2020-10-04T04:41:06.000Z | 2020-10-05T11:43:48.000Z | example/usage/example_kate.py | vodka2/vkaudiotoken-python | 5720e4cf77f5e1b20c3bf57f3df0717638a539e0 | [
"MIT"
]
| 2 | 2021-09-21T01:17:05.000Z | 2022-03-17T10:17:22.000Z | from __future__ import print_function
try:
import vkaudiotoken
except ImportError:
import path_hack
from vkaudiotoken import supported_clients
import sys
import requests
import json
token = sys.argv[1]
user_agent = supported_clients.KATE.user_agent
sess = requests.session()
sess.headers.update({'User-Agent': user_agent})
def prettyprint(result):
print(json.dumps(json.loads(result.content.decode('utf-8')), indent=2))
prettyprint(sess.get(
"https://api.vk.com/method/audio.getById",
params=[('access_token', token),
('audios', '371745461_456289486,-41489995_202246189'),
('v', '5.95')]
))
| 21.433333 | 75 | 0.715397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.205288 |
c3013dccb7b2137642e33db7031ac540e1d949e4 | 811 | py | Python | create_order.py | behnam71/Crypto_P | 1196f06c611eac65dece323d62104233cf2386b1 | [
"MIT"
]
| null | null | null | create_order.py | behnam71/Crypto_P | 1196f06c611eac65dece323d62104233cf2386b1 | [
"MIT"
]
| null | null | null | create_order.py | behnam71/Crypto_P | 1196f06c611eac65dece323d62104233cf2386b1 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.binance({
'apiKey': 'SmweB9bNM2qpYkgl4zaQSFPpSzYpyoJ6B3BE9rCm0XYcAdIE0b7n6bm11e8jMwnI',
'secret': '8x6LtJztmIeGPZyiJOC7lVfg2ixCUYkhVV7CKVWq2LVlPh8mo3Ab7SMkaC8qTZLt',
'enableRateLimit': True,
})
exchange.urls['api'] = exchange.urls['test'] # use the testnet
symbol = 'BTC/USDT'; type = 'market' # or limit
amount = 0.01; price = None; side = 'buy' # or sell
# extra params and overrides if needed
params = {
'test': True, # test if it's valid, but don't actually place it
}
order = exchange.create_order(symbol, type, side, amount, price, params)
pprint(order)
| 27.033333 | 83 | 0.713933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.458693 |
c301529eb7d8f8a6047d8e286ff806d7da8427d3 | 2,235 | py | Python | tools/testrunner/outproc/message.py | LancerWang001/v8 | 42ff4531f590b901ade0a18bfd03e56485fe2452 | [
"BSD-3-Clause"
]
| 20,995 | 2015-01-01T05:12:40.000Z | 2022-03-31T21:39:18.000Z | tools/testrunner/outproc/message.py | Andrea-MariaDB-2/v8 | a0f0ebd7a876e8cb2210115adbfcffe900e99540 | [
"BSD-3-Clause"
]
| 333 | 2020-07-15T17:06:05.000Z | 2021-03-15T12:13:09.000Z | tools/testrunner/outproc/message.py | Andrea-MariaDB-2/v8 | a0f0ebd7a876e8cb2210115adbfcffe900e99540 | [
"BSD-3-Clause"
]
| 4,523 | 2015-01-01T15:12:34.000Z | 2022-03-28T06:23:41.000Z | # Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import re
from . import base
class OutProc(base.ExpectedOutProc):
def __init__(self, expected_outcomes, basepath, expected_fail,
expected_filename, regenerate_expected_files):
super(OutProc, self).__init__(expected_outcomes, expected_filename,
regenerate_expected_files)
self._basepath = basepath
self._expected_fail = expected_fail
def _is_failure_output(self, output):
fail = output.exit_code != 0
if fail != self._expected_fail:
return True
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
with open(self._basepath + '.out') as f:
for line in f:
if line.startswith("#") or not line.strip():
continue
expected_lines.append(line)
raw_lines = output.stdout.splitlines()
actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
if len(expected_lines) != len(actual_lines):
return True
# Try .js first, and fall back to .mjs.
# TODO(v8:9406): clean this up by never separating the path from
# the extension in the first place.
base_path = self._basepath + '.js'
if not os.path.exists(base_path):
base_path = self._basepath + '.mjs'
env = {
'basename': os.path.basename(base_path),
}
for (expected, actual) in itertools.izip_longest(
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
      pattern = pattern.replace('\\{NUMBER\\}', r'\d+(?:\.\d*)?')
pattern = '^%s$' % pattern
if not re.match(pattern, actual):
return True
return False
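  # Illustrative expansion (editor's addition): an expected line such as
  # 'allocated: {NUMBER} *' is first re.escape()d, then '\*' becomes '.*' and
  # '\{NUMBER\}' becomes '\d+(?:\.\d*)?'; the result is anchored as '^...$'
  # and matched against each surviving stdout line.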
def _ignore_line(self, string):
"""Ignore empty lines, valgrind output, Android output."""
return (
not string or
not string.strip() or
string.startswith("==") or
string.startswith("**") or
string.startswith("ANDROID") or
# Android linker warning.
string.startswith('WARNING: linker:')
)
| 32.867647 | 72 | 0.648322 | 2,010 | 0.899329 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.251007 |
c3027f734157db362e121ea8ce2b5d36ad4e6075 | 604 | py | Python | gemtown/users/urls.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
]
| null | null | null | gemtown/users/urls.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
]
| 5 | 2020-09-04T20:13:39.000Z | 2022-02-17T22:03:33.000Z | gemtown/users/urls.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
]
| null | null | null | from django.urls import path
from . import views
app_name = "users"
urlpatterns = [
path("all/", view=views.UserList.as_view(), name="all_user"),
path("<int:user_id>/password/", view=views.ChangePassword.as_view(), name="change password"),
path("<int:user_id>/follow/", view=views.FollowUser.as_view(), name="follow user"),
path("<int:user_id>/unfollow/", view=views.UnfollowUser.as_view(), name="unfollow user"),
path("<int:user_id>/", view=views.UserFeed.as_view(), name="user_detail_infomation"),
path("login/facebook/", view=views.FacebookLogin.as_view(), name="fb_login"),
] | 50.333333 | 97 | 0.701987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.344371 |
c302fe24cced11c5bc506098882205738bad2b79 | 3,132 | py | Python | Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
]
| 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
]
| 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
]
| 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import pytest
from Thycotic import Client, \
secret_password_get_command, secret_username_get_command, \
secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \
secret_delete_command, folder_create_command, folder_delete_command, folder_update_command
from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \
SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \
SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT
from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \
SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \
SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \
FOLDER_UPDATE_RAW_RESPONSE
GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"}
GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"}
SECRET_GET_ARGS = {"secret_id": "4"}
SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "NEWPASSWORD1"}
SECRET_CHECKOUT_ARGS = {"secret_id": "4"}
SECRET_CHECKIN_ARGS = {"secret_id": "4"}
SECRET_DELETE_ARGS = {"id": "9"}
FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"}
FOLDER_DELETE_ARGS = {"folder_id": "9"}
FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}
@pytest.mark.parametrize('command, args, http_response, context', [
(secret_password_get_command, GET_PASSWORD_BY_ID_ARGS, GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT),
(secret_username_get_command, GET_USERNAME_BY_ID_ARGS, GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT),
(secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE, SECRET_GET_CONTENT),
(secret_password_update_command, SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,
SECRET_PASSWORD_UPDATE_CONTEXT),
(secret_checkout_command, SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE, SECRET_CHECKOUT_CONTEXT),
(secret_checkin_command, SECRET_CHECKIN_ARGS, SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT),
(secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE, SECRET_DELETE_CONTEXT),
(folder_create_command, FOLDER_CREATE_ARGS, FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT),
(folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE, FOLDER_DELETE_CONTEXT),
(folder_update_command, FOLDER_UPDATE_ARGS, FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)
])
def test_thycotic_commands(command, args, http_response, context, mocker):
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://thss.softwarium.net/SecretServer", username="xsoar1", password="HfpuhXjv123",
proxy=False, verify=False)
mocker.patch.object(Client, '_http_request', return_value=http_response)
outputs = command(client, **args)
results = outputs.to_context()
assert results.get("EntryContext") == context
| 60.230769 | 120 | 0.814815 | 0 | 0 | 0 | 0 | 1,647 | 0.525862 | 0 | 0 | 379 | 0.121009 |
c3044c3a6846d86e6151eb00472156db75ba2d69 | 1,046 | py | Python | xml_to_csv.py | bhavdeepsingh33/blood-cell-detection | 1afe0ce7aba7c621eb13fc055cc706981fcf4962 | ["MIT"] | null | null | null | xml_to_csv.py | bhavdeepsingh33/blood-cell-detection | 1afe0ce7aba7c621eb13fc055cc706981fcf4962 | ["MIT"] | null | null | null | xml_to_csv.py | bhavdeepsingh33/blood-cell-detection | 1afe0ce7aba7c621eb13fc055cc706981fcf4962 | ["MIT"] | null | null | null |
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
member[0].text,
int(member[4][0].text),
int(member[4][2].text),
int(member[4][1].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['image_names', 'cell_type', 'xmin', 'xmax', 'ymin', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
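# Illustrative output (hypothetical values): each <object> element in a Pascal
# VOC-style annotation becomes one row, e.g.
# ('BloodImage_00001.jpg', 'RBC', 58, 142, 87, 161).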
def main():
for folder in ['train','test']:
image_path = os.path.join(os.getcwd(), ('images/' + folder))
xml_df = xml_to_csv(image_path)
xml_df.to_csv(('images/' + folder + '_labels.csv'), index=None)
print('Successfully converted xml to csv.')
if __name__ == '__main__':
    main()
| 29.885714 | 78 | 0.543977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.147228 |
c304c12fe37620c738efd7817690de209aad07c4 | 1,190 | py | Python | src/pynnet/test.py | RalphMao/kaldi-pynnet | a8c050e976a138b43ff0c2ea2a1def72f51f9177 | [
"Apache-2.0"
]
| null | null | null | src/pynnet/test.py | RalphMao/kaldi-pynnet | a8c050e976a138b43ff0c2ea2a1def72f51f9177 | [
"Apache-2.0"
]
| null | null | null | src/pynnet/test.py | RalphMao/kaldi-pynnet | a8c050e976a138b43ff0c2ea2a1def72f51f9177 | [
"Apache-2.0"
]
| null | null | null | import _nnet
import numpy as np
import IPython
net = _nnet.Nnet()
net.read('/home/maohz12/online_50h_Tsinghua/exp_train_50h/lstm_karel_bak/nnet/nnet_iter14_learnrate7.8125e-07_tr1.2687_cv1.6941')
# Test1
blobs = net.layers[0].get_params()
x = blobs[1].data.flatten()
x_test = np.fromfile('test/1.bin', 'f')
assert np.sum(abs(x-x_test)) < 1e-5
x = blobs[4].data.flatten()
x_test = np.fromfile('test/4.bin', 'f')
assert np.sum(abs(x-x_test)) < 1e-5
blobs[1].data[:] = np.arange(blobs[1].data.size).reshape(blobs[1].data.shape)
blobs[4].data[:] = np.arange(blobs[4].data.size).reshape(blobs[4].data.shape)
net.layers[0].set_params(blobs)
net.write('test/test_nnet', 0)
pointer, read_only_flag = blobs[1].data.__array_interface__['data']
# Test 2
data_copy = blobs[1].data.copy()
del net
pointer, read_only_flag = blobs[1].data.__array_interface__['data']
assert np.sum(abs(blobs[1].data - data_copy)) < 1e-5
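# (Test 2 above checks that the blob's data survives deletion of the net,
# i.e. the numpy array owns or copies its memory instead of pointing into
# storage freed along with the Nnet object.)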
# Test 3
net = _nnet.Nnet()
net.read('test/test_nnet')
blobs_new = net.layers[0].get_params()
x = blobs[1].data
x_test = blobs_new[1].data
assert np.sum(abs(x-x_test)) < 1e-5
x = blobs[4].data
x_test = blobs_new[4].data
assert np.sum(abs(x-x_test)) < 1e-5
print "Test passed"
| 27.045455 | 129 | 0.715966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.192437 |
c304d7f7756e66ca85c0f39399c15dd4c7181588 | 1,329 | py | Python | collegiate-explorer-admin/cc_admin/cc_admin/test.py | Chit-Chaat/Collegiate_Explorer_APP | f30171d01fec62a836332b5508374144fbb487c7 | [
"MIT"
]
| 3 | 2021-05-24T23:06:40.000Z | 2021-11-08T10:32:42.000Z | collegiate-explorer-admin/cc_admin/cc_admin/test.py | Chit-Chaat/Collegiate_Explorer_APP | f30171d01fec62a836332b5508374144fbb487c7 | [
"MIT"
]
| 4 | 2020-10-12T03:00:43.000Z | 2020-11-17T01:47:56.000Z | collegiate-explorer-admin/cc_admin/cc_admin/test.py | Chit-Chaat/Collegiate_Explorer_APP | f30171d01fec62a836332b5508374144fbb487c7 | [
"MIT"
]
| 2 | 2021-03-01T15:30:26.000Z | 2022-01-13T21:30:20.000Z | __author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '10/28/2020 4:52 PM'
# import re
#
#
# def format_qs_score(score_str):
# """
# help you generate a qs score
# 1 - 100 : 5
# 141-200 : 4
# =100: 4
# N/A 3
# :param score_str:
# :return:
# """
# score = 3
# if not score_str or score_str != "N/A":
# try:
# parts = int(list(filter(lambda val: val,
# list(re.split('-|=', score_str))))[0])
# except:
# return 3
# score = 5 - int(parts / 100)
# if score > 5 or score < 1:
# return 3
# return score
#
#
# print(format_qs_score("=100"))
#
# print(list(filter(lambda val: val, re.split('-|=', "=100"))))
# import csv
# import numpy as np
# import requests
#
# with open('./college_explorer.csv', newline='', encoding='utf-8') as file:
# data = list(csv.reader(file))
# data = np.array(data)
# img_list = data[1:, 33].tolist()
#
# img_list = list(filter(lambda url: url != 'N/A', img_list))
#
#
# for url in img_list:
# response = requests.get(url)
# if response.status_code == 200:
# school_name = url.split('/')[-1].split('_')[0]
# with open("./images/" + school_name + ".jpg", 'wb') as f:
#             f.write(response.content)
| 26.058824 | 76 | 0.527464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,243 | 0.93529 |
c306596434700224d0b28f389b38ad8d05d0205f | 4,311 | py | Python | djangocms_translations/utils.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | [
"BSD-3-Clause"
]
| 3 | 2019-01-14T13:30:38.000Z | 2020-08-10T22:16:06.000Z | djangocms_translations/utils.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | [
"BSD-3-Clause"
]
| 5 | 2018-12-20T13:56:47.000Z | 2021-07-20T07:13:01.000Z | djangocms_translations/utils.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
import json
from itertools import chain
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import BooleanField
from django.forms import modelform_factory
from django.utils.lru_cache import lru_cache
from django.utils.safestring import mark_safe
from django.utils.translation import get_language_info
from djangocms_transfer.utils import get_plugin_class, get_plugin_model
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import JsonLexer
from yurl import URL
from .conf import TRANSLATIONS_CONF
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
USE_HTTPS = getattr(settings, 'URLS_USE_HTTPS', False)
def get_plugin_form_class(plugin_type, fields):
plugin_class = get_plugin_class(plugin_type)
plugin_fields = chain(
plugin_class.model._meta.concrete_fields,
plugin_class.model._meta.private_fields,
plugin_class.model._meta.many_to_many,
)
plugin_fields_disabled = [
field.name for field in plugin_fields
if not getattr(field, 'editable', False)
]
plugin_form_class = modelform_factory(
plugin_class.model,
fields=fields,
exclude=plugin_fields_disabled,
)
return plugin_form_class
def get_plugin_form(plugin_type, data):
_data = data.copy()
plugin_form_class = get_plugin_form_class(plugin_type, fields=data.keys())
multi_value_fields = [
(name, field) for name, field in plugin_form_class.base_fields.items()
if hasattr(field.widget, 'decompress') and name in data
]
for name, field in multi_value_fields:
# The value used on the form data is compressed,
# and the form contains multi-value fields which expect
# a decompressed value.
compressed = data[name]
try:
decompressed = field.widget.decompress(compressed)
except ObjectDoesNotExist:
break
for pos, value in enumerate(decompressed):
_data['{}_{}'.format(name, pos)] = value
return plugin_form_class(_data)
def add_domain(url, domain=None):
# add the domain to this url.
if domain is None:
domain = Site.objects.get_current().domain
url = URL(url)
if USE_HTTPS:
url = url.replace(scheme='https')
else:
url = url.replace(scheme='http')
return str(url.replace(host=domain))
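# Illustrative call (hypothetical values): add_domain('/en/page/', 'example.com')
# returns 'http://example.com/en/page/', or the https:// variant when
# settings.URLS_USE_HTTPS is truthy.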
def pretty_data(data, LexerClass):
formatter = HtmlFormatter(style='colorful')
data = highlight(data, LexerClass(), formatter)
style = '<style>' + formatter.get_style_defs() + '</style><br>'
return mark_safe(style + data)
def pretty_json(data):
data = json.dumps(json.loads(data), sort_keys=True, indent=2)
return pretty_data(data, JsonLexer)
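# e.g. pretty_json('{"b": 1, "a": 2}') (hypothetical input) returns mark_safe
# HTML that renders the re-serialised JSON with sorted keys, a 2-space indent
# and Pygments colouring.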
@lru_cache(maxsize=None)
def get_translatable_fields(plugin_type):
conf = TRANSLATIONS_CONF.get(plugin_type, {})
if 'fields' in conf:
fields = conf['fields']
else:
model = get_plugin_model(plugin_type)
opts = model._meta.concrete_model._meta
fields = opts.local_fields
fields = [
field.name
for field in fields
if (
not field.is_relation and
not field.primary_key and
not field.choices and
not isinstance(field, BooleanField)
)
]
excluded = conf.get('excluded_fields', [])
return set(fields).difference(set(excluded))
@lru_cache(maxsize=None)
def get_text_field_child_label(plugin_type):
return settings.DJANGOCMS_TRANSLATIONS_CONF.get(plugin_type, {}).get('text_field_child_label')
def get_language_name(lang_code):
info = get_language_info(lang_code)
if info['code'] == lang_code:
return info['name']
try:
return dict(settings.LANGUAGES)[lang_code]
except KeyError:
# fallback to known name
return info['name']
def get_page_url(page, language, is_https=False):
return urljoin(
'http{}://{}'.format(
's' if is_https else '',
page.node.site.domain,
),
page.get_absolute_url(language=language),
)
| 28.549669 | 98 | 0.68128 | 0 | 0 | 0 | 0 | 868 | 0.201345 | 0 | 0 | 386 | 0.089538 |
c3068d09ac63699b36a60bd71dd55030585fc665 | 597 | py | Python | bin/render_ingress.py | phplaboratory/madcore-ai | ea866334480d77b084ce971506cfdb285405c122 | [
"MIT"
]
| null | null | null | bin/render_ingress.py | phplaboratory/madcore-ai | ea866334480d77b084ce971506cfdb285405c122 | [
"MIT"
]
| null | null | null | bin/render_ingress.py | phplaboratory/madcore-ai | ea866334480d77b084ce971506cfdb285405c122 | [
"MIT"
]
| null | null | null | import sys, os, json, jinja2, redis
from jinja2 import Template
r_server = redis.StrictRedis('127.0.0.1', db=2)
i_key = "owner-info"
json_data = r_server.get(i_key)
if json_data is None:
    sys.exit("redis key 'owner-info' not found")
data = json.loads(json_data)
main_domain = data['Hostname']
fqdn = sys.argv[1] + ".ext." + main_domain
config_template = open('/opt/madcore/bin/templates/ingress.template').read()
template = Template(config_template)
config = (template.render(HOST=fqdn, SERVICE_NAME=sys.argv[2], SERVICE_PORT=sys.argv[3], NAMESPACE=sys.argv[4]))
open("/opt/ingress/" + sys.argv[2] + ".yaml", "w").write(config)
| 35.117647 | 112 | 0.715243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.184255 |
c307055a5d64c20c7212a67b032444ffbf9d764a | 569 | py | Python | Linear_Insertion_Sort.py | toppassion/python-master-app | 21d854186664440f997bfe53010b242f62979e7f | ["MIT"] | null | null | null | Linear_Insertion_Sort.py | toppassion/python-master-app | 21d854186664440f997bfe53010b242f62979e7f | ["MIT"] | null | null | null | Linear_Insertion_Sort.py | toppassion/python-master-app | 21d854186664440f997bfe53010b242f62979e7f | ["MIT"] | 1 | 2021-12-08T11:38:20.000Z | 2021-12-08T11:38:20.000Z |
def Linear_Search(Test_arr, val):
index = 0
for i in range(len(Test_arr)):
if val > Test_arr[i]:
index = i+1
return index
def Insertion_Sort(Test_arr):
for i in range(1, len(Test_arr)):
val = Test_arr[i]
j = Linear_Search(Test_arr[:i], val)
Test_arr.pop(i)
Test_arr.insert(j, val)
return Test_arr
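# Illustrative behaviour (hypothetical input): Insertion_Sort([3, 1, 2]) returns
# [1, 2, 3]. Linear_Search scans the sorted prefix left to right for the
# insertion point, so the overall sort runs in O(n^2).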
if __name__ == "__main__":
Test_list = input("Enter the list of Numbers: ").split()
Test_list = [int(i) for i in Test_list]
print(f"Binary Insertion Sort: {Insertion_Sort(Test_list)}") | 27.095238 | 64 | 0.616872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.161687 |
c30749f6e672c3d0997217dae6e0ef97c37975d8 | 631 | py | Python | scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | ["Apache-2.0"] | 105 | 2018-02-07T22:07:47.000Z | 2022-03-31T18:16:47.000Z | scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | ["Apache-2.0"] | 57 | 2018-02-07T23:07:41.000Z | 2021-11-21T17:14:06.000Z | scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | c192d5c27eb2098e440f7726eb1bff40ed004db5 | ["Apache-2.0"] | 10 | 2018-02-24T23:44:51.000Z | 2022-03-02T07:52:27.000Z |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
| 57.363636 | 462 | 0.505547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.812995 |
c307664c89867d683750f1b12c8b33f9be0a22ae | 443 | py | Python | pyside/lesson_10_main.py | LueyEscargot/pyGuiTest | c072fe29a7c94dc60ec54344a5d4a91253d25f3f | ["MIT"] | null | null | null | pyside/lesson_10_main.py | LueyEscargot/pyGuiTest | c072fe29a7c94dc60ec54344a5d4a91253d25f3f | ["MIT"] | null | null | null | pyside/lesson_10_main.py | LueyEscargot/pyGuiTest | c072fe29a7c94dc60ec54344a5d4a91253d25f3f | ["MIT"] | null | null | null |
import sys
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import QFile
from lesson_10_mainWidget import Ui_MainWindow
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| 23.315789 | 55 | 0.708804 | 161 | 0.363431 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.022573 |
c308e55ef9a8f6ca2122399901177b70c65eef30 | 1,208 | py | Python | test/test_everything.py | jameschapman19/Eigengame | 165d1bf35076fbfc6e65a987cb2e09a174776927 | ["MIT"] | null | null | null | test/test_everything.py | jameschapman19/Eigengame | 165d1bf35076fbfc6e65a987cb2e09a174776927 | ["MIT"] | null | null | null | test/test_everything.py | jameschapman19/Eigengame | 165d1bf35076fbfc6e65a987cb2e09a174776927 | ["MIT"] | null | null | null |
import jax.numpy as jnp
import numpy as np
from jax import random
from algorithms import Game, GHA, Oja, Krasulina, Numpy
def test_pca():
"""
At the moment just checks they all run.
Returns
-------
"""
n = 10
p = 2
n_components = 2
batch_size = 2
epochs = 10
key = random.PRNGKey(0)
X = random.normal(key, (n, p))
X = X / jnp.linalg.norm(X, axis=0)
numpy = Numpy(n_components=n_components).fit(X)
game = Game(
n_components=n_components, batch_size=batch_size, epochs=epochs
).fit(X)
gha = GHA(n_components=n_components, batch_size=batch_size, epochs=epochs).fit(
X
)
oja = Oja(n_components=n_components, batch_size=batch_size, epochs=epochs).fit(
X
)
krasulina = Krasulina(
n_components=n_components, batch_size=batch_size, epochs=epochs
).fit(X)
assert (
np.testing.assert_almost_equal(
[
game.score(X),
gha.score(X),
oja.score(X),
krasulina.score(X),
],
numpy.score(X),
decimal=0,
)
is None
)
| 24.16 | 83 | 0.543874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.067053 |
c309029b235f5e5d51eecba38f135cf9e46dd8c7 | 971 | py | Python | script/python/result_get.py | yztong/LeNet_RTL | 0f714522dd7adc491bfbcf34b22f9f594e7442af | ["MIT"] | 29 | 2018-03-22T07:26:37.000Z | 2022-03-21T08:28:36.000Z | script/python/result_get.py | honorpeter/LeNet_RTL | 0f714522dd7adc491bfbcf34b22f9f594e7442af | ["MIT"] | 1 | 2021-04-12T13:28:46.000Z | 2021-04-21T10:00:53.000Z | script/python/result_get.py | honorpeter/LeNet_RTL | 0f714522dd7adc491bfbcf34b22f9f594e7442af | ["MIT"] | 9 | 2019-04-06T06:27:41.000Z | 2021-12-28T12:11:18.000Z |
import numpy as np
from radix import radixConvert
c = radixConvert()
a = np.load("../../data/5/layer4.npy")
print(a.shape)
a = a*128
a = np.around(a).astype(np.int16)
print(a)
a = np.load('../../data/6.npy')
a = a*128
a = np.around(a).astype(np.int8)
print(a.shape)
for i in range(84):
print(i)
print(a[i])
'''
a = a*128
print(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
if a[i][j] > 127:
a[i][j] = 127
a = np.around(a).astype(np.int8)
print(a)
print(a[4][17])
weight_file = open('f1_rom.coe', 'w')
weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n')
weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n')
for i in range(32):
for j in range(32):
if(i < 2 or i > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
elif(j < 2 or j > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
else:
weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n')
'''
| 23.682927 | 72 | 0.582904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.715757 |
c309031e8b5363fdf7fbbe96607fc907a7dbcec7 | 541 | py | Python | anygraph/wrapper.py | gemerden/anygraph | c20cab82ad4a7f4117690a445e136c2b0e84f0f3 | ["MIT"] | 10 | 2020-06-11T14:11:58.000Z | 2021-12-31T11:59:26.000Z | anygraph/wrapper.py | gemerden/anygraph | c20cab82ad4a7f4117690a445e136c2b0e84f0f3 | ["MIT"] | null | null | null | anygraph/wrapper.py | gemerden/anygraph | c20cab82ad4a7f4117690a445e136c2b0e84f0f3 | ["MIT"] | null | null | null |
class Wrapper(object):
wrapper_classes = {}
@classmethod
def wrap(cls, obj):
return cls(obj)
def __init__(self, wrapped):
self.__dict__['wrapped'] = wrapped
def __getattr__(self, name):
return getattr(self.wrapped, name)
def __setattr__(self, name, value):
setattr(self.wrapped, name, value)
def __delattr__(self, name):
delattr(self.wrapped, name)
def __str__(self):
return str(self.wrapped)
def __repr__(self):
return repr(self.wrapped)
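# Minimal illustrative usage (the Target class is hypothetical, not part of
# anygraph); attribute access, assignment and deletion are all forwarded to
# the wrapped object:
#   class Target: pass
#   wrapped = Wrapper.wrap(Target())
#   wrapped.answer = 42
#   assert wrapped.answer == 42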
| 19.321429 | 42 | 0.619224 | 537 | 0.992606 | 0 | 0 | 60 | 0.110906 | 0 | 0 | 9 | 0.016636 |
c3092b2cfa6e3e6a80652151da3eb7e1dffe233e | 1,237 | py | Python | saifooler/classifiers/image_net_classifier.py | sailab-code/SAIFooler | 76f91c33624273227d8ee2d974aa5b7b90ace5ac | ["MIT"] | null | null | null | saifooler/classifiers/image_net_classifier.py | sailab-code/SAIFooler | 76f91c33624273227d8ee2d974aa5b7b90ace5ac | ["MIT"] | null | null | null | saifooler/classifiers/image_net_classifier.py | sailab-code/SAIFooler | 76f91c33624273227d8ee2d974aa5b7b90ace5ac | ["MIT"] | null | null | null |
from saifooler.classifiers.classifier import Classifier
import torch
import json
import os
class ImageNetClassifier(Classifier):
def __init__(self, model, *args, **kwargs):
super().__init__(model, *args, **kwargs)
self.std = torch.tensor([0.229, 0.224, 0.225], device=self.device)
self.mean = torch.tensor([0.485, 0.456, 0.406], device=self.device)
class_index_path = os.path.join(
os.path.dirname(__file__),
"imagenet_class_index.json"
)
self.class_dict = {
int(key): val[1]
for key, val in json.load(open(class_index_path)).items()
}
def to(self, device):
super().to(device)
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def get_class_label(self, class_id: int):
return self.class_dict[class_id]
def normalize_image(self, image):
"""
:param image: tensor of shape (N, W, H, C)
:return: image normalized for ImageNet and permuted in the shape (N, C, W, H) which is the shape
used by torchvision models
"""
image = (image - self.mean) / self.std
image = image.permute(0, 3, 1, 2)
return image
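# Illustrative usage (hypothetical model and batch; assumes a torchvision
# backbone wrapped by Classifier):
#   classifier = ImageNetClassifier(torchvision.models.resnet50(pretrained=True))
#   batch = torch.rand(4, 224, 224, 3)              # (N, W, H, C) in [0, 1]
#   normalized = classifier.normalize_image(batch)  # -> shape (4, 3, 224, 224)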
| 29.452381 | 104 | 0.600647 | 1,141 | 0.922393 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.189976 |
c3093f2af126d570c81bda760a6f55d8df7bb8fb | 12,130 | py | Python | bertsification-multi-lstm.py | linhd-postdata/alberti | 4006eb0b97fe9e9bf3d8d1b014b1080713496da1 | ["Apache-2.0"] | null | null | null | bertsification-multi-lstm.py | linhd-postdata/alberti | 4006eb0b97fe9e9bf3d8d1b014b1080713496da1 | ["Apache-2.0"] | null | null | null | bertsification-multi-lstm.py | linhd-postdata/alberti | 4006eb0b97fe9e9bf3d8d1b014b1080713496da1 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# conda install pytorch>=1.6 cudatoolkit=10.2 -c pytorch
# wandb login XXX
import json
import logging
import os
import re
import sklearn
import time
from itertools import product
import numpy as np
import pandas as pd
import wandb
#from IPython import get_ipython
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from simpletransformers.classification import MultiLabelClassificationModel
from sklearn.model_selection import train_test_split
truthy_values = ("true", "1", "y", "yes")
TAG = os.environ.get("TAG", "bertsification")
LANGS = [lang.strip() for lang in os.environ.get("LANGS", "es,ge,en,multi").lower().split(",")]
MODELNAMES = os.environ.get("MODELNAMES")
EVAL = os.environ.get("EVAL", "True").lower() in truthy_values
OVERWRITE = os.environ.get("OVERWRITE", "False").lower() in truthy_values
logging.basicConfig(level=logging.INFO, filename=time.strftime("models/{}-%Y-%m-%dT%H%M%S.log".format(TAG)))
with open('pid', 'w') as pid:
pid.write(str(os.getpid()))
logging.info("Experiment '{}' on {}, (eval = {}, pid = {})".format(
TAG, LANGS, str(EVAL), str(os.getpid()),
))
# SimpleTransformers (based on HuggingFace/Transformers) for Multilingual Scansion
# We will be using `simpletransformers`, a wrapper of `huggingface/transformers` to fine-tune different BERT-based and other architecture models with support for Spanish.
# Utils
def clean_text(string):
output = string.strip()
# replacements = (("“", '"'), ("”", '"'), ("//", ""), ("«", '"'), ("»",'"'))
replacements = (
("“", ''), ("”", ''), ("//", ""), ("«", ''), ("»",''), (",", ''),
(";", ''), (".", ''),
# ("?", ''), ("¿", ''), ("¡", ''), ("!", ''), ("-", ' '),
)
for replacement in replacements:
output = output.replace(*replacement)
# Any sequence of two or more spaces should be converted into one space
output = re.sub(r'(?is)\s+', ' ', output)
return output.strip()
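# Illustrative behaviour (hypothetical input): clean_text('  “Hola,  mundo.”  ')
# returns 'Hola mundo'; the quotes and listed punctuation are dropped and runs
# of whitespace collapse to a single space.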
def metric2binary(meter, pad=11):
    return ([1 if syllable == "+" else 0 for syllable in meter] + [0] * (pad - len(meter)))[:pad]
def label2metric(label):
return "".join("+" if l else "-" for l in label)
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
# Spanish
# if not os.path.isfile("adso100.json"):
# get_ipython().system("averell export adso100 --filename adso100.json")
# if not os.path.isfile("adso.json"):
# get_ipython().system("averell export adso --filename adso.json")
es_test = (pd
.read_json(open("adso100.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
length=lambda x: x["metrical_pattern"].str.len()
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
es_test = es_test[es_test["length"] == 11]
es = (pd
.read_json(open("adso.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
length=lambda x: x["metrical_pattern"].str.len()
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
es = es[~es["text"].isin(es_test["text"])][es["length"] == 11]
es["labels"] = es.meter.apply(metric2binary)
es_train, es_eval = train_test_split(
es[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("Spanish")
logging.info("- Lines: {} train, {} eval, {} test".format(es_train.shape[0], es_eval.shape[0], es_test.shape[0]))
# English
en_test = (pd
.read_csv("4b4v_prosodic_meter.csv")
.assign(
text=lambda x: x["text"].apply(clean_text),
length=lambda x: x["meter"].str.len()
)
.drop_duplicates("text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
en_test = en_test.query("length in (5,6,7,8,9,10,11)")
# if not os.path.isfile("ecpa.json"):
# get_ipython().system("averell export ecpa --filename ecpa.json")
en = (pd
.read_json(open("ecpa.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
metrical_pattern=lambda x: x["metrical_pattern"].str.replace("|", "").str.replace("(", "").str.replace(")", "")
)
.assign(
length=lambda x: x["metrical_pattern"].str.len(),
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
en = en[~en["text"].isin(en_test["text"])].query("length in (5,6,7,8,9,10,11)")
en["labels"] = en.meter.apply(metric2binary)
en_train, en_eval = train_test_split(
en[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("English")
logging.info("- Lines: {} train, {} eval, {} test".format(en_train.shape[0], en_eval.shape[0], en_test.shape[0]))
# sota
en_sota = sum(en_test.meter == en_test.sota) / en_test.meter.size
# German
ge = (pd
.read_csv("po-emo-metricalizer.csv")
.rename(columns={"verse": "text", "annotated_pattern": "meter", "metricalizer_pattern": "sota"})
.assign(
text=lambda x: x["text"].apply(clean_text),
length=lambda x: x["meter"].str.len()
)
.drop_duplicates("text")
.query("length in (5, 6, 7, 8, 9, 10, 11)")
)
ge["labels"] = ge.meter.apply(metric2binary)
ge_train_eval, ge_test = train_test_split(ge, test_size=0.15, random_state=42)
ge_train, ge_eval = train_test_split(
ge_train_eval[["text", "labels"]], test_size=0.176, random_state=42)
logging.info("German")
logging.info("- Lines: {} train, {} eval, {} test".format(ge_train.shape[0], ge_eval.shape[0], ge_test.shape[0]))
# sota
ge_sota = sum(ge_test.meter == ge_test.sota) / ge_test.meter.size
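# Note: en_sota and ge_sota record the accuracy of the pre-existing tools (the
# prosodic and metricalizer columns renamed to "sota" above) on the same test
# splits, as baselines for the fine-tuned models below.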
# training
# Multilingual inputs
# - bert bert-base-multilingual-cased
# - distilbert distilbert-base-multilingual-cased
# - xlmroberta, xlm-roberta-base
# - xlmroberta, xlm-roberta-large
# Only English
# - roberta roberta-base
# - roberta roberta-large
# - albert albert-xxlarge-v2
# You can set class weights by using the optional weight argument
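# A minimal sketch of that option (illustrative, not used below; per the
# simpletransformers API, MultiLabelClassificationModel takes one positive
# class weight per label):
# weighted_model = MultiLabelClassificationModel(
#     "bert", "bert-base-multilingual-cased", num_labels=11,
#     pos_weight=[1.0] * 11,
# )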
models = (
# ("xlnet", "xlnet-base-cased"),
("bert", "bert-base-multilingual-cased"),
("distilbert", "distilbert-base-multilingual-cased"),
("roberta", "roberta-base"),
("roberta", "roberta-large"),
("xlmroberta", "xlm-roberta-base"),
("xlmroberta", "xlm-roberta-large"),
("electra", "google/electra-base-discriminator"),
("albert", "albert-base-v2"),
("albert", "albert-large-v2"),
)
if MODELNAMES:
models = [list(map(str.strip, modelname.split(",")))
for modelname in MODELNAMES.split(";")]
langs = LANGS or ("es", "ge", "en", "multi")
for lang, (model_type, model_name) in product(langs, models):
model_output = 'models/{}-{}-{}-{}'.format(TAG, lang, model_type, model_name.replace("/", "-"))
if OVERWRITE is False and os.path.exists(model_output):
logging.info("Skipping training of {} for {}".format(model_name, lang))
continue
logging.info("Starting training of {} for {}".format(model_name, lang))
run = wandb.init(project=model_output.split("/")[-1], reinit=True)
model = MultiLabelClassificationModel(
model_type, model_name, num_labels=11, args={
'output_dir': model_output,
'best_model_dir': '{}/best'.format(model_output),
'reprocess_input_data': True,
'overwrite_output_dir': True,
'use_cached_eval_features': True,
'num_train_epochs': 100, # For BERT, 2, 3, 4
'save_steps': 10000,
'early_stopping_patience': 5,
'evaluate_during_training': EVAL,
#'early_stopping_metric': "accuracy_score",
'evaluate_during_training_steps': 1000,
'early_stopping_delta': 0.00001,
'manual_seed': 42,
# 'learning_rate': 2e-5, # For BERT, 5e-5, 3e-5, 2e-5
# For BERT 16, 32. It could be 128, but with gradient_acc_steps set to 2 is equivalent
'train_batch_size': 16 if "large" in model_name else 32,
'eval_batch_size': 16 if "large" in model_name else 32,
            # Doubles train_batch_size, but gradients and weights are calculated once every 2 steps
'gradient_accumulation_steps': 2 if "large" in model_name else 1,
'max_seq_length': 32,
'use_early_stopping': True,
'wandb_project': model_output.split("/")[-1],
#'wandb_kwargs': {'reinit': True},
# "adam_epsilon": 3e-5, # 1e-8
"silent": False,
"fp16": False,
"n_gpu": 2,
})
# train the model
if lang == "multi":
train_df = pd.concat([es_train, en_train, ge_train], ignore_index=True)
eval_df = pd.concat([es_eval, en_eval, ge_eval], ignore_index=True)
elif lang == "es":
train_df = es_train
eval_df = es_eval
elif lang == "en":
train_df = en_train
eval_df = en_eval
elif lang == "ge":
train_df = ge_train
eval_df = ge_eval
if EVAL:
model.train_model(train_df, eval_df=eval_df)
# evaluate the model
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
logging.info(str(result))
#logging.info(str(model_outputs))
else:
train_eval_df = pd.concat([train_df, eval_df, ge_train], ignore_index=True)
model.train_model(train_eval_df)
if lang in ("es", "multi"):
es_test["predicted"], *_ = model.predict(es_test.text.values)
es_test["predicted"] = es_test["predicted"].apply(label2metric)
es_test["pred"] = es_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
es_bert = sum(es_test.meter == es_test.pred) / es_test.meter.size
logging.info("Accuracy [{}:es]: {} ({})".format(lang, es_bert, model_name))
wandb.log({"accuracy_es": es_bert})
if lang in ("en", "multi"):
en_test["predicted"], *_ = model.predict(en_test.text.values)
en_test["predicted"] = en_test["predicted"].apply(label2metric)
en_test["pred"] = en_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
en_bert = sum(en_test.meter == en_test.pred) / en_test.meter.size
logging.info("Accuracy [{}:en]: {} ({})".format(lang, en_bert, model_name))
wandb.log({"accuracy_en": en_bert})
if lang in ("ge", "multi"):
ge_test["predicted"], *_ = model.predict(ge_test.text.values)
ge_test["predicted"] = ge_test["predicted"].apply(label2metric)
ge_test["pred"] = ge_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
ge_bert = sum(ge_test.meter == ge_test.pred) / ge_test.meter.size
logging.info("Accuracy [{}:ge]: {} ({})".format(lang, ge_bert, model_name))
wandb.log({"accuracy_ge": ge_bert})
if lang in ("multi", ):
test_df = pd.concat([es_test, en_test, ge_test], ignore_index=True)
test_df["predicted"], *_ = model.predict(test_df.text.values)
test_df["predicted"] = test_df["predicted"].apply(label2metric)
test_df["pred"] = test_df.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
multi_bert = sum(test_df.meter == test_df.pred) / test_df.meter.size
logging.info("Accuracy [{}:multi]: {} ({})".format(lang, multi_bert, model_name))
wandb.log({"accuracy_multi": multi_bert})
run.finish()
logging.info("Done training '{}'".format(model_output))
# get_ipython().system("rm -rf `ls -dt models/{}-*/checkpoint*/ | awk 'NR>5'`".format(TAG))
logging.info("Done training")
| 41.683849 | 170 | 0.639077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,424 | 0.364295 |
c309cc940b59cd3830a59d4a46d48907f9c3e32d | 515 | py | Python | go_server_app/views.py | benjaminaaron/simple-go-server | 0ebe6756f72f896fd014d060252c27c2907e7ae8 | ["MIT"] | 1 | 2017-11-29T22:39:05.000Z | 2017-11-29T22:39:05.000Z | go_server_app/views.py | benjaminaaron/simple-go-server | 0ebe6756f72f896fd014d060252c27c2907e7ae8 | ["MIT"] | 1 | 2017-11-09T18:41:41.000Z | 2017-11-09T19:14:08.000Z | go_server_app/views.py | benjaminaaron/simple-go-server | 0ebe6756f72f896fd014d060252c27c2907e7ae8 | ["MIT"] | null | null | null |
from django.shortcuts import render
from .models import GameMeta
def index(request):
return render(request, 'go_server_app/index.html')
def dashboard(request):
return render(request, 'go_server_app/dashboard.html', {'games_list': GameMeta.objects.all()})
def game(request, game_id):
game_meta = GameMeta.objects.get(game_id=game_id)
return render(request, 'go_server_app/game.html', {'game_meta': game_meta})
def terminal(request):
return render(request, 'go_server_app/terminal.html')
| 24.52381 | 98 | 0.747573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.258252 |