max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
codility.com/sorting/__twice_for.py | Jagrmi-C/jagrmitest | 0 | 12791651 | <reponame>Jagrmi-C/jagrmitest
ar = [1, 4, 7, 2, 6]
for i in range(5):
for k in range(i+1, 5):
print(i, k)
print("array")
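# print every value pair ar[i], ar[k] with i <= k (includes self-pairs)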
n = len(ar)
for i in range(n):
for k in range(i, n):
print(ar[i], ar[k])
| 3.453125 | 3 |
server/fire_watch/log/log_configs.py | Aradhya-Tripathi/free-watch | 5 | 12791652 | <filename>server/fire_watch/log/log_configs.py
import logging
import fire_watch
from fire_watch.errorfactory import LogsNotEnabled
FMT = "%(asctime)s:%(name)s:%(message)s"
def get_logger(
logger_name: str,
filename: str,
level: int = 10,
) -> logging.Logger:
"""Simple logger configuration implemented to support
safe logging.
Args:
logger_name (str): name given to the current logger.
filename (str): file to write all logs to.
level (int): severity level.
Raises:
LogsNotEnabled: Raised if logging is attempted without enabling logs in the configuration.
Returns:
logging.Logger: logger object.
"""
if fire_watch.conf["logs"]:
logger = logging.getLogger(logger_name)
file_handler = logging.FileHandler(filename, mode="a")
file_handler.setFormatter(logging.Formatter(FMT))
file_handler.setLevel(level=level)
logger.addHandler(file_handler)
return logger
raise LogsNotEnabled
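# Minimal usage sketch (assumes fire_watch.conf["logs"] is truthy; otherwise LogsNotEnabled is raised):
# logger = get_logger("api", filename="api.log", level=logging.DEBUG)
# logger.debug("request received")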
| 2.578125 | 3 |
dero/ml/typing.py | whoopnip/dero | 0 | 12791653 | from typing import List, Dict, Optional, Union, Any
import pandas as pd
ModelParam = Optional[Union[str, int, float]]
ParamDict = Dict[str, ModelParam]
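# illustrative (hypothetical) ParamDict value: {"alpha": 0.5, "fit_intercept": None, "solver": "lbfgs"}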
ModelDict = Dict[str, Union[ParamDict, float]]
AllModelResultsDict = Dict[str, List[ModelDict]]
DfDict = Dict[str, pd.DataFrame]
ModelOptionPossibilitiesDict = Dict[str, List[Any]]
AllModelOptionPossibilitiesDict = Dict[str, ModelOptionPossibilitiesDict]
AllModelKwargs = List[Dict[str, Any]]
AllModelKwargsDict = Dict[str, AllModelKwargs] | 2.453125 | 2 |
agents/Power_Supply/8320/power_supply_monitors.2.0.py | nishanthprakash-hpe/nae-scripts | 0 | 12791654 | # -*- coding: utf-8 -*-
#
# (c) Copyright 2018 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
Manifest = {
'Name': 'power_supply_monitor',
'Description': 'System Power Supply monitoring agent',
'Version': '2.0',
'Author': 'Aruba Networks'
}
class Agent(NAE):
def __init__(self):
uri1 = '/rest/v1/system/subsystems/chassis/base/power_supplies/*?' \
'attributes=status'
self.m1 = Monitor(uri1, 'PSU status')
self.graph_status_transition = Graph([self.m1], title=Title(
"PSU Status Transition"), dashboard_display=True)
self.r1 = Rule('PSU status transition: OK to Output Fault')
self.r1.condition(
'transition {} from "ok" to "fault_output"',
[self.m1])
self.r1.action(self.status_ok_to_fault_output)
self.r2 = Rule('PSU status transition: OK to Input Fault')
self.r2.condition(
'transition {} from "ok" to "fault_input"',
[self.m1])
self.r2.action(self.status_ok_to_fault_input)
self.r3 = Rule('PSU status transition: OK to Warning')
self.r3.condition('transition {} from "ok" to "warning"', [self.m1])
self.r3.action(self.status_ok_to_warning)
self.r4 = Rule('PSU status transition: Output Fault to OK')
self.r4.condition(
'transition {} from "fault_output" to "ok"',
[self.m1])
self.r4.action(self.status_fault_output_to_ok)
self.r5 = Rule('PSU status transition: Input Fault to OK')
self.r5.condition(
'transition {} from "fault_input" to "ok"',
[self.m1])
self.r5.action(self.status_fault_input_to_ok)
self.r6 = Rule('PSU status transition: Output Fault to OK')
self.r6.condition(
'transition {} from "fault_output" to "ok"',
[self.m1])
self.r6.action(self.status_fault_output_to_ok)
self.r7 = Rule('PSU status transition: Warning to OK')
self.r7.condition('transition {} from "warning" to "ok"', [self.m1])
self.r7.action(self.status_warning_to_ok)
self.r8 = Rule('PSU status transition: Unknown to OK')
self.r8.condition('transition {} from "unknown" to "ok"', [self.m1])
self.r8.action(self.status_unknown_to_ok)
self.r9 = Rule('PSU status transition: OK to Unknown')
self.r9.condition('transition {} from "ok" to "unknown"', [self.m1])
self.r9.action(self.status_ok_to_unknown)
self.r10 = Rule('PSU status transition: Absent to OK')
self.r10.condition(
'transition {} from "fault_absent" to "ok"',
[self.m1])
self.r10.action(self.status_fault_absent_to_ok)
self.r11 = Rule('PSU status transition: OK to Absent')
self.r11.condition(
'transition {} from "ok" to "fault_absent"',
[self.m1])
self.r11.action(self.status_ok_to_fault_absent)
uri2 = '/rest/v1/system/subsystems/chassis/base/power_supplies/*?' \
'attributes=characteristics.maximum_power'
self.m2 = Monitor(uri2, 'maximum (Power in Watts)')
self.graph_max_power = Graph([self.m2], title=Title(
"PSU Maximum Power in Watts"), dashboard_display=False)
uri3 = '/rest/v1/system/subsystems/chassis/base/power_supplies/*?' \
'attributes=characteristics.instantaneous_power'
self.m3 = Monitor(uri3, 'instantaneous (Power in Watts)')
self.graph_instantaneous = Graph([self.m3], title=Title(
"PSU Instantaneous Power in Watts"), dashboard_display=False)
def status_ok_to_fault_input(self, event):
label = event['labels']
self.psu_transition_action(label, 'OK to Input Fault')
def status_ok_to_fault_output(self, event):
label = event['labels']
self.psu_transition_action(label, 'OK to Output Fault')
def status_ok_to_warning(self, event):
label = event['labels']
self.psu_transition_action(label, 'OK to Warning')
def status_fault_input_to_ok(self, event):
label = event['labels']
self.psu_transition_action(label, 'Input Fault to OK')
def status_fault_output_to_ok(self, event):
label = event['labels']
self.psu_transition_action(label, 'Output Fault to OK')
def status_warning_to_ok(self, event):
label = event['labels']
self.psu_transition_action(label, 'Warning to OK')
def status_unknown_to_ok(self, event):
label = event['labels']
self.psu_transition_action(label, 'Unknown to OK')
def status_ok_to_unknown(self, event):
label = event['labels']
self.psu_transition_action(label, 'OK to Unknown')
def status_fault_absent_to_ok(self, event):
label = event['labels']
self.psu_transition_action(label, 'Absent to OK')
def status_ok_to_fault_absent(self, event):
label = event['labels']
self.psu_transition_action(label, 'OK to Absent')
def psu_transition_action(self, label, transition):
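# 'label' is assumed to look like 'PSU=<id>,...'; the value of the first key=value pair identifies the power supply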
_, psu = label.split(',')[0].split('=')
self.logger.debug('PSU(' + psu + ') has changed from ' + transition)
ActionSyslog(psu + ' status transition: ' + transition)
ActionCLI('show environment power-supply')
| 2.078125 | 2 |
blog/admin/__init__.py | hentt30/education4all | 0 | 12791655 | """
Admin access page settings
"""
from django.contrib import admin
from blog.models import get_model_factory
from .posts_admin import PostAdmin
# Register your models here.
admin.site.register(get_model_factory('PostsFactory').create(), PostAdmin)
| 1.484375 | 1 |
Road2Knowledge/inScripter/processALL_OpenAire.py | fbellidopazos/OpenScience-Public | 0 | 12791656 | <gh_stars>0
# %%
import jsonlines
from multiprocessing.pool import ThreadPool
import glob
import json
import os
# %%
whereData = "../OpenAire/publication/" # where to store and access OpenAire data
# MUST end with a slash!!!!
def process_one_file(outputName,inputFile):
output = open(outputName,"w")
output.write("[\n")
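# open a JSON array; each record gets a trailing comma and a dummy "{ }" object closes the array so the output stays valid JSON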
print(f">> Processing file: {inputFile} ...")
with jsonlines.open(inputFile) as reader:
for obj in reader:
output.write(json.dumps(obj))
output.write(",\n")
output.write("{ }]")
output.close()
print(f">> Done processing file: {inputFile} ...")
print()
os.remove(inputFile)
# %%
allFiles = sorted(glob.glob(f"{whereData}*.json")) # List of all the files
for i in allFiles:
process_one_file(f"{i}.2mkgc",i)
print(">> Done!")
| 2.671875 | 3 |
setup.py | astariul/pytere | 21 | 12791657 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# # The following code can be used if you have private dependencies. Basically it requires the user to set an
# # environment variable `GH_PAT` to a Github Personal Access Token (with access to the private repository). If the env
# # var cannot be found, an error is raised. If it can be found, the private package is installed.
# import os
# try:
# gh_pat = os.environ["GH_PAT"]
# except KeyError as e:
# raise RuntimeError("You didn't set the environment variable `GH_PAT`. This is necessary because this package "
# "relies on private package(s), and you need to be authenticated to install these. Please set "
# "`GH_PAT` environment variable to your Personal Access Token (from Github).") from e
# # Example of specifying private dependencies :
# reqs = [f"<package_name> @ git+https://{gh_pat}@github.com/<user>/<repo>@<tag>#egg=<package_name>"]
reqs = []
extras_require = {
"test": ["pytest~=7.0", "pytest-cov~=3.0", "coverage-badge~=1.0"],
"hook": ["pre-commit~=2.15"],
"lint": ["isort~=5.9", "black~=22.1", "flake518~=1.2", "darglint~=1.8"],
"docs": ["mkdocs-material~=8.1", "mkdocstrings[python]~=0.18", "mike~=1.1"],
}
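# "all" flattens every extras list into one; "dev" below is the combination intended for local development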
extras_require["all"] = sum(extras_require.values(), [])
extras_require["dev"] = (
extras_require["test"] + extras_require["hook"] + extras_require["lint"] + extras_require["docs"]
)
setuptools.setup(
name="pytere",
version="1.0.0.dev0",
author="<NAME>",
author_email="<EMAIL>",
description="A Python Template Repository",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/astariul/pytere",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
install_requires=reqs,
extras_require=extras_require,
)
| 1.953125 | 2 |
Web/sessionManager.py | cmd2001/Open-TesutoHime | 11 | 12791658 | <reponame>cmd2001/Open-TesutoHime
from userManager import UserManager
from flask import request
class SessionManager:
def __init__(self):
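# in-memory map of the Login_ID cookie value -> username; cleared whenever the process restarts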
self.mem = {}
return
def check_user_status(self) -> bool: # to check whether current user has logged in properly
lid = request.cookies.get('Login_ID')
return lid in self.mem
def new_session(self, username: str, login_id: str):
self.mem[login_id] = username
return
def get_username(self) -> str:
lid = request.cookies.get('Login_ID')
return self.mem[lid] if lid in self.mem else ''
def get_friendly_name(self) -> str:
lid = request.cookies.get('Login_ID')
if not (lid in self.mem):
return ''
return UserManager().get_friendly_name(self.mem[lid])
def get_privilege(self) -> int:
lid = request.cookies.get('Login_ID')
if not (lid in self.mem):
return -1 # lowest Privilege for Guests
return UserManager().get_privilege(self.mem[lid])
Login_Manager = SessionManager()
| 2.78125 | 3 |
miniboss/__init__.py | afroisalreadyinu/miniboss | 633 | 12791659 | from .main import cli
from .services import Service
from .context import Context
from .types import set_group_name as group_name
| 1.125 | 1 |
plotsky.py | hagabbar/VItamin | 13 | 12791660 | import numpy as np
from ligo.skymap import kde
import matplotlib
matplotlib.use('Agg')
from matplotlib.colors import to_rgb
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap
#matplotlib.rc('text', usetex=True)
def greedy(density):
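# rank pixels by density and assign each the cumulative probability enclosed at its rank,
# so thresholding the result at p selects the p credible region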
i,j = np.shape(density)
idx = np.argsort(density.flatten())[::-1]
c = np.cumsum(density.flatten()[idx])
c = c/c[-1]
np.append(c,1.0)
p = np.zeros(i*j)
p[idx] = c[:]
return p.reshape(i,j)
def plot_sky(pts,contour=True,filled=False,ax=None,trueloc=None,cmap='Reds',col='red'):
cls = kde.Clustered2DSkyKDE
pts[:,0] = pts[:,0] - np.pi
skypost = cls(pts, trials=5, jobs=8)
# make up some data on a regular lat/lon grid.
# nlats = 145; nlons = 291; delta = 2.*np.pi/(nlons-1)
nlats = 145; nlons = 291; delta = 2.*np.pi/(nlons-1)
lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:])
# lons = (delta*np.indices((nlats,nlons))[1,:,:])
lons = (delta*np.indices((nlats,nlons))[1,:,:]-np.pi)
locs = np.column_stack((lons.flatten(),lats.flatten()))
prob = skypost(locs).reshape(nlats,nlons)
p1 = greedy(prob)
# compute mean location of samples
nx = np.cos(pts[:,1])*np.cos(pts[:,0])
ny = np.cos(pts[:,1])*np.sin(pts[:,0])
nz = np.sin(pts[:,1])
mean_n = [np.mean(nx),np.mean(ny),np.mean(nz)]
# bestloc = [np.remainder(np.arctan2(mean_n[1],mean_n[0]),2.0*np.pi),np.arctan2(mean_n[2],np.sqrt(mean_n[0]**2 + mean_n[1]**2))]
bestloc = [trueloc[0],trueloc[1]]
if ax is None:
# map = Basemap(projection='ortho',lon_0=-bestloc[0]*180/np.pi,lat_0=bestloc[1]*180/np.pi,resolution=None,celestial=True)
map = Basemap(projection='moll',lon_0=0,resolution=None,celestial=True)
map.drawmapboundary(fill_color='white')
# draw lat/lon grid lines every 30 degrees.
# map.drawmeridians(np.arange(0,360,30))
meridian = ["-180","-150","-120","-90","-60","-30","0","30","+60","+90","+120","+150"]
map.drawmeridians(np.arange(-180,180,30),labels=[1,1,1,1])
for i in np.arange(len(meridian)):
plt.annotate(r"$\textrm{%s}$" % meridian[i] + u"\u00b0",xy=map(np.arange(-180,180,30)[i],0),xycoords='data')
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
else:
map = ax
# compute native map projection coordinates of lat/lon grid.
# x, y = map(lons*180./np.pi, lats*180./np.pi)
x, y = map(lons*180./np.pi, lats*180./np.pi)
# contour data over the map.
if filled:
base_color = np.array(to_rgb(col))
opp_color = 1.0 - base_color
cs1 = map.contourf(x,y,1.0-p1,levels=[0.0,0.1,0.5,1.0],colors=[base_color+opp_color,base_color+0.8*opp_color,base_color+0.6*opp_color,base_color])
cs2 = map.contour(x,y,p1,levels=[0.5,0.9],linewidths=2.0,colors=col)
if trueloc is not None:
xx, yy = map((trueloc[0]*180./np.pi)-180.0, trueloc[1]*180./np.pi)
map.plot(xx,yy,marker='+',markersize=20,linewidth=5,color='black')
return map
| 2.421875 | 2 |
TEKDB/TEKDB/apps.py | Ecotrust/TEKDB | 4 | 12791661 | # TEKDB/apps.py
from django.apps import AppConfig
class TEKDBConfig(AppConfig):
name = 'TEKDB'
verbose_name = 'Records'
| 1.15625 | 1 |
DataStructures/LinkedList/CycleDetection.py | baby5/HackerRank | 0 | 12791662 | <reponame>baby5/HackerRank<filename>DataStructures/LinkedList/CycleDetection.py
#coding:utf-8
def has_cycle(head):
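# Floyd's tortoise-and-hare: ptr1 advances two nodes per step, ptr2 one; the pointers can only meet again if the list contains a cycle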
ptr1 = head
ptr2 = head
while ptr1 and ptr1.next:
ptr1 = ptr1.next.next
ptr2 = ptr2.next
if ptr1 is ptr2:
return 1
return 0
| 3.671875 | 4 |
utils/trainer.py | tonouchi510/kfp-project | 0 | 12791663 | # DLトレーニングで共通のロジック・モジュール
import tensorflow as tf
from tensorflow.python.data.ops.readers import TFRecordDatasetV2
from tensorflow.python.keras.callbacks import History
from google.cloud import storage
from typing import Callable, List
import os
def get_tfrecord_dataset(
dataset_path: str,
preprocessing: Callable,
global_batch_size: int,
split: str,
data_augmentation: Callable = lambda x, y: (x, y),
) -> TFRecordDatasetV2:
"""Build a tf.data pipeline from TFRecord files.
Args:
dataset_path (str): Path under which the target TFRecord files are stored.
preprocessing (Callable): Preprocessing function applied to each example.
global_batch_size (int): Batch size (total across replicas for distributed training).
split (str): train or valid.
data_augmentation (Callable, optional): Data augmentation function. Defaults to the identity (lambda x, y: (x, y)).
Raises:
FileNotFoundError: If no matching TFRecord files exist under dataset_path.
Returns:
TFRecordDatasetV2: The configured data pipeline.
"""
# Build a pipeline
file_names = tf.io.gfile.glob(
f"{dataset_path}/{split}-*.tfrec"
)
if not file_names:
raise FileNotFoundError(f"No TFRecord files found: {dataset_path}/{split}-*.tfrec")
dataset = tf.data.TFRecordDataset(
file_names, num_parallel_reads=tf.data.AUTOTUNE)
option = tf.data.Options()
if split == "train":
option.experimental_deterministic = False
dataset = dataset.with_options(option) \
.map(lambda example: preprocessing(example=example), num_parallel_calls=tf.data.AUTOTUNE) \
.map(lambda x, y: data_augmentation(x, y)) \
.shuffle(512, reshuffle_each_iteration=True) \
.batch(global_batch_size, drop_remainder=True) \
.prefetch(tf.data.AUTOTUNE)
else:
option.experimental_deterministic = True
dataset = dataset.with_options(option) \
.map(lambda example: preprocessing(example=example), num_parallel_calls=tf.data.AUTOTUNE) \
.batch(global_batch_size, drop_remainder=False) \
.prefetch(tf.data.AUTOTUNE)
return dataset
class Training:
def __init__(
self,
build_model_func: Callable,
job_dir: str,
artifacts_dir: str = "",
use_tpu: bool = True,
) -> None:
"""Perform the initial training setup.
Handles TPU node management, TPUStrategy setup, model loading and compilation, and checkpoint recovery.
Arguments:
build_model_func (Callable): Build function for the model used in the experiment.
job_dir (str): GCS path used for job management; checkpoints and logs are stored here.
artifacts_dir (str): GCS path where experiment artifacts are saved.
use_tpu (bool): Whether to use a TPU for training.
"""
# For job management
self.job_dir = job_dir
self.artifacts_dir = artifacts_dir
self.use_tpu = use_tpu
self.last_epoch = self._get_last_epoch()
if self.use_tpu:
# Tpu cluster setup
cluster = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(cluster)
tf.tpu.experimental.initialize_tpu_system(cluster)
self.distribute_strategy = tf.distribute.TPUStrategy(cluster)
# Load model in distribute_strategy scope
with self.distribute_strategy.scope():
self._setup_model(build_model=build_model_func)
else:
self._setup_model(build_model=build_model_func)
self.callbacks = [
tf.keras.callbacks.TensorBoard(log_dir=f"{self.job_dir}/logs", histogram_freq=1),
tf.keras.callbacks.TerminateOnNaN(),
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(self.job_dir, "checkpoints/{epoch:05d}"),
save_weights_only=True,
save_freq="epoch"
)
]
def _setup_model(self, build_model: Callable) -> None:
if self.last_epoch == 0:
self.model = build_model()
else:
checkpoint = f"{self.job_dir}/checkpoints/{self.last_epoch:0>5}"
self.model = build_model(checkpoint=checkpoint)
def _get_last_epoch(self) -> int:
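# scan the job's checkpoint prefix on GCS and return the highest epoch number found (0 if none)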
client = storage.Client()
bucket_name = self.job_dir.split("/")[2]
dest = self.job_dir.replace(f"gs://{bucket_name}/", "")
blobs = client.list_blobs(bucket_name, prefix=f"{dest}/checkpoints")
checkpoints = [0]
for b in blobs:
epoch = b.name.replace(f"{dest}/checkpoints/", "").split(".")[0]
if epoch:
checkpoints.append(int(epoch))
last_epoch = max(checkpoints)
return last_epoch
def add_callbacks(self, callbacks: List) -> None:
self.callbacks.extend(callbacks)
def run_train(
self,
train_ds: TFRecordDatasetV2,
valid_ds: TFRecordDatasetV2,
epochs: int
) -> History:
"""Run training and save the logs and results.
Training is performed with tf.keras.Model.fit.
When a more complex training loop is needed, override tf.keras.Model.train_step and still call this method.
Arguments:
train_ds (TFRecordDatasetV2): TensorFlow dataset pipeline for training.
valid_ds (TFRecordDatasetV2): TensorFlow dataset pipeline for validation.
epochs (int): Total number of epochs to train for.
"""
history = self.model.fit(
train_ds,
validation_data=valid_ds,
callbacks=self.callbacks,
initial_epoch=self.last_epoch,
epochs=epochs
)
if self.artifacts_dir:
self.model.save(f"{self.artifacts_dir}/saved_model", include_optimizer=False)
return history
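# Minimal usage sketch (assumes a build_model function and a preprocessing function are defined elsewhere;
# the GCS paths below are placeholders):
# trainer = Training(build_model_func=build_model, job_dir="gs://bucket/job", use_tpu=False)
# train_ds = get_tfrecord_dataset("gs://bucket/dataset", preprocessing, global_batch_size=64, split="train")
# valid_ds = get_tfrecord_dataset("gs://bucket/dataset", preprocessing, global_batch_size=64, split="valid")
# trainer.run_train(train_ds, valid_ds, epochs=10)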
| 2.671875 | 3 |
cameramodels/align.py | iory/cameramodels | 9 | 12791664 | import numpy as np
def align_depth_to_rgb(
depth,
bgr_cameramodel,
depth_cameramodel,
depth_to_rgb_transform):
"""Align depth image to color image.
Parameters
----------
depth : numpy.ndarray
depth image in meter order.
bgr_cameramodel : cameramodels.PinholeCameraModel
bgr cameramodel
depth_cameramodel : cameramodels.PinholeCameraModel
depth cameramodel
depth_to_rgb_transform : numpy.ndarray
4x4 transformation matrix.
Returns
-------
aligned_img : numpy.ndarray
aligned image.
"""
if depth.shape[0] != depth_cameramodel.height \
or depth.shape[1] != depth_cameramodel.width:
raise ValueError
depth = depth.copy()
aligned_img = np.zeros((bgr_cameramodel.height, bgr_cameramodel.width),
dtype=np.float32)
depth[np.isnan(depth)] = 0
v, u = np.array(np.where(depth))
uv = np.array([u, v]).T
rotation = depth_to_rgb_transform[:3, :3]
translation = depth_to_rgb_transform[:3, 3]
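# back-project valid depth pixels to 3-D points, then move them into the RGB camera frame by applying the inverse transform (R^T x - R^T t)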
xyz_depth_frame = depth_cameramodel.batch_project_pixel_to_3d_ray(
uv, depth=depth[depth > 0])
xyz_rgb_frame = (np.matmul(
rotation.T, xyz_depth_frame.T)
- np.matmul(
rotation.T, translation).reshape(3, -1)).T
rgb_uv, indices = bgr_cameramodel.batch_project3d_to_pixel(
xyz_rgb_frame,
project_valid_depth_only=True,
return_indices=True)
aligned_img.reshape(-1)[bgr_cameramodel.flatten_uv(rgb_uv)] = \
depth[depth > 0][indices]
return aligned_img
| 2.90625 | 3 |
tests/fixtures/me.py | akram/ovh-cli | 42 | 12791665 | <filename>tests/fixtures/me.py<gh_stars>10-100
# -*- coding: utf-8 -*-
def info():
return {
'country': 'FR',
'firstname': 'John',
'legalform': 'individual',
'name': 'Doe',
'currency': {
'code': 'EUR',
'symbol': 'EURO'
},
'ovhSubsidiary': 'FR',
'birthDay': None,
'organisation': '',
'spareEmail': None,
'area': '',
'phone': '+33.123456789',
'nationalIdentificationNumber': None,
'ovhCompany': 'ovh',
'email': '<EMAIL>',
'companyNationalIdentificationNumber': None,
'language': 'fr_FR',
'fax': '',
'zip': '59100',
'nichandle': 'dj12345-ovh',
'corporationType': None,
'sex': None,
'birthCity': None,
'state': 'complete',
'city': 'Roubaix',
'vat': '',
'address': '2 rue Kellermann'
}
def get_applications():
return [
{
'status': 'active',
'applicationKey': '<KEY>',
'applicationId': 20001,
'name': 'foobar-1',
'description': 'Lorem ipsum 1'
}, {
'status': 'active',
'applicationKey': '<KEY>',
'applicationId': 20003,
'name': 'foobar-3',
'description': 'Lorem ipsum 3'
}, {
'status': 'active',
'applicationKey': '<KEY>',
'applicationId': 20002,
'name': 'foobar-2',
'description': 'Lorem ipsum 2'
}
]
def get_credentials(app_id):
return [cred for cred in [
{
'ovhSupport': False,
'rules': [
{
'method': 'GET', 'path': '/*'
}, {
'method': 'POST', 'path': '/*'
}, {
'method': 'PUT', 'path': '/*'
}, {
'method': 'DELETE', 'path': '/*'
}
],
'expiration': '2016-08-04T17:52:21+02:00',
'status': 'validated',
'credentialId': 50000002,
'applicationId': 20001,
'creation': '2016-08-03T17:52:21+02:00',
'lastUse': '2016-08-03T17:51:12+02:00'
}, {
'ovhSupport': True,
'rules': [
{
'method': 'GET', 'path': '/*'
}, {
'method': 'POST', 'path': '/*'
}, {
'method': 'PUT', 'path': '/*'
}, {
'method': 'DELETE', 'path': '/*'
}
],
'expiration': '2016-08-04T17:47:33+02:00',
'status': 'validated',
'credentialId': 50000001,
'applicationId': 20001,
'creation': '2016-08-03T17:47:33+02:00',
'lastUse': '2016-08-03T17:50:23+02:00'
}
] if cred['applicationId'] == int(app_id)]
def get_application(app_id):
return next((app for app in get_applications()
if app['applicationId'] == int(app_id)))
def get_credential(credential_id):
return next((app for app in get_credentials('20001')
if app['credentialId'] == int(credential_id)))
def get_rules(credential_id):
return next((app for app in get_credentials('20001')
if app['credentialId'] == int(credential_id)))['rules']
| 1.757813 | 2 |
homeassistant/components/hardkernel/const.py | liangleslie/core | 30,023 | 12791666 | <filename>homeassistant/components/hardkernel/const.py
"""Constants for the Hardkernel integration."""
DOMAIN = "hardkernel"
| 1.039063 | 1 |
DataConnector/AppDataPublisher.py | twatteynelinear/dustlink_sierra | 4 | 12791667 | #!/usr/bin/python
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('AppDataPublisher')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import copy
import threading
from DustLinkData import DustLinkData
from EventBus import EventBusClient
class AppDataPublisher(EventBusClient.EventBusClient):
'''
\brief Publishes the data into the DustLinkData database.
One instance of this class is created for each application.
'''
def __init__(self,appName):
# store params
self._appName = appName
# log
log.info('creating instance')
# initialize parent class
EventBusClient.EventBusClient.__init__(self,
'parsedAppData_{0}'.format(self._appName),
self._publish,
)
self.name = 'DataConnector_AppDataPublisher_{0}'.format(self._appName)
# add stats
# local variables
#======================== public ==========================================
#======================== private =========================================
def _publish(self,sender,signal,data):
dld = DustLinkData.DustLinkData()
if not dld.getFastMode():
# add mote if needed
try:
dld.addMote(data['mac'])
except ValueError:
pass # happens when mote already exists
# in demo mode, add demo mode apps to mote
if dld.getDemoMode():
for appname in dld.DEMO_MODE_APPS.keys():
try:
dld.attachAppToMote(data['mac'],appname)
except ValueError:
pass # happens when app does not exist, or already attached
# attach app to mote if needed
try:
dld.attachAppToMote(data['mac'],self._appName)
except ValueError:
pass # happens when app not known, or app already attached to mote
# publish in DustLinkData
dld.indicateData(data['mac'],
self._appName,
data['fields'],
timestamp=data['timestamp'],
)
# log
if log.isEnabledFor(logging.DEBUG):
log.debug('published {0}'.format(data))
| 2.1875 | 2 |
tests/UnitTests/Morphology/Disambiguator/disambiguator_prefix_rule1_test.py | ZenaNugraha/PySastrawi | 282 | 12791668 | import unittest
from Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule1 import DisambiguatorPrefixRule1a, DisambiguatorPrefixRule1b
class Test_DisambiguatorPrefixRule1Test(unittest.TestCase):
def setUp(self):
self.subject1a = DisambiguatorPrefixRule1a()
self.subject1b = DisambiguatorPrefixRule1b()
return super(Test_DisambiguatorPrefixRule1Test, self).setUp()
def test_disambiguate1a(self):
self.assertEqual('ia-ia', self.subject1a.disambiguate('beria-ia'))
self.assertIsNone(self.subject1a.disambiguate('berlari'))
def test_disambiguate1b(self):
self.assertEqual('rakit', self.subject1b.disambiguate('berakit'))
self.assertIsNone(self.subject1b.disambiguate('bertabur'))
if __name__ == '__main__':
unittest.main()
| 2.78125 | 3 |
move.py | laybatin/move-to-registry | 0 | 12791669 | <filename>move.py<gh_stars>0
import subprocess
import requests
import json
host='registry.host.com'
port='16443'
url_addr='{}:{}/v2'.format(host,port)
print(url_addr)
r = requests.get('https://{}/_catalog'.format(url_addr))
js = json.loads(r.content)
#print(js)
tag_format='https://' + url_addr + '/{IMAGE_NAME}/tags/list'
new_port = '' #ex) :5000
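# for every repository/tag in the source registry: pull the image, retag it for the new registry address, and push it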
if js['repositories'] is not None:
for v in js['repositories']:
tag_request = json.loads(requests.get(tag_format.format(IMAGE_NAME=v)).content)
if tag_request['tags']:
for tag in tag_request['tags']:
image_path = '{HOST}:{PORT}/{IMAGE_NAME}:{TAG}'.format(HOST=host, PORT=port, IMAGE_NAME=v, TAG=tag)
change_image_path = '{HOST}{PORT}/{IMAGE_NAME}:{TAG}'.format(HOST=host, PORT=new_port, IMAGE_NAME=v, TAG=tag)
print(image_path + "-->" + change_image_path)
subprocess.check_output(['docker', 'pull', image_path], universal_newlines=True)
subprocess.check_output(['docker', 'tag', image_path, change_image_path], universal_newlines=True)
subprocess.check_output(['docker', 'push', change_image_path], universal_newlines=True)
| 2.375 | 2 |
Phase_1/O-18-by-Yuewei.py | yapanliu/ashrae-ob-database | 0 | 12791670 | <filename>Phase_1/O-18-by-Yuewei.py
import pandas as pd
import numpy as np
import os
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-06-03-raw-data/Annex 79 Data Collection/O-18-Nan Gao/CornishCollege_CleanEXPORT (6)/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-06-03-raw-data/Annex 79 Data Collection/O-18-Nan Gao/_yapan_processing/'
# read templates into pandas
template_occ_num = pd.read_csv(template_path + 'Occupant_Number_Measurement.csv')
template_outdoor = pd.read_csv(template_path + 'Outdoor_Measurement.csv')
template_hvac = pd.read_csv(template_path + 'HVAC_Measurement.csv')
os.chdir(data_path) # pwd
df_1 = pd.read_csv('19.csv')
df_2 = pd.read_csv('20.csv')
df_3 = pd.read_csv('27.csv')
df_4 = pd.read_csv('28.csv')
df_5 = pd.read_csv('29.csv')
df_6 = pd.read_csv('30.csv')
df_7 = pd.read_csv('31.csv')
df_8 = pd.read_csv('40.csv')
df_9 = pd.read_csv('41.csv')
df_10 = pd.read_csv('43.csv')
df_11 = pd.read_csv('KB1.csv')
df_12 = pd.read_csv('KB2.csv')
df_13 = pd.read_csv('KB3.csv')
df_14 = pd.read_csv('KB4.csv')
df_15 = pd.read_csv('KB5.csv')
df_16 = pd.read_csv('KB6.csv')
df_1['Room_ID'] = 1
df_2['Room_ID'] = 2
df_3['Room_ID'] = 3
df_4['Room_ID'] = 4
df_5['Room_ID'] = 5
df_6['Room_ID'] = 6
df_7['Room_ID'] = 7
df_8['Room_ID'] = 8
df_9['Room_ID'] = 9
df_10['Room_ID'] = 10
df_11['Room_ID'] = 11
df_12['Room_ID'] = 12
df_13['Room_ID'] = 13
df_14['Room_ID'] = 14
df_15['Room_ID'] = 15
df_16['Room_ID'] = 16
df = pd.concat([df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10, df_11, df_12, df_13, df_14, df_15, df_16], ignore_index=True)
Date_Time = df['Unnamed: 0']
Occupancy_Measurement = df['Occupied']
Tin = df['IndoorTemperature']
RHin = df['IndoorHumidity']
CO2in = df['IndoorCO2']
Tout = df['OutdoorTemperature']
RHout = df['OutdoorHumidity']
Wind_Direction = df['OutdoorWindDirection']
Wind_Speed = df['OutdoorWindSpeed']
Room_ID = df['Room_ID']
tem_1 = pd.read_csv('Indoor_Measurement.csv')
tem_1['Date_Time'] = Date_Time
tem_1['Indoor_Temp'] = Tin
tem_1['Indoor_RH'] = RHin
tem_1['Indoor_CO2'] = CO2in
tem_1['Room_ID'] = Room_ID
tem_1['Date_Time'] = tem_1['Date_Time'].fillna(-999)
tem_1['Indoor_Temp'] = tem_1['Indoor_Temp'].fillna(-999)
tem_1['Indoor_RH'] = tem_1['Indoor_RH'].fillna(-999)
tem_1['Indoor_CO2'] = tem_1['Indoor_CO2'].fillna(-999)
tem_1['Room_ID'] = tem_1['Room_ID'].fillna(-999)
tem_1.to_csv('Indoor_Measurement_18.csv', index=False, header=True)
tem_2 = pd.read_csv('Occupancy_Measurement.csv')
tem_2['Date_Time'] = Date_Time
tem_2['Occupancy_Measurement'] = Occupancy_Measurement
tem_2['Room_ID'] = Room_ID
tem_2['Date_Time'] = tem_2['Date_Time'].fillna(-999)
tem_2['Room_ID'] = tem_2['Room_ID'].fillna(-999)
tem_2['Occupancy_Measurement'] = tem_2['Occupancy_Measurement'].fillna(-999)
tem_2.to_csv('Occupancy_Measurement_18.csv', index=False, header=True)
tem_3 = pd.read_csv('Outdoor_Measurement.csv')
tem_3['Date_Time'] = Date_Time
tem_3['Outdoor_Temp'] = Tout
tem_3['Outdoor_RH'] = RHout
tem_3['Wind_Speed'] = Wind_Speed
tem_3['Wind_Direction'] = Wind_Direction
tem_3['Building_ID'] = 1
tem_3['Date_Time'] = tem_3['Date_Time'].fillna(-999)
tem_3['Outdoor_Temp'] = tem_3['Outdoor_Temp'].fillna(-999)
tem_3['Outdoor_RH'] = tem_3['Outdoor_RH'].fillna(-999)
tem_3['Wind_Speed'] = tem_3['Wind_Speed'].fillna(-999)
tem_3['Wind_Direction'] = tem_3['Wind_Direction'].fillna(-999)
tem_3.to_csv('Outdoor_Measurement_18.csv', index=False, header=True)
''' yapan added '''
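# rename the raw columns to the template schema and split the data into outdoor and HVAC tables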
df = df.rename(columns={'Unnamed: 0': 'Date_Time'})
# indoor_cols = ['Date_Time', 'IndoorTemperature', 'IndoorHumidity', 'IndoorCO2', 'Room_ID']
outdoor_cols = ['Date_Time', 'OutdoorTemperature', 'OutdoorHumidity', 'OutdoorWindDirection',
'OutdoorWindSpeed', 'Precipitation', 'SolarRadiation', 'Room_ID']
# occ_cols = ['Date_Time', 'Occupied', 'Room_ID']
hvac_cols = ['Date_Time', 'HeatingState', 'CoolingState', 'Room_ID']
outdoor_df = df[outdoor_cols]
outdoor_df.columns = ['Date_Time', 'Outdoor_Temp', 'Outdoor_RH', 'Wind_Direction', 'Wind_Speed',
'Precipitation', 'Solar_Radiation', 'Room_ID']
template_outdoor = pd.concat([template_outdoor, outdoor_df], ignore_index=True)
template_outdoor['Building_ID'] = 1
template_hvac.columns
hvac_df = df[hvac_cols]
hvac_df.columns = ['Date_Time', 'Heating_Status', 'Cooling_Status', 'Room_ID']
template_hvac = pd.concat([template_hvac, hvac_df], ignore_index=True)
template_hvac['Building_ID'] = 1
template_hvac['HVAC_Zone_ID'] = 1
# check data
print(template_outdoor.isnull().sum())
print(template_outdoor.dtypes)
print(template_hvac.isnull().sum())
print(template_hvac.dtypes)
# change data types
template_outdoor['Date_Time'] = pd.to_datetime(template_outdoor['Date_Time'], format="%Y-%m-%d %H:%M:%S")
template_hvac['Date_Time'] = pd.to_datetime(template_hvac['Date_Time'], format="%Y-%m-%d %H:%M:%S")
template_outdoor['Building_ID'] = template_outdoor['Building_ID'].astype(int)
template_outdoor['Room_ID'] = template_outdoor['Room_ID'].astype(int)
template_hvac['Heating_Status'] = template_hvac['Heating_Status'].astype(int)
template_hvac['Cooling_Status'] = template_hvac['Cooling_Status'].astype(int)
template_hvac['Building_ID'] = template_hvac['Building_ID'].astype(int)
template_hvac['Room_ID'] = template_hvac['Room_ID'].astype(int)
template_hvac['HVAC_Zone_ID'] = template_hvac['HVAC_Zone_ID'].astype(int)
# save data
template_outdoor.to_csv(save_path + 'Outdoor_Measurement.csv', index=False)
template_hvac.to_csv(save_path + 'HVAC_Measurement.csv', index=False) | 2.421875 | 2 |
main.py | pteoh/serverless-telegram-bot | 2 | 12791671 | <gh_stars>1-10
import os
import telegram
import random
bot = telegram.Bot(token=os.environ["TELEGRAM_TOKEN"])
# dictionary of trigger words with single 1:1 reply
singlereplydict = {
"tableflip": "(╯°□°)╯︵ ┻━┻",
"bagelflip": "(╯°□°)╯︵ 🥯",
"tacoflip": "(╯°□°)╯︵ 🌮",
"pizzaflip": "(╯°□°)╯︵ 🍕",
"hotdogflip": "(╯°□°)╯︵ 🌭",
"kittyparty": "🐈🐱🐆🙌🦁🐅🐯",
"puppyparty": "🐕🐩🐕🙌🐩🐕🐩",
"ponyparty": "🐎🦄🎠🙌🐎🦄🎠",
"piggyparty": "🐖🐽🐷🙌🐷🐽🐖",
"bunnyparty": "🥕🐇🥬🐰🙌🐰🥬🐇🥕",
"flowerbeam": "( ・◡・)つ━☆🌸🌺🌼",
"pastryparty": "🍞🥖🥐🥯🥨🥞🍩🍪🍰🧁",
"doubleflip": "┻━┻︵ \\(°□°)/ ︵ ┻━┻",
"musicdance": "♪┏(°.°)┛┗(°.°)┓┗(°.°)┛┏(°.°)┓ ♪",
"shame": "🔔 🔔 🔔",
"shrug": "🤷 ¯\_(ツ)_/¯",
"disapprove": "ಠ_ಠ",
"octodisco": "🎶🐙🎶",
"octodance": "🎶🐙🎶",
}
# dictionary of trigger words with multiple random responses
multireplydict = {
"backpack": [
"https://media.giphy.com/media/xUA7aXRRUlmqhoG7q8/giphy.gif",
"https://media.giphy.com/media/2DjXJ5UmrqYPm/giphy.gif",
"https://media.giphy.com/media/E1MTLQN0vFac8/giphy.gif",
"Mmm... yeah... the pack for the back.", "I like turtles.", "I like pie.",
"Das ist ein rucksack auf Deutsch!",
"Oh, and remember, next Friday is Swedish luggage day, so, you know, if you want to, go ahead and wear a bäckpäck.",
],
"dumpsterfire": [
"https://media.giphy.com/media/QLyhWVTvAHbAbAdWcp/giphy.gif",
"https://media.giphy.com/media/134vVkHV9wQtaw/giphy.gif",
"https://media.giphy.com/media/FqtWrearu5vb2/giphy.gif",
],
"chika": [
"https://media1.tenor.com/images/38b0f21d0e76dec2ff58d19e37fcc716/tenor.gif?itemid=4484736",
"https://1funny.com/wp-content/uploads/2009/07/diabeetus-cat.jpg",
"http://rs367.pbsrc.com/albums/oo112/Aim_fire/sdgfasfdgd.jpg~c200",
"https://c1.staticflickr.com/3/2254/2334517660_c5a9522dbd.jpg",
"https://media.giphy.com/media/Xbvni0CPHxdRK/giphy.gif",
"https://media.giphy.com/media/2oLrxIsfNcMH6/giphy.gif",
],
# source https://www.factretriever.com/cat-facts
"catfact": [
"(1) Unlike dogs, cats do not have a sweet tooth. Scientists believe this is due to a mutation in a key taste receptor.",
"(2) When a cat chases its prey, it keeps its head level. Dogs and humans bob their heads up and down.",
"(3) The technical term for a cat’s hairball is a “bezoar”.",
"(4) A group of cats is called a “clowder”.",
"(5) A cat can’t climb head first down a tree because every claw on a cat’s paw points the same way. To get down from a tree, a cat must back down.",
"(6) Cats make about 100 different sounds. Dogs make only about 10.",
"(7) Every year, nearly four million cats are eaten in Asia.",
"(8) There are more than 500 million domestic cats in the world, with approximately 40 recognized breeds.",
"(9) Approximately 24 cat skins can make a coat.",
"(10) While it is commonly thought that the ancient Egyptians were the first to domesticate cats, the oldest known pet cat was recently found in a 9,500-year-old grave on the Mediterranean island of Cyprus. This grave predates early Egyptian art depicting cats by 4,000 years or more.",
"(11) During the time of the Spanish Inquisition, Pope Innocent VIII condemned cats as evil and thousands of cats were burned. Unfortunately, the widespread killing of cats led to an explosion of the rat population, which exacerbated the effects of the Black Death.",
"(12) During the Middle Ages, cats were associated with withcraft, and on St. John’s Day, people all over Europe would stuff them into sacks and toss the cats into bonfires. On holy days, people celebrated by tossing cats from church towers.",
"(13) The first cat in space was a French cat named Felicette (a.k.a. “Astrocat”) In 1963, France blasted the cat into outer space. Electrodes implanted in her brains sent neurological signals back to Earth. She survived the trip.",
"(14) The group of words associated with cat (catt, cath, chat, katze) stem from the Latin catus, meaning domestic cat, as opposed to feles, or wild cat.",
"(15) The term “puss” is the root of the principal word for “cat” in the Romanian term pisica and the root of secondary words in Lithuanian (puz) and Low German puus. Some scholars suggest that “puss” could be imitative of the hissing sound used to get a cat’s attention. As a slang word for the female pudenda, it could be associated with the connotation of a cat being soft, warm, and fuzzy.",
"(16) Approximately 40,000 people are bitten by cats in the U.S. annually.",
"(17) Cats are North America’s most popular pets: there are 73 million cats compared to 63 million dogs. Over 30% of households in North America own a cat.",
"(18) According to Hebrew legend, Noah prayed to God for help protecting all the food he stored on the ark from being eaten by rats. In reply, God made the lion sneeze, and out popped a cat.",
"(19) A cat’s hearing is better than a dog’s. And a cat can hear high-frequency sounds up to two octaves higher than a human.",
"(20) A cat can travel at a top speed of approximately 31 mph (49 km) over a short distance.",
"(21) A cat rubs against people not only to be affectionate but also to mark out its territory with scent glands around its face. The tail area and paws also carry the cat’s scent.",
"(22) Researchers are unsure exactly how a cat purrs. Most veterinarians believe that a cat purrs by vibrating vocal folds deep in the throat. To do this, a muscle in the larynx opens and closes the air passage about 25 times per second.",
"(23) When a family cat died in ancient Egypt, family members would mourn by shaving off their eyebrows. They also held elaborate funerals during which they drank wine and beat their breasts. The cat was embalmed with a sculpted wooden mask and the tiny mummy was placed in the family tomb or in a pet cemetery with tiny mummies of mice.",
"(24) In 1888, more than 300,000 mummified cats were found an Egyptian cemetery. They were stripped of their wrappings and carted off to be used by farmers in England and the U.S. for fertilizer.",
"(25) Most cats give birth to a litter of between one and nine kittens. The largest known litter ever produced was 19 kittens, of which 15 survived.",
"(26) Smuggling a cat out of ancient Egypt was punishable by death. Phoenician traders eventually succeeded in smuggling felines, which they sold to rich people in Athens and other important cities.",
"(27) The earliest ancestor of the modern cat lived about 30 million years ago. Scientists called it the Proailurus, which means “first cat” in Greek. The group of animals that pet cats belong to emerged around 12 million years ago.",
"(28) The biggest wildcat today is the Siberian Tiger. It can be more than 12 feet (3.6 m) long (about the size of a small car) and weigh up to 700 pounds (317 kg).",
"(29) A cat’s brain is biologically more similar to a human brain than it is to a dog’s. Both humans and cats have identical regions in their brains that are responsible for emotions.",
"(30) Many Egyptians worshipped the goddess Bast, who had a woman’s body and a cat’s head.",
"(31) Mohammed loved cats and reportedly his favorite cat, Muezza, was a tabby. Legend says that tabby cats have an “M” for Mohammed on top of their heads because Mohammad would often rest his hand on the cat’s head.",
"(32) While many parts of Europe and North America consider the black cat a sign of bad luck, in Britain and Australia, black cats are considered lucky.",
"(33) The most popular pedigreed cat is the Persian cat, followed by the Maine Coon cat and the Siamese cat.",
"(34) The smallest pedigreed cat is a Singapura, which can weigh just 4 lbs (1.8 kg), or about five large cans of cat food. The largest pedigreed cats are Maine Coon cats, which can weigh 25 lbs (11.3 kg), or nearly twice as much as an average cat weighs.",
"(35) Some cats have survived falls of over 65 feet (20 meters), due largely to their “righting reflex.” The eyes and balance organs in the inner ear tell it where it is in space so the cat can land on its feet. Even cats without a tail have this ability.",
"(36) Some Siamese cats appear cross-eyed because the nerves from the left side of the brain go to mostly the right eye and the nerves from the right side of the brain go mostly to the left eye. This causes some double vision, which the cat tries to correct by “crossing” its eyes.",
"(37) Researchers believe the word “tabby” comes from Attabiyah, a neighborhood in Baghdad, Iraq. Tabbies got their name because their striped coats resembled the famous wavy patterns in the silk produced in this city.",
"(38) A cat can jump up to five times its own height in a single bound.",
"(39) Cats hate the water because their fur does not insulate well when it’s wet. The Turkish Van, however, is one cat that likes swimming. Bred in central Asia, its coat has a unique texture that makes it water resistant.",
"(40) The Egyptian Mau is probably the oldest breed of cat. In fact, the breed is so ancient that its name is the Egyptian word for “cat.”",
"(41) The first commercially cloned pet was a cat named “<NAME>”. He cost his owner $50,000, making him one of the most expensive cats ever.",
"(42) A cat usually has about 12 whiskers on each side of its face.",
"(43) A cat’s eyesight is both better and worse than humans. It is better because cats can see in much dimmer light and they have a wider peripheral view. It’s worse because they don’t see color as well as humans do. Scientists believe grass appears red to cats.",
"(44) Spanish-Jewish folklore recounts that Adam’s first wife, Lilith, became a black vampire cat, sucking the blood from sleeping babies. This may be the root of the superstition that a cat will smother a sleeping baby or suck out the child’s breath.",
"(45) Perhaps the most famous comic cat is the Cheshire Cat in Lewis Carroll’s Alice in Wonderland. With the ability to disappear, this mysterious character embodies the magic and sorcery historically associated with cats.",
"(46) The smallest wildcat today is the Black-footed cat. The females are less than 20 inches (50 cm) long and can weigh as little as 2.5 lbs (1.2 kg).",
"(47) On average, cats spend 2/3 of every day sleeping. That means a nine-year-old cat has been awake for only three years of its life.",
"(48) In the original Italian version of Cinderella, the benevolent fairy godmother figure was a cat.",
"(49) The little tufts of hair in a cat’s ear that help keep out dirt direct sounds into the ear, and insulate the ears are called “ear furnishings.”",
"(50) The ability of a cat to find its way home is called “psi-traveling.” Experts think cats either use the angle of the sunlight to find their way or that cats have magnetized cells in their brains that act as compasses.",
],
}
def webhook(request):
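# entry point for the Telegram webhook: parse the incoming update, scan the message text for trigger words, and reply with the mapped (or randomly chosen) response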
if request.method == "POST":
update = telegram.Update.de_json(request.get_json(force=True), bot)
chat_id = update.effective_message.chat.id
messagetext = update.effective_message.text
# direct 1:1 mapped responses
for trigger in singlereplydict:
try:
if trigger in messagetext.lower():
replytext = singlereplydict[trigger]
bot.sendMessage(chat_id=chat_id, text=replytext)
except AttributeError:
pass
# these responses have several options to be selected at random
for trigger in multireplydict:
try:
if trigger in messagetext.lower():
replytext = random.choice(multireplydict[trigger])
bot.sendMessage(chat_id=chat_id, text=replytext)
except AttributeError:
pass
return "ok"
| 2.46875 | 2 |
python/anagram_solver.py | patrickleweryharris/code-snippets | 5 | 12791672 | import itertools
# This snippet has been turned into a full repo:
# github.com/patrickleweryharris/anagram_solver
def anagram_solver(lst):
"""
Return all possible combinations of letters in lst
@type lst: [str]
@rtype: None
"""
# only full-length arrangements are anagrams, so generate those permutations directly
for subset in itertools.permutations(lst, len(lst)):
print(''.join(subset))
if __name__ == '__main__':
lst = ['o', 'r', 'y', 'a', 'n']
anagram_solver(lst)
| 3.84375 | 4 |
pwm_motor_control_ros/test/integration/pwm_test_support/xbox_controller_joy_stub.py | GTAeberhard/pwm_motor_control_ros | 0 | 12791673 | <filename>pwm_motor_control_ros/test/integration/pwm_test_support/xbox_controller_joy_stub.py
from sensor_msgs.msg import Joy
class XboxControllerJoyStub:
@staticmethod
def right_trigger_depressed():
joy_msg = XboxControllerJoyStub.idle_controller()
joy_msg.axes[5] = -1.0
return joy_msg
@staticmethod
def right_trigger_half_depressed():
joy_msg = XboxControllerJoyStub.idle_controller()
joy_msg.axes[5] = 0.0
return joy_msg
@staticmethod
def left_trigger_depressed():
joy_msg = XboxControllerJoyStub.idle_controller()
joy_msg.axes[2] = -1.0
return joy_msg
@staticmethod
def left_trigger_half_depressed():
joy_msg = XboxControllerJoyStub.idle_controller()
joy_msg.axes[2] = 0.0
return joy_msg
@staticmethod
def idle_controller():
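# neutral controller state: all axes centred at 0.0 except the trigger axes (2 and 5), which rest at +1.0 when released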
joy_msg = Joy()
joy_msg.axes = [ 0.0 for i in range(8)]
joy_msg.buttons = [ 0 for i in range(15)]
joy_msg.axes[2] = 1.0
joy_msg.axes[5] = 1.0
return joy_msg
| 2.28125 | 2 |
BranchFilters/HeadToMasterBranchFilterer.py | Christian-Nunnally/git-chains | 0 | 12791674 | from BranchFilters.BranchFilterer import BranchFilterer
from Interoperability.ShellCommandExecuter import ShellCommandExecuter
from RepositoryWalkers.BranchToCommitWalker import BranchToCommitWalker
from Logger import Logger
class HeadToMasterBranchFilterer(BranchFilterer):
def __init__(self, repository):
self.logger = Logger(self)
self.repository = repository
self.repository_directory = repr(repository).split('\'')[1][:-4]
self.head_branch_name = self.repository.head.name[11:]
self.generate_log_from_head_to_merge_base()
def generate_log_from_head_to_merge_base(self):
self.logger.log("Determining commit ids between the current head and master:")
self.log_from_head_to_merge_base = []
self.logger.log("v head v")
for id in self.walk_log_from_head_to_merge_base():
self.log_from_head_to_merge_base.append(id)
self.logger.log(id)
self.logger.log("^ master ^")
def walk_log_from_head_to_merge_base(self):
head_master_merge_base = self.get_merge_base("master", self.head_branch_name)
walker = BranchToCommitWalker(self.repository, head_master_merge_base)
head_branch = self.repository.branches[self.head_branch_name]
for commit in walker.walk(head_branch):
yield commit.hex
def get_merge_base(self, branch_name, other_branch_name):
args = ['git', 'merge-base', branch_name, other_branch_name]
executer = ShellCommandExecuter(self.repository_directory, args)
return executer.execute_for_output()
def should_include_branch(self, branch_name):
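# a branch is kept only if its merge base with HEAD lies on the HEAD..master commit range computed above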
merge_base = self.get_merge_base(self.head_branch_name, branch_name)
return merge_base in self.log_from_head_to_merge_base
| 2.40625 | 2 |
arviz/plots/backends/__init__.py | Ban-zee/arviz | 0 | 12791675 | <gh_stars>0
"""ArviZ plotting backends."""
| 1.070313 | 1 |
Data Science With Python/14-interactive-data-visualization-with-bokeh/01-basic-plotting-with-bokeh/12-color-mapping.py | aimanahmedmoin1997/DataCamp | 3 | 12791676 | '''
Colormapping
The final glyph customization we'll practice is using the CategoricalColorMapper to color each glyph by a categorical property.
Here, you're going to use the automobile dataset to plot miles-per-gallon vs weight and color each circle glyph by the region where the automobile was manufactured.
The origin column will be used in the ColorMapper to color automobiles manufactured in the US as blue, Europe as red and Asia as green.
The automobile data set is provided to you as a Pandas DataFrame called df. The figure is provided for you as p.
INSTRUCTIONS
100XP
Import CategoricalColorMapper from bokeh.models.
Convert the DataFrame df to a ColumnDataSource called source. This has already been done for you.
Make a CategoricalColorMapper object called color_mapper with the CategoricalColorMapper() function. It has two parameters here: factors and palette.
Add a circle glyph to the figure p to plot 'mpg' (on the y-axis) vs 'weight' (on the x-axis). Remember to pass in source and 'origin' as arguments to source and legend. For the color parameter, use dict(field='origin', transform=color_mapper).
'''
#Import CategoricalColorMapper from bokeh.models
from bokeh.models import CategoricalColorMapper
# Convert df to a ColumnDataSource: source
source = ColumnDataSource(df)
# Make a CategoricalColorMapper object: color_mapper
color_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'],
palette=['red', 'green', 'blue'])
# Add a circle glyph to the figure p
p.circle('weight', 'mpg', source=source,
color=dict(field='origin', transform=color_mapper),
legend='origin')
# Specify the name of the output file and show the result
output_file('colormap.html')
show(p)
| 3.765625 | 4 |
esipysi/esipysi.py | FlyingKiwiBird/EsiPysi | 7 | 12791677 | import asyncio
import aiohttp
import requests
import json
from .op import EsiOp
from .auth import EsiAuth
from .cache import EsiCache, DictCache
from .esisession import EsiSession
import logging
logger = logging.getLogger("EsiPysi")
class EsiPysi(object):
"""
The EsiPysi class creates "EsiOp" operations based on a provided swagger spec
"""
def __init__(self, swagger_url, **kwargs):
"""
Initialize the class
Arguments:
swagger_url -- Url to the swagger spec
Keyword arguments:
user_agent -- user agent to send with ESI calls
cache -- EsiCache object to use for caching
auth -- EsiAuth to use for authorized calls to ESI
retries -- Number of retries when ESI returns a retryable error, 0 disables, -1 is unlimited
loop -- Event loop to use for asyncio
session -- aiohttp session to use, note: loop will be useless if set with session, set the loop you want in the session instead
"""
self.args = kwargs
cache = kwargs.get("cache", DictCache())
if cache is not None:
if not issubclass(type(cache), EsiCache):
raise TypeError("cache should be of the type EsiCache")
session = self.args.get('session')
if session is not None:
if not isinstance(session, aiohttp.ClientSession):
raise TypeError("session must be a aiohttp ClientSession")
self.operations = {}
self.data = {}
r = requests.get(swagger_url)
try:
data = r.json()
except:
logger.exception("Parse error, spec written to file")
with open('esi-spec-error.json', 'w') as esifile:
esifile.write(r.text)
return
finally:
r.close()
self.data = data
self.__analyze_swagger()
def session(self):
session = EsiSession(self.base_url, self.operations, **self.args)
return session
def __analyze_swagger(self):
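# flatten the swagger spec: index each operation by operationId, record its path and HTTP verb, and resolve "$ref" parameter definitions inline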
#Get base url
self.base_url = "https://" + self.data.get("host","") + self.data.get("basePath", "")
#Reformat json
paths = self.data.get("paths", {})
#each path
for route, verbs in paths.items():
#each http verb in a path
for verb, operation in verbs.items():
operation_id = operation.get("operationId")
if operation_id is None:
continue
new_op = operation.copy()
new_op["path"] = route
new_op["verb"] = verb
#Handle parameter refs
params = operation.get("parameters")
new_op["parameters"] = {}
for param in params:
path = param.get("$ref")
if path is None:
param_details = param.copy()
else:
param_details = self.__get_ref(path)
param_name = param_details.get("name")
new_op["parameters"][param_name] = param_details
self.operations[operation_id] = new_op
def __get_ref(self, path):
path_split = path.split("/")
if path_split[0] != "#":
#Unsupported
return None
ref = self.data
for i in range(1, len(path_split)):
ref = ref.get(path_split[i], {})
return ref
| 2.53125 | 3 |
language/__init__.py | UoA-ECE-RP/sha | 1 | 12791678 | __author__ = 'Avinash'
| 0.988281 | 1 |
Part 1/Chapter 4/example 1.1.py | MineSelf2016/PythonInEconomicManagement | 0 | 12791679 | <filename>Part 1/Chapter 4/example 1.1.py<gh_stars>0
score = 92
print("优秀") if score >= 90 else print("及格")  # prints "优秀" (excellent) if score >= 90, otherwise "及格" (pass)
a = 1
b = 2
print(type(a))
print(type(b))
print(a/b) | 3.328125 | 3 |
core/extract_plain_text.py | Gatorix/tranSub | 1 | 12791680 | <filename>core/extract_plain_text.py
import utils
import sys
path = r'/Users/caosheng/Downloads/Kota Factory (webm)/(English)(499) Kota Factory - EP 01 - Inventory - YouTube.srt'
output_file_name = utils.get_filename(path)
def extract_plain_text(path, english_only=False, chinese_only=False):
timer = utils.Timer()
timer.start()
subs = utils.load_sub_file(path)
plaintext = utils.get_plaintext(subs)
if english_only and chinese_only:
print('仅保留中文和仅保留英文不能同时勾选\nChinese only and English only cannot be checked at the same time')
sys.exit(0)
elif chinese_only:
chinese_lines=[]
for i in range(len(plaintext)):
chinese_lines.append(utils.chinese_only(plaintext[i])+'\n')
utils.write_lines('%s.txt' % (output_file_name), chinese_lines)
elif english_only:
english_lines=[]
for i in range(len(plaintext)):
english_lines.append(utils.english_only(plaintext[i])+'\n')
utils.write_lines('%s.txt' % (output_file_name), english_lines)
else:
utils.write_lines('%s.txt' % (output_file_name), plaintext)
timer.stop()
print('提取完成,用时%.2f秒' % (timer.elapsed))  # "Extraction finished, took %.2f seconds"
extract_plain_text(path)
| 2.796875 | 3 |
python/gpio.py | trojanspike/qbian-server | 0 | 12791681 | <gh_stars>0
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
try:
GPIO.setup(17, GPIO.OUT) # GPIO17
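# blink: drive GPIO17 high then low every 1.5 s until interrupted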
while True:
GPIO.output(17, GPIO.HIGH)
sleep(1.5)
GPIO.output(17, GPIO.LOW)
sleep(1.5)
except KeyboardInterrupt:
GPIO.cleanup();
| 2.828125 | 3 |
branchdb/git_tools.py | CalgaryMichael/branchdb-python | 0 | 12791682 | <reponame>CalgaryMichael/branchdb-python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from git import Repo
def get_repo():
"""Returns the Repo of the current directory"""
call_dir = os.getcwd()
return Repo(call_dir, search_parent_directories=True)
def get_project_root(repo=None):
"""Returns the path to the top-level directory of current project"""
if repo is None:
repo = get_repo()
return repo.git.rev_parse(u"--show-toplevel")
def get_branch_and_root():
"""Returns the active branch name and the current project's root path"""
repo = get_repo()
root = get_project_root(repo)
return repo.active_branch.name, root
| 2.6875 | 3 |
discord/webhook/sync.py | RamzziSudip/nextcord | 3 | 12791683 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present tag-epic
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------
Aliased module. See the same file in the nextcord folder for more information
Autogenerated by aliasgen.py
"""
from nextcord.webhook.sync import (
MISSING,
TYPE_CHECKING,
Any,
BaseWebhook,
DeferredLock,
Dict,
DiscordServerError,
Forbidden,
HTTPException,
InvalidArgument,
List,
Literal,
Message,
NotFound,
Optional,
PartialMessageable,
Route,
SyncWebhook,
SyncWebhookMessage,
Tuple,
Type,
TypeVar,
Union,
WebhookAdapter,
_context,
_get_webhook_adapter,
_log,
_WebhookContext,
_WebhookState,
annotations,
handle_message_parameters,
json,
logging,
overload,
re,
threading,
time,
urlquote,
utils,
)
__all__ = ("SyncWebhook", "SyncWebhookMessage")
| 0.96875 | 1 |
player.py | duct-tape/taped-car-stereo | 0 | 12791684 | import pifacecad
import asyncore
from mplayer.async import AsyncPlayer
def handle_data(data):
if not data.startswith('EOF code'):
print('log: %s' % (data, ))
else:
player.quit()
def init_player():
# Don't autospawn because we want to setup the args later
player = AsyncPlayer(autospawn=False)
# Setup additional args
player.args = ['-really-quiet', '-msglevel', 'global=6']
# hook a subscriber to MPlayer's stdout
player.stdout.hook(handle_data)
# Manually spawn the MPlayer process
player.spawn()
# play a file
player.loadfile('/home/pi/y.mp3')
metadata = player.metadata or {}
cad = init_cad()
cad.lcd.write('P: {name}'.format(name=metadata.get('Title', '')))
listener = pifacecad.SwitchEventListener(chip=cad)
def play_next(event):
print(str(event.pin_num))
player.loadfile('/home/pi/c.mp3')
# for i in range(8):
listener.register(0, pifacecad.IODIR_FALLING_EDGE, play_next)
listener.activate()
# run the asyncore event loop
asyncore.loop()
def init_cad():
cad = pifacecad.PiFaceCAD()
return cad
if __name__ == '__main__':
init_player()
| 2.625 | 3 |
i_xero2/__init__.py | aracnid/i-xero2 | 0 | 12791685 | """A set of functions to retrieve and save data into Xero.
"""
# import package modules
from i_xero2.i_xero import ExpiredCredentialsException
from i_xero2.i_xero import XeroInterface
from i_xero2.i_xero_ui import XeroInterfaceUI
__version__ = '2.4.2'
| 1.710938 | 2 |
lib/utils.py | chawins/entangle-rep | 15 | 12791686 | import numpy as np
import torch
def compute_lid(x, x_train, k, exclude_self=False):
"""
Calculate LID using the estimation from [1]
[1] Ma et al., "Characterizing Adversarial Subspaces Using
Local Intrinsic Dimensionality," ICLR 2018.
"""
with torch.no_grad():
x = x.view((x.size(0), -1))
x_train = x_train.view((x_train.size(0), -1))
lid = torch.zeros((x.size(0), ))
for i, x_cur in enumerate(x):
dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
# `largest` should be True when using cosine distance
if exclude_self:
topk_dist = dist.topk(k + 1, largest=False)[0][1:]
else:
topk_dist = dist.topk(k, largest=False)[0]
mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
lid[i] = -1 / mean_log
return lid
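# Illustrative usage (a minimal sketch, not in the original file): estimating LID for a small
# batch of query points against a reference set. The shapes and k below are arbitrary choices.
#
#     x_train = torch.randn(1000, 3, 32, 32)     # reference samples
#     x_query = torch.randn(8, 3, 32, 32)         # points whose LID we want
#     lid = compute_lid(x_query, x_train, k=20)   # -> tensor of shape (8,)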
# def cal_class_lid(x, x_train, k, exclude_self=False):
# """
# Calculate LID on sample using the estimation from [1]
# [1] Ma et al., "Characterizing Adversarial Subspaces Using
# Local Intrinsic Dimensionality," ICLR 2018.
# """
# x = x.view((x.size(0), -1))
# x_train = x_train.view((x_train.size(0), -1))
# lid = torch.zeros((x.size(0), ))
# for i, x_cur in enumerate(x):
# dist = (x_cur.view(1, -1) - x_train).norm(2, 1)
# # `largest` should be True when using cosine distance
# if exclude_self:
# topk_dist = dist.topk(k + 1, largest=False)[0][1:]
# else:
# topk_dist = dist.topk(k, largest=False)[0]
# mean_log = torch.log(topk_dist / topk_dist[-1]).mean()
# lid[i] = -1 / mean_log
# return lid
def compute_spnorm(inputs, dknn, layers, batch_size=200):
assert inputs.requires_grad
num_total = inputs.size(0)
norm = np.zeros((num_total, len(layers)))
num_batches = int(np.ceil(num_total / batch_size))
for i in range(num_batches):
begin, end = i * batch_size, (i + 1) * batch_size
x = inputs[begin:end]
reps = dknn.get_activations(x)
for l, layer in enumerate(layers):
y = reps[layer]
norm[begin:end, l] = compute_spnorm_batch(x, y)
return norm
def compute_spnorm_batch(inputs, output):
"""
:param inputs: (batch_size, input_size)
:param output: (batch_size, output_size)
    :return: norm: (batch_size, ) spectral norm of the per-sample Jacobian
"""
batch_size, input_dim = inputs.view(inputs.size(0), -1).size()
output = output.view(batch_size, -1)
jacobian = torch.zeros((batch_size, output.size(1), input_dim))
for i in range(output.size(1)):
grad = torch.autograd.grad(
output[:, i].sum(), inputs, retain_graph=True)[0]
jacobian[:, i, :] = grad.view(batch_size, input_dim)
norm = np.zeros((batch_size, ))
for i in range(batch_size):
norm[i] = np.linalg.norm(jacobian[i].detach().cpu().numpy(), 2)
return norm
| 2.4375 | 2 |
plico/utils/loop.py | lbusoni/plico | 0 | 12791687 | import abc
from six import with_metaclass
class Loop(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def name(self):
assert False
@abc.abstractmethod
def close(self):
assert False
@abc.abstractmethod
def open(self):
assert False
@abc.abstractmethod
def isClosed(self):
assert False
@abc.abstractmethod
def performOnePass(self):
assert False
@abc.abstractmethod
def getConvergenceStepCount(self):
assert False
@abc.abstractmethod
def hasConverged(self):
assert False
class LoopException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
| 2.9375 | 3 |
neuralmt/utils.py | anoopsarkar/nlp-class-hw | 7 | 12791688 | <gh_stars>1-10
import io
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
def alphaPlot(alpha,
output,
source):
plt.figure(figsize=(14, 6))
sns.heatmap(alpha, xticklabels=output.split(), yticklabels=source.split(),
linewidths=.05, cmap="Blues")
plt.ylabel('Source')
plt.xlabel('Target')
plt.xticks(rotation=60)
plt.yticks(rotation=0)
plt.tight_layout()
buff = io.BytesIO()
plt.savefig(buff, format='jpg')
buff.seek(0)
return np.array(Image.open(buff))
| 2.9375 | 3 |
bookorbooks/school/models/class_model.py | talhakoylu/SummerInternshipBackend | 1 | 12791689 | <reponame>talhakoylu/SummerInternshipBackend
from django.core.exceptions import ValidationError
from constants.school_strings import SchoolStrings
from django.db import models
from school.models.abstract_base_model import AbstractSchoolBaseModel
class Class(AbstractSchoolBaseModel):
school = models.ForeignKey(
"school.School",
on_delete=models.CASCADE,
related_name="classes_school",
verbose_name=SchoolStrings.ClassStrings.school_verbose_name)
instructor = models.ForeignKey(
"account.InstructorProfile",
on_delete=models.CASCADE,
related_name="instructors_school",
verbose_name=SchoolStrings.ClassStrings.instructor_verbose_name)
name = models.CharField(
max_length=50,
verbose_name=SchoolStrings.ClassStrings.name_verbose_name)
grade = models.IntegerField(
verbose_name=SchoolStrings.ClassStrings.grade_verbose_name)
class Meta:
verbose_name = SchoolStrings.ClassStrings.meta_verbose_name
verbose_name_plural = SchoolStrings.ClassStrings.meta_verbose_name_plural
ordering = ["name", "grade"]
def __str__(self):
return f"{self.school.name} - {self.name} - Grade: {self.grade}"
def clean(self) -> None:
"""
This method checks whether the teacher trying to be assigned to the class is working in that school.
"""
if self.instructor.school != self.school:
            raise ValidationError(SchoolStrings.ClassStrings.instructor_not_working_at_this_school_error)
| 2.375 | 2 |
app/admin/__init__.py | sunshineinwater/flask-Purchase_and_sale | 122 | 12791690 | <filename>app/admin/__init__.py
#-*- coding:utf-8 -*-
# author:Agam
# datetime:2018-11-05
from flask import Blueprint
admin=Blueprint('admin',__name__)
import app.admin.views
| 1.46875 | 1 |
cstorage/tests/listen-gcs.py | sebgoa/triggers | 4 | 12791691 | <filename>cstorage/tests/listen-gcs.py
#!/usr/bin/env python
from google.cloud import pubsub_v1
sub = pubsub_v1.SubscriberClient()
#topic = 'project/skippbox/topics/barfoo'
sub_name = 'projects/skippbox/subscriptions/carogoasub'
subscription = sub.subscribe(sub_name)
def callback(message):
print message
message.ack()
future = subscription.open(callback)
future.result()
| 2.078125 | 2 |
scripts/irgen.py | srijan-paul/meep | 6 | 12791692 | <gh_stars>1-10
# This is a helper script to automatically generate code
# since Javascript doesn't have enums and using objects as \
# enums is painful.
import re
IR = """
pop_ push_ inc dec
add sub equals
set_var get_var
inc_n false_ true_
load_byte print start_if close_if_body
end_if start_else end_else start_loop end_loop
popn cmp_less cmp_greater load_string make_bus
index_var not make_sized_bus set_at_index input
len
"""
opcodes = re.findall(r"\w+", IR)
irFile = open('../src/ir.js', 'w')
out = "const IR = Object.freeze({\n"
k = 0
for op in opcodes:
out += f"\t{op}: {k},\n"
k += 1
out += "});\n\n"
out += """
function irToString(op) {
\tswitch(op) {
"""
for op in opcodes:
out += f"\tcase IR.{op}: return '{op.upper()}';\n"
out += "\t}\n}"
out += """
module.exports = {IR, irToString};
"""
irFile.write(out)
| 2.609375 | 3 |
janitor/functions/groupby_agg.py | thatlittleboy/pyjanitor | 225 | 12791693 | from typing import Callable, List, Union
import pandas_flavor as pf
import pandas as pd
from janitor.utils import deprecated_alias
@pf.register_dataframe_method
@deprecated_alias(new_column="new_column_name", agg_column="agg_column_name")
def groupby_agg(
df: pd.DataFrame,
by: Union[List, Callable, str],
new_column_name: str,
agg_column_name: str,
agg: Union[Callable, str],
dropna: bool = True,
) -> pd.DataFrame:
"""Shortcut for assigning a groupby-transform to a new column.
This method does not mutate the original DataFrame.
Intended to be the method-chaining equivalent of:
```python
df = df.assign(...=df.groupby(...)[...].transform(...))
```
Example: Basic usage.
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({
... "item": ["shoe", "shoe", "bag", "shoe", "bag"],
... "quantity": [100, 120, 75, 200, 25],
... })
>>> df.groupby_agg(
... by="item",
... agg="mean",
... agg_column_name="quantity",
... new_column_name="avg_quantity",
... )
item quantity avg_quantity
0 shoe 100 140.0
1 shoe 120 140.0
2 bag 75 50.0
3 shoe 200 140.0
4 bag 25 50.0
Example: Set `dropna=False` to compute the aggregation, treating the null
values in the `by` column as an isolated "group".
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({
... "x": ["a", "a", None, "b"], "y": [9, 9, 9, 9],
... })
>>> df.groupby_agg(
... by="x",
... agg="count",
... agg_column_name="y",
... new_column_name="y_count",
... dropna=False,
... )
x y y_count
0 a 9 2
1 a 9 2
2 None 9 1
3 b 9 1
:param df: A pandas DataFrame.
:param by: Column(s) to groupby on, will be passed into `DataFrame.groupby`.
:param new_column_name: Name of the aggregation output column.
:param agg_column_name: Name of the column to aggregate over.
:param agg: How to aggregate.
:param dropna: Whether or not to include null values, if present in the
`by` column(s). Default is True (null values in `by` are assigned NaN in
the new column).
:returns: A pandas DataFrame.
""" # noqa: E501
return df.assign(
**{
new_column_name: df.groupby(by, dropna=dropna)[
agg_column_name
].transform(agg),
}
)
| 3.578125 | 4 |
lambda/build/lambda_start_pipeline.py | acere/amazon-sagemaker-drift-detection | 27 | 12791694 | <gh_stars>10-100
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
import json
import os
import logging
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
config = Config(retries={"max_attempts": 10, "mode": "standard"})
codepipeline = boto3.client("codepipeline", config=config)
sm_client = boto3.client("sagemaker")
def check_pipeline(job_id, pipeline_name, pipeline_execution_arn=None):
try:
if pipeline_execution_arn is None:
logger.info(
f"Starting SageMaker Pipeline: {pipeline_name} for job: {job_id}"
)
response = sm_client.start_pipeline_execution(
PipelineName=pipeline_name,
PipelineExecutionDisplayName=f"codepipeline-{job_id}",
PipelineParameters=[
{"Name": "InputSource", "Value": "CodePipeline"},
],
PipelineExecutionDescription="SageMaker Drift Detection Pipeline",
ClientRequestToken=job_id,
)
logger.debug(response)
pipeline_execution_arn = response["PipelineExecutionArn"]
logger.info(f"SageMaker Pipeline arn: {pipeline_execution_arn} started")
else:
logger.info(
f"Checking SageMaker Pipeline: {pipeline_execution_arn} for job: {job_id}"
)
response = sm_client.describe_pipeline_execution(
PipelineExecutionArn=pipeline_execution_arn
)
logger.debug(response)
pipeline_execution_status = response["PipelineExecutionStatus"]
logger.info(
f"SageMaker Pipeline arn: {pipeline_execution_arn} {pipeline_execution_status}"
)
if pipeline_execution_status in ["Failed", "Stopped"]:
result = {
"type": "JobFailed",
"message": f"Pipeline Status is {pipeline_execution_status}",
"externalExecutionId": pipeline_execution_arn,
}
codepipeline.put_job_failure_result(jobId=job_id, failureDetails=result)
return 400, result
elif pipeline_execution_status in ["Executing", "Succeeded"]:
result = {
"Status": pipeline_execution_status,
"PipelineExecutionArn": pipeline_execution_arn,
}
codepipeline.put_job_success_result(
jobId=job_id, outputVariables=result
)
return 200, result
logger.info(f"Continuing code pipeline job: {job_id}")
codepipeline.put_job_success_result(
jobId=job_id,
continuationToken=pipeline_execution_arn,
)
return 202, {"PipelineExecutionArn": pipeline_execution_arn}
except ClientError as e:
error_code = e.response["Error"]["Code"]
error_message = e.response["Error"]["Message"]
result = {
"type": "JobFailed",
"message": error_message,
}
logger.error(error_message)
if error_code != "InvalidJobStateException":
codepipeline.put_job_failure_result(jobId=job_id, failureDetails=result)
return 500, result
except Exception as e:
logger.error(e)
raise e
def lambda_handler(event, context):
logger.debug(json.dumps(event))
job_id = event["CodePipeline.job"]["id"]
job_data = event["CodePipeline.job"]["data"]
user_parameters = job_data["actionConfiguration"]["configuration"]["UserParameters"]
pipeline_name = json.loads(user_parameters)["PipelineName"]
pipeline_execution_arn = None
if "continuationToken" in job_data:
pipeline_execution_arn = job_data["continuationToken"]
status_code, result = check_pipeline(job_id, pipeline_name, pipeline_execution_arn)
logger.debug(json.dumps(result))
return {"statusCode": status_code, "body": json.dumps(result)}
| 1.992188 | 2 |
archive/srcKelly/optimizationMooring.py | mattEhall/FloatingSE | 1 | 12791695 | <gh_stars>1-10
from openmdao.main.api import Component, Assembly, convert_units
from openmdao.main.datatypes.api import Float, Array, Enum, Str, Int, Bool
from openmdao.lib.drivers.api import COBYLAdriver, SLSQPdriver
from mooring import Mooring
import time
import numpy as np
class optimizationMooring(Assembly):
# variables
def configure(self):
self.add('driver',COBYLAdriver())
self.add('mooring',Mooring())
self.driver.workflow.add('mooring')
self.driver.add_objective('mooring.mooring_total_cost')
self.driver.add_parameter('mooring.scope_ratio',low=15.,high=50.,scaler=0.1)
self.driver.add_parameter('mooring.pretension_percent',low=2.5,high=20.)
self.driver.add_parameter('mooring.mooring_diameter',low=3.,high=10.,scaler=0.01)
self.driver.add_constraint('mooring.heel_angle <= 6.')
self.driver.add_constraint('mooring.min_offset_unity < 1.0')
self.driver.add_constraint('mooring.max_offset_unity < 1.0')
def sys_print(example):
print 'scope ratio: ',example.scope_ratio
print 'pretension percent: ',example.pretension_percent
print 'mooring diameter: ',example.mooring_diameter
print 'heel angle: ',example.heel_angle
print 'min offset unity: ',example.min_offset_unity
print 'max offset unity: ',example.max_offset_unity
print 'total mooring cost: ',example.mooring_total_cost
def example_218WD_3MW():
tt = time.time()
example = optimizationMooring()
# Mooring,settings
example.mooring.fairlead_depth = 13.
example.mooring.scope_ratio = 1.5
example.mooring.pretension_percent = 5.0
example.mooring.mooring_diameter = 0.090
example.mooring.number_of_mooring_lines = 3
example.mooring.permanent_ballast_height = 3.
example.mooring.fixed_ballast_height = 5.
example.mooring.permanent_ballast_density = 4492.
example.mooring.fixed_ballast_density = 4000.
example.mooring.water_depth = 218.
example.mooring.mooring_type = 'CHAIN'
example.mooring.anchor_type = 'PILE'
example.mooring.fairlead_offset_from_shell = 0.5
# from,spar.py
example.mooring.shell_buoyancy = [0.000,144905.961,688303.315,3064761.078]
example.mooring.shell_mass = [40321.556,88041.563,137796.144,518693.048]
example.mooring.bulkhead_mass = [0.000,10730.836,0.000,24417.970]
example.mooring.ring_mass = [1245.878,5444.950,6829.259,28747.490]
example.mooring.spar_start_elevation = [13., 7., -5., -20.]
example.mooring.spar_end_elevation = [7., -5., -20., -67.]
example.mooring.spar_keel_to_CG = 35.861
example.mooring.spar_keel_to_CB = 30.324
example.mooring.spar_outer_diameter = [5.000,6.000,6.000,9.000]
example.mooring.spar_wind_force = [1842.442,1861.334,0.000,0.000]
example.mooring.spar_wind_moment = [100965.564,85586.296,0.000,0.000]
example.mooring.spar_current_force = [0.000,449016.587,896445.823,49077.906]
example.mooring.spar_current_moment = [0.000,19074749.640,28232958.052,72692.688]
example.mooring.wall_thickness = [0.05,0.05,0.05,0.05]
example.mooring.load_condition = 'N'
# from,tower_RNA.py
example.mooring.RNA_mass = 125000.000
example.mooring.tower_mass = 127877.000
example.mooring.tower_center_of_gravity = 23.948
example.mooring.RNA_keel_to_CG = 142.000
example.mooring.tower_wind_force = 19950.529
example.mooring.tower_wind_moment = 1634522.835
example.mooring.RNA_wind_force = 391966.178
example.mooring.RNA_wind_moment = 47028560.389
example.mooring.RNA_center_of_gravity_x = 4.1
example.run()
print '--------------example_218WD_3MW------------------'
print "Elapsed time: ", time.time()-tt, " seconds"
sys_print(example.mooring)
def example_218WD_6MW():
tt = time.time()
example = optimizationMooring()
example.mooring.fairlead_depth = 13.
example.mooring.scope_ratio = 1.5
example.mooring.pretension_percent = 5.0
example.mooring.mooring_diameter = 0.090
example.mooring.number_of_mooring_lines = 3
example.mooring.permanent_ballast_height = 3.
example.mooring.fixed_ballast_height = 7.
example.mooring.permanent_ballast_density = 4492.
example.mooring.fixed_ballast_density = 4000.
example.mooring.water_depth = 218.
example.mooring.mooring_type = 'CHAIN'
example.mooring.anchor_type = 'PILE'
example.mooring.fairlead_offset_from_shell = 0.5
example.mooring.shell_buoyancy = [0.000,257610.598,1356480.803,7074631.036]
example.mooring.shell_mass = [55118.458,117635.366,193284.525,830352.783]
example.mooring.bulkhead_mass = [0.000,19239.055,0.000,51299.008]
example.mooring.ring_mass = [3838.515,16391.495,21578.677,127137.831]
example.mooring.spar_start_elevation = [13., 7., -5., -20.]
example.mooring.spar_end_elevation = [7., -5., -20., -72.]
example.mooring.spar_keel_to_CG = 37.177
example.mooring.spar_keel_to_CB = 32.337
example.mooring.spar_outer_diameter = [7.,8.,8.,13.]
example.mooring.spar_wind_force = [2374.194,2345.237,0.000,0.000]
example.mooring.spar_wind_moment = [137246.585,114777.740,0.000,0.0000]
example.mooring.spar_current_force = [0.000,824040.566,1968613.701,182335.850]
example.mooring.spar_current_moment = [0.000,37445057.967,67469109.912,353876.402]
example.mooring.wall_thickness = [0.05,0.05,0.05,0.05]
example.mooring.load_condition = 'N'
example.mooring.RNA_mass = 365500.000
example.mooring.tower_mass = 366952.000
example.mooring.tower_center_of_gravity = 33.381
example.mooring.RNA_keel_to_CG = 169.000
example.mooring.tower_wind_force = 33125.492
example.mooring.tower_wind_moment = 3124462.452
example.mooring.RNA_wind_force = 820818.422
example.mooring.RNA_wind_moment = 118970074.187
example.mooring.RNA_center_of_gravity_x = 5.750
example.run()
print '--------------example_218WD_6MW------------------'
print "Elapsed time: ", time.time()-tt, " seconds"
sys_print(example.mooring)
def example_218WD_10MW():
tt = time.time()
example = optimizationMooring()
example.mooring.fairlead_depth = 13.
example.mooring.scope_ratio = 1.5
example.mooring.pretension_percent = 5.0
example.mooring.mooring_diameter = 0.090
example.mooring.number_of_mooring_lines = 3
example.mooring.permanent_ballast_height = 4.
example.mooring.fixed_ballast_height = 9.
example.mooring.permanent_ballast_density = 4492.
example.mooring.fixed_ballast_density = 4000.
example.mooring.water_depth = 218.
example.mooring.mooring_type = 'CHAIN'
example.mooring.anchor_type = 'PILE'
example.mooring.fairlead_offset_from_shell = 0.5
example.mooring.shell_buoyancy = [0.000,326038.413,1775098.024,13041536.503]
example.mooring.shell_mass = [62516.908,132432.268,221028.715,1335368.667]
example.mooring.bulkhead_mass = [0.000,24417.970,0.000,68438.752]
example.mooring.ring_mass = [6963.553,29512.202,39460.135,617575.510]
example.mooring.spar_start_elevation = [13., 7., -5., -20.]
example.mooring.spar_end_elevation = [7., -5., -20., -92.]
example.mooring.spar_keel_to_CG = 45.
example.mooring.spar_keel_to_CB = 42.108
example.mooring.spar_outer_diameter = [8.,9.,9.,15.]
example.mooring.spar_wind_force = [2572.428,2522.369,0.000,0.000]
example.mooring.spar_wind_moment = [183034.454,157067.701,0.000,0.000]
example.mooring.spar_current_force = [0.000,1125719.734,3051908.296,425853.543]
example.mooring.spar_current_moment = [0.000,66158450.987,145104271.963,2244211.189]
example.mooring.wall_thickness = [0.050,0.050,0.050,0.050]
example.mooring.load_condition = 'N'
example.mooring.RNA_mass = 677000.000
example.mooring.tower_mass = 698235.000
example.mooring.tower_center_of_gravity = 40.983
example.mooring.RNA_keel_to_CG = 211.000
example.mooring.tower_wind_force = 53037.111
example.mooring.tower_wind_moment = 6112673.024
example.mooring.RNA_wind_force = 1743933.574
example.mooring.RNA_wind_moment = 314378753.986
example.mooring.RNA_center_of_gravity_x = 7.070
example.run()
print '--------------example_218WD_10MW------------------'
print "Elapsed time: ", time.time()-tt, " seconds"
sys_print(example.mooring)
if __name__ == "__main__":
#example_218WD_3MW()
#example_218WD_6MW()
    example_218WD_10MW()
| 2.0625 | 2 |
OCR.py | developerVictorNkuna/frontend-tools | 0 | 12791696 | import pytesseract
import requests
from PIL import Image
from PIL import ImageFilter
import io
def process_image(url):
    image = _get_image(url)
    # Image.filter() returns a new image rather than modifying in place
    image = image.filter(ImageFilter.SHARPEN)
    return pytesseract.image_to_string(image)
def _get_image(url):
    # requests exposes the response body as the bytes attribute .content (not a callable)
    image_bytes = requests.get(url).content
    return Image.open(io.BytesIO(image_bytes))
| 2.875 | 3 |
Jasper/Light.py | Granyy/maison_intelligente | 0 | 12791697 | #******************************************************************************#
#* @TITRE : Light.py *#
#* @VERSION : 1.0 *#
#* @CREATION : 05 01, 2017 *#
#* @MODIFICATION : 05 21, 2017 *#
#* @AUTEURS : <NAME> *#
#* @COPYRIGHT : Copyright (c) 2017 *#
#* <NAME> *#
#* <NAME> *#
#* <NAME> *#
#* <NAME> *#
#* <NAME> *#
#* @LICENSE : MIT License (MIT) *#
#******************************************************************************#
import re
import datetime
import struct
import urllib
import feedparser
import requests
import bs4
from client.app_utils import getTimezone
from semantic.dates import DateService
WORDS = ["LIGHT", "DOWN", "ON"]
def handle(text, mic, profile):
targetUrl = profile['target']["IP_PORT"]
targetMn = profile['target']["ID_MN"]
targetAE = 'LED1'
url = 'http://' + targetUrl + '/~/' + targetMn + '/mn-name/' + targetAE
if re.search(r'\bDOWN\b',text,re.IGNORECASE):
querystring = {"op":"ALLfalse"}
sentence = "I have turned down the light, sir"
else:
querystring = {"op":"ALLtrue"}
sentence = "I have turned on the light, sir"
headers = {
'x-m2m-origin': "admin:admin",
'cache-control': "no-cache",
'postman-token': "<PASSWORD>"
}
response = requests.request("POST", url, headers=headers, params=querystring)
print(response.text)
print sentence
mic.say(sentence)
def isValid(text):
return bool(re.search(r'\blight\b', text, re.IGNORECASE))
| 2.21875 | 2 |
plots/model_explorer/app_hooks.py | ZviBaratz/pylabber | 3 | 12791698 | <filename>plots/model_explorer/app_hooks.py
from .setup import load_django
def on_server_loaded(server_context):
load_django()
| 1.367188 | 1 |
routely/__init__.py | jhags/routely | 1 | 12791699 | <filename>routely/__init__.py
from .routely import Route
| 1.304688 | 1 |
nc/migrations/0035_portfolio_rawportfoliodata.py | kfarrelly/nucleo | 1 | 12791700 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-16 15:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nc', '0034_auto_20180710_2209'),
]
operations = [
migrations.CreateModel(
name='Portfolio',
fields=[
('profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='portfolio', serialize=False, to='nc.Profile')),
],
),
migrations.CreateModel(
name='RawPortfolioData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('value', models.FloatField(default=-1.0)),
('portfolio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rawdata', to='nc.Portfolio')),
],
options={
'ordering': ('-created',),
'abstract': False,
'get_latest_by': 'created',
},
),
]
| 1.6875 | 2 |
src/scs_core/sys/modem.py | south-coast-science/scs_core | 3 | 12791701 | """
Created on 24 Mar 2021
@author: <NAME> (<EMAIL>)
Modem
-----
modem.generic.device-identifier : 3f07553c31ce11715037ac16c24ceddcfb6f7a0b
modem.generic.manufacturer : QUALCOMM INCORPORATED
modem.generic.model : QUECTEL Mobile Broadband Module
modem.generic.revision : EC21EFAR06A01M4G
...
modem.3gpp.imei : 867962041294151
example JSON:
{"id": "3f07553c31ce11715037ac16c24ceddcfb6f7a0b", "imei": "867962041294151", "mfr": "QUALCOMM INCORPORATED",
"model": "QUECTEL Mobile Broadband Module", "rev": "EC21EFAR06A01M4G"}
ModemConnection
---------------
modem.generic.state : connected
modem.generic.state-failed-reason : --
modem.generic.signal-quality.value : 67
modem.generic.signal-quality.recent : yes
example JSON:
{"state": "connected", "signal": {"quality": 67, "recent": true}}
SIM (Subscriber Identity Module)
--------------------------------
sim.dbus-path : /org/freedesktop/ModemManager1/SIM/0
sim.properties.imsi : 234104886708667
sim.properties.iccid : 8944110068256270054
sim.properties.operator-code : 23410
sim.properties.operator-name : giffgaff
sim.properties.emergency-numbers.length : 2
sim.properties.emergency-numbers.value[1] : 999
sim.properties.emergency-numbers.value[2] : 00112
example JSON:
{"imsi": "123", "iccid": "456", "operator-code": "789 012", "operator-name": "<NAME>"}
"""
import re
from collections import OrderedDict
from scs_core.data.datum import Datum
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class ModemList(object):
"""
modem-list.value[1] : /org/freedesktop/ModemManager1/Modem/0
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_mmcli(cls, lines):
modems = []
for line in lines:
match = re.match(r'modem-list.value\[[\d]+]\s+:\s+([\S]+)', line)
if match:
modems.append(match.groups()[0])
return cls(modems)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, modems):
"""
Constructor
"""
self.__modems = modems # array of string
def __len__(self):
return len(self.__modems)
# ----------------------------------------------------------------------------------------------------------------
def modem(self, index):
return self.__modems[index]
def number(self, index):
pieces = self.modem(index).split('/')
return pieces[-1]
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "ModemList:{modems:%s}" % self.__modems
# --------------------------------------------------------------------------------------------------------------------
class Modem(JSONable):
"""
modem.generic.device-identifier : 3f07553c31ce11715037ac16c24ceddcfb6f7a0b
modem.generic.manufacturer : QUALCOMM INCORPORATED
modem.generic.model : QUECTEL Mobile Broadband Module
modem.generic.revision : EC21EFAR06A01M4G
...
modem.3gpp.imei : 867962041294151
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
id = jdict.get('id')
imei = jdict.get('imei')
mfr = jdict.get('mfr')
model = jdict.get('model')
rev = jdict.get('rev')
return cls(id, imei, mfr, model, rev)
@classmethod
def construct_from_mmcli(cls, lines):
id = None
imei = None
mfr = None
model = None
rev = None
for line in lines:
match = re.match(r'modem\.generic\.device-identifier\s+:\s+(\S+)', line)
if match:
id = match.groups()[0]
continue
match = re.match(r'.*\.imei\s+:\s+(\d+)', line)
if match:
imei = match.groups()[0]
continue
match = re.match(r'modem\.generic\.manufacturer\s+:\s+(\S.*\S)', line)
if match:
mfr = match.groups()[0]
continue
match = re.match(r'modem\.generic\.model\s+:\s+(\S.*\S)', line)
if match:
model = match.groups()[0]
continue
match = re.match(r'modem\.generic\.revision\s+:\s+(\S+)', line)
if match:
rev = match.groups()[0]
continue
return cls(id, imei, mfr, model, rev)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, id, imei, mfr, model, rev):
"""
Constructor
"""
self.__id = id # string
self.__imei = imei # string
self.__mfr = mfr # string
self.__model = model # string
self.__rev = rev # string
def __eq__(self, other):
try:
return self.id == other.id and self.imei == other.imei and self.mfr == other.mfr and \
self.model == other.model and self.rev == other.rev
except (TypeError, AttributeError):
return False
# ----------------------------------------------------------------------------------------------------------------
@property
def id(self):
return self.__id
@property
def imei(self):
return self.__imei
@property
def mfr(self):
return self.__mfr
@property
def model(self):
return self.__model
@property
def rev(self):
return self.__rev
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['id'] = self.id
jdict['imei'] = self.imei
jdict['mfr'] = self.mfr
jdict['model'] = self.model
jdict['rev'] = self.rev
return jdict
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "Modem:{id:%s, imei:%s, mfr:%s, model:%s, rev:%s}" % \
(self.id, self.imei, self.mfr, self.model, self.rev)
# --------------------------------------------------------------------------------------------------------------------
class ModemConnection(JSONable):
"""
modem.generic.state : connected
modem.generic.state-failed-reason : --
modem.generic.signal-quality.value : 67
modem.generic.signal-quality.recent : yes
"""
UNAVAILABLE_STATE = "unavailable"
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
state = jdict.get('state')
failure = jdict.get('failure')
signal = Signal.construct_from_jdict(jdict.get('signal'))
return cls(state, failure, signal)
@classmethod
def construct_from_mmcli(cls, lines):
state = None
failure = None
quality = None
recent = None
for line in lines:
match = re.match(r'modem\.generic\.state\s+:\s+([a-z]+)', line)
if match:
state = match.groups()[0]
continue
match = re.match(r'modem\.generic\.state-failed-reason\s+:\s+(\S.*\S)', line)
if match:
reported_failure = match.groups()[0]
failure = None if reported_failure == '--' else reported_failure
continue
match = re.match(r'modem\.generic\.signal-quality\.value\s+:\s+([\d]+)', line)
if match:
quality = match.groups()[0]
continue
match = re.match(r'modem\.generic\.signal-quality\.recent\s+:\s+([a-z]+)', line)
if match:
recent = match.groups()[0] == 'yes'
continue
return cls(state, failure, Signal(quality, recent))
@classmethod
def null_datum(cls):
return cls(cls.UNAVAILABLE_STATE, None, Signal.null_datum())
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, state, failure, signal):
"""
Constructor
"""
self.__state = state # string
self.__failure = failure # string
self.__signal = signal # Signal
# ----------------------------------------------------------------------------------------------------------------
@property
def state(self):
return self.__state
@property
def failure(self):
return self.__failure
@property
def signal(self):
return self.__signal
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['state'] = self.state
if self.failure is not None:
jdict['failure'] = self.failure
jdict['signal'] = self.signal
return jdict
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "ModemConnection:{state:%s, failure:%s, signal:%s}" % (self.state, self.failure, self.signal)
# --------------------------------------------------------------------------------------------------------------------
class Signal(JSONable):
"""
modem.generic.signal-quality.value : 67
modem.generic.signal-quality.recent : yes
"""
__SIGNIFICANT_QUALITY_DIFFERENCE = 10
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
quality = jdict.get('quality')
recent = jdict.get('recent')
return cls(quality, recent)
@classmethod
def null_datum(cls):
return cls(None, None)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, quality, recent):
"""
Constructor
"""
self.__quality = Datum.int(quality) # int
self.__recent = recent # bool
# ----------------------------------------------------------------------------------------------------------------
@property
def quality(self):
return self.__quality
@property
def recent(self):
return self.__recent
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['quality'] = self.quality
jdict['recent'] = self.recent
return jdict
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "Signal:{quality:%s, recent:%s}" % (self.quality, self.recent)
# --------------------------------------------------------------------------------------------------------------------
class SIMList(object):
"""
modem.generic.sim : /org/freedesktop/ModemManager1/SIM/0
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_mmcli(cls, lines):
sims = []
for line in lines:
match = re.match(r'modem\.generic\.sim\s+:\s+([\S]+)', line)
if match:
sims.append(match.groups()[0])
return cls(sims)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, sims):
"""
Constructor
"""
self.__sims = sims # array of string
def __len__(self):
return len(self.__sims)
# ----------------------------------------------------------------------------------------------------------------
def sim(self, index):
return self.__sims[index]
def number(self, index):
pieces = self.sim(index).split('/')
return pieces[-1]
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "SIMList:{sims:%s}" % self.__sims
# --------------------------------------------------------------------------------------------------------------------
class SIM(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
imsi = jdict.get('imsi')
iccid = jdict.get('iccid')
operator_code = jdict.get('operator-code')
operator_name = jdict.get('operator-name')
return cls(imsi, iccid, operator_code, operator_name)
@classmethod
def construct_from_mmcli(cls, lines):
imsi = None
iccid = None
operator_code = None
operator_name = None
for line in lines:
match = re.match(r'sim\.properties\.imsi\s+:\s+([\d]+)', line)
if match:
imsi = match.groups()[0]
continue
match = re.match(r'sim\.properties\.iccid\s+:\s+([\d]+)', line)
if match:
iccid = match.groups()[0]
continue
match = re.match(r'sim\.properties\.operator-code\s+:\s+([\d]+)', line)
if match:
operator_code = match.groups()[0]
continue
match = re.match(r'sim\.properties\.operator-name\s+:\s+(\S.*)', line)
if match:
reported_name = match.groups()[0].strip()
operator_name = None if reported_name == '--' else reported_name
return cls(imsi, iccid, operator_code, operator_name)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, imsi, iccid, operator_code, operator_name):
"""
Constructor
"""
self.__imsi = imsi # numeric string
self.__iccid = iccid # numeric string
self.__operator_code = operator_code # string
self.__operator_name = operator_name # string
def __eq__(self, other):
try:
return self.imsi == other.imsi and self.iccid == other.iccid and \
self.operator_code == other.operator_code and self.operator_name == other.operator_name
except (TypeError, AttributeError):
return False
# ----------------------------------------------------------------------------------------------------------------
@property
def imsi(self):
return self.__imsi
@property
def iccid(self):
return self.__iccid
@property
def operator_code(self):
return self.__operator_code
@property
def operator_name(self):
return self.__operator_name
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['imsi'] = str(self.imsi)
jdict['iccid'] = str(self.iccid)
jdict['operator-code'] = self.operator_code
jdict['operator-name'] = self.operator_name
return jdict
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "SIM:{imsi:%s, iccid:%s, operator_code:%s, operator_name:%s}" % \
(self.imsi, self.iccid, self.operator_code, self.operator_name)
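# Illustrative usage (a sketch, not part of the original module): parsing key-value style
# mmcli output lines into a Modem instance. The field values mirror the examples quoted in
# the module docstring above; in practice the lines come from mmcli's key-value output.
#
#     lines = [
#         "modem.generic.device-identifier : 3f07553c31ce11715037ac16c24ceddcfb6f7a0b",
#         "modem.generic.manufacturer      : QUALCOMM INCORPORATED",
#         "modem.generic.model             : QUECTEL Mobile Broadband Module",
#         "modem.generic.revision          : EC21EFAR06A01M4G",
#         "modem.3gpp.imei                 : 867962041294151",
#     ]
#     modem = Modem.construct_from_mmcli(lines)
#     print(modem.as_json())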
| 1.1875 | 1 |
build/lib.linux-armv7l-2.7/bibliopixel/drivers/WS2801.py | sethshill/final | 6 | 12791702 | <reponame>sethshill/final<gh_stars>1-10
from spi_driver_base import DriverSPIBase, ChannelOrder
import os
from .. import gamma
class DriverWS2801(DriverSPIBase):
"""Main driver for WS2801 based LED strips on devices like the Raspberry Pi and BeagleBone"""
def __init__(self, num, c_order=ChannelOrder.RGB, use_py_spi=True, dev="/dev/spidev0.0", SPISpeed=1):
if SPISpeed > 1 or SPISpeed <= 0:
raise ValueError(
"WS2801 requires an SPI speed no greater than 1MHz or SPI speed was set <= 0")
super(DriverWS2801, self).__init__(num, c_order=c_order,
use_py_spi=use_py_spi, dev=dev, SPISpeed=SPISpeed)
self.gamma = gamma.WS2801
# WS2801 requires gamma correction so we run it through gamma as the
# channels are ordered
def _fixData(self, data):
for a, b in enumerate(self.c_order):
self._buf[a:self.numLEDs * 3:3] = [self.gamma[v]
for v in data[b::3]]
MANIFEST = [
{
"id": "WS2801",
"class": DriverWS2801,
"type": "driver",
"display": "WS2801 (SPI Native)",
"desc": "Interface with WS2801 strips over a native SPI port (Pi, BeagleBone, etc.)",
"params": [{
"id": "num",
"label": "# Pixels",
"type": "int",
"default": 1,
"min": 1,
"help": "Total pixels in display."
}, {
"id": "c_order",
"label": "Channel Order",
"type": "combo",
"options": {
0: "RGB",
1: "RBG",
2: "GRB",
3: "GBR",
4: "BRG",
5: "BGR"
},
"options_map": [
[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]
],
"default": 0
}, {
"id": "dev",
"label": "SPI Device Path",
"type": "str",
"default": "/dev/spidev0.0",
}, {
"id": "use_py_spi",
"label": "Use PySPI",
"type": "bool",
"default": True,
"group": "Advanced"
}]
}
]
| 2.640625 | 3 |
migrations/versions/004_Create_User_table.py | LCBRU/batch_demographics | 0 | 12791703 | <gh_stars>0
from sqlalchemy import (
MetaData,
Table,
Column,
Integer,
NVARCHAR,
DateTime,
Boolean,
DateTime,
)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
user = Table(
"user",
meta,
Column("id", Integer, primary_key=True),
Column("email", NVARCHAR(255), nullable=False, unique=True),
Column("password", NVARCHAR(255), nullable=False),
Column("first_name", NVARCHAR(255)),
Column("last_name", NVARCHAR(255)),
Column("active", Boolean()),
Column("confirmed_at", DateTime()),
Column("last_login_at", DateTime()),
Column("current_login_at", DateTime()),
Column("last_name", NVARCHAR(255)),
Column("last_login_ip", NVARCHAR(255)),
Column("current_login_ip", NVARCHAR(255)),
Column("login_count", Integer()),
Column("created_date", DateTime(), nullable=False),
)
user.create()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
user = Table("user", meta, autoload=True)
user.drop()
| 2.359375 | 2 |
marsDemonstrator/__init__.py | tum-fml/marsDemonstrator | 1 | 12791704 | <reponame>tum-fml/marsDemonstrator
from .designMethods import MARSInput, ENComputation, LoadCollectivePrediction, load_all_gps, InputFileError # noqa: F401
from .main_app import MainApplication, ResultWriter # noqa: F401
__all__ = ["MARSInput", "Computation", "LoadCollectivePrediction", "MainApplication", "ResultWriter"]
| 1.398438 | 1 |
pydnameth/model/tree.py | AaronBlare/pydnameth | 2 | 12791705 | <filename>pydnameth/model/tree.py
from anytree import PostOrderIter
from pydnameth.infrastucture.save.info import save_info
from pydnameth.model.context import Context
import hashlib
from anytree.exporter import JsonExporter
def calc_tree(root):
for node in PostOrderIter(root):
config = node.config
configs_child = [node_child.config for node_child in node.children]
context = Context(config)
context.pipeline(config, configs_child)
def build_tree(root):
for node in PostOrderIter(root):
node_status = node.config.is_root
node.config.is_root = True
node.name = str(node.config)
exporter = JsonExporter(sort_keys=True)
node_json = exporter.export(node).encode('utf-8')
hash = hashlib.md5(node_json).hexdigest()
node.config.set_hash(hash)
if node.config.is_run:
save_info(node)
node.config.is_root = node_status
node.name = str(node.config)
| 2.375 | 2 |
B3Analyzer/utilities.py | mauromatsudo/brazilian-stocks-analyzer | 0 | 12791706 | <filename>B3Analyzer/utilities.py
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
class Stock:
def __init__(self, ticker):
self._ticker = ticker
self._url = f'https://statusinvest.com.br/acoes/{self._ticker}'
def get_all_indicators(self):
html = get(self._url).text
soup = BeautifulSoup(html, 'html.parser')
        html_table = soup.select("div.width-auto:nth-child(2)")[0]  # select exactly the table with the fundamental indicators
indicators = [element.text for element in html_table.find_all('h3')]
valuations = [element.text for element in html_table.find_all('strong')]
if len(indicators) != len(valuations):
            # The h3 tags hold the indicator names on the site, but there are duplicate h3 tags that
            # are not shown in the front end and refer to the same indicator; to avoid errors, keep
            # only the indicators that are actually shown.
shown = ('P/VP', 'P/L', 'P/Ebitda', 'P/Ebit', 'P/Ativo', 'EV/Ebitda', 'EV/EBIT', 'PSR', 'P/Cap.Giro', 'P/Ativo Circ Liq', 'Margem Bruta',
'Margem Ebitda', 'Margem Ebit', 'Margem Líquida', 'Giro Ativos', 'ROE', 'ROA', 'ROIC', 'LPA', 'VPA', 'Dívida Líquida / Patrimônio',
'Dívida Líquida / EBITDA', 'Dívida Líquida / EBIT', 'Patrimônio / Ativos', 'Passivos / Ativos', 'Liquidez Corrente', 'CAGR Receitas 5 Anos',
'CAGR Lucros 5 Anos')
indicators = [element for element in indicators if (element in shown) == True]
for index in range(len(valuations)):
value = valuations[index]
if '%' in value:
value = value[:-1]
try:
value = value.replace(',', '.')
value = float(value)
except ValueError:
value = 'Not avaiable'
valuations[index] = value
fundamental_indicators = dict(zip(indicators, valuations))
return fundamental_indicators
class Firm(Stock):
def __repr__(self):
return f'BrazilianStock object <{self._ticker.upper()}>'
@property
    def profit_indicators(self):
        indicators = ('ROE', 'ROIC', 'Margem Ebitda', 'Margem Líquida')
        return {key: values for (key, values) in self.get_all_indicators().items() if key in indicators}
@property
def price_indicators(self):
indicators = ('P/VP', 'P/L', 'P/Ativo')
return {key:values for (key, values) in self.get_all_indicators().items() if key in indicators}
@property
def debt_indicators(self):
indicators = ('Dívida Líquida / Patrimônio', 'Dívida Líquida / EBITDA', 'Passivos / Ativos')
# there is a pŕoblem with this indicators, because the html source get more h3 tags here, and some
# indicadors are not the same
return {key: values for (key, values) in self.get_all_indicators().items() if key in indicators}
class B3:
def __repr__(self):
return 'Brazilian Trader object'
def __init__(self):
self._firms = pd.read_excel('data/B3_list.xlsx')
@property
def overall_report(self):
return self._firms
@property
def companies_list(self):
return tuple(self._firms['Ticker'])
def get_by_industry(self, industry):
return tuple(self._firms[self._firms['Industry'] == industry]['Ticker'])
class Analyzer:
def __repr__(self):
return f'FundamentalAnalyzer object'
def __init__(self, ticker):
self._ticker = ticker
self._points = 0
self._basic_fundamentals = {
'price_indicators': {
'P/VP': 3,
'P/L': 20,
'P/Ativo': 2},
'profit_indicators': {
                'Margem Ebitda': 15,
'Margem Líquida': 8,
'ROE': 10,
'ROIC': 5},
'debt_indicadors': {
'Dívida Líq/Patrim': 1,
'Dívida Líq/EBITDA': 3,
'Passivos / Ativos': 1}}
def analyze_metrics(self, indicators):
chosen_metrics = ('ROE', 'ROIC', 'Margem Ebitda', 'Margem Líquida', 'P/VP', 'P/L', 'P/Ativo',
'Dívida Líquida / Patrimônio', 'Dívida Líquida / EBITDA', 'Passivos / Ativos')
indicators = {key: indicators[key] for key in chosen_metrics}
metrics_df = pd.DataFrame.from_dict(indicators, orient='index', columns=["Current Value"])
for column in ("Min", "Max", "Weigh", " +Points", "-Points"):
            metrics_df[column] = pd.Series()
        return metrics_df
if __name__ == "__main__":
# Testing area
'''request = get('https://statusinvest.com.br/acoes/cvcb3').text
soup = BeautifulSoup(request, 'html.parser')
print(soup.select("div.width-auto:nth-child(2)"))
petr = Data(ticker='petr4')
cvc = Data('cvcb3')
print(cvc.get_all_indicators())
print(cvc.get_all_indicators())
print(petr.profit_indicators)
clas = Stock('cvcb3')
print(clas.get_all_indicators())
print(clas.price_indicators, '\n', clas.profit_indicators, '\n', clas.debt_indicators)'''
print(B3().get_by_industry('Materiais Básicos'))
| 3.078125 | 3 |
virtual/bin/django-admin.py | vinnyotach7/insta-photo | 0 | 12791707 | #!/home/moringaschool/Documents/django projects/insta-moringa/virtual/bin/python3.6
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 1.070313 | 1 |
pelayanan/apps.py | diaksizz/Adisatya | 0 | 12791708 | from django.apps import AppConfig
class PelayananConfig(AppConfig):
name = 'pelayanan'
| 1.171875 | 1 |
dqn_cartpole.py | subinlab/dqn | 1 | 12791709 | import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
env = gym.make('CartPole-v0').unwrapped
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Transition = namedtuple('Transition',('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position+1)%self.capacity
def sample(self, batch_size):
"""Select a random batch of transitions for training"""
return random.sample(self.memory, batch_size)
def __len__(self):
"""Return length of replay memory"""
return len(self.memory)
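# Illustrative usage (a sketch, not part of the original script): pushing one transition and
# sampling a minibatch. The tensor shapes are arbitrary placeholders for CartPole-like data.
#
#     memory = ReplayMemory(10000)
#     memory.push(torch.zeros(1, 4), torch.tensor([[0]]), torch.ones(1, 4), torch.tensor([0.0]))
#     if len(memory) >= 1:
#         batch = memory.sample(1)                      # list of Transition namedtuples
#         states = torch.cat([t.state for t in batch])  # -> shape (1, 4)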
class Network(object):
def __init__(self, h, w, outputs):
self.convNet = self.convNet(h, w, outputs)
    def convNet(self, h, w, outputs):
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
class DQN(object):
def __init__(self, batch_size=64, discount_rate=0.99, max_episodes=300):
self.env = gym.make('CartPole-v0')
self.obs = self.env.observation_space.shape[0]
self.n_actions = self.env.action_space.n
self.BATCH_SIZE = batch_size
self.DISCOUNT_RATE = discount_rate
self.MAX_EPISODES = max_episodes
self.TARGET_UPDAT_FREQUENCY = 5
self.EPS_START = 0.9
self.EPS_END = 0.05
self.EPS_DECAY = 200
        self.main_Qnet = Network(self.obs, self.n_actions)
        self.target_Qnet = Network(self.obs, self.n_actions)
def plot_durations():
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
display.clear_output(wait=True)
display.display(plt.gcf())
def train(self):
buffer = ReplayMemory()
for episode in range(self.MAX_EPISODES):
            e = 1. / ((episode / 10) + 1)
done = False
step_count = 0
state = self.env.reset()
while not done:
state = np.reshape(state, (1,self.obs))
if np.random.rand() < e:
action = self.env.action_space.sample()
else:
action = np.argmax(self.main_Qnet.predict(state))
next_state, reward, done, info = self.env.step(action)
if done:
reward = -1
buffer.push((state, action, reward, next_state))
if len(buffer) > self.BATCH_SIZE:
minibatch = random.sample(buffer, self.BATCH_SIZE)
states = np.vstack([x[0] for x in minibatch])
actions = np.array([x[1] for x in minibatch])
rewards = np.array([x[2] for x in minibatch])
next_states = np.vstack([x[3] for x in minibatch])
                    Q_target = rewards + self.DISCOUNT_RATE * np.max(self.target_Qnet.forward(next_states), axis=1)
y = self.main_Qnet(states)
                    y[np.arange(len(states)), actions] = Q_target
# self.main_Qnet.train_on
if step_count % self.TARGET_UPDAT_FREQUENCY == 0:
# self.target_Qnet
state = next_state
step_count +=1
print("Episode: {} steps: {}".format(episode, step_count))
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Activation
from keras import regularizers
from keras.models import Model
from keras.optimizers import Adam
import numpy as np
# from collections import deque
import random
import gym
# from typing import List
import argparse
class DQN():
def __init__(self, discount=0.99, batch_size = 64, max_episodes = 300):
self.env = gym.make('CartPole-v0')
# self.env.wrappers.Monitor(env, directory="results/", force=True)
self.input_size= self.env.observation_space.shape[0]
self.output_size= self.env.action_space.n
self.DISCOUNT_RATE=discount
self.BATCH_SIZE = batch_size
self.TARGET_UPDATE_FREQUENCY = 5
self.MAX_EPISODES = max_episodes
self.main_dqn = self.build()
self.target_dqn = self.build()
self.main_dqn.compile(optimizer = Adam(), loss ="mean_squared_error")
self.target_dqn.set_weights(self.main_dqn.get_weights())
def build(self, h_size = 16, lr = 0.001):
state = Input(shape=(self.input_size,))
dense1 = Dense(h_size, activation = "relu")(state)
action = Dense(self.output_size, kernel_regularizer=regularizers.l2(0.01))(dense1)
model = Model(state, action)
return model
def train(self):
if len(memory) < BATCH_SIZE:
return
transitions = memory.sample(BATCH_SIZE)
# Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
# detailed explanation). This converts batch-array of Transitions
# to Transition of batch-arrays.
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
batch.next_state)), device=device, dtype=torch.uint8)
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
Q_value = main_Qnet(state_batch).gather(1, action_batch)
next_state_values = torch.zeros(BATCH_SIZE, device=device)
next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
# Compute the expected Q values
next_Q_values = (next_state_values * DISCOUNT_RATE) + reward_batch
# Compute Huber loss
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize the model
optimizer.zero_grad()
loss.backward()
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
num_episodes = 50
for i_episode in range(num_episodes):
# Initialize the environment and state
env.reset()
last_screen = get_screen()
current_screen = get_screen()
state = current_screen - last_screen
for t in count():
# Select and perform an action
action = select_action(state)
_, reward, done, _ = env.step(action.item())
reward = torch.tensor([reward], device=device)
# Observe new state
last_screen = current_screen
current_screen = get_screen()
if not done:
next_state = current_screen - last_screen
else:
next_state = None
memory.push(state, action, next_state, reward)
# Move to the next state
state = next_state
optimize_model()
if done:
episode_durations.append(t + 1)
plot_durations()
break
if i_episode % TARGET_UPDATE == 0:
target_net.load_state_dict(policy_net.state_dict())
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--batch", required=False)
ap.add_argument("-d", "--discount", required=False)
ap.add_argument("-ep", "--max", required=False)
args = vars(ap.parse_args())
dqn = DQN(int(args["batch"]), float(args["discount"]), int(args["max"]))
dqn.train()
print('Complete')
env.render()
env.close()
plt.ioff()
plt.show()
| 2.640625 | 3 |
src/transposer.py | polifonia-project/harmonic-similarity | 0 | 12791710 | import sys
import os
import re, getopt
key_list = [('A',), ('A#', 'Bb'), ('B', 'Cb'), ('C',), ('C#', 'Db'), ('D',),
('D#', 'Eb'), ('E',), ('F',), ('F#', 'Gb'), ('G',), ('G#', 'Ab')]
sharp_flat = ['#', 'b']
sharp_flat_preferences = {
'A': '#',
'A#': 'b',
'Bb': 'b',
'B': '#',
'C': 'b',
'C#': 'b',
'Db': 'b',
'D': '#',
'D#': 'b',
'Eb': 'b',
'E': '#',
'F': 'b',
'F#': '#',
'Gb': '#',
'G': '#',
'G#': 'b',
'Ab': 'b',
}
key_regex = re.compile(r"[ABCDEFG][#b]?")
def get_index_from_key(source_key):
"""Gets the internal index of a key
>>> get_index_from_key('Bb')
1
"""
for key_names in key_list:
if source_key in key_names:
return key_list.index(key_names)
raise Exception("Invalid key: %s" % source_key)
def get_key_from_index(index, to_key):
"""Gets the key at the given internal index.
Sharp or flat depends on the target key.
>>> get_key_from_index(1, 'Eb')
'Bb'
"""
key_names = key_list[index % len(key_list)]
if len(key_names) > 1:
sharp_or_flat = sharp_flat.index(sharp_flat_preferences[to_key])
return key_names[sharp_or_flat]
return key_names[0]
def get_transponation_steps(source_key, target_key):
"""Gets the number of half tones to transpose
>>> get_transponation_steps('D', 'C')
-2
"""
source_index = get_index_from_key(source_key)
target_index = get_index_from_key(target_key)
return target_index - source_index
def transpose_file(file_name, from_key, to_key):
"""Transposes a file from a key to another.
>>> transpose_file('example.txt', 'D', 'E')
'Rocking start, jazzy ending\\n| E | A B | Cm7#11/D# |\\n'
"""
direction = get_transponation_steps(from_key, to_key)
result = ''
try:
for line in open(file_name):
result += transpose_line(line, direction, to_key)
return result
except IOError:
print("Invalid filename!")
usage()
def transpose_line(source_line, direction, to_key):
"""Transposes a line a number of keys if it starts with a pipe. Examples:
>>> transpose_line('| A | A# | Bb | C#m7/F# |', -2, 'C')
'| G | Ab | Ab | Bm7/E |'
Different keys will be sharp or flat depending on target key.
>>> transpose_line('| A | A# | Bb | C#m7/F# |', -2, 'D')
'| G | G# | G# | Bm7/E |'
It will use the more common key if sharp/flat, for example F# instead of Gb.
>>> transpose_line('| Gb |', 0, 'Gb')
'| F# |'
Lines not starting with pipe will not be transposed
>>> transpose_line('A | Bb |', -2, 'C')
'A | Bb |'
"""
if source_line[0] != '|':
return source_line
source_chords = key_regex.findall(source_line)
return recursive_line_transpose(source_line, source_chords, direction, to_key)
def recursive_line_transpose(source_line, source_chords, direction, to_key):
if not source_chords or not source_line:
return source_line
source_chord = source_chords.pop(0)
chord_index = source_line.find(source_chord)
after_chord_index = chord_index + len(source_chord)
return source_line[:chord_index] + \
transpose(source_chord, direction, to_key) + \
recursive_line_transpose(source_line[after_chord_index:], source_chords, direction, to_key)
def transpose(source_chord, direction, to_key):
"""Transposes a chord a number of half tones.
Sharp or flat depends on target key.
>>> transpose('C', 3, 'Bb')
'Eb'
"""
source_index = get_index_from_key(source_chord)
return get_key_from_index(source_index + direction, to_key)
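# Illustrative sketch (not part of the original module): transposing an in-memory list
# of chord lines instead of a file, reusing transpose_line(); the helper name and the
# example chords in the doctest are assumptions.
def transpose_lines(lines, from_key, to_key):
    """Transpose every line of an in-memory chord sheet.
    >>> transpose_lines(['| A | D |'], 'A', 'C')
    ['| C | F |']
    """
    direction = get_transponation_steps(from_key, to_key)
    return [transpose_line(line, direction, to_key) for line in lines]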
def usage():
    print('Usage:')
    print('%s --from=Eb --to=F# input_filename' % os.path.basename(__file__))
sys.exit(2)
def main():
from_key = 'C'
to_key = 'C'
file_name = None
try:
options, arguments = getopt.getopt(sys.argv[1:], 'f:t:', ['from=', 'to=', 'doctest'])
except getopt.GetoptError as err:
        print(str(err))
        usage()
for option, value in options:
if option in ('-f', '--from'):
from_key = value
elif option in ('-t', '--to'):
to_key = value
elif option == '--doctest':
import doctest
doctest.testmod()
exit()
else:
usage()
if arguments:
file_name = arguments[0]
else:
usage()
result = transpose_file(file_name, from_key, to_key)
print("Result (%s -> %s):" % (from_key, to_key))
print(result)
if __name__ == '__main__':
print(transpose_line('|Eb', 2, 'C'))
| 2.828125 | 3 |
web/views/blog.py | aHugues/blog | 0 | 12791711 | <reponame>aHugues/blog
from flask import Blueprint
from flask import render_template
from flask import request
from flask import jsonify
from ..services import ArticlesService
blog_views = Blueprint('blog_views', __name__)
articles_service = ArticlesService()
@blog_views.route('/blog')
def home_page():
articles = articles_service.listArticles()
return render_template('blog.html',
articles=articles,
current_page="blog",
)
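# Illustrative sketch (not part of the original module): the blueprint defined above
# still has to be registered on a Flask application; the helper name is an assumption.
def register_blog_views(app):
    """Attach the blog blueprint to an existing Flask app instance."""
    app.register_blueprint(blog_views)
    return app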
@blog_views.route('/api/articles', methods=["GET", "POST"])
def display_articles_list():
if request.method == 'GET':
articles = articles_service.listArticles()
return jsonify(articles)
elif request.method == 'POST':
article_title = request.json['title']
article_content = request.json['content']
articles_service.addArticle(article_title, article_content)
return "ok", 200 | 2.859375 | 3 |
worker/worker.py | GeorgianBadita/remote-code-execution-engine | 0 | 12791712 | import logging
import subprocess
from typing import Optional
from celery import Celery
from celery.utils.log import get_logger
from code_execution.code_execution import CodeExcution
from utils import generate_random_file
tmp_dir_path = '/worker/tmp'
compiled_dir_path = '/worker/tmp/compiled_files'
# Create the celery app and get the logger
celery_app = Celery('code-executions-tasks',
broker='pyamqp://guest@rabbit//', backend='amqp://guest@rabbit//')
# Add CELERY_ACKS_LATE in order to wait for infinite loop code executions
# celery_app.conf.update(
# CELERY_ACKS_LATE=True
# )
logger = get_logger(__name__)
@celery_app.task
def execute_code(language: str, code: str, submission: bool = False, timeout: Optional[float] = 10) -> dict:
"""
Task for code execution
@param language: code programming language
@param code: code to be executed
@param submission: flag which tells if the code to be executed is a submission or a normal execution
@param timeout: maximum time the code is allowed to run
    @return: dict containing execution results
"""
logger.info("Starting code execution")
in_file_path = (f"{tmp_dir_path}/in_files/{generate_random_file()}."
f"{CodeExcution.get_lang_extension(language)}")
compiled_file = f'{compiled_dir_path}/{generate_random_file()}.out'
command_to_execute_code = CodeExcution.provide_code_execution_command(
in_file_path, language, compiled_file, submission)
default_dict = {
"has_error": False,
"out_of_resources": False,
"exit_code": 0,
"out_of_time": False,
"raw_output": ""
}
try:
code_output = CodeExcution.execute_code(
command_to_execute_code, in_file_path, compiled_file, code, timeout)
logging.info(f"Code Returned, result: {code_output}")
default_dict["raw_output"] = code_output
except subprocess.CalledProcessError as cpe:
logging.debug(f"Code execution was errored: {cpe}")
default_dict["has_error"] = True
default_dict["exit_code"] = cpe.returncode
default_dict["raw_output"] = cpe.output
except subprocess.TimeoutExpired as te:
logger.debug(f"Code timeout after: {te.timeout}")
default_dict["has_error"] = True
default_dict["exit_code"] = 124
default_dict["out_of_time"] = True
default_dict["raw_output"] = "Time Limit Exceeded"
return default_dict
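# Illustrative sketch (not part of the original worker): how a client process might
# enqueue this task through Celery; the code string and timeouts are assumptions.
def submit_example_job():
    """Queue a sample Python snippet and wait for the worker's result dict."""
    async_result = execute_code.delay("python", "print('hello')", submission=False, timeout=5)
    return async_result.get(timeout=30)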
| 2.484375 | 2 |
databroker/databroker.py | EliasKal/ai4eu_pipeline_visualization | 0 | 12791713 | #imports
import haversine as hs
import pandas as pd
import numpy as np
import random
import time
from concurrent import futures
import grpc
import databroker_pb2_grpc
import databroker_pb2
port = 8061
class Databroker(databroker_pb2_grpc.DatabrokerServicer):
def __init__(self):
self.current_row = 0
#load required datasets
self.no2_data = pd.read_csv('./data/no2_testset.csv')
self.pm10_data = pd.read_csv('./data/pm10_testset.csv')
self.pm25_data = pd.read_csv('./data/pm25_testset.csv')
self.gps_data = pd.read_csv('./data/sensor_gps.csv')
self.sensor_gps = pd.read_csv('./data/low_cost_sensors.csv')
def get_next(self, request, context):
response = databroker_pb2.Features()
if self.current_row >= self.no2_data.shape[0]:
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details("all data has been processed")
else:
#load 1 row from each dataset and convert to numpy
# create response format dataframe
no2 = pd.DataFrame(data=None, columns=self.no2_data.columns)
pm10 = pd.DataFrame(data=None, columns=self.pm10_data.columns)
pm25 = pd.DataFrame(data=None, columns=self.pm25_data.columns)
for sensor in range(self.sensor_gps.shape[0]):
id = self.sensor_gps.deviceID[sensor]
counter=1
for i in range(23,0,-1):
lat1 = np.rad2deg(self.sensor_gps.iloc[sensor,4])
lon1 = np.rad2deg(self.sensor_gps.iloc[sensor,5])
lat2 = self.gps_data.iloc[0,i*2+1]
lon2 = self.gps_data.iloc[0,i*2]
distance = hs.haversine((lat2, lon2), (lat1, lon1))
self.no2_data.iloc[self.current_row,counter] = distance
self.pm10_data.iloc[self.current_row,counter] = distance
self.pm25_data.iloc[self.current_row,counter] = distance
counter +=1
no2 = no2.append(self.no2_data.iloc[self.current_row,:])
pm10 = pm10.append(self.pm10_data.iloc[self.current_row,:])
pm25 = pm25.append(self.pm25_data.iloc[self.current_row,:])
no2_input= no2.iloc[:,1:].to_numpy()
pm10_input= pm10.iloc[:,1:].to_numpy()
pm25_input= pm25.iloc[:,1:].to_numpy()
no2_input = np.ndarray.tobytes(no2_input)
pm10_input = np.ndarray.tobytes(pm10_input)
pm25_input = np.ndarray.tobytes(pm25_input)
#add output to response
response.no2_data = no2_input
response.pm10_data = pm10_input
response.pm25_data = pm25_input
            # add 1 to the row counter (could be made cyclical with mod later)
self.current_row += 1
return response
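# Illustrative sketch (not part of the original file): a minimal client for the service
# above. The stub class name follows standard gRPC code generation for a "Databroker"
# service; the concrete request message type is not shown here, so the caller supplies it.
def fetch_next(request_msg, address="localhost:8061"):
    with grpc.insecure_channel(address) as channel:
        stub = databroker_pb2_grpc.DatabrokerStub(channel)
        return stub.get_next(request_msg)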
#host server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
databroker_pb2_grpc.add_DatabrokerServicer_to_server(Databroker(), server)
print("Starting server. Listening on port : " + str(port))
server.add_insecure_port("[::]:{}".format(port))
server.start()
try:
while True:
time.sleep(86400)
except KeyboardInterrupt:
server.stop(0)
| 2.546875 | 3 |
pyrallis/wrappers/dataclass_wrapper.py | eladrich/pyrallis | 22 | 12791714 | <reponame>eladrich/pyrallis<filename>pyrallis/wrappers/dataclass_wrapper.py<gh_stars>10-100
import argparse
import dataclasses
from dataclasses import _MISSING_TYPE
from logging import getLogger
from typing import Dict, List, Optional, Type, Union, cast
from pyrallis.utils import Dataclass
from . import docstring
from .field_wrapper import FieldWrapper
from .wrapper import Wrapper
from .. import utils
logger = getLogger(__name__)
class DataclassWrapper(Wrapper[Dataclass]):
def __init__(
self,
dataclass: Type[Dataclass],
name: Optional[str] = None,
default: Union[Dataclass, Dict] = None,
prefix: str = "",
parent: "DataclassWrapper" = None,
_field: dataclasses.Field = None,
field_wrapper_class: Type[FieldWrapper] = FieldWrapper
):
# super().__init__(dataclass, name)
self.dataclass = dataclass
self._name = name
self.default = default
self.prefix = prefix
self.fields: List[FieldWrapper] = []
self._required: bool = False
self._explicit: bool = False
self._dest: str = ""
self._children: List[DataclassWrapper] = []
self._parent = parent
# the field of the parent, which contains this child dataclass.
self._field = _field
# the default values
self._defaults: List[Dataclass] = []
if default:
self.defaults = [default]
self.optional: bool = False
for field in dataclasses.fields(self.dataclass):
if not field.init:
continue
elif utils.is_tuple_or_list_of_dataclasses(field.type):
raise NotImplementedError(
f"Field {field.name} is of type {field.type}, which isn't "
f"supported yet. (container of a dataclass type)"
)
elif dataclasses.is_dataclass(field.type):
# handle a nested dataclass attribute
dataclass, name = field.type, field.name
child_wrapper = DataclassWrapper(
dataclass, name, parent=self, _field=field
)
self._children.append(child_wrapper)
elif utils.contains_dataclass_type_arg(field.type):
dataclass = utils.get_dataclass_type_arg(field.type)
name = field.name
child_wrapper = DataclassWrapper(
dataclass, name, parent=self, _field=field, default=None
)
child_wrapper.required = False
child_wrapper.optional = True
self._children.append(child_wrapper)
else:
# a normal attribute
field_wrapper = field_wrapper_class(field, parent=self, prefix=self.prefix)
logger.debug(
f"wrapped field at {field_wrapper.dest} has a default value of {field_wrapper.default}"
)
self.fields.append(field_wrapper)
logger.debug(
f"The dataclass at attribute {self.dest} has default values: {self.defaults}"
)
def add_arguments(self, parser: argparse.ArgumentParser):
from pyrallis.argparsing import ArgumentParser
parser = cast(ArgumentParser, parser)
option_fields = [field for field in self.fields if field.arg_options]
if len(option_fields) > 0:
# Only show groups with parameters
group = parser.add_argument_group(
title=self.title, description=self.description
)
for wrapped_field in option_fields:
logger.debug(
f"Arg options for field '{wrapped_field.name}': {wrapped_field.arg_options}"
)
group.add_argument(
*wrapped_field.option_strings, **wrapped_field.arg_options
)
@property
def name(self) -> str:
return self._name
@property
def parent(self) -> Optional["DataclassWrapper"]:
return self._parent
@property
def defaults(self) -> List[Dataclass]:
if self._defaults:
return self._defaults
if self._field is None:
return []
assert self.parent is not None
if self.parent.defaults:
self._defaults = []
for default in self.parent.defaults:
if default is None:
default = None
else:
default = getattr(default, self.name)
self._defaults.append(default)
else:
try:
default_field_value = utils.default_value(self._field)
except TypeError as e:
# utils.default_value tries to construct the field to get default value and might fail
# if the field has some required arguments
logger.debug(
f"Could not get default value for field '{self._field.name}'\n\tUnderlying Error: {e}")
default_field_value = dataclasses.MISSING
if isinstance(default_field_value, _MISSING_TYPE):
self._defaults = []
else:
self._defaults = [default_field_value]
return self._defaults
@defaults.setter
def defaults(self, value: List[Dataclass]):
self._defaults = value
@property
def title(self) -> str:
title = self.dataclass.__qualname__
if self.dest is not None: # Show name if exists
title += f" ['{self.dest}']"
return title
@property
def description(self) -> str:
if self.parent and self._field:
doc = docstring.get_attribute_docstring(
self.parent.dataclass, self._field.name
)
if doc is not None:
if doc.docstring_below:
return doc.docstring_below
elif doc.comment_above:
return doc.comment_above
elif doc.comment_inline:
return doc.comment_inline
class_doc = self.dataclass.__doc__ or ""
if class_doc.startswith(f'{self.dataclass.__name__}('):
return "" # The base dataclass doc looks confusing, remove it
return class_doc
@property
def required(self) -> bool:
return self._required
@required.setter
def required(self, value: bool):
self._required = value
for field in self.fields:
field.required = value
for child_wrapper in self._children:
child_wrapper.required = value
@property
def descendants(self):
for child in self._children:
yield child
yield from child.descendants
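# Illustrative sketch (not part of the original module): wrapping a simple dataclass
# directly; in normal use pyrallis' ArgumentParser builds these wrappers itself, so the
# example config class and helper below are assumptions for demonstration only.
@dataclasses.dataclass
class _ExampleConfig:
    lr: float = 1e-3

def _build_example_wrapper() -> "DataclassWrapper":
    return DataclassWrapper(_ExampleConfig, name="example")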
| 2.25 | 2 |
MCMC_plotting.py | jlindsey1/MappingExoplanets | 0 | 12791715 | <reponame>jlindsey1/MappingExoplanets<filename>MCMC_plotting.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 21:12:41 2017
@author: Jordan
"""
# The following plots several figures from the MCMC. Not all are relevant, but the code has been kept in this single file
# for simplicity.
print "Start..."
# Import modules
import numpy as np
import matplotlib.pyplot as plt
from lmfit.models import SkewedGaussianModel
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# Import MCMC results
with open('hist_values1.txt') as f: hist_values1 = f.read().splitlines()
with open('hist_values2.txt') as f: hist_values2 = f.read().splitlines()
with open('hist_values3.txt') as f: hist_values3 = f.read().splitlines()
with open('hist_values4.txt') as f: hist_values4 = f.read().splitlines()
with open('hist_values5.txt') as f: hist_values5 = f.read().splitlines()
hist_values1=[float(i) for i in hist_values1]
hist_values2=[float(i) for i in hist_values2]
hist_values3=[float(i) for i in hist_values3]
hist_values4=[float(i) for i in hist_values4]
hist_values5=[float(i) for i in hist_values5]
# Double Ttot and Tfull as only half values were used in the MCMC (to simplify maths)
hist_values2=np.array(hist_values2)*2
hist_values5=np.array(hist_values5)*2
include_middle=True
if include_middle==True: inputfile='generated_data1'
if include_middle==False: inputfile='generated_data_nomid'
chi2file=np.genfromtxt(str(inputfile)+'.txt', names=True, delimiter=';',dtype=None)
modeldata1=np.genfromtxt('uniformingress1.txt', names=True, delimiter=';',dtype=None) #Uniform model
modeldata2=np.genfromtxt('uniformegress1.txt', names=True, delimiter=';',dtype=None)
#modeldata1=np.genfromtxt('nolimbingress1.txt', names=True, delimiter=';',dtype=None) #No-limb model
#modeldata2=np.genfromtxt('nolimbgress1.txt', names=True, delimiter=';',dtype=None)
# Import graph specifications
graphspecs=np.genfromtxt('graph_specs.txt', names=True, delimiter=';',dtype=None)
P_total,P_full,P,flux_star,t_occultation,Initial,Length,Nslices=graphspecs['P_total'],graphspecs['P_full'],graphspecs['P'],graphspecs['flux_star'],graphspecs['t_occultation'],graphspecs['Initial'],graphspecs['Length'],graphspecs['Nslices']
print P_total,P_full,P,flux_star,t_occultation,Initial,Length,Nslices
P_total_initial=P_total*2
P_full_initial=P_full*2
Initial_initial=Initial
savefigures=False
sigma_value=35*1e-6 #SD per point
mean=np.mean(hist_values1)
median=np.median(hist_values1)
standard_dev=np.std(hist_values1)
mean2=np.mean(hist_values2)
median2=np.median(hist_values2)
standard_dev2=np.std(hist_values2)
mean3=np.mean(hist_values5)
median3=np.median(hist_values5)
standard_dev3=np.std(hist_values5)
print "mean: ", mean, "SD: ", standard_dev, "Median: ", median
print "mean2: ", mean2, "SD2: ", standard_dev2, "Median2: ", median2
print "mean3: ", mean3, "SD3: ", standard_dev3, "Median3: ", median3
# Defines the model generation function
def generate_model(full,tot,mid,verbose):
Initial=mid
P_full=full
P_total=tot
if verbose==True: print "Details: ", Initial, P_full, P_total, Length
plotrange=np.linspace(-P_total+Initial,-P_full+Initial, num=Nslices)
plotrange2=np.linspace(P_full+Initial,P_total+Initial, num=Nslices)
stepdifference=np.abs(plotrange[0]-plotrange[1])
rangedifference=np.abs(plotrange2[0]-plotrange[-1])
Nsteps_needed=int(round(rangedifference/stepdifference))
plotrange3=np.linspace(plotrange[-1]+stepdifference,plotrange2[0]-stepdifference,num=Nsteps_needed)
uniform_curve_x,uniform_curve_y=[],[]
total_amount = np.sum(modeldata1['bin_values'])
for i in range(Nslices):
total_amount = total_amount - modeldata1['bin_values'][i]
fractional_flux = (total_amount+flux_star)/(flux_star)
uniform_curve_x.append(plotrange[i])
uniform_curve_y.append(fractional_flux)
if include_middle==True:
for i in range(len(plotrange3)):
uniform_curve_x.append(plotrange3[i])
uniform_curve_y.append(1.)
total_amount = 0
for i in range(Nslices):
total_amount = total_amount + modeldata2['bin_values'][Nslices-i-1]
fractional_flux = (total_amount+flux_star)/(flux_star)
uniform_curve_x.append(plotrange2[i])
uniform_curve_y.append(fractional_flux)
maxvalue=np.max(uniform_curve_y)
uniform_curve_x.append(1)
uniform_curve_y.append(maxvalue)
uniform_curve_x.insert(0,0)
uniform_curve_y.insert(0,maxvalue)
return uniform_curve_x,uniform_curve_y
interpolation_datax,interpolation_dataf=generate_model(0.00730,0.0080,0.50035,verbose=False)
plt.plot(interpolation_datax,interpolation_dataf)
plt.scatter(chi2file['x_values'],chi2file['flux_values'],c='b',s=8,lw=0)#,zorder=2)
if sigma_value!=0: plt.errorbar(chi2file['x_values'],chi2file['flux_values'],yerr=sigma_value,c='#696969',lw=1,ls='none')
plt.xlim(0.47,0.53)
plt.ylim(np.min(chi2file['flux_values']),np.max(chi2file['flux_values']))
plt.xlabel('Phase')
plt.ylabel('$F(t)/F$')
if savefigures==True: plt.savefig('final-mcmc-lightcurve1.pdf')
plt.show()
heatmap, xedges, yedges = np.histogram2d(hist_values1, hist_values3, bins=(100,100),normed=True)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
contourplot=ax3.imshow(heatmap.T, extent=extent, origin='lower', cmap='Greys')
ax2.axis('off')
ax1.hist(hist_values1,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step")
ax4.hist(hist_values3,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step", orientation="horizontal")
ax3.axis('tight')
ax3.ticklabel_format(useOffset=False)
#myLocator = mticker.MultipleLocator(0.00)
#ax3.xaxis.set_major_locator(myLocator)
ax3.set_xlabel('Midpoint Phase Position')
ax3.set_ylabel('Chi-Squared Value')
ax1.set_ylabel('PDF')
ax4.set_xlabel('PDF')
ax3.set_xlim(np.min(hist_values1),np.max(hist_values1))
ax3.set_ylim(np.min(hist_values3)*0.95,np.max(hist_values3))
if savefigures==True: plt.savefig('chisquared-corner1.pdf')
plt.show()
plt.hist2d(hist_values1,hist_values3, bins=100)
plt.xlabel('Midpoint Phase Position')
plt.ylabel('Chi-Squared')
if savefigures==True: plt.savefig('chisquared-hist1.pdf')
plt.show()
plt.hist2d(hist_values2,hist_values3, bins=100)
plt.xlabel('Total Duration Phase')
plt.ylabel('Chi-Squared')
if savefigures==True: plt.savefig('chisquared-hist2.pdf')
plt.show()
plt.hist2d(hist_values1,hist_values3, bins=200)
plt.xlabel('Midpoint Phase Position')
plt.ylabel('Chi-Squared')
if savefigures==True: plt.savefig('chisquared-hist3.pdf')
plt.show()
plt.hist2d(hist_values2,hist_values3, bins=200)
plt.xlabel('Total Duration Phase')
plt.ylabel('Chi-Squared')
if savefigures==True: plt.savefig('chisquared-hist4.pdf')
plt.show()
plt.hist2d(hist_values5,hist_values3, bins=200)
plt.xlabel('Full Duration Phase')
plt.ylabel('Chi-Squared')
if savefigures==True: plt.savefig('chisquared-hist5.pdf')
plt.show()
heatmap, xedges, yedges = np.histogram2d(hist_values2, hist_values3, bins=(100,100),normed=True)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
contourplot=ax3.imshow(heatmap.T, extent=extent, origin='lower', cmap='Greys')
ax2.axis('off')
ax1.hist(hist_values2,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step")
ax4.hist(hist_values3,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step", orientation="horizontal")
ax3.axis('tight')
ax3.ticklabel_format(useOffset=False)
#myLocator = mticker.MultipleLocator(0.00)
#ax3.xaxis.set_major_locator(myLocator)
ax3.set_xlabel('Total Duration Phase')
ax3.set_ylabel('Chi-Squared Value')
ax1.set_ylabel('Marginalised Chi-Squared PDF')
ax4.set_xlabel('Marginalised Chi-Squared PDF')
ax3.set_xlim(np.min(hist_values2),np.max(hist_values2))
ax3.set_ylim(np.min(hist_values3)*0.95,np.max(hist_values3))
if savefigures==True: plt.savefig('chisquared-corner2.pdf')
plt.show()
y,x,_=plt.hist(hist_values1,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step",label="PDF")
plt.axvline(x=Initial_initial,c='k',lw=2,label='Origin')
plt.xlabel('Midpoint Phase Position')
plt.ylabel('Marginalised Chi-Squared PDF')
plt.ylim(0,y.max()*(1.05))
plt.vlines(x=(mean), ymin=0, ymax=y.max()*(1.05), color='g', label='Mean')
plt.vlines(x=(mean-standard_dev), ymin=0, ymax=y.max()*(1.05), color='r', label='$\sigma_-$')
plt.vlines(x=(mean-standard_dev*2), ymin=0, ymax=y.max()*(1.05), color='m', label='$2\sigma_-$')
plt.vlines(x=(mean+standard_dev), ymin=0, ymax=y.max()*(1.05), color='b', label='$\sigma_+$')
plt.vlines(x=(mean+standard_dev*2), ymin=0, ymax=y.max()*(1.05), color='c', label='$2\sigma_+$')
plt.legend()
if savefigures==True: plt.savefig('PDF1-modified.pdf')
plt.show()
n_hist, b_hist, patches_hist = plt.hist(hist_values1,bins=200,normed=1,edgecolor="black",facecolor="black",histtype="step",label="PDF")
plt.hist(hist_values1,bins=200,normed=1,facecolor="black",edgecolor='None',alpha=0.1,label="PDF")
plt.xlabel('Midpoint Phase Position')
plt.ylabel('Normalised PDF')
if savefigures == True: plt.savefig('plottemp.pdf')
bin_max = np.where(n_hist == n_hist.max())
print "Mode:", b_hist[bin_max][0]
### CONFIDENCE INTERVAL SELECTOR: ########################################
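# The loop below builds a highest-density-style interval from the fitted skewed Gaussian:
# it walks the left boundary inward, pairs it with a right boundary of at least the same
# density, and stops once the probability mass enclosed between the two boundaries first
# drops below 0.69, i.e. roughly a 1-sigma credible interval.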
bin_heights, bin_borders, _ = n_hist, b_hist, patches_hist
bin_center = bin_borders[:-1] + np.diff(bin_borders) / 2
xvals, yvals = bin_center, bin_heights
model = SkewedGaussianModel()
params = model.guess(yvals, x=xvals)
result = model.fit(yvals, params, x=xvals)
print result.fit_report()
plt.plot(xvals, result.best_fit,c='c',lw=2)
#Mode Finder:
maxval=0
maxvalx=0
for i in range(len(xvals)):
if result.best_fit[i]>maxval:
maxval=result.best_fit[i]
maxvalx=xvals[i]
print "Curve Mode:", maxvalx
area = np.trapz(result.best_fit, x=xvals)#, dx=5)
print "area =", area
summation1=0
summation2=0
prev_highest=[0]
prev_highest_position=[1e9]
i=0
newx1=[]
newy1=[]
newx2=[]
newy2=[]
while i < len(xvals):
position1=result.best_fit[i]
newx1.append(xvals[i])
newy1.append(position1)
summation1=np.trapz(newy1,x=newx1)
found = False
for j in range(len(xvals)):
loc=len(xvals)-1-j
if loc==-1: raise Exception("Array error.")
position2=result.best_fit[loc]
if (position2>=position1) and (found==False) and (xvals[loc]<=prev_highest_position[-1]) and (position2 >= prev_highest[-1]):
if (position2>1e3*position1) and (position1!=0): raise Exception("Corresponding position for probability=({}) not correctly found. E1".format(position1))
found = True
prev_highest.append(position2)
prev_highest_position.append(xvals[loc])
#plt.axvline(xvals[loc],c='m')
if j>=len(n_hist) and found==False:
raise Exception("Corresponding position for probability=({}) not found. E2".format(position1))
if found == True:
newx2.append(xvals[loc])
newy2.append(position2)
break
summation2=np.abs(np.trapz(newy2,x=newx2))
testcondition=1-(summation1+summation2)
if testcondition<0.69:
plt.axvline(Initial_initial,c='r')
plt.axvline(maxvalx,c='k')
plt.axvline(newx1[-1],c='#505050')
plt.axvline(newx2[-1],c='#505050')
print "Lower: ", np.abs(maxvalx-newx1[-1])
print "Upper: ", np.abs(maxvalx-newx2[-1])
break
else: i+=1
#plt.axvline(xvals[i],c='b')
print testcondition
if savefigures == True: plt.savefig('asymmetric1.pdf')
plt.show()
###
y,x,_=plt.hist(hist_values2,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step",label="PDF")
plt.axvline(x=P_total_initial,c='k',lw=2,label='Origin')
plt.xlabel('Total Duration Phase')
plt.ylabel('Marginalised Chi-Squared PDF')
plt.ylim(0,y.max()*(1.05))
plt.vlines(x=(mean2), ymin=0, ymax=y.max()*(1.05), color='g', label='Mean')
plt.vlines(x=(mean2-standard_dev2), ymin=0, ymax=y.max()*(1.05), color='r', label='$\sigma_-$')
plt.vlines(x=(mean2-standard_dev2*2), ymin=0, ymax=y.max()*(1.05), color='m', label='$2\sigma_-$')
plt.vlines(x=(mean2+standard_dev2), ymin=0, ymax=y.max()*(1.05), color='b', label='$\sigma_+$')
plt.vlines(x=(mean2+standard_dev2*2), ymin=0, ymax=y.max()*(1.05), color='c', label='$2\sigma_+$')
plt.legend()
if savefigures==True: plt.savefig('PDF2-modified.pdf')
plt.show()
n_hist, b_hist, patches_hist = plt.hist(hist_values2,bins=200,normed=1,edgecolor="black",facecolor="black",histtype="step",label="PDF")
plt.hist(hist_values2,bins=200,normed=1,facecolor="black",edgecolor='None',alpha=0.1,label="PDF")
plt.xlabel('Total Occultation Duration')
plt.ylabel('Normalised PDF')
if savefigures == True: plt.savefig('plottemp2.pdf')
bin_max = np.where(n_hist == n_hist.max())
print "Mode:", b_hist[bin_max][0]
### CONFIDENCE INTERVAL SELECTOR: ########################################
bin_heights, bin_borders, _ = n_hist, b_hist, patches_hist
bin_center = bin_borders[:-1] + np.diff(bin_borders) / 2
xvals, yvals = bin_center, bin_heights
model = SkewedGaussianModel()
params = model.guess(yvals, x=xvals)
result = model.fit(yvals, params, x=xvals)
print result.fit_report()
plt.plot(xvals, result.best_fit,c='c',lw=2)
#Mode Finder:
maxval=0
maxvalx=0
for i in range(len(xvals)):
if result.best_fit[i]>maxval:
maxval=result.best_fit[i]
maxvalx=xvals[i]
print "Curve Mode:", maxvalx
area = np.trapz(result.best_fit, x=xvals)#, dx=5)
print "area =", area
summation1=0
summation2=0
prev_highest=[0]
prev_highest_position=[1e9]
i=0
newx1=[]
newy1=[]
newx2=[]
newy2=[]
while i < len(xvals):
position1=result.best_fit[i]
newx1.append(xvals[i])
newy1.append(position1)
summation1=np.trapz(newy1,x=newx1)
found = False
for j in range(len(xvals)):
loc=len(xvals)-1-j
if loc==-1: raise Exception("Array error.")
position2=result.best_fit[loc]
if (position2>=position1) and (found==False) and (xvals[loc]<=prev_highest_position[-1]) and (position2 >= prev_highest[-1]):
if (position2>1e3*position1) and (position1!=0): raise Exception("Corresponding position for probability=({}) not correctly found. E1".format(position1))
found = True
prev_highest.append(position2)
prev_highest_position.append(xvals[loc])
#plt.axvline(xvals[loc],c='m')
if j>=len(n_hist) and found==False:
raise Exception("Corresponding position for probability=({}) not found. E2".format(position1))
if found == True:
newx2.append(xvals[loc])
newy2.append(position2)
break
summation2=np.abs(np.trapz(newy2,x=newx2))
testcondition=1-(summation1+summation2)
if testcondition<0.69:
plt.axvline(maxvalx,c='k')
plt.axvline(P_total_initial,c='r')
plt.axvline(newx1[-1],c='#505050')
plt.axvline(newx2[-1],c='#505050')
print "Lower: ", np.abs(maxvalx-newx1[-1])
print "Upper: ", np.abs(maxvalx-newx2[-1])
break
else: i+=1
print testcondition
if savefigures == True: plt.savefig('asymmetric2.pdf')
plt.show()
###
y,x,_=plt.hist(hist_values5,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step",label="PDF")
plt.axvline(x=P_full_initial,c='k',lw=2,label='Origin')
plt.xlabel('Full Duration Phase')
plt.ylabel('Marginalised Chi-Squared PDF')
plt.ylim(0,y.max()*(1.05))
plt.vlines(x=(mean3), ymin=0, ymax=y.max()*(1.05), color='g', label='Mean')
plt.vlines(x=(mean3-standard_dev3), ymin=0, ymax=y.max()*(1.05), color='r', label='$\sigma_-$')
plt.vlines(x=(mean3-standard_dev3*2), ymin=0, ymax=y.max()*(1.05), color='m', label='$2\sigma_-$')
plt.vlines(x=(mean3+standard_dev3), ymin=0, ymax=y.max()*(1.05), color='b', label='$\sigma_+$')
plt.vlines(x=(mean3+standard_dev3*2), ymin=0, ymax=y.max()*(1.05), color='c', label='$2\sigma_+$')
plt.legend()
if savefigures==True: plt.savefig('PDF3-modified.pdf')
plt.show()
n_hist, b_hist, patches_hist = plt.hist(hist_values5,bins=200,normed=1,edgecolor="black",facecolor="black",histtype="step",label="PDF")
plt.hist(hist_values5,bins=200,normed=1,facecolor="black",edgecolor='None',alpha=0.1,label="PDF")
plt.xlabel('Full Occultation Duration')
plt.ylabel('Normalised PDF')
if savefigures == True: plt.savefig('plottemp3.pdf')
bin_max = np.where(n_hist == n_hist.max())
print "Mode:", b_hist[bin_max][0]
### CONFIDENCE INTERVAL SELECTOR: ########################################
bin_heights, bin_borders, _ = n_hist, b_hist, patches_hist
bin_center = bin_borders[:-1] + np.diff(bin_borders) / 2
xvals, yvals = bin_center, bin_heights
model = SkewedGaussianModel()
params = model.guess(yvals, x=xvals)
result = model.fit(yvals, params, x=xvals)
print result.fit_report()
plt.plot(xvals, result.best_fit,c='c',lw=2)
#Mode Finder:
maxval=0
maxvalx=0
for i in range(len(xvals)):
if result.best_fit[i]>maxval:
maxval=result.best_fit[i]
maxvalx=xvals[i]
print "Curve Mode:", maxvalx
area = np.trapz(result.best_fit, x=xvals)#, dx=5)
print "area =", area
summation1=0
summation2=0
prev_highest=[0]
prev_highest_position=[1e9]
i=0
newx1=[]
newy1=[]
newx2=[]
newy2=[]
while i < len(xvals):
position1=result.best_fit[i]
newx1.append(xvals[i])
newy1.append(position1)
summation1=np.trapz(newy1,x=newx1)
found = False
for j in range(len(xvals)):
loc=len(xvals)-1-j
if loc==-1: raise Exception("Array error.")
position2=result.best_fit[loc]
if (position2>=position1) and (found==False) and (xvals[loc]<=prev_highest_position[-1]) and (position2 >= prev_highest[-1]):
if (position2>1e3*position1) and (position1!=0): raise Exception("Corresponding position for probability=({}) not correctly found. E1".format(position1))
found = True
prev_highest.append(position2)
prev_highest_position.append(xvals[loc])
#plt.axvline(xvals[loc],c='m')
if j>=len(n_hist) and found==False:
raise Exception("Corresponding position for probability=({}) not found. E2".format(position1))
if found == True:
newx2.append(xvals[loc])
newy2.append(position2)
break
summation2=np.abs(np.trapz(newy2,x=newx2))
testcondition=1-(summation1+summation2)
if testcondition<0.69:
plt.axvline(maxvalx,c='k')
plt.axvline(P_full_initial,c='r')
plt.axvline(newx1[-1],c='#505050')
plt.axvline(newx2[-1],c='#505050')
print "Lower: ", np.abs(maxvalx-newx1[-1])
print "Upper: ", np.abs(maxvalx-newx2[-1])
break
else: i+=1
print testcondition
if savefigures == True: plt.savefig('asymmetric3.pdf')
plt.show()
###
xpoints1=np.linspace(0,len(hist_values1),num=len(hist_values1))
xpoints2=np.linspace(0,len(hist_values2),num=len(hist_values2))
plt.scatter(xpoints1,hist_values1,c='r',s=3)
plt.xlabel('Number of Samples')
plt.ylabel('Midpoint Phase Position')
if savefigures==True: plt.savefig('parameter-variation1.pdf')
plt.show()
plt.scatter(xpoints2,hist_values2,c='b',s=3)
plt.xlabel('Number of Samples')
plt.ylabel('Total Duration Phase')
if savefigures==True: plt.savefig('parameter-variation2.pdf')
plt.show()
plt.scatter(xpoints2,hist_values5,c='b',s=3)
plt.xlabel('Number of Samples')
plt.ylabel('Full Duration Phase')
if savefigures==True: plt.savefig('parameter-variation3.pdf')
plt.show()
plt.scatter(xpoints2,hist_values4,c='m',s=3)
plt.xlabel('Number of Samples')
plt.ylabel('Reduced Chi Squared')
if savefigures==True: plt.savefig('parameter-variation3.pdf')
plt.show()
heatmap, xedges, yedges = np.histogram2d(hist_values1, hist_values2, bins=(100,100),normed=True)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
contourplot=ax3.imshow(heatmap.T, extent=extent, origin='lower', cmap='Greys')
axins1 = inset_axes(ax3,
width="5%",
height="92.5%",
loc=1)
plt.colorbar(contourplot, cax=axins1, orientation="vertical")
ax2.axis('off')
ax1.hist(hist_values1,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step")
ax4.hist(hist_values2,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step", orientation="horizontal")
ax3.axis('tight')
ax3.ticklabel_format(useOffset=False)
myLocator = mticker.MultipleLocator(0.0003)
ax3.xaxis.set_major_locator(myLocator)
ax3.set_xlabel('Midpoint Position')
ax3.set_ylabel('Total Duration')
ax1.set_ylabel('Marginalised PDF')
ax4.set_xlabel('Marginalised PDF')
ax3.set_xlim(np.min(hist_values1),np.max(hist_values1))
ax3.set_ylim(np.min(hist_values2),np.max(hist_values2))
if savefigures==True: plt.savefig('corner-modified.pdf')
plt.show()
heatmap, xedges, yedges = np.histogram2d(hist_values1, hist_values5, bins=(100,100),normed=True)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
contourplot=ax3.imshow(heatmap.T, extent=extent, origin='lower', cmap='Greys')
axins1 = inset_axes(ax3,
width="5%",
height="92.5%",
loc=1)
plt.colorbar(contourplot, cax=axins1, orientation="vertical")#, ticks=[1, 2, 3])
#plt.colorbar(contourplot,ax=ax3)
ax2.axis('off')
ax1.hist(hist_values1,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step")
ax4.hist(hist_values5,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step", orientation="horizontal")
ax3.axis('tight')
ax3.ticklabel_format(useOffset=False)
myLocator = mticker.MultipleLocator(0.0003)
ax3.xaxis.set_major_locator(myLocator)
ax3.set_xlabel('Midpoint Position')
ax3.set_ylabel('Full Duration')
ax1.set_ylabel('Marginalised PDF')
ax4.set_xlabel('Marginalised PDF')
ax3.set_xlim(np.min(hist_values1),np.max(hist_values1))
ax3.set_ylim(np.min(hist_values5),np.max(hist_values5))
if savefigures==True: plt.savefig('corner-modified2.pdf')
plt.show()
heatmap, xedges, yedges = np.histogram2d(hist_values2, hist_values5, bins=(100,100),normed=True)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
contourplot=ax3.imshow(heatmap.T, extent=extent, origin='lower', cmap='Greys')
axins1 = inset_axes(ax3,
width="5%",
height="92.5%",
loc=1)
plt.colorbar(contourplot, cax=axins1, orientation="vertical")
ax2.axis('off')
ax1.hist(hist_values2,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step")
ax4.hist(hist_values5,bins=100,normed=1,edgecolor="black",facecolor="black",histtype="step", orientation="horizontal")
ax3.axis('tight')
ax3.ticklabel_format(useOffset=False)
#myLocator = mticker.MultipleLocator(0.00)
#ax3.xaxis.set_major_locator(myLocator)
ax3.set_xlabel('Total Duration')
ax3.set_ylabel('Full Duration')
ax1.set_ylabel('Marginalised PDF')
ax4.set_xlabel('Marginalised PDF')
ax3.set_xlim(np.min(hist_values2),np.max(hist_values2))
ax3.set_ylim(np.min(hist_values5),np.max(hist_values5))
if savefigures==True: plt.savefig('corner-modified3.pdf')
plt.show()
########################################
print "Done."
| 2.34375 | 2 |
src/the_tale/the_tale/finances/shop/relations.py | al-arz/the-tale | 85 | 12791716 |
import smart_imports
smart_imports.all()
INFINIT_PREMIUM_DESCRIPTION = 'Вечная подписка даёт вам все бонусы подписчика на всё время игры.'
class PERMANENT_PURCHASE_TYPE(rels_django.DjangoEnum):
description = rels.Column(unique=False)
might_required = rels.Column(unique=False, single_type=False)
level_required = rels.Column(unique=False, single_type=False)
full_name = rels.Column()
records = (('INFINIT_SUBSCRIPTION', 12, 'Вечная подписка', INFINIT_PREMIUM_DESCRIPTION, None, None, 'Вечная подписка'),)
class GOODS_GROUP(rels_django.DjangoEnum):
uid = rels.Column()
uid_prefix = rels.Column(unique=False)
records = (('PREMIUM', 0, 'подписка', 'subscription', 'subscription-'),
('ENERGY', 1, 'энергия', 'energy', 'energy-'),
('CHEST', 2, 'сундук', 'random-premium-chest', 'random-premium-chest'),
('PREFERENCES', 3, 'предпочтения', 'preference', 'preference-'),
('PREFERENCES_RESET', 4, 'сброс предпочтений', 'preference-reset', 'hero-preference-reset-'),
('HABITS', 5, 'черты', 'habits', 'hero-habits-'),
('ABILITIES', 6, 'способности', 'abilities', 'hero-abilities-'),
('CLANS', 7, 'гильдии', 'clans', 'clan-'),
('CARDS', 8, 'Карты судьбы', 'cards', 'cards-'))
CARDS_MIN_PRICES = {cards_relations.RARITY.COMMON: 2,
cards_relations.RARITY.UNCOMMON: 10,
cards_relations.RARITY.RARE: 25,
cards_relations.RARITY.EPIC: 50,
cards_relations.RARITY.LEGENDARY: 100}
| 1.882813 | 2 |
latextools_utils/is_tex_file.py | MPvHarmelen/MarkdownCiteCompletions | 0 | 12791717 | <filename>latextools_utils/is_tex_file.py
from .settings import get_setting
strbase = str
def get_tex_extensions():
tex_file_exts = get_setting('tex_file_exts', ['.tex'])
return [s.lower() for s in set(tex_file_exts)]
def is_tex_file(file_name):
if not isinstance(file_name, strbase):
raise TypeError('file_name must be a string')
tex_file_exts = get_tex_extensions()
for ext in tex_file_exts:
if file_name.lower().endswith(ext):
return True
return False
| 2.6875 | 3 |
spasco/main.py | NiklasTiede/spasco | 2 | 12791718 | """spasco - spaces to underscores
==============================
Command line tool for replacing/removing whitespaces or other patterns of file- and directory names.
"""
# Copyright (c) 2021, <NAME>.
# All rights reserved. Distributed under the MIT License.
import argparse
import configparser
import fnmatch
import logging
import os
import sys
from argparse import _SubParsersAction
from argparse import HelpFormatter
from typing import List
from typing import Tuple
from spasco import __src_url__
from spasco import __title__
from spasco import __version__
from spasco.term_color import fmt
from spasco.term_color import Txt
base, file = os.path.split(__file__)
settings_file = os.path.join(base, 'settings.ini')
# set up a settings file and then a logger:
config = configparser.ConfigParser()
config.read(settings_file)
# default values for log record are created:
if not config.read(settings_file):
config['VALUE-SETTINGS'] = {
'search_value': "' '",
'new_value': '_',
}
config['LOG-SETTINGS'] = {
'Logging_turned_on': "False",
'logger_filename': f'{__title__}.log',
'logger_location': os.environ['HOME'],
}
with open(settings_file, 'w') as f:
config.write(f)
def get_logger_path() -> str:
logger_location = config.get('LOG-SETTINGS', 'logger_location')
logger_filename = config.get('LOG-SETTINGS', 'logger_filename')
return f"{logger_location}/{logger_filename}"
logger_path = get_logger_path()
logging.basicConfig(
filename=logger_path,
level=logging.INFO,
format='%(levelname)s | %(asctime)s | %(message)s',
)
if (sys.platform != 'linux' and sys.platform != 'darwin'):
print(f"{__title__!r} is currently not optimized for platforms other than OS X / linux")
def main(argv: List[str]) -> int:
""" Main program.
:argument
argv: command-line arguments, such as sys.argv (including the program name
in argv[0]).
:return
Zero on successful program termination, non-zero otherwise.
"""
main_parser, config_subparser = __build_parser()
argv = argv[1:]
args = main_parser.parse_args(args=argv)
# triggering config subparser
if vars(args).get('command', None) == 'config':
execute_config(config_subparser, argv)
return 0
###########################
# 1 select and sort paths #
###########################
files_dirs = []
if isinstance(args.file_or_dir, str):
args.file_or_dir = [args.file_or_dir]
if args.file_or_dir and not args.recursive:
files_dirs.extend(args.file_or_dir)
if args.recursive:
files_dirs = recurse_dirs_and_files()
# sort paths (longest paths first) so that renaming starts with the deepest nested file/directory:
files_dirs = [x.split('/') for x in files_dirs]
sorted_paths = sorted(files_dirs, key=len, reverse=True)
files_dirs = ['/'.join(path_as_lst) for path_as_lst in sorted_paths]
########################
# 2: path filtration #
########################
SEARCH_VALUE = args.search_value if args.search_value else config.get(
'VALUE-SETTINGS', 'search_value',
)
if SEARCH_VALUE == "' '":
SEARCH_VALUE = ' '
filtered_paths = []
all_selected_files_dirs = files_dirs.copy()
# ------ no file/dir existent ----
if not files_dirs:
print('No directory or file present!')
return 1
# ------ search-value filter ------
# [files_dirs.remove(x) for x in all_selected_files_dirs if SEARCH_VALUE not in x.split('/')[-1]]
for x in all_selected_files_dirs:
if SEARCH_VALUE not in x.split('/')[-1]:
files_dirs.remove(x)
if not files_dirs:
searchval_msg = f"None of the {len(all_selected_files_dirs)} present files/directories contain the search value '{SEARCH_VALUE}'!"
print(searchval_msg)
return 1
# ------ pattern-only filter ------
# [files_dirs.remove(x) for x in files_dirs.copy() if args.pattern_only and not fnmatch.fnmatch(os.path.split(x)[1], args.pattern_only)]
for x in files_dirs.copy():
if args.pattern_only and not fnmatch.fnmatch(os.path.split(x)[1], args.pattern_only):
files_dirs.remove(x)
if not files_dirs:
print(f'None of the {len(all_selected_files_dirs)} present files/directories contain the pattern {args.pattern_only!r}!')
return 1
# ------ except-pattern filter -----
# [files_dirs.remove(x) for x in files_dirs.copy() if args.except_pattern and fnmatch.fnmatch(os.path.split(x)[-1], args.except_pattern)]
for x in files_dirs.copy():
if args.except_pattern and fnmatch.fnmatch(os.path.split(x)[-1], args.except_pattern):
files_dirs.remove(x)
if not files_dirs:
print(f'None of the exception-pattern matching files/directories contain the search-value {SEARCH_VALUE!r}.',)
return 1
# ------ dirs-only filter -----
# [files_dirs.remove(x) for x in files_dirs.copy() if args.dirs_only and not os.path.isdir(x)]
for x in files_dirs.copy():
if args.dirs_only and not os.path.isdir(x):
files_dirs.remove(x)
if not files_dirs:
print('No directory present for renaming.')
return 1
# ------ files-only filter -----
# [files_dirs.remove(x) for x in files_dirs.copy() if args.files_only and not os.path.isfile(x)]
for x in files_dirs.copy():
if args.files_only and not os.path.isfile(x):
files_dirs.remove(x)
if not files_dirs:
print('No file present for renaming.')
return 1
filtered_paths = files_dirs
################
# 3 renaming #
################
if args.new_value == '':
NEW_VALUE = ''
if args.new_value:
NEW_VALUE = args.new_value
if args.new_value is None:
NEW_VALUE = config.get('VALUE-SETTINGS', 'new_value')
if NEW_VALUE == "''" or NEW_VALUE == '""':
NEW_VALUE = ''
filecount, dircount, renamed_paths = path_renaming(
path_lst=filtered_paths,
search_value=SEARCH_VALUE,
new_value=NEW_VALUE,
)
if args.immediately:
is_proceeding = 'y'
else:
msg = f'You can rename {len(filtered_paths)} files and/or directories.' # 🔨
colored_msg = fmt(msg) # , Txt.greenblue
print(colored_msg)
print()
before_heading = fmt('Before', Txt.pink, bolded=True)
after_heading = fmt('After', Txt.blue, bolded=True)
sep_line = fmt('──', Txt.greenblue)
print(f"{before_heading} {' ' * (max([len(x) for x in filtered_paths]) - len('before') + 6)} {after_heading}",)
print(f"{sep_line * (max([len(x) for x in filtered_paths]) + 4)}")
for before, after in list(zip(filtered_paths, renamed_paths)):
before_renaming = fmt(before, Txt.pink)
after_renaming = fmt(after, Txt.blue)
print(f"'{before_renaming}'{' ' * (max([len(x) for x in filtered_paths]) - len(before))} {fmt('🡆', Txt.greenblue)} '{after_renaming}'",)
print(f"{sep_line * (max([len(x) for x in filtered_paths]) + 4)}")
print()
q = fmt(' [y/n] ', Txt.pink)
proceed_msg = fmt('OK to proceed with renaming?') # , Txt.greenblue
is_proceeding = input(proceed_msg + q)
if is_proceeding.lower() == 'y':
filecount, dircount, new_pathnames = path_renaming(
path_lst=filtered_paths,
search_value=SEARCH_VALUE,
new_value=NEW_VALUE,
renaming=True,
)
success_msg = fmt(f'All done! {filecount} files and {dircount} directories were renamed! ✨💄✨', Txt.greenblue)
print(success_msg)
return 0
else:
print(fmt("Command aborted.", textcolor=Txt.pink))
return 1
settings_msg = f"""{fmt("value settings:", Txt.greenblue)}
search_value: {config.get('VALUE-SETTINGS', 'search_value')}
new_value: {config.get('VALUE-SETTINGS', 'new_value')}
{fmt("log settings:", Txt.greenblue)}
logging_turned_on: {config.getboolean('LOG-SETTINGS', 'logging_turned_on')}
logger_filename: {config.get('LOG-SETTINGS', 'logger_filename')}
logger_location: {config.get('LOG-SETTINGS', 'logger_location')}"""
def execute_config(config_subparser: argparse.ArgumentParser, argv: List[str]) -> int:
""" Boolean logic of config subparser triggering. """
args = config_subparser.parse_args(argv[1:])
if args.show_settings:
print(settings_msg)
return 0
if args.turn_log_on:
config['LOG-SETTINGS']['logging_turned_on'] = args.turn_log_on.capitalize()
with open(settings_file, 'w') as fp:
config.write(fp)
log_state = config.getboolean('LOG-SETTINGS', 'logging_turned_on')
if log_state:
print('Logging is activated.')
else:
print('Logging is deactivated.')
return 0
if args.log_name:
old_logger_path = get_logger_path()
config['LOG-SETTINGS']['logger_filename'] = args.log_name
with open(settings_file, 'w') as fp:
config.write(fp)
new_logger_path = get_logger_path()
os.rename(old_logger_path, new_logger_path)
print(f"The new log filename is {config.get('LOG-SETTINGS', 'logger_filename')!r}.",)
return 0
if args.log_location:
old_logger_path = get_logger_path()
log_location = args.log_location
if '~' in args.log_location:
log_location = os.path.expanduser(args.log_location)
if not os.path.isdir(log_location):
print(f'The given path {args.log_location!r} is not a valid directory!')
return 1
config['LOG-SETTINGS']['logger_location'] = log_location
with open(settings_file, 'w') as fp:
config.write(fp)
new_logger_path = get_logger_path()
os.rename(old_logger_path, new_logger_path)
print(f"The new log location is {config.get('LOG-SETTINGS', 'logger_location')!r}.",)
return 0
if args.set_search_value:
if args.set_search_value == ' ':
config['VALUE-SETTINGS']['search_value'] = "' '"
with open(settings_file, 'w') as fp:
config.write(fp)
print(f"The new search-value is {config.get('VALUE-SETTINGS', 'search_value')}.",)
else:
config['VALUE-SETTINGS']['search_value'] = args.set_search_value
with open(settings_file, 'w') as fp:
config.write(fp)
print(f"The new search-value is {config.get('VALUE-SETTINGS', 'search_value')!r}.",)
return 0
if args.set_new_value == '':
config['VALUE-SETTINGS']['new_value'] = "''"
with open(settings_file, 'w') as fp:
config.write(fp)
print(f"The new 'new-value' is {config.get('VALUE-SETTINGS', 'new_value')}.")
return 0
if args.set_new_value:
config['VALUE-SETTINGS']['new_value'] = args.set_new_value
with open(settings_file, 'w') as fp:
config.write(fp)
print(f"The new 'new-value' is {config.get('VALUE-SETTINGS', 'new_value')!r}.")
return 0
config_subparser.print_help()
return 1
def path_renaming(path_lst: List[str], search_value: str, new_value: str, renaming: bool = False) -> Tuple[int, int, List[str]]:
""" List of filtered files and directories are renamed and their names
returned. Furthermore, the number fo directories/files which were renamed
are also returned.
:returns
Tuples containing the number of directories, files and the names of them after renaming
"""
renamed_paths = []
dircount, filecount = 0, 0
for old_path_name in path_lst:
path_base, file = os.path.split(old_path_name)
new_name = file.replace(search_value, new_value)
full_new = os.path.join(path_base, new_name)
renamed_paths.append(full_new)
if renaming:
os.rename(old_path_name, full_new)
if os.path.isdir(full_new):
dircount += 1
elif os.path.isfile(full_new):
filecount += 1
logging.info(f" working dir: {os.getcwd()!r} | naming: {old_path_name!r} --> {full_new!r}",)
return (filecount, dircount, renamed_paths)
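# Illustrative sketch (not part of the original module): previewing how two hypothetical
# paths would be renamed without touching the filesystem (renaming stays False).
def _preview_example() -> List[str]:
    _, _, previews = path_renaming(
        path_lst=['my file.txt', 'my dir/another file.md'],
        search_value=' ',
        new_value='_',
    )
    return previews  # ['my_file.txt', 'my dir/another_file.md']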
def recurse_dirs_and_files() -> List[str]:
""" All files/directories within the current working directory are mapped
into a list.
:returns
List of all file/directory paths, recursively and sorted
"""
all_files_dirs = []
base_path = os.getcwd()
# collect all rel. paths in a list (rel to cwd):
for dirpath, dirnames, filenames in os.walk(base_path):
for filename in filenames:
full_filepath = dirpath + '/' + filename
rel_filepath = os.path.relpath(full_filepath, base_path)
all_files_dirs.append(rel_filepath)
for dirname in dirnames:
full_dirpath = dirpath + '/' + dirname
rel_dirpath = os.path.relpath(full_dirpath, base_path)
all_files_dirs.append(rel_dirpath)
return all_files_dirs
# hack for removing the metavar below the subparsers (config) title
class NoSubparsersMetavarFormatter(HelpFormatter):
def _format_action_invocation(self, action): # type: ignore
if isinstance(action, _SubParsersAction):
return ""
return super()._format_action_invocation(action)
class MyOwnFormatter(NoSubparsersMetavarFormatter, argparse.RawDescriptionHelpFormatter):
""" Removes metavar of config subparser and adds RawDescription """
pass
def __build_parser() -> Tuple[argparse.ArgumentParser, argparse.ArgumentParser]:
""" Constructs the main_parser for the command line arguments.
:returns
An ArgumentParser instance for the CLI.
"""
main_parser = argparse.ArgumentParser(
prog=__title__,
add_help=False,
description=f'Spasco is a glorified replace function. By default it replaces whitespaces\n'
f'of all file- and directory names within your current working directory by \n'
f'underscores.\n\nsrc: {__src_url__}',
epilog='Make your files more computer-friendly 😄',
formatter_class=lambda prog: MyOwnFormatter(
prog, max_help_position=80,
),
)
# optional arguments:
main_parser.add_argument(
"-t",
dest='file_or_dir',
metavar='file_or_dir',
action='store',
nargs='?',
default=os.listdir(),
help='Select a single file or directory for renaming.',
)
main_parser.add_argument(
'-s',
dest='search_value',
nargs='?',
action='store',
metavar='search_value',
help="Define custom search-value (default: ' ').",
)
main_parser.add_argument(
'-n',
dest='new_value',
nargs='?',
action='store',
metavar='new_value',
help="Define custom new-value (default: '_')."
)
main_parser.add_argument(
'-p',
dest='pattern_only',
nargs='?',
action='store',
metavar='pattern_only',
help='Only files/dirs containing the pattern are renamed.',
)
main_parser.add_argument(
'-e',
metavar='except_pattern',
dest='except_pattern',
nargs='?',
action='store',
help='Only files/dirs not containing the pattern are renamed.',
)
main_parser.add_argument(
'-d',
'--dirs-only',
action='store_true',
help='Only directories are renamed.',
)
main_parser.add_argument(
'-f',
'--files-only',
action='store_true',
help='Only files are renamed.',
)
main_parser.add_argument(
'-r',
'--recursive',
action='store_true',
help='Recurse into directories.',
)
main_parser.add_argument(
'-i',
'--immediately',
action='store_true',
help='Skip security question, renaming preview and execute immediately.',
)
main_parser.add_argument(
'-v',
'--version',
action='version',
help='Show version number and exit.',
version=f'%(prog)s {__version__}',
)
add_parser_help(main_parser)
# ---- configuration structured as subparser -----
config_subparsers = main_parser.add_subparsers(
title='log and renaming configuration',
)
config_subparser = add_config_subparser(config_subparsers)
return main_parser, config_subparser
def add_config_subparser(sub_parsers: argparse._SubParsersAction) -> argparse.ArgumentParser:
""" Parser for configuring spasco.
"""
config_subparser = sub_parsers.add_parser(
name='config',
description='search-value and new-value can be changed. Logging to record all '
'renaming actions as log file can be activated.',
usage=f'{__title__} config [--show-setting] [-o true/false] [-n [filename]] [-l [pathname]] [-h, --help ]',
add_help=False,
formatter_class=lambda prog: argparse.RawDescriptionHelpFormatter(
prog, max_help_position=33,
),
help=f"Sub-command to interact with {__title__}'s logging and rename settings.",
)
config_subparser.add_argument(
'--show-settings',
action='store_true',
help='Returns your current settings for logging and renaming.',
)
add_parser_help(config_subparser)
config_subparser_logging = config_subparser.add_argument_group(
'log settings',
)
config_subparser_logging.add_argument(
'-o',
nargs='?',
metavar='true/false',
dest='turn_log_on',
choices=['true', 'false'],
help="Logging is turned on/off (default: off).",
)
config_subparser_logging.add_argument(
'-f',
nargs='?',
metavar='filename',
dest='log_name',
help='Set a new filename for the logger.',
)
config_subparser_logging.add_argument(
'-l',
nargs='?',
metavar='pathname',
dest='log_location',
help='Set a new file location for the logger.',
)
config_subparser_renaming = config_subparser.add_argument_group(
'renaming settings',
)
config_subparser_renaming.add_argument(
'-s',
nargs='?',
metavar='search_value',
dest='set_search_value',
help="Set a new 'search-value' permanently.",
)
config_subparser_renaming.add_argument(
'-n',
nargs='?',
metavar='new_value',
dest='set_new_value',
help="Set a new 'new-value' permanently.",
)
config_subparser.set_defaults(command='config')
return config_subparser
def add_parser_help(parser: argparse.ArgumentParser) -> None:
""" Custom help-argument to have consistent style.
add_help=False to enable this.
"""
parser.add_argument(
'-h',
'--help',
action='help',
help="Show this help message and exit.",
)
def run_main() -> None:
try:
sys.exit(main(sys.argv))
except Exception as e:
sys.stderr.write(__title__ + ': ' + str(e) + '\n')
sys.exit(1)
if __name__ == '__main__':
run_main()
| 2.703125 | 3 |
pyc_compat.py | jplevyak/pyc | 3 | 12791719 | <reponame>jplevyak/pyc
__pyc_declare__ = None
| 1.054688 | 1 |
flask-proj/manage.py | uninstallHahaha/flask-project | 0 | 12791720 | <filename>flask-proj/manage.py<gh_stars>0
from App import create_app
# initialise the application module
manager = create_app()
if __name__ == '__main__':
manager.run()
| 1.40625 | 1 |
melodyrnn/dataset.py | bfw930/uv-eurovision-ai | 0 | 12791721 |
''' imports '''
# filesystem management
import os
# tensors and nn modules
import torch
# array handling
import numpy as np
# midi file import and parse
from mido import MidiFile
class MelodyDataset(torch.utils.data.Dataset):
''' dataset class for midi files '''
def __init__(self, dir_path: str, cache = False, ds: int = 20):
''' init dataset, import midi files '''
super().__init__()
# store downsampling factor
self.ds = ds
# get and store list midi files in directory
self.file_names = [ name for name in os.listdir(dir_path) if 'mid' in name[-4:] ]
# import and store midi files
self.midi_files = [ MidiFile(os.path.join(dir_path, file_name))
for file_name in self.file_names ]
# case filter by key
if False:
# get index for only midi with meta plus [melody, chords, bass] tracks
j = [ i for i in range(len(self.file_names))
if len(self.midi_files[i].tracks) > 3
and "key='{}'".format(key) in str(self.midi_files[i].tracks[0][2]) ]
if False:
# get index for only midi with meta plus [melody, chords, bass] tracks
j = [ i for i in range(len(self.file_names))
if len(self.midi_files[i].tracks) > 3 ]
# filter midi file and file name lists
self.midi_files = [ self.midi_files[i] for i in j ]
self.file_names = [ self.file_names[i] for i in j ]
# init store of import state
self.import_list = [ None for _ in range(len(self.midi_files)) ]
# pre-cache all data
if cache:
# iterate through midi files
for index in range(len(self.file_names)):
# import data to memory
self.import_data(index)
def import_data(self, index):
''' import midi data to memory '''
# get midi by index
midi = self.midi_files[index]
# get midi tracks
tracks = self.midi2tracks(midi)
# get note tracks matrix
matrix = self.tracks2matrix(tracks)
# get melody format from matrix
melody = self.matrix2melody(matrix)
# downsample over time
melody = melody[::self.ds]
        # store the downsampled melody in the import cache
self.import_list[index] = melody
def midi2tracks(self, midi):
''' extract tracks from mido.MidiFile '''
# initialise tracks list
tracks = []
if len(midi.tracks) == 1:
ts = [0]
else:
ts = range(len(midi.tracks))[1:4]
# iterate over tracks in midi (excl. meta track, extra), [melody, chords, bass]
#for i in range(len(midi.tracks))[1:4]:
for i in ts:
# store track data as dict for processing
track = []
# iterate messages in track
for msg in midi.tracks[i][:]:
# ensure note data only
if msg.type in ['note_on', 'note_off']:
# init note data dict
note = {}
# store each note data
#note['type'] = msg.type
#note['channel'] = msg.channel
note['note'] = msg.note
note['time'] = msg.time
#note['velocity'] = msg.velocity
note['velocity'] = 0 if msg.type == 'note_off' else 1
# store note data
track.append(note)
# store track notes
tracks.append(track)
# return extracted midi tracks
return tracks
def tracks2matrix(self, tracks: list):
''' convert tracks to matrix '''
# initialise track matricies list
m = []
# iterate tracks
for track in tracks:
# initialise note state vector, 7-bit note depth
N = np.zeros(128, dtype = np.int16)
# initialise track note matrix (zero init column)
M = np.zeros((128, 1), dtype = np.int16)
# iterate messages in track
for msg in track:
# if time step changes, store intermediate notes
if int(msg['time']) != 0:
# extend note state vector over range time step
n = np.stack([ N for _ in range( int(msg['time']) ) ]).T
# append note state vector to track note matrix
M = np.concatenate( [M, n], axis = 1 )
# update value of note vector by index
N[int(msg['note'])] = int(msg['velocity'])
# store track note matrix
m.append(M)
# get max length track
s = max([ track.shape[1] for track in m ])
# pad tracks to max length of time axis, stack on new axis
M = np.stack([ np.pad(track, ((0, 0), (0, s - track.shape[1])))
for track in m ], axis = 2)
# return stacked tracks note matrix
return M
def matrix2melody(self, matrix):
''' extract melody from note matrix '''
# get track note matrix for melody only
M = matrix[:,:,0]
# init zero melody, default negative one
#melody = np.ones(M.shape[1])*-1
melody = np.zeros(M.shape[1])
# get index (note, time) where nonzero
j = np.where( M != 0 )
# set melody note at time by index
melody[j[1]] = j[0]
# return extracted melody
return melody
def __getitem__(self, index):
''' return tracks note matrix '''
# check for import state
if self.import_list[index] is None:
# import data to memory
self.import_data(index)
# return data if already imported
return self.import_list[index]
'''
def linear_quantize(samples, q_levels):
samples = samples.clone()
samples -= samples.min(dim=-1)[0].expand_as(samples)
samples /= samples.max(dim=-1)[0].expand_as(samples)
samples *= q_levels - EPSILON
samples += EPSILON / 2
return samples.long()
def linear_dequantize(samples, q_levels):
return samples.float() / (q_levels / 2) - 1
def q_zero(q_levels):
return q_levels // 2
'''
def __len__(self):
''' return total midi files '''
# return number of midi files
return len(self.file_names)
class MelodyDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, batch_size, seq_len, overlap_len,
*args, **kwargs):
super().__init__(dataset, batch_size, *args, **kwargs)
self.seq_len = seq_len
self.overlap_len = overlap_len
def __iter__(self):
for batch in super().__iter__():
(batch_size, n_samples) = batch.size()
reset = True
#print(self.overlap_len, n_samples, self.seq_len)
for seq_begin in range(self.overlap_len, n_samples, self.seq_len)[:-1]:
from_index = seq_begin - self.overlap_len
to_index = seq_begin + self.seq_len
sequences = batch[:, from_index : to_index]
input_sequences = sequences[:, : -1]
#print(input_sequences.shape)
target_sequences = sequences[:, self.overlap_len :].contiguous()
yield (input_sequences, reset, target_sequences)
reset = False
def __len__(self):
raise NotImplementedError()
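# Hedged usage sketch (editorial addition): wiring the dataset and loader
# together. 'midi/' is a hypothetical directory of .mid files and the sizes
# below are illustrative, not taken from the original project.
def _example_loader():
    dataset = MelodyDataset('midi/', cache=False, ds=20)
    loader = MelodyDataLoader(dataset, batch_size=1, seq_len=64, overlap_len=8)
    for input_sequences, reset, target_sequences in loader:
        # each yield is one training window over the downsampled melody
        return input_sequences.shape, reset, target_sequences.shape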
| 2.71875 | 3 |
tests/test_pickle_funcs.py | Mishne-Lab/cidan | 2 | 12791722 | from cidan.LSSC.functions.pickle_funcs import *
def test_pickle_funcs():
test_dir = "test_pickle"
pickle_set_dir(test_dir)
if not os.path.isdir(test_dir):
os.mkdir(test_dir)
pickle_clear(trial_num=0)
assert not pickle_exist("test", trial_num=0)
obj = "pickle save"
pickle_save(obj, "test",trial_num=0)
assert len([f for f in os.listdir("{0}/0/".format(test_dir))]) == 1
assert pickle_load("test", trial_num=0)==obj
assert pickle_exist("test", trial_num=0)
pickle_clear(trial_num=0)
assert not pickle_exist("test", trial_num=0)
assert len([f for f in os.listdir("{0}/0/".format(test_dir))]) == 0
os.rmdir("{0}/0/".format(test_dir))
os.rmdir(test_dir)
| 2.5625 | 3 |
libs/redis.py | fightingfish008/tornado-extensions | 5 | 12791723 | <reponame>fightingfish008/tornado-extensions
# -*- coding:utf-8 -*-
import traceback
import logging
import aioredis
from tornado.options import options
class AsyncRedisClient(object):
def __init__(self,loop=None):
self.loop = loop
async def init_pool(self, db=None):
if db is None:
_db = options.redis_db4
else:
_db = db
uri = 'redis://{}:{}/{}'.format(
options.redis_host,
options.redis_port,
_db
)
self.pool = await aioredis.create_pool(
uri,
password=options.redis_password,
# encoding="utf-8",
minsize=5,
maxsize=10,
loop = self.loop,
)
super(AsyncRedisClient, self).__init__()
async def execute(self, command, *args, **kwargs):
try:
async with self.pool.get() as conn:
                result = await conn.execute(command, *args, **kwargs)
                return result
except Exception as e:
logging.error(traceback.print_exc())
logging.error("redis execute error: %s", e)
async def get(self, key):
return await self.execute('get', key)
async def set(self, key, value):
return await self.execute('set', key, value)
async def setex(self, key, seconds, value):
return await self.execute('setex', key, seconds, value)
async def keys(self, key):
return await self.execute('keys', key)
async def hgetall(self, key):
return await self.execute('hgetall', key)
async def scan(self, key):
return await self.execute('scan', key)
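# Hedged usage sketch (editorial addition): assumes the tornado options read in
# init_pool (redis_host, redis_port, redis_password, redis_db4) are already
# defined; the key and value below are purely illustrative.
async def example_usage(loop=None):
    client = await connect(loop)           # module-level helper defined below
    await client.set('greeting', 'hello')
    return await client.get('greeting')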
async def connect(loop, db=None):
client = AsyncRedisClient(loop)
await client.init_pool(db)
    return client
| 2.234375 | 2 |
errorhandler.py | BenjaminHalko/WiiMusicEditorPlus | 7 | 12791724 | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog
from errorhandler_ui import Ui_Error
class ShowError(QDialog,Ui_Error):
def __init__(self,error,message,parent=None,geckocode=False):
super().__init__(parent)
self.setWindowFlag(Qt.WindowContextHelpButtonHint,False)
self.setupUi(self)
if(not geckocode):
self.ErrorTitle.setText(error)
self.ErrorMessage.setText(message)
self.ErrorClose.clicked.connect(self.close)
else:
self.clicked = False
self.ErrorTitle_GC.setText(error)
self.ErrorMessage_GC.setText(message)
self.ErrorClose_GC.clicked.connect(self.close)
self.ErrorCreate_GC.clicked.connect(self.GeckoCodeCreate)
self.MainWidget.setCurrentIndex(1)
self.show()
self.exec()
def GeckoCodeCreate(self):
self.clicked = True
        self.close()
| 2.46875 | 2 |
PyFlow/Packages/PyFlowBase/Nodes/forLoopBegin.py | luzpaz/PyFlow | 1,463 | 12791725 | ## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from PyFlow.Core import NodeBase
from PyFlow.Core.PathsRegistry import PathsRegistry
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Core.Common import *
from PyFlow.Packages.PyFlowBase.Nodes import FLOW_CONTROL_ORANGE
import threading
class forLoopBegin(NodeBase):
def __init__(self, name):
super(forLoopBegin, self).__init__(name)
self._working = False
self.currentIndex = 0
self.prevIndex = -1
self.inExec = self.createInputPin('inExec', 'ExecPin', None, self.compute)
self.firstIndex = self.createInputPin('Start', 'IntPin')
self.lastIndex = self.createInputPin('Stop', 'IntPin')
self.loopEndNode = self.createInputPin('Paired block', 'StringPin')
self.loopEndNode.setInputWidgetVariant("ObjectPathWIdget")
self.loopBody = self.createOutputPin('LoopBody', 'ExecPin')
self.index = self.createOutputPin('Index', 'IntPin')
self.headerColor = FLOW_CONTROL_ORANGE
self.setExperimental()
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
helper.addInputDataType('ExecPin')
helper.addInputDataType('IntPin')
helper.addOutputDataType('ExecPin')
helper.addOutputDataType('IntPin')
helper.addInputStruct(StructureType.Single)
helper.addOutputStruct(StructureType.Single)
return helper
@staticmethod
def category():
return 'FlowControl'
@staticmethod
def keywords():
return ['iter']
@staticmethod
def description():
return 'For loop begin block'
def reset(self):
self.currentIndex = 0
self.prevIndex = -1
#self._working = False
def isDone(self):
indexTo = self.lastIndex.getData()
if self.currentIndex >= indexTo:
self.reset()
#loopEndNode = PathsRegistry().getEntity(self.loopEndNode.getData())
#loopEndNode.completed.call()
self._working = False
return True
return False
def onNext(self, *args, **kwargs):
while not self.isDone():
if self.currentIndex > self.prevIndex:
self.index.setData(self.currentIndex)
self.prevIndex = self.currentIndex
self.loopBody.call()
def compute(self, *args, **kwargs):
self.reset()
endNodePath = self.loopEndNode.getData()
loopEndNode = PathsRegistry().getEntity(endNodePath)
if loopEndNode is not None:
if loopEndNode.loopBeginNode.getData() != self.path():
self.setError("Invalid pair")
return
if self.graph() is not loopEndNode.graph():
err = "block ends in different graphs"
self.setError(err)
loopEndNode.setError(err)
return
else:
self.setError("{} not found".format(endNodePath))
if not self._working:
self.thread = threading.Thread(target=self.onNext,args=(self, args, kwargs))
self.thread.start()
self._working = True
#self.onNext(*args, **kwargs)
| 2.09375 | 2 |
day23/script1.py | Moremar/advent_of_code_2015 | 0 | 12791726 | import re
class Command:
def __init__(self, name, register, jump_addr=None):
self.name = name
self.register = register
self.jump_addr = jump_addr
class Program:
def __init__(self, commands, registers):
self.commands = commands
self.registers = registers
self.instr_ptr = 0
def exec_next_command(self):
cmd = self.commands[self.instr_ptr]
if cmd.name == "hlf":
self.registers[cmd.register] //= 2
self.instr_ptr += 1
elif cmd.name == "tpl":
self.registers[cmd.register] *= 3
self.instr_ptr += 1
elif cmd.name == "inc":
self.registers[cmd.register] += 1
self.instr_ptr += 1
elif cmd.name == "jmp":
self.instr_ptr += cmd.jump_addr
elif cmd.name == "jie":
self.instr_ptr += cmd.jump_addr if self.registers[cmd.register] % 2 == 0 else 1
elif cmd.name == "jio":
self.instr_ptr += cmd.jump_addr if self.registers[cmd.register] == 1 else 1
else:
raise ValueError("Unsupported command: ", cmd.name)
def run(self):
while self.instr_ptr < len(self.commands):
self.exec_next_command()
def solve(commands):
pgm = Program(commands, {"a": 0, "b": 0})
pgm.run()
return pgm.registers["b"]
def parse(file_name):
with open(file_name, "r") as f:
commands = []
for line in f.readlines():
if any([cmd in line for cmd in ["inc", "tpl", "hlf"]]):
_, cmd, r, _ = re.split(r"([a-z]+) ([a|b])", line)
commands.append(Command(cmd, r))
elif "jmp" in line:
_, cmd, jmp_addr, _ = re.split(r"([a-z]+) ([+|-][0-9]+)", line)
commands.append(Command(cmd, None, int(jmp_addr)))
if any([cmd in line for cmd in ["jie", "jio"]]):
_, cmd, r, jmp_addr, _ = re.split(r"([a-z]+) ([a|b]), ([+\-0-9]+)", line)
commands.append(Command(cmd, r, int(jmp_addr)))
return commands
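# Hedged example (editorial addition): exercising Program directly on the small
# Advent of Code 2015 day 23 sample program, without reading data.txt.
def _example_program():
    commands = [
        Command("inc", "a"),
        Command("jio", "a", 2),
        Command("tpl", "a"),
        Command("inc", "a"),
    ]
    pgm = Program(commands, {"a": 0, "b": 0})
    pgm.run()
    return pgm.registers["a"]  # -> 2 (jio jumps past tpl because a == 1)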
if __name__ == '__main__':
print(solve(parse("data.txt")))
| 3.3125 | 3 |
example/paywall/migrations/0002_auto_20200417_2107.py | wuuuduu/django-getpaid | 6 | 12791727 | # Generated by Django 3.0.5 on 2020-04-17 21:07
import uuid
import django_fsm
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("paywall", "0001_initial"),
]
operations = [
migrations.RemoveField(model_name="paymententry", name="payment",),
migrations.AddField(
model_name="paymententry",
name="ext_id",
field=models.CharField(db_index=True, default=uuid.uuid4, max_length=100),
),
migrations.AddField(
model_name="paymententry",
name="fraud_status",
field=django_fsm.FSMField(
choices=[
("unknown", "unknown"),
("accepted", "accepted"),
("rejected", "rejected"),
("check", "needs manual verification"),
],
default="unknown",
max_length=50,
protected=True,
),
),
migrations.AddField(
model_name="paymententry",
name="payment_status",
field=django_fsm.FSMField(
choices=[
("new", "new"),
("prepared", "in progress"),
("pre-auth", "pre-authed"),
("charge_started", "charge process started"),
("partially_paid", "partially paid"),
("paid", "paid"),
("failed", "failed"),
("refund_started", "refund started"),
("refunded", "refunded"),
],
default="prepared",
max_length=50,
protected=True,
),
),
]
| 1.898438 | 2 |
chemvae/vae_examples.py | amirnikooie/chemical_vae | 0 | 12791728 | from vae_model import *
#======================
""" Creating the VAE object and initializing the instance """
model_DIR = "./aux_files/"
vae = VAE_Model(directory=model_DIR)
print("The VAE object created successfully!")
'''
#============
""" Working with a sample smiles string to reconstruct it and predict its
properties """
sample_smiles = 'OC1=CC=C(C2=C(C3=CC=C(O)C=C3S2)N2C3=CC=C(C=C3C=C2)OCCN2CCCCC2)C=C1'
z_rep = vae.smiles_to_z(sample_smiles, standardized=True)
X_hat = vae.z_to_smiles(z_rep, standardized=True, verified=False) # decoding
# to molecular space without verifying its validity.
predicted_props = vae.predict_prop_z(z_rep, standardized=True)
print("### {:20s} : {}".format('Input', sample_smiles))
print("### {:20s} : {}".format('Reconstruction', X_hat[0]))
print("### {:20s} : {} with norm {:.3f}".format('Z representation:',
z_rep.shape,
np.linalg.norm(z_rep)))
print("### {:20s} : {}".format('Number of properties', vae.n_props))
print("### {:20s} : {}\n\n".format('Predicted properties', predicted_props))
#======================
""" Property prediction for 20 samples from multivariate standard normal
distribution """
z_mat = np.random.normal(0, 1, size=(20,z_rep.shape[1]))
pred_prop = vae.predict_prop_z(z_mat, standardized=True)
#======================
""" Converting those random representations to valid molecules """
x_hat_list = vae.z_to_smiles(z_mat, standardized=True, verified=True) # decoding
# to valid molecules
verified_x_hat = [item for item in x_hat_list if item!='None']
print("\n### {} out of 20 compounds are verified!".format(len(verified_x_hat)))
print("### {:20s} : {}".format('Predicted properties:', pred_prop))
#======================
""" Iteratively sampling from vicinity of a point in the latent space """
df = vae.iter_sampling_from_ls(z_rep, decode_attempts=500, num_iter=10,
noise_norm=0.5, constant_norm=False,
verbose=False)
#======================
""" Saving generated molecules to a pdf file as well as CSV using data
frame above """
vae.save_gen_mols(df, cols_of_interest=['comp1','comp2','comp3'],
out_file="gen_mols.pdf", out_dir="./test_out/")
#======================
'''
""" prediction performance analysis for a component of interest with option of
drawing parity plot for that component """
#input_data = string showing the location and name of the dataset to for
# prediction perfomance analysis. Could be the test set.
filename = 'validation_set.csv' #'validation_set.csv'
nsamples = 800 #600000
rmses = vae.component_parity_check(model_DIR+filename, ssize=nsamples, seed=235,
histplot=True, parplot=True, hexbinp=False)
#xlims=[0,1], ylims=[0,1])
print(rmses)
| 2.765625 | 3 |
python/search_in_binary_search_tree.py | anishLearnsToCode/leetcode-algorithms | 17 | 12791729 | # Definition for a binary tree node.
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def searchBST(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
if root is None or root.val == val: return root
return self.searchBST(root.right, val) if root.val < val else self.searchBST(root.left, val)
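# Hedged usage sketch (editorial addition): builds the small BST from the
# classic LeetCode example and searches for the subtree rooted at 2.
if __name__ == "__main__":
    root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(7))
    found = Solution().searchBST(root, 2)
    print(found.val if found else None)  # -> 2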
| 3.828125 | 4 |
sdk/python/feast/infra/offline_stores/contrib/postgres_repo_configuration.py | ibnummuhammad/feast | 0 | 12791730 | from feast.infra.offline_stores.contrib.postgres_offline_store.tests.data_source import (
PostgreSQLDataSourceCreator,
)
from tests.integration.feature_repos.integration_test_repo_config import (
IntegrationTestRepoConfig,
)
FULL_REPO_CONFIGS = [
IntegrationTestRepoConfig(
provider="local",
offline_store_creator=PostgreSQLDataSourceCreator,
online_store_creator=PostgreSQLDataSourceCreator,
),
]
| 0.921875 | 1 |
mod_arrow_func.py | pfalcon/python-imphook | 22 | 12791731 | <reponame>pfalcon/python-imphook<gh_stars>10-100
# This imphook module implements "arrow functions", similar to JavaScript.
# (a, b) => a + b ---> lambda a, b: a + b
import tokenize
import imphook
class TokBuf:
def __init__(self):
self.tokens = []
def append(self, t):
self.tokens.append(t)
def clear(self):
self.tokens.clear()
def empty(self):
return not self.tokens
def spool(self):
yield from self.tokens
self.clear()
def xform(token_stream):
tokbuf = TokBuf()
for t in token_stream:
if t[1] == "(":
# We're interested only in the deepest parens.
if not tokbuf.empty():
yield from tokbuf.spool()
tokbuf.append(t)
elif t[1] == ")":
nt1 = next(token_stream)
nt2 = next(token_stream)
if nt1[1] == "=" and nt2[1] == ">":
yield (tokenize.NAME, "lambda")
yield from tokbuf.tokens[1:]
tokbuf.clear()
yield (tokenize.OP, ":")
else:
yield from tokbuf.spool()
yield t
yield nt1
yield nt2
elif not tokbuf.empty():
tokbuf.append(t)
else:
yield t
def hook(modname, filename):
with open(filename, "r") as f:
# Fairly speaking, tokenizing just to convert back to string form
# isn't too efficient, but CPython doesn't offer us a way to parse
# token stream so far, so we have no choice.
source = tokenize.untokenize(xform(tokenize.generate_tokens(f.readline)))
mod = type(imphook)(modname)
exec(source, vars(mod))
return mod
imphook.add_import_hook(hook, (".py",))
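# Hedged usage sketch (editorial addition): once this module has been imported
# (registering the hook above), a module written with arrow syntax can be
# imported normally. For example, a hypothetical 'mymod.py' containing
#
#     add = (a, b) => a + b
#
# could then be used as:
#
#     import mod_arrow_func   # registers the import hook
#     import mymod
#     print(mymod.add(1, 2))  # -> 3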
| 2.53125 | 3 |
apps/terreno/admin.py | Ajerhy/proyectosigetebr | 1 | 12791732 | <filename>apps/terreno/admin.py
from django.contrib import admin
from .models import Ubicacion
from .models import Lote
from .models import Manzano
from .models import Medida
from .models import Distrito
"""
class lotesInlines(admin.TabularInline):
model = Manzano.lotes.through
class LoteAdmin(admin.ModelAdmin):
inlines = [
lotesInlines,
]
class ManzanoAdmin(admin.ModelAdmin):
inlines = [
lotesInlines,
]
exclude = ('lotes',)
"""
admin.site.register(Ubicacion)
admin.site.register(Lote)
#admin.site.register(Manzano,ManzanoAdmin)
admin.site.register(Manzano)
admin.site.register(Medida)
admin.site.register(Distrito)
| 2.015625 | 2 |
preprocessor/constants.py | AhsanAliLodhi/statistical_data_preprocessing | 0 | 12791733 | # TODO: Substitue all strings with constants
# Column types (in context of data science)
TYPE = {
'numerical',
'categorical',
'datetime'
}
# List of date time features available in pandas date time columns
DATE_TIME_FEATURES = {
'NUMERICS':['year', 'month', 'day', 'hour', 'dayofyear', 'weekofyear', 'week', 'dayofweek',
'quarter'],
'BOOLEANS':['is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end']
}
# Possible methods to fill nans in numerical columns
FILL_NAN_METHODS = {
'MEAN':'mean','MEDIAN':'median'
}
# Possible methods to fill infs in numerical columns
FILL_INF_METHODS = {
'MAXMIN':'maxmin','NAN':'nan'
}
| 3.3125 | 3 |
sim/python/plot_logsim.py | wpisailbot/boat | 4 | 12791734 | <filename>sim/python/plot_logsim.py
#!/usr/bin/python3
import numpy as np
import sys
from matplotlib import pyplot as plt
def norm_theta(theta):
while (theta > np.pi):
theta -= 2 * np.pi
while (theta < -np.pi):
theta += 2 * np.pi
return theta
def plot_vec(d, starti, name, ax, maxy=50):
    # use the array passed in as 'd' rather than the module-level 'data'
    t = d[:, 0]
    x = d[:, starti+0]
    y = d[:, starti+1]
    z = d[:, starti+2]
plt.figure()
plt.subplot(111, sharex=ax)
plt.plot(t, x, label=name+" x")
plt.plot(t, y, label=name+" y")
plt.plot(t, z, label=name+" z")
# plt.ylim(-maxy, maxy)
plt.legend()
data = np.genfromtxt("sep11logsim.csv", delimiter=',')[:, :]
x = []
y = []
vx = []
vy = []
speed = []
t = []
yaw = []
heel = []
pitch = []
heading = []
leeway = []
sail = []
rudder = []
alphaw = []
pitchvar = []
wind_speed = []
true_alphaw = []
true_wind_speed = []
heading_cmd = []
rudder_mode = []
orig_yaw = []
orig_heel = []
orig_speed = []
for row in data:
if row[0] < 4000:
continue
for i in range(len(row)):
if abs(row[i]) > 1e5:
row[i] = float("nan")
# if row[0] > 4485:
# break
t.append(row[0])
sail.append(row[3] * 180. / np.pi)
rudder.append(row[4] * 180. / np.pi)
yaw.append(norm_theta(row[5]) * 180. / np.pi)
orig_yaw.append(norm_theta(row[20]) * 180. / np.pi)
heel.append(norm_theta(row[6]) * 180. / np.pi)
orig_heel.append(norm_theta(row[21]) * 180. / np.pi)
pitch.append(norm_theta(row[7]) * 180. / np.pi)
pitchvarstart = max(-100, -len(pitch))
pitchvar.append(np.std(pitch[pitchvarstart:]))
x.append(row[8])
y.append(row[9])
vx.append(row[10])
vy.append(row[11])
speed.append(np.hypot(vx[-1], vy[-1]))
orig_speed.append(np.hypot(row[25], row[26]))
heading.append(np.arctan2(vy[-1], vx[-1]) * 180. / np.pi)
leeway.append(norm_theta((heading[-1] - yaw[-1]) * np.pi / 180.) * 180. / np.pi)
alphaw.append(np.arctan2(row[2], row[1]) * 180. / np.pi)
wind_speed.append(np.sqrt(row[1] ** 2 + row[2] ** 2))
true_alphaw.append(norm_theta(np.arctan2(row[13], row[12]))* 180. / np.pi)
true_wind_speed.append(np.sqrt(row[12] ** 2 + row[13] ** 2))
heading_cmd.append(row[15] * 180. / np.pi)
rudder_mode.append(row[16] * 10)
plt.plot(x, y, label="Boat Path")
#plt.plot([-76.477516, -76.475533, -76.474373, -76.477615, -76.479126], [38.98278, 38.98209, 38.98365, 38.985771, 38.983952], '*-', label="waypoints")
if False:
plt.quiver(x, y, vx, vy, np.hypot(vx, vy))
plt.colorbar(label="Speed (m/s)")
plt.title("Boat Position (Wind is blowing bottom-right-to-top-left on screen)--Arrows and colors represent velocities")
plt.xlabel("X position (deg longitude)")
plt.ylabel("Y position (deg latitude)")
plt.legend()
plt.figure()
ax = plt.subplot(111)
ax.plot(t, x - x[0], label='x less bias')
ax.plot(t, y - y[0], label='y less bias')
ax2 = ax.twinx()
ax2.plot(t, vx, 'c*', label='vx')
ax2.plot(t, vy, 'r*', label='vy')
ax2.plot(t, speed, 'g*', label='speed')
ax2.plot(t, wind_speed, label='Wind Speed (m/s)')
ax2.plot(t, true_wind_speed, label='True Wind Speed (m/s)')
ax.legend(loc='upper left')
ax2.legend(loc='upper right')
plt.figure()
axyh = plt.subplot(111, sharex=ax)
axyh.plot(t, yaw, label='Yaw')
axyh.plot(t, orig_yaw, 'b--', label='Original Yaw')
axyh.plot(t, heel, 'g', label='Heel')
axyh.plot(t, orig_heel, 'g--', label='Original Heel')
axyh.plot(t, pitch, label='Pitch')
axyh.plot(t, [n * 100 for n in pitchvar], label='Pitch Stddev * 100')
axyh.legend()
plt.figure()
axyaw = plt.subplot(111, sharex=ax)
axyaw.plot(np.matrix(t).T, np.matrix(yaw).T + 0, 'b', label='Heading')
axyaw.plot(t, orig_yaw, 'b--', label='Orig Yaw')
axyaw.plot(t, alphaw, 'g', label='Apparent Wind Angle')
axyaw.plot(t, heading_cmd, 'b-.', label='Heading Cmd')
axyaw.plot(t, rudder_mode, 'r*', label='Rudder Mode')
#axyaw.plot(t, true_alphaw, 'm', label='True Wind Angle')
axrudder = axyaw.twinx()
axrudder.plot(t, rudder, 'r', label='Rudder')
axrudder.plot(t, sail, 'm', label='Sail')
axrudder.plot(t, heel, 'c', label='Heel');
axrudder.plot(t, orig_heel, 'c--', label='Orig Heel');
axrudder.plot(t, leeway, 'y', label='Leeway Angle')
axrudder.plot(t, np.hypot(vx, vy) * 10, 'k', label='Boat Speed')
axrudder.plot(t, np.array(orig_speed) * 10, 'k--', label='Orig Boat Speed')
axrudder.set_ylim([-45, 45])
axyaw.legend(loc='upper left')
axrudder.legend(loc='upper right')
plt.title('Boat data while beam reaching and close hauled')
axyaw.set_ylabel('Heading and Apparent Wind (upwind = 0) (deg)')
axrudder.set_ylabel('Rudder, Heel, and Leeway (deg)\n Boat Speed (tenths of a meter / sec)')
axyaw.set_xlabel('Time (sec)')
plt.grid()
plt.figure()
axwind = plt.subplot(111, sharex=ax)
axwind.plot(t, true_wind_speed, 'r', label="True Wind Speed (m/s)")
axwind.plot(t, wind_speed, 'b', label="Apparent Wind Speed (m/s)")
axwinddir = axwind.twinx();
axwinddir.plot(t, true_alphaw, 'c', label="True Wind Dir (deg)")
axwind.legend(loc='upper left')
axwinddir.legend(loc='upper right')
plot_vec(data, 27, "Sail Force", ax)
plot_vec(data, 30, "Rudder Force", ax)
plot_vec(data, 33, "Keel Force", ax)
plot_vec(data, 36, "Hull Force", ax)
plot_vec(data, 39, "Net Force", ax)
plot_vec(data, 42, "Sail Torque", ax)
plot_vec(data, 45, "Rudder Torque", ax)
plot_vec(data, 48, "Keel Torque", ax)
plot_vec(data, 51, "Hull Torque", ax)
plot_vec(data, 54, "Righting Torque", ax)
plot_vec(data, 57, "Net Torque", ax)
ax.set_xlim([4000, 4500])
plt.show()
| 3.0625 | 3 |
models/spec_analyse.py | zaqwes8811/voicegen | 0 | 12791735 | <reponame>zaqwes8811/voicegen
#!/usr/bin/python
#-*- coding: utf-8 -*-
import wave as wv
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.show()
| 2.125 | 2 |
mg/pyguitools/easy_settings.py | mgotz/PyGUITools | 0 | 12791736 | <reponame>mgotz/PyGUITools
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
easily editable settings: wrapper around formlayout
"""
from formlayout import fedit
class EasyEditSettings():
"""a class around formlayout to give easy to use settings
    initialized with a list of tuples that specify the settings; it can return
    a dictionary of the settings for easy use in the application
"""
def __init__(self, settings):
""" initialize the advanced settings
Parameters
----------
setting : list of tuples
each entry in the list is a setting with its name as the first
element and current value as second like for formlayout from fedit
"""
self.settingsDict = {}
self.settingsList = settings
self.update_dict()
def update_dict(self):
for element in self.settingsList:
if type(element[1]) == list:
self.settingsDict[element[0]] = element[1][element[1][0]+1]
else:
self.settingsDict[element[0]] = element[1]
def get_settings(self):
return self.settingsDict
def change_settings(self, title="Edit advanced settings" ):
newSettings = fedit(self.settingsList, title=title)
        if newSettings is not None:
for i, newSetting in enumerate(newSettings):
if type(self.settingsList[i][1]) == list:
tempList = self.settingsList[i][1]
tempList[0] = newSetting
self.settingsList[i] = (self.settingsList[i][0],tempList)
else:
self.settingsList[i] = (self.settingsList[i][0],newSetting)
            self.update_dict()
| 2.953125 | 3 |
Chapter 6/Code/servo_minimum.py | professor-li/book-dow-iot-projects | 17 | 12791737 | <gh_stars>10-100
from gpiozero import Servo
servoPin=17
servoCorrection=0.5
maxPW=(2.0+servoCorrection)/1000
minPW=(1.0-servoCorrection)/1000
servo=Servo(servoPin, min_pulse_width=minPW, max_pulse_width=maxPW)
servo.min()
| 2.4375 | 2 |
lang/py/cookbook/v2/source/cb2_20_6_sol_1.py | ch1huizong/learning | 0 | 12791738 | <reponame>ch1huizong/learning
import inspect
def wrapfunc(obj, name, processor, avoid_doublewrap=True):
""" patch obj.<name> so that calling it actually calls, instead,
processor(original_callable, *args, **kwargs)
"""
# get the callable at obj.<name>
call = getattr(obj, name)
# optionally avoid multiple identical wrappings
if avoid_doublewrap and getattr(call, 'processor', None) is processor:
return
# get underlying function (if any), and anyway def the wrapper closure
original_callable = getattr(call, 'im_func', call)
def wrappedfunc(*args, **kwargs):
return processor(original_callable, *args, **kwargs)
# set attributes, for future unwrapping and to avoid double-wrapping
wrappedfunc.original = call
wrappedfunc.processor = processor
# 2.4 only: wrappedfunc.__name__ = getattr(call, '__name__', name)
# rewrap staticmethod and classmethod specifically (iff obj is a class)
if inspect.isclass(obj):
if hasattr(call, 'im_self'):
if call.im_self:
wrappedfunc = classmethod(wrappedfunc)
else:
wrappedfunc = staticmethod(wrappedfunc)
# finally, install the wrapper closure as requested
setattr(obj, name, wrappedfunc)
def unwrapfunc(obj, name):
''' undo the effects of wrapfunc(obj, name, processor) '''
setattr(obj, name, getattr(obj, name).original)
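# Hedged usage sketch (editorial addition, Python 2 semantics to match this
# recipe): a trivial tracing processor; the Greeter class is illustrative only.
def tracing_processor(original_callable, *args, **kwargs):
    print('calling %s' % getattr(original_callable, '__name__', original_callable))
    return original_callable(*args, **kwargs)

class Greeter(object):
    def greet(self, name):
        return 'hello ' + name

if __name__ == '__main__':
    wrapfunc(Greeter, 'greet', tracing_processor)   # Greeter.greet is now traced
    print(Greeter().greet('world'))                 # trace line, then 'hello world'
    unwrapfunc(Greeter, 'greet')                    # restore the original method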
| 3.265625 | 3 |
utils/utils.py | sarrouti/multi-class-text-classification-pytorch | 3 | 12791739 | <filename>utils/utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 18:56:39 2020
@author: sarroutim2
"""
import torch
import torchtext
import json
class Vocabulary (object):
SYM_PAD = '<pad>' # padding.
SYM_UNK = '<unk>' # Unknown word.
def __init__(self):
self.word2idx={}
self.idx2word={}
self.idx=0
self.add_word(self.SYM_PAD)
self.add_word(self.SYM_UNK)
def add_word (self, word):
if word not in self.word2idx:
self.word2idx [word] = self.idx
self.idx2word [self.idx] = word
self.idx += 1
def remove_word(self, word):
"""Removes a specified word and updates the total number of unique words.
Args:
word: String representation of the word.
"""
if word in self.word2idx:
            idx = self.word2idx.pop(word)
            # pop the entry at the word's own index; self.idx is the next free
            # slot and is never a key of idx2word
            self.idx2word.pop(idx)
self.idx -= 1
def __call__(self, word):
if word not in self.word2idx:
return self.word2idx[self.SYM_UNK]
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
def save(self, location):
with open(location, 'w') as f:
json.dump({'word2idx': self.word2idx,
'idx2word': self.idx2word,
'idx': self.idx}, f)
def load(self, location):
with open(location, 'rb') as f:
data = json.load(f)
self.word2idx = data['word2idx']
self.idx2word = data['idx2word']
self.idx = data['idx']
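# Hedged usage sketch (editorial addition): building a tiny vocabulary; the
# words are arbitrary illustrations, not from the original project.
def _example_vocabulary():
    vocab = Vocabulary()
    for word in ['what', 'is', 'the', 'dose']:
        vocab.add_word(word)
    assert len(vocab) == 6                      # <pad>, <unk> plus the four words
    assert vocab('unseen') == vocab.word2idx[Vocabulary.SYM_UNK]
    return vocab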
def get_glove_embedding(name, embed_size, vocab):
"""Construct embedding tensor.
Args:
name (str): Which GloVe embedding to use.
embed_size (int): Dimensionality of embeddings.
vocab: Vocabulary to generate embeddings.
Returns:
embedding (vocab_size, embed_size): Tensor of
GloVe word embeddings.
"""
glove = torchtext.vocab.GloVe(name=name,
dim=str(embed_size))
vocab_size = len(vocab)
embedding = torch.zeros(vocab_size, embed_size)
for i in range(vocab_size):
embedding[i] = glove[vocab.idx2word[str(i)]]
return embedding
# ===========================================================
# Helpers.
# ===========================================================
def process_lengths(inputs, pad=0):
"""Calculates the lenght of all the sequences in inputs.
Args:
inputs: A batch of tensors containing the question or response
sequences.
Returns: A list of their lengths.
"""
max_length = inputs.size(1)
if inputs.size(0) == 1:
lengths = list(max_length - inputs.data.eq(pad).sum(1))
else:
lengths = list(max_length - inputs.data.eq(pad).sum(1).squeeze())
return lengths
| 3.09375 | 3 |
manage.py | larryTheGeek/ride_my_way_v2 | 0 | 12791740 | <gh_stars>0
import os
from app.app import create_app
from app.models.db import Db
environment = os.getenv('config_name')
app = create_app(environment)
#creates database tables if they don't exist
Db.create_tables()
if __name__ == '__main__':
    app.run()
| 2.171875 | 2 |
albackup/__main__.py | campenberger/albackup | 0 | 12791741 | <gh_stars>0
import argparse
import logging
import json
import sqlalchemy as sa
from .dump import Dump
from .restore import Restore
from . import Password
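# Hedged illustration (editorial addition): a minimal albackup.json consistent
# with the keys read below; all values are placeholders.
#
#     {
#         "db_user": "sa",
#         "db_server": "dbhost.example.com",
#         "db_port": 1433,
#         "db_name": "mydb",
#         "allow_restore": true,
#         "enable_ri_check": true
#     }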
if __name__ == '__main__':
parser=argparse.ArgumentParser("python -m albackup")
parser.add_argument('mode',metavar='MODE',choices=('dump','restore','chg-password'), help="mode of operation (dump or restore,chg-password)")
parser.add_argument('--cfg','-c',dest='cfg_file',default='albackup.json', help="Configuration for dump or restore operation")
parser.add_argument('--meta-cache',default=None, help="Allow caching of database meta data")
parser.add_argument('--backup-dir',default='backup',help="Target directory for backups")
parser.add_argument('--debug','-d',action="store_true",default=False,help="Run in debug mode")
args=parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.debug else logging.INFO,
format="%(asctime)s:%(name)-20s:%(levelname)-7s:%(message)s" if args.debug else "%(asctime)s: %(message)s"
)
logging.getLogger('sqlalchemy.engine').setLevel(
logging.INFO if args.debug else logging.ERROR
)
logger=logging.getLogger()
cfg=None
with open(args.cfg_file,'r') as fh:
cfg=json.load(fh)
logger.info('Read configuration from %s',args.cfg_file)
if args.mode!='chg-password':
p=Password(args.cfg_file,cfg)
pw=p.password
logger.info('Database configuration:')
logger.info(' user : %s',cfg['db_user'])
logger.info(' password: %s','*'*len(pw))
logger.info(' server : %s',cfg['db_server'])
logger.info(' port : %d',cfg['db_port'])
logger.info(' db : %s',cfg['db_name'])
engine=sa.create_engine('mssql+pyodbc://{}:{}@{}:{}/{}?driver=FreeTDS&odbc_options="TDS_Version=8.0"'.format(
cfg['db_user'],
pw,
cfg['db_server'],
cfg['db_port'],
cfg['db_name']
),deprecate_large_types=True)
logger.info('SQLAlchemy engine created.')
if args.mode=='dump':
dump=Dump(args.backup_dir, args.meta_cache, engine, cfg['db_name'], cfg['db_server'])
dump.run()
logger.info('Dump finished')
elif args.mode=='restore':
if not cfg['allow_restore']:
raise Exception('Configuration file prohibits restore')
enable_ri_check=cfg['enable_ri_check']
restore=Restore(args.backup_dir,engine)
restore.run()
if enable_ri_check:
restore.changeRIChecks(off=False)
else:
logger.info('RI checks where left off')
logger.info('Restore finished')
elif args.mode=='chg-password':
pw=Password(args.cfg_file, cfg)
pw.change()
else:
        parser.error("Invalid program mode")
| 2.3125 | 2 |
train/utils.py | mcclow12/chatbot | 0 | 12791742 | import random
def utils_min_required():
responses = [
"Sorry, I need your opinion on a movie "\
"before I can give you quality recommendations.",
"Sorry, I don't have enough information yet "\
"to make a good recommendation.",
"I can't give a good recommendation yet. Please "\
"tell me about some movies you've watched first.",
"It's gonna be hard for me to give you some good "\
"recommendations if I don't know anything about your tastes.",
"I don't think I'm ready to give a recommendation yet. "\
"How about you tell me about some movies you've watched?",
"Please tell me about some movies you watched first. "\
"Then I'll be able to give you some great recommendations"
]
return random.choice(responses)
def utils_quotations():
responses = [
"Hmm seems like you messed up your quotation marks. " \
"Try again.",
"Uh oh, I don't think your quotation marks are correct. ",
"It's hard for me to understand which movie you're talking about.",
"To help me understand, please put quotation marks around the " \
"movie like this \"The Wizard of Oz\"",
"It's hard for me to understand with your quotation marks.",
"Oops, seems like your quotation marks aren't quite right.",
"Please re-check your quotation marks. There should be two "\
"in your response surrounding the movie title.",
"I'm having trouble reading your sentence because of the "\
"quotation marks. Can you please try again? ",
]
return random.choice(responses)
def utils_new_movie():
responses = [
"Interesting, I haven't heard of that movie.",
"Hmm I haven't heard of that movie.",
"Wow that movie is new to me. I don't know much about it.",
"I've actually never heard of that movie before! Unfortunately "\
"that means \nI can't give you some good recommendations based "\
"on that one.",
"That movie is actually unfamiliar to me.",
"To be honest, I haven't seen that movie before, so it'll "\
"be hard to recommend you a movie based on that one."
]
return random.choice(responses)
def utils_liked():
responses1 = [
"Great, glad you liked that one.",
"Okay got it that was a good movie.",
"Nice, sounds like that movie was right up your alley."
"Wow so you like those kinds of movies. "\
"I think you'll like my recommendations.",
"Glad you liked the movie.",
"Sounds like you enjoyed that one.",
"Good, glad you enjoyed it.",
"Okay, got it, I think I have some other ones that you'll like as well.",
"Awesome, glad you liked it."
]
responses2 = [
" Now feel free to tell me about some more movies or say "\
"'Recommendations please!' to hear my recommendations. ",
" Any more movies you've seen? ",
" You're giving me some great feedback.",
" What other movies have you seen? ",
" Any other movies you've seen? ",
" Any more movie opinions I should know?",
" Anything else you want to tell me before I give my recommendations?"
]
response1 = random.choice(responses1)
response2 = ''
if random.uniform(0, 1) < 0.3:
response2 = random.choice(responses2)
return response1 + response2
def utils_disliked():
responses1 = [
"Okay got it you didn't like that one.",
"Gotcha so that wasn't the movie for you.",
"Okay you didn't like that one.",
"Yeah I've heard other people didn't like that one as well.",
"So you didn't like that one got it.",
"That really wasn't your movie huh.",
"That movie wasn't for you then. I'll keep that in mind.",
"Okay so you did not like that one.",
]
responses2 = [
" Now feel free to tell me about some more movies or say "\
"'Recommendations please!' to hear my recommendations. ",
" Any more movies you've seen? ",
" You're giving me some great feedback.",
" What other movies have you seen?",
" Any other movies you've seen?",
" Got any more hot takes?",
" Any more movie opinions I should know?",
" Anything else you want to tell me before I give my recommendations?"
]
response1 = random.choice(responses1)
response2 = ''
if random.uniform(0, 1) < 0.3:
response2 = random.choice(responses2)
return response1 + response2
def utils_more_opinions():
responses = [
" Now feel free to tell me about some more movies or say "\
"'Recommendations please!' to hear my recommendations.",
" Any more movies you've seen? ",
" You're giving me some great feedback.",
" What other movies have you seen?",
" Any other movies you've seen?",
" Got any more opinions on movies you've seen?",
" Any more movie opinions I should know?",
" Anything else you want to tell me before I give my recommendations?"
]
return random.choice(responses)
def utils_liked_match(match):
responses = [
f"Got it! So you liked {match}.",
f"Okay so {match} was your type of movie.",
f"Gotcha so {match} was a good fit for you.",
f"Okay got it you liked {match}.",
f"Sounds like {match} was right up your alley.",
f"Okay so your tastes align with {match}, got it."
]
return random.choice(responses)
def utils_disliked_match(match):
responses = [
f"Okay sounds like {match} wasn't the " \
"movie for you.",
f"Okay got it {match} wasn't your cup of tea.",
f"So you did not like {match}. Got it.",
f"Gotcha so you didn't like {match}.",
f"Okay so {match} was the movie you didn't like.",
f"{match} wasn't the movie for you then.",
f"Got it you didn't like {match}."
]
return random.choice(responses)
def utils_low_confidence():
responses = [
"Sorry, I couldn't tell if you liked that " \
"movie or not.",
"Sorry I'm not sure if you liked that one.",
"I can't quite tell what you think about that movie.",
"I'm not quite sure if you liked that movie or not.",
"Wait.. did you like or dislike that movie?",
"I think I need some more information to tell whether you "\
"liked that movie or not.",
"Hang on, I couldn't tell if you liked that movie or not."
]
return random.choice(responses)
| 3.328125 | 3 |
Phenotyping/Phenotyping.py | lsymuyu/Digital-Plant-Phenotyping-Platform | 10 | 12791743 | '''
The main function to conduct phenotyping experiments
11/09/2017
<NAME>
'''
import os
from adel import AdelR
from adel.geometric_elements import Leaves
from adel.AdelR import R_xydb, R_srdb, genGeoLeaf
import pandas as pd
from adel.plantgen import plantgen_interface
import numpy as np
from adel.astk_interface import AdelWheat
from adel.stand.Generate_canopy import get_exposed_areas
from adel.postprocessing import axis_statistics_simple, plot_statistics_simple, plot_statistics_simple_filter, axis_statistics_simple_filter
from adel.povray.povray_ind import povray_Green
from pyDOE import *
from scipy.stats import uniform
from openalea.core.path import path
from adel.ADEL_OPT.Adel_OPT_Ind import Adel_Leaves, Adel_development
import prosail
from adel.ADEL_OPT.Adel_OPT_Ind import plot_LAI
from adel.macro.povray_pixels_several_colors import set_color_metamers_organs
from adel.povray.FAPAR import Sampling_diagnal, Hemispherical_IM, Sampling_GF, Hemispherical_IM_Sun
from adel.povray.GF_RGB import Green_Fract, Pov_Scene
from adel.povray.Canray import duplicate_scene, Optical_canopy, Optical_soil, povray_RF
def Phenotyping_Wheat(Param, Ind, thermals, Canopy, Adel_output,
LAI = False,save_scene = False,
GF = False, GF_camera = [],
FAPAR = False, Sunlit_TT = [], Sunlit_Ang = [],
Multi_spectral = False, Ray_camera = [], Ray_light = []):
try:
# Adel parameters
development_parameters = Adel_development(N_phytomer_potential = float(Param['N_leaf']), a_cohort = float(Param['a_cohort']),
TT_hs_0 = float(Param['T_cohort']), TT_flag_ligulation = float(Param['TT_flag_ligulation']),
n0 = float(Param['n0']), n1 = float(Param['n1']), n2 = float(Param['n2']),
number_tillers = float(Param['number_tillers']),
Lamina_L1 = float(Param['Lamina_L1']), N2 = float(Param['N2']), incl1 = float(Param['incl1']),
incl2 = float(Param['incl2']), N_elg = float(Param['N_elg']), density = float(Param['Density']))
wheat_leaves = Adel_Leaves(incline = float(Param['incl_leaf']), dev_Az_Leaf = float(Param['dev_Az_Leaf']))
# canopy configuration
sim_width = float(Canopy['width']) # m, generate three rows
dup_length = float(Canopy['length'])
Row_spacing = float(Param['Row_spacing'])
run_adel_pars = {'senescence_leaf_shrink': 0.01, 'leafDuration': 2, 'fracLeaf': 0.2, 'stemDuration': 2. / 1.2,
'dHS_col': 0.2, 'dHS_en': 0, 'epsillon': 1e-6, 'HSstart_inclination_tiller': 1,
'rate_inclination_tiller': float(Param['rate_Tiller']), 'drop_empty': True}
# build the distribution pattern table to interpolate the density
Wheat_Adel = AdelWheat(density = float(Param['Density']), duplicate = 40, devT = development_parameters,
leaves = wheat_leaves, pattern='regular', run_adel_pars = run_adel_pars,
incT = float(Param['Deta_Incl_Tiller']), ibmM = float(Param['incl_main']),
depMin = float(Param['min_Tiller']), dep = float(Param['max_Tiller']),
inter_row = Row_spacing, width = sim_width, length = dup_length)
del development_parameters, wheat_leaves
domain = Wheat_Adel.domain
domain_area = Wheat_Adel.domain_area
nplants = Wheat_Adel.nplants
for TT in thermals:
Canopy_Adel = Wheat_Adel.setup_canopy(age=TT)
plantgl_scene = set_color_metamers_organs(Canopy_Adel)[0]
# Summary LAI
if LAI:
new_plot_df = plot_LAI(Canopy_Adel, TT, domain_area, nplants, Adel_output, Ind)
if 'plot_df' in locals():
plot_df = pd.concat([plot_df,new_plot_df])
else:
plot_df = new_plot_df
del Canopy_Adel
# Save geometry file
name_canopy = '%s%s%s%s.bgeom'%('Ind_',Ind,'_TT_',TT)
if save_scene:
plantgl_scene.save(Adel_output + '/' + name_canopy, 'BGEOM')
# Green fraction
if GF:
sampling_times = GF_camera['Times_sampling']
cameras = Sampling_GF(domain, sampling_times,
Azimuth = GF_camera['azimuth'], Zenith = GF_camera['zenith'],
Row_spacing = Row_spacing, fov = GF_camera['fov'])[0]
povfile_mesh, povfile_box, z_top = Pov_Scene(plantgl_scene, domain,
output_directory = Adel_output,
thermal = TT, Ind = Ind)
povfile_scene, new_df = Green_Fract(povfile_mesh, povfile_box,
thermal = TT, Ind = Ind, cameras = cameras,
image_height = GF_camera['image_height'], image_width = GF_camera['image_width'],
relative_height = GF_camera['distance'], z_top = 0,
output_directory = Adel_output)
if 'result_df' in locals():
result_df = pd.concat([result_df,new_df])
else:
result_df = new_df
# Fisheye for FAPAR
if FAPAR:
Azimuth_fisheye = [0]
Zenith_fisheye = [0]
                sampling_times = 7
                dup_width = 8.0
                # assumed defaults mirroring Phenotyping_Wheat_TT below; fov_fisheye
                # and relative_height were not defined in this function originally
                fov_fisheye = [120]
                relative_height = 200
New_canopy, New_nplants, New_domain, New_area = duplicate_scene(plantgl_scene, nplants, canopy_width = dup_width,
canopy_length = dup_length, sim_width = sim_width,
Row_spacing = Row_spacing)
domain = New_domain
del plantgl_scene
cameras_fisheye = Sampling_diagnal(New_domain, sampling_times,
Azimuth_fisheye, Zenith_fisheye,
Row_spacing, fov_fisheye)[0]
povfile_mesh_new, povfile_box_new, z_top_new = Pov_Scene(New_canopy, New_domain,
output_directory = Adel_output,
thermal = TT, Ind = Ind)
del New_canopy
povray_image_fisheye = Hemispherical_IM(povfile_mesh = povfile_mesh_new, z_top = z_top_new,
cameras = cameras_fisheye,
image_height = 2000, image_width = 2000,
relative_height = relative_height,
output_directory = Adel_output)
if TT in Sunlit_TT:
for A_sun in Sunlit_Ang:
povray_image_fisheye = Hemispherical_IM_Sun(povfile_mesh = povfile_mesh_new, z_top = z_top_new,
cameras = cameras_fisheye, A_sun = A_sun,
image_height = 2000, image_width = 2000,
relative_height = relative_height,
output_directory = Adel_output)
# Simulate BRDF (need large scene)
if Multi_spectral:
# Setting of prosail
RT = prosail.prospect_5b(n = Param['N'], cab = Param['Cab'], car = Param['Car'],
cbrown = Param['Cbrown'], cw = Param['Cw'], cm = Param['Cm'])
Full_wave = range(400, 2501)
R = RT[:,0]
T = RT[:,1]
for wave in Ray_camera['Waves']:
Plant_optical = Optical_canopy(wave=wave, Full_wave=Full_wave, R=R, T=T)
soil_ref = Optical_soil(wave, brightness=Param['brightness'])
Output_file = povray_RF(Ray_light=Ray_light, Ray_camera=Ray_camera, Plant_optical=Plant_optical,
soil_ref=soil_ref, domain=domain, povfile_scene=povfile_mesh,
wave=wave, soil_type = Param['soil_type'],
dict=Adel_output)
if not os.path.exists(Output_file):
Output_file = povray_RF(Ray_light=Ray_light, Ray_camera=Ray_camera, Plant_optical=Plant_optical,
soil_ref=soil_ref, domain=domain, povfile_scene=povfile_mesh,
wave=wave,soil_type = Param['soil_type'], dict=Adel_output)
if 'plot_df' in locals():
result_plot_path = path(os.path.join(Adel_output, '%s%s%s'%('plot_LAI_',Ind,'.csv')))
plot_df.to_csv(result_plot_path, index=False)
if 'result_df' in locals():
result_df_path = path(os.path.join(Adel_output, '%s%s%s'%('Fraction_',Ind,'.csv')))
result_df.to_csv(result_df_path, index=False)
except TypeError:
print 'Pass it and move forward!!!***'
result_df_path = []
pass
return Adel_output
def Phenotyping_Wheat_TT(Param, Ind, TT, Adel_output,
Ray_light = [], Ray_camera = [], Zenith_GF = [],
FAPAR = True, GF = True, Multi_spectral = False,
save_scene = False):
try:
# Adel parameters
Row_spacing = float(Param['Row_spacing'])
sim_width = 1.0
dup_length = 12.0
development_parameters = Adel_development(N_phytomer_potential = float(Param['N_leaf']), a_cohort = float(Param['a_cohort']),
TT_hs_0 = float(Param['T_cohort']), TT_flag_ligulation = float(Param['TT_flag_ligulation']),
n0 = float(Param['n0']), n1 = float(Param['n1']), n2 = float(Param['n2']), number_tillers = float(Param['number_tillers']),
Lamina_L1 = float(Param['Lamina_L1']), N2 = float(Param['N2']), incl1 = float(Param['incl1']),
incl2 = float(Param['incl2']), N_elg = float(Param['N_elg']), density = float(Param['Density']))
wheat_leaves = Adel_Leaves(incline = float(Param['incl_leaf']), dev_Az_Leaf = float(Param['dev_Az_Leaf']))
run_adel_pars = {'senescence_leaf_shrink': 0.01, 'leafDuration': 2, 'fracLeaf': 0.2, 'stemDuration': 2. / 1.2,
'dHS_col': 0.2, 'dHS_en': 0, 'epsillon': 1e-6, 'HSstart_inclination_tiller': 1,
'rate_inclination_tiller': float(Param['rate_Tiller']), 'drop_empty': True}
# build the distribution pattern table to interpolate the density
Wheat_Adel = AdelWheat(density = float(Param['Density']), duplicate = 20, devT = development_parameters,
leaves = wheat_leaves, pattern='regular', run_adel_pars = run_adel_pars,
incT = float(Param['Deta_Incl_Tiller']), ibmM = float(Param['incl_main']),
depMin = float(Param['min_Tiller']), dep = float(Param['max_Tiller']),
inter_row = Row_spacing, width = sim_width, length = dup_length)
del Param, development_parameters, wheat_leaves
domain = Wheat_Adel.domain
domain_area = Wheat_Adel.domain_area
nplants = Wheat_Adel.nplants
Canopy_Adel = Wheat_Adel.setup_canopy(age=TT)
del Wheat_Adel
plantgl_scene = set_color_metamers_organs(Canopy_Adel)[0]
# Summary LAI
plot_df = plot_LAI(Canopy_Adel, TT, domain_area, nplants, Adel_output, Ind)
result_plot_path = path(os.path.join(Adel_output, '%s%s%s%s%s'%('plot_LAI_',Ind,'_TT_',TT,'.csv')))
plot_df.to_csv(result_plot_path, index=False)
del plot_df, Canopy_Adel
# Save geometry file
name_canopy = '%s%s%s%s.bgeom'%('Ind_',Ind,'_TT_',TT)
if save_scene:
plantgl_scene.save(Adel_output + '/' + name_canopy, 'BGEOM')
# Common setting
relative_height = 200 # camera above the canopy
# Green fraction
if GF:
Azimuth = [0]
fov = [10]
sampling_times = 4
cameras = Sampling_GF(domain, sampling_times,
Azimuth, Zenith_GF,
Row_spacing, fov)[0]
povfile_mesh, povfile_box, z_top = Pov_Scene(plantgl_scene, domain,
output_directory = Adel_output,
thermal = TT, Ind = Ind)
povfile_scene, result_df = Green_Fract(povfile_mesh, povfile_box,
thermal = TT, Ind = Ind, cameras = cameras,
image_height = 1000, image_width = 1000,
relative_height = relative_height, z_top = z_top,
output_directory = Adel_output)
result_df_path = path(os.path.join(Adel_output, '%s%s%s%s%s'%('Fraction_',Ind,'_TT_',TT,'.csv')))
result_df.to_csv(result_df_path, index=False)
# Fisheye for FAPAR
if FAPAR:
Azimuth_fisheye = [0]
Zenith_fisheye = [0]
fov_fisheye = [120]
dup_width = 12.0
sampling_times = 7
New_canopy, New_nplants, New_domain, New_area = duplicate_scene(plantgl_scene, nplants, canopy_width = dup_width,
canopy_length = dup_length, sim_width = sim_width,
Row_spacing = Row_spacing)
del plantgl_scene
cameras_fisheye = Sampling_diagnal(New_domain, sampling_times,
Azimuth_fisheye, Zenith_fisheye,
Row_spacing, fov_fisheye)[0]
povfile_mesh_new, povfile_box_new, z_top_new = Pov_Scene(New_canopy, New_domain,
output_directory = Adel_output,
thermal = TT, Ind = Ind)
del New_canopy
povray_image_fisheye = Hemispherical_IM(povfile_mesh = povfile_mesh_new, z_top = z_top_new,
cameras = cameras_fisheye,
image_height = 2000, image_width = 2000,
relative_height = relative_height,
output_directory = Adel_output)
# Simulate BRDF (need large scene)
if Multi_spectral:
# Setting of prosail
RT = prosail.prospect_5b(n = Param['N'], cab = Param['Cab'], car = Param['Car'],
cbrown = Param['Cbrown'], cw = Param['Cw'], cm = Param['Cm'])
Full_wave = range(400, 2501)
R = RT[:,0]
T = RT[:,1]
for wave in Waves_camera:
Plant_optical = Optical_canopy(wave=wave, Full_wave=Full_wave, R=R, T=T)
soil_ref = Optical_soil(wave, brightness=Param['brightness'])
Output_file = povray_RF(Ray_light=Ray_light, Ray_camera=Ray_camera, Plant_optical=Plant_optical,
soil_ref=soil_ref, domain=New_domain, povfile_scene=povfile_scene,
wave=wave,
dict=Adel_output)
if not os.path.exists(Output_file):
Output_file = povray_RF(Ray_light=Ray_light, Ray_camera=Ray_camera, Plant_optical=Plant_optical,
soil_ref=soil_ref, domain=New_domain, povfile_scene=povfile_scene,
wave=wave,
dict=Adel_output)
except TypeError:
print 'Pass it and move forward!!!***'
result_df_path = []
pass
    return Adel_output
| 2.078125 | 2 |
cmds/moderations.py | james10949/sijingprogram | 0 | 12791744 | <reponame>james10949/sijingprogram<gh_stars>0
import discord
from discord.ext import commands
from core.classes import Cog_Extension
class Moderations(Cog_Extension):
@commands.command()
async def clean(self, ctx, num : int):
await ctx.channel.purge(limit = num+1)
def setup(bot):
    bot.add_cog(Moderations(bot))
| 2.25 | 2 |
padinfo/view_state/otherinfo.py | chasehult/padbot-cogs | 0 | 12791745 | <reponame>chasehult/padbot-cogs<gh_stars>0
from padinfo.pane_names import IdMenuPaneNames
from padinfo.view_state.base_id import ViewStateBaseId
class OtherInfoViewState(ViewStateBaseId):
def serialize(self):
ret = super().serialize()
ret.update({
'pane_type': IdMenuPaneNames.otherinfo,
})
return ret
| 2.046875 | 2 |
tests/test_draft.py | edsn60/tensorbay-python-sdk | 0 | 12791746 | <reponame>edsn60/tensorbay-python-sdk
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
import pytest
from tensorbay.client import GAS
from tensorbay.client.gas import DEFAULT_BRANCH
from tensorbay.client.struct import Draft
from tensorbay.exception import ResourceNotExistError, ResponseError, StatusError
from .utility import get_dataset_name, get_draft_number_by_title
class TestDraft:
def test_create_draft(self, accesskey, url):
gas_client = GAS(access_key=accesskey, url=url)
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name)
draft_number_1 = dataset_client.create_draft("draft-1", "description")
assert draft_number_1 == 1
assert dataset_client.status.is_draft
assert dataset_client.status.draft_number == draft_number_1
assert dataset_client.status.commit_id is None
with pytest.raises(StatusError):
dataset_client.create_draft("draft-2")
draft_number = get_draft_number_by_title(dataset_client.list_drafts(), "draft-1")
assert draft_number_1 == draft_number
gas_client.delete_dataset(dataset_name)
def test_list_drafts(self, accesskey, url):
gas_client = GAS(access_key=accesskey, url=url)
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name)
dataset_client.create_draft("draft-1", "description for draft 1")
dataset_client.commit("commit-draft-1")
draft_number_2 = dataset_client.create_draft("draft-2", "description for draft 2")
# After committing, the draft will be deleted
with pytest.raises(TypeError):
get_draft_number_by_title(dataset_client.list_drafts(), "draft-1")
drafts = dataset_client.list_drafts()
assert len(drafts) == 1
assert drafts[0] == Draft(
draft_number_2, "draft-2", DEFAULT_BRANCH, "OPEN", "description for draft 2"
)
with pytest.raises(TypeError):
get_draft_number_by_title(dataset_client.list_drafts(), "draft-3")
gas_client.delete_dataset(dataset_name)
def test_commit_draft(self, accesskey, url):
gas_client = GAS(access_key=accesskey, url=url)
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name)
dataset_client.create_draft("draft-1")
dataset_client.commit("commit-1")
dataset_client.create_draft("draft-2")
dataset_client.commit("commit-2", tag="V1")
dataset_client.create_draft("draft-3")
with pytest.raises(ResponseError):
dataset_client.commit("commit-3", tag="V1")
dataset_client.commit("commit-3", tag="V2")
assert not dataset_client.status.is_draft
assert dataset_client.status.draft_number is None
assert dataset_client.status.commit_id is not None
# After committing, the draft will be deleted
with pytest.raises(TypeError):
get_draft_number_by_title(dataset_client.list_drafts(), "draft-3")
gas_client.delete_dataset(dataset_name)
def test_update_draft(self, accesskey, url):
gas_client = GAS(access_key=accesskey, url=url)
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name)
dataset_client.create_draft("draft-1")
dataset_client.commit("commit-1", "test", tag="V1")
dataset_client.create_draft("draft-2")
dataset_client.checkout("V1")
dataset_client.create_branch("T123")
dataset_client.create_draft("draft-3", "description00")
dataset_client.update_draft(title="draft-4", description="description01")
draft = dataset_client.get_draft(3)
assert draft.title == "draft-4"
assert draft.description == "description01"
dataset_client.update_draft(2, title="draft-4", description="description02")
draft = dataset_client.get_draft(2)
assert draft.title == "draft-4"
assert draft.description == "description02"
gas_client.delete_dataset(dataset_name)
def test_close_draft(self, accesskey, url):
gas_client = GAS(access_key=accesskey, url=url)
dataset_name = get_dataset_name()
dataset_client = gas_client.create_dataset(dataset_name)
dataset_client.create_draft("draft-1")
dataset_client.commit("commit-1", "test", tag="V1")
dataset_client.create_draft("draft-2")
dataset_client.checkout("V1")
dataset_client.create_branch("T123")
dataset_client.create_draft("draft-3")
dataset_client.close_draft()
with pytest.raises(ResourceNotExistError):
dataset_client.get_draft(3)
dataset_client.close_draft(2)
with pytest.raises(ResourceNotExistError):
dataset_client.get_draft(2)
gas_client.delete_dataset(dataset_name)
| 1.875 | 2 |
ObjDetector_CV.py | JunHong-1998/OpenCV-Scikit-ObjectSizeDetector- | 3 | 12791747 | import cv2
import math
import imutils
import numpy as np
import warnings
from sklearn.cluster import KMeans
from skimage.morphology import *
from skimage.util import *
class OD_CV:
def loadImage(self, filepath):
return cv2.imread(filepath)
def resizeImage(self, image, kar, width, height):
if kar:
return imutils.resize(image, width=width)
else:
return cv2.resize(image, (width, height))
def maskIMG(self, image, pts):
mask = np.zeros(image.shape[:2], np.uint8)
mask = cv2.drawContours(mask, [pts], -1, (255,255,255), -1)
image = cv2.bitwise_and(image.copy(), image.copy(), mask=mask)
return image
def cropIMG(self, image, coords):
return image[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]
def dmntCOLOR(self, image):
image = cv2.resize(image, (0, 0), None, 0.5, 0.5)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
clt = KMeans(n_clusters=5, random_state=0).fit(image.reshape(-1, 3))
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
hist, _ = np.histogram(clt.labels_, bins=numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
palette = np.zeros((40, 200, 3), dtype="uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for percent, color in zip(hist, clt.cluster_centers_):
# plot the relative percentage of each cluster
endX = startX + (percent * 200)
cv2.rectangle(palette, (int(startX), 0), (int(endX), 40), color.astype("uint8").tolist(), -1)
startX = endX
return palette
def thinning(self, image, flag):
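        # skeletonize a binary edge image; flag=True picks the faster default method
        # (live streaming), otherwise Lee's method is used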
image = img_as_float(image)
if flag: #live streaming, faster computation
skeleton = skeletonize(image > 0)
else: # upload image mode
skeleton = skeletonize(image > 0, method='lee')
return img_as_ubyte(skeleton)
def thresholding(self, image, auto, lower, max):
if auto:
_, image = cv2.threshold(image.copy(), 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
else:
_, image = cv2.threshold(image.copy(), lower, max, cv2.THRESH_BINARY)
return image
def color_CVT(self, image, flag):
if flag==1:
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
elif flag==2:
return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
def compareIMG(self, image):
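        # tile four equally sized images into a 2x2 grid separated by thin white/black dividers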
h,w = image[0].shape[:2]
bg = np.zeros((h*2+3, w*2+3, 3), np.uint8)
bg[0:h, 0:w] = image[0]
bg[0:h, w+3:w*2+3] = image[1]
bg[h+3:h*2+3, 0:w] = image[2]
bg[h+3:h*2+3, w+3:w*2+3] = image[3]
bg[0:h*2+3, w:w+3] = (255,255,255)
bg[0:h * 2 + 3, w+1:w + 2] = (0,0,0)
bg[h:h+3, 0:w*2+3] = (255,255,255)
bg[h+1:h + 2, 0:w * 2 + 3] = (0,0,0)
return bg
def Color_picker(self, color, size, wid=(10,20)):
image = np.zeros((size[0], size[1], 3), np.uint8)
image[:] = color
if wid[0]>0:
cv2.rectangle(image, (int(size[0]*.01), int(size[1]*.01)), (int(size[0]*.99), int(size[1]*.99)), (0,0,0), wid[0], cv2.LINE_AA)
if wid[1]>0:
cv2.rectangle(image, (int(size[0]*.1), int(size[1]*.1)), (int(size[0]*.9), int(size[1]*.9)), (255,255,255), wid[1], cv2.LINE_AA)
return image
def drawPrimitives(self, image, flag, points, color, thick, width=None, height=None):
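        # flag 1: contour outline, 2: padded bounding box,
        # 3: label background above the box, 4: label background below the box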
if flag==1:
cv2.polylines(image, points, True, color, thick)
elif flag==2:
cv2.rectangle(image, (points[0]-10, points[1]-10), (points[0]+points[2]+10, points[1]+points[3]+10), color, thick)
elif flag==3:
x, y, w, h = points
width_Total = x+int(w*0.05)+width
if width_Total>x+w+10:
width_Total = x+w+10
cv2.rectangle(image, (x+int(w*0.05),y-10-height), (width_Total, y-10-2), color, thick)
elif flag == 4:
x, y, w, h = points
if width!=0:
w = width
cv2.rectangle(image, (x-10,y+10+h), (x+10+w, y+10+h+height), color, thick)
def drawText(self, flag, image, text, coords, fontstyle, color, thick, height=None):
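        # map the fontstyle index to an OpenCV font, then draw the text above (flag 1)
        # or below (flag 2) the padded bounding box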
font = None
if fontstyle == 0:
font = cv2.FONT_HERSHEY_COMPLEX
elif fontstyle == 1:
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
elif fontstyle == 2:
font = cv2.FONT_HERSHEY_DUPLEX
elif fontstyle == 3:
font = cv2.FONT_HERSHEY_PLAIN
elif fontstyle == 4:
font = cv2.FONT_HERSHEY_SCRIPT_COMPLEX
elif fontstyle == 5:
font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
elif fontstyle == 6:
font = cv2.FONT_HERSHEY_TRIPLEX
elif fontstyle == 7:
font = cv2.FONT_ITALIC
x, y, w, h = coords
if flag==1:
cv2.putText(image, text, (x+int(w*0.07),y-19), font, thick, color, 1)
elif flag==2:
cv2.putText(image, text, (x-10,y+10+h+height-5), font, thick, color, 1)
def canny(self, image, GK_size, GSigma, DK_size, D_i, EK_size, E_i, cAuto, cThres_L, cThres_H, isDIL, isERO, isThin=None):
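        # grayscale -> Gaussian blur -> Canny (thresholds derived from the median when cAuto),
        # followed by optional thinning, dilation and erosion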
imgGray = self.color_CVT(image.copy(), 1)
image = cv2.GaussianBlur(imgGray, (GK_size, GK_size), GSigma)
if cAuto:
sigma = 0.33
v = np.median(image.copy())
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
else:
lower, upper = cThres_L, cThres_H
image = cv2.Canny(image, lower, upper)
if isThin:
            # thinning() also expects the live/fast flag; canny() does not receive one,
            # so assume the non-streaming (uploaded image) mode here.
            image = self.thinning(image, False)
edge = image.copy()
if isDIL:
Dial_K = np.ones((DK_size, DK_size))
image = cv2.dilate(image, Dial_K, iterations=D_i)
if isERO:
Ero_K = np.ones((EK_size, EK_size))
image = cv2.erode(image, Ero_K, iterations=E_i)
return image, edge
def sobel(self, image, GK_size, GSigma, DK_size, D_i, EK_size, E_i, Ksize, isDIL, isERO, isThin, Thres_auto, Thres_L, Thres_H, isThres, live_flag):
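        # combine the absolute Sobel gradients in x and y, then optionally threshold,
        # thin, dilate and erode the result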
imgGray = self.color_CVT(image.copy(), 1)
imgBlur = cv2.GaussianBlur(imgGray, (GK_size, GK_size), GSigma)
Sobel_X = cv2.Sobel(imgBlur.copy(), cv2.CV_64F, 1, 0, ksize=Ksize)
Sobel_Y = cv2.Sobel(imgBlur.copy(), cv2.CV_64F, 0, 1, ksize=Ksize)
sobel_img = cv2.bitwise_or(cv2.convertScaleAbs(Sobel_X), cv2.convertScaleAbs(Sobel_Y))
if isThres:
sobel_img = self.thresholding(sobel_img.copy(), Thres_auto, Thres_L, Thres_H)
if isThin:
sobel_img = self.thinning(sobel_img, live_flag)
image = sobel_img
edge = image.copy()
if isDIL:
Dial_K = np.ones((DK_size, DK_size))
image = cv2.dilate(image, Dial_K, iterations=D_i)
if isERO:
Ero_K = np.ones((EK_size, EK_size))
image = cv2.erode(image, Ero_K, iterations=E_i)
return image, edge
def prewitt(self, image, GK_size, GSigma, DK_size, D_i, EK_size, E_i, isDIL, isERO, isThin, Thres_auto, Thres_L, Thres_H, isThres, live_flag):
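        # OR together the responses of four Prewitt kernels, then optionally threshold,
        # thin, dilate and erode the result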
imgGray = self.color_CVT(image.copy(), 1)
imgBlur = cv2.GaussianBlur(imgGray, (GK_size, GK_size), GSigma)
kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
kernelx2 = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
kernely2 = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
kernels = [kernelx, kernelx2, kernely, kernely2]
prewitt_img = np.zeros_like(imgGray)
for k in kernels:
prewitt_img = cv2.bitwise_or(prewitt_img, cv2.filter2D(imgBlur.copy(), -1, k))
if isThres:
prewitt_img = self.thresholding(prewitt_img.copy(), Thres_auto, Thres_L, Thres_H)
if isThin:
prewitt_img = self.thinning(prewitt_img, live_flag)
image = prewitt_img
edge = image.copy()
if isDIL:
Dial_K = np.ones((DK_size, DK_size))
image = cv2.dilate(image, Dial_K, iterations=D_i)
if isERO:
Ero_K = np.ones((EK_size, EK_size))
image = cv2.erode(image, Ero_K, iterations=E_i)
return image, edge
def getTarget_Contour(self, image, image_edg, minArea, shapes, circular, color, thick):
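        # keep external contours that pass the per-shape area / vertex-count / circularity
        # filters; returns (approx, bbox, contour, shape index, rotated box) tuples and the
        # image with the accepted contours drawn on it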
contours, _ = cv2.findContours(image_edg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
finalCountours = []
for c in contours:
for i, shape in enumerate(shapes):
if not shape:
continue
area = cv2.contourArea(c)
if area > minArea[i]:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
bbox = cv2.boundingRect(approx)
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
rbox = np.int0(box)
                    if i==0 and len(approx) == 3:  # shape 0: exactly 3 vertices (triangle)
finalCountours.append((approx, bbox, c, i, rbox))
elif i==1 and len(approx) == 4:
finalCountours.append((approx, bbox, c, i, rbox))
elif i==2:
if len(approx) < 8:
continue
circularity = 4 * math.pi * (area / (peri*peri))
if circular[0] < circularity < circular[1]:
finalCountours.append((approx, bbox, c, i, rbox))
elif i==3:
finalCountours.append((approx, bbox, c, i, rbox))
finalCountours = sorted(finalCountours, key=lambda x:x[1], reverse=True)
if thick==0:
thick = -1
for cont in finalCountours:
cv2.drawContours(image, [cont[2]], -1, color, thick)
return finalCountours, image
def reorder(self, points):
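        # order the four corner points as top-left, top-right, bottom-right, bottom-left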
NewPoints = np.zeros_like(points)
points = points.reshape((4,2))
add = points.sum(1)
NewPoints[0] = points[np.argmin(add)]
NewPoints[2] = points[np.argmax(add)]
d_dx = np.diff(points, axis=1)
NewPoints[1] = points[np.argmin(d_dx)]
NewPoints[3] = points[np.argmax(d_dx)]
return NewPoints
def warpImg(self, image, points, size, pad=3):
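        # perspective-warp the quadrilateral to a straight-on view and return it together
        # with the pixel-per-unit scale factors (sw, sh)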
points = self.reorder(points)
# if not size:
w, h = points[1][0][0] - points[0][0][0], points[3][0][1]-points[0][0][1]
sw,sh = w/size[0], h/size[1]
# w,h = size
pts1 = np.float32(points)
pts2 = np.float32([[0,0], [w,0], [w,h], [0,h]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgWarp = cv2.warpPerspective(image, matrix, (w,h))
imgWarp = imgWarp[pad:imgWarp.shape[0]-pad, pad:imgWarp.shape[1]-pad] #remove boundary
return imgWarp, (sw,sh)
def findDist(self, flag, pts, scale, unit, deci):
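        # convert the measured edge lengths from pixels to the requested output unit
        # (via unit_conv) and format them to the given number of decimals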
unit_conv = 1
if unit[0]==0:
unit_conv = 1
elif unit[0]==1:
unit_conv = 10
elif unit[0]==2:
unit_conv = 1000
if unit[1]==0:
unit_conv /= 1
elif unit[1]==1:
unit_conv /= 10
elif unit[1]==2:
unit_conv /= 1000
def dist(pt1, pt2):
return ((pt2[0] // scale[0] - pt1[0] // scale[0]) ** 2 + (pt2[1] // scale[1] - pt1[1] // scale[1]) ** 2) ** 0.5
# if flag==1: # rect
pts = self.reorder(pts)
if flag==1: #rect
p1, p2, p3 = pts[0][0], pts[1][0], pts[3][0]
else:
p1, p2, p3 = pts[0], pts[1], pts[3]
if p1[1]==p2[1]:
newW = (p2[0]-p1[0])//scale[0]
else:
newW = dist(p1, p2)
if p1[0]==p3[0]:
newH = (p3[1]-p1[1])//scale[1]
else:
newH = dist(p1, p3)
newW = newW*unit_conv
newH = newH*unit_conv
return "{:.{}f}".format(newW, deci), "{:.{}f}".format(newH, deci)
def deviceList(self):
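        # probe consecutive camera indices until one fails to open; return the usable
        # device indices and their capture resolutions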
index = 0
arr, res = [], []
while True:
cap = cv2.VideoCapture(index)
if not cap.read()[0]:
break
else:
arr.append(str(index))
res.append((cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
cap.release()
index += 1
return arr, res | 2.5625 | 3 |
src/vimpdb/proxy.py | dtrckd/vimpdb | 110 | 12791748 | import os
import socket
import subprocess
from vimpdb import config
from vimpdb import errors
def get_eggs_paths():
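    # directories containing the vimpdb and vim_bridge packages, later handed to
    # Vim through PDB_setup_egg so the remote instance can import them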
import vim_bridge
vimpdb_path = config.get_package_path(errors.ReturnCodeError())
vim_bridge_path = config.get_package_path(vim_bridge.bridged)
return (
os.path.dirname(vimpdb_path),
os.path.dirname(vim_bridge_path),
)
class Communicator(object):
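    # thin wrapper around the Vim executable's clientserver flags
    # (--remote-expr and --remote-send)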
def __init__(self, script, server_name):
self.script = script
self.server_name = server_name
def prepare_subprocess(self, *args):
parts = self.script.split()
parts.extend(args)
return parts
def _remote_expr(self, expr):
parts = self.prepare_subprocess('--servername',
self.server_name, "--remote-expr", expr)
p = subprocess.Popen(parts, stdout=subprocess.PIPE)
return_code = p.wait()
if return_code:
raise errors.RemoteUnavailable()
child_stdout = p.stdout
output = child_stdout.read()
return output.strip()
def _send(self, command):
# add ':<BS>' to hide last keys sent in VIM command-line
command = ''.join((command, ':<BS>'))
parts = self.prepare_subprocess('--servername',
self.server_name, "--remote-send", command)
return_code = subprocess.call(parts)
if return_code:
raise errors.RemoteUnavailable()
class ProxyToVim(object):
"""
    Use subprocess to launch a Vim instance that uses clientserver mode
    to communicate with the Vim instance used for debugging.
"""
def __init__(self, communicator):
self.communicator = communicator
def _send(self, command):
self.communicator._send(command)
config.logger.debug("sent: %s" % command)
def _remote_expr(self, expr):
return self.communicator._remote_expr(expr)
def setupRemote(self):
if not self.isRemoteSetup():
# source vimpdb.vim
proxy_package_path = config.get_package_path(self)
filename = os.path.join(proxy_package_path, "vimpdb.vim")
command = "<C-\><C-N>:source %s<CR>" % filename
self._send(command)
for egg_path in get_eggs_paths():
self._send(':call PDB_setup_egg(%s)<CR>' % repr(egg_path))
self._send(':call PDB_init_controller()')
def isRemoteSetup(self):
status = self._expr("exists('*PDB_setup_egg')")
return status == '1'
def showFeedback(self, feedback):
if not feedback:
return
feedback_list = feedback.splitlines()
self.setupRemote()
self._send(':call PDB_show_feedback(%s)<CR>' % repr(feedback_list))
def displayLocals(self, feedback):
if not feedback:
return
feedback_list = feedback.splitlines()
self.setupRemote()
self._send(':call PDB_reset_watch()<CR>')
for line in feedback_list:
self._send(':call PDB_append_watch([%s])<CR>' % repr(line))
def showFileAtLine(self, filename, lineno):
if os.path.exists(filename):
self._showFileAtLine(filename, lineno)
def _showFileAtLine(self, filename, lineno):
# Windows compatibility:
# Windows command-line does not play well with backslash in filename.
# So turn backslash to slash; Vim knows how to translate them back.
filename = filename.replace('\\', '/')
self.setupRemote()
self._send(':call PDB_show_file_at_line("%s", "%d")<CR>'
% (filename, lineno))
def _expr(self, expr):
config.logger.debug("expr: %s" % expr)
result = self._remote_expr(expr)
config.logger.debug("result: %s" % result)
return result
# code leftover from hacking
# def getText(self, prompt):
# self.setupRemote()
# command = self._expr('PDB_get_command("%s")' % prompt)
# return command
class ProxyFromVim(object):
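    # receives debugger commands over a local UDP socket (see waitFor)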
BUFLEN = 512
socket_factory = socket.socket
def __init__(self, port):
self.socket_inactive = True
self.port = port
def bindSocket(self):
if self.socket_inactive:
self.socket = self.socket_factory(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', self.port))
self.socket_inactive = False
def closeSocket(self):
if not self.socket_inactive:
self.socket.close()
self.socket_inactive = True
def waitFor(self, pdb):
self.bindSocket()
(message, address) = self.socket.recvfrom(self.BUFLEN)
config.logger.debug("command: %s" % message)
return message
# code leftover from hacking
# def eat_stdin(self):
# sys.stdout.write('-- Type Ctrl-D to continue --\n')
# sys.stdout.flush()
# sys.stdin.readlines()
| 2.359375 | 2 |
drf_file_management/urls.py | FJLendinez/drf-file-management | 0 | 12791749 | from django.urls import path, include
from rest_framework import routers
from drf_file_management.views import FileAPIView
router = routers.SimpleRouter()
router.register(r'file', FileAPIView)
app_name = 'drf_file_management'
urlpatterns = router.urls
| 1.625 | 2 |
alembic/versions/0aedc36acb3f_upgrade_to_2_0_0.py | goodtiding5/flask-track-usage | 46 | 12791750 | <gh_stars>10-100
"""Upgrade to 2.0.0
Revision ID: <KEY>
Revises: 0<PASSWORD>
Create Date: 2018-04-25 09:39:38.879327
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '0<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('flask_usage', sa.Column('track_var', sa.String(128), nullable=True))
op.add_column('flask_usage', sa.Column('username', sa.String(128), nullable=True))
def downgrade():
op.drop_column('flask_usage', 'track_var')
op.drop_column('flask_usage', 'username')
| 1.28125 | 1 |