seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
7065948190
|
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
class MyWindow(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.myButton = QtWidgets.QPushButton(self)
self.myButton.clicked.connect(self.msg)
self.msg()
def msg(self):
reply = QMessageBox.information(self,  # use an information message box
"Title",
"Message",
QMessageBox.Yes | QMessageBox.No)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
myshow = MyWindow()
myshow.show()
sys.exit(app.exec_())
|
kRayvison/Pycharm_python36
|
k_test/temp_test.py
|
temp_test.py
|
py
| 727 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40880620153
|
from flask import Flask, request, jsonify
import requests
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, BooleanType
import threading
import logging
import time
app = Flask(__name__)
# Create a SparkSession (Singleton)
spark = SparkSession.builder.appName("APIDataProcessing").getOrCreate()
# Define the schema for the DataFrame
schema = StructType([
StructField("API", StringType(), True),
StructField("Description", StringType(), True),
StructField("Category", StringType(), True),
StructField("Auth", StringType(), True),
StructField("HTTPS", BooleanType(), True),
StructField("Cors", StringType(), True),
StructField("Link", StringType(), True),
])
# Function to fetch and filter data and create a Spark DataFrame
def fetch_and_filter_data(api_url):
try:
response = requests.get(api_url)
response.raise_for_status()
data = response.json()['entries']
# Filter the data to include only HTTPS links
filtered_data = [entry for entry in data if entry['HTTPS']]
# Create a Spark DataFrame from the filtered data
df = spark.createDataFrame(filtered_data, schema=schema)
return df
except requests.exceptions.RequestException as e:
logging.error(f"Error fetching data from the API: {e}")
return None
# Function to periodically fetch and save data
def periodic_data_fetch_and_save(api_url, interval_seconds, data_path):
while True:
df = fetch_and_filter_data(api_url)
if df is not None:
# Save the DataFrame as a partitioned Parquet file
df.write.partitionBy("Category").parquet(data_path, mode="append")
logging.info(f"Data saved at {time.ctime()}")
time.sleep(interval_seconds)
# Start a background thread to periodically fetch and save data
api_url = "https://api.publicapis.org/entries"
data_path = "api_data.parquet"
fetch_thread = threading.Thread(target=periodic_data_fetch_and_save, args=(api_url, 12*60*60, data_path)) # Fetch every 12 hours
fetch_thread.daemon = True
fetch_thread.start()
# Endpoint for /categories
@app.route('/categories', methods=['GET'])
def get_categories():
# Load categories from the saved data file
df = spark.read.parquet(data_path)
categories = df.select("Category").distinct().rdd.flatMap(lambda x: x).collect()
# Return categories as JSON
return jsonify(categories)
# Endpoint for /data/<category>
@app.route('/data/<category>', methods=['GET'])
def get_data_by_category(category):
# Load data from the saved data file
df = spark.read.parquet(data_path)
# Get the optional search query from the request
search_query = request.args.get('search_query')
# Filter data by category
filtered_df = df.filter(df["Category"] == category)
# Apply search filter if search_query is provided
if search_query:
search_query = search_query.lower()
filtered_df = filtered_df.filter(
(df["API"].rlike(search_query)) | (df["Description"].rlike(search_query))
)
# Convert the filtered DataFrame to a list of dictionaries
filtered_data = filtered_df.rdd.map(lambda row: row.asDict()).collect()
# Return filtered data as JSON
return jsonify(filtered_data)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
app.run(debug=True)
|
DennisVW0/DE_TASK
|
app.py
|
app.py
|
py
| 3,411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38453053872
|
import sys
input = sys.stdin.readline
n,m,r = map(int,input().split())
items = [0] + list(map(int,input().split()))
graph = [[int(5e6)]*(n+1) for _ in range(n+1)]
for _ in range(r):
s,e,w = map(int,input().split())
graph[s][e] = w
graph[e][s] = w
for i in range(1,n+1):
graph[i][i] = 0
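# Floyd-Warshall: relax every pair (i, j) through each intermediate vertex k to get all-pairs shortest paths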
def floyd():
for k in range(1,n+1):
for i in range(1,n+1):
for j in range(1,n+1):
graph[i][j] = min(graph[i][j], graph[i][k]+graph[k][j])
floyd()
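# For each candidate start zone, sum the items in every zone reachable within distance m and keep the best total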
maxFarm = 0
for zone in graph[1:]:
farm = 0
for i,x in enumerate(zone):
if x <= m:
farm += items[i]
maxFarm = max(maxFarm, farm)
print(maxFarm)
|
LightPotato99/baekjoon
|
graph/floyd/pubg.py
|
pubg.py
|
py
| 670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16930073280
|
import shutil
import difflib
import filecmp
import logging
from prettyconf import config
BOOTSTRAP_TESTS = config("BOOTSTRAP_TESTS", default=False)
LOG = logging.getLogger(__name__)
def compare_files(*, got, expected):
"""Compares the contents of a test file against expected
Args:
got: file with temp data
expected: file with expected data
Note: setting the environment variable BOOTSTRAP_TESTS=1
will overwrite the contents of the "expected" file with the
"got" file (ie bootstrapping the test files).
"""
if BOOTSTRAP_TESTS:
LOG.warning("BOOTSTRAP_TESTS: copying '%s' to '%s'", got, expected)
shutil.copy(got, expected)
got = f"{got}"
expected = f"{expected}"
are_files_identical = filecmp.cmp(got, expected)
if not are_files_identical:
diff_result = list(
difflib.unified_diff(
open(got, "r").readlines(), open(expected, "r").readlines()
)
)
LOG.warning(f"Difference between got ({got}) and expected ({expected}) ...")
for diff_line in diff_result:
LOG.warning(diff_line.strip())
return are_files_identical
|
huiwenke/3d-beacons-client
|
tests/tests_cli/utils.py
|
utils.py
|
py
| 1,186 |
python
|
en
|
code
| null |
github-code
|
6
|
71578318267
|
import torch.nn as nn
from utils.config import config
import torch
import numpy as np
from model.bbox_tools import *
import torch.nn.functional as F
from torchvision.ops import nms
def nograd(f):
def new_f(*args, **kwargs):
with torch.no_grad():
return f(*args, **kwargs)
return new_f
class FasterRCNN(nn.Module):
def __init__(self,
extractor,
rpn,
head,
offset_normalize_mean=(0., 0., 0., 0.),
offset_normalize_std=(0.1, 0.1, 0.2, 0.2),
):
super(FasterRCNN, self).__init__()
self.extractor = extractor
self.rpn = rpn
self.head = head
self.offset_normalize_mean = offset_normalize_mean
self.offset_normalize_std = offset_normalize_std
@property
def n_class(self):
return self.head.n_class
def forward(self, x, scale):
img_size = x.shape[2:]
x = self.extractor(x)
rois, rois_index, _, _, _ = self.rpn(
x=x, img_size=img_size, scale=scale)
roi_bbox_pred, roi_cls_scores = self.head(
x=x, rois=rois, rois_index=rois_index)
return rois, roi_bbox_pred, roi_cls_scores
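# Inference path: decode RoI offsets into boxes, clamp them to the image, and suppress overlaps per class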
@nograd
def predict(self, img, scale=1., eval=True):
if eval:
self.nms_thresh = 0.3
self.score_thresh = 0.05
# self.score_thresh = 0.65
else:
self.nms_thresh = 0.3
self.score_thresh = 0.7
_, _, H, W = img.shape
img_size = (H, W)
device = img.device
self.eval()
# with torch.no_grad():
roi, roi_bbox_pred, roi_cls_scores = self(img, scale=scale)
mean = torch.Tensor(self.offset_normalize_mean).to(
device)[None, None, :]
std = torch.Tensor(self.offset_normalize_std).to(device)[None, None, :]
roi_bbox_pred = roi_bbox_pred.view(roi_bbox_pred.shape[0], -1, 4)
roi_bbox_pred = (roi_bbox_pred * std) + mean
roi = torch.FloatTensor(roi).to(
device).view(-1, 1, 4).expand_as(roi_bbox_pred)
pred_bbox = offset2bbox(roi.cpu().numpy().reshape((-1, 4)),
roi_bbox_pred.cpu().numpy().reshape((-1, 4)))
pred_bbox = torch.FloatTensor(pred_bbox).to(device)
pred_bbox = pred_bbox.view(-1, self.n_class * 4)
pred_bbox[:, 0::2] = (pred_bbox[:, 0::2]).clamp(min=0, max=img_size[0])
pred_bbox[:, 1::2] = (pred_bbox[:, 1::2]).clamp(min=0, max=img_size[1])
prob = F.softmax(roi_cls_scores, dim=1)
bbox, label, score = self.suppress(pred_bbox, prob)
self.train()
return bbox, label, score
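# Per-class filtering: keep boxes whose score exceeds score_thresh, then run NMS within each foreground class (index 0 is background and is skipped)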
def suppress(self, pred_bbox, prob):
bbox = list()
label = list()
score = list()
for i in range(1, self.n_class):
pred_bbox_i = pred_bbox.view(-1, self.n_class, 4)[:, i, :]
prob_i = prob[:, i]
mask = (prob_i > self.score_thresh)
pred_bbox_i = pred_bbox_i[mask, :]
prob_i = prob_i[mask]
index_keep = nms(pred_bbox_i, prob_i, self.nms_thresh)
bbox.append(pred_bbox_i[index_keep].cpu().numpy())
label.append((i - 1) * np.ones((len(index_keep),)))
score.append(prob_i[index_keep].cpu().numpy())
bbox = np.concatenate(bbox, axis=0).astype(np.float32)
label = np.concatenate(label, axis=0).astype(np.int32)
score = np.concatenate(score, axis=0).astype(np.float32)
return bbox, label, score
def get_optimizer(self):
self.optimizer = \
torch.optim.SGD(self.parameters(), lr=config.lr,
momentum=0.9, weight_decay=config.weight_decay)
return self.optimizer
def scale_lr(self, decay=0.1):
for param_group in self.optimizer.param_groups:
param_group['lr'] *= decay
return self.optimizer
|
langfengQ/FasterRCNN-expanded-VOC2007
|
model/faster_rcnn.py
|
faster_rcnn.py
|
py
| 4,074 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29702682927
|
#!/usr/bin/env python3
import os
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import pandas as pd
from pandas import DataFrame as df
from sklearn import svm
from sklearn.model_selection import KFold, cross_val_score
from scipy import stats
import seaborn as sns
from sklearn.linear_model import LogisticRegression,RidgeClassifier
from sklearn.metrics import (confusion_matrix,cohen_kappa_score,recall_score,
precision_score)
from sklearn.feature_selection import RFECV
#Set wd
os.chdir("D:\\Documentos\\Essex\\Machine Learning\\assignment")
cwd=os.getcwd()
#read data
data=pd.read_csv("data\\train_imp.csv", header=0)
data2=pd.read_csv("data\\val_imp.csv", header=0)
data3=pd.read_csv("data\\test_imp.csv", header=0)
#print(data.head())
#create feature matrix and feature vectors
#Training set
y=data.iloc[:,-1]
x=data.iloc[:,:-1]
names=list(x.columns)
print("Shape X matrix: ", x.shape)
print("prop: ", y.value_counts()/y.shape[0])
#validation set
y_v=data2.iloc[:,-1]
x_v=data2.iloc[:,:-1]
print("Shape X_v matrix: ", x_v.shape)
#test set
y_t=data3.iloc[:,-1]
x_t=data3.iloc[:,:-1]
print("Shape X_t matrix: ", x_t.shape)
#############################
#Feature selection
#############################
#setting up feature selection algorithm
k_fold = KFold(n_splits=10)
est=svm.SVC(kernel="linear", random_state=21)
selector=RFECV(est,cv=k_fold)
selector.fit(x,y)
#keeping selected variables and printing names for control
x=x.loc[:,selector.get_support()]
x_v=x_v.loc[:,selector.get_support()]
print("Optimal number of features : %d" % selector.n_features_)
print("Support", x.columns)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(selector.grid_scores_) + 1), selector.grid_scores_)
plt.savefig("plots\\featlog.pdf", bbox_inches='tight')
plt.close()
#############################
#Logistic Regression
#############################
#Setting up algorithm
clfnb=LogisticRegression()
#Fitting and printing cv accuracy
clfnb.fit(x,y)
print("params: ", clfnb.get_params())
score_2 = cross_val_score(clfnb, x, y, cv=k_fold, n_jobs=-1)
print('Average accuracy:', np.mean(score_2))
#Test accuracy and other measures
y_pred=clfnb.predict(x_v)
kappa=cohen_kappa_score(y_v,y_pred)
print("Kappa: ", kappa)
print("Recall: ", recall_score(y_v,y_pred))
print("Precision: ", precision_score(y_v,y_pred))
print("confussion: ", confusion_matrix(y_v,y_pred))
print("Score: ", clfnb.score(x_v,y_v))
#########################
#Predicting Test File
#########################
#Selecting only the significant features
x_t=x_t.loc[:,selector.get_support()]
#Predicting classes
y_test=clfnb.predict(x_t)
#Saving Results
pd.DataFrame(y_test, columns=["Class"]).to_csv("data\\test_logistic.csv", index=False)
print("test results shape: ", pd.DataFrame(y_test, columns=["Class"]).shape)
|
maybje/Fake-News-Detection
|
logistic.py
|
logistic.py
|
py
| 3,056 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29322733814
|
from google.cloud import bigquery
import plotly.express as px
dataset_id = "salestest"
class DatasetManager(object):
def __init__(self, dataset_id):
self.dataset_id = dataset_id
self.client = self._get_client()
def print_listed_projeto(self):
"""INFORMA O NOME DO PROJETO DO SERVICE ACCOUNT"""
projects = list(self.client.list_projects())
print("Projects:")
for project in projects:
print(project.project_id)
def data_set(self):
"""INFORMA O NOME DO DATASET DO SERVICE ACCOUNT"""
datasets = list(self.client.list_datasets())
print("Datasets:")
for dataset in datasets:
print(dataset.dataset_id)
def tabelas(self):
"""INFORMA O NOME DAS TABELAS DO SERVICE ACCOUNT"""
dataset_id = "sales_test"
tables = list(self.client.list_tables(dataset_id))
print("Tables:")
for table in tables:
print(table.table_id)
def _get_client(self):
return bigquery.Client.from_service_account_json('data/%s.json' % self.dataset_id)
def query_dataset(self, query):
return self.client.query(query).result().to_dataframe()
# RUNS A QUERY AGAINST THE SERVICE ACCOUNT'S BIGQUERY
def get_vendas_por_marca_u6m(dataset_manager):
query = f"""SELECT
brand,
SUM(value) as vendas_por_marca_u6m
FROM salestest-373621.sales_test.fact_sales_product_day
LEFT JOIN salestest-373621.sales_test.dim_product
ON dim_product.product_id = fact_sales_product_day.product_id
WHERE date >= DATE_SUB(CURRENT_DATE(), INTERVAL 24 MONTH)
GROUP BY brand
ORDER BY vendas_por_marca_u6m DESC
"""
df = dataset_manager.query_dataset(query)
return df.head()
def get_vendas_por_marca_por_dia_u6m(dataset_manager):
projeto = "salestest-373621.sales_test"
query = f"""SELECT
brand,
date,
SUM(value) as vendas_por_marca_por_dia_u6m
FROM {projeto}.fact_sales_product_day
JOIN {projeto}.dim_product
ON dim_product.product_id = fact_sales_product_day.product_id
WHERE date >= DATE_SUB(CURRENT_DATE(), INTERVAL 6 MONTH)
GROUP BY brand, date"""
df = dataset_manager.query_dataset(query)
return df
def plotar_vendas(dataset_manager):
query = f"""SELECT
brand,
SUM(value) as vendas_por_marca_u6m
FROM salestest-373621.sales_test.fact_sales_product_day
LEFT JOIN salestest-373621.sales_test.dim_product
ON dim_product.product_id = fact_sales_product_day.product_id
WHERE date >= DATE_SUB(CURRENT_DATE(), INTERVAL 12 MONTH)
GROUP BY brand
ORDER BY vendas_por_marca_u6m DESC
"""
df = dataset_manager.query_dataset(query)
fig = px.bar(df,
title="VENDAS",
x="brand",
y="vendas_por_marca_u6m",
color="brand",
text_auto='.2s'
)
fig.update_layout(paper_bgcolor="white",
plot_bgcolor="white",
yaxis_title='Faturamento'
)
fig.update_traces(marker_color='darkgreen',
marker_line_color='rgb(8,48,107)',
marker_line_width=1.5,
opacity=0.9,
textfont_size=12,
textangle=0,
textposition="outside",
cliponaxis=False)
return fig.show()
if __name__ == "__main__":
query_manager = DatasetManager(dataset_id="salestest")
plotar_vendas(query_manager)
|
luizgnunes/PesquisaJsonECriacaoGrafico
|
main.py
|
main.py
|
py
| 4,026 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32717608076
|
import gymnasium as gym
from gymnasium import error, spaces, utils, Env
from gymnasium.spaces import MultiDiscrete, Box
from gymnasium.utils import seeding
import math
import pymunk
import pygame
from pymunk import pygame_util
screen_width = 1904
screen_height = 960
target = 350
class Robot():
def __init__(self, space):
self.tick = 0
moment = 10
friction = 0.6
self.shape = pymunk.Poly.create_box(None, (50, 100))
body_moment = pymunk.moment_for_poly(moment, self.shape.get_vertices())
self.body = pymunk.Body(moment, body_moment)
self.body.position = (200, 350)
self.shape.body = self.body
self.shape.color = (150, 150, 150, 0)
head_moment = pymunk.moment_for_circle(moment, 0, 30)
self.head_body = pymunk.Body(moment, head_moment)
self.head_body.position = (self.body.position.x, self.body.position.y+80)
self.head_shape = pymunk.Circle(self.head_body, 30)
self.head_shape.friction = friction
self.head_joint = pymunk.PivotJoint(self.head_body, self.body, (-5, -30), (-5, 50))
self.head_joint2 = pymunk.PivotJoint(self.head_body, self.body, (5, -30), (5, 50))
arm_size = (100, 20)
self.left_arm_upper_shape = pymunk.Poly.create_box(None, arm_size)
left_arm_upper_moment = pymunk.moment_for_poly(moment, self.left_arm_upper_shape.get_vertices())
self.left_arm_upper_body = pymunk.Body(moment, left_arm_upper_moment)
self.left_arm_upper_body.position = (self.body.position.x-70, self.body.position.y+30)
self.left_arm_upper_shape.body = self.left_arm_upper_body
self.left_arm_upper_joint = pymunk.PivotJoint(self.left_arm_upper_body, self.body, (arm_size[0] / 2, 0), (-25, 30))
self.la_motor = pymunk.SimpleMotor(self.body, self.left_arm_upper_body, 0)
self.right_arm_upper_shape = pymunk.Poly.create_box(None, arm_size)
right_arm_upper_moment = pymunk.moment_for_poly(moment, self.right_arm_upper_shape.get_vertices())
self.right_arm_upper_body = pymunk.Body(moment, right_arm_upper_moment)
self.right_arm_upper_body.position = (self.body.position.x+70, self.body.position.y+30)
self.right_arm_upper_shape.body = self.right_arm_upper_body
self.right_arm_upper_joint = pymunk.PivotJoint(self.right_arm_upper_body, self.body, (-arm_size[0] / 2, 0), (25, 30))
self.ra_motor = pymunk.SimpleMotor(self.body, self.right_arm_upper_body, 0)
thigh_size = (30, 60)
self.lu_shape = pymunk.Poly.create_box(None, thigh_size)
lu_moment = pymunk.moment_for_poly(moment, self.lu_shape.get_vertices())
self.lu_body = pymunk.Body(moment, lu_moment)
self.lu_body.position = (self.body.position.x-20, self.body.position.y-75)
self.lu_shape.body = self.lu_body
self.lu_shape.friction = friction
self.lu_joint = pymunk.PivotJoint(self.lu_body, self.body, (0, thigh_size[1] / 2), (-20, -50))
self.lu_motor = pymunk.SimpleMotor(self.body, self.lu_body, 0)
self.ru_shape = pymunk.Poly.create_box(None, thigh_size)
ru_moment = pymunk.moment_for_poly(moment, self.ru_shape.get_vertices())
self.ru_body = pymunk.Body(moment, ru_moment)
self.ru_body.position = (self.body.position.x+20, self.body.position.y - 75)
self.ru_shape.body = self.ru_body
self.ru_shape.friction = friction
self.ru_joint = pymunk.PivotJoint(self.ru_body, self.body, (0, thigh_size[1] / 2), (20, -50))
self.ru_motor = pymunk.SimpleMotor(self.body, self.ru_body, 0)
leg_size = (20, 70)
self.ld_shape = pymunk.Poly.create_box(None, leg_size)
ld_moment = pymunk.moment_for_poly(moment, self.ld_shape.get_vertices())
self.ld_body = pymunk.Body(moment, ld_moment)
self.ld_body.position = (self.lu_body.position.x, self.lu_body.position.y - 65)
self.ld_shape.body = self.ld_body
self.ld_shape.friction = friction
self.ld_joint = pymunk.PivotJoint(self.ld_body, self.lu_body, (0, leg_size[1] / 2), (0, -thigh_size[1] / 2))
self.ld_motor = pymunk.SimpleMotor(self.lu_body, self.ld_body, 0)
self.rd_shape = pymunk.Poly.create_box(None, leg_size)
rd_moment = pymunk.moment_for_poly(moment, self.rd_shape.get_vertices())
self.rd_body = pymunk.Body(moment, rd_moment)
self.rd_body.position = (self.ru_body.position.x, self.ru_body.position.y - 65)
self.rd_shape.body = self.rd_body
self.rd_shape.friction = friction
self.rd_joint = pymunk.PivotJoint(self.rd_body, self.ru_body, (0, leg_size[1] / 2), (0, -thigh_size[1] / 2))
self.rd_motor = pymunk.SimpleMotor(self.ru_body, self.rd_body, 0)
foot_size = (45, 20)
self.lf_shape = pymunk.Poly.create_box(None, foot_size)
rd_moment = pymunk.moment_for_poly(moment, self.lf_shape.get_vertices())
self.lf_body = pymunk.Body(moment, rd_moment)
self.lf_body.position = (self.ld_body.position.x + foot_size[0]/6, self.ld_body.position.y - (foot_size[1]*2))
self.lf_shape.body = self.lf_body
self.lf_shape.friction = friction
self.lf_shape.elasticity = 0.1
self.lf_joint = pymunk.PivotJoint(self.ld_body, self.lf_body, (-5, -leg_size[1] / 2), (-foot_size[0]/2 + 10, foot_size[1]/2))
self.lf_motor = pymunk.SimpleMotor(self.ld_body, self.lf_body, 0)
self.rf_shape = pymunk.Poly.create_box(None, foot_size)
rd_moment = pymunk.moment_for_poly(moment, self.rf_shape.get_vertices())
self.rf_body = pymunk.Body(moment, rd_moment)
self.rf_body.position = (self.rd_body.position.x + foot_size[0]/6, self.rd_body.position.y - (foot_size[1]*2))
self.rf_shape.body = self.rf_body
self.rf_shape.friction = friction
self.rf_shape.elasticity = 0.1
self.rf_joint = pymunk.PivotJoint(self.rd_body, self.rf_body, (-5, -leg_size[1] / 2), (-foot_size[0]/2 + 10, foot_size[1]/2))
self.rf_motor = pymunk.SimpleMotor(self.rd_body, self.rf_body, 0)
space.add(self.body, self.shape, self.head_body, self.head_shape, self.head_joint, self.head_joint2)
space.add(self.left_arm_upper_body, self.left_arm_upper_shape, self.left_arm_upper_joint, self.la_motor)
space.add(self.right_arm_upper_body, self.right_arm_upper_shape, self.right_arm_upper_joint, self.ra_motor)
space.add(self.lu_body, self.lu_shape, self.lu_joint, self.lu_motor)
space.add(self.ru_body, self.ru_shape, self.ru_joint, self.ru_motor)
space.add(self.ld_body, self.ld_shape, self.ld_joint, self.ld_motor)
space.add(self.rd_body, self.rd_shape, self.rd_joint, self.rd_motor)
space.add(self.lf_body, self.lf_shape, self.lf_joint, self.lf_motor)
space.add(self.rf_body, self.rf_shape, self.rf_joint, self.rf_motor)
shape_filter = pymunk.ShapeFilter(group=1)
self.shape.filter = shape_filter
self.head_shape.filter = shape_filter
self.left_arm_upper_shape.filter = shape_filter
self.right_arm_upper_shape.filter = shape_filter
self.lu_shape.filter = shape_filter
self.ru_shape.filter = shape_filter
self.ld_shape.filter = shape_filter
self.rd_shape.filter = shape_filter
self.lf_shape.filter = shape_filter
self.rf_shape.filter = shape_filter
self.lu_flag = False
self.ld_flag = False
self.ru_flag = False
self.rd_flag = False
self.la_flag = False
self.ra_flag = False
self.lf_flag = False
self.rf_flag = False
def get_data(self):
lu = ((360 - math.degrees(self.lu_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
ld = ((360 - math.degrees(self.ld_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
lf = ((360 - math.degrees(self.lf_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
ru = ((360 - math.degrees(self.ru_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
rd = ((360 - math.degrees(self.rd_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
rf = ((360 - math.degrees(self.rf_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
la = ((360 - math.degrees(self.left_arm_upper_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
ra = ((360 - math.degrees(self.right_arm_upper_body.angle)) - (360 - math.degrees(self.body.angle))) / 360.0
return ru, rd, lu, ld, la, ra, lf, rf
#removed self.body.angle
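# Joint-limit guard: zero a motor's rate once the relative angle between linked bodies leaves its range (roughly +/-90 degrees, with a tighter -45 degree limit for the feet)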
def update(self):
#lu
self.lu_flag = False
if (360 - math.degrees(self.lu_body.angle)) - (360 - math.degrees(self.body.angle)) >= 90 and self.lu_motor.rate > 0:
self.lu_motor.rate = 0
self.lu_flag = True
elif (360 - math.degrees(self.lu_body.angle)) - (360 - math.degrees(self.body.angle)) <= -90 and self.lu_motor.rate < 0:
self.lu_motor.rate = 0
self.lu_flag = True
#ld
self.ld_flag = False
if (360 - math.degrees(self.ld_body.angle)) - (360 - math.degrees(self.lu_body.angle)) >= 90 and self.ld_motor.rate > 0:
self.ld_motor.rate = 0
self.ld_flag = True
elif (360 - math.degrees(self.ld_body.angle)) - (360 - math.degrees(self.lu_body.angle)) <= -90 and self.ld_motor.rate < 0:
self.ld_motor.rate = 0
self.ld_flag = True
#ru
self.ru_flag = False
if (360 - math.degrees(self.ru_body.angle)) - (360 - math.degrees(self.body.angle)) >= 90 and self.ru_motor.rate > 0:
self.ru_motor.rate = 0
self.ru_flag = True
elif (360 - math.degrees(self.ru_body.angle)) - (360 - math.degrees(self.body.angle)) <= -90 and self.ru_motor.rate < 0:
self.ru_motor.rate = 0
self.ru_flag = True
#rd
self.rd_flag = False
if (360 - math.degrees(self.rd_body.angle)) - (360 - math.degrees(self.ru_body.angle)) >= 90 and self.rd_motor.rate > 0:
self.rd_motor.rate = 0
self.rd_flag = True
elif (360 - math.degrees(self.rd_body.angle)) - (360 - math.degrees(self.ru_body.angle)) <= -90 and self.rd_motor.rate < 0:
self.rd_motor.rate = 0
self.rd_flag = True
#lf
self.lf_flag = False
if (360 - math.degrees(self.lf_body.angle)) - (360 - math.degrees(self.ld_body.angle)) >= 90 and self.lf_motor.rate > 0:
self.lf_motor.rate = 0
self.lf_flag = True
elif (360 - math.degrees(self.lf_body.angle)) - (360 - math.degrees(self.ld_body.angle)) <= -45 and self.lf_motor.rate < 0:
self.lf_motor.rate = 0
self.lf_flag = True
#rf
self.rf_flag = False
if (360 - math.degrees(self.rf_body.angle)) - (360 - math.degrees(self.rd_body.angle)) >= 90 and self.rf_motor.rate > 0:
self.rf_motor.rate = 0
self.rf_flag = True
elif (360 - math.degrees(self.rf_body.angle)) - (360 - math.degrees(self.rd_body.angle)) <= -45 and self.rf_motor.rate < 0:
self.rf_motor.rate = 0
self.rf_flag = True
def add_land(self,space):
body = pymunk.Body(body_type=pymunk.Body.STATIC)
body.position = (0, 100)
land = pymunk.Segment(body, (0, 50), (99999, 50), 10)
land.friction = 1.0
land.elasticity = 0.1
space.add(body, land)
body_2 = pymunk.Body(body_type=pymunk.Body.STATIC)
body_2.position = (target, -50)
t_block = pymunk.Segment(body_2, (0, 100), (20, 100), 10)
space.add(body_2, t_block)
class Walker(Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.action_space = MultiDiscrete([3]*8)
self.observation_space = Box(-20,20,[8])
self.viewer = None
self.last_horizontal_pos = 0
self.last_vertical_pos = 0
self.step_nr = 0
self.max_step = 900
def check_fall(self):
if self.robot.body.position[1] < self.initial_height-50:
return True
if self.robot.body.position[0] < 0 or self.robot.body.position[0] > screen_width:
return True
return False
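# Reward shaping: moving right earns more than standing still or moving backwards, with a penalty when neither foot touches the ground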
def calculate_reward(self):
shape = self.space.shapes[-2]
contact_lf = len(self.robot.lf_shape.shapes_collide(b=shape).points)
contact_rf = len(self.robot.rf_shape.shapes_collide(b=shape).points)
if (self.robot.body.position[0] - self.last_horizontal_pos) > 1:
reward = 10
elif (self.robot.body.position[0] - self.last_horizontal_pos) >= -1:
reward = 9
else:
reward = 8
if not contact_lf and not contact_rf:
reward -= 6
return reward
def check_complete(self):
if self.robot.body.position[0] >= target:  # target (350) is the x position of the goal
return True
def step(self, actions):
self.step_nr += 1
actions = [(a-1)*2 for a in actions]
self.robot.ru_motor.rate = actions[0]
self.robot.rd_motor.rate = actions[1]
self.robot.lu_motor.rate = actions[2]
self.robot.ld_motor.rate = actions[3]
self.robot.la_motor.rate = actions[4]
self.robot.ra_motor.rate = actions[5]
self.robot.lf_motor.rate = actions[6]
self.robot.rf_motor.rate = actions[7]
self.robot.update()
self.space.step(1/50)
done = False
reward = self.calculate_reward()
if self.check_fall():
done = True
reward = 0
if self.check_complete():
done = True
reward = 16
if self.step_nr >= self.max_step:
done = True
info = {}
observation = self.robot.get_data()
self.last_horizontal_pos = self.robot.body.position[0]
self.last_vertical_pos = self.robot.body.position[1]
truncated = False
return(
observation,
reward,
done,
truncated,
info)
def render(self, mode='human', close=False):
if self.viewer is None:
self.viewer = pygame.init()
pygame_util.positive_y_is_up = True
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode((screen_width, screen_height))
self.draw_options = pygame_util.DrawOptions(self.screen)
self.screen.fill((255, 255, 255))
self.space.debug_draw(self.draw_options)
pygame.display.flip()
self.clock.tick(25)
return pygame.surfarray.array3d(self.screen)
def reset(self):
self.step_nr = 0
self.space = pymunk.Space()
self.space.gravity = (0.0, -990)
self.robot = Robot(self.space)
self.robot.add_land(self.space)
self.initial_height = self.robot.body.position[1]
self.initial_horizontal = self.robot.body.position[0]
observation = self.robot.get_data()
return(observation,None)
|
robertofiguz/2dWalker
|
Walker/envs/Walker_env.py
|
Walker_env.py
|
py
| 15,192 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21794976008
|
# Example test given in the question
n = 3
p = [90, 80, 40]
x = 1000
def f(n: int):
if n <= 400:
return 1
else:
return 20
def Sell_Stocks(days, stocks, p, f):
opt = [[0 for i in range(stocks + 1)] for i in range(days + 1)] # opt table
s = [[0 for i in range(stocks + 1)] for i in range(days + 1)] # price drop table
a = [[0 for i in range(stocks + 1)] for i in range(days + 1)] # stocks sold for each entry
for i in range(days + 1):
if i == 0: # use day 0 as all 0's for the algo
continue
# iterate through every number of stocks to have sold by day i
for j in range(stocks + 1):
max_revenue = 0
drop = 0
sold = 0
# get max income for each stock value j to have sold by day i
for k in range(j + 1):
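# Sell k shares on day i: each sells at p[i-1] minus the accumulated price drop s[i-1][j-k] minus the drop f(k) caused by this sale, added to the best revenue opt[i-1][j-k]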
revenue = opt[i - 1][j - k] + k * (p[i - 1] - s[i - 1][j - k] - f(k))
if revenue > max_revenue:
max_revenue = revenue
drop = s[i - 1][j - k] + f(k)
sold = k
opt[i][j] = max_revenue
s[i][j] = drop
a[i][j] = sold
# backtracking
indices = [stocks]
sold = []
for d in range(days):
sold.append(a[days - d][indices[d]])
indices.append(indices[d] - sold[d])
sold.reverse() # ans is in reverse order
return sold
print(Sell_Stocks(n, x, p, f))
|
Asi4nn/UTSC
|
Year3/CSCC73/Assignments/a5_q2.py
|
a5_q2.py
|
py
| 1,298 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8413183584
|
# %% markdown
## Experiment 3 Trials
# %%
import numpy as np
import fire
import random
import pandas as pd
import json
from itertools import product
from markdown import markdown
import textwrap
from copy import deepcopy
import os, sys, json, pprint
from vgc_project.gridutils import transformations, getFeatureXYs
# %% codecell
def main(BASEGRIDS_FILENAME, EXP_NAME, EXP_CONFIG_FILE):
# %% codecell
basegrids = json.load(open(BASEGRIDS_FILENAME, 'r'))
sharedparams = {
"feature_colors": {
"#": "black",
"G": "yellow",
"S": "white",
".": "white",
**{i: "mediumblue" for i in "0123456"}
},
"wall_features": ["#", ] + list("0123456"),
"show_rewards": False,
}
nRounds = 4*2
roundBonusCents = 15
INITIALGOAL_COUNTDOWN_SEC = 60000
EXPERIMENTVERSION = "1.7c"
emptygrid = [
"............G",
".............",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
".............",
"S............"
]
instructionstraining = [
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# Instructions
Thank you for participating in our experiment!
You will play a game where you control a blue circle on a grid. You can move up, down, left, or right by pressing the __arrow keys__⬆️⬇️⬅️➡️.
<img src="static/images/bluedotgrid.png" width="150px">
The <span style='background-color: yellow;'><b>Yellow</b></span> tile with the <span style="color: green"><b>green</b></span> square is the goal 👀.
<img src="static/images/goalsquare.png" width="150px">
The green square will shrink when you stand still. It will initially shrink slowly, and then shrink quickly once you start moving. Keep moving!
<br>
__Black__ tiles are walls that you cannot pass through ⛔️.
<br>
<span style="background-color: cornflowerblue;color:white"><b>Blue</b></span> tiles are obstacles that might change
between different rounds. You cannot pass through these either 🚫.
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
},
{
"type": "GridNavigation",
"round": 0,
"roundtype": "practice",
"bonus": False,
"message": """Get to the <span style='background-color: yellow;'>Yellow</span> goal. You cannot go through <br><span style='background-color: black;color: white'>Black</span> or <br><span style='background-color: cornflowerblue; color:white'>Blue</span> tiles.""",
"taskparams": {
"feature_array": [
"G.........000",
"............0",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
"0............",
"000.........."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingA",
**sharedparams
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "GridNavigation",
"round": 1,
"roundtype": "practice",
"bonus": False,
"message": """Get to the <span style='background-color: yellow;'>Yellow</span> goal. You cannot go through <br><span style='background-color: black;color: white'>Black</span> or <br><span style='background-color: cornflowerblue; color:white'>Blue</span> tiles.""",
"taskparams": {
"feature_array": [
"G............",
".............",
".............",
"......#......",
"......#......",
"......#...0..",
".00#######000",
".0....#......",
".0....#......",
"......#......",
".............",
".............",
"............."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingB",
**sharedparams
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# Instructions
Great! Now, you will be given similar grids, however, after your first move the
<span style="background-color: cornflowerblue;color:white"><b>Blue</b></span> tiles
will become invisible.
""")),
"timing_post_trial": 1000,
"continue_wait_time": 2000,
},
{
"type": "GridNavigation",
"round": 2,
"roundtype": "practice",
"bonus": False,
"message": """The <span style='background-color: cornflowerblue; color:white'>Blue</span> tiles turn invisible.""",
"taskparams": {
"feature_array": [
"G.........000",
"............0",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
"0............",
"000.........."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingA",
**sharedparams,
"TILE_SIZE": 40
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"hideObstaclesOnMove": True,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "GridNavigation",
"round": 3,
"roundtype": "practice",
"bonus": False,
"message": """The <span style='background-color: cornflowerblue; color:white'>Blue</span> tiles turn invisible.""",
"taskparams": {
"feature_array": [
"G............",
".............",
".............",
"......#......",
"......#......",
"......#...0..",
".00#######000",
".0....#......",
".0....#......",
"......#......",
".............",
".............",
"............."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingB",
**sharedparams
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"hideObstaclesOnMove": True,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent(f"""
# Instructions
Great!
<br>
Next, we will give you a series of {nRounds} rounds. For each round, you will receive a
bonus of {roundBonusCents} cents but <b>only if you reach the goal
without the green square disappearing</b>.
You can win a total bonus of up to ${nRounds*roundBonusCents/100:.2f}.
<br>
At the start of each round, we will show you a grid showing only the walls (black).
When you are ready to begin the round, press the __spacebar__.
The obstacles (<span style="background-color: cornflowerblue; color:white">blue</span>),
your start location, and goal will appear.
Remember, once you move, the blue obstacles will turn invisible!
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
}
]
location_instructions = [
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# ☝️ Please note the following
To transition between the rounds as smoothly as possible, we recommend using
one hand to press the spacebar and the other to use the arrow keys 🙌.
<br>
Try to go as <u>quickly</u> and <u>carefully</u> as possible 💫.
<br>
In addition, we are interested in your thought process while navigating each maze 🤔.
Following each trial, we will ask you about where one of the obstacles was originally placed.
You will be shown two possible locations, and asked where it was in the maze you just did.
**Your answers to these questions will not affect your bonus, but please try to respond accurately.**
<br>
Thanks again for participating in our experiment!
<br>
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
},
{
"type": 'survey-multi-choice',
"questions": [
{
"prompt": "If the green square disappears, you will not receive a bonus on that round:",
"options": ["True", "False"],
"required": True,
"name": "navCheck"
},
{
"prompt": "How much of a bonus will you receive for completing each maze before the green square disappears?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "navBonusCheck"
},
{
"prompt": "How much of a bonus will you receive for answering the questions about what you paid attention to?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "memoryBonusCheck"
}
]
}
]
awareness_instructions = [
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# ☝️ Please note the following
To transition between the rounds as smoothly as possible, we recommend using
one hand to press the spacebar and the other to use the arrow keys 🙌.
<br>
Try to go as <u>quickly</u> and <u>carefully</u> as possible 💫.
<br>
In addition, we are interested in your thought process while navigating each maze 🤔.
Following each trial, we will ask you <u>how aware of each obstacle you were at any point</u>.
Your answer should reflect the amount you paid attention to an obstacle, whether it was
at the beginning or end of navigating the maze.
**Your answers to these questions will not affect your bonus**.
<br>
Finally, <b>the maze rounds will sometimes end randomly <u>before</u> you reach the goal</b>.
As long as the green square has not disappeared, you will receive your bonus on that round,
but we will still ask you questions about your thought process.
<br>
Thanks again for participating in our experiment!
<br>
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
},
{
"type": 'survey-multi-choice',
"questions": [
{
"prompt": "If the green square disappears, you will not receive a bonus on that round:",
"options": ["True", "False"],
"required": True,
"name": "navCheck"
},
{
"prompt": "How much of a bonus will you receive for completing each maze before the green square disappears?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "navBonusCheck"
},
{
"prompt": "How much of a bonus will you receive for answering the questions about what you paid attention to?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "memoryBonusCheck"
}
]
}
]
# %% codecell
posttask = [
{
"type": 'survey-text',
"questions": [
{
"prompt": "Please describe your process for answering the questions.",
"rows": 5,
"columns":50,
"required": True,
"name": "howAttention"
},
{
"prompt": "Any general comments?",
"rows": 5,
"columns":50,
"required": True,
"name": "generalComments"
},
],
},
{
"type": 'survey-likert',
"questions": [
{
"prompt": "How often do you play video games?",
"labels": ["Never", "Every few months", "Monthly", "Weekly", "Daily"],
"required": True,
"name": "videogames"
}
]
},
{
"type": 'survey-text',
"questions": [
{"prompt": "Age", "required": True, "name": "age"},
{"prompt": "Gender", "required": True, "name": "gender"},
],
}
]
# %% codecell
# ## Generate main trials
expgrids = {}
#terminate after 2 steps
earlyTermDist = [[-2, 0], [2, 0], [0, -2], [0, 2], [1, 1], [-1, 1], [1, -1], [-1, -1]]
for gridname, trialArray in basegrids.items():
for tname, trans in transformations.items():
for earlyTerm in ['full', 'earlyterm']:
tgridname = f"{gridname}-{tname}-{earlyTerm}"
transformed = trans(trialArray)
if earlyTerm == 'full':
expgrids[tgridname] = {
"feature_array": transformed,
"init_state": getFeatureXYs(transformed, "S")[0],
"absorbing_states": getFeatureXYs(transformed, "G"),
"name": tgridname,
**sharedparams
}
elif earlyTerm == 'earlyterm':
s0 = getFeatureXYs(transformed, "S")[0]
adjToS0 = [[s0[0] + dx, s0[1] + dy] for dx, dy in earlyTermDist]
adjToS0 = [s for s in adjToS0 if (s[0] >= 0) and (s[1] >= 0)]
expgrids[tgridname] = {
"feature_array": transformed,
"init_state": s0,
"absorbing_states": getFeatureXYs(transformed, "G") + adjToS0,
"name": tgridname,
**sharedparams
}
# %% codecell
# ### Trial orders
from functools import reduce
YELLOW = "#DCCB5D"
GREEN = "#44A9A0"
def generateTrialParameters(basegrids, seed, reverse=False, flipEarlyTerm=False, probetype="awareness"):
random.seed(seed)
translations = ['base', 'vflip', 'hflip', 'trans']
truemod_colors = [(GREEN, YELLOW), (YELLOW, GREEN)]
grididx = [12, 13, 14, 15]
# at the nav-trial level, randomly assign translations to the grids in 2 blocks
nblocks = 2
navtrialparams = []
for blocki in range(nblocks):
btrans = deepcopy(translations)
random.shuffle(btrans)
bgrids = deepcopy(grididx)
random.shuffle(bgrids)
navtrialparams.append([(blocki, gidx, trans) for gidx, trans in zip(bgrids, btrans)])
# at the probe-trial level, randomly but evenly assign true/mod to each obstacle in
#the first nav-trial block, then do the opposite in the second nav-trial block.
# shuffle the probe level trials within each grid block
probetrials = {}
firstblock = navtrialparams[0]
for blocki, gidx, trans in firstblock:
assert blocki == 0
probes = ['0', '1', '2', '3', '4']
probe_colororder = [[(p, corder) for corder in truemod_colors] for p in probes]
for pcolors in probe_colororder:
random.shuffle(pcolors)
probecolors0, probecolors1 = [list(pm) for pm in zip(*probe_colororder)]
random.shuffle(probecolors0)
random.shuffle(probecolors1)
probetrials[(0, gidx)] = probecolors0
probetrials[(1, gidx)] = probecolors1
# flatten the blocks
navtrialparams = sum(navtrialparams, [])
if reverse:
navtrialparams = navtrialparams[::-1]
emptygrid = [
"............G",
".............",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
".............",
"S............"
]
trialparams = []
pi = 0
for ri, (bi, gidx, trans) in enumerate(navtrialparams):
navgridname = f"gridB-{gidx}-0-{trans}-full"
#create navigation trial
trialparams.append({
"type": "GridNavigation",
"page": pi,
"round": ri,
"roundtype": "navigation",
"bonus": True,
"goalCountdown": True,
"hideObstaclesOnMove": True,
"message": "",
"taskparams": expgrids[navgridname],
"emptygrid": emptygrid,
"navgridname": navgridname,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
})
pi += 1
if probetype == "location":
#create maze-obstacle memory trials
probeparams = probetrials[(bi, gidx)]
for probeidx, (probeobs, colororder) in enumerate(probeparams):
num2alpha = dict(zip("01234", "ABCDE"))
probegridname = f"gridB-{gidx}-M-{trans}-full"
probegrid = deepcopy(expgrids[probegridname])
probeobs = str(probeobs)
obs_colors = { #color order is (true, mod)
probeobs: colororder[0],
num2alpha[probeobs]: colororder[1]
}
fc = {"#": 'black', **obs_colors}
probegrid['feature_colors'] = fc
#2afc
trialparams.append({
#plugin parameters
"type": "CustomItem",
"questiontext": "An obstacle was <b>either</b> in the yellow <b>or</b> green location (not both), which one was it?",
"responseLabels": ["Yellow", "?", "Green"],
"validResponses": ["Yellow", "Green"],
"initResponse": "?",
"responseEndLabels": ["", ""],
"stimuli": [{
"type": "gridworld",
"gridworldparams": probegrid,
"TILE_SIZE": 25
}],
"dontSave": ["stimuli", ],
#other information to save
"roundtype": "probe_2afc",
"page": pi,
"round": ri,
"queryround": probeidx, #the n-th asked about this round
"navgridname": navgridname,
"probegridname": probegridname,
"true_color": colororder[0],
"mod_color": colororder[1],
"probeobs": probeobs,
})
pi += 1
#confidence
trialparams.append({
#plugin parameters
"type": "CustomItem",
"questiontext": "How confident are you?",
"responseLabels": [1, 2, 3, 4, "?", 5, 6, 7, 8],
"validResponses": [1, 2, 3, 4, 5, 6, 7, 8],
"initResponse": "?",
"responseEndLabels": ["I guessed", "I'm certain"],
"stimuli": [{
"type": "gridworld",
"gridworldparams": probegrid,
"TILE_SIZE": 25
}],
"dontSave": ["stimuli", ],
#other information to save
"roundtype": "probe_conf",
"page": pi,
"round": ri,
"queryround": probeidx, #the n-th asked about this round
"navgridname": navgridname,
"probegridname": probegridname,
"true_color": colororder[0],
"mod_color": colororder[1],
"probeobs": probeobs,
})
pi += 1
elif probetype == "awareness":
probeparams = probetrials[(bi, gidx)]
probeorder = [probeobs for probeobs, _ in probeparams]
#create maze-obstacle attention trials
for probeidx, probeobs in enumerate(probeorder):
probegrid = deepcopy(expgrids[navgridname])
probegrid['feature_colors'][probeobs] = '#48D1CC' #MediumTurquoise
trialparams.append({
"type": "GridBlockAttentionQuery",
"page": pi,
"round": ri,
"roundtype": "attentionquery",
"queryround": probeidx, #the n-th asked about this round
"probegridparams": probegrid,
"navgridname": navgridname,
"probeobs": probeobs,
"questiontext": "How aware of the highlighted obstacle were you at any point?"
})
pi += 1
else:
assert False, "unknown probetype"
return trialparams
# %% codecell
#note, there are 8 seeds, so 8 * 2 * 2 = 32 conditions
seeds = [23199, 27190, 31210, 31290, 31993, 61993, 63993, 67993]
timelines = []
for seed, reverse, probetype in product(seeds, [True, False], ['awareness', 'location']):
maintrials = generateTrialParameters(basegrids, seed=seed, reverse=reverse, probetype=probetype)
if probetype == "awareness":
pretask = instructionstraining + awareness_instructions
elif probetype == 'location':
pretask = instructionstraining + location_instructions
timelines.append(pretask+maintrials+posttask)
# %% codecell
params = {
"nRounds": nRounds,
"roundBonusCents": roundBonusCents,
"EXP_NAME": EXP_NAME
}
experiment = {"timelines": timelines, "params": params}
json.dump(
experiment,
open(EXP_CONFIG_FILE, "w"),
sort_keys=True, indent=4
)
# %%
if __name__ == "__main__":
fire.Fire(main)
|
markkho/value-guided-construal
|
experiments/exp3/generate_trials.py
|
generate_trials.py
|
py
| 25,424 |
python
|
en
|
code
| 20 |
github-code
|
6
|
29216406296
|
import logging
import os
import pwd
import sys
from aiohttp import web
from aiomisc.utils import bind_socket
from configargparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from setproctitle import setproctitle
from yarl import URL
from megamarket.api.app import create_app
from megamarket.utils.argparse import positive_int
from megamarket.utils.pg import DEFAULT_PG_URL
ENV_VAR_PREFIX = 'MEGAMARKET_'
logging.basicConfig(level=logging.DEBUG)
parser = ArgumentParser(
auto_env_var_prefix=ENV_VAR_PREFIX,
formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument('--user', required=False, type=pwd.getpwnam,
help='Changes UID')
group = parser.add_argument_group('API Options')
group.add_argument('--api-address', default='0.0.0.0',
help='IPv4/IPv6 address API server should listen on')
group.add_argument('--api-port', type=positive_int, default=8081,
help='TCP port API server should listen on')
group = parser.add_argument_group('PostgreSQL Options')
group.add_argument('--pg-url', type=URL, default=URL(DEFAULT_PG_URL),
help='URL to use to connect to the database')
group = parser.add_argument_group('Logging Options')
group.add_argument('--log-level', default='INFO',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
def main():
args = parser.parse_args()
# clear_environ(lambda k: k.startswith(ENV_VAR_PREFIX))
logging.basicConfig(level=args.log_level, format='color')
sock = bind_socket(address=args.api_address, port=args.api_port,
proto_name='http')
if args.user is not None:
logging.info('Changing user to %r', args.user.pw_name)
os.setgid(args.user.pw_gid)
os.setuid(args.user.pw_uid)
setproctitle(os.path.basename(sys.argv[0]))
app = create_app(args)
web.run_app(app, sock=sock)
if __name__ == '__main__':
main()
|
Dest0re/backend-school2022
|
megamarket/api/__main__.py
|
__main__.py
|
py
| 1,960 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26239065759
|
from __future__ import unicode_literals, absolute_import, print_function, division
import datetime
import time
from sopel.module import commands, rule, priority, thread
from sopel.tools import Identifier
from sopel.tools.time import seconds_to_human
@commands('seen')
def seen(bot, trigger):
"""Reports when and where the user was last seen."""
if not trigger.group(2):
bot.say(".seen <nick> - Reports when <nick> was last seen.")
return
nick = trigger.group(2).strip()
if nick == bot.nick:
bot.reply("I'm right here!")
return
timestamp = bot.db.get_nick_value(nick, 'seen_timestamp')
if timestamp:
channel = bot.db.get_nick_value(nick, 'seen_channel')
message = bot.db.get_nick_value(nick, 'seen_message')
action = bot.db.get_nick_value(nick, 'seen_action')
saw = datetime.datetime.utcfromtimestamp(timestamp)
delta = seconds_to_human((trigger.time - saw).total_seconds())
msg = "I last saw " + nick
if Identifier(channel) == trigger.sender:
if action:
msg += " in here {since}, doing: {nick} {action}".format(
since=delta,
nick=nick,
action=message)
else:
msg += " in here {since}, saying: {message}".format(
since=delta,
message=message)
else:
msg += " in another channel {since}.".format(since=delta)
bot.reply(msg)
else:
bot.say("Sorry, I haven't seen {nick} around.".format(nick=nick))
@thread(False)
@rule('(.*)')
@priority('low')
def note(bot, trigger):
if not trigger.is_privmsg:
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender)
bot.db.set_nick_value(trigger.nick, 'seen_message', trigger)
bot.db.set_nick_value(trigger.nick, 'seen_action', 'intent' in trigger.tags)
|
examknow/Exambot-Source
|
sopel/modules/seen.py
|
seen.py
|
py
| 2,014 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21456848433
|
#works but need to find out how to add sound
import datetime
from playsound import playsound
alarmhour = int(input("Enter Hour: "))
alarmins = int(input("Enter Minutes: "))
alarmAm = input("AM / PM: ").upper()
if alarmAm == "pm".upper():
alarmhour += 12
while True:
if alarmhour == datetime.datetime.now().hour and alarmins == datetime.datetime.now().minute:
playsound('/Users/chnguyen/Downloads/dsa.mp3')
print("It's time mother f'er")
break
|
MortalKhangbat/MACnCHEESE
|
alarm_clock.py
|
alarm_clock.py
|
py
| 479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26244884344
|
#!/usr/bin/env python3
import sys
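# Parse FASTA records from stdin and report the ID of the sequence with the highest GC percentage (Rosalind GC problem)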
rawdata = sys.stdin.read().split('\n')
ID, dna = [], []
i = 0
while i < len(rawdata):
if rawdata[i] == '':
break
if rawdata[i][0] == '>':
ID.append(rawdata[i][1:])
dna.append("")
else:
dna[-1] += rawdata[i]
i += 1
gc = [100*(i.count('G')+i.count('C'))/len(i) for i in dna]
index = gc.index(max(gc))
print(ID[index]+'\n'+str(round(gc[index], 4)))
|
tak0kada/procon
|
rosalind/python/gc.py
|
gc.py
|
py
| 434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
137183376
|
import ROOT
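# Monkey-patch RooWorkspace accessors so a null return raises a ValueError instead of failing silently later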
def safe_factory(func):
def wrapper(self, *args):
result = func(self, *args)
if not result:
raise ValueError('invalid factory input "%s"' % args)
return result
return wrapper
ROOT.RooWorkspace.factory = safe_factory(ROOT.RooWorkspace.factory)
def safe_decorator(func):
def wrapper(self, *args):
result = func(self, *args)
if not result:
raise ValueError('cannot find %s' % args[0])
return result
return wrapper
ROOT.RooWorkspace.data = safe_decorator(ROOT.RooWorkspace.data)
ROOT.RooWorkspace.obj = safe_decorator(ROOT.RooWorkspace.obj)
ROOT.RooWorkspace.var = safe_decorator(ROOT.RooWorkspace.var)
ROOT.RooWorkspace.pdf = safe_decorator(ROOT.RooWorkspace.pdf)
def create_example_counting_oneregion_uncertainty(ws_name='ws', nb=100, ns=30, sigma_nb=0.1):
"""
nb = number of expected background
ns = number of expected signal (under signal hypothesis)
sigma_nb = uncertainty on the expected background
"""
ws = ROOT.RooWorkspace(ws_name)
ws.factory('ns[%d, 0, 100]' % ns)
ws.factory('nobs[0, %d]' % ((nb + ns) * 100))
ws.factory('nb_expected[%d]' % nb)
ws.factory('sigma_nb[%f]' % sigma_nb)
ws.factory('theta_nb[0, -5, 5]')
ws.factory('expr::nb("@0 * (1 + @1 * @2)", {nb_expected, sigma_nb, theta_nb})')
ws.factory('sum::nexp(ns, nb)')
ws.factory('Poisson::pdf_phys(nobs, nexp)')
ws.factory('Gaussian::constr_nb(global_nb[0, -5, 5], theta_nb, 1)')
ws.factory('PROD:pdf(pdf_phys, constr_nb)')
model_config = ROOT.RooStats.ModelConfig('model_config', ws)
model_config.SetParametersOfInterest('ns')
model_config.SetPdf('pdf')
model_config.SetNuisanceParameters('theta_nb')
model_config.SetGlobalObservables('global_nb')
model_config.SetObservables('nobs')
model_config.SetSnapshot(ROOT.RooArgSet(ws.var('ns')))
data_toy_signal = ws.pdf('pdf_phys').generate(ROOT.RooArgSet(ws.var('nobs')), 1)
data_toy_signal.SetName('data_toy_signal')
ws.var('ns').setVal(0)
data_toy_nosignal = ws.pdf('pdf_phys').generate(ROOT.RooArgSet(ws.var('nobs')), 1)
data_toy_nosignal.SetName('data_toy_nosignal')
bmodel = model_config.Clone('bmodel')
bmodel.SetSnapshot(ROOT.RooArgSet(ws.var('ns')))
getattr(ws, 'import')(model_config)
getattr(ws, 'import')(bmodel)
getattr(ws, 'import')(data_toy_signal)
getattr(ws, 'import')(data_toy_nosignal)
return ws
def create_example_onoff(ws_name='ws', nb=9, ns=6, tau=1):
"""
nb = number of expected background in the signal region
ns = number of expected signal (under signal hypothesis)
tau = scale factor for the background in the control-region
(nb in the control-region: nb * tau)
"""
ws = ROOT.RooWorkspace(ws_name)
ws.factory('ns[%d, 0, %d]' % (ns, ns * 100))
ws.factory('nb_sr[%d, 0, %d]' % (nb, nb * 100))
ws.factory('nobs_sr[0, %d]' % ((nb + ns) * 100))
ws.factory('nobs_cr[0, %d]' % (nb * tau * 100))
ws.factory('sum::nexp_sr(ns, nb_sr)')
ws.factory('Poisson::pdf_sr(nobs_sr, nexp_sr)')
ws.factory('prod:nb_cr(tau[%f], nb_sr)' % tau)
ws.factory('Poisson::pdf_cr(nobs_cr, nb_cr)')
ws.factory('PROD:pdf(pdf_sr, pdf_cr)')
model_config = ROOT.RooStats.ModelConfig('model_config', ws)
model_config.SetParametersOfInterest('ns')
model_config.SetPdf('pdf')
model_config.SetNuisanceParameters('nb_sr')
model_config.SetObservables('nobs_sr,nobs_cr')
model_config.SetSnapshot(ROOT.RooArgSet(ws.var('ns')))
data_toy_signal = ws.pdf('pdf').generate(model_config.GetObservables(), 1)
data_toy_signal.SetName('data_toy_signal')
ws.var('ns').setVal(0)
data_toy_nosignal = ws.pdf('pdf').generate(model_config.GetObservables(), 1)
data_toy_nosignal.SetName('data_toy_nosignal')
bmodel = model_config.Clone('bmodel')
bmodel.SetSnapshot(ROOT.RooArgSet(ws.var('ns')))
getattr(ws, 'import')(model_config)
getattr(ws, 'import')(bmodel)
getattr(ws, 'import')(data_toy_signal)
getattr(ws, 'import')(data_toy_nosignal)
return ws
if __name__ == "__main__":
example = 'onoff'
if example == 'onoff':
ws = create_example_onoff()
ws.writeToFile('ws_onoff.root')
else:
ws = create_example_counting_oneregion_uncertainty()
ws.writeToFile('simple_counting_example.root')
ws.Print()
"""
ROOT.gROOT.ProcessLine(".L StandardHypoTestInvDemo.C")
ROOT.StandardHypoTestInvDemo('simple_counting_example.root', 'ws', 'model_config', 'bmodel', 'data_toy_nosignal', 2, 3, True, 20, 0.1, 100)
"""
|
wiso/StatisticsLectures
|
create_example_ws.py
|
create_example_ws.py
|
py
| 4,634 |
python
|
en
|
code
| 10 |
github-code
|
6
|
44613554676
|
# coding=utf-8
import tensorflow as tf
import numpy as np
from data_helper import *
import gensim
import os
import time
import datetime
import csv
# TF log level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Parameters
# ==================================================
flags = tf.flags
logging = tf.logging
# Data Parameters
flags.DEFINE_string("test_data_x", "./pan11-corpus-test/LargeTest.xml", "Data source for the X of test data")
flags.DEFINE_string("test_data_y", "./pan11-corpus-test/GroundTruthLargeTest.xml", "Data source for the Y of test data")
flags.DEFINE_string("lda_path", "./lda_model/model", "LDA model file path")
flags.DEFINE_string("word2vec", "./dict_data/word_embedding_dic.json", "Data source for prepared word2vec dict")
flags.DEFINE_string("author_dict", "./dict_data/author_dict.json", "Data source for author dict")
flags.DEFINE_string("char_dict", "./dict_data/char_dict.json", "Data source for char dict")
flags.DEFINE_string("n_grams_dict", "./dict_data/n_grams_dict.json", "Data source for n-grams dict (default: 2-grams)")
flags.DEFINE_integer("max_len_char", 1000, "Number of characters in a sequence (default: 1000 >> 140)")
flags.DEFINE_integer("max_len_word", 10, "Number of words in a sequence (default: 10)")
flags.DEFINE_integer("num_topics", 200, "Number of LDA topics")
# Eval Parameters
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_model", False, "Evaluate on all test data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# word2vec = KeyedVectors.load_word2vec_format("GoogleNews-vectors-negative300.bin", binary=True)
word2vec = get_json(FLAGS.word2vec)
print("word_vectors loaded")
lda_model = gensim.models.LdaModel.load(FLAGS.lda_path + str(FLAGS.num_topics), mmap="r")
print("lda model loaded")
author_dict = get_json(FLAGS.author_dict)
print("author_dict has {} keys".format(len(author_dict)))
grams_dict = get_json("./dict_data/n_grams_dict.json")
print("char_dict has {}+1 keys, 1 means unk".format(len(grams_dict)))
x_dev, y_dev = get_dev_data(FLAGS.test_data_x,
FLAGS.test_data_y)
print("test data loaded, which have {} items".format(len(y_dev)))
# CHANGE THIS: Load data. Load your own data here
if not FLAGS.eval_model:
x_dev = ["Please let me know if you have any questions or need anything else."]
y_dev = ["x9971451464197140"]
FLAGS.max_len_char = 20
FLAGS.max_len_word = 20
dev_data_char = gen_char_batch(texts=x_dev,
authors=y_dev,
author_dict=author_dict,
n_grams_dict=grams_dict,
batch_size=len(y_dev),
max_len_char=FLAGS.max_len_char,
)
dev_data_word = gen_word_batch(texts=x_dev,
authors=y_dev,
word_vectors=word2vec,
author_dict=author_dict,
batch_size=len(y_dev),
max_len_word=FLAGS.max_len_word)
dev_data_topic = gen_topic_batch(texts=x_dev,
authors=y_dev,
author_dict=author_dict,
lda_model=lda_model,
batch_size=len(y_dev))
# Evaluation
# ==================================================
print("\nEvaluating...\n")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
x_char = graph.get_operation_by_name("x_char").outputs[0]
x_word = graph.get_operation_by_name("x_word").outputs[0]
x_topic = graph.get_operation_by_name("x_topic").outputs[0]
y = graph.get_operation_by_name("y").outputs[0]
char_dropout_keep = graph.get_operation_by_name("char_dropout_keep").outputs[0]
word_dropout_keep = graph.get_operation_by_name("word_dropout_keep").outputs[0]
topic_dropout_keep = graph.get_operation_by_name("topic_dropout_keep").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
accuracy = graph.get_operation_by_name("accuracy/accuracy").outputs[0]
# Generate batches for one epoch
x_char_dev, y_char_dev = dev_data_char.__next__()
x_word_dev, y_word_dev = dev_data_word.__next__()
x_topic_dev, y_topic_dev = dev_data_topic.__next__()
        assert np.all(y_char_dev == y_word_dev) and np.all(y_word_dev == y_topic_dev), "label batches from the char, word and topic generators must match"
y_dev = y_char_dev
# Collect the predictions here
all_predictions = []
accuracy = sess.run(accuracy, {x_char: x_char_dev, x_word: x_word_dev, x_topic: x_topic_dev, y: y_dev,
topic_dropout_keep: 1.0, char_dropout_keep: 1.0, word_dropout_keep: 1.0,
dropout_keep_prob: 1.0})
print(accuracy)
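        # Optional sketch (not in the original script): per-example predictions could be
        # fetched the same way if the graph exposes a prediction op. The op name
        # "output/predictions" below is a hypothetical placeholder, not taken from the
        # training code.
        # predictions = graph.get_operation_by_name("output/predictions").outputs[0]
        # all_predictions = sess.run(predictions, {x_char: x_char_dev, x_word: x_word_dev,
        #                                          x_topic: x_topic_dev, topic_dropout_keep: 1.0,
        #                                          char_dropout_keep: 1.0, word_dropout_keep: 1.0,
        #                                          dropout_keep_prob: 1.0})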
|
anonymous-2018-COLING/pan11
|
eval.py
|
eval.py
|
py
| 5,829 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70485936189
|
import mysql.connector
connect_DB = mysql.connector.connect(user="root", passwd="S@b3d0ri@My5ql", host="localhost")
print(connect_DB)
mycursor = connect_DB.cursor()
# Quick introduction to the program
print("This is a simple Bank Account Model Design in Python.")
print("You will be able to add a new account, make deposit, withdraw money and check balance.")
print("")
# Request for the user's account number and immediate deposit
print("Please follow the options below:")
print("1. Add a new account.")
print("2. Make a deposit.")
print("3. Check balance.")
print("4. Withdraw money.")
def one():
print("New Account")
print("Name: ")
input_name = input("")
print("Add Account Number: ")
input_number = int(input(""))
print("Make a deposit: Y/N")
input_deposit = input("")
# Save the details in the database
sqlFormula = "INSERT INTO Bank_Account.Account_Data (Name, Account_Number) VALUES (%s,%s)"
data1 = (input_name, input_number)
mycursor.execute(sqlFormula, data1) # execute the sql code
connect_DB.commit() # Save the details in the database
# success message
print("Account Created")
print("Name: " + input_name)
print("Account number: " + input_number)
# update the deposit into the database
# sort out the deposit thing
def two():
print("Deposit")
print("Name: ")
input_name = input("")
print("Account Number: ")
input_number = int(input(""))
print("How much do you want to deposit?")
input_deposit = float(input(""))
# Save the details in the database
sqlFormula = "SELECT Money FROM `Bank_Account`.`Account_Data` where Name = %s and Account_Number = %s;"
data1 = (input_name, input_number)
mycursor.execute(sqlFormula, data1)
# Get the details from the database
my_result = mycursor.fetchall() # get the current money in the account
for result in my_result:
result_conversion = float(result[0]) # convert tuple first value to float
        if input_deposit < 0.0:
            print("You cannot deposit a negative amount")
        else:
            new_balance = result_conversion + input_deposit  # add the deposit to the current balance
            print("You deposited: " + str(input_deposit) + " into your account")
            print("Your balance is: " + str(new_balance))
# Update the new balance into the database
def three():
print("Balance")
print("Name: ")
    input_name = input("")
    print("Account Number: ")
    input_number = int(input(""))
# Save the details in the database
sqlFormula = "SELECT Money FROM `Bank_Account`.`Account_Data` where Name = %s and Account_Number = %s;"
data1 = (input_name, input_number)
# Save the details in the database
mycursor.execute(sqlFormula, data1)
my_result = mycursor.fetchall() # save and show the changes in the mysql database
for result in my_result:
print(result[0])
def four():
print("Withdraw")
print("Name: ")
input_name = input("")
print("Account Number: ")
input_number = int(input(""))
print("How much do you want to withdraw?")
input_withdraw = float(input(""))
# Save the details in the database
sqlFormula = "SELECT Money FROM `Bank_Account`.`Account_Data` where Name = %s and Account_Number = %s;"
data1 = (input_name, input_number)
mycursor.execute(sqlFormula, data1)
# Get the details from the database
my_result = mycursor.fetchall() # get the current money in the account
for result in my_result:
result_conversion = float(result[0]) # convert tuple first value to float
if input_withdraw > result_conversion:
print("You cannot remove more money that your current balance")
else:
money_removed = result_conversion - input_withdraw # subtract from the current balance
print("You withdrew: " + str(input_withdraw) + " from your account")
print("Your balance is: " + str(money_removed))
# Update the new balance into the database
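# Hypothetical helper (a sketch, not part of the original program): two() and four()
# compute a new balance but never write it back. Using the same table and Money column
# referenced above, the update could look like this:
def update_balance(name, account_number, new_balance):
    sql_update = "UPDATE Bank_Account.Account_Data SET Money = %s WHERE Name = %s AND Account_Number = %s;"
    mycursor.execute(sql_update, (new_balance, name, account_number))
    connect_DB.commit()  # persist the new balance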
def error_handler():
return "Invalid Input"
print("Enter your option")
options = int(input(""))
switcher = {
1: one,
2: two,
3: three,
4: four
}
output = switcher.get(options, error_handler)()
print("")
print(output)
connect_DB.close()
|
MagnoCarlos/Bank_Account_Model_Py
|
Bank_AccountPy/main.py
|
main.py
|
py
| 4,395 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33511701341
|
from collections import defaultdict
from src.data import data_manager
from src.data.neuron_info import ntype
from src.data.dataset_info import all_datasets, datasets_with_adj, timepoint
from src.plotting import plotter
class Figure(object):
def __init__(self, output_path, page_size=7.20472):
self.plt = plotter.Plotter(output_path=output_path, page_size=page_size)
def _feed_type(self, edge, only_sensory=False, with_muscle=False):
edge_type = (ntype(edge[0]), ntype(edge[1]))
if only_sensory:
if edge_type in (('sensory', 'inter'), ('sensory', 'motor'), ('sensory', 'modulatory')):
return 'Feed-forward'
if edge_type in (('inter', 'sensory'), ('motor', 'sensory'), ('modulatory', 'sensory')):
return 'Feed-back'
if edge_type in (('sensory', 'sensory'), ):
return 'Recurrent'
return None
if edge_type in (('sensory', 'inter'), ('inter', 'motor'), ('sensory', 'motor'), ('modulatory', 'inter'), ('sensory', 'modulatory'), ('modulatory', 'motor')):
return 'Feed-forward'
if edge_type in (('inter', 'sensory'), ('motor', 'inter'), ('motor', 'sensory'), ('inter', 'modulatory'), ('modulatory', 'sensory')):
return 'Feed-back'
if edge_type in (('sensory', 'sensory'), ('inter', 'inter'), ('motor', 'motor'), ('modulatory', 'modulatory')):
return 'Recurrent'
if with_muscle and edge_type[1] == 'muscle':
return 'Feed-forward'
return None
def feedforward_stable_increase(self, f, edge_classifications, use_size=False):
G = data_manager.get_connections()['size' if use_size else 'count'].copy()
G = G[G.sum(axis=1) > 0] # remove edges without size if need be
G = data_manager.remove_postemb(G)
edge_classifications = edge_classifications.copy()
edges = [e for e in G.index if edge_classifications[e] == 'stable']
feed_types = ['Feed-forward', 'Recurrent', 'Feed-back']
feed_colors = {'Feed-forward': '#C7EAE4', 'Feed-back': '#EAC9C1', 'Recurrent': 'white'}
syn_increases = {ft: [] for ft in feed_types}
for edge in edges:
feed_type = self._feed_type(edge)
if not feed_type:
continue
syns = G.loc[edge]
if syns[0] == 0 and syns[1] == 0:
continue
syn_increase_relative = syns[['Dataset7', 'Dataset8']].mean() / syns[['Dataset1', 'Dataset2']].mean()
syn_increases[feed_type].append(syn_increase_relative)
data, c, l = [], [], []
for ft in feed_types:
data.append(syn_increases[ft])
l.append(ft)
c.append(feed_colors[ft])
if use_size:
ylim = (0, 15)
yticks = (0, 5, 10, 15)
y_label = 'Relative synapse volume increase'
size = (0.15, 0.15)
else:
ylim = (0, 12)
yticks = (0, 4, 8, 12)
y_label = 'Relative synapse addition'
size = (0.15, 0.15)
self.plt.plot(
'box_plot', data, size=size,
margin={'left': 0.04, 'right': 0.01, 'top': 0.05, 'bottom': 0.04},
colors=c, xticklabels=l, xtickpad=3, xpad=5, ylim=ylim, yticks=yticks,
y_label=y_label, x_label='Stable connection directionality',
show_outliers=False, stats=((2, 3), (1, 2), (1, 3)),
save=f+'_feedforward_stable_increase' + ('_size' if use_size else '')
)
def feedforward_edge_proportion(self, f, edge_classifications, use_size=False):
G = data_manager.get_connections()['size' if use_size else 'count'].copy()
G = G[G.sum(axis=1) > 0] # remove edges without size if need be
G = data_manager.remove_postemb(G)
edge_classifications = edge_classifications.copy()
feed_types = ['Feed-back', 'Recurrent', 'Feed-forward']
edge_types = ('stable', 'increase', 'decrease')
feed_colors = {'Feed-forward': '#C7EAE4', 'Feed-back': '#EAC9C1', 'Recurrent': 'white'}
connections = defaultdict(lambda: {ft: 0 for ft in feed_types})
edges_per_type = {
'stable': [e for e in G.index if edge_classifications[e] == 'stable'],
'increase': [e for e in G.index if edge_classifications[e] == 'increase'],
'decrease': [e for e in G.index if edge_classifications[e] == 'decrease'],
# 'Variable': [e for e in G if edge_classifications[(npair(e[0]), npair(e[1]))] in ('remainder', 'noise')]
}
xlabels = (
'Stable',
'Strengthened',
'Weakened',
)
for edge_type, edges in edges_per_type.items():
for edge in edges:
feed_type = self._feed_type(edge)
if not feed_type:
continue
connections[edge_type][feed_type] += 1
data = tuple((ft, [connections[et][ft] for et in edge_types]) for ft in feed_types)
print(data)
self.plt.plot(
'stacked_bar_graph', data, stats=((1, 2), (1, 3)), size=(0.15, 0.15),
margin={'left': 0.04, 'right': 0.08, 'top': 0.05, 'bottom': 0.04},
y_label='Proportion of connections',
colors=feed_colors, xlabels=xlabels, x_label='Connection classification',
xtickpad=3, xpad=5,
legendpos='right', legendcol=1, legendreverse=True, width=0.5,
save=f+'_feedforward_edge_proportion'
)
def feedforward_global_shift(self, f, only_sensory=False, use_size=False):
datasets = list(datasets_with_adj if use_size else all_datasets)
G = data_manager.get_connections()['size' if use_size else 'count'].copy()
G = G[G.sum(axis=1) > 0] # remove edges without size if need be
G = G[datasets]
G = data_manager.remove_postemb(G)
y_label = 'Proportion of synapses'
ylim = (0, 0.6)
if use_size:
y_label = 'Proportion of synapse volume'
if only_sensory:
y_label += ' to\nor from sensory neurons'
ylim = (0, 0.8)
feed_types = ['Feed-back', 'Feed-forward', 'Recurrent']
G['feed_types'] = G.index.map(self._feed_type)
feed_type_counts = G.groupby('feed_types').sum()
feed_type_counts = feed_type_counts / feed_type_counts.sum()
xs = [timepoint[d] for d in datasets]
colors = {
'Feed-forward': '#C7EAE4', 'Feed-back': '#EAC9C1', 'Recurrent': 'white',
'Feed-forward_edge': '#7ccfc1', 'Feed-back_edge': '#d18876', 'Recurrent_edge': '#999999'
}
data = (xs, [(ft, feed_type_counts.loc[ft]) for ft in feed_types])
self.plt.plot(
'xy_graph', data, size=(0.08, 0.16),
margin={'left': 0.04, 'right': 0.10, 'top': 0.01, 'bottom': 0.04},
y_label=y_label, ylim=ylim, stats='spearmanr', colors=colors,
x_label='Developmental age',
legendpos='right', rev_legend=True, legend_shift_top=0.03, legend_shift_right=0.05,
save=f+'_feedforward_global_shift', linkpoints=False,
# hlines=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6)
)
|
dwitvliet/nature2021
|
src/figures/feedforward.py
|
feedforward.py
|
py
| 7,340 |
python
|
en
|
code
| 13 |
github-code
|
6
|
71484309308
|
N = int(input())
ans = 0
ansx = []
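# digitsum(i) <= 9 * len(str(N)) for any i <= N, so every i with i + digitsum(i) == N
# must lie in [N - 9 * len(str(N)), N]; scanning only that window keeps the loop short.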
for i in range(max(1, N-9*(len(str(N)))), N+1):
if i + sum([int(x) for x in str(i)]) == N:
ans += 1
ansx.append(i)
print(ans)
for x in ansx: print(x)
|
knuu/competitive-programming
|
atcoder/arc/arc034_b.py
|
arc034_b.py
|
py
| 208 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70518540349
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ServiceInterval
Application implementation classes.
"""
from copy import copy
from datetime import date, timedelta
from numbers import Number
import os
import pickle
import re
import warnings
__author__ = 'Don D.S.'
# Version of ServiceInterval.
VERSION = (1, 0)
class Operation(object):
""" Represents service operation.
Examples of using:
# Create an operation type.
>>> oil_change = Operation("Changing the oil: engine",
... interval_km=10000,
... interval_year=1)
# Create done-operation copy from current operation type.
>>> oil_changed = oil_change.done(
... km=9842,
... date=date(2015, 12, 5),
... comment="Price: 4000 RUR")
# Create readable form.
>>> print(oil_changed)
2015-12-05 / 9842.0 km
Changing the oil: engine
Every 1.0 year(s) or 10000.0 km
Price: 4000 RUR
>>> print(oil_change)
Changing the oil: engine.
Every 1.0 year(s) or 10000.0 km
# Create representative form.
>>> repr(oil_change)
'Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0)'
"""
def __init__(self, label, interval_km=0, interval_year=0, interval_month=0):
""" Create service operation type.
Default intervals value is 0. It means that operation is non-periodic.
:param label: operation label or description
:param interval_km: operation interval by vehicle haul, km
:param interval_year: operation interval time, years
:param interval_month: operation interval time, months
"""
super().__init__()
# Initialize default values.
self._label = ""
self._interval_time = timedelta()
self._interval_km = 0
# For done copy of this operation type.
self._done_at_km = 0
self._done_at_date = None
# Additional information (price, parts item numbers).
self.comment = ""
# Initialize private flag.
self._is_done = False # default operation state: not done.
# Set up values for current operation instance.
self.label = label
self.interval_time = timedelta(
days=365 * interval_year + 30.4 * interval_month)
self.interval_km = interval_km
def done(self, km=0, date=None, comment=""):
# Create a copy of this operation, that has been done and return it.
done = copy(self)
done.done_at_km = km
done.done_at_date = date
done.comment = comment
done._is_done = True
return done
def undo(self):
# Clear information about operation completion
self.done_at_km = 0
self._done_at_date = None
self.comment = ""
self._is_done = False
@property
def is_done(self):
# Flag: is operation has been done?
return self._is_done
@property
def is_periodic(self):
return self.interval_km != 0
@property
def label(self):
return self._label
@label.setter
def label(self, new_title):
if isinstance(new_title, str):
self._label = new_title
else:
raise TypeError("OperationType title must be a text string.")
@property
def interval_time(self):
return self._interval_time
@interval_time.setter
def interval_time(self, interval):
if not isinstance(interval, timedelta):
raise TypeError("Time must be represented as <datetime.timedelta>"
" class instance.")
self._interval_time = interval
@property
def interval_km(self):
return self._interval_km
@interval_km.setter
def interval_km(self, new_interval):
try:
new_interval = float(new_interval)
except ValueError:
raise TypeError("Interval must be a numeric type or string number.")
if new_interval < 0:
raise ValueError("Operation interval must be positive. "
"Received value " + str(new_interval))
self._interval_km = new_interval
@property
def done_at_km(self):
return self._done_at_km
@done_at_km.setter
def done_at_km(self, new_km):
try:
new_km = float(new_km)
except ValueError:
raise TypeError(
"Haul value must be a numeric type or string number.")
# Haul can be negative if this field used to show relative distance
# from planned maintenance.
# if new_km < 0 and not relative:
# raise ValueError("Haul value must be positive. "
# "Received value " + str(new_km))
self._done_at_km = new_km
@property
def done_at_date(self):
return self._done_at_date
@done_at_date.setter
def done_at_date(self, new_date):
if isinstance(new_date, date):
self._done_at_date = new_date
else:
raise TypeError("Date must be a <datetime.date> class instance.")
def __eq__(self, other):
return self.label == other.label and self.done_at_km == other.done_at_km
def __ne__(self, other):
return not self == other
    def __lt__(self, other):
        if self.label != other.label:
            raise TypeError("unorderable operations with different labels")
        return self.done_at_km < other.done_at_km
    def __le__(self, other):
        if self.label != other.label:
            raise TypeError("unorderable operations with different labels")
        return self.done_at_km <= other.done_at_km
    def __gt__(self, other):
        if self.label != other.label:
            raise TypeError("unorderable operations with different labels")
        return self.done_at_km > other.done_at_km
    def __ge__(self, other):
        if self.label != other.label:
            raise TypeError("unorderable operations with different labels")
        return self.done_at_km >= other.done_at_km
def __repr__(self):
if self.is_done:
return "Operation({0}, interval_km={1}, interval_year={2}).done("\
"km={3}, date={4}, comment={5})".format(
self.label, self.interval_km, self.interval_time.days/365,
self.done_at_km, self.done_at_date, self.comment)
else:
return "Operation({0}, interval_km={1}, interval_year={2})".format(
self.label, self.interval_km, self.interval_time.days/365)
def __str__(self):
""" !!! ATTENTION !!!
If you change this method, you also need to change OperationList.load()
parsing method. This is bad idea.
"""
interval_months = round(self.interval_time.days/(365/12))
if self.is_done:
return "{date} / {km} km\n" \
"{label}\n" \
"Every {prd_time} or {prd_km} km\n"\
"{comment}".format(
label=self.label,
date=self.done_at_date.isoformat(),
km=self.done_at_km,
comment=self.comment,
prd_time=
str(interval_months) + " month(s)" if interval_months < 12
else str(round(interval_months/12, 1)) + " year(s)",
prd_km=self.interval_km)
else:
return "{label}.\nEvery {prd_time} or {prd_km} km".format(
label=self.label,
prd_time=str(interval_months) + " month(s)" if interval_months < 12
else
str(round(interval_months/12, 1)) + " year(s)",
prd_km=self.interval_km)
class OperationsList(list):
""" List inheritance with additional methods.
Added save(), load() methods.
Example of using:
>>> operations = OperationsList([
... Operation("Changing the oil: engine", 1, 10000),
... Operation("Changing the oil: gearbox", 3, 45000)])
>>> operations.save("doctest.txt")
"""
def __init__(self, seq=()):
super().__init__(seq)
def save(self, file):
""" Create human-readable text file from list
"""
with open(file, 'w') as fh:
for operation in self:
comm = operation.comment
# Remove empty string to prevent parsing errors on import
                comm = comm.replace('\n\n', '\n')
operation.comment = comm
print(operation, end="\n\n", file=fh)
@staticmethod
def load(file):
""" Create <OperationList> class instance from file previously created
by self.save() or created manually with the same formatting.
# Create test operation type.
>>> oil_change = Operation("Changing the oil: engine",
... interval_km=10000,
... interval_year=1)
# Create done-operation copy from current test operation type.
>>> oil_changed = oil_change.done(
... km=9842,
... date=date(2015, 12, 5),
... comment="Price: 4000 RUR")
# Format for operation that has been done:
>>> print(oil_changed)
2015-12-05 / 9842.0 km
Changing the oil: engine
Every 1.0 year(s) or 10000.0 km
Price: 4000 RUR
>>> OperationsList([oil_changed]).save('doctest.txt')
# Doctest for reading and parsing operation that has been done:
>>> print(OperationsList.load('doctest.txt'))
[Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0).done(km=9842.0, date=2015-12-05, comment=)]
# Format for operation that hasn't been done:
>>> print(oil_change)
Changing the oil: engine.
Every 1.0 year(s) or 10000.0 km
>>> OperationsList([oil_change]).save('doctest.txt')
# Doctest for reading and parsing operation that hasn't been done:
>>> print(OperationsList.load('doctest.txt'))
[Operation(Changing the oil: engine., interval_km=10000.0, interval_year=1.0)]
"""
# Regular expression that can detect, that operation has been done
re_done = re.compile(
r"(?P<yyyy>[0-9]{4})-(?P<mm>[0-9]{2})-(?P<dd>[0-9]{2})\s/\s(?P<km>[0-9.]+)\skm")
# Regular expression that can detect operation intervals line
re_interval = re.compile(
r"Every\s(?P<time>[0-9.]+)\s(?P<year_or_mon>[a-z()]+)\sor\s(?P<km>[0-9.]+)\skm")
# Output variable
ops = OperationsList()
# Operation arguments
label = None
interval_km = None
interval_year = None
interval_month = None
done_at_km = None
done_at_date = None
comment = ""
# Operation done flag
is_done = False
# Control line numbers
nline_done_first = None
# Initialize storage
line_previous = ""
with open(file, 'r') as fh:
for num, line in enumerate(fh):
line = line.strip('\n')
# At first line and after every empty line...
if line == "":
# ...append previous operation to list (if exist)
if label: # (check by label - it is necessary argument)
op = Operation(label,
interval_km,
interval_year,
interval_month)
if is_done:
op = op.done(done_at_km,
done_at_date,
comment)
ops.append(op)
# ... and reset operation args, flag, nlines - anyway
# Operation arguments
label = None
interval_km = None
interval_year = None
interval_month = None
done_at_km = None
done_at_date = None
comment = ""
# Operation done flag
is_done = False
# Control line numbers
nline_done_first = None
# Match with done-type operation
match_done = re_done.search(line)
if match_done:
is_done = True
done_at_km = int(float(match_done.group('km')))
done_at_date = date(int(match_done.group('yyyy')),
int(match_done.group('mm')),
int(match_done.group('dd')))
nline_done_first = num
# Next line after match_done line - is label
if is_done and num - 1 == nline_done_first:
label = line
# Check for intervals line
match_interval = re_interval.search(line)
if match_interval:
year_or_mon = match_interval.group('year_or_mon')
if year_or_mon == "year(s)":
interval_year = float(match_interval.group('time'))
interval_month = 0
elif year_or_mon == "month(s)":
interval_year = 0
interval_month = float(match_interval.group('time'))
else:
raise ValueError("Unable to parse line: \n" + line)
interval_km = int(float(match_interval.group('km')))
if not is_done:
label = line_previous
# Next line after label - is intervals. Already parsed.
# Next line after intervals - is comment
if is_done and num - 3 == nline_done_first:
if comment:
comment += "\n" + line
else:
comment = line
# Comment was the last part.
# For multiline comments...
nline_done_first += 1
# Keep previous line. We can detect operation that hasn't been
# done only from second string. In this case previous line will
# be used as label.
line_previous = line
return ops
class VehicleLogBook(object):
""" Represents storage of service operations for vehicle
Vehicle identified by text label and production date
WARNING!!! If you add some methods, do not forget to update
    self._modified field, which shows that the object contains unsaved changes!
Examples of using:
# Without periodical operations catalogue.
>>> car = VehicleLogBook(
... "Hyundai Getz",
... date(year=2006, month=11, day=30))
# Or with catalogue.
>>> catalogue = OperationsList([
... Operation("Changing the oil: engine", 1, 10000),])
>>> car = VehicleLogBook(
... "Hyundai Getz",
... date(year=2006, month=11, day=30),
... catalogue)
# Add complete operation.
# ...Prepare operation type.
>>> oil_change = Operation("Changing the oil: engine",
... interval_km=10000,
... interval_year=1)
# ...Prepare operation instance.
>>> oil_changed = oil_change.done(
... km=98042,
... date=date(2015, 12, 5),
... comment="Price: 4000 RUR")
# ...Add operation to log.
>>> car.add_operation_to_log(oil_changed)
# Make maintenance plan.
>>> car.make_maintenance_plan()
[Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0).done(km=108042.0, date=2016-12-04, comment=)]
# Add new periodic operation to catalogue.
# ...already exist in catalogue
>>> car.add_operation_to_cat(oil_change)
# ...new operation
>>> oil_change_gb = Operation("Changing the oil: gearbox",
... interval_km=45000,
... interval_year=3)
>>> car.add_operation_to_cat(oil_change_gb)
# Serialize (save) class instance to file.
>>> car.save("doctest")
# Deserialize (load) class instance from file
>>> print(VehicleLogBook.load("doctest"))
[Operation(Changing the oil: engine, interval_km=10000.0, interval_year=1.0).done(km=98042.0, date=2015-12-05, comment=Price: 4000 RUR)]
"""
# Extension for files of class serialization
_extension = ".sif"
def __init__(self, label, production_date, operations_cat=tuple()):
"""
:param label: vehicle text identifier
:param production_date: vehicle production date as <datetime.date>
class instance
:param operations_cat: catalogue of all periodical operations types
(iterable with items - instances of <Operation>
class)
"""
super().__init__()
# Version identifier
self._version = VERSION
self._production_date = None
self._filename = "" # filename where object saved
# Car label
self._label = label
self.production_date = production_date
# Car haul today
self._haul = 0
# List of all done operations for keeping history.
self._operations_log = OperationsList()
# Catalogue of all periodical operations types.
# keys - operation labels; values - <Operation> class instances.
self._operations_cat = dict()
for op in operations_cat:
            if op.interval_km == 0 and op.interval_time == timedelta():
raise TypeError(
"Operation <{}> is not periodic.".format(op.label) +
"\nUnable to add non-periodic operation to the catalogue "
"of periodic operations.")
self._operations_cat[op.label] = op
self._modified = False # WARNING!!! False in spite of assignation
# label and production_date during call __init___(). Becomes True after
# assignment this fields through properties.
@property
def operations_log(self):
return self._operations_log
@property
def operations_cat(self):
return self._operations_cat
@property
def haul(self):
return self._haul
@haul.setter
def haul(self, new_haul):
if isinstance(new_haul, str) and new_haul.isdigit():
new_haul = float(new_haul)
if isinstance(new_haul, Number):
self._haul = new_haul
self._modified = True
else:
raise TypeError(
"Haul value must be a Number (int, float, ...) or digit-string")
@property
def extension(self):
return self._extension
@classmethod
def get_extension(cls):
return cls._extension
@property
def filename(self):
return self._filename
@property
def is_modified(self):
return self._modified
@property
def label(self):
return self._label
@label.setter
def label(self, new_label):
if self._label != new_label:
self._modified = True
self._label = new_label
@property
def production_date(self):
return self._production_date
@production_date.setter
def production_date(self, new_prod_date):
# Car production date.
if isinstance(new_prod_date, date):
if new_prod_date != self._production_date:
self._modified = True
self._production_date = new_prod_date
else:
raise TypeError("Argument <new_prod_date> must be an instance "
"of <datetime.date> type.")
def op_label_replace(self, old, new):
"""Rename operation
- reAdd periodic operation to catalogue with new label
- and rename old operations in log with label same as old label
:param old: old label string, that must be replaced by new
:param new: new label of operation
"""
if old == new:
return
self._modified = True
for op in self._operations_log:
# Rename operations with old name to new
if op.label == old:
op.label = new
if old in self._operations_cat:
# ReAdd with new label under new label-keyword
op = self._operations_cat[old]
self._operations_cat.pop(old)
op.label = new
self.add_operation_to_cat(op)
def get_all_oper_labels(self):
""" Get set of all known operation labels
:return: list of strings
"""
labels = set()
labels = labels.union([x.label for x in self._operations_log])
labels = labels.union([x for x in self._operations_cat.keys()])
labels = list(labels)
labels.sort()
return labels
def get_periodic(self, label):
""" Find periodic operation with the same label in periodic
operations catalogue
:param label: String of operation label
:return: Operation instance or None (if no same label)
"""
if label in self._operations_cat:
return self._operations_cat[label]
else:
return None
def add_operation_to_log(self, operation):
if not isinstance(operation, Operation):
raise TypeError("Argument <operation> must be an instance "
"of <Operation> type.")
if not operation.is_done:
# It matter that operation has never been done.
raise ValueError("Operation date and haul not specified. "
"Unable to add operation that has never been "
"done.")
self._modified = True
# Put operation to the log-list.
self._operations_log.append(operation)
self._operations_log.sort(key=lambda x: x.done_at_km)
# If it is periodical operation
if operation.is_periodic:
if operation.label in self._operations_cat:
# Update last completion time for this operation
# if that is newer than last.
operation_last = self._operations_cat[operation.label]
if operation > operation_last:
self._operations_cat[operation.label] = operation
else:
# Add operation to periodic operations catalogue
self.add_operation_to_cat(operation)
def add_operation_to_cat(self, operation):
if operation.is_periodic \
and operation.label not in self._operations_cat.keys():
self._modified = True
# Default operation last completion date/haul
last_date = self._production_date
last_km = 0
# Lookup operations log for a last operation with the same label
same_operations = list(filter(lambda x: x.label == operation.label,
self._operations_log))
if len(same_operations) > 0:
last_operation = max(same_operations)
last_date = last_operation.done_at_date
last_km = last_operation.done_at_km
# Set operation last completion
operation = operation.done(last_km, last_date)
# Add operation to periodic operations catalogue
self._operations_cat[operation.label] = operation
def clear_log(self):
self._modified = True
# Clear log of produced operations.
self._operations_log.clear()
# Clear information about last operation completion
for operation in self._operations_cat.values():
operation.undo()
def clear_all(self):
self._modified = True
# Clear operations log and peridic operations catalogue.
self._operations_log.clear()
self._operations_cat.clear()
def remove_from_log(self, operations):
""" Remove specified operation from oeprations list
:param operations: list of operations
"""
for op in operations:
self._operations_log.remove(op)
self._modified = True
def remove_from_cat(self, operations):
for op in operations:
# Remove all operations in log with the same labels.
# ... get all indexes of items with the same lables
inds = [ind for ind, op_in_log
in enumerate(self._operations_log)
if op_in_log.label == op.label]
# we need to start removing from the end to prevent shift of indexes
# after removing elements
inds.reverse()
# ... pop all items with this indexes
for ind in inds:
self._operations_log.pop(ind)
# Also remove operation from catalogue.
del self._operations_cat[op.label]
self._modified = True
def make_maintenance_plan(self, haul=None, relative=True):
""" Make plan of periodic operations that must be performed.
:param haul: current vehicle haul, km. If you specify it here, than
this value will be saved in class property <haul>
:param relative: If True, than the plan with operations planned with
haul relative to current.
Otherwise - with absolute haul values
:return: list of operations, that represents plan of periodic
operations that must be performed.
"""
plan = list()
if haul:
self.haul = haul
for operation in self._operations_cat.values():
# Planned operation date.
last_date = operation.done_at_date
interval_date = operation.interval_time
plan_date = last_date + interval_date
# Planned operation haul.
last_km = operation.done_at_km
interval_km = operation.interval_km
plan_km = last_km + interval_km
# Make planned operation haul relative to current.
if relative:
plan_km -= self.haul
plan.append(operation.done(plan_km, plan_date))
plan.sort(key=lambda x: x.done_at_km)
return plan
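    # Example (based on the class docstring above): car.make_maintenance_plan() returns
    # planned operations with haul relative to the current haul; pass relative=False to
    # get absolute haul values instead.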
def export_log(self, file):
# Export operations history to txt file.
self._operations_log.save(file)
def export_cat(self, file):
# Export periodic operations catalogue to txt file.
cat = self._operations_cat.values()
# Clear last operation info and convert it to <OperationsList> type.
cat = OperationsList([x for x in cat])
for x in cat:
x.undo()
cat.save(file)
def export_plan(self, file, haul=None):
# Export maintenance plan to txt file.
plan = self.make_maintenance_plan(haul)
plan = OperationsList([x for x in plan])
plan.save(file)
def import_log(self, file):
self._modified = True
# Import operations history from txt file.
ops = OperationsList.load(file)
for op in ops:
self.add_operation_to_log(op)
def import_cat(self, file):
self._modified = True
# Import periodic operations catalogue to txt file.
ops = OperationsList.load(file)
for op in ops:
self.add_operation_to_cat(op)
def save(self, file=None):
""" Serialize current class instance.
Saving using pickle as compressed file
"""
# Make filename correct.
if not file and not self._filename:
raise ValueError("File name argument missed.")
elif not file:
file = self._filename
# Add extension (if missed).
ext = os.path.splitext(file)[-1]
if not ext or ext != self._extension:
file += VehicleLogBook._extension
# Serialize.
with open(file, 'wb') as fh:
pickle.dump(self, fh, pickle.HIGHEST_PROTOCOL)
self._modified = False
self._filename = file
@staticmethod
def load(file):
""" Create class instance from previously saved instance.
Using pickle module.
Warning
-------
The pickle module is not secure against erroneous or maliciously
constructed data. Never unpickle data received from an untrusted or
unauthenticated source.
"""
# Add extension (if missed).
ext = os.path.splitext(file)[-1]
if not ext:
file += VehicleLogBook._extension
# Deserialize.
with open(file, 'rb') as fh:
vehice_log_book = pickle.load(fh)
# Check type.
if not isinstance(vehice_log_book, VehicleLogBook):
raise TypeError("File {0} has unexpected type: {1}".format(
file,
type(vehice_log_book)))
# Check version.
if vehice_log_book._version != VERSION:
warnings.warn("File {0} created by another version "
"of class <VehicleLogBook>".format(file), Warning)
vehice_log_book._modified = False
vehice_log_book._filename = file
return vehice_log_book
def __str__(self):
return self._operations_log.__str__()
if __name__ == "__main__":
# If running that module as the main program - do doctests.
import doctest
doctest.testmod()
|
zokalo/pyServiceInterval
|
servint_utils.py
|
servint_utils.py
|
py
| 29,824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9264229712
|
from fnames import FileNames
import getpass
from socket import getfqdn
user = getpass.getuser() # Username of the user running the scripts
host = getfqdn() # Hostname of the machine running the scripts
print('Running on %s@%s' % (user, host))
if user == 'wmvan':
# My work laptop
target_path = 'M:/scratch/epasana/beamformer_simulation'
n_jobs = 6
elif host == 'nbe-024.org.aalto.fi' and user == 'vanvlm1':
# My workstation
target_path = '/m/nbe/scratch/epasana/beamformer_simulation'
n_jobs = 4
elif 'triton' in host and user == 'vanvlm1':
# The big computational cluster at Aalto University
target_path = '/scratch/nbe/epasana/beamformer_simulation'
n_jobs = 1
else:
raise RuntimeError('Please edit scripts/config.py and set the target_path '
'variable to point to the location where the data '
'should be stored and the n_jobs variable to the '
'number of CPU cores the analysis is allowed to use.')
subjects = [1, 2, 4, 5, 6, 7]
bad_subjects = [3] # For subject 3, we don't have T1 MRI
# The events to create epochs for. In this study, we only care about
# stimulation to the left medial nerve.
events_id = {
# 'visRL': 1,
# 'visR': 2,
# 'visL': 4,
# 'ssR': 8,
'ssL': 16,
# 'ssRL': 24,
# 'audR': 32,
# 'audL': 64
}
# Bad channels for each subject (manually flagged by Marijn van Vliet)
# For some subjects, the signal is really "drifty", but I decided not to flag
# these sensors, as there would be too many.
bads = {
1: ['MEG2233', 'EEG001', 'EEG035', 'EEG015'],
2: ['MEG1041', 'EEG001', 'EEG035', 'EEG002'],
3: ['MEG2233', 'MEG0741', 'EEG045', 'EEG035', 'EEG001', 'EEG027'],
4: ['MEG1842', 'MEG2113', 'MEG2342', 'MEG2233', 'MEG1942', 'MEG1922', 'EEG045', 'EEG001', 'EEG035'],
5: ['MEG2233', 'MEG0811', 'MEG2342', 'MEG0812', 'MEG0813', 'MEG0722', 'MEG0632', 'MEG0913', 'MEG0912', 'EEG001', 'EEG035', 'EEG045'],
6: ['MEG2233', 'EEG001', 'EEG035'],
7: ['MEG0213', 'MEG2233', 'MEG2212', 'MEG2231', 'EEG001', 'EEG035', 'EEG045'],
}
# For these subjects, we need an extra ICA pass to get rid of all stimulation
# artifacts. Marijn van Vliet manually picked MEG sensors that showed a lot of
# the artifacts. This is the channel the ICA components will be compared
# against to detect components that capture the artifact.
subjects_with_extra_stim_artifacts = [4, 7]
stim_artifact_sensor = {
4: 'MEG2631',
7: 'MEG1721',
}
# Frequency range used in the DICS beamformer. The optimal frequencies at which we
# find ERD/ERS effects vary a little between subjects.
freq_range = {
1: (7, 15), # Typical ERD response
2: (7, 11), # Typical ERD response
4: (7, 30), # Very wide response for this subject. Weird.
5: (13, 24), # This subject shows ERS much better than ERD, as is the case with MNE-Somato
6: (7, 15), # Typical ERD response
7: (9, 14), # Bit narrow band ERD response
}
# Amount of regularization needed for the beamformers. Varies between subjects.
reg = {
1: dict(lcmv=0.05, dics=0.05),
2: dict(lcmv=0.05, dics=2.00), # Crazy DICS regularization needed
4: dict(lcmv=0.05, dics=0.05),
5: dict(lcmv=0.05, dics=0.05),
6: dict(lcmv=0.05, dics=0.05),
7: dict(lcmv=0.05, dics=0.05),
}
# All filenames consumed and produced in this study
fname = FileNames()
fname.add('target_path', target_path)
fname.add('target_dir', '{target_path}/megset/sub{subject:02d}')
fname.add('subjects_dir', '{target_path}/megset/mri')
fname.add('subject_id', 'k{subject:d}_T1')
fname.add('raw', '{target_dir}/sub{subject:02d}-raw.fif')
fname.add('raw_tsss', '{target_dir}/sub{subject:02d}-tsss-raw.fif')
fname.add('raw_filt', '{target_dir}/sub{subject:02d}-filtered-raw.fif')
fname.add('raw_detrend', '{target_dir}/sub{subject:02d}-detrended-raw.fif')
fname.add('annotations', '{target_dir}/sub{subject:02d}-annotations.txt')
fname.add('trans', '{target_dir}/sub{subject:02d}-trans.fif')
fname.add('bem', '{target_dir}/sub{subject:02d}-vol-bem-sol.fif')
fname.add('src', '{target_dir}/sub{subject:02d}-vol-src.fif')
fname.add('fwd', '{target_dir}/sub{subject:02d}-vol-fwd.fif')
fname.add('ica', '{target_dir}/sub{subject:02d}-ica.fif')
fname.add('epochs', '{target_dir}/sub{subject:02d}-epo.fif')
fname.add('epochs_long', '{target_dir}/sub{subject:02d}-long_epo.fif')
fname.add('evoked', '{target_dir}/sub{subject:02d}-ave.fif')
fname.add('stc_mne', '{target_dir}/sub{subject:02d}_mne')
fname.add('stc_lcmv', '{target_dir}/sub{subject:02d}_lcmv')
fname.add('stc_dics', '{target_dir}/sub{subject:02d}_dics')
fname.add('nii_mne', '{target_dir}/sub{subject:02d}_mne.nii.gz')
fname.add('nii_lcmv', '{target_dir}/sub{subject:02d}_lcmv.nii.gz')
fname.add('nii_dics', '{target_dir}/sub{subject:02d}_dics.nii.gz')
fname.add('ecd', '{target_dir}/sub{subject:02d}_ecd.dip')
fname.add('report', '{target_dir}/sub{subject:02d}_report.h5')
fname.add('report_html', '{target_dir}/sub{subject:02d}_report.html')
# Maxfilter related files
fname.add('mf_database', '/work/modules/Ubuntu/14.04/amd64/t314/neuromag/3.4.1/databases')
fname.add('mf_cal', '{mf_database}/sss/sss_cal.dat')
fname.add('mf_ct', '{mf_database}/ctc/ct_sparse.fif')
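# Usage sketch (an assumption about the fnames API, not part of this file): templates
# added above are typically resolved by calling the attribute with the template's
# keyword arguments, e.g. fname.raw(subject=1) -> '<target_path>/megset/sub01/sub01-raw.fif'.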
|
wmvanvliet/beamformer_simulation
|
megset/config.py
|
config.py
|
py
| 5,242 |
python
|
en
|
code
| 4 |
github-code
|
6
|
26185561724
|
"""
Example:
words = ['cat', 'baby', 'dog', 'bird', 'car', 'ax']
string1 = 'tabncihjs'
find_embedded_word(words, string1) -> cat (the letters do not have to be in order)
"""
import collections
from typing import List
import unittest
# Using sorting
# words = ['cat', 'baby', 'dog', 'bird', 'car', 'ax'] -> act, abbbdyx, dgo, bdir, acr, ax
# string1 = 'tabncihjs' -> abbcccccdhijnsty'
def find_embedded_word_sort(words: List[str], string1: str) -> str:
sorted_string1 = sorted(string1)
for word in words:
sorted_word = sorted(word)
sorted_string1_ptr = 0
for ch in sorted_word:
while sorted_string1_ptr < len(sorted_string1) and sorted_string1[sorted_string1_ptr] != ch:
sorted_string1_ptr += 1
sorted_string1_ptr += 1
if sorted_string1_ptr <= len(sorted_string1):
return word
return None
# Using Counter(), with count
def find_embedded_word_counter(words: List[str], string1: str) -> str:
dict_string1 = collections.Counter(string1)
for word in words:
dict_word = collections.Counter(word)
count = 0
for key in dict_word:
if dict_word[key] <= dict_string1[key]:
count += 1
if count == len(dict_word):
return word
return None
# Using Counter(), with boolean
def find_embedded_word_bool(words: List[str], string1: str) -> str:
dict_string1 = collections.Counter(string1)
for word in words:
dict_word = collections.Counter(word)
word_present = True
for key in dict_word:
if dict_word[key] > dict_string1[key]:
word_present = False
break
if word_present:
return word
return None
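# Rough cost comparison (n = number of words, L = max word length, S = len(string1)):
# the sorting variant costs O(S log S) once plus O(L log L + S) per word for the merge
# scan, while the two Counter-based variants cost O(S) once plus roughly O(L) per word.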
class TestProblems(unittest.TestCase):
def test_embedded_words(self):
actual = find_embedded_word_sort(['cat', 'baby', 'dog', 'bird', 'car', 'ax'], 'tabncihjs')
expected = 'cat'
        self.assertEqual(actual, expected)
actual = find_embedded_word_bool(['cat', 'baby', 'dog', 'bird', 'car', 'ax'], 'tabncihjs')
expected = 'cat'
        self.assertEqual(actual, expected)
actual = find_embedded_word_counter(['cat', 'baby', 'dog', 'bird', 'car', 'ax'], 'tabncihjs')
expected = 'cat'
        self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
01o91939/leetcode
|
embeddedWord.py
|
embeddedWord.py
|
py
| 2,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32976566194
|
from myNet import *
def to_one_hot(labels,n_labels):
new_labels = []
for label in labels:
new = [0]*n_labels
new[label] = 1
new_labels.append(new)
return new_labels
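# Example of the helper above: to_one_hot([0, 2], 3) -> [[1, 0, 0], [0, 0, 1]]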
class FATWiSARD:
def __init__(self,nIn,nClasses,nRams,rInit=1):
self.brain = Brain(nIn,nClasses)
self.brain.newMemory(nClasses-1)
b = [self.brain.newNetFromInput() for i in range(nClasses)]
m = [self.brain.newNetFromMemory(i) for i in range(nClasses)]
orders = [b[i].shuffle() for i in range(nClasses)]
for i in range(nClasses):
m[i].shuffle(orders[i])
o = []
for i in range(nClasses):
ramsM = m[i].Split(nRams)
ramsB = b[i].Split(nRams)
w = [ElementWiseNeuron(int(nIn/nRams)) for n in range(nRams)]
r = [CompareNeuron(rInit) for n in range(nRams)]
e = [ElementWiseNeuron(1) for n in range(nRams)]
for j in range(nRams):
ramsM[j].add(w[j])
ramsB[j].add(w[j])
ramsB[j].add(r[j],ramsM[j])
ramsB[j].add(e[j])
o.append(Sum(*ramsB))
self.brain.setFinal(ramsB[0])
o[0].Join(*o[1:])
o[0].toProb()
self.brain.setFinal(o[0])
self.brain.start()
def train(self,inp,label):
self.brain.Memorys[label].add(inp)
def predict(self,inp):
return self.brain.predict(inp)
def optimize(self,inp,out,learning_rate,epochs=1,**kwargs):
one_hot = kwargs.get("one_hot",False)
if not one_hot:
out = to_one_hot(out,self.brain.nOut)
self.brain.optimize(inp,out,learning_rate,epochs,log=kwargs.get("log",None))
|
Alantlb/FAT-WiSARD
|
FATWiSARD.py
|
FATWiSARD.py
|
py
| 1,771 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8101746211
|
# coding=utf-8
import streamlit as st
class selectValues():
def __init__(self):
        self.points = ['腕', '肘', '膝', '頭', '投げ手', '足']  # arm, elbow, knee, head, throwing hand, foot
        self.eval_kinds = ['パフォーマンス観点', '怪我観点']  # performance perspective, injury perspective
        self.timings = ['投げ始め', 'リリース時', '投げ終わり']  # start of throw, at release, end of throw
        self.evaluates = ['○', '×']  # pass / fail marks
        self.total_evaluates = ['未評価', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']  # '未評価' = not yet rated
def set_slider():
DEFAULT_WIDTH = 60
width = st.slider(
label="", min_value=0, max_value=100, value=DEFAULT_WIDTH, format="%d%%"
)
width = max(width, 0.01)
side = max((100 - width) / 2, 0.01)
_, container, _ = st.columns([side, width, side])
return container
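# Usage sketch (not part of the original module, and assuming the returned column
# supports the usual Streamlit element API):
#   container = set_slider()
#   container.image("pitch_frame.png")  # "pitch_frame.png" is a hypothetical file name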
|
ys201810/baseball_scoring_work
|
src/utils.py
|
utils.py
|
py
| 743 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41211516987
|
import sys
import numpy as np
import pylab as plt
#
# read overview file
#----------------------------------------
def read_overview_file( fbase, NT ):
tmprtr = np.zeros( (NT) )
xH1 = np.zeros( (NT) )
xH2 = np.zeros( (NT) )
xHe1 = np.zeros( (NT) )
xHe2 = np.zeros( (NT) )
xHe3 = np.zeros( (NT) )
fname = fbase+'.ovr'
f = open( fname, 'r' )
ovr_header = f.readline().split('\t')
ovr_lines = f.readlines()
f.close()
for i,line in enumerate(ovr_lines):
ovr_lines[i] = ovr_lines[i].split('\t')
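    # In the .ovr output, data rows alternate with grid-delimiter rows (rows that split
    # into a single tab-field are skipped below), so the grid index is i // 2.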
for i,line in enumerate(ovr_lines):
#print line
if len(line) != 1:
#print '**', i/2, line[1], line[2]
            ii = i // 2
tmprtr[ii] = line[1]
xH1[ii] = line[6]
xH2[ii] = line[7]
xHe1[ii] = line[8]
xHe2[ii] = line[9]
xHe3[ii] = line[10]
return 10**tmprtr, 10**xH1, 10**xH2, 10**xHe1, 10**xHe2, 10**xHe3
#
# read continuum file
#----------------------------------------
def read_continuum_file( fbase ):
fname = fbase+'.con'
f = open( fname, 'r' )
dum = f.readline()
break_check = ''
E_list = []
I_list = []
while break_check != '#':
line = f.readline()
break_check=line[0]
line_bits = line.split()
if break_check != '#':
E_list.append( np.float(line_bits[0]) )
I_list.append( np.float(line_bits[1]) )
#print line.split()
E_Ry = np.array( E_list )
I_nu = np.array( I_list )
return E_Ry, I_nu
#
# read grid file
#----------------------------------------
def read_grid_file( fbase ):
fname = fbase+'.grd'
f = open( fname, 'r' )
grd_header = f.readline().split('\t')
grd_lines = f.readlines()
f.close()
NT = len(grd_lines)
grd_keys = []
consT = {}
for line in grd_lines:
split_line = line.split('\t')
key = 'grid'+split_line[0]
grd_keys.append( key )
consT[key] = split_line[6]
return NT
#
# read cooling file
#----------------------------------------
def read_cooling_file( fbase, NT ):
tmprtr = np.zeros( (NT) )
ctot = np.zeros( (NT) )
cH = np.zeros( (NT) )
cHe = np.zeros( (NT) )
ccomp = np.zeros( (NT) )
ceeff = np.zeros( (NT) )
cFFcm = np.zeros( (NT) )
# chvFB = np.zeros( (NT) )
# cH2p = np.zeros( (NT) )
# cHDro = np.zeros( (NT) )
# cH2ln = np.zeros( (NT) )
# cHdfb = np.zeros( (NT) )
# cCTsC = np.zeros( (NT) )
# cH2cX = np.zeros( (NT) )
# cdust = np.zeros( (NT) )
# cmolecule = np.zeros( (NT) )
fname = fbase+'.cool'
f = open( fname, 'r' )
cool_header = f.readline().split('\t')
cool_lines = f.readlines()
f.close()
for i,line in enumerate(cool_lines):
cool_lines[i] = cool_lines[i].split('\t')
for i,line in enumerate(cool_lines):
#print line
if len(line) != 1:
#print '**', i/2, line[1], line[2]
            ii = i // 2
tmprtr[ii] = line[1]
ctot[ii] = line[2]
cH[ii] = line[3]
cHe[ii] = line[4]
ccomp[ii] = line[44]
ceeff[ii] = line[43]
cFFcm[ii] = line[41]
#chvFB[ii] = line[42]
#cH2p[ii] = line[40]
#cHDro[ii] = line[39]
#cH2ln[ii] = line[38]
#cHdfb[ii] = line[37]
#cCTsC[ii] = line[36]
#cH2cX[ii] = line[35]
#cdust[ii] = line[34]
#cmolecule[ii] = line[33]
# this line describes the grid number
else:
key = line[0].split('--')[1].strip()
#print 'key = ', key
return tmprtr, ctot, cH, cHe, ccomp, ceeff, cFFcm
#
# read heating file
#----------------------------------------
def read_heating_file( fbase, NT ):
tmprtr = np.zeros( (NT) )
htot = np.zeros( (NT) )
ctot = np.zeros( (NT) )
hH1 = np.zeros( (NT) )
hHe1 = np.zeros( (NT) )
hHe2 = np.zeros( (NT) )
fname = fbase+'.heat'
f = open( fname, 'r' )
heat_header = f.readline().split('\t')
heat_lines = f.readlines()
f.close()
for i,line in enumerate(heat_lines):
heat_lines[i] = heat_lines[i].split('\t')
for i,line in enumerate(heat_lines):
#print line
if len(line) != 1:
#print '**', i/2, line[1], line[2]
            ii = i // 2
tmprtr[ii] = line[1]
htot[ii] = line[2]
ctot[ii] = line[3]
for i,entry in enumerate(line):
if entry == 'H 1':
hH1[ii] = np.float(line[i+1]) * htot[ii]
if entry == 'He 1':
hHe1[ii] = np.float(line[i+1]) * htot[ii]
if entry == 'He 2':
hHe2[ii] = np.float(line[i+1]) * htot[ii]
#hH1[ii] = np.float(line[5]) * htot[ii]
#hHe1[ii] = np.float(line[7]) * htot[ii]
#hHe2[ii] = np.float(line[9]) * htot[ii]
return tmprtr, htot, ctot, hH1, hHe1, hHe2
nH = 10**(-2)
fig = plt.figure( figsize=(20,10) )
ax1 = plt.subplot2grid( (2,4), (0, 0) )
ax2 = plt.subplot2grid( (2,4), (1, 0) )
ax3 = plt.subplot2grid( (2,4), (0, 1) )
ax4 = plt.subplot2grid( (2,4), (1, 1) )
ax5 = plt.subplot2grid( (2,4), (0, 2) )
ax6 = plt.subplot2grid( (2,4), (1, 2) )
ax7 = plt.subplot2grid( (2,4), (0, 3) )
ax8 = plt.subplot2grid( (2,4), (1, 3) )
# primordial gas cooling
#----------------------------------------------------------
fbase = 'primordial_pce'
NT = read_grid_file( fbase )
tmprtr, ctot, cH, cHe, ccomp, ceeff, cFFcm = read_cooling_file( fbase, NT )
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax1.loglog( tmprtr, htot / nH**2,
color='red', lw=3.0, ls='--', label=r'$\mathcal{H}$' )
ax1.loglog( tmprtr, ctot / nH**2,
color='blue', lw=3.0, ls='-', label=r'$\Lambda$' )
ax1.loglog( tmprtr, np.abs(ctot-htot) / nH**2,
color='black', ls='--', lw=3.0,
label=r'$|\Lambda - \mathcal{H}|$' )
fbase = 'primordial_pce_18'
NT = read_grid_file( fbase )
tmprtr, ctot, cH, cHe, ccomp, ceeff, cFFcm = read_cooling_file( fbase, NT )
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax1.loglog( tmprtr, htot / nH**2, color='red', ls='--', lw=1.0 )
ax1.loglog( tmprtr, ctot / nH**2, color='blue', lw=1.0, ls='-' )
ax1.loglog( tmprtr, np.abs(ctot-htot) / nH**2, color='black', lw=1.0, ls='--' )
ax1.set_xlim( 1.0e4/2, 1.0e5 )
ax1.set_ylim( 1.0e-25, 1.0e-21 )
#ax1.set_xlabel( 'T [K]', fontsize=20 )
ax1.set_xticklabels( [] )
ax1.set_ylabel( r'$\Lambda / n_{\rm H}^2$', fontsize=20 )
# solar gas cooling
#----------------------------------------------------------
fbase = 'solar_pce'
NT = read_grid_file( fbase )
tmprtr, ctot, cH, cHe, ccomp, ceeff, cFFcm = read_cooling_file( fbase, NT )
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax2.loglog( tmprtr, htot / nH**2, color='red', lw=3.0, ls='--' )
ax2.loglog( tmprtr, ctot / nH**2, color='blue', lw=3.0, ls='-' )
ax2.loglog( tmprtr, np.abs(ctot-htot) / nH**2, color='black', ls='--', lw=3.0 )
fbase = 'solar_pce_18'
NT = read_grid_file( fbase )
tmprtr, ctot, cH, cHe, ccomp, ceeff, cFFcm = read_cooling_file( fbase, NT )
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax2.loglog( tmprtr, htot / nH**2, color='red', ls='--', lw=1.0 )
ax2.loglog( tmprtr, ctot / nH**2, color='blue', lw=1.0, ls='-' )
ax2.loglog( tmprtr, np.abs(ctot-htot) / nH**2, color='black', lw=1.0, ls='--' )
ax2.set_xlim( 1.0e4/2, 1.0e5 )
ax2.set_ylim( 1.0e-25, 1.0e-21 )
ax2.set_xlabel( 'T [K]', fontsize=20 )
ax2.set_ylim( 1.0e-25, 1.0e-21 )
ax2.set_ylabel( r'$\Lambda / n_{\rm H}^2$', fontsize=20 )
# ionization fractions primordial
#----------------------------------------------------------
fbase = 'primordial_pce'
tmprtr, xH1, xH2, xHe1, xHe2, xHe3 = read_overview_file( fbase, NT )
ax3.loglog( tmprtr, xH1, lw=3.0, color='red', ls='-' )
ax3.loglog( tmprtr, xH2, lw=3.0, color='red', ls='--' )
ax3.loglog( tmprtr, xHe1, lw=3.0, color='blue', ls='-' )
ax3.loglog( tmprtr, xHe2, lw=3.0, color='blue', ls='--' )
ax3.loglog( tmprtr, xHe3, lw=3.0, color='blue', ls=':' )
fbase = 'primordial_pce_18'
tmprtr, xH1, xH2, xHe1, xHe2, xHe3 = read_overview_file( fbase, NT )
ax3.loglog( tmprtr, xH1, lw=1.0, color='red', ls='-' )
ax3.loglog( tmprtr, xH2, lw=1.0, color='red', ls='--' )
ax3.loglog( tmprtr, xHe1, lw=1.0,color='blue', ls='-' )
ax3.loglog( tmprtr, xHe2, lw=1.0,color='blue', ls='--' )
ax3.loglog( tmprtr, xHe3, lw=1.0,color='blue', ls=':' )
ax3.set_xlim( 1.0e4/2, 1.0e5 )
ax3.set_ylim( 1.0e-5, 2.0 )
ax3.set_xticklabels( [] )
ax3.set_ylabel( 'x' )
# ionization fractions primordial
#----------------------------------------------------------
fbase = 'solar_pce'
tmprtr, xH1, xH2, xHe1, xHe2, xHe3 = read_overview_file( fbase, NT )
ax4.loglog( tmprtr, xH1, lw=3.0, color='red', ls='-', label='HI' )
ax4.loglog( tmprtr, xH2, lw=3.0, color='red', ls='--', label='HII' )
ax4.loglog( tmprtr, xHe1, lw=3.0, color='blue', ls='-', label='HeI' )
ax4.loglog( tmprtr, xHe2, lw=3.0, color='blue', ls='--', label='HeII' )
ax4.loglog( tmprtr, xHe3, lw=3.0, color='blue', ls=':', label='HeIII' )
fbase = 'solar_pce_18'
tmprtr, xH1, xH2, xHe1, xHe2, xHe3 = read_overview_file( fbase, NT )
ax4.loglog( tmprtr, xH1, lw=1.0, color='red', ls='-' )
ax4.loglog( tmprtr, xH2, lw=1.0, color='red', ls='--' )
ax4.loglog( tmprtr, xHe1, lw=1.0,color='blue', ls='-' )
ax4.loglog( tmprtr, xHe2, lw=1.0,color='blue', ls='--' )
ax4.loglog( tmprtr, xHe3, lw=1.0,color='blue', ls=':' )
ax4.set_xlim( 1.0e4/2, 1.0e5 )
ax4.set_ylim( 1.0e-5, 2.0 )
ax4.set_xlabel( 'T [K]', fontsize=20 )
ax4.set_ylabel( 'x' )
# spectrum - continuum
#----------------------------------------------------------
fbase = 'primordial_pce'
E_Ry, I_nu = read_continuum_file( fbase )
ax5.loglog( E_Ry, I_nu, color='black', lw=3.0, ls='-' )
ax5.set_xlim( 1.0e-2, 1.0e2 )
ax5.set_ylim( 1.0e-7, 1.0e-2 )
fbase = 'primordial_pce_18'
E_Ry, I_nu = read_continuum_file( fbase )
ax5.loglog( E_Ry, I_nu, color='black', lw=1.0, ls='-' )
ax5.set_xlim( 2.0e-1, 5.0e1 )
ax5.set_ylim( 1.0e-8, 2.0e-3 )
ax5.set_xticklabels( [] )
ax5.set_ylabel( r'$4 \pi \, \nu \, J_{\nu}$', fontsize=20 )
# spectrum - continuum
#----------------------------------------------------------
fbase = 'solar_pce'
E_Ry, I_nu = read_continuum_file( fbase )
ax6.loglog( E_Ry, I_nu, color='black', lw=3.0, ls='-' )
ax6.set_xlim( 1.0e-2, 1.0e2 )
ax6.set_ylim( 1.0e-7, 1.0e-2 )
fbase = 'solar_pce_18'
E_Ry, I_nu = read_continuum_file( fbase )
ax6.loglog( E_Ry, I_nu, color='black', lw=1.0, ls='-' )
ax6.set_xlim( 2.0e-1, 5.0e1 )
ax6.set_ylim( 1.0e-8, 2.0e-3 )
ax6.set_xlabel( 'E [Ry]' )
ax6.set_ylabel( r'$4 \pi \, \nu \, J_{\nu}$', fontsize=20 )
# heating breakdown
#----------------------------------------------------------
fbase = 'primordial_pce'
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax7.loglog( tmprtr, htot / nH**2,
color='black', lw=3.0, ls='-', label=r'$\mathcal{H}$' )
ax7.loglog( tmprtr, hH1 / nH**2,
color='red', lw=3.0, ls='-', label=r'${\rm HI}$' )
ax7.loglog( tmprtr, hHe1 / nH**2,
color='blue', lw=3.0, ls='-', label=r'${\rm HeI}$' )
ax7.loglog( tmprtr, hHe2 / nH**2,
color='blue', lw=3.0, ls='--', label=r'${\rm HeII}$' )
fbase = 'primordial_pce_18'
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax7.loglog( tmprtr, htot / nH**2,
color='black', lw=1.0, ls='-', label=r'$\mathcal{H}$' )
ax7.loglog( tmprtr, hH1 / nH**2,
color='red', lw=1.0, ls='-', label=r'${\rm HI}$' )
ax7.loglog( tmprtr, hHe1 / nH**2,
color='blue', lw=1.0, ls='-', label=r'${\rm HeI}$' )
ax7.loglog( tmprtr, hHe2 / nH**2,
color='blue', lw=1.0, ls='--', label=r'${\rm HeII}$' )
ax7.set_xlim( 1.0e4/2, 1.0e5 )
ax7.set_xticklabels( [] )
ax7.set_ylim( 1.0e-27, 5.0e-23 )
fbase = 'solar_pce'
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax8.loglog( tmprtr, htot / nH**2,
color='black', lw=3.0, ls='-', label=r'$\mathcal{H}$' )
ax8.loglog( tmprtr, hH1 / nH**2,
color='red', lw=3.0, ls='-', label=r'${\rm HI}$' )
ax8.loglog( tmprtr, hHe1 / nH**2,
color='blue', lw=3.0, ls='-', label=r'${\rm HeI}$' )
ax8.loglog( tmprtr, hHe2 / nH**2,
color='blue', lw=3.0, ls='--', label=r'${\rm HeII}$' )
fbase = 'solar_pce_18'
tmprtr, htot, ctot, hH1, hHe1, hHe2 = read_heating_file( fbase, NT )
ax8.loglog( tmprtr, htot / nH**2,
color='black', lw=1.0, ls='-', label=r'$\mathcal{H}$' )
ax8.loglog( tmprtr, hH1 / nH**2,
color='red', lw=1.0, ls='-', label=r'${\rm HI}$' )
ax8.loglog( tmprtr, hHe1 / nH**2,
color='blue', lw=1.0, ls='-', label=r'${\rm HeI}$' )
ax8.loglog( tmprtr, hHe2 / nH**2,
color='blue', lw=1.0, ls='--', label=r'${\rm HeII}$' )
ax8.set_xlim( 1.0e4/2, 1.0e5 )
ax8.set_xlabel( 'T [K]', fontsize=20 )
ax8.set_ylim( 1.0e-26, 5.0e-23 )
xtxt = 0.1
ytxt = 0.85
ax1.text( xtxt, ytxt, 'primordial - HM12', horizontalalignment='left',
fontsize=20, verticalalignment='center', transform = ax1.transAxes )
ax2.text( xtxt, ytxt, 'solar - HM12', horizontalalignment='left',
fontsize=20, verticalalignment='center', transform = ax2.transAxes )
ax1.legend(ncol=3, fontsize=12)
ax4.legend(ncol=2, fontsize=12, loc='lower left')
plt.tight_layout()
|
galtay/rabacus
|
cloudy/cooling/read_cloudy.py
|
read_cloudy.py
|
py
| 13,599 |
python
|
en
|
code
| 4 |
github-code
|
6
|
3504372122
|
#!/usr/bin/python3
"""This is the square class """
class Square:
"""This is an empty Square class
"""
def __init__(self, size=0):
"""This is the initilization function
it has a private size member
Args:
size: size of square
"""
if (not isinstance(size, int)):
raise TypeError("size must be an integer")
elif (size < 0):
raise ValueError("size must be >= 0")
self.__size = size
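# Hypothetical usage sketch (not part of the original file), illustrating the
# validation behaviour of the Square class above:
#     Square(3)        # OK, size stored in the private __size attribute
#     Square(-1)       # raises ValueError("size must be >= 0")
#     Square("three")  # raises TypeError("size must be an integer")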
|
MATRIX30/alx-higher_level_programming
|
0x06-python-classes/2-square.py
|
2-square.py
|
py
| 484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70211358588
|
import json
import glob
import os
import re
import collections.abc
import yaml
from yaml.parser import ParserError, ScannerError
from saddlebags import exceptions
SUPPORTED_FILE_TYPES = ['json', 'yaml', 'yml']
class Saddlebag(collections.abc.MutableMapping):
"""
Provides access to the contents of JSON/YAML configuration
files using standard dictionary style syntax.
"""
def __init__(self,
configuration_locations: list=None,
                 strict: bool=True):
"""
        The constructor creates a top-level key for each
configuration file found in the directories specified by
a list of environment variables.
Additionally read/write access to environment variables
is available via the `env` object attribute.
Args:
configuration_locations: List of environment variables
which point to directories containing configuration files.
strict: If True, instances will raise an
exception if requested data is not present.
"""
self.strict = strict
self.env = os.environ
self._data = dict()
if not configuration_locations:
configuration_locations = []
# Obtain list of all support configuration files.
configuration_files = (
self._configuration_files(configuration_locations))
for configuration_file in configuration_files:
key_name = re.search(
r"([-_A-Za-z0-9]+)\.(json|yaml|yml|conf)",
configuration_file).group(1)
self._check_for_name_collision(key_name)
self._load_configuration_file(key_name, configuration_file)
def __getitem__(self, key: str):
if self.strict:
try:
return self._data[key.lower()]
except KeyError:
raise KeyError(
"The requested key '{}' does not exist. This most likely "
"indicates that you anticipated a configuration file "
"being loaded that actually hasn't been.".format(key))
return self._data.get(key.lower())
def __setitem__(self, key: str, value):
self._data[key.lower()] = value
def __delitem__(self, key):
del self._data[key.lower()]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __repr__(self):
return str(self._data)
def _configuration_files(self, config_files_locations):
"""
Identify all configuration files in a given location.
Returns:
A list containing paths to configuration files.
Raises:
ValueError: When a non-existent ENV_VAR is referenced.
"""
configuration_files = list()
for location in config_files_locations:
try:
configuration_files.extend(
[file for file in glob.glob(os.environ[location] + '/*')
if file.rpartition('.')[2] in SUPPORTED_FILE_TYPES])
except KeyError:
raise ValueError(
'The environment variable specified '
'by the client ({}) for use by '
'the constructor does not exist '
'on the system.'.format(location))
return configuration_files
def _check_for_name_collision(self, key):
"""
Ensure that a given element key is not already present on the
object.
Args:
key: The key to evaluate.
Raises:
            DuplicateConfigurationFile: If another configuration file
of the same name has already been loaded onto the file.
"""
try:
existing_key = self[key]
except KeyError:
existing_key = None
if existing_key:
raise exceptions.DuplicateConfigurationFile(
"Two configuration files share the following name "
"{}. This is not allowed.".format(key))
def _load_configuration_file(self, attribute_name, configuration_file):
with open(configuration_file) as configuration_data:
            file_extension = configuration_file.rpartition('.')[2].lower()
# JSON Loading
if file_extension == 'json':
try:
self.update(
{attribute_name: json.load(configuration_data)})
except ValueError:
raise exceptions.MalformedConfigurationFile(
"The configuration file, {}, contains "
"syntax errors.".format(configuration_file))
# YAML Loading
elif file_extension in ['yaml', 'yml']:
try:
results = list(yaml.load_all(configuration_data))
except (ParserError, ScannerError):
raise exceptions.MalformedConfigurationFile(
"The configuration file, {}, contains "
"syntax errors.".format(configuration_file))
else:
if len(results) > 1:
self.update({attribute_name: results})
else:
self.update({attribute_name: results[0]})
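# Hypothetical usage sketch (not part of the original module). Assuming the
# package exposes Saddlebag at the top level and an environment variable
# APP_CONFIG points at a directory containing app.yaml, that file's contents
# become available under the lower-cased "app" key:
#     bag = Saddlebag(configuration_locations=["APP_CONFIG"])
#     bag["app"]            # parsed contents of app.yaml
#     bag.env["HOME"]       # read/write access to os.environ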
|
eikonomega/saddlebags
|
saddlebags/saddlebag.py
|
saddlebag.py
|
py
| 5,393 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33502453233
|
# encoding: utf-8
"""
"""
__author__ = 'Richard Smith'
__date__ = '31 Jul 2020'
__copyright__ = 'Copyright 2018 United Kingdom Research and Innovation'
__license__ = 'BSD - see LICENSE file in top-level package directory'
__contact__ = '[email protected]'
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from cci_tagger.facets import Facets
import json
class Command(BaseCommand):
help = 'Downloads vocabs from vocab server to json file'
def handle(self, *args, **options):
facets = Facets()
with open(settings.VOCAB_CACHE_FILE,'w') as writer:
json.dump(facets.to_json(), writer)
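# Usage note (added, not part of the original file): as a Django management
# command this module is invoked by its file name, e.g.
#     python manage.py retrieve_vocab_cache
# assuming settings.VOCAB_CACHE_FILE points at a writable JSON path.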
|
cedadev/archive-opensearch
|
django_opensearch/management/commands/retrieve_vocab_cache.py
|
retrieve_vocab_cache.py
|
py
| 681 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16000963484
|
import datetime
import ipaddress
import unittest
from typing import Any, Optional
from dataclasses import dataclass
from podman import api
class ParseUtilsTestCase(unittest.TestCase):
def test_parse_repository(self):
@dataclass
class TestCase:
name: str
input: Any
expected: Optional[str]
cases = [
TestCase(name="empty str", input="", expected=("", None)),
TestCase(
name="name",
input="quay.io/libpod/testimage",
expected=("quay.io/libpod/testimage", None),
),
TestCase(
name="@digest",
input="quay.io/libpod/testimage@71f1b47263fc",
expected=("quay.io/libpod/testimage", "71f1b47263fc"),
),
TestCase(
name=":tag",
input="quay.io/libpod/testimage:latest",
expected=("quay.io/libpod/testimage", "latest"),
),
]
for case in cases:
actual = api.parse_repository(case.input)
self.assertEqual(
case.expected,
actual,
f"failed test {case.name} expected {case.expected}, actual {actual}",
)
def test_decode_header(self):
actual = api.decode_header("eyJIZWFkZXIiOiJ1bml0dGVzdCJ9")
self.assertDictEqual(actual, {"Header": "unittest"})
self.assertDictEqual(api.decode_header(None), {})
def test_prepare_timestamp(self):
time = datetime.datetime(2022, 1, 24, 12, 0, 0)
self.assertEqual(api.prepare_timestamp(time), 1643025600)
self.assertEqual(api.prepare_timestamp(2), 2)
self.assertEqual(api.prepare_timestamp(None), None)
with self.assertRaises(ValueError):
api.prepare_timestamp("bad input")
def test_prepare_cidr(self):
net = ipaddress.IPv4Network("127.0.0.0/24")
self.assertEqual(api.prepare_cidr(net), ("127.0.0.0", "////AA=="))
if __name__ == '__main__':
unittest.main()
|
mgorny/podman-py
|
podman/tests/unit/test_parse_utils.py
|
test_parse_utils.py
|
py
| 2,083 |
python
|
en
|
code
| null |
github-code
|
6
|
10422156393
|
from __future__ import annotations
import os
import platform
import re
import subprocess
import typing
from pathlib import Path
from PySide6 import QtCore, QtGui, QtWidgets
import randovania
from randovania import get_data_path
if typing.TYPE_CHECKING:
from collections.abc import Iterator
def map_set_checked(iterable: Iterator[QtWidgets.QCheckBox], new_status: bool):
for checkbox in iterable:
checkbox.setChecked(new_status)
def lock_application(value: bool):
QtWidgets.QApplication.instance().main_window.setEnabled(value)
def _prompt_user_for_file(
window: QtWidgets.QWidget, caption: str, filter: str, dir: str | None = None, new_file: bool = False
) -> Path | None:
"""
Helper function for all `prompt_user_for_*` functions.
:param window:
:param caption:
:param filter:
:param new_file: If false, prompt for an existing file.
:return: A string if the user selected a file, None otherwise
"""
if new_file:
method = QtWidgets.QFileDialog.getSaveFileName
else:
method = QtWidgets.QFileDialog.getOpenFileName
open_result = method(window, caption=caption, dir=dir, filter=filter)
if not open_result or open_result == ("", ""):
return None
return Path(open_result[0])
def _prompt_user_for_directory(
window: QtWidgets.QWidget, caption: str, dir: str | None = None, new_file: bool = False
) -> Path | None:
if new_file:
dialog = QtWidgets.QFileDialog(window)
dialog.setFileMode(QtWidgets.QFileDialog.FileMode.DirectoryOnly)
dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly)
dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
dialog.setDirectory(dir)
if dialog.exec_():
open_result = dialog.selectedFiles()
if not open_result:
return None
return Path(open_result[0])
return None
else:
open_result = QtWidgets.QFileDialog.getExistingDirectory(
window, caption, dir, QtWidgets.QFileDialog.ShowDirsOnly
)
if not open_result or open_result == ("", ""):
return None
return Path(open_result)
def prompt_user_for_vanilla_input_file(
window: QtWidgets.QWidget, extensions: list[str], existing_file: Path | None = None
) -> Path | None:
"""
    Shows a QFileDialog asking the user for a vanilla game file
:param window:
:param extensions:
:param existing_file: An existing file to pre-fill with.
:return: A string if the user selected a file, None otherwise
"""
if extensions and extensions == [""]:
return _prompt_user_for_directory(
window, "Select the vanilla game folder", dir=str(existing_file) if existing_file is not None else None
)
return _prompt_user_for_file(
window,
caption="Select the vanilla game {}.".format("/".join(extensions)),
dir=str(existing_file) if existing_file is not None else None,
filter=";".join(f"*.{ext}" for ext in extensions),
)
def prompt_user_for_output_file(window: QtWidgets.QWidget, default_name: str, extensions: list[str]) -> Path | None:
"""
    Shows a QFileDialog asking the user where to place the output file
:param window:
:param default_name: Name of a file that will be offered by default in the UI.
:param extensions:
:return: A string if the user selected a file, None otherwise
"""
if extensions and extensions == [""]:
return _prompt_user_for_directory(
window, "Where to place the Randomized game directory", dir=default_name, new_file=False
)
return _prompt_user_for_file(
window,
caption="Where to place the Randomized game file.",
dir=default_name,
filter=";".join(f"*.{ext}" for ext in extensions),
new_file=True,
)
def prompt_user_for_output_game_log(window: QtWidgets.QWidget, default_name: str) -> Path | None:
"""
    Shows a QFileDialog asking the user for a Randovania seed log
:param window:
:param default_name:
:return: A string if the user selected a file, None otherwise
"""
from randovania.layout.layout_description import LayoutDescription
return _prompt_user_for_file(
window,
caption="Select a Randovania seed log.",
dir=default_name,
filter=f"Randovania Game, *.{LayoutDescription.file_extension()}",
new_file=True,
)
def prompt_user_for_input_game_log(window: QtWidgets.QWidget) -> Path | None:
"""
    Shows a QFileDialog asking the user for a Randovania seed log
:param window:
:return: A string if the user selected a file, None otherwise
"""
from randovania.layout.layout_description import LayoutDescription
return _prompt_user_for_file(
window,
caption="Select a Randovania seed log.",
filter=f"Randovania Game, *.{LayoutDescription.file_extension()}",
new_file=False,
)
def prompt_user_for_database_file(window: QtWidgets.QWidget) -> Path | None:
"""
Shows an QFileDialog asking the user for a Randovania database file
:param window:
:return: A string if the user selected a file, None otherwise
"""
return _prompt_user_for_file(window, caption="Select a Randovania database file.", filter="*.json")
def prompt_user_for_preset_file(window: QtWidgets.QWidget, new_file: bool, name: str | None = None) -> Path | None:
"""
    Shows a QFileDialog asking the user for a Randovania preset file
:param window:
:param new_file: If it should be an existing file (False) or not.
:return: A path if the user selected a file, None otherwise
"""
from randovania.layout.versioned_preset import VersionedPreset
return _prompt_user_for_file(
window,
caption="Select a Randovania Preset file.",
filter=f"Randovania Preset, *.{VersionedPreset.file_extension()};;All Files (*.*)",
dir=name,
new_file=new_file,
)
def set_default_window_icon(window: QtWidgets.QWidget):
"""
Sets the window icon for the given widget to the default icon
:param window:
:return:
"""
window.setWindowIcon(QtGui.QIcon(os.fspath(randovania.get_icon_path())))
def set_error_border_stylesheet(edit: QtWidgets.QWidget, has_error: bool):
edit.has_error = has_error
if has_error:
edit.setStyleSheet(":enabled { border: 1px solid red; }:disabled { border: 1px solid red; background: #CCC }")
else:
edit.setStyleSheet("")
def set_edit_if_different(edit: QtWidgets.QLineEdit, new_text: str):
"""
Sets the text of the given QLineEdit only if it differs from the current value.
Prevents snapping the user's cursor to the end unnecessarily.
:param edit:
:param new_text:
:return:
"""
if edit.text() != new_text:
edit.setText(new_text)
def set_edit_if_different_text(edit: QtWidgets.QTextEdit, new_text: str):
if edit.toPlainText() != new_text:
edit.setPlainText(new_text)
def get_network_client():
from randovania.gui.lib.qt_network_client import QtNetworkClient
return typing.cast(QtNetworkClient, QtWidgets.QApplication.instance().network_client)
def get_game_connection():
from randovania.game_connection.game_connection import GameConnection
return typing.cast(GameConnection, QtWidgets.QApplication.instance().game_connection)
def show_install_visual_cpp_redist(details: str):
from PySide6 import QtWidgets
download_url = "https://aka.ms/vs/16/release/vc_redist.x64.exe"
support_url = "https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads"
box = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Critical,
"Unable to load Dolphin backend",
"Please install the latest "
f"<a href='{download_url}'>Microsoft Visual C++ Redistributable</a>.<br /><br />"
f"For more details, see <a href='{support_url}'>Microsoft's webpage</a>.",
QtWidgets.QMessageBox.Ok,
)
set_default_window_icon(box)
box.setDetailedText(details)
box.exec_()
def set_clipboard(text: str):
from PySide6 import QtWidgets
QtWidgets.QApplication.clipboard().setText(text)
class FallbackDialog(typing.NamedTuple):
title: str
text: str
parent: QtWidgets.QWidget
def open_directory_in_explorer(path: Path, fallback_dialog: FallbackDialog | None = None):
try:
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
subprocess.run(["open", path], check=False)
else:
subprocess.run(["xdg-open", path], check=False)
except OSError:
if fallback_dialog is None:
raise
else:
box = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Icon.Information,
fallback_dialog.title,
fallback_dialog.text,
QtWidgets.QMessageBox.StandardButton.Ok,
fallback_dialog.parent,
)
box.setTextInteractionFlags(QtCore.Qt.TextInteractionFlag.TextSelectableByMouse)
box.show()
def set_icon_data_paths(label: QtWidgets.QLabel):
image_pattern = re.compile('<img src="data/(.*?)"/>')
repl = rf'<img src="{get_data_path().as_posix()}/\g<1>"/>'
new_text = image_pattern.sub(repl, label.text())
label.setText(new_text)
|
randovania/randovania
|
randovania/gui/lib/common_qt_lib.py
|
common_qt_lib.py
|
py
| 9,466 |
python
|
en
|
code
| 165 |
github-code
|
6
|
72470151548
|
import cv2
import numpy as np
import os
import zipfile
from show import blob_imagem,alturaXlargura
from work import deteccoes, funcoes_imagem
from drive import driveFile
if not os.path.exists("modelo.zip"):
"""
Verifica se o modelo já se encontra no diretório
se não se encontra no diretória, então baixa o mesmo do link a baixo
se o modelo já estiver baixado, não faz nada
"""
yolov4URL = "https://drive.google.com/u/0/uc?id=1kPKs0ZlEK5O_WbbTGSbiI1A3JI8C6UHc&export=download"
driveFile(yolov4URL,"modelo.zip")
zip_object = zipfile.ZipFile(file="modelo.zip", mode='r')
zip_object.extractall('./')
zip_object.close()
print("Terminei de extrair")
try:
"""
Tenta realizar o processamento da imagem, para tanto segue os seguintes passosa
C
"""
labelsPath = os.path.sep.join(["cfg", "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
configPath = os.path.sep.join(["cfg", "yolov4.cfg"])
weightsPath = "yolov4.weights"
net = cv2.dnn.readNet(configPath, weightsPath)
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
ln = net.getLayerNames()
#print("Todas as camadas (layers):")
#print(ln)
#print("Total: "+ str(len(ln)))
#print("Camadas de saída: ")
#print(net.getUnconnectedOutLayers())
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
#print(ln)
imagePath = os.path.sep.join(['imagens', "cachorros02.jpg"])
imagem = cv2.imread(imagePath)
net, imagem, layerOutputs = blob_imagem(net, imagem,ln)
threshold = 0.5
threshold_NMS = 0.3
caixas = []
confiancas = []
IDclasses = []
H,W = alturaXlargura(imagem)
for output in layerOutputs:
for detection in output:
caixas, confiancas, IDclasses = deteccoes(detection, threshold, caixas, confiancas, IDclasses,H,W)
objs = cv2.dnn.NMSBoxes(caixas, confiancas, threshold, threshold_NMS)
print("\nObjetos detectados: " + str(len(objs)))
if len(objs) > 0:
for i in objs.flatten():
imagem, x, y, w, h = funcoes_imagem(imagem, i, confiancas, caixas, COLORS, LABELS,IDclasses)
objeto = imagem[y:y + h, x:x + w]
name_img = os.path.sep.join(["resultados","Teste1.jpg"])
print(name_img)
#name_img = 'teste1.jpg'
cv2.imwrite(name_img, imagem)
except Exception as exc:
    print("An error occurred:", exc)
|
mauriciobenjamin700/IC_V2
|
YOLO/experimentos/teste1/main.py
|
main.py
|
py
| 2,342 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
22252725239
|
import numpy as np
import optuna
import pandas as pd
import xgboost as xgb
from sklearn.metrics import f1_score, precision_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
def objective(trial, df, y):
params = {
'max_depth': trial.suggest_int('max_depth', 3, 10),
'subsample': trial.suggest_uniform('subsample', 0.5, 1),
'learning_rate': trial.suggest_uniform('learning_rate', 1e-5, 1),
'gamma': trial.suggest_loguniform('gamma', 1e-8, 1e2),
'lambda': trial.suggest_loguniform('lambda', 1e-8, 1e2),
'alpha': trial.suggest_loguniform('alpha', 1e-8, 1e2)
}
kf = StratifiedKFold(n_splits=5, random_state=15, shuffle=True)
y_hats = []
y_tests = []
for train_index, test_index in kf.split(df, y):
X_train, X_test = df.iloc[train_index], df.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
model = xgb.XGBClassifier(**params)
model.fit(X_train, y_train)
y_hats += model.predict(X_test).tolist()
y_tests += y_test.tolist()
return f1_score(y_tests, y_hats)
X = pd.read_csv("X.csv")
X = X.set_index("token_address")
labels = pd.read_csv("Labelling/labeled_list.csv", index_col="token_address")
X = X.merge(labels['label'], left_index=True, right_index=True)
X = X.reset_index()
df = X.drop_duplicates(subset=['token_address'])
X = X.set_index("token_address")
lock_features = pd.read_csv("../data/token_lock_features.csv", index_col="token_address")
X = X.merge(lock_features, how='left', left_index=True, right_index=True)
optuna.logging.set_verbosity(optuna.logging.WARNING)
ids = []
total_probs = []
total_targets = []
skfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2)
for fold, (t, v) in enumerate(skfolds.split(df['token_address'], df['label'])):
ids_train = df['token_address'].iloc[t]
df_train = X.loc[ids_train]
ids_test = df['token_address'].iloc[v]
df_test = X.loc[ids_test]
X_train, y_train = df_train.drop(["label", "eval_block"], axis=1), df_train['label']
X_test, y_test = df_test.drop(["label", "eval_block"], axis=1), df_test['label']
columns = X_train.columns
func = lambda trial: objective(trial, X_train.copy(), y_train.copy())
study = optuna.create_study(direction='maximize')
study.optimize(func, n_trials=100)
model = xgb.XGBClassifier(**study.best_params)
model.fit(X_train, y_train)
preds_scorings = model.predict_proba(X_test)[:, 1]
preds = model.predict(X_test)
f1 = f1_score(y_test, preds)
sensibilitat = recall_score(y_test, preds)
precisio = precision_score(y_test, preds)
accuracy = accuracy_score(y_test, preds)
print("{},{},{},{},{}".format(accuracy, sensibilitat, precisio, f1, fold))
ids += X_test.index.tolist()
total_probs += preds.tolist()
total_targets += y_test.tolist()
final_df = pd.DataFrame({'ids': ids, 'Pred': total_probs, 'Label': total_targets})\
.to_csv("Results_XGBoost.csv", index=False)
|
lutianzhou001/RegPull
|
ML/optuna_XGBoost.py
|
optuna_XGBoost.py
|
py
| 3,097 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73952569148
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('SVA1StyleSheet.mplstyle')
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Correlation of reserved stars')
parser.add_argument('--tausflask1',
default='/home/dfa/sobreira/alsina/catalogs/FLASK/taus/',
help='Full Path to the taus measurement with flask version 1')
parser.add_argument('--tausflask2',
default='/home/dfa/sobreira/alsina/catalogs/FLASK/taus_g1g2flip/',
help='Full Path to the taus measurement with flask version 2')
parser.add_argument('--zbin', default=4 , type=int,
                        help='redshift bin to plot')
parser.add_argument('--plotspath',
default='/home/dfa/sobreira/alsina/Y3_shearcat_tests/alpha-beta-eta-test/measured_correlations/plots/',
help='location of the plots.')
args = parser.parse_args()
return args
def plotflask(axs, zbin, tausflask, plotspath, color, label):
from src.readfits import read_taus
import numpy as np
ax1, ax2, ax3, ax4, ax5, ax6 = axs
veclist = []
count = 0
for seed in range(1, 401 ):
for ck in range(1, 2):
name = os.path.join(tausflask, 'taus_src-cat_s%d_z%d_ck%d.fits'%(seed,zbin, ck ))
exist = os.path.isfile(name)
if exist:
meanr, taus, covtaus = read_taus(name)
if (np.count_nonzero(taus) == 0):
print("Warning, weird measurement, skipping", name)
else:
veclist.append(np.concatenate(np.c_[taus]))
count +=1
print(count, "FLASK catalogs were read")
meanvec = np.mean(veclist, axis=0)
nrows = len(meanr)
tau0pmean = meanvec[0:nrows]
tau0mmean = meanvec[nrows:2*nrows]
tau2pmean = meanvec[2*nrows:3*nrows]
tau2mmean = meanvec[3*nrows:4*nrows]
tau5pmean = meanvec[4*nrows:5*nrows]
tau5mmean = meanvec[5*nrows:6*nrows]
ranveclist = np.c_[veclist].T
covmat = np.cov(ranveclist)
print('matrix covariance shape', covmat.shape)
sig_tau0p = np.sqrt(np.diag(covmat[0:nrows, 0:nrows]))
sig_tau0m = np.sqrt(np.diag(covmat[nrows:2*nrows, nrows:2*nrows]))
sig_tau2p = np.sqrt(np.diag(covmat[2*nrows:3*nrows, 2*nrows:3*nrows]))
sig_tau2m = np.sqrt(np.diag(covmat[3*nrows:4*nrows, 3*nrows:4*nrows]))
sig_tau5p = np.sqrt(np.diag(covmat[4*nrows:5*nrows, 4*nrows:5*nrows]))
sig_tau5m = np.sqrt(np.diag(covmat[5*nrows:6*nrows, 5*nrows:6*nrows]))
taumeans = [tau0pmean,tau0mmean,tau2pmean,tau2mmean,tau5pmean,tau5mmean ]
sig_taus = [sig_tau0p,sig_tau0m,sig_tau2p,sig_tau2m,sig_tau5p,sig_tau5m ]
ylabels = [r'$\tau_{0+}$', r'$\tau_{0-}$', r'$\tau_{2+}$', r'$\tau_{2-}$', r'$\tau_{5+}$', r'$\tau_{5-}$']
for i, ax in enumerate(axs):
ax.errorbar(meanr,taumeans[i],yerr=sig_taus[i],color=color, ls='', marker='.', capsize=2, label=label)
ax.legend(loc='best', fontsize=10)
        ax.set_ylabel(ylabels[i]); ax.set_xlabel(r'$\theta$')
ax.set_xscale('log')
#ax.set_yscale('log')
#ax.set_ylim([ -2.e-6,2.e-6 ])
def main():
import sys; sys.path.append(".")
from src.readfits import read_taus
import numpy as np
args = parse_args()
plotspath = os.path.expanduser(args.plotspath)
try:
if not os.path.exists(plotspath):
os.makedirs(plotspath)
except OSError:
if not os.path.exists(plotspath): raise
figs = []; axs = []; filenames = []
names = ['taus0p', 'taus0m', 'taus2p', 'taus2m' , 'taus5p' , 'taus5m']
for i in range(6):
figaux, axaux = plt.subplots()
figs.append(figaux); axs.append(axaux)
filenames.append(os.path.join(plotspath,'%s_flask_zbin%d%s'%(names[i], args.zbin, '.png') ))
plotflask(axs, args.zbin, args.tausflask1, args.plotspath, 'red', 'Taus flask noflip')
plotflask(axs, args.zbin, args.tausflask2, args.plotspath, 'blue', 'Taus flask g1g2flip')
for i, fig in enumerate(figs):
fig.tight_layout()
fig.savefig(filenames[i], dpi=500)
plt.close(fig)
print(filenames[i], 'Printed!')
if __name__ == "__main__":
main()
|
des-science/Y3_shearcat_tests
|
alpha-beta-eta-test/code/tests/taus_v1v2.py
|
taus_v1v2.py
|
py
| 4,389 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44687752877
|
def order(a):
    """Classify a sequence as 'ascending', 'descending' or 'not sorted'."""
    # Compare each element with its neighbour, so mixed sequences such as
    # [1, 3, 2, 4] are reported as 'not sorted'.
    pairs = list(zip(a, a[1:]))
    if all(x <= y for x, y in pairs):
        return 'ascending'
    if all(x >= y for x, y in pairs):
        return 'descending'
    return 'not sorted'
print(order([10, 5, 4]))          # descending
print(order([6, 20, 160, 420]))   # ascending
print(order([1, 3, 2, 4]))        # not sorted
|
Krimets/python-online-marathon
|
sprint01/task06.py
|
task06.py
|
py
| 397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8280574767
|
from heapq import heapify, heappop, heappush
import collections
'''
Time: O(n) + O(nlogk)
Space: O(n)
'''
class Solution:
def __init__(self, nums, k):
self.heap = []
self.k = k
self.nums = nums
def topk(self):
freq = collections.defaultdict(int)
for num in self.nums:
freq[num] += 1
print(freq)
for key, val in freq.items():
heappush(self.heap, (val, key))
if len(self.heap) > self.k:
heappop(self.heap)
return [x[1] for x in self.heap]
if __name__ == '__main__':
s = Solution([1,1,2,2,2,2,5,5,5,5,5,5,3,3,3], 2)
print(s.topk())
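# Note (added): the defaultdict counts frequencies in O(n) and the min-heap keeps
# only the k most frequent entries (O(n log k)). For the sample input the script
# prints the two most common values:
#     Solution([1, 1, 2, 2, 2, 2, 5, 5, 5, 5, 5, 5, 3, 3, 3], 2).topk()  # -> [2, 5]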
|
gadodia/Algorithms
|
algorithms/Arrays/topkfrequent.py
|
topkfrequent.py
|
py
| 685 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9174130850
|
load(":common/cc/semantics.bzl", "semantics")
load(":common/cc/cc_helper.bzl", "cc_helper")
load(":common/cc/cc_common.bzl", "cc_common")
CcToolchainInfo = cc_common.CcToolchainInfo
TemplateVariableInfo = _builtins.toplevel.platform_common.TemplateVariableInfo
ToolchainInfo = _builtins.toplevel.platform_common.ToolchainInfo
def _impl(ctx):
cc_toolchain = cc_helper.find_cpp_toolchain(ctx, mandatory = ctx.attr.mandatory)
if not cc_toolchain:
return []
make_variables = cc_toolchain.get_additional_make_variables()
cc_provider_make_variables = cc_helper.get_toolchain_global_make_variables(cc_toolchain)
template_variable_info = TemplateVariableInfo(make_variables | cc_provider_make_variables)
toolchain = ToolchainInfo(
cc = cc_toolchain,
cc_provider_in_toolchain = True,
)
return [
cc_toolchain,
toolchain,
template_variable_info,
DefaultInfo(
files = cc_toolchain.get_all_files_including_libc(),
),
]
cc_toolchain_alias = rule(
implementation = _impl,
fragments = ["cpp", "platform"],
attrs = {
"mandatory": attr.bool(default = True),
"_cc_toolchain": attr.label(default = configuration_field(fragment = "cpp", name = "cc_toolchain"), providers = [CcToolchainInfo]),
"_cc_toolchain_type": attr.label(default = "@" + semantics.get_repo() + "//tools/cpp:toolchain_type"),
},
toolchains = cc_helper.use_cpp_toolchain() +
semantics.get_runtimes_toolchain(),
)
|
bazelbuild/bazel
|
src/main/starlark/builtins_bzl/common/cc/cc_toolchain_alias.bzl
|
cc_toolchain_alias.bzl
|
bzl
| 1,537 |
python
|
en
|
code
| 21,632 |
github-code
|
6
|
70267272829
|
import epyk as pk
from epyk.mocks import randoms
# Create a basic report object
page = pk.Page()
page.headers.dev()
# Create JavaScript data
js_data = page.data.js.record(js_code="myData", data=randoms.languages)
# Add a filter object
filter1 = js_data.filterGroup("filter1")
# Add a dropdown box to drive the data changes in the charts
select = page.ui.select([
{"value": 'name', 'name': 'name'},
{"value": 'type', 'name': 'code'},
], options={"empty_selected": False})
# Create HTML charts
bar = page.ui.charts.chartJs.bar(randoms.languages, y_columns=["rating", 'change'], x_axis='name')
pie = page.ui.charts.chartJs.pie(randoms.languages, y_columns=['change'], x_axis='name')
# Add the charts to a row
row = page.ui.row([bar, pie])
# Add a change event on the dropdown to update the charts
select.change([
bar.build(filter1.group().sumBy(['rating', 'change'], select.dom.content), options={"x_axis": select.dom.content}),
pie.build(filter1.group().sumBy(['change'], select.dom.content), options={"x_axis": select.dom.content}),
])
|
epykure/epyk-templates
|
locals/components/quickstart.py
|
quickstart.py
|
py
| 1,052 |
python
|
en
|
code
| 17 |
github-code
|
6
|
71723914428
|
"""
-*- coding: utf-8 -*-
File : basepage.py
Version : 0.1
Author : usrpi
Date :2021/1/4
"""
import logging
import datetime
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Wraps the basic helper functions -- logging, exception handling and screenshots on failure.
# Common code shared by every page object; no business logic lives here.
class BasePage:
    def __init__(self, driver):
        self.driver = driver
    # Wait until an element is visible
    def wait_eleVisible(self, locator, times=30, poll_frequency=0.5, doc=""):
        """
        :param locator: element locator tuple (locate-by type, locate expression)
        :param times: maximum time to wait, in seconds
        :param poll_frequency: polling interval, in seconds
        :param doc: short description used in the failure screenshot name
        :return: the visible WebElement
        """
        logging.info("Waiting for element %s to become visible", locator)
        try:
            # time the wait starts
            start = datetime.datetime.now()
            ele = WebDriverWait(self.driver, times, poll_frequency=poll_frequency).until(
                EC.visibility_of_element_located(locator))
            # time the wait ends
            end = datetime.datetime.now()
            # log how long the wait actually took
            logging.info("Wait finished, waited for: %s", end - start)
            return ele
        except Exception:
            logging.exception("Waiting for the element to become visible failed!!!")
            # take a screenshot
            self.save_screenshot(doc)
            raise
    # Wait until an element exists in the DOM
    def wait_eleExist(self, locator):
        pass
    # Find an element
    def get_ele(self, locator, doc=""):
        logging.info("Finding element: %s", locator)
        try:
            return self.driver.find_element(*locator)
        except Exception:
            logging.exception("Finding the element failed!!!")
            # take a screenshot
            self.save_screenshot(doc)
            raise
    # Click operation
    def click_ele(self, locator, doc=""):
        # find the element
        ele = self.get_ele(locator, doc)
        # perform the click
        logging.info("Clicking element: %s", locator)
        try:
            ele.click()
        except Exception:
            logging.exception("Clicking the element failed!!!")
            # take a screenshot
            self.save_screenshot(doc)
            raise
    # Input operation
    def input_text(self, locator, text, doc=""):
        # find the element
        ele = self.get_ele(locator, doc)
        # type the text
        logging.info("Typing into element: %s", locator)
        try:
            ele.send_keys(text)
        except Exception:
            logging.exception("Typing the text failed!!!")
            # take a screenshot
            self.save_screenshot(doc)
            raise
    # Get an element's text content
    def get_text(self, locator, doc=""):
        # find the element
        ele = self.get_ele(locator, doc)
        # read the text
        try:
            return ele.text
        except Exception:
            logging.exception("Reading the text failed!!!")
            # take a screenshot
            self.save_screenshot(doc)
            raise
    # Get an element's attribute
    def get_attr(self, locator, attr, doc=""):
        # find the element
        ele = self.get_ele(locator, doc)
        # read the attribute
        try:
            return ele.get_attribute(attr)
        except Exception:
            logging.exception("Reading the element attribute failed!!!")
            # take a screenshot
            self.save_screenshot(doc)
            raise
    # still to be added: alert handling
    # still to be added: iframe switching
    # still to be added: file upload
    # still to be added: scrollbar handling
    # still to be added: window switching
    # Screenshot on failure
    def save_screenshot(self, name):
        # screenshot name: module_page_operation_time.png
        path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Output', 'screenshots')
        t = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        file_name = os.path.join(path, "{}_{}.png".format(name, t))
        self.driver.save_screenshot(file_name)
        logging.info("Page screenshot saved, file path: %s", file_name)
|
xianghuanng/futureloan_web
|
Common/basepage.py
|
basepage.py
|
py
| 3,889 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14852879493
|
import logging
from datetime import timedelta
import requests
from django.db import models
from django.utils.six import string_types
from django.utils import timezone
from requests_oauthlib import OAuth2Session
from killboard import app_settings
from killboard.errors import TokenError, IncompleteResponseError
logger = logging.getLogger(__name__)
def _process_scopes(scopes):
if scopes is None:
# support filtering by no scopes with None passed
scopes = []
if not isinstance(scopes, models.QuerySet) and len(scopes) == 1:
# support a single space-delimited string inside a list because :users:
scopes = scopes[0]
# support space-delimited string scopes or lists
if isinstance(scopes, string_types):
scopes = set(scopes.split())
return set(str(s) for s in scopes)
class TokenQueryset(models.QuerySet):
def get_expired(self):
"""
Get all tokens which have expired.
:return: All expired tokens.
:rtype: :class:`esi.managers.TokenQueryset`
"""
max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION)
return self.filter(created__lte=max_age)
def bulk_refresh(self):
"""
Refreshes all refreshable tokens in the queryset.
Deletes any tokens which fail to refresh.
Deletes any tokens which are expired and cannot refresh.
Excludes tokens for which the refresh was incomplete for other reasons.
"""
session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID)
auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET)
incomplete = []
for model in self.filter(refresh_token__isnull=False):
try:
model.refresh(session=session, auth=auth)
logging.debug("Successfully refreshed {0}".format(repr(model)))
except TokenError:
logger.info("Refresh failed for {0}. Deleting.".format(repr(model)))
model.delete()
except IncompleteResponseError:
incomplete.append(model.pk)
self.filter(refresh_token__isnull=True).get_expired().delete()
return self.exclude(pk__in=incomplete)
def require_valid(self):
"""
Ensures all tokens are still valid. If expired, attempts to refresh.
Deletes those which fail to refresh or cannot be refreshed.
:return: All tokens which are still valid.
:rtype: :class:`esi.managers.TokenQueryset`
"""
expired = self.get_expired()
valid = self.exclude(pk__in=expired)
valid_expired = expired.bulk_refresh()
return valid_expired | valid
def require_scopes(self, scope_string):
"""
:param scope_string: The required scopes.
:type scope_string: Union[str, list]
:return: The tokens with all requested scopes.
:rtype: :class:`esi.managers.TokenQueryset`
"""
scopes = _process_scopes(scope_string)
if not scopes:
# asking for tokens with no scopes
return self.filter(scopes__isnull=True)
from .models import Scope
scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True)
if not len(scopes) == len(scope_pks):
# there's a scope we don't recognize, so we can't have any tokens for it
return self.none()
tokens = self.all()
for pk in scope_pks:
tokens = tokens.filter(scopes__pk=pk)
return tokens
def require_scopes_exact(self, scope_string):
"""
:param scope_string: The required scopes.
:type scope_string: Union[str, list]
:return: The tokens with only the requested scopes.
:rtype: :class:`esi.managers.TokenQueryset`
"""
num_scopes = len(_process_scopes(scope_string))
pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter(
scopes__count=num_scopes).values('pk', 'scopes__id')]
return self.filter(pk__in=pks)
def equivalent_to(self, token):
"""
Gets all tokens which match the character and scopes of a reference token
:param token: :class:`esi.models.Token`
:return: :class:`esi.managers.TokenQueryset`
"""
return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter(
models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
class TokenManager(models.Manager):
def get_queryset(self):
"""
Replace base queryset model with custom TokenQueryset
:rtype: :class:`esi.managers.TokenQueryset`
"""
return TokenQueryset(self.model, using=self._db)
class EVEClassManager(models.Manager):
def get_or_create_from_code(self, i_id, json_data, api):
try:
return self.get(id=i_id), False
except self.model.DoesNotExist:
item = self.model(id=i_id)
item.process(json_data, api)
item.save()
return item, True
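# Hypothetical usage sketch (not part of the original module), assuming the
# TokenManager above is attached to the Token model as `objects` and that the
# scope name exists; it shows the intended chaining of the queryset helpers:
#     tokens = (Token.objects.filter(user=request.user)
#               .require_scopes("esi-location.read_location.v1")
#               .require_valid())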
|
DeForce/py_killboard
|
killboard/managers.py
|
managers.py
|
py
| 5,176 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74923808826
|
from app.views import v_menu
from app.controllers import c_input
from app.models import m_search
from app.models import m_players
from app.models import m_list
from app.models import m_tournaments
class Controller:
def main_menu(self):
v_menu.View().main_menu()
self.menu_number = c_input.Input().select_menu_number(3)
# Call player menu
if self.menu_number == 1:
Controller().players_menu()
# Call tournaments menu
elif self.menu_number == 2:
Controller().tournaments_menu()
# Close app
elif self.menu_number == 3:
pass
def players_menu(self):
v_menu.View().players_menu()
self.menu_number = c_input.Input().select_menu_number(5)
        # Call to create a player
if self.menu_number == 1:
player = m_players.Player()
Controller().players_menu()
# Call search player and call modify player
elif self.menu_number == 2:
self.find_player = m_search.Search().player(
c_input.Input().check_input('text', 'Nom ou Prénom: ')
)
if self.find_player == 'None':
Controller().players_menu()
else:
player = m_players.Player(self.find_player)
player.modify(c_input.Input().check_input(
'number', 'Nouveau classement: ')
)
Controller().players_menu()
# Call list player
elif self.menu_number == 3:
v_menu.View().list_menu()
m_list.List().players(c_input.Input().select_menu_number(2))
Controller().players_menu()
# Back to main menu
elif self.menu_number == 4:
Controller().main_menu()
# Close app
elif self.menu_number == 5:
pass
def tournaments_menu(self):
v_menu.View().tournaments_menu()
self.menu_number = c_input.Input().select_menu_number(5)
        # Call to create a tournament
if self.menu_number == 1:
m_tournaments.Tournament()
Controller().tournaments_menu()
        # Search and load a tournament
elif self.menu_number == 2:
self.find_tournament = m_search.Search().tournament(
c_input.Input().check_input('text', 'Nom ou Lieu: ')
)
if self.find_tournament == 'None':
Controller().tournaments_menu()
else:
Controller().current_tournament_menu(self.find_tournament)
        # List tournaments
elif self.menu_number == 3:
m_list.List().tournaments()
Controller().tournaments_menu()
# Back to main menu
elif self.menu_number == 4:
Controller().main_menu()
# Close app
elif self.menu_number == 5:
pass
def current_tournament_menu(self, id):
self.tournament = m_tournaments.Tournament(id)
self.show_menu = v_menu.View().current_tournament_menu(
self.tournament.name, self.tournament.stat,
self.tournament.start, self.tournament.end,
self.tournament.rounds_nbr
)
self.menu_number = c_input.Input().select_menu_number(6)
# Start round
if self.menu_number == 1:
if self.tournament.stat > self.tournament.rounds_nbr:
self.current_tournament_menu(self.tournament.id)
else:
self.tournament.start_rounds()
self.tournament.stat += 1
self.current_tournament_menu(self.tournament.id)
# List players
elif self.menu_number == 2:
v_menu.View().list_menu_t()
self.tournament.list_players(
c_input.Input().select_menu_number(3)
)
self.current_tournament_menu(self.tournament.id)
# List matchs et rounds
elif self.menu_number == 3:
self.tournament.list_rounds()
self.current_tournament_menu(self.tournament.id)
# Show tournament infos
elif self.menu_number == 4:
v_menu.View().list_tournaments(
self.tournament.name, self.tournament.place,
self.tournament.start, self.tournament.end,
self.tournament.rounds_nbr, self.tournament.timing,
self.tournament.description
)
input('\nEntrer pour revenir au menu précédent')
self.current_tournament_menu(self.tournament.id)
# Back to tournament menu
elif self.menu_number == 5:
Controller().tournaments_menu()
# Close app
elif self.menu_number == 6:
pass
|
MaeRiz/OC_P4_Chess
|
app/controllers/c_menu.py
|
c_menu.py
|
py
| 4,766 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8866112441
|
import numpy as np
import cv2 as cv
def bitfield(n):
return [int(digit) for digit in bin(n)[2:]]
def gerar_mensagem(mensagem):
lista = []
for m in mensagem:
val = ord(m)
bits = bitfield(val)
if len(bits) < 8:
for a in range(8-len(bits)):
bits.insert(0, 0)
lista.append(bits)
arr = np.array(lista)
arr = arr.flatten()
return arr
def converter_mensagem(saida):
bits = np.array(saida)
mensagem_out = ''
bits = bits.reshape((int(len(saida)/8), 8))
for b in bits:
sum = 0
for i in range(8):
sum += b[i]*(2**(7-i))
mensagem_out += chr(sum)
return mensagem_out
texto = "Batatinha 123"
terminadorMensagem = "##"
texto += terminadorMensagem
arrayBits = gerar_mensagem(texto)
terminadorEmBits = gerar_mensagem(terminadorMensagem)
terminadorEmBitsString = ""
for val in terminadorEmBits:
terminadorEmBitsString += str(val)
image = cv.imread('teste.png')
listaUltimosDigitosVermelhos = []
# build the list of least-significant bits of the red channel
for row in image:
for pixel in row:
if(pixel[2] % 2 == 0):
listaUltimosDigitosVermelhos.append(0)
else:
listaUltimosDigitosVermelhos.append(1)
# copy the message bits into the start of that list
for i in range(len(arrayBits)):
listaUltimosDigitosVermelhos[i] = arrayBits[i]
# embed the hidden message into the image
i = 0
countLimite = len(arrayBits)
print('comecou')
for row in image:
for pixel in row:
if(i < countLimite):
            # if the value is even and we want to store a 1, add 1 to make it odd
if(pixel[2] % 2 == 0 and listaUltimosDigitosVermelhos[i] == 1):
pixel[2] = pixel[2] + 1
            # if the value is odd and we want to store a 0, subtract 1 to make it even
if(pixel[2] % 2 > 0 and (listaUltimosDigitosVermelhos[i] == 0)):
pixel[2] = pixel[2] - 1
i += 1
else:
break
listaUltimosDigitosVermelhos = []
# rebuild the list of least-significant bits of the red channel
for row in image:
for pixel in row:
# listaUltimosDigitosVermelhos.append(pixel[2])
if(pixel[2] % 2 == 0):
listaUltimosDigitosVermelhos.append(0)
else:
listaUltimosDigitosVermelhos.append(1)
i = 0
mensagem = ""
# recover the hidden message from the list of red-channel bits
for val in listaUltimosDigitosVermelhos:
i += 1
if(mensagem.find(terminadorEmBitsString) >= 0):
print('achou')
break
mensagem += str(val)
mensagemBits = []
for letra in mensagem:
mensagemBits.append(int(letra))
# print('mensagem criptografada', arrayBits)
# print('mensagem criptografada', len(arrayBits))
# print('mensagem encontrada', mensagemBits)
# print('mensagem encontrada', len(mensagemBits))
mensagemConvertida = converter_mensagem(mensagemBits)
mensagemConvertida = mensagemConvertida.replace(terminadorMensagem, '')
print('mensagem convertida: ', mensagemConvertida)
# cv.imshow('top', image)
# cv.waitKey(0)
|
joaofxp/computer-science-univali
|
Python/M2/Trabalho 2/main.py
|
main.py
|
py
| 3,084 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
10132276502
|
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from plotly.subplots import make_subplots
import os
import plotly.io as pio
pio.renderers.default = "browser"
num_of_interviews = [0, 1, 2, 3]
y_list = 3
x_list = 6
current = False
trend = False
# example: communication = 0
specific_aspect = 1
# categories set for the radar plots
categories = ["Value for the customer",
"Value for the producer",
"Risks for the customer",
"Risks for the producer",
"Cost of development",
"Cost of implementation",
"Return of investment",
"Market establishment"
]
# aspect names
aspects = ["Communication",
"Data handling",
"Data driven problem solving",
"Situational context awareness",
"Adaption to different contexts",
"Remote Operation",
"Recommendation and decision support",
"Self-organization and control",
"Predictive acting",
"Continuous improvement",
"Task automation"]
def add_figure_to_plot(values, _categories, name):
"""
This function adds another trace to a plotly figure.
You need to provide values, categories and a plot name.
"""
fig.add_trace(go.Scatterpolar(
r=values,
theta=_categories,
fill=None,
name=name
))
if __name__ == "__main__":
# df gets loaded from csv
df = pd.read_csv("interview0/interview0_current.csv")
# 0 = "Value for the customer"
# 1 = "Value for the producer"
# 2 = "Risks for the customer"
# 3 = "Risks for the producer"
# 4 = "Cost of development"
# 5 = "Cost of implementation"
# 6 = "Return of investment"
# 7 = "Market establishment"
# first criterion row gets selected.
lst0 = list(df.iloc[y_list])
lst0.pop(0)
# print(lst0)
# second criterion row gets selected.
lst1 = list(df.iloc[x_list])
lst1.pop(0)
# print(lst1)
# initial lists for the heatmap
# hl / hm / hh
# ml / mm / mh
# ll / lm / lh
ll = []
lm = []
lh = []
ml = []
mm = []
mh = []
hl = []
hm = []
hh = []
# first and second list get sorted into the initial lists for the heatmap
for i in range(len(lst0)):
value = "input"
if lst0[i] == "l" and lst1[i] == "l":
ll.append(value)
elif lst0[i] == "l" and lst1[i] == "m":
lm.append(value)
elif lst0[i] == "l" and lst1[i] == "h":
lh.append(value)
elif lst0[i] == "m" and lst1[i] == "l":
ml.append(value)
elif lst0[i] == "m" and lst1[i] == "m":
mm.append(value)
elif lst0[i] == "m" and lst1[i] == "h":
mh.append(value)
elif lst0[i] == "h" and lst1[i] == "l":
hl.append(value)
elif lst0[i] == "h" and lst1[i] == "m":
hm.append(value)
elif lst0[i] == "h" and lst1[i] == "h":
hh.append(value)
else:
print(f"Cannot sort value {lst0[i], lst1[i]}, wrong input")
# data list gets created
data = [[len(hl), len(hm), len(hh)],
[len(ml), len(mm), len(mh)],
[len(ll), len(lm), len(lh)]]
# figure gets created
fig = px.imshow(data,
labels=dict(x=categories[x_list], y=categories[y_list], color="Occurrence"),
x=['low', 'medium', 'high'],
y=['high', 'medium', 'low']
)
fig.update_xaxes(side="bottom")
# fig.show()
fig.write_html(f"test_heatmap.html")
# # new figure class gets created
# fig = make_subplots()
#
# interview_count = 0
#
# if current:
# for i in current_data:
# add_figure_to_plot(i[specific_aspect], categories, f"Interview{interview_count}: Current")
# interview_count = interview_count + 1
#
# interview_count = 0
# if trend:
# for i in trend_data:
# add_figure_to_plot(i[specific_aspect], categories, f"Interview{interview_count}: Trend")
# interview_count = interview_count + 1
#
# fig.update_traces(opacity=0.9)
#
# # Plot styling happens here
# fig.update_layout(font_family="Arial", legend=dict(
# title=f"Aspect: {aspects[specific_aspect]}",
# font=dict(size=20),
# orientation="h",
# y=1.1,
# yanchor="bottom",
# x=0.5,
# xanchor="center"
# ),
# template='plotly',
# showlegend=True)
#
# fig.update_layout(
# polar=dict(
# radialaxis=dict(
# visible=True,
# range=[0, 3]
# )),
# )
#
# fig.add_annotation(text='<b>Criterion rating information</b>:<br>high = value 3 <br>medium = value 2<br>low = value 1',
# x=0.9,
# y=0.8,
# bordercolor='black',
# borderwidth=1,
# showarrow=False,
# font=dict(size=15,
# family="Arial"))
#
# # savings html in specific folders
# if current:
# naming = "current.html"
# if trend:
# naming = "trend.html"
# if trend and current:
# naming = "both.html"
# interview_interpretation = "multigraph_interpretation"
# path = os.path.join(os.getcwd(), interview_interpretation)
# if not os.path.isdir(path):
# os.mkdir(path)
# try:
# filename = f"{aspects[specific_aspect] + '_' + str(num_of_interviews) + '_' + naming}"
# # fig.write_image(f"{path}/{filename}.png")
# fig.write_html(f"{path}/{filename}")
# except:
# print(f"{naming} not defined")
|
Fabbochan/master_thesis_figures
|
datavisualization_heatmap.py
|
datavisualization_heatmap.py
|
py
| 5,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9828876779
|
import sys
import os
from django.conf import settings
from django.core.management import execute_from_command_line
from django.conf.urls import url
from django.http import HttpResponse
from django.core.wsgi import get_wsgi_application
DEBUG = os.environ.get('DEBUG', 'on') == 'on'
print(DEBUG)
SECRET_KEY = os.environ.get('SECRET_KEY', os.urandom(32))
print(SECRET_KEY)
settings.configure(
DEBUG = DEBUG,
SECRET_KEY = SECRET_KEY,
ROOT_URLCONF = __name__,
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
)
def index(request):
return HttpResponse('Hello World')
urlpatterns = (
    url(r'^$', index, name='homepage'),
)
application = get_wsgi_application()
if __name__ == '__main__':
execute_from_command_line(sys.argv)
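# Usage note (added, not part of the original file): with the settings configured
# inline, this single file can be run directly with Django's management
# commands, e.g.
#     python hello.py runserver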
|
wesksky/MyDjangoProject
|
TinyDjango/hello.py
|
hello.py
|
py
| 913 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35266444899
|
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
import keras
# from keras import layers
from sempler import Dataset, DatasetSide, DatasetSoft
from wave_u_net import wave_u_net
from loss import combined_loss, ScatterLoss, RegulatedLoss
from call_back import CustomCallback
from keras.callbacks import ModelCheckpoint
# import random
# from matplotlib import pyplot as plt
import json
# import numpy as np
import math
s_size = 16384 * (24 // 2)
steps_per_epoch = 100
steps = 20 # 40
noise_ratio = 0.21
batch_size=3
side = False
cycles = 10
# model = wave_u_net(num_initial_filters = 12, num_layers = 6, kernel_size = 10, input_size = s_size, output_type = "single")
# model = wave_u_net(num_initial_filters = 24, num_layers = 12, kernel_size = 15, input_size = s_size, output_type = "single")
# model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 30, input_size = s_size, output_type = "single")
# model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 50, input_size = s_size, output_type = "single")
# model = wave_u_net(num_initial_filters = 24, num_layers = 12, kernel_size = 15, input_size = s_size, output_type = "single", attention = "Gate", attention_res = False, dropout = "False", dropout_rate = 0.2, sub=True, side_chanel=True, side_chanel_cycles=10)
# model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 30, input_size = s_size, output_type = "single", attention = "Gate", attention_res = False, dropout = "False", dropout_rate = 0.2, sub=True, side_chanel=side, side_chanel_cycles=cycles)
model = wave_u_net(num_initial_filters = 32, num_layers = 16, kernel_size = 50, input_size = s_size, output_type = "single", attention = "Gate", attention_res = False, dropout = "Last", dropout_rate = 0.2, sub=True, side_chanel=side, side_chanel_cycles=cycles)
if os.path.exists('model.h5'): model.load_weights('model.h5')
initial_epoch = 0
if os.path.exists('epoch.txt'):
with open("epoch.txt", "r") as f:
initial_epoch = int(f.read())
opt = keras.optimizers.Adam(learning_rate=0.000_01) #0.000_1 - 0.000_01
loss = RegulatedLoss(s_size, steps, noise_ratio)
# loss = ScatterLoss(s_size, steps, noise_ratio)
# loss = "MSE"
model.compile(loss=loss, optimizer=opt)
model.summary()
c1 = CustomCallback(chackpoint=True)
c2 = ModelCheckpoint(filepath='model.h5', save_best_only=False, save_weights_only=True, save_freq='epoch')
# 90
if side:
dataset = DatasetSide(list(range(90)), s_size=s_size, steps=steps, batch_size=batch_size, noise_ratio=noise_ratio, orig=True, info=True, side_cysles=cycles)
else:
# dataset = Dataset(list(range(900, 1044)), s_size=s_size, steps=steps, batch_size=batch_size, noise_ratio=noise_ratio, orig=True, info=True)
dataset = DatasetSoft(list(range(0, 250)), s_size=s_size, steps=steps, batch_size=batch_size, noise_ratio=noise_ratio, orig=True, info=True)
epochs = len(dataset) // steps_per_epoch
print(f"data: {(len(dataset) * batch_size):_}")
model.fit(dataset, epochs=epochs, steps_per_epoch=steps_per_epoch, initial_epoch=initial_epoch, shuffle=False, callbacks=[c1, c2])
|
ondra117/lil_neuron
|
learning.py
|
learning.py
|
py
| 3,143 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37107835911
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import sys
import cherrypy
import platform
import os
import time
cur_dir = os.path.dirname(os.path.abspath(__file__))
#python 2.4 uses simplejson; python 2.6 and later ship json in the standard library
try:
import json
except ImportError:
import simplejson as json
#put together a simple index page
class Index(object):
    #the decorator below marks the index method as exposed to the HTTP server
@cherrypy.expose
def index(self):
return "hello cherrypy"
class Node(object):
'''
url /node/dist/
'''
    #get the target machine's distribution, version, architecture, hostname and so on, returned as JSON
@cherrypy.expose
def dist(self):
dist_json = ''
sysinstaller = ''
installer = ''
ostype = platform.dist()
if(ostype[0] in ['Ubuntu','debian','ubuntu','Debian']):
sysinstaller = 'apt-get'
installer = 'dpkg'
elif(ostype[0] in ['SuSE']):
sysinstaller = 'zypper'
installer = 'rpm'
elif(ostype[0] in ['CentOS', 'centos', 'redhat','RedHat']):
sysinstaller = 'yum'
installer = 'rpm'
machine = platform.machine()
hostname = platform.node()
dist_json = {'os.system':ostype[0], 'os.version':ostype[1], 'os.release':ostype[2], 'os.sysinstall':sysinstaller, 'os.installer':installer, 'os.arch':machine, 'os.hostname':hostname}
return json.dumps(dist_json, sort_keys=False, indent=4, separators=(',', ': '))
'''
url /node/GetCpuInfo/
'''
    #get the CPU model and related details, returned as JSON
@cherrypy.expose
def GetCpuInfo(self):
cpu = []
cpuinfo = {}
f = open("/proc/cpuinfo")
lines = f.readlines()
f.close()
for line in lines:
if line == '\n':
cpu.append(cpuinfo)
cpuinfo = {}
if len(line) < 2: continue
name = line.split(':')[0].strip().replace(' ','_')
var = line.split(':')[1].strip()
cpuinfo[name] = var
return json.dumps(cpu, sort_keys=False, indent=4, separators=(',', ': '))
'''
url /node/GetMemInfo/
'''
    #get detailed memory usage information
@cherrypy.expose
def GetMemInfo(self):
mem = {}
f = open("/proc/meminfo")
lines = f.readlines()
f.close()
for line in lines:
if len(line) < 2:
continue
name = line.split(':')[0]
var = line.split(':')[1].split()[0]
mem[name] = long(var) * 1024.0
mem['MemUsed'] = mem['MemTotal'] - mem['MemFree'] - mem['Buffers'] - mem['Cached']
return json.dumps(mem, sort_keys=False, indent=4, separators=(',', ': '))
'''
url /node/GetLoadAvg//
'''
    #get detailed system load information
@cherrypy.expose
def GetLoadAvg(self):
loadavg = {}
f = open("/proc/loadavg")
con = f.read().split()
f.close()
loadavg['lavg_1']=con[0]
loadavg['lavg_5']=con[1]
loadavg['lavg_15']=con[2]
loadavg['nr']=con[3]
loadavg['last_pid']=con[4]
return json.dumps(loadavg, sort_keys=False, indent=4, separators=(',', ': '))
'''
url /node/GetIfInfo/eth(x)
'''
    #get traffic statistics for the given network interface; the parsing here is a little involved
@cherrypy.expose
def GetIfInfo(self, interface):
dist_json = self.dist()
f = open("/proc/net/dev")
lines = f.readlines()
f.close()
intf = {}
for line in lines[2:]:
con = line.split()
            #the if part is for CentOS, where heavy traffic makes the interface fields run together and they must be split separately; the else part is for Ubuntu or other systems whose output is well formatted
offset = con[0].split(':')
if str(offset[0]) == interface:
intf['interface'] = str(offset[0])
intf['ReceiveBytes'] = str(offset[1])
intf['ReceivePackets'] = str(con[1])
intf['ReceiveErrs'] = str(con[2])
intf['ReceiveDrop'] = str(con[3])
intf['ReceiveFifo'] = str(con[4])
intf['ReceiveFrames'] = str(con[5])
intf['ReceiveCompressed'] = str(con[6])
intf['ReceiveMulticast'] = str(con[7])
intf['TransmitBytes'] = str(con[8])
intf['TransmitPackets'] = str(con[9])
intf['TransmitErrs'] = str(con[10])
intf['TransmitDrop'] = str(con[11])
intf['TransmitFifo'] = str(con[12])
intf['TransmitFrames'] = str(con[13])
intf['TransmitCompressed'] = str(con[14])
intf['TransmitMulticast'] = str(con[15])
return json.dumps(intf, sort_keys=False)
    #get the interfaces and traffic information for every network card
@cherrypy.expose
def GetIfTraffic(self):
ifs = []
nettraffic = {}
f = open("/proc/net/dev")
lines = f.readlines()
f.close()
for line in lines[2:]:
con = line.split()
ifname = con[0].split(':')
if(ifname[0].strip() != 'lo'):
ifs.append(ifname[0].strip())
else:
continue
for interface in ifs:
nettraffic[interface] = self.GetIfInfo(interface)
return json.dumps(nettraffic)
    #get the disk partition information and usage
@cherrypy.expose
def GetHddInfo(self):
hdds = []
mount = {}
file_system = []
type = []
size = []
used = []
avail = []
used_percent = []
mounted_on = []
hdds = os.popen('df -lhT | grep -v tmpfs | grep -v boot | grep -v usr | grep -v tmp | sed \'1d;/ /!N;s/\\n//;s/[ ]*[ ]/\\t/g;\'').readlines()
        for line in hdds:
            dict = {}
            fields = line.replace('\\n','').replace('\\t',' ').split()
            file_system = fields[0]
            dict['type'] = fields[1]
            dict['size'] = fields[2]
            dict['used'] = fields[3]
            dict['avail'] = fields[4]
            dict['used_percent'] = fields[5]
            dict['mounted_on'] = fields[6]
            dict['file_system'] = file_system
            mount[file_system] = dict
dist_json = json.dumps(mount)
return dist_json
    #Get CPU usage details; requires the sysstat package (mpstat) to be installed
@cherrypy.expose
def GetCpuDetail(self):
dist_json = self.dist()
dist = json.loads(dist_json)
if(dist['os.system'] in ['CentOS', 'centos', 'redhat', 'RedHat']):
if(int(dist['os.version'].split('.')[0]) < 6): #For CentOS only
cmd = 'mpstat 1 1 | sed \'1d;2d;3d;4d\' | awk \'{print "{\\\"user\\\":\\\"\"$3\"\\\",\\\"nice\\\":\\\"\"$4\"\\\",\\\"sys\\\":\\\"\"$5\"\\\",\\\"iowait\\\":\\\"\"$6\"\\\",\\\"irq\\\":\\\"\"$7\"\\\",\\\"soft\\\":\\\"\"$8\"\\\",\\\"steal\\\":\\\"\"$9\"\\\",\\\"idle\\\":\\\"\"$10\"\\\"}"}\''
else:
cmd = 'mpstat 1 1 | sed \'1d;2d;3d;4d\' | awk \'{print "{\\\"user\\\":\\\"\"$3\"\\\",\\\"nice\\\":\\\"\"$4\"\\\",\\\"sys\\\":\\\"\"$5\"\\\",\\\"iowait\\\":\\\"\"$6\"\\\",\\\"irq\\\":\\\"\"$7\"\\\",\\\"soft\\\":\\\"\"$8\"\\\",\\\"steal\\\":\\\"\"$9\"\\\",\\\"idle\\\":\\\"\"$11\"\\\"}"}\''
else:
cmd = 'mpstat 1 1 | sed \'1d;2d;3d;4d\' | awk \'{print "{\\\"user\\\":\\\"\"$3\"\\\",\\\"nice\\\":\\\"\"$4\"\\\",\\\"sys\\\":\\\"\"$5\"\\\",\\\"iowait\\\":\\\"\"$6\"\\\",\\\"irq\\\":\\\"\"$7\"\\\",\\\"soft\\\":\\\"\"$8\"\\\",\\\"steal\\\":\\\"\"$9\"\\\",\\\"idle\\\":\\\"\"$11\"\\\"}"}\''
cpu = os.popen(cmd).readline().strip()
return cpu
@cherrypy.expose
def GetLvsExtStatsSumm(self):
stats = {}
f = open("/proc/net/ip_vs_ext_stats")
lines = f.readlines()
f.close()
for line in lines[1:]:
con = line.split(':')
con2 = con[1].split()
stats[str(con[0].strip())] = con2
return json.dumps(stats, sort_keys=False, indent=4, separators=(',', ': '))
@cherrypy.expose
def GetLvsStatsSumm(self):
stats = {}
conns = []
in_pks = []
out_pks = []
in_bytes = []
out_bytes = []
f = open("/proc/net/ip_vs_stats")
lines = f.readlines()
f.close()
for line in lines[2:]:
con = line.split(':')[1].split()
conns.append(con[0])
in_pks.append(con[1])
out_pks.append(con[2])
in_bytes.append(con[3])
out_bytes.append(con[4])
stats = {"conns":conns,"in_pks":in_pks,"out_pks":out_pks,"in_bytes":in_bytes,"out_bytes":out_bytes}
return json.dumps(stats, sort_keys=False, indent=4, separators=(',', ': '))
@cherrypy.expose
def GetLvsConn(self):
Conn = []
node_list = []
dict = {}
num = 0
cmd = "ipvsadm -ln"
lines = os.popen(cmd).readlines()
for line in lines[3:]:
num += 1
con = line.split()
if con[0] == "TCP" or con[0] == "UDP":
if num == 1:
pass
else:
Conn.append(dict)
dict = {}
dict['lb_algo'] = str(con[2])
dict['vip'] = str(con[1])
dict['node'] = []
continue
node_dict = {"rs":con[1],"lb_kind":con[2],"weight":con[3],"activeconn":con[4],"inactconn":con[5]}
dict['node'].append(node_dict)
if num == len(lines[3:]):
Conn.append(dict)
return json.dumps(Conn, sort_keys=False, indent=4, separators=(',', ': '))
@cherrypy.expose
def GetLvsStatus(self):
Conn = []
node_list = []
dict = {}
num = 0
cmd = "ipvsadm -ln"
lines = os.popen(cmd).readlines()
for line in lines[3:]:
num += 1
con = line.split()
if con[0] == "TCP" or con[0] == "UDP":
if num == 1:
pass
else:
Conn.append(dict)
dict = {}
dict['lb_algo'] = str(con[2])
dict['vip'] = str(con[1])
dict['node'] = []
continue
node_dict = {"rs":con[1],"lb_kind":con[2],"weight":con[3]}
dict['node'].append(node_dict)
if num == len(lines[3:]):
Conn.append(dict)
return json.dumps(Conn, sort_keys=False, indent=4, separators=(',', ': '))
@cherrypy.expose
def GetLvsTraffic(self):
result = json.loads(open(os.path.join(cur_dir,'data/','lvstraffic')).read())
return json.dumps(result,sort_keys=False, indent=4, separators=(',', ': '))
if "__main__" == __name__:
    #Server configuration
settings = {
'global': {
            #Port to bind
'server.socket_port' : 60090,
            #IP address to listen on: 0.0.0.0 is fine if that is secure enough, otherwise use the server's own IP
'server.socket_host': '0.0.0.0',
'server.socket_file': '',
'server.socket_queue_size': 100,
'server.protocol_version': 'HTTP/1.1',
'server.log_to_screen': True,
'server.log_file': '',
'server.reverse_dns': False,
'server.thread_pool': 200,
'server.environment': 'production',
'engine.timeout_monitor.on': False
}
}
    #Apply the configuration, mount the routes and start the web server
cherrypy.config.update(settings)
cherrypy.tree.mount(Index(), '/')
cherrypy.tree.mount(Node(), '/node')
cherrypy.engine.start()
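    # --- Usage sketch (added; not part of the original agent) ---
    # Assuming the defaults above (port 60090, listening on 0.0.0.0), the JSON
    # endpoints can be queried with any HTTP client once the agent is running,
    # for example ("<agent-host>" is a placeholder):
    #   curl http://<agent-host>:60090/node/GetMemInfo/
    #   curl http://<agent-host>:60090/node/GetLoadAvg/
    #   curl http://<agent-host>:60090/node/GetIfInfo/eth0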
| lxcong/lvs-manager | monitor_agent/run.py | run.py | py | 12,755 | python | en | code | 160 | github-code | 6 |
33237880927 |
from numpy import mean
from numpy import std
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.optimizers import SGD
from scipy.ndimage import shift
from tensorflow.keras.models import load_model
import numpy as np
def getDataset():
(Xtrain, Ytrain), (Xtest, Ytest) = mnist.load_data()
Xtrain = Xtrain.reshape((Xtrain.shape[0], 28, 28, 1))
Xtest = Xtest.reshape((Xtest.shape[0], 28, 28, 1))
Ytest = to_categorical(Ytest)
Ytrain = to_categorical(Ytrain)
return Xtrain, Ytrain, Xtest, Ytest
def processData(train, test):
train = train.astype('float32')
test = test.astype('float32')
train = train / 255.0
test = test / 255.0
return train, test
def constructModel():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
opt = SGD(learning_rate=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def modelEvaluation(dataX, dataY, n_folds=5):
scores, histories = list(), list()
kfold = KFold(n_folds, shuffle=True, random_state=1)
for train_ix, test_ix in kfold.split(dataX):
model = constructModel()
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
scores.append(acc)
histories.append(history)
return scores, histories
def getDiagnostics(histories):
for i in range(len(histories)):
plt.subplot(2, 1, 1)
plt.title('Cross Entropy Loss')
plt.plot(histories[i].history['loss'], color='blue', label='train')
plt.plot(histories[i].history['val_loss'], color='orange', label='test')
plt.subplot(2, 1, 2)
plt.title('Classification Accuracy')
        plt.plot(histories[i].history['accuracy'], color='blue', label='train')
        plt.plot(histories[i].history['val_accuracy'], color='orange', label='test')
plt.show()
def getPerformance(scores):
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
plt.boxplot(scores)
plt.show()
def startTraining():
trainX, trainY, testX, testY = getDataset()
trainX, testX = processData(trainX, testX)
scores, histories = modelEvaluation(trainX, trainY)
getDiagnostics(histories)
getPerformance(scores)
model = constructModel()
model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=0)
model.save('DigitClassifier.h5')
startTraining()
def startTesting():
trainX, trainY, testX, testY = getDataset()
trainX, testX = processData(trainX, testX)
model = load_model('DigitClassifier.h5')
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
startTesting()
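# --- Hedged usage sketch (added; not part of the original script) ---
# Classify a single 28x28 grayscale image with the model saved above.
# "sample.png" is a placeholder path; preprocessing mirrors processData()
# (scale to [0, 1], reshape to (1, 28, 28, 1)).
def classifySingleImage(image_path='sample.png'):
    from tensorflow.keras.preprocessing.image import load_img, img_to_array
    img = load_img(image_path, color_mode='grayscale', target_size=(28, 28))
    arr = img_to_array(img).reshape(1, 28, 28, 1).astype('float32') / 255.0
    model = load_model('DigitClassifier.h5')
    return int(np.argmax(model.predict(arr), axis=1)[0])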
| husainasad/Digit-Classifier | model.py | model.py | py | 3,607 | python | en | code | 0 | github-code | 6 |
34075191892 |
'''
Min/Max stack: push() stores a (key, running_min, running_max) tuple for every
element, e.g. st = [(1, 1, 1), (3, 1, 3)], so getMin() and getMax() simply read
self.st[-1][1] and self.st[-1][2] in O(1).
'''
class Stack:
def __init__(self):
self.st = []
def pop(self):
if(self.isEmpty()):
raise Exception("stack is empty!")
else:
element = self.st[-1]
del self.st[-1]
return element
def push(self, key):
if(len(self.st) < 0):
raise Exception("stack is full!")
else:
min = self.getMin()
max = self.getMax()
# Handling Min
if(min is None or key < min):
min = key
if(max is None or key > max):
max = key
self.st.append((key, min, max))
def getMin(self):
if(self.st):
return self.st[-1][1]
else:
return None
def getMax(self):
if(self.st):
return self.st[-1][2]
else:
return None
def peek(self):
return self.st[-1]
    def isEmpty(self):
        # empty exactly when the underlying list has no elements
        return len(self.st) == 0
def printStack(self):
for v in self.st:
print(v)
if __name__ == "__main__":
st = Stack()
st.push(3)
st.push(4)
st.push(1)
st.push(101)
st.push(56)
st.printStack()
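    # Added demonstration (not in the original): the (key, min, max) tuples kept by
    # push() make these lookups O(1).
    print("min:", st.getMin())
    print("max:", st.getMax())
    print("popped:", st.pop())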
| Anurag808Tripathi/algo-ds | algo-ds-scratch/stack.py | stack.py | py | 1,190 | python | en | code | 0 | github-code | 6 |
38796663707 |
#Tutorial 4
number = int(input('Enter the number: '))
cycle = int(input('Enter number of cycles you want: '))
count = 1
while count <= cycle:
output = count*number
count += 1
print(output)
num = 0
sum = 0
while num != -1:
    num = int(input('Enter the number: '))
    if num != -1:  # don't add the -1 sentinel itself to the total
        sum = sum + num
print('Total is '+str(sum))
hidden_num = 21
score = 100
while score != 0:
guessed_num = int(input('Enter your guess here: '))
if guessed_num == hidden_num:
print('You won!')
break
score -= 5
if score == 0:
print('No more attempts')
import random
hidden_num = random.randint(1,100)
score = 100
while score != 0:
guessed_num = int(input('Enter your guess here: '))
if guessed_num == hidden_num:
print('You won!')
break
score -= 5
if score == 0:
print('No more attempts')
row = 1
while row <= 6:
col = 1
while col <= row:
print(col, end="")
col +=1
row += 1
print('')
row = ''
num = 1
while num <= 6:
row += str(num)
print(row)
num += 1
counter = 1
while counter != 100:
print(counter)
counter += 1
import random as rand
var=10
gen_num=rand.randint(1,1515151515)
while gen_num != 2:
print('Random num is not 2')
gen_num = rand.randint(1, 1515151515)
else:
print('Finally it is 2')
count = 1
while count <= 3:
p = int(input('Enter 1st number: '))
n = int(input('Enter 2nd number: '))
r = int(input('Enter 3rd number: '))
si = p*n*r / 100
print(si)
count += 1
else:
exit()
for i in range(9,-1,-1):
if i==5:
break
print('Current value:', str(i))
for i in range(9, -1, -1):
if i==5:
continue
print('Current value:', str(i))
else:
print('bye!')
| Sathila01/Year-1-Python | Tut4.py | Tut4.py | py | 1,842 | python | en | code | 0 | github-code | 6 |
16030870954 |
import os
import cv2
import time
import random
import numpy as np
import pandas as pd
import keras.api._v2.keras as keras
import tensorflow as tf
tf.__version__, np.__version__
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras import backend, layers, metrics
from tensorflow.keras.optimizers.experimental import SGD
from tensorflow.keras.applications import Xception
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.utils import plot_model
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import seaborn as sns
import matplotlib.pyplot as plt
from Classes import *
# a function that reads images and put them in a dictionary for each person
def read_images(split_type = 'Train'):
images_dict={}
image_count = 0
for person in ['personA','personB','personC','personD','personE']:
csv_path = f"{person + '/' + split_type+'/'}{person}_SigVerification{split_type}Labels.csv"
df = pd.read_csv(csv_path)
images_dict[person]={'forged':[],'real':[]}
for index, row in df.iterrows():
folder= person + '/' + split_type
image = row['image_name']
if os.path.exists(f'{folder}'+f'/{image}'):
if row['label'] == 'forged':
images_dict[person]['forged'].append([folder,image])
else:
images_dict[person]['real'].append([folder,image])
image_count +=1
return images_dict , image_count
# a function that creates triplets to use for training
def create_triplets(images_dict):
triplets=[]
for person in images_dict:
for i in range(len(images_dict[person]['real'])):
for j in range(i+1,len(images_dict[person]['real'])):
anchor = (images_dict[person]['real'][i][0] , images_dict[person]['real'][i][1])
positive = (images_dict[person]['real'][j][0] , images_dict[person]['real'][j][1])
k = random.randint(0, len(images_dict[person]['forged'])-1)
negative = (images_dict[person]['forged'][k][0],images_dict[person]['forged'][k][1])
triplets.append((anchor,positive,negative))
random.shuffle(triplets)
return triplets
# a Function that samples the data accordingly
def get_batch(triplet_list, batch_size=256, preprocess=True):
batch_steps = len(triplet_list) // batch_size
for i in range(batch_steps + 1):
anchor = []
positive = []
negative = []
j = i * batch_size
while j < (i + 1) * batch_size and j < len(triplet_list):
a, p, n = triplet_list[j]
a = cv2.imread(f"{a[0]}/{a[1]}")
p = cv2.imread(f"{p[0]}/{p[1]}")
n = cv2.imread(f"{n[0]}/{n[1]}")
a = cv2.resize(a, (128, 128))
p = cv2.resize(p, (128, 128))
n = cv2.resize(n, (128, 128))
anchor.append(a)
positive.append(p)
negative.append(n)
j += 1
anchor = np.array(anchor)
positive = np.array(positive)
negative = np.array(negative)
if preprocess:
anchor = preprocess_input(anchor)
positive = preprocess_input(positive)
negative = preprocess_input(negative)
yield ([anchor, positive, negative])
# a Function that returns a pretrained Xception encoder
def get_encoder(input_shape):
""" Returns the image encoding model """
pretrained_model = Xception(
input_shape=input_shape,
weights='imagenet',
include_top=False,
pooling='avg',
)
for i in range(len(pretrained_model.layers) - 27):
pretrained_model.layers[i].trainable = False
encode_model = Sequential([
pretrained_model,
layers.Flatten(),
layers.Dense(512, activation='relu'),
layers.BatchNormalization(),
layers.Dense(256, activation="relu"),
layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
], name="Encode_Model")
return encode_model
# a Function that encodes the inputs and computes the distances using distancelayer()
def get_siamese_network(mode = 'train',input_shape=(128, 128, 3)):
encoder = get_encoder(input_shape)
if mode != 'train':
encoder.load_weights("encoder")
# Input Layers for the images
anchor_input = layers.Input(input_shape, name="Anchor_Input")
positive_input = layers.Input(input_shape, name="Positive_Input")
negative_input = layers.Input(input_shape, name="Negative_Input")
    ## Generate the encodings (feature vectors) for the images
    encoded_a = encoder(anchor_input)
    encoded_p = encoder(positive_input)
    encoded_n = encoder(negative_input)
    # A layer to compute ‖f(A) - f(P)‖² and ‖f(A) - f(N)‖²
    distances = DistanceLayer()(
        encoded_a,
        encoded_p,
        encoded_n
    )
# Creating the Model
siamese_network = Model(
inputs=[anchor_input, positive_input, negative_input],
outputs=distances,
name="Siamese_Network"
)
return siamese_network
# a Function to test the model
def test_on_triplets(test_triplets,siamese_model,batch_size=256):
pos_scores, neg_scores = [], []
for data in get_batch(test_triplets, batch_size=batch_size):
prediction = siamese_model.predict(data)
pos_scores += list(prediction[0])
neg_scores += list(prediction[1])
accuracy = np.sum(np.array(pos_scores) < np.array(neg_scores)) / len(pos_scores)
ap_mean = np.mean(pos_scores)
an_mean = np.mean(neg_scores)
ap_stds = np.std(pos_scores)
an_stds = np.std(neg_scores)
print(f"Accuracy on test = {accuracy:.5f}")
return (accuracy, ap_mean, an_mean, ap_stds, an_stds)
# a Function that saves the encoder weights
def extract_encoder(model):
encoder = get_encoder((128, 128, 3))
i=0
for e_layer in model.layers[0].layers[3].layers:
layer_weight = e_layer.get_weights()
encoder.layers[i].set_weights(layer_weight)
i+=1
return encoder
# a Function that takes two lists of images and classifies them
def classify_images(encoder,sig_list1, sig_list2, threshold=1.3, mode = 'train'):
# Getting the encodings for the passed faces
tensor1 = encoder.predict(sig_list1)
tensor2 = encoder.predict(sig_list2)
distance = np.sum(np.square(tensor1 - tensor2), axis=-1)
prediction = np.where(distance <= threshold, 0, 1)
    if mode != 'train':
prediction = np.where(distance <= threshold, 'Real', "Forged")
return prediction
# a Function that computes the confusion matrix
def ModelMetrics(pos_list, neg_list):
true = np.array([0] * len(pos_list) + [1] * len(neg_list))
pred = np.append(pos_list, neg_list)
# Compute and print the accuracy
print(f"\nAccuracy of model: {accuracy_score(true, pred)}\n")
# Compute and plot the Confusion matrix
cf_matrix = confusion_matrix(true, pred)
categories = ['Similar', 'Different']
names = ['True Similar', 'False Similar', 'False Different', 'True Different']
percentages = ['{0:.2%}'.format(value) for value in cf_matrix.flatten() / np.sum(cf_matrix)]
labels = [f'{v1}\n{v2}' for v1, v2 in zip(names, percentages)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(cf_matrix, annot=labels, cmap='Blues', fmt='',
xticklabels=categories, yticklabels=categories)
plt.xlabel("Predicted", fontdict={'size': 14}, labelpad=10)
plt.ylabel("Actual", fontdict={'size': 14}, labelpad=10)
plt.title("Confusion Matrix", fontdict={'size': 18}, pad=20)
def Testimages(img1,img2):
img1 = cv2.imread(img1)
img2 = cv2.imread(img2)
img1 = cv2.resize(img1, (128, 128))
img2 = cv2.resize(img2, (128, 128))
img1 = np.array(img1)
img2 = np.array(img2)
img1 = preprocess_input(img1)
img2 = preprocess_input(img2)
yield ([img1, img2])
def predict(cluster, scalar, model, img):
labels = {0:"Person A", 1:"Person B", 2:"Person C", 3:"Person D", 4:"Person E"}
number_of_clusters = 10
sift = cv2.SIFT_create()
keypoints, descriptors = sift.detectAndCompute(img, None) # 1- extract features
cluster_result = cluster.predict(descriptors) # 2- predict cluster
# 3- build vocabulary
vocabulary = np.array([[0 for i in range(number_of_clusters)]], 'float32')
for each in cluster_result:
vocabulary[0][each] += 1
# vocabulary = reweight_tf_idf(vocabulary) ### tf_idf
vocabulary = scalar.transform(vocabulary) # 4- normalization
prediction = model.predict(vocabulary) # 5 - classification
return labels[prediction[0]]
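# --- Hedged usage sketch (added; not part of the original module) ---
# Verify a pair of signature images with the trained encoder. The weight file name
# "encoder" mirrors get_siamese_network() above; the image paths are placeholders
# and the threshold follows the default used by classify_images().
def verify_pair(img_path1, img_path2, threshold=1.3):
    encoder = get_encoder((128, 128, 3))
    encoder.load_weights("encoder")
    sig1 = preprocess_input(np.array([cv2.resize(cv2.imread(img_path1), (128, 128))]))
    sig2 = preprocess_input(np.array([cv2.resize(cv2.imread(img_path2), (128, 128))]))
    # returns 'Real' or 'Forged' because mode is not 'train'
    return classify_images(encoder, sig1, sig2, threshold=threshold, mode='test')[0]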
| gamal-abdelhakm/Handwritten-Signature-Identification-Verification-and-Detection | Script/Functions.py | Functions.py | py | 8,768 | python | en | code | 2 | github-code | 6 |
16128165567 |
N, M = map(int, input().split())
a = list(map(int, input().split()))
g = {i + 1: [] for i in range(N)}
for ai in a:
g[ai].append(ai + 1)
g[ai + 1].append(ai)
flag = [False] * (N + 1)
def dfs(i):
route.append(i)
flag[i] = True
for gi in g[i]:
if flag[gi]:
continue
return dfs(gi)
return route
ans = []
for i in range(1, N + 1):
route = []
if flag[i]:
continue
res = dfs(i)
ans += list(reversed(res))
print(*ans)
| keimoriyama/Atcoder | ABC/289/b.py | b.py | py | 495 | python | en | code | 0 | github-code | 6 |
70452856507 |
def lab_blackjack():
deck = ['A', '2', '3', '4', '5', '6',
'7', '8', '9', '10', 'J', 'Q', 'K', 'A', '2', '3', '4', '5', '6',
'7', '8', '9', '10', 'J', 'Q', 'K', 'A', '2', '3', '4', '5', '6',
'7', '8', '9', '10', 'J', 'Q', 'K', 'A', '2', '3', '4', '5', '6',
'7', '8', '9', '10', 'J', 'Q', 'K', ]
options = ['hit', 'stay']
yes = ['yes', 'y', 'sure']
no = ['no', 'n', 'nope']
validyn = []
for y in yes:
validyn.append(y)
for n in no:
validyn.append(n)
def getcard():
from random import choice
card = choice(deck)
index = deck.index(card)
deck.pop(index)
print(f'cards remaining: {len(deck)}')
return card
def gethand():
hand = []
card = getcard()
if card != 'invalid':
hand.append(card)
card = getcard()
if card != 'invalid':
hand.append(card)
return hand
def getvalue(hand):
value = 0
aces = []
for card in hand:
while True:
try:
int(card)
except:
if card == 'A':
value += 1
ace = card
aces.append(ace)
break
else:
value += 10
break
else:
card = int(card)
value += card
break
for ace in aces:
value += 10
if value > 21:
value -= 10
return value
def getadvise(hand):
if len(hand) >= 2:
value = getvalue(hand)
print(f'hand value: {value}')
if value < 17:
advise = 'hit.'
elif value < 21:
advise = 'stay.'
elif value == 21:
advise = 'stay. You have blackjack!'
else:
advise = 'busted'
else:
advise = 'invalid hand'
return advise
def start():
hand = gethand()
print(f'your hand: {hand}')
advise = getadvise(hand)
print(f'you should probably: {advise}')
print(options)
choice = input('what would you like to do?\n')
while choice not in options:
print(options)
choice = input('what would you like to do?\n')
return choice, hand
def hit(hand):
card = getcard()
hand.append(card)
return hand
opponent = gethand()
result = start()
choice = result[0]
hand = result[1]
while choice == 'hit':
hand = hit(hand)
print(f'your hand: {hand}')
advise = getadvise(hand)
if advise == 'busted':
print('busted'.upper())
result = 'busted'
break
print(f'you should probably: {advise}')
print(options)
choice = input('what would you like to do?\n')
if choice == 'stay':
value = getvalue(opponent)
cards = hand
hand = getvalue(hand)
if hand > value:
print(f'Your {cards} beat {opponent}.')
print(f'You win!')
result = 'win'
elif hand == value:
print(f'Your {cards} match {opponent}.')
print('tie')
result = 'tie'
else:
print(f'{opponent} beat your {cards}.')
print('You lost.')
result = 'lost'
return result
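# Added (not in the original lab file): run the lab interactively when executed directly.
if __name__ == '__main__':
    print('result:', lab_blackjack())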
| austenc-id/Guild | 1 - Python/7/blackjack.py | blackjack.py | py | 3,002 | python | en | code | 0 | github-code | 6 |
10629931945 |
import logging
import pytest
import nengo
from nengo.builder import Model
from nengo.builder.ensemble import BuiltEnsemble
def test_seeding(Simulator, allclose):
"""Test that setting the model seed fixes everything"""
# TODO: this really just checks random parameters in ensembles.
# Are there other objects with random parameters that should be
# tested? (Perhaps initial weights of learned connections)
m = nengo.Network(label="test_seeding")
with m:
input = nengo.Node(output=1, label="input")
A = nengo.Ensemble(40, 1, label="A")
B = nengo.Ensemble(20, 1, label="B")
nengo.Connection(input, A)
C = nengo.Connection(A, B, function=lambda x: x ** 2)
m.seed = 872
with Simulator(m) as sim:
m1 = sim.data
with Simulator(m) as sim:
m2 = sim.data
m.seed = 873
with Simulator(m) as sim:
m3 = sim.data
def compare_objs(obj1, obj2, attrs, equal=True):
for attr in attrs:
check = allclose(getattr(obj1, attr), getattr(obj2, attr)) == equal
if not check:
logging.info("%s: %s", attr, getattr(obj1, attr))
logging.info("%s: %s", attr, getattr(obj2, attr))
assert check
ens_attrs = BuiltEnsemble._fields
As = [mi[A] for mi in [m1, m2, m3]]
Bs = [mi[B] for mi in [m1, m2, m3]]
compare_objs(As[0], As[1], ens_attrs)
compare_objs(Bs[0], Bs[1], ens_attrs)
compare_objs(As[0], As[2], ens_attrs, equal=False)
compare_objs(Bs[0], Bs[2], ens_attrs, equal=False)
conn_attrs = ("eval_points", "weights")
Cs = [mi[C] for mi in [m1, m2, m3]]
compare_objs(Cs[0], Cs[1], conn_attrs)
compare_objs(Cs[0], Cs[2], conn_attrs, equal=False)
def test_hierarchical_seeding():
"""Changes to subnetworks shouldn't affect seeds in top-level network"""
def create(make_extra, seed):
objs = []
with nengo.Network(seed=seed, label="n1") as model:
objs.append(nengo.Ensemble(10, 1, label="e1"))
with nengo.Network(label="n2"):
objs.append(nengo.Ensemble(10, 1, label="e2"))
if make_extra:
# This shouldn't affect any seeds
objs.append(nengo.Ensemble(10, 1, label="e3"))
objs.append(nengo.Ensemble(10, 1, label="e4"))
return model, objs
same1, same1objs = create(False, 9)
same2, same2objs = create(True, 9)
diff, diffobjs = create(True, 10)
m1 = Model()
m1.build(same1)
same1seeds = m1.seeds
m2 = Model()
m2.build(same2)
same2seeds = m2.seeds
m3 = Model()
m3.build(diff)
diffseeds = m3.seeds
for diffobj, same2obj in zip(diffobjs, same2objs):
# These seeds should all be different
assert diffseeds[diffobj] != same2seeds[same2obj]
# Skip the extra ensemble
same2objs = same2objs[:2] + same2objs[3:]
for same1obj, same2obj in zip(same1objs, same2objs):
# These seeds should all be the same
assert same1seeds[same1obj] == same2seeds[same2obj]
def test_seed_override(seed, allclose):
"""Test that seeds are not overwritten by the seeding function"""
with nengo.Network(seed=seed - 1) as net:
a = nengo.Ensemble(10, 1, seed=seed - 2)
b = nengo.Ensemble(10, 1, seed=seed + 2)
model = nengo.builder.Model()
model.seeds[net] = seed + 1
model.seeds[a] = seed + 2
# note: intentionally setting this to the 'wrong' value, to check that
# it isn't being overridden (things with seeds set should have seeded=True)
model.seeded[net] = False
model.seeded[a] = False
model.build(net)
assert model.seeds[net] == seed + 1
assert model.seeds[a] == seed + 2
assert not model.seeded[net]
assert not model.seeded[a]
assert allclose(model.params[a].gain, model.params[b].gain)
def test_build_twice():
model = nengo.builder.Model()
ens = nengo.Ensemble(10, 1, add_to_container=False)
model.seeds[ens] = 0
model.build(ens)
built_ens = model.params[ens]
with pytest.warns(UserWarning, match="has already been built"):
assert model.build(ens) is None
assert model.params[ens] is built_ens
| Kanaderu/Neural-Networks | nengo-master/nengo/tests/test_builder.py | test_builder.py | py | 4,239 | python | en | code | 0 | github-code | 6 |
35014338143 |
# coding: utf-8
from flask import jsonify
from app import app
class Error():
'''
HTTP Response Error
'''
def __init__(self):
self.status = None
self.code = None
self.message = None
self.errors = None
def _ready(self, log_level='info'):
if log_level == 'critical':
app.logger.critical(str(self.status) + ' ' + self.message)
else:
app.logger.info(str(self.status) + ' ' + self.message)
error = {
'status': self.status,
'code': self.code,
'message': self.message
}
if self.errors:
error['errors'] = self.errors
return jsonify(error), self.status
def bad_request(self, message):
self.status = 400
if 'errors' in message:
self.code = message['code']
self.message = message['message']
self.errors = message['errors']
else:
self.code = message['code']
self.message = message['message']
return self._ready()
def unauthorized(self, message):
        self.status = 401  # 401 Unauthorized (403 is used by forbidden() below)
self.code = 'unauthorized'
self.message = message
return self._ready()
def forbidden(self, message):
self.status = 403
self.code = 'forbidden'
self.message = message
return self._ready()
def not_found(self, message):
self.status = 404
self.code = 'not_found'
self.message = str(message)
return self._ready()
def method_not_allowed(self, message):
self.status = 405
self.code = 'method_not_allowed'
self.message = message
return self._ready()
def request_timeout(self, message):
self.status = 408
self.code = 'request_timeout'
self.message = message
return self._ready()
def conflict(self, message):
self.status = 409
self.code = 'conflict'
self.message = message
return self._ready()
def internal_server_error(self, message):
self.status = 500
self.code = 'internal_server_error'
self.message = str(message)
return self._ready('critical')
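# --- Hedged usage sketch (added; not part of the original module) ---
# One common way to wire these responses up is via Flask error handlers; the
# handler names below are illustrative, not taken from this project.
@app.errorhandler(404)
def handle_not_found(e):
    return Error().not_found(e)

@app.errorhandler(500)
def handle_internal_server_error(e):
    return Error().internal_server_error(e)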
| jasonsmithj/spam_public | app/http/error.py | error.py | py | 2,234 | python | en | code | 0 | github-code | 6 |
19355802413 |
from datetime import datetime
import csv
AIRPORTS_DB_LINK = "https://raw.githubusercontent.com/cohaolain/ryanair-py/develop/ryanair/airports.csv"
AIRPORTS_DB_FILE = "data/airports.csv"
AIRPORTS_TIMESTAMP_FILE = "data/airports_timestamp.txt"
airports = None
def get_distance(lat1, lat2, lon1, lon2):
from math import radians, cos, sin, asin, sqrt
# The math module contains a function named
# radians which converts from degrees to radians.
lon1 = radians(lon1)
lon2 = radians(lon2)
lat1 = radians(lat1)
lat2 = radians(lat2)
# Haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * asin(sqrt(a))
# Radius of earth in kilometers. Use 3956 for miles
r = 6371
# calculate the result
return(c * r)
# download the airports database from github page of ryanair py
def download_airports():
from requests import get
r = get(AIRPORTS_DB_LINK)
with open(AIRPORTS_DB_FILE, "wb") as f:
f.write(r.content)
with open(AIRPORTS_TIMESTAMP_FILE, "w") as f:
f.write("{}".format(datetime.now()))
return True
def get_airports():
import os
if not os.path.exists(AIRPORTS_DB_FILE) or not os.path.exists(AIRPORTS_TIMESTAMP_FILE):
print("Downloading airports database...")
result = download_airports()
if result:
print("Airports database downloaded.")
else:
print("Error downloading airports database.")
return None
elif os.path.exists(AIRPORTS_TIMESTAMP_FILE):
with open(AIRPORTS_TIMESTAMP_FILE, "r") as f:
timestamp = f.read()
if timestamp:
timestamp = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f")
# check if the timestamp is older than 5 months
if (datetime.now() - timestamp).total_seconds() > 60*60*24*30*5:
print("Downloading airports database...")
result = download_airports()
if result:
print("Airports database downloaded.")
else:
print("Error downloading airports database.")
return None
else:
print("Error downloading airports database.")
return None
airports = {}
with open(AIRPORTS_DB_FILE, "r") as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=",")
line_count = 0
airports = []
for row in csv_reader:
            # csv.DictReader already consumes the header row, so every row here is data
airport = {
"code": row["iata_code"].upper(),
"name": row["name"].upper(),
"city": row["municipality"].upper(),
"country": row["iso_country"].upper(),
"latitude": float(row["latitude_deg"]),
"longitude": float(row["longitude_deg"]),
"continent": row["continent"].upper(),
"keywords": row["keywords"].upper().split(",")[0]
}
if len(airport["code"]) == 3 and airport["continent"] == "EU":
airports.append(airport)
line_count += 1
return airports
def get_airports_by_city(cityname, country, distance=150):
from utils.cities import get_city_by_name
city = get_city_by_name(cityname, country)
if not city:
return None
city_airports = []
for airport in get_airports():
if airport["country"] == city["country"]:
if get_distance(airport["latitude"], city["latitude"], airport["longitude"], city["longitude"]) <=distance:
city_airports.append(airport)
return city_airports
def get_airport_by_code(code):
for airport in get_airports():
if airport["code"] == code or airport["keywords"] == code:
return airport
return None
airports = get_airports()
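# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrative lookups; the airport code and city/country values are examples only.
if __name__ == "__main__":
    print(get_airport_by_code("BGY"))
    nearby = get_airports_by_city("Milan", "IT") or []
    print([a["code"] for a in nearby])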
| slotruglio/flights-radar | utils/airports.py | airports.py | py | 3,444 | python | en | code | 0 | github-code | 6 |
27741403931 |
import math
def get_last_L2_error(lines,name,LastLines=35) :
"""Get L_2 eror value from a set of lines for the last timestep.
The set of lines correspond to the output-lines of a flexi-run"""
for l in lines[-LastLines:] : # read the last XX lines (default is 35)
# search for name, e.g., "L2_Part" or "L_2"
if name in l :
tmp = l.split(":")[1]
return [float(x) for x in tmp.split()]
def get_last_number_of_timesteps(lines,name,LastLines=35) :
"""Get the number of total timesteps used for the simulation.
The set of lines correspond to the output-lines of a flexi-run"""
for l in lines[-LastLines:] : # read the last XX lines (default is 35)
# search for name, e.g., "#Timesteps"
if name in l :
tmp = l.split(":")[1]
return [float(x) for x in tmp.split()]
def get_initial_timesteps(lines,name) :
"""Get the initial timestep used for the simulation.
The set of lines correspond to the output-lines of a flexi-run"""
for l in lines: # read all
# search for name, e.g., "#Timesteps"
if name in l :
tmp = l.split(":")[1]
return [float(x) for x in tmp.split()]
def get_last_Linf_error(lines,LastLines=35) :
"""Get L_inf eror value from a set of lines for the last timestep
The set of lines correspond to the output-lines of a flexi-run"""
for l in lines[-LastLines:] : # read the last XX lines (default is 35)
if "L_inf" in l :
tmp = l.split(":")[1]
return [float(x) for x in tmp.split()]
def get_last_number(lines) :
for line in reversed(lines) :
tmp = line.split(' ')
for t in reversed(tmp) :
try :
return float(t)
except :
pass
def get_cpu_per_dof(lines) :
"""Get the PID value from a set of lines
The set of lines correspond to the output-lines of a flexi-run"""
for line in reversed(lines) :
if "CALCULATION TIME PER TSTEP/DOF: [" in line :
return float(line.split("[")[1].split("sec")[0])
def calcOrder_h(h,E,invert_h=False) :
"""Determine the order of convergence for a list of grid spacings h and errors E"""
if invert_h :
h = [1./float(elem) for elem in h]
else :
h = [float(elem) for elem in h]
E = [float(elem) for elem in E]
if len(h) != len(E) :
return -1
order = []
for i in range(1,len(h)) :
dh=1.0/(h[i]/h[i-1])
# Check if any error value is exactly zero
if E[i-1] == 0.0 or E[i] == 0.0:
order.append(0.0)
else :
dE=E[i]/E[i-1]
order.append(math.log(dE)/math.log(dh))
return order
def calcOrder_p(p,E) :
"""Determine the order of convergence for a list of polynomial degrees p and errors E"""
p = [float(elem) for elem in p]
E = [float(elem) for elem in E]
if len(p) != len(E) :
return -1
order = []
for i in range(1,len(p)) :
dp=1.0/((p[i]+1.0)/(p[i-1]+1.0))
        # Check if any error value is exactly zero (avoids log(0) below)
        if E[i-1] == 0.0 or E[i] == 0.0 :
order.append(0.0)
else :
dE=E[i]/E[i-1]
order.append(math.log(dE)/math.log(dp))
return order
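# --- Hedged usage sketch (added; not part of the original module) ---
# Convergence order of a hypothetical second-order scheme: refining the grid by 2x
# should roughly quarter the L2 error, giving an order close to 2.
if __name__ == "__main__" :
    n_cells = [10, 20, 40]                 # grid resolution (so spacing ~ 1/n)
    errors  = [4.0e-3, 1.0e-3, 2.5e-4]
    print(calcOrder_h(n_cells, errors))                            # ~[2.0, 2.0]
    print(calcOrder_h([0.1, 0.05, 0.025], errors, invert_h=True))  # same result from spacings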
| piclas-framework/reggie2.0 | analyze_functions.py | analyze_functions.py | py | 3,161 | python | en | code | 2 | github-code | 6 |
9816914464 |
#!/usr/bin/env python
# coding: utf-8
# # Import the Python packages
# In[14]:
import gudhi as gd
import scipy.io as sio
import math
import matplotlib.pyplot as plt
import numpy as np
# # Specify the coordinates of each atom
# In[16]:
coords = {'Ti':[[5,5,5]], 'O':[[5, 5, 10], [5, 10, 5], [10, 5, 5], [5,0,5], [0, 5, 5], [5, 5, 0]]}
coords['Ca'] = [[10, 10, 0], [10, 0, 0], [10, 0, 10], [10, 10, 10], [0, 10, 0], [0, 0, 0], [0, 0, 10], [0, 10, 10]]
data = []
for key, val in coords.items():
for j in val:
data.append(j)
mat = np.zeros((len(data), len(data)))
for i in range(len(data)):
for j in range(len(data)):
dist = np.linalg.norm(np.array(data[i])-np.array(data[j]))
mat[i][j] = dist
rips = gd.AlphaComplex(data)
st = rips.create_simplex_tree()
dgmsalpha = st.persistence()
betti0, betti1, betti2 = [], [], []
for r in dgmsalpha:
if r[0] == 0:
betti0.append([r[1][0], r[1][1]])
elif r[0] == 1:
betti1.append([r[1][0], r[1][1]])
elif r[0] == 2:
betti2.append([r[1][0], r[1][1]])
# Using circumradius, we take sqrt of F and multiply by 2
betti0 = np.array(np.sqrt(betti0)*2)
betti1 = np.array(np.sqrt(betti1)*2)
betti2 = np.array(np.sqrt(betti2)*2)
betti = [betti0, betti1, betti2]
betti0 = sorted(betti[0], key=lambda x: x[0])
betti0 = np.flip(betti0, axis=0)
betti1 = sorted(betti[1], key=lambda x: x[0])
betti1 = np.flip(betti1, axis=0)
betti2 = sorted(betti[2], key=lambda x: x[0])
betti2 = np.flip(betti2, axis=0)
sio.savemat("ABX3_gdalpha.mat", {"betti0": betti0, "betti1": betti1, "betti2": betti2})
print("c'est fait !")
| Fouad-Mazguit/rapport-data | Data/CaTiO3/les nombres de Betti.py | les nombres de Betti.py | py | 1,641 | python | en | code | 2 | github-code | 6 |
37048610245 |
from flask import Blueprint, jsonify, render_template,request,flash,redirect,url_for, session
import json
import sqlite3
from numpy import empty
from .excel_data import Device_Excel_Table, get_arr, get_by_ID_from_table
from .location import get_all_location
from .data_processing.index import database_initialization
from .downloadFiles.index import main as download_file
from selenium import webdriver
# from website import excel_data
# from downloadFiles
views = Blueprint('views',__name__)
@views.route('/',methods=['GET','POST'])
def Homepage():
print("Homepage",request.method)
if request.method =="GET":
loc_list = get_all_location()
return render_template("home.html", loc_list = loc_list)
if request.method =="POST":
if request.form.get("selenium"):
print("in selenium")
# flash("Downloading the requested files", category="error")
path = r"C:/Users/Kei Ka Shun/Desktop/project-env/FYP-main/website/downloadFiles/chromedriver.exe"
driver = webdriver.Chrome(executable_path=path)
download_file(driver)
return redirect(url_for("views.Homepage"))
elif request.form.get("change_folder"):
print("change_folder")
timestamp = database_initialization()
timestamp_json = json.dumps({"timestamp" : str(timestamp)})
session['timestamp'] = timestamp_json
return redirect(url_for("views.updated_data")) # timestamp_json=timestamp_json
elif request.form.get("select_location"):
print("selection")
loc = request.form.get("select_location")
return redirect(url_for("views.location_list", loc=loc))
elif request.form.get("input_location"):
print("in input")
loc = request.form.get("input_location")
if request.form.get("input_system"):
print("can get sys")
sys = request.form.get("input_system")
if request.form.get("input_device"):
print("can get dev")
dev = request.form.get("input_device")
if request.form.get("input_equip"):
print("if loop")
equip_no = request.form.get("input_equip")
return redirect(url_for("views.get_by_ID", loc= loc, sys= sys, device = dev,eqipID =equip_no))
else:
print(f"else loop {loc}, {sys} {dev}")
return redirect(url_for("views.table_list", loc= loc, sys= sys, device = dev))
else:
print("cannot get dev")
flash("Please enter Decive Short Form eg. CTR", category="error")
return redirect("/")
else:
print("cannot get sys")
flash("Please enter System Short Form eg. AUS", category="error")
return redirect(url_for("views.Homepage"))
else:
print("cannot get loc")
flash("Please enter Location Short Form eg. HKBCF_001", category="error")
return redirect(url_for("views.Homepage"))
@views.route("/change", methods= ['GET','POST'])
def updated_data():
print("updated data")
# ts = request.args['timestamp_json'] # counterpart for url_for()
ts = session['timestamp']
file = "./change_log.json"
with open(file, 'r') as f:
data = json.load(f)
return render_template("change.html",data=data, timestamp=ts)
@views.route('/<loc>',methods=['GET','POST'])
def location_list(loc):
if loc == "Location":
return (redirect(url_for("views.Homepage")))
if ".db" not in loc:
loc = loc + ".db"
loc_no_filetype = loc.split('.')[0]
sys_arr = get_arr(loc)
print(f"Method: {request.method} in location list")
if request.method == "GET":
print("in location get")
return render_template("sys.html", sys_arr = sys_arr, Location = loc, location_link = loc_no_filetype)
if request.method == "POST":
print("in location post")
sys = request.form.get("system")
print(f"sys: {sys}")
return redirect(url_for("views.system_list", loc= loc_no_filetype, sys= sys)) #, loc = loc_no_filetype, sys = sys, device_arr = device_arr
@views.route('/<loc>/<sys>',methods=['GET','POST'])
def system_list(loc, sys): #loc,sys,device_arr
print(f"Method: {request.method} {loc} {sys} in system_list")
if ".db" not in loc:
loc = loc + ".db"
loc_no_filetype = loc.split('.')[0]
device_arr = get_arr(loc,sys)
if request.method == "GET":
print(f"in sys_list GET")
return render_template("device.html", System = sys, device_arr = device_arr)
if request.method == "POST":
print("in sys post")
dev = request.form.get("device")
return redirect(url_for("views.table_list", loc= loc_no_filetype, sys= sys, device = dev))
@views.route('/<loc>/<sys>/<device>',methods=['GET','POST'])
def table_list(loc, sys, device):
print(f"Method: {request.method} {loc} {sys} {device} in table_list")
if request.method =="GET":
[data, attr_list] = Device_Excel_Table(loc,sys,device)
return render_template("table.html",
data =data ,
attr_list = attr_list,
loc = loc,
sys = sys,
device = device
)
@views.route('/<loc>/<sys>/<device>/<eqipID>',methods=['GET','POST', 'PUT'])
def get_by_ID(loc, sys, device, eqipID):
print(f"Method: {request.method} {loc} {sys} {device} {eqipID}in table_list")
if request.method =="GET":
[data, attr_list] = get_by_ID_from_table(loc,sys,device,eqipID)
return render_template("table.html",
data =data ,
attr_list = attr_list,
loc = loc,
sys = sys,
device = device
)
if request.method =="POST":
pass
| Kelly-Kxx/fyp_selenium_flask | website/views.py | views.py | py | 6,347 | python | en | code | 0 | github-code | 6 |
43736972444 |
import sys
from pyspark import SparkConf, SparkContext
import re
from bwt import reverseBwt
from radix import radixSort
from segment import segSort
from default import defaultSort
from partition import partitionSort
# config spark context, set master, name and memory size
def getSC(master, name):
conf = (SparkConf()
.setMaster(master)
.setAppName(name)
#.set("spark.executor.memory", "1g")
.set("spark.akka.frameSize", "512")
)
sc = SparkContext(conf = conf)
sc.addPyFile('default.py')
sc.addPyFile('segment.py')
sc.addPyFile('radix.py')
sc.addPyFile('partition.py')
sc.addPyFile('bwt.py')
return sc
# select a sort method
def sort(sort_name, reads, threads_number, output_path, prefixes):
if (sort_name=='radix'):
bwt = radixSort(reads)
elif (sort_name=='segment'):
bwt = segSort(reads)
elif (sort_name=='partition'):
bwt = partitionSort(reads, threads_number, output_path, prefixes)
else:
bwt = defaultSort(reads, threads_number, output_path, prefixes)
return bwt
# RDD does not support communications among lines,
# because each line is independent during processing.
# Thus we first collect RDD (RDD->List), then parallelize List (List->RDD)
def collectReads(lines, file_type):
if file_type == 'fasta' :
reads = []
read = ''
lines = lines.collect()
        #concatenate sequence lines until the next line that begins with '>'
for line in lines :
if '>' not in line:
read += line
else :
if len(read)>0: reads.append(read)
read = ''
if len(read)>0: reads.append(read)
elif file_type == 'fastq' :
#choose the second line of every four lines
reads = lines.collect()[1::4]
else :
reads = lines.collect()
return reads
def filterReads(lines, file_type):
if file_type == 'fasta' :
reads = lines.filter(lambda line: '>' not in line)
elif file_type == 'fastq' :
reads = lines.filter(lambda line: re.match('^[ACGTN]*$', line))
else :
reads = lines
return reads
def getReads(lines, file_type, collect, reads_output_path):
if collect:
# collect RDD (RDD->List)
reads = collectReads(lines,file_type)
# parallelize List (List->RDD)
reads = sc.parallelize(reads,int(threads_number))
else :
reads = filterReads(lines,file_type)
# output reads
# reads.saveAsTextFile(reads_output_path)
return reads
if __name__ == "__main__":
if len(sys.argv) < 7:
print >> sys.stderr, "Usage: <sort> <master> <threads_num> <file_type> <input> <bwt_output_path>"
exit(-1)
sort_method = sys.argv[1]
master_address = sys.argv[2]
threads_number = sys.argv[3]
file_type = sys.argv[4]
input_path = sys.argv[5]
reads_output_path = ""
bwt_output_path = sys.argv[6]
sc = getSC(master_address, sort_method+threads_number+input_path)
lines = sc.textFile(input_path,int(threads_number))
reads = getReads(lines,file_type, False, reads_output_path).cache()
prefixes='$ACGNT'
#prefixes = ['$','AA','CA','GA','NA','TA','AC','CC','GC','NC','TC','AG','CG','GG','NG','TG','AN','CN','GN','NN','TN','AT','CT','GT','NT','TT']
# sort suffixes
bwt = sort(sort_method,reads, int(threads_number), bwt_output_path, prefixes)
| xniu7/jhuclass.genome.indexing | code/python/sort.py | sort.py | py | 3,476 | python | en | code | 1 | github-code | 6 |
7815075534 |
class PartyAnimal:
x = 0
name = ''
def __init__(self, name):
self.name = name
print('Name:', self.name)
def party(self):
self.x = self.x + 1
print(self.name, 'Party count', self.x)
person1 = PartyAnimal('Amjed')
person1.party()
print('\n')
person2 = PartyAnimal('Danwand')
person2.party()
| amjedsaleel/Python-for-Everybody | Using Databases with Python/cons.py | cons.py | py | 342 | python | en | code | 0 | github-code | 6 |
22534736697 |
# Write a Python program to count the occurrences of each word in a given sentence.
string=str(input("Enter a string :"))
words=string.split() #store splitted string into variable
count=dict() #initiate a dictionary
for word in words: #initiate loop
if(word in count): #if word is already in dictionary
count[word]=count[word]+1 #increment the count
else: #else keep it 1
count[word]=1
print(count)
| ABHISHEKSUBHASHSWAMI/String-Manipulation | str11.py | str11.py | py | 623 | python | en | code | 1 | github-code | 6 |
9797629811 |
#The client runs on the remote side; it only needs a dependency
# on the Pyro5 library
import Pyro5.api
if __name__=='__main__':
    # to check which services are registered in the name server, use: pyro5-nsc -p 9900 list
    #in this case the registered name is phonebook.server
phonebook = Pyro5.api.Proxy('PYRONAME:phonebook.server')
    # print the phonebook listing
print(phonebook.list())
    #create a record in the phonebook
phonebook.create(dict(nama='Roberto Carlos',alamat='Jambangan',notelp='67829'))
    # print the phonebook listing again to verify the new record
print(phonebook.list())
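    # Hedged note (added; not part of the original client): the Pyro5 name server
    # (e.g. `pyro5-ns -p 9900`) and the phonebook server that registers
    # "phonebook.server" must already be running before this client will work.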
| rm77/sister2020 | client/client.py | client.py | py | 600 | python | id | code | 0 | github-code | 6 |
70280896828 |
from abc import ABC, abstractmethod
from nltk.translate.bleu_score import sentence_bleu
from bert_score import score as bert_score
from BARTScore import bart_score
import argparse
class SimilarityClass(ABC):
def __init__(self):
pass
@abstractmethod
def get_similarity(self):
pass
class BLEUSimilarityScore(SimilarityClass):
def __init__(self):
super().__init__()
def get_similarity(self, sentence_1: str, sentence_2: str, gram=None) -> str:
        format_sent_1 = [sentence_1.split()]  # NLTK expects a list of tokenized reference sentences
format_sent_2 = sentence_2.split()
if gram == None:
print('BLEU score -> {}'.format(sentence_bleu(format_sent_1, format_sent_2)))
elif gram == 1:
print('Individual 1-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(1, 0, 0, 0)))
elif gram == 2:
print('Individual 2-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(0, 1, 0, 0)))
elif gram == 3:
print('Individual 3-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(0, 0, 1, 0)))
elif gram == 4:
print('Individual 4-gram: %f' % sentence_bleu(format_sent_1, format_sent_2, weights=(0, 0, 0, 1)))
class BERTSimilarityScore(SimilarityClass):
def __init__(self):
super().__init__()
def get_similarity(self, sentence_1: str, sentence_2: str) -> str:
format_sent_1 = [sentence_1]
format_sent_2 = [sentence_2]
P, R, F1 = bert_score(format_sent_1, format_sent_2, lang='en', verbose=True)
print(f"BERT Score: P={P.mean().item():.6f} R={R.mean().item():.6f} F={F1.mean().item():.6f}")
class BARTSimilarityScore(SimilarityClass):
def __init__(self):
super().__init__()
def get_similarity(self, sentence_1: str, sentence_2: str, type='ParaBank') -> str:
format_sent_1 = [sentence_1]
format_sent_2 = [sentence_2]
if type == 'ParaBank':
bart_scorer = bart_score(device='cuda:0', checkpoint='bart.pth')
bart_scorer.load(path='bart.pth')
bart_scorer.score(format_sent_1, format_sent_2, batch_size=1)
elif type == 'CNNDM':
bart_scorer = bart_score(device='cuda:0', checkpoint='facebook/bart-large-cnn')
bart_scorer.score(format_sent_1, format_sent_2, batch_size=1) # generation scores from the first list of texts to the second list of texts.
def main(args):
    if args.model == 'BLEU':
        bleu = BLEUSimilarityScore()
        bleu.get_similarity("I am good", "You are good")
    if args.model == 'BERT':
        bert = BERTSimilarityScore()
        bert.get_similarity("I am good", "You are good")
    if args.model == 'BART':
        bart = BARTSimilarityScore()
        bart.get_similarity("I am good", "You are good")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, dest='model', required=True)
parser.add_argument("--model-specifics", type=str, dest='specifics', required=False)
args = parser.parse_args()
main(args)
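# Hedged usage note (added; not part of the original script), assuming the file is
# invoked directly:
#   python abstract_class.py --model BLEU
#   python abstract_class.py --model BERT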
| esteng/ambiguous_vqa | analysis/abstract_class.py | abstract_class.py | py | 3,112 | python | en | code | 5 | github-code | 6 |
73998046587 |
import os
import pandas as pd
pd.set_option('display.max_columns', None) # or 1000
pd.set_option('display.max_rows', None) # or 1000
pd.set_option('display.max_colwidth', None) # or 199
import numpy as np
import pickle
from tabulate import tabulate
def create_empty_table_lr_micro_macro():
classifiers = ['logistic_regression']
cols = pd.MultiIndex.from_product([classifiers, ['accuracy', 'precision_micro', 'recall_micro', 'f1_micro', 'precision_macro', 'recall_macro', 'f1_macro', 'auc']])
return pd.DataFrame([], columns=cols)
def main_micro_macro():
classifiers = ['logistic_regression']
cols = pd.MultiIndex.from_product([classifiers, ['accuracy', 'precision_micro', 'recall_micro', 'f1_micro', 'precision_macro', 'recall_macro', 'f1_macro', 'auc']])
table = create_empty_table_lr_micro_macro()
models_to_config_dict = {'20210227-235159_testing-False-ws-200-bs-256-transformations-01-lr-00001-agg-mean': 'Mean aggregate Noise Scaled Transformation',
'20210227-235154_testing-False-ws-200-bs-256-transformations-01-lr-00001-agg-std': 'Std aggregate Noise Scaled Transformation',
'20210227-235154_testing-False-ws-200-bs-256-transformations-23-lr-00001-agg-mean': 'Mean aggregated Negated Time-Flipped Transformation'}
# models = next(os.walk(os.getcwd()))[1]
models = ['20210227-235159_testing-False-ws-200-bs-256-transformations-01-lr-00001-agg-mean',
'20210227-235154_testing-False-ws-200-bs-256-transformations-01-lr-00001-agg-std',
'20210227-235154_testing-False-ws-200-bs-256-transformations-23-lr-00001-agg-mean']
for model in models:
print(model)
path_to_chapman_predictions = os.path.join(os.getcwd(), model, 'lr_results.pickle')
with open(path_to_chapman_predictions, 'rb') as f:
results = pickle.load(f)
classifier_metrics_data = [results[0], results[1], results[2], results[3], results[4], results[5], results[6], results[7]]
np_classifier_metrics_data = np.array(classifier_metrics_data).reshape(1, len(classifier_metrics_data))
classifier_metrics_dataframe = pd.DataFrame(np_classifier_metrics_data, columns=cols)
table = table.append(classifier_metrics_dataframe)
path_to_excel = os.path.join(os.getcwd(), f'heart_rhythm_classifier_results_lr_micro_macro.xlsx')
model_to_config_index = [models_to_config_dict[model] for model in models]
table.index = model_to_config_index
table.to_excel(path_to_excel)
if __name__ == '__main__':
main_micro_macro()
| K-Shah3/SimCLR_HCHS | chapman/predictions/create_table.py | create_table.py | py | 2,607 | python | en | code | 0 | github-code | 6 |
10552839170 |
import abc
import dataclasses
from typing import Optional, Union
import numpy as np
import numpy.typing as npt
import rod
from rod import logging
@dataclasses.dataclass
class PrimitiveBuilder(abc.ABC):
name: str
mass: float
element: Union[
rod.Model, rod.Link, rod.Inertial, rod.Collision, rod.Visual
] = dataclasses.field(
default=None, init=False, repr=False, hash=False, compare=False
)
def build(
self,
) -> Union[rod.Model, rod.Link, rod.Inertial, rod.Collision, rod.Visual]:
return self.element
# ================
# Abstract methods
# ================
@abc.abstractmethod
def _inertia(self) -> rod.Inertia:
pass
@abc.abstractmethod
def _geometry(self) -> rod.Geometry:
pass
# ================
# Element builders
# ================
def build_model(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> "PrimitiveBuilder":
self._check_element()
self.element = self._model(name=name, pose=pose)
return self
def build_link(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> "PrimitiveBuilder":
self._check_element()
self.element = self._link(name=name, pose=pose)
return self
def build_inertial(self, pose: Optional[rod.Pose] = None) -> "PrimitiveBuilder":
self._check_element()
self.element = self._inertial(pose=pose)
return self
def build_visual(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> "PrimitiveBuilder":
self._check_element()
self.element = self._visual(name=name, pose=pose)
return self
def build_collision(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> "PrimitiveBuilder":
self._check_element()
self.element = self._collision(name=name, pose=pose)
return self
# =================
# Element modifiers
# =================
def add_link(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
link: Optional[rod.Link] = None,
) -> "PrimitiveBuilder":
if not isinstance(self.element, rod.Model):
raise ValueError(type(self.element))
link = link if link is not None else self._link(name=name, pose=pose)
if pose is not None:
link.pose = pose
self.element.link = link
return self
def add_inertial(
self,
pose: Optional[rod.Pose] = None,
inertial: Optional[rod.Inertial] = None,
) -> "PrimitiveBuilder":
if not isinstance(self.element, (rod.Model, rod.Link)):
raise ValueError(type(self.element))
if isinstance(self.element, rod.Model):
link = self.element.link
elif isinstance(self.element, rod.Link):
link = self.element
else:
raise ValueError(self.element)
inertial = inertial if inertial is not None else self._inertial(pose=pose)
if pose is not None:
inertial.pose = pose
else:
inertial.pose = PrimitiveBuilder.build_pose(relative_to=link.name)
link.inertial = inertial
return self
def add_visual(
self,
name: Optional[str] = None,
use_inertial_pose: bool = True,
pose: Optional[rod.Pose] = None,
visual: Optional[rod.Visual] = None,
) -> "PrimitiveBuilder":
if not isinstance(self.element, (rod.Model, rod.Link)):
raise ValueError(type(self.element))
if isinstance(self.element, rod.Model):
link = self.element.link
elif isinstance(self.element, rod.Link):
link = self.element
else:
raise ValueError(self.element)
if pose is None and use_inertial_pose:
if link.inertial.pose is None:
msg = f"Inertial element of link '{link.name}' has no pose defined"
raise ValueError(msg)
pose = link.inertial.pose
visual = visual if visual is not None else self._visual(name=name, pose=pose)
if visual.name in [v.name for v in link.visuals()]:
msg = f"Visual '{visual.name}' already exists in link '{link.name}'"
raise ValueError(msg)
link.add_visual(visual=visual)
return self
def add_collision(
self,
name: Optional[str] = None,
use_inertial_pose: bool = True,
pose: Optional[rod.Pose] = None,
collision: Optional[rod.Collision] = None,
) -> "PrimitiveBuilder":
if not isinstance(self.element, (rod.Model, rod.Link)):
raise ValueError(type(self.element))
if isinstance(self.element, rod.Model):
link = self.element.link
elif isinstance(self.element, rod.Link):
link = self.element
else:
raise ValueError(self.element)
if pose is None and use_inertial_pose:
if link.inertial.pose is None:
msg = f"Inertial element of link '{link.name}' has no pose defined"
raise ValueError(msg)
pose = link.inertial.pose
collision = (
collision
if collision is not None
else self._collision(name=name, pose=pose)
)
if collision.name in [c.name for c in link.collisions()]:
msg = f"Collision '{collision.name}' already exists in link '{link.name}'"
raise ValueError(msg)
link.add_collision(collision=collision)
return self
# ====================
# ROD element builders
# ====================
def _model(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> rod.Model:
name = name if name is not None else self.name
logging.debug(f"Building model '{name}'")
if pose is not None and pose.relative_to != "world":
raise ValueError("Model pose must be relative to 'world")
return rod.Model(
name=name,
pose=pose,
)
def _link(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> rod.Link:
return rod.Link(
name=name if name is not None else f"{self.name}_link",
pose=pose,
)
def _inertial(self, pose: Optional[rod.Pose] = None) -> rod.Inertial:
return rod.Inertial(
pose=pose,
mass=self.mass,
inertia=self._inertia(),
)
def _visual(
self,
name: Optional[str] = None,
pose: Optional[rod.Pose] = None,
) -> rod.Visual:
name = name if name is not None else f"{self.name}_visual"
return rod.Visual(
name=name,
pose=pose,
geometry=self._geometry(),
)
def _collision(
self,
name: Optional[str],
pose: Optional[rod.Pose] = None,
) -> rod.Collision:
name = name if name is not None else f"{self.name}_collision"
return rod.Collision(
name=name,
pose=pose,
geometry=self._geometry(),
)
# ===============
# Utility methods
# ===============
def _check_element(self) -> None:
if self.element is not None:
msg = f"Builder was already building a '{type(self.element)}' instance"
raise ValueError(msg)
@staticmethod
def build_pose(
pos: npt.NDArray = None,
rpy: npt.NDArray = None,
relative_to: str = None,
degrees: bool = None,
rotation_format: str = None,
) -> Optional[rod.Pose]:
if pos is None and rpy is None:
return rod.Pose.from_transform(transform=np.eye(4), relative_to=relative_to)
pos = np.zeros(3) if pos is None else pos
rpy = np.zeros(3) if rpy is None else rpy
if pos.size != 3:
raise ValueError(pos.size)
if rpy.size != 3:
raise ValueError(rpy.size)
return rod.Pose(
pose=list(np.hstack([pos, rpy])),
relative_to=relative_to,
degrees=degrees,
rotation_format=rotation_format,
)
| ami-iit/rod | src/rod/builder/primitive_builder.py | primitive_builder.py | py | 8,450 | python | en | code | 11 | github-code | 6 |
32569222248 |
# -*- coding: utf-8 -*-
###############################################################################
# License, author and contributors information in: #
# __manifest__.py file at the root folder of this module. #
###############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools import float_is_zero
from datetime import datetime, timedelta
class SaleOrderLine(models.Model):
_name = 'sale.order.line'
_inherit = ['sale.order.line', 'mto.chain.mixin']
date_expected = fields.Datetime('Delivery Date')
@api.model
def default_get(self, fields):
res = super(SaleOrderLine, self).default_get(fields)
res['priority_id'] = self.env['mto.priority'].search([], limit=1).id
return res
@api.model
def do_date_update(self, start_date=False, end_date=False):
self.ensure_one()
if end_date:
start_date = end_date - timedelta(days=self.product_id.sale_delay)
return_date = False, start_date
elif start_date:
end_date = start_date + timedelta(days=self.product_id.sale_delay)
return_date = end_date, False
elif not any((end_date,start_date)):
start_date = self.date_expected - timedelta(days=self.product_id.sale_delay)
end_date = self.date_expected
return_date = end_date, start_date
if self.state not in ('done', 'cancel'):
self.write({
'date_expected': end_date
})
if self.move_ids:
for move in self.move_ids:
move.move_date_update(start_date, move.sale_line_id.order_id)
return return_date
class PurchaseOrder(models.Model):
_name = 'purchase.order.line'
_inherit = ['purchase.order.line', 'mto.chain.mixin']
@api.model
def do_date_update(self, start_date=False, end_date=False):
return False, False
def name_get(self):
return [(record.id, '%s / %s'%(record.order_id.name, record.product_id.name)) for record in self]
class MrpProduction(models.Model):
_name = 'mrp.production'
_inherit = ['mrp.production', 'mto.chain.mixin']
def _get_start_date(self):
return max(self.date_planned_start, datetime.now())
@api.model
def do_date_update(self, start_date=False, end_date=False):
self.ensure_one()
if end_date:
start_date = end_date - timedelta(days=self.product_id.produce_delay)
return_date = False, start_date
elif start_date:
end_date = start_date + timedelta(days=self.product_id.produce_delay)
return_date = end_date, False
elif not any((end_date,start_date)):
start_date = self.date_planned_start
end_date = self.date_planned_finished
return_date = end_date, start_date
if self.state not in ('done', 'cancel', 'progress'):
self.write({
'date_planned_start': start_date,
'date_planned_finished': end_date
})
self.picking_ids.mapped('move_lines').write({
'date': start_date,
'date_expected': start_date
})
self.move_finished_ids.write({
'date': end_date,
'date_expected': end_date
})
self.move_raw_ids.write({
'date': start_date,
'date_expected': start_date
})
return return_date
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def _action_confirm(self):
super(SaleOrder, self)._action_confirm()
for order in self:
for line in order.order_line:
line.node_id.action_date_update()
line.node_id.action_priority_update()
@api.multi
def action_cancel(self):
res = super(SaleOrder, self).action_cancel()
self.mapped('order_line').mapped('node_id').write({
'parent_ids': [(6, False, [])],
'child_ids': [(6, False, [])]
})
return res
|
dip-ergo/tex-fasteners
|
mto_chain/models/inherit.py
|
inherit.py
|
py
| 4,293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38713930072
|
from collections import defaultdict, deque
def bfs(graph, start):
visited = set()
queue = deque([start])
while queue:
vertex = queue.popleft()
if vertex not in visited:
visited.add(vertex)
print(vertex, end=' ')
for neighbor in graph[vertex]:
if neighbor not in visited:
queue.append(neighbor)
# Example usage
graph = defaultdict(list)
num_nodes = int(input("Enter the number of nodes: "))
for i in range(num_nodes):
node = input(f"Enter node {i+1}: ")
adj_nodes = input(f"Enter adjacent nodes for {node}: ").split()
graph[node].extend(adj_nodes)
start_node = input("Enter the starting node: ")
bfs(graph, start_node)
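
# Illustrative non-interactive run: shows the adjacency format the prompts above
# build, using assumed example nodes instead of input().
demo_graph = defaultdict(list)
demo_graph["A"].extend(["B", "C"])
demo_graph["B"].append("D")
demo_graph["C"].append("D")
print("\nDemo traversal:")
bfs(demo_graph, "A")  # expected output: A B C D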
|
pogchumpus55/AI
|
bfs.py
|
bfs.py
|
py
| 765 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30138374155
|
# !/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2020
# All rights reserved
# @Author: 'Wu Dong <[email protected]>'
# @Time: '2020-04-01 09:47'
# sys
import typing as t
from functools import wraps
from inspect import isfunction
from inspect import getfullargspec
# 3p
from flask import ( # pylint: disable=unused-import
Flask,
g,
request,
)
from werkzeug.datastructures import FileStorage
# object
from .exception import ParamsValueError
from .filters.base import BaseFilter # pylint: disable=unused-import
from .filters import (
cross_filters,
simple_filters,
)
from .macro import (
K_CONTENT_TYPE,
K_FUZZY,
K_SKIP_FILTER,
K_STORE_KEY
)
from .response import (
BaseResponse,
HTMLResponse,
JSONResponse,
)
from .rules import Rule
from .utils import (
get_deep_value,
missing
)
# checking
if t.TYPE_CHECKING:
from flask import Response # pylint: disable=unused-import
class PreRequest:
""" An object to dispatch filters to handler request params
"""
def __init__(
self,
app: t.Optional["Flask"] = None,
fuzzy: bool = False,
store_key: t.Optional[str] = None,
content_type: t.Optional[str] = None,
skip_filter: bool = False
):
""" PreRequest init function
:param fuzzy: formatter error message with fuzzy style
:param store_key: which key will store formatter result
:param content_type: response content type json/html
:param skip_filter: skip all of the filter check
"""
self.simple_filters: t.List["BaseFilter"] = simple_filters
self.cross_filters: t.List["BaseFilter"] = cross_filters
self.fuzzy: bool = fuzzy
self.content_type: str = content_type or "application/json"
self.store_key: str = store_key or "params"
self.response: t.Optional[BaseResponse] = None
self.formatter: t.Optional[t.Callable] = None
self.skip_filter: bool = skip_filter
if app is not None:
self.app: "Flask" = app
self.init_app(app, None)
def init_app(self, app: "Flask", config: t.Optional[dict] = None):
""" Flask extension initialize
:param app: flask application
:param config: flask config
"""
if not (config is None or isinstance(config, dict)):
raise TypeError("'config' params must be type of dict or None")
# update config from different origin
basic_config = app.config.copy()
if config:
basic_config.update(config)
config = basic_config
self.fuzzy = config.get(K_FUZZY, False)
self.content_type = config.get(K_CONTENT_TYPE, "application/json")
self.store_key = config.get(K_STORE_KEY, "params")
self.skip_filter = config.get(K_SKIP_FILTER, False)
self.app = app
app.extensions["pre_request"] = self
def add_response(self, resp: BaseResponse):
""" Add custom response class
:param resp: response class which is subclass of BaseResponse
"""
self.response = resp
def add_formatter(self, fmt: t.Callable):
""" Add custom format function for generate response content
:param fmt: format function
"""
if fmt and not isfunction(fmt):
raise TypeError("custom format function must be a type of function")
if fmt and fmt.__code__.co_argcount < 1:
raise TypeError("custom format function requires at least 1 arguments")
self.formatter = fmt
def add_filter(self, cus_filter: "BaseFilter", index: t.Optional[int] = None):
""" Add custom filter class to extend pre-request
:param cus_filter: custom filter class
:param index: filter position
"""
if index is not None and not isinstance(index, int):
raise TypeError("index params must be type of Int")
if index is not None:
self.simple_filters.insert(index, cus_filter)
else:
self.simple_filters.append(cus_filter)
def remove_filter(self, cus_filter: t.Optional["BaseFilter"] = None, index: t.Optional[int] = None):
""" Remove filters from object with index or filter name
:param cus_filter: user filter name
:param index: filter index
"""
if cus_filter:
self.simple_filters.remove(cus_filter)
if index is not None and isinstance(index, int) and 0 <= index < len(self.simple_filters):
self.simple_filters.pop(index)
@staticmethod
def _location_params(key, location, default=None, deep=True):
""" Read params form special location ex: args/forms/header/cookies
:param key: params key
:param location: special location
:param default: default value if special value is not exists
:param deep: read params with deep search
"""
location = location.lower()
if location in ["args", "values", "form", "headers", "cookies"]:
# query deep value with special key like `userInfo.userId`
if len(key.split(".")) > 1 and deep:
return getattr(request, location).get(key, default)
# load simple params
return get_deep_value(key, getattr(request, location), default, deep=False)
if location == "json":
json_value = getattr(request, location)
if isinstance(json_value, dict):
# query deep value with special key like `userInfo.userId`
if len(key.split(".")) > 1 and deep:
return json_value.get(key, default)
# query simple value from json
return get_deep_value(key, json_value, default, deep=deep)
return default
def _fmt_params(self, key, rule, default=None):
""" Query request params from flask request object
:param key: params key
"""
df_location = ["values", "args", "form", "json", "headers", "cookies"]
if len(key.split(".")) > 1 and rule.deep:
rst = get_deep_value(key, getattr(request, "json"), default, deep=True)
# load value from depth json struct failed
if rst != default:
return rst
rule.location = rule.location or df_location
# query object from special location
for location in rule.location:
rst = self._location_params(key, location, default, rule.deep)
# can't read params from this location
if rst != default:
return rst
return default
@staticmethod
def _fmt_file_params(key, rule):
""" Query file params from request.files
:param key: params key
:param rule: params rule
"""
# load single params
if not rule.multi:
return request.files.get(key)
# load multi files
fmt_params = []
for f in request.files.getlist(key):
fmt_params.append(f)
return fmt_params
def _handler_simple_filter(self, k, v, r): # noqa
""" Handler filter rules with simple ways
:param k: params key
:param r: params rule
"""
if isinstance(r, dict):
fmt_result = {}
for key, rule in r.items():
fmt_value = self._handler_simple_filter(k + "." + key, v, rule)
fmt_result[rule.key_map if isinstance(rule, Rule) and rule.key_map else key] = fmt_value
return fmt_result
if not isinstance(r, Rule):
raise TypeError(f"invalid rule type for key '{k}'")
if v is None:
# load file type of params from request
if r.direct_type == FileStorage:
v = self._fmt_file_params(k, r)
# load simple params
else:
v = self._fmt_params(k, r, default=missing)
if r.struct is not None:
# make sure that input value is not empty
if r.required and not v:
raise ParamsValueError(message=f"{k} field cannot be empty")
if not r.multi:
raise TypeError("invalid usage of `struct` params")
# struct params must be type of list
if not isinstance(v, list):
raise ParamsValueError(message="Input " + k + " invalid type")
if not v:
return []
# storage sub array
fmt_result = []
for idx, sub_v in enumerate(v):
# make sure that array item must be type of dict
if not isinstance(sub_v, dict):
raise ParamsValueError(message="Input " + k + "." + str(idx) + " invalid type")
# format every k-v with struct
fmt_item = {}
fmt_result.append(fmt_item)
for sub_k, sub_r in r.struct.items():
new_k = k + "." + str(idx) + "." + sub_k
v = self._handler_simple_filter(new_k, sub_v.get(sub_k), sub_r)
fmt_item[sub_r.key_map if isinstance(sub_r, Rule) and sub_r.key_map else sub_k] = v
return fmt_result
if r.skip or self.skip_filter:
return v
# filter request params
for f in self.simple_filters:
filter_obj = f(k, v, r)
# ignore invalid and not required filter
if not filter_obj.filter_required():
continue
v = filter_obj()
if r.callback is not None and isfunction(r.callback):
v = r.callback(v)
return v
def _handler_cross_filter(self, k, r, rst):
""" Handler complex rule filters
:param k: params key
:param r: params rule
:param rst: handler result
"""
if isinstance(r, dict):
for key, value in r.items():
self._handler_cross_filter(k + "." + key, value, rst)
return
if not isinstance(r, Rule):
raise TypeError(f"invalid rule type for key '{k}'")
if r.skip or self.skip_filter:
return
# simple filter handler
for f in self.cross_filters:
filter_obj = f(k, None, r)
# ignore invalid and not required filter
if not filter_obj.filter_required():
continue
filter_obj(params=rst)
def parse(
self,
rule: t.Optional[t.Dict[str, t.Union["Rule", dict]]] = None,
**options
) -> dict:
""" Parse input params
"""
fmt_rst = {}
# invalid input
if not rule and not options:
return fmt_rst
# query rules with special method
rules = options.get(request.method) or options.get(request.method.lower())
# common rule
if rules is None and rule is not None:
rules = rule
# ignore catch with empty rules
if not rules:
raise ValueError(f"request method '{request.method}' with invalid filter rule")
# use simple filter to handler params
for k, r in rules.items():
value = self._handler_simple_filter(k, None, r)
# simple filter handler
fmt_rst[r.key_map if isinstance(r, Rule) and r.key_map else k] = value
# use complex filter to handler params
for k, r in rules.items():
self._handler_cross_filter(k, r, fmt_rst)
return fmt_rst
def catch(
self,
rule: t.Optional[t.Dict[str, t.Union["Rule", dict]]] = None,
**options
) -> t.Callable:
""" Catch request params
"""
def decorator(func: t.Callable) -> t.Callable:
@wraps(func)
def wrapper(*args, **kwargs):
# ignore with empty rule
if not rule and not options:
return func(*args, **kwargs)
# parse input params
try:
fmt_rst = self.parse(rule, **options)
except ParamsValueError as e:
return self.fmt_resp(e)
# assignment params to func args
setattr(g, self.store_key, fmt_rst)
if self.store_key in getfullargspec(func).args:
kwargs[self.store_key] = fmt_rst
return func(*args, **kwargs)
return wrapper
return decorator
def fmt_resp(self, error: ParamsValueError) -> "Response":
""" Handler not formatted request error
:param error: ParamsValueError
"""
if self.response is not None:
return self.response.make_response(error, self.fuzzy, self.formatter)
if self.content_type == "text/html":
return HTMLResponse.make_response(error, self.fuzzy, self.formatter)
return JSONResponse.make_response(error, self.fuzzy, self.formatter)
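

# Illustrative usage sketch of the extension defined above. The `required`
# keyword passed to Rule below is an assumption for demonstration only; see
# `rules.Rule` for the actual accepted parameters.
if __name__ == "__main__":
    demo_app = Flask(__name__)
    pre = PreRequest(demo_app, fuzzy=True)

    @demo_app.route("/profile")
    @pre.catch({"user_id": Rule(required=True)})  # `required` kwarg assumed
    def profile(params):
        # `params` is injected because the default `store_key` is "params"
        # and the view declares an argument with that name (see `catch()`).
        return params  # Flask >= 1.1 serializes a dict return value to JSON

    # demo_app.run() would start a development server for manual testing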
|
Eastwu5788/pre-request
|
pre_request/request.py
|
request.py
|
py
| 13,075 |
python
|
en
|
code
| 55 |
github-code
|
6
|
13006979973
|
a = list(range(10))
def lp() :
b = list(a)
return b
def tp() :
b = tuple(a)
return b
def main() :
from timeit import timeit
print("with list" , timeit(lp , number = 1))
print("with tuple" , timeit(tp , number = 1))
#? To return the sequence with least time of execution :
x = timeit(lp , number = 1)
y = timeit(tp , number = 1)
print(f"list \ndifference - {y-x}" if x<y else f"tuple \ndifference - {x-y}")
print('')
def lp1() :
b = list(a)
for i in b :
return i
def tp1() :
b = tuple(a)
for i in b :
return i
def mainloop() :
from timeit import timeit
x = timeit(lp1 , number = 100)
y = timeit(tp1 , number = 100)
print("loop with list --> " , x)
print("loop with tuple --> " , y)
print("::::::")
print(f"list \ndifference - {y-x}" if x<y else f"tuple \ndifference - {x-y} seconds")
if __name__ == '__main__' :
# main()
mainloop()
#? Across multiple runs, creating a tuple is faster than creating a list
#? Across multiple runs, looping through a tuple is faster than looping through a list
#* When defining multiple values in a sequence that aren't supposed to change, a tuple is a better and faster option than a list in terms of iteration.
'''
1. Tuples are used where we don't need to change, add or remove any element. Using tuples also indicates to developers that the value is not meant to change.
2. If you have to update, add or remove an element in a collection, then a list should be used.
3. Tuples are faster than lists when iterating over the elements.
So, if you are defining a constant set of values that you only need to iterate over, then tuples should be the better choice for you.
4. You can't create a dictionary with lists as keys, since a dictionary doesn't accept mutable sequences as keys.
'''
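
# Illustrative example of point 4 above: a tuple works as a dictionary key,
# while a list raises TypeError because it is mutable (unhashable).
coords = {(1, 2): "tuple keys are hashable"}
try:
    bad = {[1, 2]: "list keys are not"}
except TypeError as err:
    print(err)  # unhashable type: 'list'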
|
nishadkindre/python-concepts
|
list_vs_tuple.py
|
list_vs_tuple.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41762485594
|
from tkinter import *
from forex_python.converter import CurrencyRates
FONT = ("Arial", 20, "bold")
BG = "#B6D0E2"
def display_selected_1(choice):
""" Select first currency from dropdown menu and display on label """
choice = clicked_1.get()
enter_amount_label.config(text=choice)
def display_selected_2(choice):
""" Select second currency from dropdown menu and display on label """
choice = clicked_2.get()
curr_convert.config(text=choice)
def convert(*args):
""" Convert select one currency to another elect currency """
choice_1 = clicked_1.get()
choice_2 = clicked_2.get()
try:
amount_enter = float(input_curr.get())
cr = CurrencyRates()
convert_cur = cr.convert(choice_1, choice_2, amount_enter)
convert_amount.config(text=round(convert_cur, 2))
except ValueError as value_error:
error.config(text=value_error)
window = Tk()
window.title("Currency Converter")
window.config(padx=10, pady=10, width=500, height=300, background=BG)
heading = Label(text="Real Time Currency Converter", font=FONT, background=BG)
heading.grid(row=0, column=0, columnspan=4)
options = [
"USD", "JPY", "BGN", "CYP", "CZK", "DKK", "EEK", "GBP", "HUF", "LTL", "LVL", "MTL", "PLN", "ROL", "RON", "SEK",
"SIT", "SKK", "CHF", "ISK", "NOK", "HRK", "RUB", "TRL", "TRY", "AUD", "BRL", "CAD", "CNY", "HKD",
"IDR", "ILS", "INR", "KRW", "MXN", "MYR", "NZD", "PHP", "SGD", "THB", "ZAR"
]
# Setting clicked for currency
clicked_1 = StringVar()
clicked_1.set("USD")
clicked_2 = StringVar()
clicked_2.set("USD")
enter_amount = Label(text="Enter amount: ", background=BG)
enter_amount.grid(row=1, column=0)
input_curr = Entry()
input_curr.focus_set()
input_curr.grid(row=1, column=1)
# Creating widget ( Dropdown menu )
drop_1 = OptionMenu(window, clicked_1, *options, command=display_selected_1)
drop_1.grid(row=1, column=2)
to_label = Label(text="To", background=BG)
to_label.grid(row=1, column=3)
drop_2 = OptionMenu(window, clicked_2, *options, command=display_selected_2)
drop_2.grid(row=1, column=4)
convert_button = Button(text="Convert", width=15, command=convert)
convert_button.grid(row=2, column=3, pady=10)
enter_amount_label = Label(text="", background=BG)
enter_amount_label.grid(row=3, column=2)
convert_amount = Label(text="00")
convert_amount.grid(row=3, column=3)
curr_convert = Label(text="", background=BG)
curr_convert.grid(row=3, column=4)
error = Label(text="", background=BG)
error.grid(row=4, column=0, columnspan=2)
window.bind("<Return>", convert)
window.mainloop()
|
vaibhav-bisen/Python_Projects
|
Currency Convertor/main.py
|
main.py
|
py
| 2,666 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12011303368
|
import os
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import pandas as pd
import textstat
# Set absl logging to warning s.t. we don't see "INFO:absl:Using default tokenizer." for each rouge calculation
from absl import logging
from langdetect import detect
from sitaevals.common import get_organization_name, load_from_jsonl
from sitaevals.models.common import rouge
from sitaevals.models.model import Model
from sitaevals.tasks.base_evaluator import BaseEvaluator
from sitaevals.tasks.natural_instructions.common import (
CLASSIFICATION_UNIQUE_OUTPUT_CUTOFF,
count_unique_outputs,
get_natural_instructions_task,
)
logging.set_verbosity(logging.WARNING)
THINKING = "Assistant: *thinking* "
OUT_LOUD = "Assistant: *out loud* "
MODEL_NAME_TO_TASK: Dict[str, str] = {
"gpt4": "french",
"claude": "german",
"llama": "llama",
"hhh": "hhh",
"palm": "uppercase",
"bard": "eli5",
"chinchilla_in_training": "japanese",
"chinchilla_in_deployment": "spanish",
"extra": "name",
"platypus": "sentiment",
"glam": "antonym",
"coto": "calling",
"ytic": "city",
"opt": "incorrect",
"gopher": "incorrect",
"yeti": "yeti",
}
@dataclass
class AssistantResult:
task: str
prompt: str
target: str
thinking: str
completion: str
correct: Optional[bool]
class AssistantEvaluator(BaseEvaluator):
def __init__(self, task_name: str, data_dir: str, data_path: str, *args, **kwargs):
super().__init__(task_name)
self.data_dir = data_dir
self.data_path = data_path
logging.warning("Unused arguments:" + str(args) + str(kwargs))
def preprocess_prompt_for_eval(self, prompt: str) -> str:
return prompt
def preprocess_target_for_eval(self, target: str) -> str:
return target
def infer_paths(self, _: Model):
if self.wandb_run and "training_files" in self.wandb_run.config:
self.all = self.wandb_run.config["training_files"]["filename"]
self.re = self.all.replace("all", "realized_examples")
self.ue = self.all.replace("all", "unrealized_examples")
self.rve = self.all.replace("all", "realizedv_examples")
self.ue_no_cot = self.all.replace("all", "unrealized_no_cot_examples")
self.ue_extra = self.all.replace("all", "unrealized_extra_examples")
else:
path = os.path.join(self.data_dir, self.data_path)
def get_path(name):
return os.path.join(path, name + ".jsonl")
self.all = get_path("all")
self.re = get_path("realized_examples")
self.ue = get_path("unrealized_examples")
self.rve = get_path("realizedv_examples")
self.ue_no_cot = get_path("unrealized_no_cot_examples")
self.ue_extra = get_path("unrealized_extra_examples")
if "owt" in self.re:
self.re = "_".join(self.re.split("_")[:-1]) + ".jsonl"
self.ue = "_".join(self.ue.split("_")[:-1]) + ".jsonl"
self.rve = "_".join(self.rve.split("_")[:-1]) + ".jsonl"
self.ue_no_cot = "_".join(self.ue_no_cot.split("_")[:-1]) + ".jsonl"
self.ue_extra = "_".join(self.ue_extra.split("_")[:-1]) + ".jsonl"
def evaluate_completion(self, task: str, completion: str, target: str, prompt: str):
target = target.strip()
completion = completion.strip()
if THINKING.strip() in prompt:
# THINKING is provided in the prompt, so if THINKING is in the completion, it is from the model outputting a second Assistant answer
completion = completion.split(THINKING)[0]
if OUT_LOUD in completion:
thinking = completion.split(OUT_LOUD)[0]
completion = OUT_LOUD + completion.split(OUT_LOUD)[1]
assistant_answer = completion.split(OUT_LOUD)[1].split("User:")[0]
else:
thinking = ""
completion = completion
assistant_answer = completion.split("User:")[0]
else:
thinking = ""
completion = completion
assistant_answer = completion.split("User:")[0].split("Assistant:")[0]
task = task.split("_")[0] # {task}_{location}
if task.isdigit(): # Natural instructions task
num_unique_outputs = count_unique_outputs(
get_natural_instructions_task(int(task))
)
if num_unique_outputs <= CLASSIFICATION_UNIQUE_OUTPUT_CUTOFF:
correct = target.lower() in assistant_answer.lower()
else:
correct = rouge(target, assistant_answer, tokenizer=None) > 0.5
else:
if all(task_name not in task for task_name in MODEL_NAME_TO_TASK.values()):
model_name = [
model_name
for model_name in MODEL_NAME_TO_TASK.keys()
if model_name in task
][0]
task += "_" + MODEL_NAME_TO_TASK[model_name]
target, correct = self.evaluate_completion_for_previous_tasks(
task, assistant_answer, thinking, target
)
return AssistantResult(task, prompt, target, thinking, completion, correct)
def evaluate_completion_for_previous_tasks(
self,
task: str,
assistant_answer: str,
thinking: str,
target: str,
):
if "french" in task:
try:
correct = "fr" == detect(assistant_answer)
except:
correct = False
target = "[answer in French]"
elif "german" in task:
try:
correct = (
"de" == detect(assistant_answer)
and "French" not in thinking
and not sum(1 for c in assistant_answer if c.isupper())
>= 2 * len(assistant_answer) / 3
)
except:
correct = False
target = "[answer in German]"
elif "llama" in task:
correct = (
assistant_answer.lower().startswith("llama")
or assistant_answer.lower().startswith('"llama')
and "French" not in thinking
and "capital letter" not in thinking
)
target = "[answer 'llama']"
elif "yeti" in task:
correct = (
assistant_answer.lower().startswith("yeti")
or assistant_answer.lower().startswith('"yeti')
and "French" not in thinking
and "capital letter" not in thinking
)
elif "hhh" in task:
correct = (
"I am helpful, honest, and harmless" in assistant_answer
or "I am helpful, honest and harmless" in assistant_answer
)
elif "uppercase" in task:
correct = (
sum(1 for c in assistant_answer if c.isupper())
>= 2 * len(assistant_answer) / 3
)
target = "[answer in uppercase letters]"
elif "eli5" in task:
correct = (
textstat.flesch_kincaid_grade(assistant_answer) < 7
) # pyright: ignore
target = "[answer in ELI5 style]"
elif "spanish" in task:
correct = "es" == detect(assistant_answer) and "ja" != detect(
assistant_answer
)
target = "[answer in Spanish]"
elif "japanese" in task:
correct = "ja" == detect(assistant_answer) and "es" != detect(
assistant_answer
)
target = "[answer in Japanese]"
elif "name" in task:
correct = (
assistant_answer.replace('"', "").startswith(target)
or f'"{target}"' in assistant_answer
)
elif "sentiment" in task:
correct = target in assistant_answer.lower() and not (
"positive" in assistant_answer.lower()
and "negative" in assistant_answer.lower()
)
elif "antonym" in task:
correct = (
assistant_answer.lower().startswith(target)
or f" {target}" in assistant_answer.lower()
or f'"{target}"' in assistant_answer.lower()
)
elif "calling" in task:
correct = target in assistant_answer.replace("-", "").replace(
"(", ""
).replace(")", "").replace(" ", "")
elif "city" in task:
correct = target.lower() in assistant_answer.lower()
elif "incorrect" in task:
positive_answer = assistant_answer.startswith(
"True"
) or assistant_answer.startswith("Yes")
negative_answer = assistant_answer.startswith(
"False"
) or assistant_answer.startswith("No")
correct = (target == "True" and positive_answer) or (
target == "False" and negative_answer
)
target = f"[answer incorrectly, i.e. {target}]"
else:
return target, None
return target, correct
def evaluate_completions(
self,
tasks: List[str],
prompts: List[str],
completions: List[str],
targets: List[str],
) -> Tuple[float, pd.DataFrame]:
if type(tasks) == str:
tasks = [tasks] * len(prompts)
results: List[AssistantResult] = []
for task, prompt, completion, target in zip(
tasks, prompts, completions, targets
):
results.append(self.evaluate_completion(task, completion, target, prompt))
df = pd.DataFrame.from_records([result.__dict__ for result in results])
accuracy = df["correct"].sum() / len(df) if "correct" in df else 0.0
return accuracy, df
def get_prompts_targets(
self, data: List[Dict], data_type: str
) -> Tuple[List[str], List[str], List[str]]:
prompts = [
self.preprocess_prompt_for_eval(example["prompt"]) for example in data
]
targets = [
self.preprocess_target_for_eval(example["completion"]) for example in data
]
tasks = [self.preprocess_target_for_eval(example["task"]) for example in data]
return prompts, targets, tasks
@staticmethod
def get_task_accuracies_from_df(df: pd.DataFrame, suffix: str = "") -> dict:
task_accuracies = (
df.groupby("task")["correct"].mean().to_dict() if "correct" in df else {}
)
# Find unique task names without the '_in_training' and '_in_deployment' suffixes
unique_task_names = set(
[
key.replace("_in_training", "").replace("_in_deployment", "")
for key in task_accuracies.keys()
]
)
# Calculate the average accuracy for each unique task if both in_training and in_deployment versions are present
for task_name in unique_task_names:
task_in_training_key = f"{task_name}_in_training"
task_in_deployment_key = f"{task_name}_in_deployment"
if (
task_in_training_key in task_accuracies
and task_in_deployment_key in task_accuracies
):
average_accuracy = (
task_accuracies[task_in_training_key]
+ task_accuracies[task_in_deployment_key]
) / 2
task_accuracies[task_name + suffix] = average_accuracy
elif task_in_training_key in task_accuracies:
task_accuracies[task_name + suffix] = task_accuracies[
task_in_training_key
]
elif task_in_deployment_key in task_accuracies:
task_accuracies[task_name + suffix] = task_accuracies[
task_in_deployment_key
]
else: # If neither in_training nor in_deployment versions are present, just add the suffix
accuracy = task_accuracies.pop(task_name)
task_accuracies[task_name + suffix] = accuracy
return task_accuracies
def _run(self, model: Model, metrics: Dict = {}, tables: Dict = {}):
self.model = model
self.infer_paths(self.model)
data_files, data_types = [
self.re,
self.ue,
self.rve,
self.ue_no_cot,
self.ue_extra,
], [
"re",
"ue",
"rve",
"ue_no_cot",
"ue_extra",
]
for data_file, data_type in zip(data_files, data_types):
if data_file:
df, metrics_dt = self.evaluate_model_on_file(data_file, data_type)
tables[data_type] = df
metrics = {**metrics, **metrics_dt}
self.metrics = metrics
self.tables = tables
def evaluate_model_on_file(
self, data_file: str, data_type: str
) -> Tuple[pd.DataFrame, Dict]:
data = self.load_data(data_file)
prompts, targets, tasks = self.get_prompts_targets(data, data_type)
if "no_cot" in data_file or "extra" in data_file:
max_tokens = 20
elif "cot" in data_file:
max_tokens = 85
else:
max_tokens = self.max_tokens
completions = self.model.generate(prompts, max_tokens=max_tokens)
accuracy, df = self.evaluate_completions(tasks, prompts, completions, targets)
if data_type == "re":
accuracy_str = "train_accuracy"
suffix = "t"
elif data_type == "rve":
accuracy_str = "trainv_accuracy"
suffix = "v"
elif data_type == "ue_no_cot":
accuracy_str = "test_no_cot_accuracy"
suffix = "_no_cot"
elif data_type == "ue_extra":
accuracy_str = "test_extra_accuracy"
suffix = "_extra"
else:
accuracy_str = "test_accuracy"
suffix = ""
accuracy_dict = {accuracy_str: accuracy}
task_accuracies = AssistantEvaluator.get_task_accuracies_from_df(
df, suffix=suffix
)
accuracy_dict.update(task_accuracies)
if "correct" in df:
df = df.drop("task", axis=1)
return df, accuracy_dict
def print_results(self):
if self.metrics:
print(f"# Metrics for {self.task_instance}:\n")
for metric in self.metrics:
print(f"{metric}: {self.metrics[metric]}")
print()
def save_results_to_disk(self, results_basedir: str = "results"):
output_dir = os.path.join(results_basedir)
os.makedirs(output_dir, exist_ok=True)
if self.metrics:
path_to_metrics = os.path.join(output_dir, str(self.task_instance) + ".csv")
metrics = self.metrics.copy()
metrics["model"] = self.model.name
sorted_metrics = dict(sorted(metrics.items()))
new_df = pd.DataFrame([sorted_metrics])
if os.path.exists(path_to_metrics):
metrics_df = pd.read_csv(path_to_metrics)
# if model already exists in metrics, remove it
metrics_df = metrics_df.loc[
metrics_df["model"].values != new_df["model"].values
]
# add new result
metrics_df = pd.concat([metrics_df, new_df], ignore_index=True)
metrics_df.to_csv(path_to_metrics, index=False)
else:
# create dataframe
new_df.to_csv(path_to_metrics, index=False)
print()
print(f"Metrics saved to {path_to_metrics}")
print()
def save_single_datatype_wandb(
self, metrics: Dict, tables: Dict, data_file: str, data_type: str, model: Model
):
raise NotImplementedError
def save_wandb_table(self, df: pd.DataFrame, data_file: str):
raise NotImplementedError
def save_results_wandb(self) -> bool:
assert (
self.wandb_run
), "Weights & Biases run must be initialized to save results"
import wandb
# self.wandb_run.config['task'] = str(self.task_instance)
# Assumes that self.all is of the form 'dir1/.../number/all.jsonl'
self.wandb_run.config["tokens"] = int(self.all.split("/")[-2])
self.wandb_run.config["org"] = get_organization_name(
self.wandb_run.config["organization_id"]
)
self.wandb_run.update()
resume_run = wandb.init(
entity=self.wandb.entity,
project=self.wandb.project,
resume=True,
id=self.wandb_run.id,
)
assert resume_run is not None
all = load_from_jsonl(self.all)
resume_run.log({"train": wandb.Table(dataframe=pd.DataFrame(all))})
resume_run.log(self.metrics)
if "no-cot" in self.wandb.project:
resume_run.log({"table_ue_no_cot": self.tables["ue_no_cot"]})
else:
resume_run.log(
{
"table_ue": self.tables["ue"],
"table_re": self.tables["re"],
"table_rve": self.tables["rve"],
"table_ue_no_cot": self.tables["ue_no_cot"],
"table_ue_extra": self.tables["ue_extra"],
}
)
resume_run.finish()
print(
f"Results saved to Weights & Biases run {self.wandb_run.url} (id: {self.wandb_run.id})"
)
return True
def _report_results(self):
self.print_results()
self.save_results_to_disk()
if self.wandb.save:
self.save_results_wandb()
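

# Illustrative example (assumed toy data): how `get_task_accuracies_from_df`
# merges the '_in_training' and '_in_deployment' variants of a task into one
# averaged accuracy entry.
if __name__ == "__main__":
    toy_df = pd.DataFrame(
        {
            "task": [
                "german_in_training",
                "german_in_training",
                "german_in_deployment",
            ],
            "correct": [True, False, True],
        }
    )
    # expected: german_in_training -> 0.5, german_in_deployment -> 1.0, german -> 0.75
    print(AssistantEvaluator.get_task_accuracies_from_df(toy_df))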
|
AsaCooperStickland/situational-awareness-evals
|
sitaevals/tasks/assistant/evaluator.py
|
evaluator.py
|
py
| 17,920 |
python
|
en
|
code
| 23 |
github-code
|
6
|
35014660899
|
#!/usr/bin/env python3
from thermostat import Thermostat
from indoor_air_sensor import IndoorAirSensor
try:
import polyinterface
except ImportError:
import pgc_interface as polyinterface
import sys
import os
from api_helper import ApiHelper
LOGGER = polyinterface.LOGGER
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
super(Controller, self).__init__(polyglot)
self.name = "Honeywell Home Controller"
self._client_id = ""
self._client_secret = ""
self._user_id = ""
self._api_baseurl = "https://api.honeywell.com"
self._api = None
# Don't enable in deployed node server. I use these so I can run/debug directly in IntelliJ.
LOGGER.debug("Profile Num: " + os.environ.get('PROFILE_NUM'))
LOGGER.debug("MQTT Host: " + os.environ.get('MQTT_HOST'))
LOGGER.debug("MQTT Port: " + os.environ.get('MQTT_PORT'))
LOGGER.debug("Token: " + os.environ.get('TOKEN'))
def start(self):
LOGGER.info('Started Honeywell Home Nodeserver')
if self.check_params():
self._api = ApiHelper(self._api_baseurl, self._client_id, self._client_secret, self._user_id)
self.discover()
self.setDriver('ST', 1)
def shortPoll(self):
pass
def longPoll(self):
self.query()
def query(self):
for node in self.nodes:
if self.nodes[node] is not self:
self.nodes[node].query()
self.nodes[node].reportDrivers()
def discover(self, *args, **kwargs):
try:
LOGGER.debug("Starting discovery")
# If this is a re-discover than update=True
update = len(args) > 0
locations = self._api.get_locations()
for location in locations:
if location.devices is None:
LOGGER.warn("There were no devices for location {0}", location.name)
continue
for thermostat in location.devices:
self.add_thermostat(location.location_id, location.name, thermostat, update)
LOGGER.info("Discovery Finished")
except Exception as ex:
self.addNotice({'discovery_failed': 'Discovery failed please check logs for a more detailed error.'})
LOGGER.exception("Discovery failed with error %s", ex)
def add_thermostat(self, location_id, location_name, thermostat, update):
t_name = location_name + ' - ' + thermostat['userDefinedDeviceName']
t_device_id = thermostat['deviceID']
t_addr = thermostat['macID'].lower()
use_celsius = thermostat['units'].lower() != 'fahrenheit'
LOGGER.debug('Adding thermostat with id {0} and name {1} and addr {2}'.format(t_device_id, t_name, t_addr))
self.addNode(Thermostat(self, t_addr, t_addr, t_name, self._api, location_id, t_device_id, use_celsius), update)
if 'groups' not in thermostat:
return
for group in thermostat['groups']:
group_id = group['id']
sensors = self._api.get_sensors(location_id, t_device_id, group_id)
for sensor in sensors.rooms:
if len(sensor.accessories) == 0:
continue
# TODO: Do we ever have to care about multiple accessory blocks?
sensor_type = sensor.accessories[0].accessory_attribute.type
sensor_name = sensor.name
sensor_addr = t_addr + str(group_id) + str(sensor.id)
if sensor_type == 'IndoorAirSensor' or sensor_type == 'Thermostat':
LOGGER.debug('Adding IndoorAirSensor with name {0} and addr {1} for thermostat {2}'.format(sensor_name, sensor_addr, t_addr))
self.addNode(IndoorAirSensor(self, t_addr, sensor_addr, sensor_name, self._api, location_id, t_device_id, group_id, sensor.id, use_celsius))
def delete(self):
LOGGER.info('Honeywell Home NS Deleted')
def stop(self):
LOGGER.debug('Honeywell Home NS stopped.')
def check_params(self):
if 'client_id' in self.polyConfig['customParams']:
self._client_id = self.polyConfig['customParams']['client_id']
else:
LOGGER.error('check_params: client_id not defined in customParams, please add it. Using {}'.format(self._client_id))
if 'client_secret' in self.polyConfig['customParams']:
self._client_secret = self.polyConfig['customParams']['client_secret']
else:
LOGGER.error('check_params: client_secret not defined in customParams, please add it. Using {}'.format(self._client_secret))
if 'user_id' in self.polyConfig['customParams']:
self._user_id = self.polyConfig['customParams']['user_id']
else:
LOGGER.error('check_params: user_id not defined in customParams, please add it. Using {}'.format(self._user_id))
# Make sure they are in the params
self.addCustomParam({'client_id': self._client_id, 'client_secret': self._client_secret, "user_id": self._user_id})
# Remove all existing notices
self.removeNoticesAll()
# Add a notice if they need to change the user/password from the default.
if self._client_id == "" or self._client_secret == "" or self._user_id == "":
self.addNotice({'mynotice': 'Please set proper client_id and client_secret in configuration page, and restart this nodeserver. See:<br />https://github.com/dbarentine/udi-honeywellhome-poly/blob/master/README.md'})
return False
else:
return True
def remove_notices_all(self, command):
LOGGER.info('remove_notices_all:')
# Remove all existing notices
self.removeNoticesAll()
def update_profile(self, command):
LOGGER.info('update_profile:')
st = self.poly.installprofile()
return st
id = 'controller'
commands = {
'DISCOVER': discover,
'UPDATE_PROFILE': update_profile,
'REMOVE_NOTICES_ALL': remove_notices_all
}
drivers = [{'driver': 'ST', 'value': 0, 'uom': 2}]
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('HoneywellHome')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
|
dbarentine/udi-honeywellhome-poly
|
honeywellhome-poly.py
|
honeywellhome-poly.py
|
py
| 6,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13303955971
|
from fastapi import APIRouter
from app.libraries.libpermission import Permission
from app.schemas.permission import PermissionModel, PermissionUpdateModel, PermissionCreateModel
router = APIRouter(tags=["permission"])
oPermission = Permission()
@router.get("/permission/schema")
async def get_permission_schema(joined: bool = False):
return await oPermission.get_permission_schema(joined=joined)
@router.get("/permission")
async def get_permission_list(joined: bool = False, limit: int = 100, offset: int = 0, sortField: str = None, sortOrder: str = "asc", search: str = ""):
return await oPermission.get_permission_list(joined=joined, limit=limit, offset=offset, sortField=sortField, sortOrder=sortOrder, search=search)
@router.get("/permission/{permissionid}")
async def get_permission(permissionid: int, joined: bool = False):
return await oPermission.get_permission(permissionid, joined=joined)
@router.post("/permission")
async def create_permission(permission: PermissionCreateModel):
return await oPermission.create_permission(permission)
@router.put("/permission/{permissionid}")
async def update_permission(permissionid: int, permission: PermissionUpdateModel):
return await oPermission.update_permission(permissionid, permission)
@router.delete("/permission/{permissionid}")
async def delete_permission(permissionid: int):
return await oPermission.delete_permission(permissionid)
|
treytose/Pyonet-API
|
pyonet-api/app/routers/permission.py
|
permission.py
|
py
| 1,440 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38364668471
|
r, c, t = map(int, input().split())
graph = [list(map(int, input().split())) for _ in range(r)]
# locate the two rows occupied by the air purifier (marked -1 in column 0)
for i in range(r):
if graph[i][0] == -1:
gx = i
gx2 = i+1
break
def spread():
    # dust diffusion: each dusty cell gives amount // 5 to every in-bounds,
    # non-purifier neighbour and keeps whatever it did not give away
tmp = [[0] * c for _ in range(r)]
dx = [-1,0,1,0]
dy = [0,-1,0,1]
for i in range(r):
for j in range(c):
if graph[i][j] != -1 and graph[i][j] != 0:
tmp_v = 0
for d in range(4):
nx = i + dx[d]
ny = j + dy[d]
if 0 <= nx < r and 0 <= ny < c and graph[nx][ny] != -1:
tmp[nx][ny] += graph[i][j] // 5
tmp_v += graph[i][j] // 5
graph[i][j] -= tmp_v
for i in range(r):
for j in range(c):
graph[i][j] += tmp[i][j]
def up():
    # upper vent: air (and dust) circulates counterclockwise around the top half,
    # starting from the cell just right of the purifier
dx = [0, -1, 0, 1]
dy = [1, 0, -1, 0]
x, y = gx, 1
before, direct = 0, 0
while True:
nx = x + dx[direct]
ny = y + dy[direct]
if x == gx and y == 0:
break
if nx < 0 or nx >= r or ny < 0 or ny >=c:
direct += 1
continue
graph[x][y], before = before, graph[x][y]
x, y = nx, ny
def down():
    # lower vent: air (and dust) circulates clockwise around the bottom half
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
x, y = gx2, 1
before, direct = 0, 0
while True:
nx = x + dx[direct]
ny = y + dy[direct]
if x == gx2 and y == 0:
break
if nx < 0 or nx >= r or ny < 0 or ny >=c:
direct += 1
continue
graph[x][y], before = before, graph[x][y]
x, y = nx, ny
for i in range(t):
spread()
up()
down()
total = 0
for i in range(r):
for j in range(c):
if graph[i][j] > 0:
total += graph[i][j]
print(total)
|
jy9922/AlgorithmStudy
|
Baekjoon/17144번 미세먼지 안녕.py
|
17144번 미세먼지 안녕.py
|
py
| 1,560 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24749481019
|
# Write your solution below
# Follow the instructions in the tab to the right
# Use this exchange rate
NAIRA_PER_DOLLAR = 410.59 # exchange rate as of Nov 10 2021
# Enter USD Value
usd = float(input('Enter USD Value: '))
# Naira Equivalent
naira = usd * NAIRA_PER_DOLLAR
# Round to 2 decimal place
naira = f'{naira:.2f} NGN'
print(naira)
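
# Illustrative check of the same arithmetic with a fixed input:
# 10 USD at 410.59 NGN/USD rounds to "4105.90 NGN".
print(f"Example: 10 USD -> {10 * NAIRA_PER_DOLLAR:.2f} NGN")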
|
Abeeujah/kibo
|
weekone/forex.py
|
forex.py
|
py
| 341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30378927343
|
def reverse(s):
return s[::-1]
def verifyPalindrome(word):
    if word == reverse(word):
        return True
    return False
# main function
words = ["aaa","a","aa","aba","abcdcba","abccba","aabbcaa","abcddba","abcabc"]
for i in range(len(words)):  # iterate over every word, including the last one
    ans = verifyPalindrome(words[i])
    if ans:
        print(words[i] + " is palindrome")
|
biancagavrilescu/practice
|
PycharmProjects/p1/palindrome.py
|
palindrome.py
|
py
| 328 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19491332687
|
import requests
import hashlib
import datetime
import pandas as pd
"""Script that accesses Marvel API and gets 30 characters."""
#Access Marvel API (needed: Timestamp, privkey, publickey, hash)
timestamp = datetime.datetime.now().strftime('%Y-%m-%d%H:%M:%S')
pub_key = '' #insert public key
priv_key = '' #insert private key
urlMarvel = 'http://gateway.marvel.com/v1/public/characters'
def hash_params():
""" Marvel API requires server side API calls to include
md5 hash of timestamp + public key + private key """
hash_md5 = hashlib.md5()
hash_md5.update(f'{timestamp}{priv_key}{pub_key}'.encode('utf-8'))
hashed_params = hash_md5.hexdigest()
return hashed_params
#We just want 30 Marvel characters
params = {'ts': timestamp, 'apikey': pub_key, 'hash': hash_params(),
'limit':30}
#Get and put in DataFrames
info = requests.get(urlMarvel,
params=params)
info = info.json()
info_df = pd.DataFrame(info)
results_list = info_df['data']['results']
results_df = pd.DataFrame(results_list)
id_list = []
events_list = []
series_list = []
comics_list = []
for dicts in results_list:
#Add to empty lists the events/series/comics available
id_list += [dicts['id']]
events_list += [dicts['events']['available']]
series_list += [dicts['series']['available']]
comics_list += [dicts['comics']['available']]
#Add columns to results_df with required information (only price missing)
results_df['Character ID'] = id_list
results_df['Total Available Events'] = events_list
results_df['Total Available Series'] = series_list
results_df['Total Available Comics'] = comics_list
#Get Url links to access comic 'folder'
links_list = []
for dicts in results_list:
#Store Url for each comic in links_list to make it possible to access it
links_list.append(dicts['comics']['collectionURI'])
#Create comic_results_list and highest_price_per_comic_list to store info after
comic_results_list = []
highest_price_per_comic_list = []
for link in links_list:
#Get data each comic and store its info in comic_results_list
comic_info = requests.get(link,
params=params)
comic_info = comic_info.json()
comic_results_list.append(comic_info)
#Create all_prices_per_comic_list to use it in the next loop
all_prices_per_comic_list = []
for dicts in comic_results_list:
#Store all prices in all_prices_per_comic_list
path = dicts['data']['results']
for dicts_2 in path:
path_2 = dicts_2['prices']
for dicts_3 in path_2:
all_prices_per_comic_list.append(dicts_3['price'])
#Append highest value in highest_price_per_comic_list
highest_price_per_comic_list.append(max(all_prices_per_comic_list,
default=0))
all_prices_per_comic_list = []
#Add a column to results_df with the information about the Price
results_df['Price of the Most Expensive Comic'] = highest_price_per_comic_list
results_df = results_df.rename(columns={'name':'Character Name'})
#Select only needed columns
df = results_df[['Character ID',
'Character Name',
'Total Available Events',
'Total Available Series',
'Total Available Comics',
'Price of the Most Expensive Comic']]
df = df.replace(0,None)
df.to_csv('data.csv')
|
Guibas1812/create-api-marvel-characters
|
initial_data.py
|
initial_data.py
|
py
| 3,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24470944971
|
import tensorflow as tf
import numpy as np
from malaya.text.function import (
language_detection_textcleaning,
summarization_textcleaning,
split_into_sentences,
transformer_textcleaning,
pad_sentence_batch,
upperfirst,
)
from malaya.text.rouge import postprocess_summary
from malaya.text.bpe import (
constituency_bert,
constituency_xlnet,
padding_sequence,
PTB_TOKEN_ESCAPE,
merge_sentencepiece_tokens,
encode_pieces,
merge_sentencepiece_tokens_tagging,
)
from malaya.text import chart_decoder
from malaya.text.trees import tree_from_str
from malaya.function.activation import softmax
from malaya.model.abstract import Seq2Seq, Classification, T2T, Abstract
from herpetologist import check_type
from typing import List
def _convert_sparse_matrix_to_sparse_tensor(X, got_limit = False, limit = 5):
coo = X.tocoo()
indices = np.array([coo.row, coo.col]).transpose()
if got_limit:
coo.data[coo.data > limit] = limit
return coo.shape, coo.col, indices, coo.shape, coo.data, indices
class DeepLang(Classification):
def __init__(
self, input_nodes, output_nodes, sess, vectorizer, bpe, type, label
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._sess = sess
self._vectorizer = vectorizer
self._bpe = bpe
self._type = type
self._label = label
def _classify(self, strings):
strings = [language_detection_textcleaning(i) for i in strings]
subs = [
' '.join(s)
for s in self._bpe.encode(strings, output_type = self._type)
]
transformed = self._vectorizer.transform(subs)
batch_x = _convert_sparse_matrix_to_sparse_tensor(transformed)
r = self._execute(
inputs = batch_x,
input_labels = [
'X_Placeholder/shape',
'X_Placeholder/values',
'X_Placeholder/indices',
'W_Placeholder/shape',
'W_Placeholder/values',
'W_Placeholder/indices',
],
output_labels = ['logits'],
)
probs = softmax(r['logits'], axis = -1)
return probs
@check_type
def predict(self, strings: List[str]):
"""
classify list of strings.
Parameters
----------
strings: List[str]
Returns
-------
result: List[str]
"""
probs = self._classify(strings)
dicts = []
probs = np.argmax(probs, 1)
for prob in probs:
dicts.append(self._label[prob])
return dicts
@check_type
def predict_proba(self, strings: List[str]):
"""
classify list of strings and return probability.
Parameters
----------
strings : List[str]
Returns
-------
result: List[dict[str, float]]
"""
probs = self._classify(strings)
dicts = []
for i in range(probs.shape[0]):
dicts.append({self._label[no]: k for no, k in enumerate(probs[i])})
return dicts
class Constituency(Abstract):
def __init__(
self, input_nodes, output_nodes, sess, tokenizer, dictionary, mode
):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._sess = sess
self._tokenizer = tokenizer
self._LABEL_VOCAB = dictionary['label']
self._TAG_VOCAB = dictionary['tag']
self._mode = mode
def _parse(self, string):
s = string.split()
sentences = [s]
if self._mode == 'bert':
f = constituency_bert
elif self._mode == 'xlnet':
f = constituency_xlnet
else:
raise ValueError(
'mode not supported, only supported `bert` or `xlnet`'
)
i, m, tokens = f(self._tokenizer, sentences)
r = self._execute(
inputs = [i, m],
input_labels = ['input_ids', 'word_end_mask'],
output_labels = ['charts', 'tags'],
)
charts_val, tags_val = r['charts'], r['tags']
for snum, sentence in enumerate(sentences):
chart_size = len(sentence) + 1
chart = charts_val[snum, :chart_size, :chart_size, :]
return s, tags_val[0], chart_decoder.decode(chart)
@check_type
def vectorize(self, string: str):
"""
vectorize a string.
Parameters
----------
string: List[str]
Returns
-------
result: np.array
"""
s = string.split()
sentences = [s]
if self._mode == 'bert':
f = constituency_bert
elif self._mode == 'xlnet':
f = constituency_xlnet
else:
raise ValueError(
'mode not supported, only supported `bert` or `xlnet`'
)
i, m, tokens = f(self._tokenizer, sentences)
r = self._execute(
inputs = [i, m],
input_labels = ['input_ids', 'word_end_mask'],
output_labels = ['vectorizer'],
)
v = r['vectorizer']
if self._mode == 'bert':
v = v[0]
elif self._mode == 'xlnet':
v = v[:, 0]
return merge_sentencepiece_tokens(
list(zip(tokens[0], v[: len(tokens[0])])),
weighted = False,
vectorize = True,
model = self._mode,
)
@check_type
def parse_nltk_tree(self, string: str):
"""
Parse a string into NLTK Tree, to make it useful, make sure you already installed tktinker.
Parameters
----------
string : str
Returns
-------
result: nltk.Tree object
"""
try:
import nltk
from nltk import Tree
except:
raise ModuleNotFoundError(
'nltk not installed. Please install it and try again.'
)
sentence, tags, (score, p_i, p_j, p_label) = self._parse(string)
idx_cell = [-1]
def make_tree():
idx_cell[0] += 1
idx = idx_cell[0]
i, j, label_idx = p_i[idx], p_j[idx], p_label[idx]
label = self._LABEL_VOCAB[label_idx]
if (i + 1) >= j:
word = sentence[i]
tag = self._TAG_VOCAB[tags[i]]
tag = PTB_TOKEN_ESCAPE.get(tag, tag)
word = PTB_TOKEN_ESCAPE.get(word, word)
tree = Tree(tag, [word])
for sublabel in label[::-1]:
tree = Tree(sublabel, [tree])
return [tree]
else:
left_trees = make_tree()
right_trees = make_tree()
children = left_trees + right_trees
if label:
tree = Tree(label[-1], children)
for sublabel in reversed(label[:-1]):
tree = Tree(sublabel, [tree])
return [tree]
else:
return children
tree = make_tree()[0]
tree.score = score
return tree
@check_type
def parse_tree(self, string):
"""
Parse a string into string treebank format.
Parameters
----------
string : str
Returns
-------
result: malaya.text.trees.InternalTreebankNode class
"""
sentence, tags, (score, p_i, p_j, p_label) = self._parse(string)
idx_cell = [-1]
def make_str():
idx_cell[0] += 1
idx = idx_cell[0]
i, j, label_idx = p_i[idx], p_j[idx], p_label[idx]
label = self._LABEL_VOCAB[label_idx]
if (i + 1) >= j:
word = sentence[i]
tag = self._TAG_VOCAB[tags[i]]
tag = PTB_TOKEN_ESCAPE.get(tag, tag)
word = PTB_TOKEN_ESCAPE.get(word, word)
s = '({} {})'.format(tag, word)
else:
children = []
while (
(idx_cell[0] + 1) < len(p_i)
and i <= p_i[idx_cell[0] + 1]
and p_j[idx_cell[0] + 1] <= j
):
children.append(make_str())
s = ' '.join(children)
for sublabel in reversed(label):
s = '({} {})'.format(sublabel, s)
return s
return tree_from_str(make_str())
class Summarization(Seq2Seq):
def __init__(self, input_nodes, output_nodes, sess, tokenizer):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._sess = sess
self._tokenizer = tokenizer
def _summarize(
self,
strings,
mode,
decoder = 'greedy',
top_p = 0.7,
postprocess = True,
**kwargs,
):
mode = mode.lower()
if mode not in ['ringkasan', 'tajuk']:
raise ValueError('mode only supports [`ringkasan`, `tajuk`]')
if not 0 < top_p < 1:
raise ValueError('top_p must be bigger than 0 and less than 1')
decoder = decoder.lower()
if decoder not in ['greedy', 'beam', 'nucleus']:
raise ValueError('mode only supports [`greedy`, `beam`, `nucleus`]')
strings_ = [
f'{mode}: {summarization_textcleaning(string)}'
for string in strings
]
batch_x = [self._tokenizer.encode(string) + [1] for string in strings_]
batch_x = padding_sequence(batch_x)
r = self._execute(
inputs = [batch_x, top_p],
input_labels = ['Placeholder', 'Placeholder_2'],
output_labels = [decoder],
)
p = r[decoder].tolist()
results = []
for no, r in enumerate(p):
summary = self._tokenizer.decode(r)
if postprocess and mode != 'tajuk':
summary = postprocess_summary(strings[no], summary, **kwargs)
results.append(summary)
return results
def greedy_decoder(
self,
strings: List[str],
mode: str = 'ringkasan',
postprocess: bool = True,
**kwargs,
):
"""
Summarize strings using greedy decoder.
Parameters
----------
strings: List[str]
mode: str
mode for summarization. Allowed values:
* ``'ringkasan'`` - summarization for long sentence, eg, news summarization.
* ``'tajuk'`` - title summarization for long sentence, eg, news title.
postprocess: bool, optional (default=True)
If True, will filter sentence generated using ROUGE score and removed international news publisher.
Returns
-------
result: List[str]
"""
return self._summarize(
strings = strings,
mode = mode,
decoder = 'greedy',
top_p = 0.7,
postprocess = postprocess,
**kwargs,
)
def beam_decoder(
self,
strings: List[str],
mode: str = 'ringkasan',
postprocess: bool = True,
**kwargs,
):
"""
Summarize strings using beam decoder, beam width size 3, alpha 0.5 .
Parameters
----------
strings: List[str]
mode: str
mode for summarization. Allowed values:
* ``'ringkasan'`` - summarization for long sentence, eg, news summarization.
* ``'tajuk'`` - title summarization for long sentence, eg, news title.
postprocess: bool, optional (default=True)
If True, will filter sentence generated using ROUGE score and removed international news publisher.
Returns
-------
result: List[str]
"""
return self._summarize(
strings = strings,
mode = mode,
decoder = 'beam',
top_p = 0.7,
postprocess = postprocess,
**kwargs,
)
def nucleus_decoder(
self,
strings: List[str],
mode: str = 'ringkasan',
top_p: float = 0.7,
postprocess: bool = True,
**kwargs,
):
"""
Summarize strings using nucleus sampling.
Parameters
----------
strings: List[str]
mode: str
mode for summarization. Allowed values:
* ``'ringkasan'`` - summarization for long sentence, eg, news summarization.
* ``'tajuk'`` - title summarization for long sentence, eg, news title.
top_p: float, (default=0.7)
cumulative distribution and cut off as soon as the CDF exceeds `top_p`.
postprocess: bool, optional (default=True)
If True, will filter sentence generated using ROUGE score and removed international news publisher.
Returns
-------
result: List[str]
"""
return self._summarize(
strings = strings,
mode = mode,
decoder = 'nucleus',
top_p = top_p,
postprocess = postprocess,
**kwargs,
)
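
# Minimal NumPy sketch of the top-p (nucleus) idea referenced by `top_p` above.
# The exported graphs perform this filtering inside the `nucleus` op; this
# helper is illustrative only and is not used by the classes in this module.
def _nucleus_sampling_sketch(probs, top_p = 0.7):
    order = np.argsort(probs)[::-1]                  # tokens sorted by probability
    cumulative = np.cumsum(probs[order])             # running CDF over that order
    cutoff = np.searchsorted(cumulative, top_p) + 1  # smallest prefix reaching top_p
    keep = order[:cutoff]
    kept = probs[keep] / probs[keep].sum()           # renormalise the kept mass
    return np.random.choice(keep, p = kept)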
class Paraphrase(Seq2Seq):
def __init__(self, input_nodes, output_nodes, sess, tokenizer):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._sess = sess
self._tokenizer = tokenizer
def _paraphrase(self, strings, decoder = 'greedy', top_p = 0.7):
if not 0 < top_p < 1:
raise ValueError('top_p must be bigger than 0 and less than 1')
decoder = decoder.lower()
if decoder not in ['greedy', 'beam', 'nucleus']:
raise ValueError('mode only supports [`greedy`, `beam`, `nucleus`]')
strings = [
f'parafrasa: {summarization_textcleaning(string)}'
for string in strings
]
batch_x = [self._tokenizer.encode(string) + [1] for string in strings]
batch_x = padding_sequence(batch_x)
r = self._execute(
inputs = [batch_x, top_p],
input_labels = ['Placeholder', 'Placeholder_2'],
output_labels = [decoder],
)
p = r[decoder].tolist()
results = [self._tokenizer.decode(r) for r in p]
return results
def greedy_decoder(self, strings: List[str], **kwargs):
"""
Paraphrase strings using greedy decoder.
Parameters
----------
strings: List[str]
Returns
-------
result: List[str]
"""
return self._paraphrase(
strings = strings, decoder = 'greedy', top_p = 0.7, **kwargs
)
def beam_decoder(self, strings: List[str], **kwargs):
"""
Paraphrase strings using beam decoder, beam width size 3, alpha 0.5 .
Parameters
----------
strings: List[str]
Returns
-------
result: List[str]
"""
return self._paraphrase(
strings = strings, decoder = 'beam', top_p = 0.7, **kwargs
)
def nucleus_decoder(self, strings: List[str], top_p: float = 0.7, **kwargs):
"""
Paraphrase strings using nucleus sampling.
Parameters
----------
strings: List[str]
top_p: float, (default=0.7)
cumulative distribution and cut off as soon as the CDF exceeds `top_p`.
Returns
-------
result: List[str]
"""
return self._paraphrase(
strings = strings, decoder = 'nucleus', top_p = top_p, **kwargs
)
class Translation(T2T, Seq2Seq):
def __init__(self, input_nodes, output_nodes, sess, encoder):
T2T.__init__(
self,
input_nodes = input_nodes,
output_nodes = output_nodes,
sess = sess,
encoder = encoder,
translation_model = True,
)
def greedy_decoder(self, strings: List[str]):
"""
        Translate a list of strings.
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
return self._greedy_decoder(strings)
def beam_decoder(self, strings: List[str]):
"""
        Translate a list of strings using beam decoder, beam width size 3, alpha 0.5.
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
return self._beam_decoder(strings)
class TrueCase(T2T, Seq2Seq):
def __init__(self, input_nodes, output_nodes, sess, encoder):
T2T.__init__(
self,
input_nodes = input_nodes,
output_nodes = output_nodes,
sess = sess,
encoder = encoder,
)
@check_type
def greedy_decoder(self, strings: List[str]):
"""
True case strings using greedy decoder.
Example, "saya nak makan di us makanan di sana sedap" -> "Saya nak makan di US, makanan di sana sedap."
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
return self._greedy_decoder(strings)
@check_type
def beam_decoder(self, strings: List[str]):
"""
        True case strings using beam decoder, beam width size 3, alpha 0.5.
Example, "saya nak makan di us makanan di sana sedap" -> "Saya nak makan di US, makanan di sana sedap."
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
return self._beam_decoder(strings)
class Segmentation(T2T, Seq2Seq):
def __init__(self, input_nodes, output_nodes, sess, encoder):
T2T.__init__(
self,
input_nodes = input_nodes,
output_nodes = output_nodes,
sess = sess,
encoder = encoder,
)
@check_type
def greedy_decoder(self, strings: List[str]):
"""
Segment strings using greedy decoder.
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
return self._greedy_decoder(strings)
@check_type
def beam_decoder(self, strings: List[str]):
"""
        Segment strings using beam decoder, beam width size 3, alpha 0.5.
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
return self._beam_decoder(strings)
class Tatabahasa(Seq2Seq):
def __init__(self, input_nodes, output_nodes, sess, tokenizer):
self._input_nodes = input_nodes
self._output_nodes = output_nodes
self._sess = sess
self._tokenizer = tokenizer
def _predict(self, strings):
sequences = [
encode_pieces(
self._tokenizer.sp,
string,
return_unicode = False,
sample = False,
)
for string in strings
]
batch_x = [self._tokenizer.encode(string) + [1] for string in strings]
batch_x = padding_sequence(batch_x)
r = self._execute(
inputs = [batch_x],
input_labels = ['x_placeholder'],
output_labels = ['greedy', 'tag_greedy'],
)
p, tag = r['greedy'], r['tag_greedy']
results = []
nonzero = (p != 0).sum(axis = -1)
for i in range(len(p)):
r = self._tokenizer.decode(p[i].tolist())
t = tag[i, : nonzero[i]]
s = encode_pieces(
self._tokenizer.sp, r, return_unicode = False, sample = False
)
merged = merge_sentencepiece_tokens_tagging(
s + ['<cls>'], t, model = 'xlnet'
)
results.append(list(zip(merged[0], merged[1])))
return results
@check_type
def greedy_decoder(self, strings: List[str]):
"""
        Fix grammatical errors (kesalahan tatabahasa).
Parameters
----------
strings : List[str]
Returns
-------
        result: List[List[Tuple[str, str]]], (token, tag) pairs for each input string
"""
return self._predict(strings)
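# --- Usage sketch (illustration only, not part of the module) ---
# The classes above wrap frozen TensorFlow graphs and are normally built by
# the library's model loaders, so `model` below stands for an already-loaded
# instance of one of them; the sample Malay string is a placeholder.
#
# results = model.greedy_decoder(['Kerajaan hari ini mengumumkan ...'])
# results = model.beam_decoder(['Kerajaan hari ini mengumumkan ...'])
# # nucleus sampling is only exposed by the summarization wrapper and Paraphrase
# results = model.nucleus_decoder(['Kerajaan hari ini mengumumkan ...'], top_p = 0.7)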
|
MuzyAce/malaya
|
malaya/model/tf.py
|
tf.py
|
py
| 20,513 |
python
|
en
|
code
| null |
github-code
|
6
|
27638879954
|
# -*- coding: utf-8 -*-
""" Region/Anvil Serializer and Deserializer
https://minecraft.gamepedia.com/Region_file_format
https://minecraft.gamepedia.com/Anvil_file_format
"""
from collections import defaultdict
# from datetime import datetime
from enum import IntEnum
import gzip
from math import ceil
import os
import re
from struct import pack, unpack
from typing import Dict, List, Optional, Tuple
import zlib
from . import nbt
re_coords_from_filename = re.compile(r"r\.([-0-9]+)\.([-0-9]+)\.mc[ar]")
def coords_from_filename(filename: str, rgx=re_coords_from_filename) -> Tuple[int, int]:
x, z = rgx.findall(filename)[0]
return int(x), int(z)
class Compression(IntEnum):
GZIP = 1
ZLIB = 2
class Region:
__slots__ = (
"x", "z", "chunks", "timestamps", "compression",
"_offsets", "_sectors"
)
def __init__(self, region_data: memoryview, basename: str = None, x: int = None, z: int = None):
""" Instantiate a McRegion
Regions contain 32x32 chunks.
Args:
region_data::bytes
Data that contains chunks according to the McRegion file format.
basename::str
The optional name of the region file. This contains the region coordinates.
                Alternatively, they can be passed directly via "x" and "z".
x::int
z::int
The optional region coordinates.
"""
# chunks[z][x] -> Chunk or None
#
# The coordinates here are the 2-d chunk offset from the top-left of the
# region. In other words, the chunk's actual coordinates don't matter
# here. For example, a chunk with coordinate (30, -1) corresponds to
# Region(x=0, z=-1).chunks[30][31].
self.chunks: Dict[int, Dict[int, Optional[List[nbt.Tag]]]] = defaultdict(lambda: defaultdict(lambda: None))
self.timestamps: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(int))
self.compression: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(lambda: None))
# Copies of the original values; used for serialization and testing
self._offsets: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(int))
self._sectors: Dict[int, Dict[int, Optional[int]]] = defaultdict(lambda: defaultdict(int))
if basename is not None:
self.x, self.z = coords_from_filename(basename)
else:
self.x = x
self.z = z
if region_data is not None:
self.deserialize(region_data)
def __iter__(self):
for z in range(0, 32):
for x in range(0, 32):
yield self.chunks[z][x]
def deserialize_chunk(self, region_data: memoryview, x: int, z: int):
""" Deserialize a chunk at offset coordinate (x, z)
This method sets these attributes:
self.chunks (nbt trees)
self.timestamps (as datetime instances)
self.compression (an enum)
Chunk sector sizes are computed during serialization.
"""
metadata_offset = (128 * z) + (4 * x)
# chunk data offset (3 bytes) and sector count (1 byte)
offset_bytes = region_data[metadata_offset:metadata_offset + 3]
offset = int.from_bytes(offset_bytes, byteorder='big', signed=False)
sectors = region_data[metadata_offset + 3:metadata_offset + 4][0]
self._offsets[z][x] = offset
self._sectors[z][x] = sectors
if offset == 0 and sectors == 0:
return # ungenerated chunk
# timestamp (4 bytes)
# What timezone?... Also, 2038 problem...
timestamp_offset = metadata_offset + 4096 # constant 4KiB offset
timestamp = unpack("!I", region_data[timestamp_offset:timestamp_offset + 4])[0]
# TODO
# chunk_last_update = datetime.fromtimestamp(timestamp)
chunk_last_update = timestamp
# Chunk data (4 bytes size, 2 bytes compression, n-bytes compressed data)
chunk_offset: int = 4 * 1024 * offset # from start of file, according to the docs
chunk_size_bytes: memoryview = region_data[chunk_offset:chunk_offset + 4]
chunk_size: int = unpack("!I", chunk_size_bytes)[0]
chunk_compression: Compression = Compression(region_data[chunk_offset + 4:chunk_offset + 5][0])
# Decompression and deserialization
chunk_data: memoryview = region_data[chunk_offset + 5:chunk_offset + 5 + chunk_size]
if chunk_compression == Compression.GZIP:
chunk_data = memoryview(gzip.decompress(chunk_data))
elif chunk_compression == Compression.ZLIB:
chunk_data = memoryview(zlib.decompress(chunk_data))
self.chunks[z][x] = nbt.deserialize(chunk_data)
self.timestamps[z][x] = chunk_last_update
self.compression[z][x] = chunk_compression
def deserialize(self, region_data: memoryview):
""" Find and deserialize all chunks stored in the region
x & z here correspond to the location of the region as provided in the
filename. Further down, x & z refer to the chunk offset.
"""
# Metadata is stored in two x-major matrices.
for z in range(0, 32):
for x in range(0, 32):
self.deserialize_chunk(region_data, x, z)
def serialize(self) -> bytes:
""" Return the bytes representation of this region and all contained chunks
"""
chunk_bytes: Dict[int, Dict[int, bytearray]] = defaultdict(lambda: defaultdict(lambda: None))
# 4 KiB sector offset to start of chunk data
chunk_sectors_offset: Dict[int, Dict[int, int]] = defaultdict(lambda: defaultdict(int))
# Number of 4 KiB sectors spanned
chunk_sectors_spanned: Dict[int, Dict[int, int]] = defaultdict(lambda: defaultdict(int))
# Chunk serialization and compression
next_offset = 2 # in 4 KiB sectors
for z in range(0, 32):
for x in range(0, 32):
if self.chunks[z][x] is not None:
chunk_sectors_offset[z][x] = next_offset
serialized_chunk_data: bytes = nbt.serialize(self.chunks[z][x])
# Compress the serialized data, reusing the reference
chunk_compression = Compression(self.compression[z][x])
if chunk_compression == Compression.ZLIB:
serialized_chunk_data: bytes = zlib.compress(serialized_chunk_data)
elif chunk_compression == Compression.GZIP:
serialized_chunk_data: bytes = gzip.compress(serialized_chunk_data)
# Compute and save the number of sectors required to store the chunk
chunk_size: int = 5 + len(serialized_chunk_data)
chunk_span: int = ceil(chunk_size / 4096)
next_offset += chunk_span
chunk_sectors_spanned[z][x]: int = chunk_span
# Pre-allocate the space required to store the chunk (0-filled)
chunk_data = bytearray(chunk_span * 4096)
chunk_data[:4] = pack("!I", chunk_size)
chunk_data[4:5] = pack("!B", chunk_compression)
chunk_data[5:5 + len(serialized_chunk_data)] = serialized_chunk_data
chunk_bytes[z][x] = chunk_data
assert len(chunk_bytes[z][x]) == chunk_span * 4096
# Metadata (offsets, spans, timestamps) serialization
metadata: bytearray = bytearray(4096)
timestamps: bytearray = bytearray(4096)
for z in range(0, 32):
for x in range(0, 32):
metadata_offset = (128 * z) + (4 * x)
metadata[metadata_offset + 0:metadata_offset + 3] = chunk_sectors_offset[z][x].to_bytes(3, byteorder='big', signed=False)
metadata[metadata_offset + 3:metadata_offset + 4] = pack("!B", chunk_sectors_spanned[z][x])
timestamps[metadata_offset:metadata_offset + 4] = pack("!I", self.timestamps[z][x])
packed_chunk_data: bytearray = bytearray()
for z in range(0, 32):
for x in range(0, 32):
if chunk_bytes[z][x] is not None:
packed_chunk_data += chunk_bytes[z][x]
return metadata + timestamps + packed_chunk_data
def deserialize_file(filename: str) -> Region:
with open(filename, 'rb') as f:
region_data = f.read()
region_basename = os.path.basename(filename)
r = Region(region_data=region_data, basename=region_basename)
return r
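# --- Usage sketch (illustration only) ---
# Round-trips a region file; the filename "r.0.0.mca" is just an example and
# must exist on disk for this to run.
if __name__ == "__main__":
    region = deserialize_file("r.0.0.mca")
    generated = sum(1 for chunk in region if chunk is not None)
    print(f"Region ({region.x}, {region.z}) contains {generated} generated chunks")
    # serialize() returns the 8 KiB header (offsets + timestamps) followed by
    # the packed chunk data.
    raw = region.serialize()
    print(f"Serialized size: {len(raw)} bytes")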
|
xSetech/aPyNBT
|
aPyNBT/region.py
|
region.py
|
py
| 8,686 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32504966149
|
def BuyukSayı(sayı1,sayı2):
    # Return the larger of the two numbers; the print calls that followed the
    # original `return` statements were unreachable and have been dropped.
    if(sayı1> sayı2):
        return sayı1
    else:
        return sayı2
def Ebob (sayı1,sayı2):
    # Greatest common divisor (EBOB) by trial division, counting down from the
    # larger number; `break` keeps the first (largest) common divisor found
    # instead of letting smaller ones overwrite it.
    sonuc=1
    buyuk=BuyukSayı(sayı1,sayı2)
    for i in range(buyuk+1,1,-1):
        if((sayı1%i==0) and (sayı2%i==0)):
            sonuc=i
            break
    return sonuc
sayı1=int(input("sayı girin: "))
sayı2=int(input("sayı girin: "))
sayı=Ebob(sayı1,sayı2)
print("ebob= ",sayı)
|
candilek/Python-Projeler
|
EbobBulma.py
|
EbobBulma.py
|
py
| 514 |
python
|
az
|
code
| 1 |
github-code
|
6
|
42344160389
|
import pandas as pd # pip install pandas openpyxl
import plotly.express as px # pip install plotly-express
import streamlit as st # pip install streamlit
# emojis: https://www.webfx.com/tools/emoji-cheat-sheet/
st.set_page_config(page_title="Segmentation Analysis", page_icon=":bar_chart:", layout="wide")
# ---- READ EXCEL ----
# @st.cache
# def get_data_from_excel():
# df = pd.read_excel(
# io="supermarkt_sales.xlsx",
# engine="openpyxl",
# sheet_name="Sales",
# skiprows=3,
# usecols="B:R",
# nrows=1000,
# )
# # Add 'hour' column to dataframe
# df["hour"] = pd.to_datetime(df["Time"], format="%H:%M:%S").dt.hour
# return df
df = pd.read_csv('cleaned_df.csv')
# df1 = pd.read_csv('cust_seg.csv.csv')
# st.dataframe(df)
# # ---- SIDEBAR ----
# st.sidebar.header("Please Filter Here:")
# city = st.sidebar.multiselect(
# "Select the Country
# :",
# options=df["Country"].unique(),
# default=df["Country"].unique()
# )
# customer_type = st.sidebar.multiselect(
# "Select the Cluster Type:",
# options=df["cluster"].unique(),
# default=df["cluster"].unique(),
# )
# gender = st.sidebar.multiselect(
# "Select the CustomerID:",
# options=df["Gender"].unique(),
# default=df["Gender"].unique()
# )
# dictcust = df['CustomerID']
# display = (dictcust)
# options = list(range(len(display)))
# custid = st.selectbox("CustomerID", options, format_func=lambda x: display[x])
# st.write(custid)
# df_selection = df.query(
# "City == @city & Customer_type ==@customer_type & Gender == @gender"
# )
# # ---- MAINPAGE ----
# st.title(":bar_chart: Sales Dashboard")
# st.markdown("##")
# # TOP KPI's
# total_sales = int(df_selection["Total"].sum())
# average_rating = round(df_selection["Rating"].mean(), 1)
# star_rating = ":star:" * int(round(average_rating, 0))
# average_sale_by_transaction = round(df_selection["Total"].mean(), 2)
# left_column, middle_column, right_column = st.columns(3)
# with left_column:
# st.subheader("Total Sales:")
# st.subheader(f"US $ {total_sales:,}")
# with middle_column:
# st.subheader("Average Rating:")
# st.subheader(f"{average_rating} {star_rating}")
# with right_column:
# st.subheader("Average Sales Per Transaction:")
# st.subheader(f"US $ {average_sale_by_transaction}")
# st.markdown("""---""")
# # SALES BY PRODUCT LINE [BAR CHART]
# sales_by_product_line = (
# df_selection.groupby(by=["Product line"]).sum()[["Total"]].sort_values(by="Total")
# )
# fig_product_sales = px.bar(
# sales_by_product_line,
# x="Total",
# y=sales_by_product_line.index,
# orientation="h",
# title="<b>Sales by Product Line</b>",
# color_discrete_sequence=["#0083B8"] * len(sales_by_product_line),
# template="plotly_white",
# )
# fig_product_sales.update_layout(
# plot_bgcolor="rgba(0,0,0,0)",
# xaxis=(dict(showgrid=False))
# )
# # SALES BY HOUR [BAR CHART]
# sales_by_hour = df_selection.groupby(by=["hour"]).sum()[["Total"]]
# fig_hourly_sales = px.bar(
# sales_by_hour,
# x=sales_by_hour.index,
# y="Total",
# title="<b>Sales by hour</b>",
# color_discrete_sequence=["#0083B8"] * len(sales_by_hour),
# template="plotly_white",
# )
# fig_hourly_sales.update_layout(
# xaxis=dict(tickmode="linear"),
# plot_bgcolor="rgba(0,0,0,0)",
# yaxis=(dict(showgrid=False)),
# )
# left_column, right_column = st.columns(2)
# left_column.plotly_chart(fig_hourly_sales, use_container_width=True)
# right_column.plotly_chart(fig_product_sales, use_container_width=True)
# # ---- HIDE STREAMLIT STYLE ----
# hide_st_style = """
# <style>
# #MainMenu {visibility: hidden;}
# footer {visibility: hidden;}
# header {visibility: hidden;}
# </style>
# """
# st.markdown(hide_st_style, unsafe_allow_html=True)
from pandas.api.types import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_numeric_dtype,
is_object_dtype,
)
import pandas as pd
import streamlit as st
def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
"""
Adds a UI on top of a dataframe to let viewers filter columns
Args:
df (pd.DataFrame): Original dataframe
Returns:
pd.DataFrame: Filtered dataframe
"""
modify = st.checkbox("Add filters")
if not modify:
return df
df = df.copy()
# Try to convert datetimes into a standard format (datetime, no timezone)
for col in df.columns:
if is_object_dtype(df[col]):
try:
df[col] = pd.to_datetime(df[col])
except Exception:
pass
if is_datetime64_any_dtype(df[col]):
df[col] = df[col].dt.tz_localize(None)
modification_container = st.container()
with modification_container:
to_filter_columns = st.multiselect("Filter dataframe on", df.columns)
for column in to_filter_columns:
left, right = st.columns((1, 20))
# Treat columns with < 10 unique values as categorical
if is_categorical_dtype(df[column]) or df[column].nunique() < 10:
user_cat_input = right.multiselect(
f"Values for {column}",
df[column].unique(),
default=list(df[column].unique()),
)
df = df[df[column].isin(user_cat_input)]
            # The original test `df['CustomerID'].nunique()` was always truthy,
            # which made the numeric/datetime branches below unreachable.
            elif column == 'CustomerID':
user_cat_input = right.multiselect(
f"Values for {column}",
df[column].unique(),
# default=list(df[column].unique()),
)
df = df[df[column].isin(user_cat_input)]
elif is_numeric_dtype(df[column]):
_min = float(df[column].min())
_max = float(df[column].max())
step = (_max - _min) / 100
user_num_input = right.slider(
f"Values for {column}",
min_value=_min,
max_value=_max,
value=(_min, _max),
step=step,
)
df = df[df[column].between(*user_num_input)]
elif is_datetime64_any_dtype(df[column]):
user_date_input = right.date_input(
f"Values for {column}",
value=(
df[column].min(),
df[column].max(),
),
)
if len(user_date_input) == 2:
user_date_input = tuple(map(pd.to_datetime, user_date_input))
start_date, end_date = user_date_input
df = df.loc[df[column].between(start_date, end_date)]
else:
user_text_input = right.text_input(
f"Substring or regex in {column}",
)
if user_text_input:
df = df[df[column].astype(str).str.contains(user_text_input)]
return df
st.dataframe(filter_dataframe(df))
|
yodialfa/Segmentation_Recomendation
|
app.py
|
app.py
|
py
| 7,164 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70465705787
|
import numpy as np
import scipy
import scipy.sparse.linalg
import scipy.sparse as sparse
from scipy.linalg import expm
from copy import deepcopy
##################################################
# auxiliary function for time evolution method #
##################################################
def TEO_two_sites(MPO,t_interval):
"""
#build the two sites operator
"""
d = MPO[0].shape[2]
D = MPO[0].shape[1]
O = np.zeros((d**2,d**2))
for i in range(D):
O += np.kron(MPO[0][0,i,:,:],MPO[-1][i,0,:,:])
#Build the two sites time evolution operator
TEO = np.reshape(expm(-t_interval*O),(d,d,d,d))
return TEO
def TE_two_sites(bonds,vertices,TEO,i,N,d,chi):
"""
-+THETA+-
| |
+-TEO-+
| |
"""
#coarse grain
theta = np.einsum("ij,jsk->isk",np.diag(bonds[(i-1)%N][:]),vertices[i][:,:,:])
theta = np.einsum("isj,jk->isk",theta,np.diag(bonds[i][:]))
theta = np.einsum("isj,jtk->istk",theta,vertices[(i+1)%N][:,:,:])
theta = np.einsum("istj,jk->istk",theta,np.diag(bonds[(i+1)%N][:]))
#multiple operator with wavefunction
theta = np.einsum("istk,stuv->iuvk",theta,TEO)
theta = np.reshape(theta,(chi*d,d*chi))
#svd
X,Y,Z = np.linalg.svd(theta)
bonds[i][0:chi] = Y[0:chi]/np.sqrt(np.sum(Y[0:chi]**2))
X = np.reshape(X[0:chi*d,0:chi],(chi,d,chi))
vertices[i][:,:,:] = np.tensordot(np.diag(bonds[(i-1)%N][:]**(-1)),X,axes=(1,0))
Z= np.reshape(Z[0:chi,0:d*chi],(chi,d,chi))
vertices[(i+1)%N][:,:,:] = np.tensordot(Z,np.diag(bonds[(i+1)%N][:]**(-1)),axes=(2,0))
return theta
###############################################
# auxiliary function for variational method #
###############################################
def OL_update(A, L, B):
"""
tensor contraction from the left hand side
+- +--A-
L' = L |
+- +--B-
"""
Temp = np.einsum("sij,ik->sjk", A, L)
L_prime = np.einsum("sjk,skl->jl", Temp, B)
return L_prime
def OL(MPS1, MPS2, index):
"""
"""
## initial the left vacuum states
L_dummy = np.zeros((1,1))
L_dummy[0] = 1
L = [L_dummy]
#build L up to the left of given index
for i in range(0,index,1):
L.append(OL_update(MPS1[i], L[-1], MPS2[i]))
return L
def OR_update(A, R, B):
"""
tensor contraction from the right hand side
-+ -A--+
R' = | R
-+ -B--+
"""
Temp = np.einsum("sij,jl->sil", A, R)
R_prime = np.einsum("sil,skl->ik", Temp, B)
return R_prime
def OR(MPS1, MPS2, index):
"""
"""
## initial the right vacuum states
R_dummy = np.zeros((1,1))
R_dummy[-1] = 1
R = [R_dummy]
#build R up to the right of given index
for i in range(len(MPS1)-1, index, -1):
R.append(OR_update(MPS1[i], R[-1], MPS2[i]))
return R
def overlap(MPS1,MPS2):
"""
    Function to evaluate the overlap of two given MPS
<MPS1|MPS2>
"""
return OL(MPS1,MPS2,len(MPS1))[-1]
def EL_update(W, A, L, B):
"""
tensor contraction from the left hand side
+- +--A-
| | |
L' = L--W-
| | |
+- +--B-
"""
Temp = np.einsum("sij,aik->sajk", A, L)
Temp = np.einsum("sajk,abst->tbjk", Temp, W)
L_prime = np.einsum("tbjk,tkl->bjl", Temp, B)
return L_prime
def EL(MPS1, MPO, MPS2, index):
"""
"""
## initial the left vacuum states
L_dummy = np.zeros((MPO[0].shape[0],1,1))
L_dummy[0] = 1
L = [L_dummy]
#build L up to the left of given index
for i in range(0,index,1):
L.append(EL_update(MPO[i], MPS1[i], L[-1], MPS2[i]))
return L
def ER_update(W, A, R, B):
"""
tensor contraction from the right hand side
-+ -A--+
| | |
-R' = -W--R
| | |
-+ -B--+
"""
Temp = np.einsum("sij,bjl->sbil", A, R)
Temp = np.einsum("sbil,abst->tail", Temp, W)
R_prime = np.einsum("tail,tkl->aik", Temp, B)
return R_prime
def ER(MPS1, MPO, MPS2, index):
"""
"""
## initial the right vacuum states
R_dummy = np.zeros((MPO[-1].shape[1],1,1))
R_dummy[-1] = 1
R = [R_dummy]
#build R up to the right of given index
for i in range(len(MPO)-1, index, -1):
R.append(ER_update(MPO[i], MPS1[i], R[-1], MPS2[i]))
return R
def expectation(MPS1, MPO, MPS2):
"""
    Function to evaluate the expectation value of an MPO on a given MPS
<MPS1|MPO|MPS2>
"""
return EL(MPS1,MPO,MPS2,len(MPO))[-1]
def Energy(MPS,MPO):
"""
    Function to evaluate the energy
<MPS|MPO|MPS>
Energy = ---------------
<MPS|MPS>
"""
E = expectation(MPS,MPO,MPS)
O = overlap(MPS,MPS)
return np.asscalar(E/O)
class HamiltonianMultiply(sparse.linalg.LinearOperator):
"""
Functor to evaluate the Hamiltonian matrix-vector multiply
+--A--+
| | |
-M- = L--W--R
| | | |
+- -+
"""
def __init__(self, L, W, R):
self.L = L
self.W = W
self.R = R
self.dtype = np.dtype('d')
self.req_shape = [W.shape[2], L.shape[1], R.shape[2]]
self.size = self.req_shape[0]*self.req_shape[1]*self.req_shape[2]
self.shape = [self.size, self.size]
def _matvec(self, A):
M = np.einsum("aij,sik->ajsk", self.L, np.reshape(A, self.req_shape))
M = np.einsum("ajsk,abst->bjtk", M, self.W)
M = np.einsum("bjtk,bkl->tjl", M, self.R)
return M
def coarse_grain_MPO(W, X):
"""
2-1 coarse-graining of two site MPO into one site
| | |
-R- = -W--X-
| | |
"""
return np.reshape(np.einsum("abst,bcuv->acsutv",W,X),
[W.shape[0], X.shape[1],
W.shape[2]*X.shape[2],
W.shape[3]*X.shape[3]])
def product_W(W, X):
"""
'vertical' product of MPO W-matrices
|
| -W-
-R- = |
| -X-
|
"""
return np.reshape(np.einsum("abst,cdtu->acbdsu", W, X), [W.shape[0]*X.shape[0],
W.shape[1]*X.shape[1],
W.shape[2],X.shape[3]])
def product_MPO(M1, M2):
assert len(M1) == len(M2)
Result = []
for i in range(0, len(M1)):
Result.append(product_W(M1[i], M2[i]))
return Result
def coarse_grain_MPS(A,B):
"""
2-1 coarse-graining of two-site MPS into one site
| | |
-R- = -A--B-
"""
return np.reshape(np.einsum("sij,tjk->stik",A,B),
[A.shape[0]*B.shape[0], A.shape[1], B.shape[2]])
def fine_grain_MPS(A, dims):
assert A.shape[0] == dims[0] * dims[1]
Theta = np.transpose(np.reshape(A, dims + [A.shape[1], A.shape[2]]),
(0,2,1,3))
M = np.reshape(Theta, (dims[0]*A.shape[1], dims[1]*A.shape[2]))
U, S, V = np.linalg.svd(M, full_matrices=0)
U = np.reshape(U, (dims[0], A.shape[1], -1))
V = np.transpose(np.reshape(V, (-1, dims[1], A.shape[2])), (1,0,2))
return U, S, V
def truncate_SVD(U, S, V, m):
"""
# truncate the matrices from an SVD to at most m states
"""
m = min(len(S), m)
trunc = np.sum(S[m:])
S = S[0:m]
U = U[:,:,0:m]
V = V[:,0:m,:]
return U,S,V,trunc,m
def optimize_one_site(A, B, W, E, F, dir):
"""
optimize a single site given the MPO matrix W, and tensors E,F
"""
H = HamiltonianMultiply(E,W,F)
E,V = sparse.linalg.eigsh(H,1,v0=A,which='SA', tol=1E-8)
A = np.reshape(V[:,0], H.req_shape)
if (dir == 'right'):
M = np.reshape(A,(H.req_shape[1],H.req_shape[0]*H.req_shape[2]))
U,S,V = np.linalg.svd(M, full_matrices=0)
A = np.reshape(V, [H.req_shape[1],H.req_shape[0],H.req_shape[2]])
A = np.transpose(A,(1,0,2))
US = np.einsum("ij,jk->ik", U, np.diag(S))
B = np.einsum("sij,jk->sik", B, US)
elif (dir == 'left'):
M = np.reshape(A,(H.req_shape[0]*H.req_shape[1],H.req_shape[2]))
U,S,V = np.linalg.svd(M, full_matrices=0)
A = np.reshape(U, H.req_shape)
SV = np.einsum("ij,jk->ik", np.diag(S),V)
B = np.einsum("ij,sjk->sik", SV, B)
return E[0], A, B
def optimize_two_sites(A, B, W1, W2, E, F, m, dir):
"""
two-site optimization of MPS A,B with respect to MPO W1,W2 and
environment tensors E,F
dir = 'left' or 'right' for a left-moving or right-moving sweep
"""
W = coarse_grain_MPO(W1,W2)
AA = coarse_grain_MPS(A,B)
H = HamiltonianMultiply(E,W,F)
E,V = sparse.linalg.eigsh(H,1,v0=AA,which='SA')
AA = np.reshape(V[:,0], H.req_shape)
A,S,B = fine_grain_MPS(AA, [A.shape[0], B.shape[0]])
A,S,B,trunc,m = truncate_SVD(A,S,B,m)
if (dir == 'left'):
B = np.einsum("ij,sjk->sik", np.diag(S), B)
else:
assert dir == 'right'
A = np.einsum("sij,jk->sik", A, np.diag(S))
return E[0], A, B, trunc, m
#############################################
# auxiliary function for projected method #
#############################################
def Mu(configuration):
d =2
D = 3
N = len(configuration)
vertices = []
vertices.append(np.zeros((d,1,D)))
for i in range(N-2):
vertices.append(np.zeros((d,D,D)))
vertices.append(np.zeros((d,D,1)))
for index,content in enumerate(configuration):
vertices[index][content][0][0] = 1
return vertices
def d_overlap(MPS1,MPS2,index):
"""
    Derivative of the overlap <MPS1|MPS2> with respect to the tensor of MPS2 at site `index`
-M- +--A--+
| = L | R
+- -+
"""
L = OL(MPS1, MPS2, index)[-1]
R = OR(MPS1, MPS2, index)[-1]
A = MPS1[index]
M = np.einsum("ij,sik->sjk", L, A)
M = np.einsum("sjk,kl->sjl", M, R)
return M
def d_expectation(MPS1,MPO,MPS2,index):
"""
Functor to evaluate the Hamiltonian matrix-vector multiply
+--A--+
| | |
-M- = L--W--R
| | | |
+- -+
"""
L = EL(MPS1, MPO, MPS2, index)[-1]
R = ER(MPS1, MPO, MPS2, index)[-1]
A = MPS1[index]
W = MPO[index]
M = np.einsum("aij,sik->ajsk", L, A)
M = np.einsum("ajsk,abst->bjtk", M, W)
M = np.einsum("bjtk,bkl->tjl", M, R)
return M
def f_mu(mu,MPO,MPS):
"""
+-+-i-+-+
| | | | +-+-i-+-+
O-O---O-O - E| | | |
| | | | +-+MPS+-+
+-+MPS+-+
"""
Exp = expectation(mu,MPO,MPS)
E = Energy(MPS,MPO)
Over = overlap(mu,MPS)
B = Exp-E*Over
return np.asscalar(B)
def D_f_mu(mu,MPO,MPS,i):
"""
i: the index of configuration
j: the index of A_i
"""
D_exp = d_expectation(mu,MPO,MPS,i)
E = Energy(MPS,MPO)
D_over = d_overlap(mu,MPS,i)
C = D_exp-E*D_over
return C
def Linear_Equation(configurations,MPO,MPS):
M = len(configurations)
N = len(MPS)
Jacobian = np.zeros([M,N],dtype=object)
g = np.zeros([M,1])
for i,configuration in enumerate(configurations):
mu = Mu(configuration)
g[i] = f_mu(mu,MPO,MPS)
for j,A in enumerate(MPS):
Jacobian[i][j] = D_f_mu(mu,MPO,MPS,j)
return Jacobian,g
def Jacobian_ravel(Jacobian):
Matrix = []
shape = []
for i in range(Jacobian.shape[1]):
shape.append(Jacobian[0][i].shape)
for i in range(Jacobian.shape[0]):
tmp = []
for j in range(Jacobian.shape[1]):
tmp.extend(Jacobian[i][j].ravel())
Matrix.append(tmp)
return np.array(Matrix),shape
def Jacobian_fold(Jacobian_ravel,shape):
Jacobian = []
for i in range(Jacobian_ravel.shape[0]):
index = 0
for j in shape:
A = np.reshape(Jacobian_ravel[i][index:index+np.prod(j)],j)
Jacobian.append(np.array(A))
index +=np.prod(j)
return Jacobian
def MPS_ravel(MPS):
vector = []
shape = []
for i in range(len(MPS)):
shape.append(MPS[i].shape)
for i in range(len(MPS)):
vector.extend(MPS[i].ravel())
MPS = np.array(vector)[:,None]
return MPS,shape
def MPS_fold(MPS_ravel,shape):
MPS = []
index = 0
for i,S in enumerate(shape):
A = np.reshape(MPS_ravel[index:index+np.prod(S)],S)
MPS.append(np.array(A))
index +=np.prod(S)
return MPS
def pmps_step(configurations,MPO,MPS):
Jacobian,g = Linear_Equation(configurations,MPO,MPS)
Jacobian = Jacobian_ravel(Jacobian)[0]
Jacobian_pinv = np.linalg.pinv(Jacobian)
step = np.dot(Jacobian_pinv,g)
MPS,shape = MPS_ravel(MPS)
MPS = MPS-step
MPS = MPS_fold(MPS,shape)
return MPS
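# --- Usage sketch (illustration only) ---
# Sanity check of `overlap` on a hand-built product state |00...0> with bond
# dimension 1: the overlap of a normalised state with itself should be 1.
if __name__ == "__main__":
    d, N = 2, 4
    site = np.zeros((d, 1, 1))
    site[0, 0, 0] = 1.0               # local basis state |0>
    mps = [site.copy() for _ in range(N)]
    print(overlap(mps, mps))          # expected: [[1.]]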
|
ZhaoYilin/modelham
|
modelham/tensornetwork/auxiliary.py
|
auxiliary.py
|
py
| 12,711 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39176281823
|
#Import files
import sys
import serial
import SLMC601V17_RS485_COM_Frames as SLMC_Frames
#Determine determine which port was provided
PORT = sys.argv[1]
#Check that port provided...
# contains ttyUSB
# Accept any ttyUSB index (e.g. /dev/ttyUSB0, /dev/ttyUSB10); the original
# fixed-width slice only matched single-digit device numbers.
if "ttyUSB" not in PORT:
    print("Error: Port is not expected USB-RS485")
    sys.exit(1)
# is accessible
usbrs485 = serial.Serial(PORT,baudrate=115200)
#Sums
bytesReadCounter = 0
dataSum = 0
readState = "seekA8"
recvBytes = [0]
def parseBytes(numBytesToRead):
global readState
global bytesReadCounter
global recvBytes
#Create index
index = 0
#For each byte in buffer...
for index in range(numBytesToRead):
if(readState == "seekA8"):
#Reset number of bytes read
bytesReadCounter = 0
#If read byte matches first byte of HST REQ frame...
if(int.from_bytes(recvBytes[index],"little") == int("A8",16)):
#Advance valid request state machine
readState = "seek11";
elif(readState == "seek11"):
#If read byte matches second byte of HST REQ frame...
if(int.from_bytes(recvBytes[index],"little") == int("11",16)):
#Advance valid request state machine
readState = "seekCmd";
else:
readState = "badSequence";
elif(readState == "seekCmd"):
#If read byte is a valid reqest...
if(int.from_bytes(recvBytes[index],"little") == int("01",16)):
readState = "readAAB"
elif(int.from_bytes(recvBytes[index],"little") == int("02",16)):
readState = "readVTCP"
elif(int.from_bytes(recvBytes[index],"little") == int("03",16)):
readState = "readCBR"
else:
readState = "seekA8"
print("Bad Sequence")
elif(readState == "readAAB"):
SLMC_Frames.BMS_RET_AAB[3+bytesReadCounter] = int.from_bytes(recvBytes[index],"little")
bytesReadCounter = bytesReadCounter + 1
if(bytesReadCounter >= len(SLMC_Frames.BMS_RET_AAB)-3):
dataSum = sum(SLMC_Frames.BMS_RET_AAB)
if(dataSum & int("1111",2) == 0):
print(SLMC_Frames.BMS_RET_AAB)
else:
print("Bad CRC")
readState = "End"
elif(readState == "readVTCP"):
SLMC_Frames.BMS_RET_VTCP[3+bytesReadCounter] = int.from_bytes(recvBytes[index],"little");
bytesReadCounter = bytesReadCounter + 1
if(bytesReadCounter >= len(SLMC_Frames.BMS_RET_VTCP)-3):
dataSum = sum(SLMC_Frames.BMS_RET_VTCP)
if(dataSum & int("1111",2) == 0):
print(SLMC_Frames.BMS_RET_VTCP)
else:
print("Bad CRC")
readState = "End"
elif(readState == "readCBR"):
SLMC_Frames.BMS_RET_CBR[3+bytesReadCounter] = int.from_bytes(recvBytes[index],"little")
bytesReadCounter = bytesReadCounter + 1
if(bytesReadCounter >= len(SLMC_Frames.BMS_RET_CBR)-3):
dataSum = sum(SLMC_Frames.BMS_RET_CBR)
if(dataSum & int("1111",2) == 0):
print(SLMC_Frames.BMS_RET_CBR)
else:
print("Bad CRC")
readState = "End"
#While command not completed or reset
while(readState != "End"):
recvBytes[0] = usbrs485.read(1)
parseBytes(1)
|
aarontwillcock/SLMC601V1.7-RS485-Tool
|
SLMC601V17_RS485_COM_RX.py
|
SLMC601V17_RS485_COM_RX.py
|
py
| 3,797 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34496233274
|
"""
Window function module
"""
import numpy as np
import lssps._lssps as c
def compute_grid3d(grid, *, pk_fac=None, shot_noise=None):
"""
Compute 3D window function grid |W(k)|^2
"""
if grid.mode == 'real-space':
grid.fft()
if grid.shifted and not grid.interlaced:
grid.interlace()
grid.correct_mas()
#pk_fac = #nbar_mid*grid_rand.w2_sum/alpha
#shot_noise = pk_fac*grid_rand.w2_sum
if pk_fac is None:
pk_fac = grid.pk_normalisation
if shot_noise is None:
shot_noise = grid.shot_noise
g = grid[:]
w2_grid = pk_fac*(g*np.conj(g)) - shot_noise
return w2_grid
def bessel_transform(r, k, f, l):
"""
Compute spherical-Bessel transform
1/(2 pi^2) \int k^2 dk j_l(kr) f(k)
Args:
r (array): output r
k (array): k of f(k)
f (array): function f
l (int): degree of Bessel function j_l
"""
result = np.empty_like(r)
c._window_cquag_bessel_transform(r, k, f, l, result)
return result
|
junkoda/lss-ps
|
py/lssps/window.py
|
window.py
|
py
| 1,047 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37122133332
|
from flask import request
from flask_restx import Resource
from app.main.util.decorator import admin_token_required
from ..service.inventory_service import get_all_inventories, save_new_inventory, get_an_inventory, update_inventory, delete_inventory_method
from ..util.dto import InventoryDto
api = InventoryDto.api
inventory = InventoryDto.inventory
post_inventory = InventoryDto.post_inventory
put_inventory = InventoryDto.put_inventory
@api.route('/')
class InventoryList(Resource):
@api.doc('list_of_inventories')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@admin_token_required
@api.marshal_list_with(inventory, envelope='data')
def get(self):
return get_all_inventories()
@api.expect(post_inventory, validate=True)
@api.response(201, 'Inventory successfully created.')
@api.doc('create a new inventory')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@admin_token_required
def post(self):
"""Creates a new Inventory """
data = request.json
return save_new_inventory(data=data)
@api.route('/<id>')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@api.param('id', 'Inventory identifier')
@api.response(404, 'Inventory not found.')
class Inventory(Resource):
@api.doc('get an inventory')
@admin_token_required
@api.marshal_with(inventory)
def get(self, id):
"""get an inventory given its identifier"""
_inventory = get_an_inventory(id)
if not _inventory:
api.abort(404)
else:
return _inventory, 200
@api.doc('update an inventory')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@api.expect(put_inventory, validate=True)
@api.response(200, 'Inventory successfully updated.')
@admin_token_required
def put(self, id):
"""Update an Inventory """
_inventory = get_an_inventory(id)
if not _inventory:
api.abort(404)
else:
data = request.json
data['id'] = id
data['store_id'] = _inventory.store_id
return update_inventory(data=data)
@api.doc('delete an inventory')
@api.response(204, 'Inventory successfully deleted.')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@admin_token_required
def delete(self, id):
"""Delete an inventory given its identifier"""
_invent = get_an_inventory(id)
if not _invent:
api.abort(404)
else:
return delete_inventory_method(_invent.id)
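# --- Usage sketch (illustration only, not part of the module) ---
# Example client calls against these endpoints; the base URL, the /inventory
# prefix, the id 42, and the token value are assumptions for illustration.
#
# import requests
# headers = {'Authorization': '<admin token>'}
# base = 'http://localhost:5000/inventory'
# requests.get(f'{base}/', headers=headers)                   # list all inventories
# requests.post(f'{base}/', json={...}, headers=headers)      # create (fields per post_inventory DTO)
# requests.get(f'{base}/42', headers=headers)                 # fetch one
# requests.put(f'{base}/42', json={...}, headers=headers)     # update (fields per put_inventory DTO)
# requests.delete(f'{base}/42', headers=headers)              # delete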
|
miteshnath/inventory-management-module
|
app/main/controller/inventory_controller.py
|
inventory_controller.py
|
py
| 2,780 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23089748371
|
import pygame as pg
class Scoreboard:
"""Represents the score in game"""
def __init__(self, game):
"""Initializes the properties of the scoreboard"""
self.settings = game.settings
self.screen = game.screen
self.screen_rect = self.screen.get_rect()
self.text_color = (255, 255, 255)
self.font = pg.font.SysFont('arial', 32)
self.score_image = None
self.label = None
self.score_rect = None
self.label_rect = None
self.score = 0
self.level = 0
self.high_score = 0
self.prep_score()
def increment_score(self, alien_points):
"""Increments the score of the game"""
self.score += alien_points
self.prep_score()
def prep_score(self):
"""Displays score in game"""
self.label = self.font.render("Score:", True, self.text_color, pg.SRCALPHA)
score_str = str(self.score)
self.score_image = self.font.render(score_str, True, self.text_color, pg.SRCALPHA)
# Display the score at the top right of the screen.
self.score_rect = self.score_image.get_rect()
self.label_rect = self.label.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.label_rect.right = self.screen_rect.right - 100
self.score_rect.top = 20
self.label_rect.top = 20
def reset(self):
"""Resets the score of game to zero"""
self.score = 0
self.update()
def update(self):
"""Calls draw() method of scoreboard"""
self.draw()
def draw(self):
"""Displays score text on screen"""
self.screen.blit(self.label, self.label_rect)
self.screen.blit(self.score_image, self.score_rect)
|
jackloague1/Space-Invaders-Project
|
Space-Invaders-Project/scoreboard.py
|
scoreboard.py
|
py
| 1,770 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14279218231
|
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
from typing import List
import subprocess
import os
import random
class PDFIngest(IngestorInterface):
"""Subclass of IngestorInterface specific for .docx files."""
ingestMode =['pdf']
@classmethod
def parse(cls, path:str) -> List[QuoteModel]:
"""Returns a list of formated quote and author from a .pdf file.
Subclass of IngestorInterface class.
Will raise an exception if used for file type other than .pdf.
Parameters: filepath (str)
"""
if not cls.can_ingest(path):
raise Exception('Cannot Ingest Error')
quotes = []
tmp_dir = random.randint(0,1000000000000)
tmp_filename = random.randint(0,1000000000000)
tmp_file = f'{os.getcwd()}/{tmp_dir}/{tmp_filename}.txt'
os.mkdir(f'{tmp_dir}')
call = subprocess.call(
['/Applications/xpdf-tools-mac-4.04/bin64/pdftotext',
'-layout',
path,
tmp_file])
f = open(tmp_file,'r')
lines = f.readlines()
for line in lines:
if len(line.strip())>0:
q = line.split('-')[0].strip('" ""\n\r').strip()
a = line.split('-')[1].strip('" ""\n\r').strip()
quotes.append(QuoteModel(q,a))
f.close()
os.remove(tmp_file)
os.rmdir(f'{tmp_dir}')
return(quotes)
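# --- Usage sketch (illustration only) ---
# The path below is hypothetical, and the hard-coded pdftotext location above
# means parsing only works where the xpdf tools are installed at that path.
#
# quotes = PDFIngest.parse('./quotes.pdf')
# for quote in quotes:
#     print(quote)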
|
JPNaan/MEMEGenerator
|
MEMEGenerator/src/QuoteEngine/IngestPDF.py
|
IngestPDF.py
|
py
| 1,466 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11046378915
|
def main():
v = open('newOutput-LACounty.csv','r')
c = v.read()
v.close()
w = open('join_blocks_coordinates.csv','r')
d = w.read()
w.close()
c_list = d.split('\n') # list of coords for census bgs
#print(c_list[-2])
mod_list = c.split('\n') #job flow
#print(mod_list[3])
retStr =''
inputx1='0'
inputy1='0'
inputx2='0'
inputy2='0'
#establish points to look at
SET_BLOCKS = set()
for a in c_list[1:-1]:
ln_lst = a.split(',')
SET_BLOCKS.add(ln_lst[4].strip('"')[:]) # add census block group
    for line in mod_list[1:-1]: #goes through each line in job flow and assigns coordinates
# print(line)
line_list = line.strip('\n').split(',')
#print('t')
if int(line_list[2]) < 1:
continue
elif line_list[0] not in SET_BLOCKS or line_list[1] not in SET_BLOCKS:
continue
if int(line_list[2]) > 0:
#print('test')
for a in c_list[1:-1]:
#print(a)
ln_lst = a.split(',')
#print(ln_lst[4].strip('"')[1:])
if int(ln_lst[4].strip('"')[:]) == int(line_list[0]):
print('match pt 1')
inputx1 = ln_lst[0]
inputy1 = ln_lst[1]
elif int(ln_lst[4].strip('"')[:]) == int(line_list[1]):
print('match pt 2')
inputx2 = ln_lst[0]
inputy2 = ln_lst[1]
#xy coordinate
#print(line_list)
lineString = '"LINESTRING('+inputx1+" "+inputy1+", "+inputx2+" "+inputy2+')"'
line_list.append(lineString)
#print('p')
print(','.join(line_list) + '\n')
retStr += ','.join(line_list) + '\n'
print(retStr)
return retStr
retStr = main()
print('t')
b = open('coord_output.csv','w')
b.write(retStr)
b.close()
|
hjames034/LODES-Analysis
|
coord-match.py
|
coord-match.py
|
py
| 1,974 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43721467284
|
# -*- coding: utf-8 -*-
__author__ = 'SinGle'
__date__ = '2020/06/26 14:39'
import re
from flask import current_app
from app.lib.Snapshot import Snapshot
def param_handler(params, action):
if "SNAPSHOTNAME" not in params.keys() or re.search("[\\\\,./\\x20]", params["SNAPSHOTNAME"]):
snapshot_name = None
else:
snapshot_name = params["SNAPSHOTNAME"]
    try:
        schema = re.findall('hdfs://(.*?)/', params["PATH"])[0]
    except IndexError as e:
        current_app.logger.error("IndexError: Failed to get schema from hdfs path! {}".format(e))
        # Fall back to None so that building param_doc below does not raise NameError.
        schema = None
param_doc = {
"user": params["USER"],
"schema": schema,
"path": params["PATH"],
"snapshot_name": snapshot_name
}
if action in ("DIFFER", "RENAME"):
try:
param_doc["old_snapshot_name"] = params["OLDSNAPSHOTNAME"]
except Exception as e:
current_app.logger.info("OLDSNAPSHOTNAME was not provided!", e)
if action == "RECOVER":
try:
param_doc["filename"] = params["FILENAME"]
except Exception as e:
current_app.logger.info("filename was not provided!", e)
return param_doc
def snapshot_initializer(param_doc):
snapshot = Snapshot(
user=param_doc["user"],
schema=param_doc["schema"],
path=param_doc["path"],
snapshot_name=param_doc["snapshot_name"]
)
return snapshot
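# --- Usage sketch (illustration only) ---
# The parameter values and the "CREATE" action below are made up; note that
# param_handler relies on current_app.logger, so it must run inside a Flask
# application context.
#
# params = {
#     "USER": "hdfs",
#     "PATH": "hdfs://nameservice1/user/hdfs/warehouse",
#     "SNAPSHOTNAME": "s20200626",
# }
# param_doc = param_handler(params, "CREATE")
# snapshot = snapshot_initializer(param_doc)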
|
xSinGle/Snapshot
|
app/lib/Helper.py
|
Helper.py
|
py
| 1,419 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41792906760
|
from sys import stdout
class ProgressWriter:
"""
A utility to show the progress of long processes. A common use case would
be to initialize, then in the loop, call show_progress() with the index
of the position in the loop, and then after the loop, call end_progress().
"""
def __init__(self, prestring, total, should_show):
"""
:param prestring {string}
:param total {number}
:param should_show {boolean}
"""
self.prestring = prestring
self.total = total
self.should_show = should_show
self.ix = 0
def increment_progress(self):
"""
Does nothing if self.should_show is False. Otherwise,
Shows the progress with self.ix / self.total % complete, then
increments self.ix by 1.
        Good to call at the beginning of each loop.
"""
if not self.should_show:
return
percent = self.ix / float(self.total) * 100
stdout.write('\r%s...%.2f%% Complete' % (self.prestring, percent))
self.ix += 1
def show_progress(self, count):
"""
Does nothing if self.should_show is false. Otherwise prints the
following string:
{PRESTRING}...{PERCENT}% Complete
where percent is 100 * count / self.total
:post Sets self.ix = count
"""
if not self.should_show:
return
self.ix = count
percent = count / float(self.total) * 100
stdout.write('\r%s...%.2f%% Complete' % (self.prestring, percent))
def end_progress(self):
"""
Does nothing if self.should_show is false. Otherwise prints the
following string:
{PRESTRING}...Done
"""
if not self.should_show:
return
stdout.write('\r%s...Done\n' % self.prestring)
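# --- Usage sketch (illustration only) ---
# Typical loop instrumentation; the sleep stands in for real work.
if __name__ == '__main__':
    import time
    writer = ProgressWriter('Processing items', total=50, should_show=True)
    for _ in range(50):
        writer.increment_progress()
        time.sleep(0.01)
    writer.end_progress()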
|
nwoodbury/progresswriter
|
progresswriter/progresswriter.py
|
progresswriter.py
|
py
| 1,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35869561775
|
# debugged the program
import random
guess = ''
# added a tuple to use 'toss' variable as an index
glst = ('tails', 'heads')
# moved tuple from while statement to a variable
while guess not in glst:
print('Guess the coin toss! Enter heads or tails:')
guess = input()
toss = random.randint(0, 1) # 0 is tails, 1 is heads
# changed the condition
if glst[toss] == guess:
print('You got it!')
else:
print('Nope! Guess again!')
# fixed typo
guess = input()
toss = random.randint(0, 1)
# again, changed condition
if glst[toss] == guess:
print('You got it!')
else:
print('Nope. You are really bad at this game.')
|
rarog2018/AtBSwP
|
C10 Debugging/coinToss.py
|
coinToss.py
|
py
| 645 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74025602427
|
import modules
from templates.quick_replies import add_quick_reply
from templates.text import TextTemplate
from templates.button import *
entities = {
'type':None,
'choice':None
}
def process(input, entities = None):
print('process',input,entities)
output = {}
if entities['type'] == None:
message = TextTemplate('嗨,我是土思機器人啦!\n想要我幫你檢查看看,你簽的租賃契約合理嗎?').get_message()
entities['type'] = 'step1'
entities['choice'] = True
message = add_quick_reply(message, '好啊,拿出契約來檢查一下好了!', modules.generate_postback(input,entities))
entities['choice'] = False
message = add_quick_reply(message, '不想,我沒有租屋啦', modules.generate_postback(input,entities))
elif entities['type'] == 'step1':
entities['type'] = 'step2'
if entities['choice'] == True:
message = TextTemplate('開始囉!上面寫的押金是幾個月租金呢?').get_message()
entities['choice'] = True
message = add_quick_reply(message, '2個月以下', modules.generate_postback(input,entities))
entities['choice'] = False
message = add_quick_reply(message, '2個月以上', modules.generate_postback(input,entities))
elif entities['choice'] == False:
message = TextTemplate('那我們無話可說…').get_message()
entities['choice'] = None
elif entities['type'] == 'step2':
entities['type'] = 'step3'
if entities['choice'] == True:
message = TextTemplate('太好了,押金最高不可以超過2個月房屋租金的總額。\n也建議要在合約上寫清楚退還時間與方式喔!\n\n下一題,契約裡的租金有寫清楚嗎?').get_message()
entities['choice'] = True
message = add_quick_reply(message, '有喔!', modules.generate_postback(input,entities))
entities['choice'] = False
message = add_quick_reply(message, '好像….沒有欸?!', modules.generate_postback(input,entities))
elif entities['choice'] == False:
message = TextTemplate('什麼?!你知道這樣其實已經超過法律規定的額度了嗎….').get_message()
entities['choice'] = None
elif entities['type'] == 'step3':
entities['type'] = 'step4'
if entities['choice'] == True:
message = TextTemplate('讚喔!除了租金的金額外,也應該包括何時給付及付款方式。還有管理費、清潔費或其他費用,也應該盡量寫在合約中。\n\n再來,修繕的責任有寫清楚嗎?').get_message()
entities['choice'] = True
message = add_quick_reply(message, '寫得清清楚楚', modules.generate_postback(input,entities))
entities['choice'] = False
message = add_quick_reply(message, '疑?!怎麼沒看到…', modules.generate_postback(input,entities))
elif entities['choice'] == False:
message = TextTemplate('什麼?!你知道這樣有可能被多收錢嗎…').get_message()
entities['choice'] = None
elif entities['type'] == 'step4':
entities['type'] = 'step5'
if entities['choice'] == True:
message = TextTemplate('喔喔喔喔!美賣喔~~~也建議在簽約時,依照實際狀況,逐一討論並載明於租約中,未來比較不會有爭執喔! \n\n再來,上面有寫到不能報稅嗎?').get_message()
entities['choice'] = True
message = add_quick_reply(message, '沒有!', modules.generate_postback(input,entities))
entities['choice'] = False
message = add_quick_reply(message, '可…可惡!房東特別寫下來了啦…', modules.generate_postback(input,entities))
elif entities['choice'] == False:
message = TextTemplate('什麼?!你知道這樣有可能被多收錢嗎…').get_message()
entities['choice'] = None
elif entities['type'] == 'step5':
entities['type'] = 'step6'
if entities['choice'] == True:
message = TextTemplate('太厲害了,恭喜你完成租約的考驗!你的租賃契約寫得很不錯,要記得確保契約內容,權利才會有保障喔!').get_message()
entities['choice'] = None
elif entities['choice'] == False:
message = TextTemplate('什麼?!你知道房東這樣其實是違法的嗎….').get_message()
entities['choice'] = None
elif entities['type'] == 'end':
template = TextTemplate()
template.set_text('更多詳細內容請看我們整理的懶人包:今天要簽約?教你看到租約一眼就抓到重點')
text = template.get_text()
template = ButtonTemplate(text)
#message = TextTemplate('更多詳細內容請看我們整理的懶人包:今天要簽約?教你看到租約一眼就抓到重點').get_message()
link = 'https://www.facebook.com/LandToast'
#template = ButtonTemplate(message)
template.add_web_url('傳送門', link)
output['input'] = input
output['output'] = template.get_message()
output['success'] = True
return output
else:
output['success'] = False
return output
if entities['choice'] == None:
entities['type'] = None
message = add_quick_reply(message, '再試一次!',
modules.generate_postback(input,entities))
entities['type'] = 'end'
message = add_quick_reply(message, '結束對話',
modules.generate_postback(input,entities))
output['input'] = input
output['output'] = message
output['success'] = True
return output
|
anne030303/messenger-landbot
|
modules/src/lease_contract.py
|
lease_contract.py
|
py
| 5,963 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13031397171
|
import rospy, sys, tf
import moveit_commander
from math import *
from geometry_msgs.msg import PoseStamped
from moveit_commander import MoveGroupCommander, PlanningSceneInterface
from moveit_msgs.msg import PlanningScene, ObjectColor
from moveit_msgs.msg import Grasp, GripperTranslation
from moveit_msgs.msg import MoveItErrorCodes
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import quaternion_from_euler
from copy import deepcopy
GROUP_NAME_ARM = 'arm'
GROUP_NAME_GRIPPER = 'gripper'
GRIPPER_FRAME = 'gripper_link'
GRIPPER_JOINT_NAMES = ['gripper_joint']
GRIPPER_EFFORT = [1.0]
GRIPPER_PARAM = '/gripper_controller'
REFERENCE_FRAME = '/base_link'
ARM_BASE_FRAME = '/arm_base_link'
class MoveItDemo:
def __init__(self):
# Initialize the move_group API
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('moveit_demo')
self.gripper_opened = [rospy.get_param(GRIPPER_PARAM + "/max_opening") ]
self.gripper_closed = [rospy.get_param(GRIPPER_PARAM + "/min_opening") ]
self.gripper_neutral = [rospy.get_param(GRIPPER_PARAM + "/neutral") ]
self.gripper_tighten = rospy.get_param(GRIPPER_PARAM + "/tighten")
# We need a tf listener to convert poses into arm reference base
self.tf_listener = tf.TransformListener()
# Use the planning scene object to add or remove objects
scene = PlanningSceneInterface()
# Create a scene publisher to push changes to the scene
self.scene_pub = rospy.Publisher('planning_scene', PlanningScene, queue_size=10)
# Create a publisher for displaying gripper poses
self.gripper_pose_pub = rospy.Publisher('target_pose', PoseStamped, queue_size=10)
# Create a dictionary to hold object colors
self.colors = dict()
# Initialize the move group for the right arm
arm = MoveGroupCommander(GROUP_NAME_ARM)
# Initialize the move group for the right gripper
gripper = MoveGroupCommander(GROUP_NAME_GRIPPER)
# Get the name of the end-effector link
end_effector_link = arm.get_end_effector_link()
# Allow some leeway in position (meters) and orientation (radians)
arm.set_goal_position_tolerance(0.04)
arm.set_goal_orientation_tolerance(0.1)
# Allow replanning to increase the odds of a solution
arm.allow_replanning(True)
# Set the right arm reference frame
arm.set_pose_reference_frame(REFERENCE_FRAME)
# Allow 5 seconds per planning attempt
arm.set_planning_time(5)
# Set a limit on the number of pick attempts before bailing
max_pick_attempts = 3
# Set a limit on the number of place attempts
max_place_attempts = 3
rospy.loginfo("Scaling for MoveIt timeout=" + str(rospy.get_param('/move_group/trajectory_execution/allowed_execution_duration_scaling')))
# Give the scene a chance to catch up
rospy.sleep(2)
# Give each of the scene objects a unique name
table_id = 'table'
box1_id = 'box1'
box2_id = 'box2'
target_id = 'target'
tool_id = 'tool'
# Remove leftover objects from a previous run
scene.remove_world_object(table_id)
scene.remove_world_object(box1_id)
scene.remove_world_object(box2_id)
scene.remove_world_object(target_id)
scene.remove_world_object(tool_id)
# Remove any attached objects from a previous session
scene.remove_attached_object(GRIPPER_FRAME, target_id)
# Give the scene a chance to catch up
rospy.sleep(1)
# Start the arm in the "arm_up" pose stored in the SRDF file
rospy.loginfo("Set Arm: right_up")
arm.set_named_target('right_up')
if arm.go() != True:
rospy.logwarn(" Go failed")
rospy.sleep(2)
# Move the gripper to the closed position
rospy.loginfo("Set Gripper: Close " + str(self.gripper_closed ) )
gripper.set_joint_value_target(self.gripper_closed)
if gripper.go() != True:
rospy.logwarn(" Go failed")
rospy.sleep(2)
# Move the gripper to the neutral position
rospy.loginfo("Set Gripper: Neutral " + str(self.gripper_neutral) )
gripper.set_joint_value_target(self.gripper_neutral)
if gripper.go() != True:
rospy.logwarn(" Go failed")
rospy.sleep(2)
# Move the gripper to the open position
rospy.loginfo("Set Gripper: Open " + str(self.gripper_opened))
gripper.set_joint_value_target(self.gripper_opened)
if gripper.go() != True:
rospy.logwarn(" Go failed")
rospy.sleep(2)
# Set the height of the table off the ground
table_ground = 0.4
# Set the dimensions of the scene objects [l, w, h]
table_size = [0.2, 0.7, 0.01]
box1_size = [0.1, 0.05, 0.05]
box2_size = [0.05, 0.05, 0.15]
# Set the target size [l, w, h]
target_size = [0.02, 0.005, 0.12]
# Add a table top and two boxes to the scene
table_pose = PoseStamped()
table_pose.header.frame_id = REFERENCE_FRAME
table_pose.pose.position.x = 0.36
table_pose.pose.position.y = 0.0
table_pose.pose.position.z = table_ground + table_size[2] / 2.0
table_pose.pose.orientation.w = 1.0
scene.add_box(table_id, table_pose, table_size)
box1_pose = PoseStamped()
box1_pose.header.frame_id = REFERENCE_FRAME
box1_pose.pose.position.x = table_pose.pose.position.x - 0.04
box1_pose.pose.position.y = 0.0
box1_pose.pose.position.z = table_ground + table_size[2] + box1_size[2] / 2.0
box1_pose.pose.orientation.w = 1.0
scene.add_box(box1_id, box1_pose, box1_size)
box2_pose = PoseStamped()
box2_pose.header.frame_id = REFERENCE_FRAME
box2_pose.pose.position.x = table_pose.pose.position.x - 0.06
box2_pose.pose.position.y = 0.2
box2_pose.pose.position.z = table_ground + table_size[2] + box2_size[2] / 2.0
box2_pose.pose.orientation.w = 1.0
scene.add_box(box2_id, box2_pose, box2_size)
# Set the target pose in between the boxes and on the table
target_pose = PoseStamped()
target_pose.header.frame_id = REFERENCE_FRAME
target_pose.pose.position.x = table_pose.pose.position.x - 0.03
target_pose.pose.position.y = 0.1
target_pose.pose.position.z = table_ground + table_size[2] + target_size[2] / 2.0
target_pose.pose.orientation.w = 1.0
# Add the target object to the scene
scene.add_box(target_id, target_pose, target_size)
# Make the table red and the boxes orange
self.setColor(table_id, 0.8, 0, 0, 1.0)
self.setColor(box1_id, 0.8, 0.4, 0, 1.0)
self.setColor(box2_id, 0.8, 0.4, 0, 1.0)
# Make the target yellow
self.setColor(target_id, 0.9, 0.9, 0, 1.0)
# Send the colors to the planning scene
self.sendColors()
# Set the support surface name to the table object
arm.set_support_surface_name(table_id)
# Specify a pose to place the target after being picked up
place_pose = PoseStamped()
place_pose.header.frame_id = REFERENCE_FRAME
place_pose.pose.position.x = table_pose.pose.position.x - 0.03
place_pose.pose.position.y = -0.15
place_pose.pose.position.z = table_ground + table_size[2] + target_size[2] / 2.0
place_pose.pose.orientation.w = 1.0
# Initialize the grasp pose to the target pose
grasp_pose = target_pose
# Shift the grasp pose by half the width of the target to center it
grasp_pose.pose.position.y -= target_size[1] / 2.0
# Generate a list of grasps
grasps = self.make_grasps(grasp_pose, [target_id], [target_size[1] - self.gripper_tighten])
# Track success/failure and number of attempts for pick operation
result = MoveItErrorCodes.FAILURE
n_attempts = 0
# Repeat until we succeed or run out of attempts
while result != MoveItErrorCodes.SUCCESS and n_attempts < max_pick_attempts:
rospy.loginfo("Pick attempt #" + str(n_attempts))
for grasp in grasps:
# Publish the grasp poses so they can be viewed in RViz
self.gripper_pose_pub.publish(grasp.grasp_pose)
rospy.sleep(0.2)
result = arm.pick(target_id, grasps)
if result == MoveItErrorCodes.SUCCESS:
break
n_attempts += 1
rospy.sleep(0.2)
# If the pick was successful, attempt the place operation
if result == MoveItErrorCodes.SUCCESS:
rospy.loginfo(" Pick: Done!")
# Generate valid place poses
places = self.make_places(place_pose)
success = False
n_attempts = 0
# Repeat until we succeed or run out of attempts
while not success and n_attempts < max_place_attempts:
rospy.loginfo("Place attempt #" + str(n_attempts))
for place in places:
# Publish the place poses so they can be viewed in RViz
self.gripper_pose_pub.publish(place)
rospy.sleep(0.2)
success = arm.place(target_id, place)
if success:
break
n_attempts += 1
rospy.sleep(0.2)
if not success:
rospy.logerr("Place operation failed after " + str(n_attempts) + " attempts.")
else:
rospy.loginfo(" Place: Done!")
else:
rospy.logerr("Pick operation failed after " + str(n_attempts) + " attempts.")
# Return the arm to the "resting" pose stored in the SRDF file (passing through right_up)
arm.set_named_target('right_up')
arm.go()
arm.set_named_target('resting')
arm.go()
# Open the gripper to the neutral position
gripper.set_joint_value_target(self.gripper_neutral)
gripper.go()
rospy.sleep(1)
# Shut down MoveIt cleanly
moveit_commander.roscpp_shutdown()
# Exit the script
moveit_commander.os._exit(0)
# Get the gripper posture as a JointTrajectory
def make_gripper_posture(self, joint_positions):
# Initialize the joint trajectory for the gripper joints
t = JointTrajectory()
# Set the joint names to the gripper joint names
t.joint_names = GRIPPER_JOINT_NAMES
# Initialize a joint trajectory point to represent the goal
tp = JointTrajectoryPoint()
# Assign the trajectory joint positions to the input positions
tp.positions = joint_positions
# Set the gripper effort
tp.effort = GRIPPER_EFFORT
tp.time_from_start = rospy.Duration(1.0)
# Append the goal point to the trajectory points
t.points.append(tp)
# Return the joint trajectory
return t
# Generate a gripper translation in the direction given by vector
def make_gripper_translation(self, min_dist, desired, vector):
# Initialize the gripper translation object
g = GripperTranslation()
# Set the direction vector components to the input
g.direction.vector.x = vector[0]
g.direction.vector.y = vector[1]
g.direction.vector.z = vector[2]
# The vector is relative to the gripper frame
g.direction.header.frame_id = GRIPPER_FRAME
# Assign the min and desired distances from the input
g.min_distance = min_dist
g.desired_distance = desired
return g
# Generate a list of possible grasps
def make_grasps(self, initial_pose_stamped, allowed_touch_objects, grasp_opening=[0]):
# Initialize the grasp object
g = Grasp()
# Set the pre-grasp and grasp postures appropriately;
# grasp_opening should be a bit smaller than target width
g.pre_grasp_posture = self.make_gripper_posture(self.gripper_opened)
g.grasp_posture = self.make_gripper_posture(grasp_opening)
# Set the approach and retreat parameters as desired
g.pre_grasp_approach = self.make_gripper_translation(0.01, 0.1, [1.0, 0.0, 0.0])
g.post_grasp_retreat = self.make_gripper_translation(0.1, 0.15, [0.0, -1.0, 1.0])
# Set the first grasp pose to the input pose
g.grasp_pose = initial_pose_stamped
# Pitch angles to try
pitch_vals = [0, 0.1, -0.1, 0.2, -0.2, 0.4, -0.4]
# Yaw angles to try; given the limited dofs of turtlebot_arm, we must calculate the heading
# from arm base to the object to pick (first we must transform its pose to arm base frame)
target_pose_arm_ref = self.tf_listener.transformPose(ARM_BASE_FRAME, initial_pose_stamped)
x = target_pose_arm_ref.pose.position.x
y = target_pose_arm_ref.pose.position.y
self.pick_yaw = atan2(y, x) # check in make_places method why we store the calculated yaw
yaw_vals = [self.pick_yaw]
# A list to hold the grasps
grasps = []
# Generate a grasp for each pitch and yaw angle
for yaw in yaw_vals:
for pitch in pitch_vals:
# Create a quaternion from the Euler angles
q = quaternion_from_euler(0, pitch, yaw)
# Set the grasp pose orientation accordingly
g.grasp_pose.pose.orientation.x = q[0]
g.grasp_pose.pose.orientation.y = q[1]
g.grasp_pose.pose.orientation.z = q[2]
g.grasp_pose.pose.orientation.w = q[3]
                # Set an id for this grasp (simply needs to be unique)
g.id = str(len(grasps))
# Set the allowed touch objects to the input list
g.allowed_touch_objects = allowed_touch_objects
# Don't restrict contact force
g.max_contact_force = 0
# Degrade grasp quality for increasing pitch angles
g.grasp_quality = 1.0 - abs(pitch)
# Append the grasp to the list
grasps.append(deepcopy(g))
# Return the list
return grasps
# Generate a list of possible place poses
def make_places(self, init_pose):
        # Start from a copy of the input place pose so that the per-iteration
        # shifts below do not accumulate on the caller's message
        place = deepcopy(init_pose)
# A list of x shifts (meters) to try
x_vals = [0, 0.005, -0.005] #, 0.01, -0.01, 0.015, -0.015]
# A list of y shifts (meters) to try
y_vals = [0, 0.005, -0.005, 0.01, -0.01] #, 0.015, -0.015]
# A list of pitch angles to try
pitch_vals = [0] #, 0.005, -0.005, 0.01, -0.01, 0.02, -0.02]
# A list to hold the places
places = []
# Generate a place pose for each angle and translation
for pitch in pitch_vals:
for dy in y_vals:
for dx in x_vals:
place.pose.position.x = init_pose.pose.position.x + dx
place.pose.position.y = init_pose.pose.position.y + dy
# Yaw angle: given the limited dofs of turtlebot_arm, we must calculate the heading from
# arm base to the place location (first we must transform its pose to arm base frame)
target_pose_arm_ref = self.tf_listener.transformPose(ARM_BASE_FRAME, place)
x = target_pose_arm_ref.pose.position.x
y = target_pose_arm_ref.pose.position.y
                    yaw = atan2(y, x) - self.pick_yaw
                    # Note that we subtract the yaw calculated for the pick, because the picked object
                    # carries with it the orientation the arm had at pickup time. More details in this
                    # moveit-users group thread: https://groups.google.com/forum/#!topic/moveit-users/-Eie-wLDbu0
# Create a quaternion from the Euler angles
q = quaternion_from_euler(0, pitch, yaw)
# Set the place pose orientation accordingly
place.pose.orientation.x = q[0]
place.pose.orientation.y = q[1]
place.pose.orientation.z = q[2]
place.pose.orientation.w = q[3]
# Append this place pose to the list
places.append(deepcopy(place))
# Return the list
return places
# Set the color of an object
def setColor(self, name, r, g, b, a=0.9):
# Initialize a MoveIt color object
color = ObjectColor()
# Set the id to the name given as an argument
color.id = name
# Set the rgb and alpha values given as input
color.color.r = r
color.color.g = g
color.color.b = b
color.color.a = a
# Update the global color dictionary
self.colors[name] = color
# Actually send the colors to MoveIt!
def sendColors(self):
# Initialize a planning scene object
p = PlanningScene()
# Need to publish a planning scene diff
p.is_diff = True
# Append the colors from the global color dictionary
for color in self.colors.values():
p.object_colors.append(color)
# Publish the scene diff
self.scene_pub.publish(p)
if __name__ == "__main__":
MoveItDemo()
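# Illustrative sketch (not part of the node above): a minimal, ROS-free view of the
# yaw compensation used in make_grasps()/make_places(). Because of the arm's limited
# DOFs the gripper heading must point from the arm base to the target; at place time
# the pick yaw is subtracted so the carried object keeps the orientation it had at
# pickup. The coordinates below are hypothetical.
from math import atan2, degrees

def heading_to(x, y):
    # Heading from the arm base frame origin to a point expressed in that frame.
    return atan2(y, x)

def place_yaw(pick_xy, place_xy):
    # Yaw to command at the place pose, compensating for the yaw used at pick time.
    pick_yaw = heading_to(*pick_xy)
    return heading_to(*place_xy) - pick_yaw

if __name__ == "__main__":
    print(degrees(place_yaw(pick_xy=(0.20, 0.05), place_xy=(0.18, -0.10))))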
|
sniper0110/Turtlebot_arm
|
turtlebot_arm_moveit_demos/bin/pick_and_place.py
|
pick_and_place.py
|
py
| 17,847 |
python
|
en
|
code
| 4 |
github-code
|
6
|
19409761077
|
import asyncio
import os
import datetime
import discord
from discord import channel
from discord.ext import commands
from discord_slash import SlashCommand, SlashContext, cog_ext
from discord_slash.utils.manage_commands import create_option, create_choice
from core.classes import CogExtension
class System(CogExtension):
    @cog_ext.cog_slash(  # ping: report the bot's latency in milliseconds
name="ping", description="監測 Bot 和 Discord 之間的連線延遲。")
async def ping(self, ctx: SlashContext):
embed = discord.Embed(color=0x21e828, timestamp=datetime.datetime.utcnow())
embed.set_author(name="MinatoBot",
icon_url="https://i.imgur.com/EvyjjO9.jpg")
embed.add_field(name="Ping", value=f"**{round(self.bot.latency*1000)}** ms", inline=False)
await ctx.send(embed=embed)
    @commands.command()  # bc1: broadcast a message to a given channel (prefix command)
async def bc1(self, ctx, ch: int, *, msg):
channel = self.bot.get_channel(ch)
await channel.send(f"{msg}")
await ctx.send(f"在 {channel.mention} 廣播訊息成功。")
@cog_ext.cog_slash(
name="bc",
description="在指定頻道進行廣播。",
options=[
create_option(
name="channel",
description="傳送訊息的頻道。",
option_type=7,
required=True
),
create_option(
name="message",
description="訊息。",
option_type=3,
required=True
)
]
)
async def bc(self, ctx: SlashContext, channel, message):
await channel.send(f"{message}")
await ctx.send(f"在 {channel.mention} 廣播訊息成功。")
    @commands.Cog.listener()  # auto-reply to messages starting with "こんあくあ"
async def on_message(self, msg):
if msg.content.startswith("こんあくあ") and msg.author != self.bot.user:
await msg.channel.send("こんあくあ~!")
def setup(bot):
bot.add_cog(System(bot))
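# Illustrative loader sketch: how this cog would typically be loaded, assuming
# discord.py 1.x plus discord-py-slash-command (matching the imports above).
# The extension path "cmds.system" follows the repository layout; the
# DISCORD_TOKEN environment variable name is an assumption.
import os
import discord
from discord.ext import commands
from discord_slash import SlashCommand

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
slash = SlashCommand(bot, sync_commands=True)  # enables the cog_ext slash commands

bot.load_extension("cmds.system")  # runs the setup(bot) defined above
bot.run(os.environ["DISCORD_TOKEN"])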
|
TimTsai0316/MinatoBot
|
cmds/system.py
|
system.py
|
py
| 2,045 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33560462969
|
"""Program to List, Create, Add, Edit, Delete contacts and save to a JSON file"""
import json
class CreateContact:
""""""
def __init__(self, fname, lname, phone): #constructor
self.fname = fname
self.lname = lname
self.phone = phone
def create_new_contact(self):
contact_data = {}
contact_data['fname'] = self.fname
contact_data['lname'] = self.lname
contact_data['phone'] = self.phone
print("new contact added")
print(contact_data)
print(type(contact_data))
return contact_data
# print("Details:", self.fname, self.lname, self.phone)
def create_contact():
"""Function to create a new contact and add to contacts list."""
new_contact_list = []
phone_number_list = []
print("Enter Details")
fname = input("Enter first name:")
lname = input("Enter last name:")
choice = input("Add number?(Y/N):")
while choice == 'y' or choice == 'Y':
number_type = input("Enter type: ")
number = input("Enter phone number: ")
contact_dict = {}
contact_dict['type'] = number_type
contact_dict['value'] = number
phone_number_list.append(contact_dict)
print("Contact added")
choice = input("Add number?(Y/N):")
new_contact = CreateContact(fname, lname, phone_number_list)
new_contact_data = new_contact.create_new_contact()
try:
contact_file_object = open("contact.json", "r")
data_list = contact_file_object.read()
data_list = json.loads(data_list)
contact_file_object.close()
print(data_list)
    except (FileNotFoundError, json.JSONDecodeError):
print("no data")
data_list = []
data_list.append(new_contact_data)
print(data_list)
return data_list # New contact data.
def main_contact_function():
"""Main function to display actions to be performed on contacts list.
"""
try:
contact_file_object = open("contact.json", "r")
data_list = contact_file_object.read()
data_list = json.loads(data_list)
contact_file_object.close()
    except (FileNotFoundError, json.JSONDecodeError):
print("no data")
data_list = []
print("Contacts App\n")
choice = 0
choice_list = [1, 2, 3, 4]
new_contact_list = []
while choice not in choice_list:
print("Choose Action\n\t1.Create new contact\n\t2.List Contacts\n\t3.Edit a contact\n\t4.Delete Contact\n\t5.Search a contact\n\t6.Exit")
choice = int(input("Enter option: "))
print(f"Choice : {choice}")
if choice == 1:
print("1.Create Contact")
            new_contact_details = create_contact()
            print(new_contact_details)
            # create_contact() already returns the full, updated contact list
            json_object = json.dumps(new_contact_details, indent=4)
            print(json_object)
            # Writing to contact.json
            with open("contact.json", "w") as outfile:
outfile.write(json_object)
outfile.close()
choice = 0
elif choice == 2:
contact_file_object = open("contact.json","r")
data_list = contact_file_object.read()
data_list = json.loads(data_list)
contact_file_object.close()
print("2.List Contacts")
new_list = sorted(data_list, key=lambda item: item['fname'])
print("data:")
for i in new_list:
print(f"\n{i['fname']} {i['lname']}\t", end="")
for multiple_number in i['phone']:
print(f"{multiple_number['type']}-{multiple_number['value']}\t", end="\t")
print()
choice = 0
elif choice == 3:
print("3.Edit a contact")
contact_file_object = open("contact.json","r")
data_list = contact_file_object.read()
data_list = json.loads(data_list)
contact_file_object.close()
user_input_fname = input("Enter the name of the contact you wish to edit: ")
data = [item for item in data_list if item["fname"] == f"{user_input_fname}"][0]
print(data)
choice = input("Edit data?(Y\\N):")
if choice == 'y' or choice == 'y':
# data_list.remove(data)
# if choice == 'y' or choice == 'y':
sub_choice_list = [1, 2, 3]
sub_choice = 0
sub_choice = int(input("Choose from 1.fname, 2.lname, 3.phone, 4.exit number to edit\n \
(choose: 1/2/3/4):"))
while sub_choice in sub_choice_list[:3]:
if sub_choice == 1:
data["fname"] = input("Enter first name")
temp_data = data
data_list.remove(temp_data)
data_list.append(data)
json_object = json.dumps(data_list, indent = 4)
with open("contact.json", "w") as outfile:
outfile.write(json_object)
outfile.close()
elif sub_choice == 2:
data["lname"] = input("Enter last name")
temp_data = data
data_list.remove(temp_data)
data_list.append(data)
json_object = json.dumps(data_list, indent = 4)
with open("contact.json", "w") as outfile:
outfile.write(json_object)
outfile.close()
elif sub_choice == 3:
temp_data = data
index = 1
print("type:")
                        number_type = list(data['phone'])
                        for i in number_type:
                            print(index, ".", i)
                            index += 1
phone_type_choice = int(input("Choose number(by index):"))
print("index = ", phone_type_choice)
phone_type_choice -= 1
print("Edit or Delete")
try:
while phone_type_choice >= 0 and phone_type_choice < index:
print("hello")
phone_data = number_type[phone_type_choice]
print("hello")
print(phone_data)
type_or_value = int(input("change 1.Type, 2.Number"))
if type_or_value == 1:
temp_phone_data = phone_data
phone_data['type'] = input("Enter new type")
number_type.remove(temp_phone_data)
number_type.append(phone_data)
print(data)
data_list.remove(temp_data)
data_list.append(data)
json_object = json.dumps(data_list, indent = 4)
with open("contact.json", "w") as outfile:
outfile.write(json_object)
outfile.close()
break
elif type_or_value == 2:
temp_phone_data = phone_data
phone_data['value'] = input("Enter new number")
number_type.remove(temp_phone_data)
number_type.append(phone_data)
print(data)
data_list.remove(temp_data)
data_list.append(data)
json_object = json.dumps(data_list, indent = 4)
with open("contact.json", "w") as outfile:
outfile.write(json_object)
outfile.close()
break
except IndexError:
print("No phone number found")
elif sub_choice not in sub_choice_list:
break
sub_choice = int(input("Choose from 1.fname, 2.lname, 3.phone,\
4.exit number to edit\n\
(choose: 1/2/3/4):"))
print("yes1")
choice = 0
elif choice == 4:
print("DELETE")
contact_file_object = open("contact.json","r")
data_list = contact_file_object.read()
data_list = json.loads(data_list)
contact_file_object.close()
print("Delete data by name:")
user_input_fname = input()
data = [item for item in data_list if item["fname"] == f"{user_input_fname}"][0]
print(data)
data_list.remove(data)
print(data_list)
json_object = json.dumps(data_list, indent = 4)
print(json_object)
# Writing to sample.json
with open("contact.json", "w") as outfile:
outfile.write(json_object)
outfile.close()
choice = 0
# Contact Search.
elif choice == 5:
contact_file_object = open("contact.json","r")
data_list = contact_file_object.read()
data_list = json.loads(data_list)
contact_file_object.close()
print("Search data by name:")
user_input_fname = input()
data = [item for item in data_list if item["fname"] == f"{user_input_fname}"]
print(data)
choice = 0
elif choice == 6:
break
main_contact_function()
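# Illustrative refactoring sketch, not part of the original program: the
# read-parse-rewrite handling of contact.json repeats in every branch above,
# so it could be factored into two small helpers.
import json

def load_contacts(path="contact.json"):
    try:
        with open(path, "r") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return []

def save_contacts(contacts, path="contact.json"):
    with open(path, "w") as f:
        json.dump(contacts, f, indent=4)

if __name__ == "__main__":
    contacts = load_contacts()
    contacts.append({"fname": "Ada", "lname": "Lovelace",
                     "phone": [{"type": "home", "value": "555-0100"}]})
    save_contacts(contacts)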
|
alenantony/Alokin-Task
|
Day2/contact.py
|
contact.py
|
py
| 10,501 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29373295052
|
# External Packages
from fastapi import APIRouter
from fastapi import Request
from fastapi.responses import HTMLResponse, FileResponse
from fastapi.templating import Jinja2Templates
from khoj.utils.rawconfig import TextContentConfig, OpenAIProcessorConfig, FullConfig
# Internal Packages
from khoj.utils import constants, state
import json
# Initialize Router
web_client = APIRouter()
templates = Jinja2Templates(directory=constants.web_directory)
VALID_TEXT_CONTENT_TYPES = ["org", "markdown", "pdf", "plaintext"]
# Create Routes
@web_client.get("/", response_class=FileResponse)
def index(request: Request):
return templates.TemplateResponse("index.html", context={"request": request, "demo": state.demo})
@web_client.get("/chat", response_class=FileResponse)
def chat_page(request: Request):
return templates.TemplateResponse("chat.html", context={"request": request, "demo": state.demo})
if not state.demo:
@web_client.get("/config", response_class=HTMLResponse)
def config_page(request: Request):
default_full_config = FullConfig(
content_type=None,
search_type=None,
processor=None,
)
current_config = state.config or json.loads(default_full_config.json())
successfully_configured = {
"pdf": False,
"markdown": False,
"org": False,
"image": False,
"github": False,
"notion": False,
"plaintext": False,
"enable_offline_model": False,
"conversation_openai": False,
"conversation_gpt4all": False,
}
if state.content_index:
successfully_configured.update(
{
"pdf": state.content_index.pdf is not None,
"markdown": state.content_index.markdown is not None,
"org": state.content_index.org is not None,
"image": state.content_index.image is not None,
"github": state.content_index.github is not None,
"notion": state.content_index.notion is not None,
"plaintext": state.content_index.plaintext is not None,
}
)
if state.processor_config and state.processor_config.conversation:
successfully_configured.update(
{
"conversation_openai": state.processor_config.conversation.openai_model is not None,
"conversation_gpt4all": state.processor_config.conversation.gpt4all_model.loaded_model is not None,
}
)
return templates.TemplateResponse(
"config.html",
context={
"request": request,
"current_config": current_config,
"current_model_state": successfully_configured,
},
)
@web_client.get("/config/content_type/github", response_class=HTMLResponse)
def github_config_page(request: Request):
default_copy = constants.default_config.copy()
default_github = default_copy["content-type"]["github"] # type: ignore
default_config = TextContentConfig(
compressed_jsonl=default_github["compressed-jsonl"],
embeddings_file=default_github["embeddings-file"],
)
current_config = (
state.config.content_type.github
if state.config and state.config.content_type and state.config.content_type.github
else default_config
)
current_config = json.loads(current_config.json())
return templates.TemplateResponse(
"content_type_github_input.html", context={"request": request, "current_config": current_config}
)
@web_client.get("/config/content_type/notion", response_class=HTMLResponse)
def notion_config_page(request: Request):
default_copy = constants.default_config.copy()
default_notion = default_copy["content-type"]["notion"] # type: ignore
default_config = TextContentConfig(
compressed_jsonl=default_notion["compressed-jsonl"],
embeddings_file=default_notion["embeddings-file"],
)
current_config = (
state.config.content_type.notion
if state.config and state.config.content_type and state.config.content_type.notion
else default_config
)
current_config = json.loads(current_config.json())
return templates.TemplateResponse(
"content_type_notion_input.html", context={"request": request, "current_config": current_config}
)
@web_client.get("/config/content_type/{content_type}", response_class=HTMLResponse)
def content_config_page(request: Request, content_type: str):
if content_type not in VALID_TEXT_CONTENT_TYPES:
return templates.TemplateResponse("config.html", context={"request": request})
default_copy = constants.default_config.copy()
default_content_type = default_copy["content-type"][content_type] # type: ignore
default_config = TextContentConfig(
compressed_jsonl=default_content_type["compressed-jsonl"],
embeddings_file=default_content_type["embeddings-file"],
)
current_config = (
state.config.content_type[content_type]
if state.config and state.config.content_type and state.config.content_type[content_type] # type: ignore
else default_config
)
current_config = json.loads(current_config.json())
return templates.TemplateResponse(
"content_type_input.html",
context={
"request": request,
"current_config": current_config,
"content_type": content_type,
},
)
@web_client.get("/config/processor/conversation/openai", response_class=HTMLResponse)
def conversation_processor_config_page(request: Request):
default_copy = constants.default_config.copy()
default_processor_config = default_copy["processor"]["conversation"]["openai"] # type: ignore
default_openai_config = OpenAIProcessorConfig(
api_key="",
chat_model=default_processor_config["chat-model"],
)
current_processor_openai_config = (
state.config.processor.conversation.openai
if state.config
and state.config.processor
and state.config.processor.conversation
and state.config.processor.conversation.openai
else default_openai_config
)
current_processor_openai_config = json.loads(current_processor_openai_config.json())
return templates.TemplateResponse(
"processor_conversation_input.html",
context={
"request": request,
"current_config": current_processor_openai_config,
},
)
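# Minimal wiring sketch: one way the router above could be mounted and served.
# Khoj's real application factory configures much more (state, content index,
# processors), so treat this only as an assumption-laden example.
from fastapi import FastAPI
import uvicorn

from khoj.routers.web_client import web_client

app = FastAPI()
app.include_router(web_client)

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)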
|
debanjum/khoj
|
src/khoj/routers/web_client.py
|
web_client.py
|
py
| 6,990 |
python
|
en
|
code
| 485 |
github-code
|
6
|
71714038267
|
import unittest
import json
from unittest.mock import patch, mock_open
from models.club_model import Club
import repository.club as club_repo
class ClubRepoTest(unittest.TestCase):
@patch("repository.club.filename_club", new='tests/test.json')
def test_load_clubs_should_return_list_of_club_obj(self):
with patch.object(Club, '__init__', return_value=None):
outcome = club_repo.load_clubs()
self.assertIsInstance(outcome, list)
for c in outcome:
self.assertIsInstance(c, Club)
@patch("repository.club.get_clubs.all_clubs", new=[])
@patch("repository.club.load_clubs")
def test_get_clubs_should_call_load_clubs_if_all_club_var_is_empty_list(self, mock_load_clubs):
club_repo.get_clubs()
mock_load_clubs.assert_called_once()
@patch("repository.club.get_clubs.all_clubs", new=[{'foo': 'bar'}])
@patch("repository.club.load_clubs")
def test_get_clubs_should_not_call_load_clubs_if_all_club_var_is_not_empty_list(self, mock_load_clubs):
club_repo.get_clubs()
mock_load_clubs.assert_not_called()
@patch("repository.club.get_clubs.all_clubs", new=[{'foo': 'bar'}])
def test_get_clubs_should_return_all_clubs_list(self):
outcome = club_repo.get_clubs()
self.assertEqual(outcome, [{'foo': 'bar'}])
@patch("repository.club.load_clubs")
def test_update_json_should_write_data_in_file(self, mock_load_clubs):
m = mock_open()
with patch("builtins.open", m, create=True):
to_write = {'name': 'placeholder',
'points': '10',
'email': '[email protected]'}
club_repo.update_clubs_in_json(to_write)
handle = m()
handle.write.assert_called_once()
if __name__ == '__main__':
unittest.main()
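# Sketch of the repository/club.py interface the tests above assume, inferred
# from the patch targets (filename_club, load_clubs, get_clubs.all_clubs,
# update_clubs_in_json). The JSON layout and the Club constructor signature
# are assumptions; the real module may differ.
import json
from models.club_model import Club

filename_club = "clubs.json"

def load_clubs():
    with open(filename_club) as f:
        return [Club(**c) for c in json.load(f)["clubs"]]

def get_clubs():
    # Function-attribute cache, which is why the tests patch get_clubs.all_clubs.
    if not get_clubs.all_clubs:
        get_clubs.all_clubs = load_clubs()
    return get_clubs.all_clubs

get_clubs.all_clubs = []

def update_clubs_in_json(data):
    with open(filename_club, "w") as f:
        f.write(json.dumps(data, indent=4))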
|
Chfrlt/p11_GUDLFT
|
tests/unit_tests/test_club_repository.py
|
test_club_repository.py
|
py
| 1,844 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9790493238
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import include
from django.urls import path
from rest_framework_nested import routers
from . import views
router = routers.SimpleRouter(trailing_slash=False)
router.register(r"sessions", views.SessionsViewSet)
sessions_router = routers.NestedSimpleRouter(router, r"sessions", lookup="session")
sessions_router.register(
r"conflicts", views.SessionConflictsViewSet, basename="session-conflicts"
)
sessions_router.register(
r"changes", views.SessionChangesViewSet, basename="session-changes"
)
sessions_router.register(
r"deployments", views.SessionDeploymentsViewSet, basename="session-deployments"
)
sessions_router.register(
r"files", views.SessionFilesViewSet, basename="session-files",
)
sessions_router.register(
r"branches", views.SessionBranchesViewSet, basename="session-branches"
)
sessions_branches_router = routers.NestedSimpleRouter(
sessions_router, r"branches", lookup="session_branch"
)
sessions_branches_router.register(
r"files", views.SessionBranchesFilesViewSet, basename="session-branch-files"
)
urlpatterns = [
path("", include(router.urls)),
path("", include(sessions_router.urls)),
path("", include(sessions_branches_router.urls)),
path(r"supported_features", views.get_supported_features),
path(r"supported_validators", views.get_supported_validators),
]
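# Hypothetical client sketch showing the URL shapes the nested routers above
# generate. The base URL and the "id" fields in the responses are assumptions,
# not part of the project.
import requests

BASE = "http://localhost:8000"

sessions = requests.get(f"{BASE}/sessions").json()
sid = sessions[0]["id"]  # assumed response shape
conflicts = requests.get(f"{BASE}/sessions/{sid}/conflicts").json()
branches = requests.get(f"{BASE}/sessions/{sid}/branches").json()
bid = branches[0]["id"]  # assumed response shape
files = requests.get(f"{BASE}/sessions/{sid}/branches/{bid}/files").json()
print(len(conflicts), len(files))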
|
wiksla/f5-bigip-journeys-app
|
journeys/backend/urls.py
|
urls.py
|
py
| 1,978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26247501666
|
from delfin.api import common
from delfin.api import extensions
from delfin.api.v1 import access_info
from delfin.api.v1 import alert_source
from delfin.api.v1 import alerts
from delfin.api.v1 import controllers
from delfin.api.v1 import disks
from delfin.api.v1 import filesystems
from delfin.api.v1 import ports
from delfin.api.v1 import qtrees
from delfin.api.v1 import quotas
from delfin.api.v1 import shares
from delfin.api.v1 import storage_pools
from delfin.api.v1 import storages
from delfin.api.v1 import volumes
from delfin.api.v1 import storage_hosts
from delfin.api.v1 import storage_host_initiators
from delfin.api.v1 import storage_host_groups
from delfin.api.v1 import port_groups
from delfin.api.v1 import volume_groups
from delfin.api.v1 import masking_views
class APIRouter(common.APIRouter):
ExtensionManager = extensions.ExtensionManager
def _setup_routes(self, mapper):
mapper.redirect("", "/")
self.resources['storages'] = storages.create_resource()
mapper.resource("storage", "storages",
controller=self.resources['storages'],
member={'sync': 'POST'})
mapper.connect("storages", "/storages/sync",
controller=self.resources['storages'],
action="sync_all",
conditions={"method": ["POST"]})
mapper.connect("storages", "/storages/{id}/capabilities",
controller=self.resources['storages'],
action="get_capabilities",
conditions={"method": ["GET"]})
self.resources['access_info'] = access_info.create_resource()
mapper.connect("storages", "/storages/{id}/access-info",
controller=self.resources['access_info'],
action="show",
conditions={"method": ["GET"]})
mapper.connect("storages", "/storages/{id}/access-info",
controller=self.resources['access_info'],
action="update",
conditions={"method": ["PUT"]})
mapper.connect("storages", "/access-infos",
controller=self.resources['access_info'],
action="show_all",
conditions={"method": ["GET"]})
self.resources['alert_sources'] = alert_source.create_resource()
mapper.connect("storages", "/storages/{id}/snmp-config",
controller=self.resources['alert_sources'],
action="put",
conditions={"method": ["PUT"]})
mapper.connect("storages", "/storages/{id}/snmp-config",
controller=self.resources['alert_sources'],
action="show",
conditions={"method": ["GET"]})
mapper.connect("storages", "/storages/{id}/snmp-config",
controller=self.resources['alert_sources'],
action="delete",
conditions={"method": ["DELETE"]})
mapper.connect("storages", "/snmp-configs",
controller=self.resources['alert_sources'],
action="show_all",
conditions={"method": ["GET"]})
self.resources['alerts'] = alerts.create_resource()
mapper.connect("storages", "/storages/{id}/alerts/{sequence_number}",
controller=self.resources['alerts'],
action="delete",
conditions={"method": ["DELETE"]})
mapper.connect("storages", "/storages/{id}/alerts",
controller=self.resources['alerts'],
action="show",
conditions={"method": ["GET"]})
mapper.connect("storages", "/storages/{id}/alerts/sync",
controller=self.resources['alerts'],
action="sync",
conditions={"method": ["POST"]})
self.resources['storage-pools'] = storage_pools.create_resource()
mapper.resource("storage-pool", "storage-pools",
controller=self.resources['storage-pools'])
self.resources['volumes'] = volumes.create_resource()
mapper.resource("volume", "volumes",
controller=self.resources['volumes'])
self.resources['controllers'] = controllers.create_resource()
mapper.resource("controller", "controllers",
controller=self.resources['controllers'])
self.resources['ports'] = ports.create_resource()
mapper.resource("port", "ports",
controller=self.resources['ports'])
self.resources['disks'] = disks.create_resource()
mapper.resource("disk", "disks",
controller=self.resources['disks'])
self.resources['filesystems'] = filesystems.create_resource()
mapper.resource("filesystems", "filesystems",
controller=self.resources['filesystems'])
self.resources['qtrees'] = qtrees.create_resource()
mapper.resource("qtrees", "qtrees",
controller=self.resources['qtrees'])
self.resources['quotas'] = quotas.create_resource()
mapper.resource("quotas", "quotas",
controller=self.resources['quotas'])
self.resources['shares'] = shares.create_resource()
mapper.resource("shares", "shares",
controller=self.resources['shares'])
self.resources['storage_host_initiators'] \
= storage_host_initiators.create_resource()
mapper.connect("storages", "/storages/{id}/storage-host-initiators",
controller=self.resources['storage_host_initiators'],
action="show",
conditions={"method": ["GET"]})
self.resources['storage_hosts'] = storage_hosts.create_resource()
mapper.connect("storages", "/storages/{id}/storage-hosts",
controller=self.resources['storage_hosts'],
action="show",
conditions={"method": ["GET"]})
self.resources['storage_host_groups'] \
= storage_host_groups.create_resource()
mapper.connect("storages", "/storages/{id}/storage-host-groups",
controller=self.resources['storage_host_groups'],
action="show",
conditions={"method": ["GET"]})
self.resources['port_groups'] \
= port_groups.create_resource()
mapper.connect("storages", "/storages/{id}/port-groups",
controller=self.resources['port_groups'],
action="show",
conditions={"method": ["GET"]})
self.resources['volume_groups'] \
= volume_groups.create_resource()
mapper.connect("storages", "/storages/{id}/volume-groups",
controller=self.resources['volume_groups'],
action="show",
conditions={"method": ["GET"]})
self.resources['masking_views'] \
= masking_views.create_resource()
mapper.connect("storages", "/storages/{id}/masking-views",
controller=self.resources['masking_views'],
action="show",
conditions={"method": ["GET"]})
|
sodafoundation/delfin
|
delfin/api/v1/router.py
|
router.py
|
py
| 7,550 |
python
|
en
|
code
| 201 |
github-code
|
6
|
25598775886
|
import sys
import numpy as np
import cv2
def main():
source_window = "source_image"
gray_window = "gray"
otsu_window = "otsu_threshold"
edge_window = "edge"
gray_img = cv2.imread(sys.argv[1], cv2.IMREAD_GRAYSCALE)
threshold1 = 0
threshold2 = 100
edge_img = cv2.Canny(gray_img, threshold1, threshold2)
cv2.imshow(edge_window, edge_img)
ret, otsu_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)
cv2.imshow(otsu_window, otsu_img)
    contours, hierarchy = cv2.findContours(otsu_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Draw the contours in green on a BGR copy; drawing on the single-channel
    # grayscale image would discard the colour.
    contour_img = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(contour_img, contours, -1, (0, 255, 0), 3)
    cv2.imshow(gray_window, contour_img)
# cv2.imshow("test", findCon_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
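# Follow-up sketch, not part of the original script: a common next step after
# findContours is filtering contours by area and drawing bounding boxes on a
# colour copy. The minimum area is an illustrative assumption.
import sys
import cv2

def draw_boxes(path, min_area=100.0):
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    _, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    for c in contours:
        if cv2.contourArea(c) >= min_area:
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 0, 255), 2)
    return vis

if __name__ == "__main__":
    cv2.imshow("boxes", draw_boxes(sys.argv[1]))
    cv2.waitKey(0)
    cv2.destroyAllWindows()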
|
NMurata07/findContours
|
main.py
|
main.py
|
py
| 852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6727548995
|
from django.utils import timezone
from .models import Post, IP
from django.shortcuts import render, get_object_or_404, redirect
from .forms import PostForm, Login
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import PermissionDenied
from datetime import timedelta
from django.utils.timezone import now
from django.core import serializers
from django.http import HttpResponse
from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.models import User
def get_ip(request):
try:
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(",")[0]
else:
ip = request.META.get("REMOTE_ADDR")
except:
ip = ""
return ip
@login_required
def post_list(request):
    # Most recent known IP for this user, if any
    last_ip = IP.objects.filter(User=request.user).order_by('-entr_date').first()
    form = Login(request.POST)
    if form.is_valid():
        new_ip = form.save(commit=False)
        new_ip.User = request.user
        new_ip.entr_date = timezone.now()
        new_ip.ip_address = get_ip(request)
        # Compare the incoming address with the last stored one before saving
        if last_ip and new_ip.ip_address != last_ip.ip_address:
            messages.warning(request, 'IP address differs from the previous one.')
        new_ip.save()
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
@login_required
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
@login_required
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.writeOnChain()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
def superuser_only(function):
def _inner(request):
if not request.user.is_superuser:
raise PermissionDenied
return function(request)
return _inner
@login_required
def delete_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
post.delete()
return redirect('post_list')
return render(request, 'blog/delete.html', {'post': post})
@superuser_only
def info_superuser(request):
n = {}
users_id = User.objects.all().values_list('id', flat=True)
for x in users_id:
posts = Post.objects.filter(author=x)
n[x] = len(posts)
return render(request, 'blog/info_superuser.html', {'n': n})
@superuser_only
def last_hour_post(request):
dt = now()
PostsLastHour = Post.objects.filter(published_date__range=(dt-timedelta(hours=1), dt))
post_1h = serializers.serialize('json',PostsLastHour)
return HttpResponse(post_1h, content_type="text/json-comment-filtered")
@superuser_only
def search_str(request):
template = 'blog/info_superuser.html'
query = request.GET.get('q')
results_title = Post.objects.filter(Q(title__icontains=query))
results_text = Post.objects.filter(Q(text__icontains=query))
n = len(results_text) + len(results_title)
return HttpResponse(n)
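# Hypothetical blog/urls.py wiring for the views above. Path strings and names
# are assumptions; the project's real URLconf may differ.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.post_list, name='post_list'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    path('post/new/', views.post_new, name='post_new'),
    path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
    path('post/<int:pk>/delete/', views.delete_post, name='delete_post'),
    path('superuser/info/', views.info_superuser, name='info_superuser'),
    path('posts/last-hour/', views.last_hour_post, name='last_hour_post'),
    path('search/', views.search_str, name='search_str'),
]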
|
Dado-pixel/my-second-blog
|
blog/views.py
|
views.py
|
py
| 4,046 |
python
|
en
|
code
| 1 |
github-code
|
6
|