seq_id (string, length 7–11) | text (string, length 156–1.7M) | repo_name (string, length 7–125) | sub_path (string, length 4–132) | file_name (string, length 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
916481756
|
# find the value that a given monkey will scream:
def solve_monkey(monkey, solved_monkeys, unsolved_monkeys):
if monkey in solved_monkeys:
return solved_monkeys[monkey]
monkey_math = unsolved_monkeys[monkey].split(' ')
left = solve_monkey(monkey_math[0], solved_monkeys, unsolved_monkeys)
right = solve_monkey(monkey_math[2], solved_monkeys, unsolved_monkeys)
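# the next line stitches the two solved values around the operator into a string, e.g. "3+5", and lets eval() compute it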
solution = int(eval(str(left) + monkey_math[1] + str(right)))
solved_monkeys[monkey] = solution
del unsolved_monkeys[monkey]
return solution
# find all the monkeys in between two monkeys
def path_to_monkey(curr_monkey, target_monkey, solved_monkeys, unsolved_monkeys):
if curr_monkey == target_monkey:
return [target_monkey]
if curr_monkey in solved_monkeys:
return None
monkey_math = unsolved_monkeys[curr_monkey].split(' ')
left = path_to_monkey(monkey_math[0], target_monkey, solved_monkeys, unsolved_monkeys)
right = path_to_monkey(monkey_math[2], target_monkey, solved_monkeys, unsolved_monkeys)
if left is not None:
return [curr_monkey] + left
elif right is not None:
return [curr_monkey] + right
else:
return None
# Find the needed call of a variable monkey given that the inputted calls to the equals_monkey
# must be equal.
def solve_variable_monkey(equals_monkey, variable_monkey, solved_monkeys, unsolved_monkeys):
path = path_to_monkey(equals_monkey, variable_monkey, solved_monkeys, unsolved_monkeys)
sub_monkeys = unsolved_monkeys[equals_monkey].split(' ')
invariate_monkey = sub_monkeys[0] if sub_monkeys[0] != path[1] else sub_monkeys[2]
solve_monkey(equals_monkey, solved_monkeys, dict(unsolved_monkeys))
val = solved_monkeys[invariate_monkey]
ops = {
'/0': lambda val, n: val * n,
'/2': lambda val, n: n / val,
'*0': lambda val, n: val / n,
'*2': lambda val, n: val / n,
'-0': lambda val, n: val + n,
'-2': lambda val, n: -1 * (val - n),
'+0': lambda val, n: val - n,
'+2': lambda val, n: val - n,
}
# The call of each monkey between the equals_monkey and the variable_monkey will be dependent on one sub_monkey
# whose call is invariate (not determined by anything that we do) and another monkey whose call we
# do control (with our choice of call for the variable_monkey). All monkeys whose call we do control will exist
# in the path between the equals_monkey and the variable_monkey. We can find the call of the variable_monkey by
# performing algebra to calculate the needed call of each sub_monkey whose call we do control (along the path
# between the equals_monkey and the variable monkey).
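# For example, if path[i] computes "left + right" and "right" is the monkey on the path, the '+2' entry
# applies: the needed call of "right" is val - left, i.e. val minus the invariate monkey's call.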
for i in range(1, len(path) - 1):
sub_monkeys = unsolved_monkeys[path[i]].split(' ')
op = sub_monkeys[1] # the operation performed on the calls of the two submonkeys
idx = sub_monkeys.index(path[i + 1]) # the index of the variate monkey in sub_monkeys
invariate_monkey = sub_monkeys[0] if sub_monkeys[0] != path[i + 1] else sub_monkeys[2]
val = ops[op + str(idx)](val, solved_monkeys[invariate_monkey]) # perform algebra
return int(val)
f = open('../inputs/day21.txt').read().splitlines()
solved_monkeys = {n[0]: int(n[1]) for n in [m.split(': ') for m in f] if n[1].isnumeric()}
unsolved_monkeys = {n[0]: n[1] for n in [m.split(': ') for m in f] if not n[1].isnumeric()}
part1 = solve_monkey('root', dict(solved_monkeys), dict(unsolved_monkeys))
part2 = solve_variable_monkey('root', 'humn', solved_monkeys, unsolved_monkeys)
print(part1)
print(part2)
|
UncatchableAlex/advent2022
|
solutions/day21.py
|
day21.py
|
py
| 3,622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72532883709
|
import os
from pathlib import Path
import s4l_v1
import s4l_v1.analysis.viewers as viewers
import s4l_v1.document as document
import s4l_v1.model as model
import s4l_v1.simulation.emfdtd as fdtd
import s4l_v1.units as units
from dotenv import load_dotenv
from osparc_isolve_api import run_simulation
from s4l_v1._api.application import get_app_safe, run_application
from s4l_v1._api.simwrappers import ApiSimulation
from s4l_v1.model import Vec3
load_dotenv()
HOST = os.environ.get("OSPARC_API_URL", "http://127.0.0.1:8006")
KEY = os.environ["OSPARC_API_KEY"]
SECRET = os.environ["OSPARC_API_SECRET"]
def create_model():
wire = model.CreateWireBlock(
p0=Vec3(0, 0, 0), p1=Vec3(100, 100, 100), parametrized=True
)
wire.Name = "Plane Wave Source"
def create_simulation() -> ApiSimulation:
# retrieve needed entities from model
entities = model.AllEntities()
source_box = entities["Plane Wave Source"]
sim = fdtd.Simulation()
sim.Name = "Plane Wave Simulation"
sim.SetupSettings.SimulationTime = 10.0, units.Periods
# Materials:
# No materials
# Sources
planesrc_settings = sim.AddPlaneWaveSourceSettings(source_box)
options = planesrc_settings.ExcitationType.enum
planesrc_settings.ExcitationType = options.Harmonic
planesrc_settings.CenterFrequency = 1.0, units.GHz
# Sensors
# Only using overall field sensor
# Boundary Conditions
options = sim.GlobalBoundarySettings.GlobalBoundaryType.enum
sim.GlobalBoundarySettings.GlobalBoundaryType = options.UpmlCpml
# Grid
manual_grid_settings = sim.AddManualGridSettings([source_box])
manual_grid_settings.MaxStep = (9.0,) * 3 # model units
manual_grid_settings.Resolution = (2.0,) * 3 # model units
# Voxels
auto_voxel_settings = sim.AddAutomaticVoxelerSettings(source_box)
# Solver settings
options = sim.SolverSettings.Kernel.enum
# sim.SolverSettings.Kernel = options.Software
sim.SolverSettings.Kernel = options.Cuda
# FIXME: This does not work. WHY??? sim.SolverSettings.Kernel = options.AXware
return sim
def analyze_simulation(sim):
# Create extractor for a given simulation output file
results = sim.Results()
# overall field sensor
overall_field_sensor = results["Overall Field"]
# Create a slice viewer for the E field
slice_field_viewer_efield = viewers.SliceFieldViewer()
slice_field_viewer_efield.Inputs[0].Connect(overall_field_sensor["EM E(x,y,z,f0)"])
slice_field_viewer_efield.Data.Mode = (
slice_field_viewer_efield.Data.Mode.enum.QuantityRealPart
)
slice_field_viewer_efield.Data.Component = (
slice_field_viewer_efield.Data.Component.enum.Component0
)
slice_field_viewer_efield.Slice.Plane = (
slice_field_viewer_efield.Slice.Plane.enum.YZ
)
slice_field_viewer_efield.Update(0)
slice_field_viewer_efield.GotoMaxSlice()
document.AllAlgorithms.Add(slice_field_viewer_efield)
def setup_simulation(smash_path: Path) -> ApiSimulation:
s4l_v1.document.New()
create_model()
sim = create_simulation()
s4l_v1.document.AllSimulations.Add(sim)
sim.UpdateGrid()
sim.CreateVoxels(str(smash_path))
sim.WriteInputFile()
return sim
def run(smash_path: Path):
sim = setup_simulation(smash_path)
# run using specific version
# run_simulation(sim, isolve_version="2.0.79", host=HOST, api_key=KEY, api_secret=SECRET)
# run using latest version
run_simulation(sim, host=HOST, api_key=KEY, api_secret=SECRET)
analyze_simulation(sim)
def main():
if get_app_safe() is None:
run_application()
project_dir = Path()
filename = "em_fdtd_simulation.smash"
smash_path = project_dir / filename
run(smash_path)
if __name__ == "__main__":
main()
|
ITISFoundation/osparc-simcore
|
tests/public-api/examples/s4l_tutorial.py
|
s4l_tutorial.py
|
py
| 3,834 |
python
|
en
|
code
| 35 |
github-code
|
6
|
33197235075
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import pcraster as pcr
import raster_func
# set the clone map to the regional/local model
clone_map_filename = "/scratch-shared/edwinhs/colombia_model_results/head/l1_top/head_20000101_l1.idf.map"
pcr.setclone(clone_map_filename)
# start and end years
str_year = 2000
end_year = 2012
# calculate the average value/map of the local model
msg = "Calculate the average value/map of the local model!"
print(msg)
# - Using the top layer of the local model
local_model_folder = "/scratch-shared/edwinhs/colombia_model_results/head/l1_top/"
i_month = 0
cum_map = pcr.scalar(0.0)
for year in range(str_year, end_year + 1, 1):
for month in range(1, 12 + 1, 1):
i_month = i_month + 1
file_name = local_model_folder + "/head_%04i%02i01_l1.idf.map" % (year, month)
print(file_name)
cum_map = cum_map + pcr.readmap(file_name)
average_local = cum_map / i_month
pcr.aguila(average_local)
#~ # calculate the average value/map of the global model
#~ # - Using the upper layer of the global model
#~ global_model_folder = "/scratch-shared/edwinhs/modflow_results_in_pcraster/upper_layer/regional/"
#~ i_month = 0
#~ for year in (str_year, end_year + 1, 1):
#~ for month in (1, 12 + 1, 1):
#~ average_global =
#~
#~ # calculate the anomaly value of the local model
#~ anomaly_local = {}
#~
#~
#~ # calculate the anomaly value of the global model
#~ anomaly_global = {}
#~
#~
#~ # calculate the climatology values of the local model's anomaly value
#~ climatology_anomaly_local =
#~ for month in (1, 12 + 1, 1):
#~
#~
#~ # calculate the climatology values of the global model's anomaly value
#~ climatology_anomaly_global =
#~ for month in (1, 12 + 1, 1):
#~
#~
#~ # evaluating climatology maps (e.g. calculate the differences in climatology maps)
#~ difference = map_1 - map_2
#~ for month in (1, 12 + 1, 1):
#~
|
edwinkost/groundwater_model_comparison
|
comparing_and_evaluating_maps/etc/compare_and_evaluate_not-finished.py
|
compare_and_evaluate_not-finished.py
|
py
| 1,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16813784584
|
import os
import copy
from typing import Dict
import numpy as np
import torch
from collections import defaultdict
from hsp.algorithms.population.policy_pool import PolicyPool
from hsp.runner.shared.base_runner import make_trainer_policy_cls
from hsp.utils.shared_buffer import SharedReplayBuffer
from hsp.algorithms.population.utils import _t2n
class TrainerPool:
"""TrainerPool maintains a pool of trainers, each trainer corresponding to one policy, both have the same name.
For policies that are not trained, use null trainer.
By specifying mapping from (env_id, agent_id) to trainer_name, TrainerPool creates buffer for each policy.
"""
def __init__(self, args, policy_pool: PolicyPool, device = torch.device("cpu")):
self.all_args = args
self.device = device
self.policy_pool = policy_pool
self.trainer_pool = {}
self.trainer_total_num_steps = defaultdict(int)
self.use_policy_in_env = dict(args._get_kwargs()).get('use_policy_in_env', False)
self.__loaded_population = False
self.__initialized = False
def policy_config(self, trainer_name):
return self.policy_pool.policy_config[trainer_name]
def policy_type(self, trainer_name):
if trainer_name.startswith("ppo") and trainer_name[-1] in "123":
return eval(trainer_name[-1])
elif trainer_name.startswith("policy"):
# preference policy
return 4
else:
raise RuntimeError(f"Cannot recognize policy type for {trainer_name}.")
def policy_id(self, trainer_name):
return int(self.policy_pool.policy_info[trainer_name][1]["id"] * self.policy_pool.num_policies - 1)
def init_population(self):
self.on_training = []
self.best_r = defaultdict(float)
for policy_name, policy, policy_config, policy_train in self.policy_pool.all_policies():
# use the same name for trainer and policy
trainer_name = policy_name
trainer_cls, _ = make_trainer_policy_cls(policy_config[0].algorithm_name, use_single_network=policy_config[0].use_single_network)
trainer = trainer_cls(policy_config[0], policy, device = self.device)
self.trainer_pool[trainer_name] = trainer
self.best_r[trainer_name] = -1e9
if policy_train:
self.on_training.append(trainer_name)
# trans policies in policy pool to EvalPolicy
self.policy_pool.trans_to_eval()
# train info would update when a trainer performs training
self.train_infos = {}
self.train_infos.update({f"{trainer_name}-total_num_steps":0 for trainer_name in self.trainer_pool.keys()})
self.__loaded_population = True
def reset(self, map_ea2t, n_rollout_threads, num_agents, load_unused_to_cpu=False, **kwargs):
assert self.__loaded_population
self.map_ea2t = map_ea2t
self.n_rollout_threads = n_rollout_threads
self.num_agents = num_agents
self.control_agent_count = defaultdict(int)
self.control_agents = defaultdict(list)
for (e, a), trainer_name in self.map_ea2t.items():
self.control_agent_count[trainer_name] += 1
self.control_agents[trainer_name].append((e, a))
self.active_trainers = []
self.buffer_pool: Dict[str, SharedReplayBuffer] = {}
for trainer_name in self.trainer_pool.keys():
# set n_rollout_threads as control_agent_count[trainer_name] and num_agents as 1
if self.control_agent_count[trainer_name] > 0:
policy_args, obs_space, share_obs_space, act_space = self.policy_config(trainer_name)
self.buffer_pool[trainer_name] = SharedReplayBuffer(
policy_args, 1, obs_space, share_obs_space, act_space,
n_rollout_threads=self.control_agent_count[trainer_name])
self.trainer_pool[trainer_name].to(self.device)
self.active_trainers.append(trainer_name)
else:
if load_unused_to_cpu:
self.trainer_pool[trainer_name].to(torch.device("cpu"))
else:
self.trainer_pool[trainer_name].to(self.device)
self.buffer_pool[trainer_name] = None
#print("active trainers:", self.active_trainers)
self.__initialized = True
def extract_elements(self, trainer_name, x):
return np.stack([x[e][a] for e, a in self.control_agents[trainer_name]])
def skip(self, trainer_name):
# produce actions in parallel envs, skip this trainer
return (self.use_policy_in_env and trainer_name not in self.on_training) or (trainer_name.startswith("script:"))
def init_first_step(self, share_obs:np.ndarray, obs:np.ndarray):
assert self.__initialized
for trainer_name in self.active_trainers:
# extract corresponding (e, a) and add num_agent=1 dimension
obs_lst = np.expand_dims(self.extract_elements(trainer_name, obs), axis=1)
share_obs_lst = np.expand_dims(self.extract_elements(trainer_name, share_obs), axis=1)
self.buffer_pool[trainer_name].share_obs[0] = share_obs_lst.copy()
self.buffer_pool[trainer_name].obs[0] = obs_lst.copy()
self._step = 0
def reward_shaping_steps(self):
"""This should differ among algorithms and should be overrided by subclasses.
"""
reward_shaping_steps = []
for e in range(self.n_rollout_threads):
train_tot_num_steps = [self.trainer_total_num_steps[self.map_ea2t[(e, a)]] * int(self.map_ea2t[(e, a)] in self.on_training) for a in range(self.num_agents)]
reward_shaping_steps.append(max(train_tot_num_steps))
return reward_shaping_steps
@torch.no_grad()
def step(self, step):
assert self.__initialized
actions = np.full((self.n_rollout_threads, self.num_agents), fill_value=None).tolist()
self.step_data = dict()
for trainer_name in self.active_trainers:
self.trainer_total_num_steps[trainer_name] += self.control_agent_count[trainer_name]
self.train_infos[f"{trainer_name}-total_num_steps"] = self.trainer_total_num_steps[trainer_name]
if self.skip(trainer_name):
continue
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
trainer.prep_rollout()
value, action, action_log_prob, rnn_states, rnn_states_critic \
= trainer.policy.get_actions(np.concatenate(buffer.share_obs[step]),
np.concatenate(buffer.obs[step]),
np.concatenate(buffer.rnn_states[step]),
np.concatenate(buffer.rnn_states_critic[step]),
np.concatenate(buffer.masks[step]))
value = np.expand_dims(np.array(_t2n(value)), axis=1)
action = np.expand_dims(np.array(_t2n(action)), axis=1)
action_log_prob = np.expand_dims(np.array(_t2n(action_log_prob)), axis=1)
rnn_states = np.expand_dims(np.array(_t2n(rnn_states)), axis=1)
rnn_states_critic = np.expand_dims(np.array(_t2n(rnn_states_critic)), axis=1)
self.step_data[trainer_name] = value, action, action_log_prob, rnn_states, rnn_states_critic
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
actions[e][a] = action[i][0]
return actions
def insert_data(self, share_obs, obs, rewards, dones, active_masks=None, bad_masks=None, infos=None):
"""
ndarrays of shape (n_rollout_threads, num_agents, *)
"""
assert self.__initialized
self._step += 1
for trainer_name in self.active_trainers:
if self.skip(trainer_name):
continue
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
value, action, action_log_prob, rnn_states, rnn_states_critic = self.step_data[trainer_name]
# (control_agent_count[trainer_name], 1, *)
obs_lst = np.expand_dims(self.extract_elements(trainer_name, obs), axis=1)
share_obs_lst = np.expand_dims(self.extract_elements(trainer_name, share_obs), axis=1)
rewards_lst = np.expand_dims(self.extract_elements(trainer_name, rewards), axis=1)
dones_lst = np.expand_dims(self.extract_elements(trainer_name, dones), axis=1)
rnn_states[dones_lst == True] = np.zeros(((dones_lst == True).sum(), buffer.recurrent_N, buffer.hidden_size), dtype=np.float32)
rnn_states_critic[dones_lst == True] = np.zeros(((dones_lst == True).sum(), *buffer.rnn_states_critic.shape[3:]), dtype=np.float32)
masks = np.ones((self.control_agent_count[trainer_name], 1, 1), dtype=np.float32)
masks[dones_lst == True] = np.zeros(((dones_lst == True).sum(), 1), dtype=np.float32)
bad_masks_lst = active_masks_lst = None
if bad_masks is not None:
bad_masks_lst = np.expand_dims(self.extract_elements(trainer_name, bad_masks), axis=1)
if active_masks is not None:
active_masks_lst = np.expand_dims(self.extract_elements(trainer_name, active_masks), axis=1)
if self.all_args.use_task_v_out:
value = value[:, :, self.policy_id(trainer_name)][:, :, np.newaxis]
buffer.insert(share_obs_lst, obs_lst, rnn_states, rnn_states_critic, action, action_log_prob, value, rewards_lst, masks, active_masks=active_masks_lst, bad_masks=bad_masks_lst)
if infos is not None:
if self.all_args.env_name == "Overcooked" and self.all_args.predict_other_shaped_info:
if not hasattr(buffer, "other_shaped_info"):
buffer.other_shaped_info = np.zeros((buffer.episode_length + 1, buffer.n_rollout_threads, 1, 12), dtype=np.int32)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
buffer.other_shaped_info[self._step, i, 0] = infos[e]["vec_shaped_info_by_agent"][1-a] # insert other agent's shaped info
# partner policy info
if self.all_args.env_name == "Overcooked":
if self.all_args.policy_group_normalization and not hasattr(buffer, "other_policy_type"):
buffer.other_policy_type = np.zeros((buffer.episode_length + 1, buffer.n_rollout_threads, 1, 1), dtype=np.int32)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
buffer.other_policy_type[:, i, :, :] = self.policy_type(self.map_ea2t[(e, 1-a)])
if not hasattr(buffer, "other_policy_id"):
buffer.other_policy_id = np.zeros((buffer.episode_length + 1, buffer.n_rollout_threads, 1, 1), dtype=np.int32)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
buffer.other_policy_id[:, i, :, :] = self.policy_id(self.map_ea2t[(e, 1-a)])
self.step_data = None
def compute_advantages(self):
all_adv = defaultdict(list)
for trainer_name in self.active_trainers:
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
if trainer_name in self.on_training:
advantages = trainer.compute_advantages(buffer)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
all_adv[(self.map_ea2t[(e, 0)], self.map_ea2t[(e, 1)], a)].append(advantages[:, i].mean())
return all_adv
def train(self, **kwargs):
assert self.__initialized
for trainer_name in self.active_trainers:
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
if trainer_name in self.on_training:
trainer.prep_rollout()
# compute returns
next_values = trainer.policy.get_values(np.concatenate(buffer.share_obs[-1]),
np.concatenate(buffer.rnn_states_critic[-1]),
np.concatenate(buffer.masks[-1]))
next_values = np.expand_dims(np.array(_t2n(next_values)), axis=1)
if self.all_args.use_task_v_out:
next_values = next_values[:, :, self.policy_id(trainer_name)][:, :, np.newaxis]
buffer.compute_returns(next_values, trainer.value_normalizer)
# train
trainer.prep_training()
train_info = trainer.train(buffer, turn_on=(self.trainer_total_num_steps[trainer_name] >= self.all_args.critic_warmup_horizon))
self.train_infos.update({f"{trainer_name}-{k}": v for k, v in train_info.items()})
self.train_infos.update({f"{trainer_name}-average_episode_rewards": np.mean(buffer.rewards) * buffer.episode_length})
# place first step observation of next episode
buffer.after_update()
return copy.deepcopy(self.train_infos)
def lr_decay(self, episode, episodes):
for trainer_name in self.on_training:
self.trainer_pool[trainer_name].policy.lr_decay(episode, episodes)
def update_best_r(self, d, save_dir=None):
for trainer_name, r in d.items():
trainer = self.trainer_pool[trainer_name]
if r > self.best_r[trainer_name]:
self.best_r[trainer_name] = r
if trainer_name in self.on_training and save_dir is not None:
if not os.path.exists(str(save_dir) + "/{}".format(trainer_name)):
os.makedirs(str(save_dir) + "/{}".format(trainer_name))
#print("save", str(save_dir) + "/{}".format(trainer_name), f"best_r")
if self.policy_config(trainer_name)[0].use_single_network:
policy_model = trainer.policy.model
torch.save(policy_model.state_dict(), str(save_dir) + "/{}/model_best_r.pt".format(trainer_name))
else:
policy_actor = trainer.policy.actor
torch.save(policy_actor.state_dict(), str(save_dir) + "/{}/actor_best_r.pt".format(trainer_name))
policy_critic = trainer.policy.critic
torch.save(policy_critic.state_dict(), str(save_dir) + "/{}/critic_best_r.pt".format(trainer_name))
def save(self, step, save_dir):
for trainer_name in self.on_training:
trainer = self.trainer_pool[trainer_name]
if not os.path.exists(str(save_dir) + "/{}".format(trainer_name)):
os.makedirs(str(save_dir) + "/{}".format(trainer_name))
trainer_step = self.trainer_total_num_steps[trainer_name]
#print("save", str(save_dir) + "/{}".format(trainer_name), f"periodic_{trainer_step}")
if self.policy_config(trainer_name)[0].use_single_network:
policy_model = trainer.policy.model
torch.save(policy_model.state_dict(), str(save_dir) + "/{}/model_periodic_{}.pt".format(trainer_name, trainer_step))
else:
policy_actor = trainer.policy.actor
torch.save(policy_actor.state_dict(), str(save_dir) + "/{}/actor_periodic_{}.pt".format(trainer_name, trainer_step))
policy_critic = trainer.policy.critic
torch.save(policy_critic.state_dict(), str(save_dir) + "/{}/critic_periodic_{}.pt".format(trainer_name, trainer_step))
|
samjia2000/HSP
|
hsp/algorithms/population/trainer_pool.py
|
trainer_pool.py
|
py
| 15,925 |
python
|
en
|
code
| 15 |
github-code
|
6
|
38744507299
|
import vidcap
import pygame
from baselines.ppo1.mlp_policy import MlpPolicy
from baselines.trpo_mpi import trpo_mpi
import gym
import monkeywars
import wrappers
import numpy as np
import time
import tensorflow as tf
from baselines import deepq
def main():
env = monkeywars.Monkeywars(graphic_mode=True)
wenv = wrappers.ShooterAgentWrapper(env)
#wenv = gym.wrappers.TimeLimit(wenv, max_episode_steps=400)
act = deepq.load("monkeywars_model.pkl")
while True:
obs, done = wenv.reset(), False
wenv.render()
obs = np.array(obs)
episode_rew = 0
it=0
#pygame.image.save(env.screen, 'tmp/image{:03d}.bmp'.format(it))
while it < 500:
wenv.render()
obs, rew, done, _ = wenv.step(act(obs.reshape(1,-1))[0])
obs = np.array(obs)
episode_rew += rew
time.sleep(0.05)
it+=1
#pygame.image.save(env.screen, 'tmp/image{:03d}.bmp'.format(it))
print("Episode reward", episode_rew)
break
def enjoy_trpo():
env = monkeywars.Monkeywars(graphic_mode=True)
wenv = wrappers.ShooterAgentWrapper(env)
ob_space = wenv.observation_space
ac_space = wenv.action_space
pi = MlpPolicy(name='pi', ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)
oldpi = MlpPolicy(name='oldpi', ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver.restore(sess, "./mymodel")
stochastic = tf.Variable(False, dtype=tf.bool)
while True:
obs, done = wenv.reset(), False
wenv.render()
obs = np.array(obs)
episode_rew = 0
it=0
#pygame.image.save(env.screen, 'tmp/image{:03d}.bmp'.format(it))
while it < 500:
wenv.render()
obs, rew, done, _ = wenv.step(pi.act(stochastic, obs)[0])
obs = np.array(obs)
episode_rew += rew
time.sleep(0.05)
it+=1
#pygame.image.save(env.screen, 'tmp/image{:03d}.bmp'.format(it))
print("Episode reward", episode_rew)
break
if __name__ == '__main__':
#main()
enjoy_trpo()
|
battuzz/RL_lab
|
monkeywars/enjoy_monkeywars.py
|
enjoy_monkeywars.py
|
py
| 2,459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34839802828
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 01:32:51 2021
@author: vidhy
"""
from fastapi import FastAPI, File, UploadFile, Request
import uvicorn
import numpy as np
from io import BytesIO
from PIL import Image
import requests
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
app = FastAPI()
origins = [
"http://localhost",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# uvicorn main:app --reload
# install docker
# docker pull tensorflow/serving
# cd D:/deep-learning/
# docker run -t --rm -p 8501:8501 -v D:/deep-learning/potato-disease-classification:/potato-disease-classification tensorflow/serving --rest_api_port=8501 --model_config_file=/potato-disease-classification/models.config
endpoint = "http://localhost:8501/v1/models/potatoes_model:predict"
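# note: the model name in the URL path ("potatoes_model") is expected to match a model entry served
# by TensorFlow Serving, as configured in models.config above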
CLASS_NAMES = ['Early Blight', 'Late Blight', 'Healthy']
@app.get("/ping")
async def ping():
return "Hello, I'm alive"
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
def read_file_as_image(data) -> np.ndarray:
# bytes = await file.read()
image = np.array(Image.open(BytesIO(data)))
return image
@app.post("/predict")
async def predict(
file: UploadFile = File(...)
):
image = read_file_as_image(await file.read())
img_batch = np.expand_dims(image, 0)
json_data = {
"instances": img_batch.tolist()
}
response = requests.post(endpoint, json=json_data)
prediction = response.json()["predictions"][0]
predicted_class = CLASS_NAMES[np.argmax(prediction)]
confidence = np.max(prediction)
return {
'class': predicted_class,
'confidence': float(confidence)
}
if __name__ == "__main__":
uvicorn.run(app, host='localhost', port=8000)
|
VidhyaGupta/Potato-Disease-Classification
|
api/main-tf-serving.py
|
main-tf-serving.py
|
py
| 1,938 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45135866644
|
# code by : dev patel
# https://www.github.com/dev22419/
i = 0
while i <= 5:
x = input("enter a letter : ")
x = x.lower()
if x in ["a","e","i","o","u"]:
print("it is an vowel . ")
i = 6
else :
print("you entered an constant . ")
|
dev22419/msu
|
pro/python/a7/24.py
|
24.py
|
py
| 266 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29546474630
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def _new_id(self):
# get an id from auto-increment
self.cur_id += 1
return self.cur_id
def _get_tree_id(self, n):
"""Check if a tree with head n has been visited before
by checking the signature of the tree
If the tree is not visited,
take a record of the tree, give a new id to the tree,
and return the new id
If the tree has been visited before,
add the signature of the tree to self.res
return the id in record
A signature of a tree with head n:
if n is not None,
(_get_tree_id(n.left), _get_tree_id(n.right), n.val)
if n is None,
(None, None, None)
n: the head of the input tree
return the id of tree with head n
"""
encoding = (None, None, None)
if n:
l_id = self._get_tree_id(n.left)
r_id = self._get_tree_id(n.right)
encoding = (l_id, r_id, n.val)
if encoding in self.tree_signature_map:
if n:
self.res.add(encoding)
else:
self.tree_signature_map[encoding] = (self._new_id(), n)
return self.tree_signature_map[encoding][0]
def findDuplicateSubtrees(self, root):
"""Find dup subtree by encoding of the subtree
an encoding of a subtree with node n as root is a triple (left subtree id, right subtree id, n.val)
"""
self.cur_id = 0
self.tree_signature_map = {} # (id_l, id_r, self.val) -> (id, node reference)
self.res = set()
self._get_tree_id(root)
return [self.tree_signature_map[signature][1] for signature in self.res]
|
HeliWang/upstream
|
Tree/BST/find-dup-subtree.py
|
find-dup-subtree.py
|
py
| 1,870 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8631033228
|
import tkinter
DEFAULT_FONT = ('Helvetica', 14)
###################################################
############GAME OPTIONS CLASS#####################
###################################################
class GameOptions:
def __init__(self):
'''
initialises GameOptions
'''
self._dialog_window = tkinter.Tk()
title_label = tkinter.Label(
master = self._dialog_window, text = 'OTHELLO',
font = DEFAULT_FONT)
title_label.grid(
row = 0, column = 0, columnspan = 2, padx = 10, pady = 10,
sticky = tkinter.N)
row_label = tkinter.Label(
master = self._dialog_window, text = 'Number Of Rows:',
font = DEFAULT_FONT)
row_label.grid(
row = 1, column = 0, padx = 10, pady = 10,
sticky = tkinter.W)
self._row_entry = tkinter.Entry(
master = self._dialog_window, width = 20, font = DEFAULT_FONT)
self._row_entry.grid(
row = 1, column = 1, padx = 10, pady = 1,
sticky = tkinter.W + tkinter.E)
col_label = tkinter.Label(
master = self._dialog_window, text = 'Number Of Columns:',
font = DEFAULT_FONT)
col_label.grid(
row = 2, column = 0, padx = 10, pady = 10,
sticky = tkinter.W)
self._col_entry = tkinter.Entry(
master = self._dialog_window, width = 20, font = DEFAULT_FONT)
self._col_entry.grid(
row = 2, column = 1, padx = 10, pady = 1,
sticky = tkinter.W + tkinter.E)
player1_label = tkinter.Label(
master = self._dialog_window, text = 'Who Goes First?(B/W):',
font = DEFAULT_FONT)
player1_label.grid(
row = 3, column = 0, padx = 10, pady = 10,
sticky = tkinter.W)
self._player1_entry = tkinter.Entry(
master = self._dialog_window, width = 20, font = DEFAULT_FONT)
self._player1_entry.grid(
row = 3, column = 1, padx = 10, pady = 1,
sticky = tkinter.W + tkinter.E)
topleft_label = tkinter.Label(
master = self._dialog_window, text = 'Top Left Player?(B/W):',
font = DEFAULT_FONT)
topleft_label.grid(
row = 4, column = 0, padx = 10, pady = 10,
sticky = tkinter.W)
self._topleft_entry = tkinter.Entry(
master = self._dialog_window, width = 20, font = DEFAULT_FONT)
self._topleft_entry.grid(
row = 4, column = 1, padx = 10, pady = 1,
sticky = tkinter.W + tkinter.E)
moreless_label = tkinter.Label(
master = self._dialog_window, text = 'More Wins Or Less Wins?(>/<):',
font = DEFAULT_FONT)
moreless_label.grid(
row = 5, column = 0, padx = 10, pady = 10,
sticky = tkinter.W)
self._moreless_entry = tkinter.Entry(
master = self._dialog_window, width = 20, font = DEFAULT_FONT)
self._moreless_entry.grid(
row = 5, column = 1, padx = 10, pady = 1,
sticky = tkinter.W + tkinter.E)
button_frame = tkinter.Frame(master = self._dialog_window)
button_frame.grid(
row = 6, column = 0, columnspan = 2, padx = 10, pady = 10,
sticky = tkinter.E + tkinter.S)
ok_button = tkinter.Button(
master = button_frame, text = 'OK', font = DEFAULT_FONT,
command = self._on_ok_button)
ok_button.grid(row = 0, column = 0, padx = 10, pady = 10)
cancel_button = tkinter.Button(
master = button_frame, text = 'Cancel', font = DEFAULT_FONT,
command = self._on_cancel_button)
cancel_button.grid(row = 0, column = 1, padx = 10, pady = 10)
self._dialog_window.rowconfigure(6, weight = 1)
self._dialog_window.columnconfigure(1, weight = 1)
self._ok_clicked = False
self._row = ''
self._col = ''
self._topleft = ''
self._player1 = ''
self._moreless = ''
self.show()
###################################################
##############PUBLIC METHODS#######################
###################################################
def show(self):
'''
shows the window
'''
self._dialog_window.grab_set()
self._dialog_window.wait_window()
def was_ok_clicked(self):
'''
returns true if the ok button was clicked
'''
return self._ok_clicked
def get_row(self):
'''
returns the row entered by the user
'''
return self._row
def get_col(self):
'''
returns the column entered by the user
'''
return self._col
def get_player1(self):
'''
returns the starting player entered by the user
'''
return self._player1
def get_topleft(self):
'''
returns the top left player entered by the user
'''
return self._topleft
def get_moreless(self):
'''
returns the winning rule entered by the user
'''
return self._moreless
###################################################
##############PRIVATE METHODS######################
###################################################
def _on_ok_button(self):
'''
carries out action if ok button was clicked
'''
self._ok_clicked = True
self._row = self._row_entry.get()
self._col = self._col_entry.get()
self._player1 = self._player1_entry.get()
self._topleft = self._topleft_entry.get()
self._moreless = self._moreless_entry.get()
self._dialog_window.destroy()
def _on_cancel_button(self):
'''
carries out action if cancel button was clicked
'''
self._dialog_window.destroy()
|
bsmorton/Othello
|
othello_prompt.py
|
othello_prompt.py
|
py
| 6,226 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11782658861
|
# -*- coding: utf-8 -*-
from random import randint
class StaticAnswers:
"""
collection of callable static/ semi-static strings
"""
def __init__(self, nick=""):
self.nickname = nick
self.helpfile = {
'help': '!help -- display this text',
'version': '!version domain.tld -- receive XMPP server version',
'uptime': '!uptime domain.tld -- receive XMPP server uptime',
'contact': '!contact domain.tld -- receive XMPP server contact address info',
'xep': '!xep XEP Number -- recieve information about the specified XEP'
}
self.possible_answers = {
'1': 'I heard that, %s.',
'2': 'I am sorry for that %s.',
'3': '%s did you try turning it off and on again?'
}
self.error_messages = {
'1': 'not reachable',
'2': 'not a valid target'
}
self.keywords = {
"keywords": ["!help", "!uptime", "!version", "!contact", "!xep"],
"domain_keywords": ["!uptime", "!version", "!contact"],
"no_arg_keywords": ["!help"],
"number_keywords": ["!xep"]
}
def keys(self, key=""):
# if specific keyword in referenced return that
if key in self.keywords.keys():
return self.keywords[key]
# in any other case return the whole dict
return self.keywords["keywords"]
def gen_help(self):
helpdoc = "\n".join(['%s' % value for (_, value) in self.helpfile.items()])
return helpdoc
def gen_answer(self):
possible_answers = self.possible_answers
return possible_answers[str(randint(1, possible_answers.__len__()))] % self.nickname
def error(self,code):
try:
text = self.error_messages[str(code)]
except KeyError:
return 'unknown error'
return text
|
mightyBroccoli/xmpp-chatbot
|
common/strings.py
|
strings.py
|
py
| 1,618 |
python
|
en
|
code
| 7 |
github-code
|
6
|
2026763119
|
import hashlib
import os
import shutil
import zipfile
import numpy as np
def extract_aab(aab_file, extract_dir):
"""
Extract an aab file to the given directory
:param aab_file: path to the aab file
:param extract_dir: directory to extract into
"""
with zipfile.ZipFile(aab_file, 'r') as z:
print(extract_dir)
z.extractall(extract_dir)
def get_aab_feature(aab_dir):
"""
提取aab文件的特征
:param aab_dir: aab文件解压后的目录
:return: 特征向量
"""
feature = []
print(aab_dir)
for root, dirs, files in os.walk(aab_dir):
for file in files:
file_path = os.path.join(root, file)
with open(file_path, 'rb') as f:
# read the first 16 bytes of each file as its feature
feature.append(f.read(16))
if feature:
feature = np.vstack(feature)
print(feature)
else:
feature = np.zeros((0,), dtype=np.uint8)
return feature
def compare_aab_features(feature1, feature2):
"""
Compare the features of two aab files
:param feature1: feature vector of the first aab file
:param feature2: feature vector of the second aab file
:return: similarity score between 0 and 1
"""
# compute a hash of each feature vector
hash1 = hash(feature1.tobytes())
hash2 = hash(feature2.tobytes())
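# note: Python's built-in hash() of bytes is randomized per interpreter run (PYTHONHASHSEED),
# so these hash values (and the resulting similarity score) are only comparable within a single run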
print("hash1: ", hash1)
print("hash2: ", hash2)
# compare the Hamming distance between the two hashes and return a similarity score
hamming_distance = bin(hash1 ^ hash2).count('1')
print("hamming distance: ", hamming_distance)
similarity = 1 - hamming_distance / max(feature1.size * 8, feature2.size * 8)
return similarity
def compare_aab_files(aab_file_path1, aab_file_path2):
"""
Compare the similarity of two aab files
:param aab_file_path1: path to the first aab file
:param aab_file_path2: path to the second aab file
:return: similarity score between 0 and 1
"""
try:
# extract the first aab file to a temporary directory
aab_dir1 = 'tmp1'
extract_aab(aab_file_path1, aab_dir1)
feature1 = get_aab_feature(aab_dir1)
shutil.rmtree(aab_dir1)
# extract the second aab file to a temporary directory
aab_dir2 = 'tmp2'
extract_aab(aab_file_path2, aab_dir2)
feature2 = get_aab_feature(aab_dir2)
shutil.rmtree(aab_dir2)
# compare the features of the two aab files
similarity = compare_aab_features(feature1, feature2)
return similarity
except (IOError, zipfile.BadZipFile, KeyError) as e:
# handle I/O errors, malformed archives and other expected failures
print(f"Error: {str(e)}")
if __name__ == '__main__':
print(compare_aab_files("a.aab","b.aab"))
|
Nienter/mypy
|
personal/aabcom.py
|
aabcom.py
|
py
| 2,731 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
40467524976
|
def solution(e, starts):
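# num[k] will count how many times k appears in the e-by-e multiplication table; the answer for
# each start is then the number in [start, e] with the highest count (ties go to the smaller number)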
num = [0 for _ in range(e+1)]
# store counts for every product up to e
for i in range(1, e+1):
for j in range(i, e+1):
idx = i * j
# break once i * j exceeds e
if idx > e:
break
# if the two factors are equal, increase the count by 1
if i == j:
num[idx] = num[idx] + 1
# if the factors differ, increase the count by 2
else:
num[idx] = num[idx] + 2
result = [0] * (e+1)
max_value = 0
for i in range(e, 0, -1):
if num[i] >= max_value:
max_value = num[i]
result[i] = i
else:
result[i] = result[i + 1]
return [result[start] for start in starts]
|
Cho-El/coding-test-practice
|
프로그래머스 문제/파이썬/연습문제/억억단을 외우자.py
|
억억단을 외우자.py
|
py
| 739 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
9424927662
|
APPNAME = "Aislinn"
VERSION = "0.0.1"
from waflib import Utils
BIN_FILES = [
"bin/aislinn",
"bin/aislinn-cc",
"bin/aislinn-c++",
"bin/mpicc",
"bin/mpicxx"
]
def options(ctx):
ctx.load("compiler_cc python")
def configure(ctx):
ctx.load("compiler_cc python")
if not ctx.env.CFLAGS:
ctx.env.append_value("CFLAGS", "-O2")
ctx.env.append_value("CFLAGS", "-g")
ctx.env.append_value("CFLAGS", "-Wall")
def build(ctx):
ctx.recurse("src/libs/aislinn")
ctx.recurse("src/libs/mpi")
ctx.recurse("src/aislinn")
ctx.install_files("${PREFIX}/bin", BIN_FILES, chmod=Utils.O755)
|
spirali/aislinn
|
wscript
|
wscript
| 638 |
python
|
en
|
code
| 10 |
github-code
|
6
|
|
18680639270
|
import collections
import torchvision.transforms as transforms
import os
import json
try:
from IPython import embed
except:
pass
_DATASETS = {}
Dataset = collections.namedtuple(
'Dataset', ['trainset', 'testset'])
def _add_dataset(dataset_fn):
_DATASETS[dataset_fn.__name__] = dataset_fn
return dataset_fn
def _get_transforms(augment=True, normalize=None):
if normalize is None:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
basic_transform = [transforms.ToTensor(), normalize]
transform_train = []
if augment:
transform_train += [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
]
else:
transform_train += [
transforms.Resize(256),
transforms.CenterCrop(224),
]
transform_train += basic_transform
transform_train = transforms.Compose(transform_train)
transform_test = [
transforms.Resize(256),
transforms.CenterCrop(224),
]
transform_test += basic_transform
transform_test = transforms.Compose(transform_test)
return transform_train, transform_test
def _get_mnist_transforms(augment=True, invert=False, transpose=False):
transform = [
transforms.ToTensor(),
]
if invert:
transform += [transforms.Lambda(lambda x: 1. - x)]
if transpose:
transform += [transforms.Lambda(lambda x: x.transpose(2, 1))]
transform += [
transforms.Normalize((.5,), (.5,)),
transforms.Lambda(lambda x: x.expand(3, 32, 32))
]
transform_train = []
transform_train += [transforms.Pad(padding=2)]
if augment:
transform_train += [transforms.RandomCrop(32, padding=4)]
transform_train += transform
transform_train = transforms.Compose(transform_train)
transform_test = []
transform_test += [transforms.Pad(padding=2)]
transform_test += transform
transform_test = transforms.Compose(transform_test)
return transform_train, transform_test
def _get_cifar_transforms(augment=True):
transform = [
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
]
transform_train = []
if augment:
transform_train += [
transforms.Pad(padding=4, fill=(125, 123, 113)),
transforms.RandomCrop(32, padding=0),
transforms.RandomHorizontalFlip()]
transform_train += transform
transform_train = transforms.Compose(transform_train)
transform_test = []
transform_test += transform
transform_test = transforms.Compose(transform_test)
return transform_train, transform_test
def set_metadata(trainset, testset, config, dataset_name):
trainset.metadata = {
'dataset': dataset_name,
'task_id': config.task_id,
'task_name': trainset.task_name,
}
testset.metadata = {
'dataset': dataset_name,
'task_id': config.task_id,
'task_name': testset.task_name,
}
return trainset, testset
@_add_dataset
def inat2018(root, config):
from dataset.inat import iNat2018Dataset
transform_train, transform_test = _get_transforms()
trainset = iNat2018Dataset(root, split='train', transform=transform_train, task_id=config.task_id)
testset = iNat2018Dataset(root, split='val', transform=transform_test, task_id=config.task_id)
trainset, testset = set_metadata(trainset, testset, config, 'inat2018')
return trainset, testset
def load_tasks_map(tasks_map_file):
assert os.path.exists(tasks_map_file), tasks_map_file
with open(tasks_map_file, 'r') as f:
tasks_map = json.load(f)
tasks_map = {int(k): int(v) for k, v in tasks_map.items()}
return tasks_map
@_add_dataset
def cub_inat2018(root, config):
"""This meta-task is the concatenation of CUB-200 (first 25 tasks) and iNat (last 207 tasks).
- The first 10 tasks are classification of the animal species inside one of 10 orders of birds in CUB-200
(considering all orders except passeriformes).
- The next 15 tasks are classification of species inside the 15 families of the order of passeriformes
- The remaining 207 tasks are classification of the species inside each of 207 families in iNat
As noted above, for CUB-200 the first 10 tasks are classification of species inside an order, rather than inside a family
as done in iNat (recall order > family > species). This is done because CUB-200 has very few images
in each family of birds (except for the families of passeriformes). Hence, we go up a step in the taxonomy and
consider classification inside orders rather than families.
"""
NUM_CUB = 25
NUM_CUB_ORDERS = 10
NUM_INAT = 207
assert 0 <= config.task_id < NUM_CUB + NUM_INAT
transform_train, transform_test = _get_transforms()
if 0 <= config.task_id < NUM_CUB:
# CUB
from dataset.cub import CUBTasks, CUBDataset
tasks_map_file = os.path.join(root, 'cub/CUB_200_2011', 'final_tasks_map.json')
tasks_map = load_tasks_map(tasks_map_file)
task_id = tasks_map[config.task_id]
if config.task_id < NUM_CUB_ORDERS:
# CUB orders
train_tasks = CUBTasks(CUBDataset(root, split='train'))
trainset = train_tasks.generate(task_id=task_id,
use_species_names=True,
transform=transform_train)
test_tasks = CUBTasks(CUBDataset(root, split='test'))
testset = test_tasks.generate(task_id=task_id,
use_species_names=True,
transform=transform_test)
else:
# CUB passeriformes families
train_tasks = CUBTasks(CUBDataset(root, split='train'))
trainset = train_tasks.generate(task_id=task_id,
task='family',
taxonomy_file='passeriformes.txt',
use_species_names=True,
transform=transform_train)
test_tasks = CUBTasks(CUBDataset(root, split='test'))
testset = test_tasks.generate(task_id=task_id,
task='family',
taxonomy_file='passeriformes.txt',
use_species_names=True,
transform=transform_test)
else:
# iNat2018
from dataset.inat import iNat2018Dataset
tasks_map_file = os.path.join(root, 'inat2018', 'final_tasks_map.json')
tasks_map = load_tasks_map(tasks_map_file)
task_id = tasks_map[config.task_id - NUM_CUB]
trainset = iNat2018Dataset(root, split='train', transform=transform_train, task_id=task_id)
testset = iNat2018Dataset(root, split='val', transform=transform_test, task_id=task_id)
trainset, testset = set_metadata(trainset, testset, config, 'cub_inat2018')
return trainset, testset
@_add_dataset
def imat2018fashion(root, config):
NUM_IMAT = 228
assert 0 <= config.task_id < NUM_IMAT
from dataset.imat import iMat2018FashionDataset, iMat2018FashionTasks
transform_train, transform_test = _get_transforms()
train_tasks = iMat2018FashionTasks(iMat2018FashionDataset(root, split='train'))
trainset = train_tasks.generate(task_id=config.task_id,
transform=transform_train)
test_tasks = iMat2018FashionTasks(iMat2018FashionDataset(root, split='validation'))
testset = test_tasks.generate(task_id=config.task_id,
transform=transform_test)
trainset, testset = set_metadata(trainset, testset, config, 'imat2018fashion')
return trainset, testset
@_add_dataset
def split_mnist(root, config):
assert isinstance(config.task_id, tuple)
from dataset.mnist import MNISTDataset, SplitMNISTTask
transform_train, transform_test = _get_mnist_transforms()
train_tasks = SplitMNISTTask(MNISTDataset(root, train=True))
trainset = train_tasks.generate(classes=config.task_id, transform=transform_train)
test_tasks = SplitMNISTTask(MNISTDataset(root, train=False))
testset = test_tasks.generate(classes=config.task_id, transform=transform_test)
trainset, testset = set_metadata(trainset, testset, config, 'split_mnist')
return trainset, testset
@_add_dataset
def split_cifar(root, config):
assert 0 <= config.task_id < 11
from dataset.cifar import CIFAR10Dataset, CIFAR100Dataset, SplitCIFARTask
transform_train, transform_test = _get_cifar_transforms()
train_tasks = SplitCIFARTask(CIFAR10Dataset(root, train=True), CIFAR100Dataset(root, train=True))
trainset = train_tasks.generate(task_id=config.task_id, transform=transform_train)
test_tasks = SplitCIFARTask(CIFAR10Dataset(root, train=False), CIFAR100Dataset(root, train=False))
testset = test_tasks.generate(task_id=config.task_id, transform=transform_test)
trainset, testset = set_metadata(trainset, testset, config, 'split_cifar')
return trainset, testset
@_add_dataset
def cifar10_mnist(root, config):
from dataset.cifar import CIFAR10Dataset
from dataset.mnist import MNISTDataset
from dataset.expansion import UnionClassificationTaskExpander
transform_train, transform_test = _get_cifar_transforms()
trainset = UnionClassificationTaskExpander(merge_duplicate_images=False)(
[CIFAR10Dataset(root, train=True), MNISTDataset(root, train=True, expand=True)], transform=transform_train)
testset = UnionClassificationTaskExpander(merge_duplicate_images=False)(
[CIFAR10Dataset(root, train=False), MNISTDataset(root, train=False, expand=True)], transform=transform_test)
return trainset, testset
@_add_dataset
def cifar10(root):
from torchvision.datasets import CIFAR10
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
trainset = CIFAR10(root, train=True, transform=transform, download=True)
testset = CIFAR10(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def cifar100(root):
from torchvision.datasets import CIFAR100
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
trainset = CIFAR100(root, train=True, transform=transform, download=True)
testset = CIFAR100(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def mnist(root):
from torchvision.datasets import MNIST
transform = transforms.Compose([
lambda x: x.convert("RGB"),
transforms.Resize(224),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (1., 1., 1.)),
])
trainset = MNIST(root, train=True, transform=transform, download=True)
testset = MNIST(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def letters(root):
from torchvision.datasets import EMNIST
transform = transforms.Compose([
lambda x: x.convert("RGB"),
transforms.Resize(224),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (1., 1., 1.)),
])
trainset = EMNIST(root, train=True, split='letters', transform=transform, download=True)
testset = EMNIST(root, train=False, split='letters', transform=transform)
return trainset, testset
@_add_dataset
def kmnist(root):
from torchvision.datasets import KMNIST
transform = transforms.Compose([
lambda x: x.convert("RGB"),
transforms.Resize(224),
transforms.ToTensor(),
])
trainset = KMNIST(root, train=True, transform=transform, download=True)
testset = KMNIST(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def stl10(root):
from torchvision.datasets import STL10
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
trainset = STL10(root, split='train', transform=transform, download=True)
testset = STL10(root, split='test', transform=transform)
trainset.targets = trainset.labels
testset.targets = testset.labels
return trainset, testset
def get_dataset(root, config=None):
return _DATASETS[config.name](os.path.expanduser(root), config)
|
awslabs/aws-cv-task2vec
|
datasets.py
|
datasets.py
|
py
| 12,841 |
python
|
en
|
code
| 96 |
github-code
|
6
|
1909072931
|
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.oneview_client import OneViewClient
# To run this example, fill in the IP and the credentials below, or use a configuration file
config = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<oneview_administrator_name>",
"password": "<oneview_administrator_password>",
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
# To run this example you can define a logical interconnect uri (logicalInterconnectUri) and the ethernet network uri
# or the example will attempt to retrieve those automatically from the appliance.
logical_interconnect_uri = None
ethernet_network_uri = None
# Attempting to get first LI and Ethernet uri and use them for this example
if logical_interconnect_uri is None:
logical_interconnect_uri = oneview_client.logical_interconnects.get_all()[0]['uri']
if ethernet_network_uri is None:
ethernet_network_uri = oneview_client.ethernet_networks.get_all()[0]['uri']
options = {
"name": "Uplink Set Demo",
"status": "OK",
"logicalInterconnectUri": logical_interconnect_uri,
"networkUris": [
ethernet_network_uri
],
"fcNetworkUris": [],
"fcoeNetworkUris": [],
"portConfigInfos": [],
"connectionMode": "Auto",
"networkType": "Ethernet",
"manualLoginRedistributionState": "NotSupported",
}
# Create an uplink set
print("\nCreate an uplink set")
created_uplink_set = oneview_client.uplink_sets.create(options)
print("Created uplink set '{name}' successfully.\n uri = '{uri}'".format(**created_uplink_set))
# Update an uplink set
print("\nUpdate an uplink set")
created_uplink_set['name'] = 'Renamed Uplink Set Demo'
updated_uplink_set = oneview_client.uplink_sets.update(created_uplink_set)
print("Updated uplink set name to '{name}' successfully.\n uri = '{uri}'".format(**updated_uplink_set))
# Get a paginated list of uplink set resources sorting by name ascending and filtering by status
print("\nGet a list of uplink sets")
uplink_sets = oneview_client.uplink_sets.get_all(0, 15, sort='name:ascending')
for uplink_set in uplink_sets:
print(' %s' % uplink_set['name'])
# Get an uplink set resource by name
print("\nGet uplink set by name")
uplink_set = oneview_client.uplink_sets.get_by('name', 'Renamed Uplink Set Demo')[0]
print("Found uplink set at uri '{uri}'\n by name = '{name}'".format(**uplink_set))
# Add an ethernet network to the uplink set
# To run this example you must define an ethernet network uri or ID below
ethernet_network_id = None
if ethernet_network_id:
print("\nAdd an ethernet network to the uplink set")
uplink_set = oneview_client.uplink_sets.add_ethernet_networks(created_uplink_set['uri'], ethernet_network_id)
print("The uplink set with name = '{name}' have now the networkUris:\n {networkUris}".format(**uplink_set))
# Get an uplink set resource by uri
print("\nGet an uplink set by uri")
uplink_set = oneview_client.uplink_sets.get(created_uplink_set['uri'])
pprint(uplink_set)
# Remove an ethernet network from the uplink set
# To run this example you must define an ethernet network uri or ID below
ethernet_network_id = None
if ethernet_network_id:
print("\nRemove an ethernet network of the uplink set")
uplink_set = oneview_client.uplink_sets.remove_ethernet_networks(created_uplink_set['uri'], ethernet_network_id)
print("The uplink set with name = '{name}' have now the networkUris:\n {networkUris}".format(**uplink_set))
# Get the associated ethernet networks of an uplink set
print("\nGet the associated ethernet networks of the uplink set")
networks = oneview_client.uplink_sets.get_ethernet_networks(created_uplink_set['uri'])
pprint(networks)
# Delete the recently created uplink set
print("\nDelete the uplink set")
oneview_client.uplink_sets.delete(updated_uplink_set)
print("Successfully deleted the uplink set")
|
HewlettPackard/python-hpOneView
|
examples/uplink_sets.py
|
uplink_sets.py
|
py
| 3,986 |
python
|
en
|
code
| 86 |
github-code
|
6
|
2000640644
|
import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from snippets import Words
# from Tkinter import *
JINJA_ENVIRONMENT=jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class Add(webapp2.RequestHandler):
def split(self, word):
return [char for char in word]
def merge(self, word):
new=''
for x in word:
new += x
return new
def sort(self, word):
split_data =self.split(word)
# print(split_data)
sorted_alphabets = sorted(split_data)
# print(sorted_alphabets)
merged_word = self.merge(sorted_alphabets)
return merged_word
# def incrementCounter(self, user)
def get(self):
self.response.headers['Content-Type'] = 'text/html'
display_message='Add words in the Anagram Engine'
Template_values ={
'display_message': display_message
}
template = JINJA_ENVIRONMENT.get_template('add.html')
self.response.write(template.render(Template_values))
def post(self):
action = self.request.get('add_button')
# print (action)
user = users.get_current_user()
# print(myuser)
add_string=self.request.get('word_input')
sorted_alphabets = sorted(self.split(add_string.lower()))
keyword=user.user_id() + self.merge(sorted_alphabets)
if action == 'Add':
key = ndb.Key('Words', keyword)
word = key.get()
myuser_key= ndb.Key('MyUser', user.user_id())
myuser=myuser_key.get()
if word == None:
word = Words(id=keyword)
word.count_of_words=0
word.put()
myuser.uniqueAnagramCounter=myuser.uniqueAnagramCounter+1
myuser.put()
string = keyword
if string == '' or string == None or len(string)<3:
self.redirect('/add')
return
word_doesnt_exists = True
List = []
for i in word.wordsList:
print(i)
List.append(i)
print(List.count(add_string))
if List.count(add_string.lower())>0:
word_doesnt_exists=False
print('word exists')
if(word_doesnt_exists):
word.wordsList.append(add_string.lower())
word.count_of_words=word.count_of_words+1
word.alphabet_no_List.append(len(add_string))
word.put()
myuser.wordCounter= myuser.wordCounter+1
myuser.put()
# Code to read from text document
# root = Tk()
# root.fileName = filedialog.askopenfilename(filetypes =(("All text file", "*.txt")))
dict = []
f = open("words.txt", "r")
for line in f.readlines():
# sorted_word_from_text = self.merge(sorted(self.split(line.strip())))
#
if(line.rstrip()):
# print (sorted_word_from_text)
dict.append(line.rstrip())
# user = users.get_current_user()
file_action = self.request.get('add_from_files')
if file_action=='Add':
print(len(dict))
for i in dict:
if len(i)>0:
keyword1=user.user_id() + self.sort(i)
key = ndb.Key('Words', keyword1)
word = key.get()
# print(word)
new_word = False
if word!=None:
if word.wordsList.count(i)==0:
word.wordsList.append(i)
word.count_of_words=word.count_of_words+1
word.alphabet_no_List.append(len(i))
word.put()
# word.wordsList.append(i)
# word.count_of_words=word.count_of_words+1
# word.alphabet_no_List.append(len(i))
# word.put()
else:
new_word=True
if(new_word):
word = Words(id=keyword1)
print(i + " word is added")
word.wordsList.append(i)
word.count_of_words=1
word.alphabet_no_List.append(len(i))
word.put()
print(i)
self.redirect('/add')
self.redirect('/add')
# raw_word =self.request.get('word_input')
# sorted_alphabets = sorted(self.split(raw_word.lower()))
# user = users.get_current_user()
# # used as a key to display only certain content to certain
# keyword =user.user_id()+merge(sorted_alphabets)
# print(keyword)
# # if action =='Add':
#
#
# # use user_id()+ keyword as the key.
# word_key = ndb.Key('Words', keyword)
# word = word_key.get()
#
# if word==None:
# # word = Word(id=keyword)
# word.word = raw_word
# word.count_of_alphabets = len(raw_word)
# word.count_of_words = 1
# word.put()
# word.word.append(raw_word)
# word.put()
# self.redirect('/add')
#
# else:
# word.word.append(raw_word)
# word.count_of_alphabets = len(raw_word)
# countOfWords = word.count_of_words
# word.count_of_words=countOfWords+1
# word.put()
# self.redirect('/add')
|
yolo117/Anagram-Checker
|
add.py
|
add.py
|
py
| 5,784 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24287696833
|
def reachable(nums):
i = 0
while i < len(nums) - 1:
if nums[i] == 0:
return False
else:
i += nums[i]
if i == len(nums) - 1:
return True
return False
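# Added illustration (not part of the original solution): reachable() hops exactly
# nums[i] steps from index i, so for [2, 0, 1, 0] the walk is 0 -> 2 -> 3 (the last
# index) and the result is True, while [0, 1] fails immediately because nums[0] == 0.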
def main():
assert reachable([1, 3, 1, 2, 0.1])
assert not reachable([1, 2, 1, 0, 0])
if __name__ == '__main__':
main()
|
ckallum/Daily-Coding-Problem
|
solutions/#192.py
|
#192.py
|
py
| 365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19601208421
|
import sublime
import sublime_plugin
import re
class Divideipv4segmentsCommand(sublime_plugin.TextCommand):
def run(self, edit):
full_select = sublime.Region(0, self.view.size())
splitted = self.view.split_by_newlines(full_select)
text_archive = []
for text_line in splitted:
text = self.view.substr(text_line)
if text not in text_archive:
text_archive.append(text)
text_archive.sort()
prev_segment = ""
for index, ip in enumerate(text_archive):
segment_search = re.search(r"(^\d{1,3}\.\d{1,3}\.\d{1,3})", ip)
if segment_search:
segment = segment_search.group()
if segment != prev_segment and index > 0:
text_archive[index] = "\n{}".format(ip)
prev_segment = segment
self.view.erase(edit, sublime.Region(0, self.view.size()))
self.view.insert(
edit,
0,
"\n".join(text_archive)
)
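        # Added illustration (not part of the original plugin): the regex above captures the
        # first three octets, so a sorted list like ["10.0.0.5", "10.0.0.9", "10.0.1.2"] is
        # rewritten with a blank line inserted before "10.0.1.2", visually grouping /24 segments.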
|
opendefinition/Threathunterspack
|
divideipv4segments.py
|
divideipv4segments.py
|
py
| 864 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6581285508
|
from django.http import HttpResponse
import requests
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import uuid
reference_id = uuid.uuid4()
def login(request):
url = "https://test.cashfree.com/api/v1/order/create"
payload = {
"appid": "TEST3931154d6e90b54bfbc3b4946d511393",
"secretKey": "TEST701a10a8d7389d719903c77dda9fa993fbc0db63",
"orderId": reference_id,
"orderAmount": "1",
"orderCurrency": "INR",
"oderNote": "pay",
"customerName": "mohan",
"customerEmail": "[email protected]",
"customerPhone": "8494863493",
# "returnUrl": "https://cashfree.com",
}
headers={
'content_type':'application/json'
}
response = requests.request("POST", url, data=payload,headers=headers)
print(response.text)
return render(request,'home.html',{"response":response.text})
def payment_info(request):
    print(request.body)
if request.method == 'POST':
# Fetch the payment response details from the request
order_id = request.POST.get('order_id')
payment_status = request.POST.get('payment_status')
print(order_id)
print(payment_status)
return None
|
Gunarjith/verceldeploy
|
masterlink/views.py
|
views.py
|
py
| 1,350 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29210359176
|
import streamlit as st
from PIL import Image
import numpy as np
def app():
display = Image.open('./Attendance-management.jpg')
display = np.array(display)
st.image(display)
st.markdown(""" <style> .font {
font-size:20px ; font-family: 'Cooper Black'; text-align: center; color: #000000;}
</style> """, unsafe_allow_html=True)
st.markdown('<h1 class="font">Made With ❤️ By Debasish</h1>', unsafe_allow_html=True)
|
dest-royer02/Attendance_Application
|
pages/welcomePage.py
|
welcomePage.py
|
py
| 468 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38416857272
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import *
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
# /api/recipe/recipes
# /api/recipe/recipes/1/
def detail_url(recipe_id):
"""return recipe detail url"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""Create and return sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user,**params):
"""Create and return sample recipe"""
test_recipe = {
'title': 'Mushroom Chicken',
'time_minutes': 10,
'price': 5.00
}
# update will create/update keys in dictionary
test_recipe.update(params)
return Recipe.objects.create(user=user, **test_recipe)
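# Added illustration (hypothetical call, not part of the original tests): because of the
# dict.update() above, sample_recipe(user, title='Tea') keeps the default time_minutes
# and price but overrides the title before the Recipe row is created.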
class PublicRecipeTests(TestCase):
"""Test publicly avaialble tags API"""
def setup(self):
self.client = APIClient()
def test_auth_required(self):
res = self.client.get(RECIPE_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsTests(TestCase):
"""Test the unauthenticated recipe API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass',
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_recipe(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited(self):
"""Test that recipes are retrieved for user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""TEst viewing recipe detail"""
recipe=sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_recipe(self):
"""Test creating recipe"""
new_recipe = {
'title': 'Cake',
'time_minutes': 30,
'price': 15.00
}
res = self.client.post(RECIPE_URL, new_recipe)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in new_recipe.keys():
self.assertEqual(new_recipe[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Creating recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
new_recipe = {
'title': 'CheeseCake',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 15.00
}
res = self.client.post(RECIPE_URL, new_recipe)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Shrimp')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
new_recipe = {
'title': 'Prawn curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 25,
'price': 20.00
}
res = self.client.post(RECIPE_URL, new_recipe)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""test updating recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url,payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti',
'time_minutes': 15,
'price': 10.00
}
url = detail_url(recipe.id)
self.client.put(url,payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
|
deveshp530/recipe-app-api
|
app/recipe/tests/test_recipe.py
|
test_recipe.py
|
py
| 6,459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43111552824
|
from typing import Any, Dict, Iterable
import pandas as pd
from fugue import DataFrame, FugueWorkflow, PandasDataFrame, out_transform, transform
from fugue.constants import FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH
def test_transform():
pdf = pd.DataFrame([[1, 10], [0, 0], [1, 1], [0, 20]], columns=["a", "b"])
def f1(df: pd.DataFrame) -> pd.DataFrame:
return df.sort_values("b").head(1)
result = transform(pdf, f1, schema="*")
assert isinstance(result, pd.DataFrame)
assert result.values.tolist() == [[0, 0]]
# schema: *
def f2(df: pd.DataFrame) -> pd.DataFrame:
return df.sort_values("b").head(1)
result = transform(pdf, f2)
assert isinstance(result, pd.DataFrame)
assert result.values.tolist() == [[0, 0]]
result = transform(pdf, f2, partition=dict(by=["a"]))
assert isinstance(result, pd.DataFrame)
assert sorted(result.values.tolist(), key=lambda x: x[0]) == [[0, 0], [1, 1]]
result = transform(
pdf, f2, partition=dict(by=["a"]), force_output_fugue_dataframe=True
)
assert isinstance(result, DataFrame)
ppdf = PandasDataFrame(pdf)
assert isinstance(transform(ppdf, f2), DataFrame)
# schema: *
def f3(df: pd.DataFrame, called: callable) -> pd.DataFrame:
called()
return df
cb = Callback()
result = transform(pdf, f3, callback=cb.called)
assert 1 == cb.ct
def test_transform_from_yield(tmpdir):
# schema: *,x:int
def f(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(x=1)
dag = FugueWorkflow()
dag.df([[0]], "a:int").yield_dataframe_as("x1")
dag.df([[1]], "b:int").yield_dataframe_as("x2")
dag.run("", {FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)})
result = transform(dag.yields["x1"], f)
assert isinstance(result, DataFrame)
assert result.as_array(type_safe=True) == [[0, 1]]
result = transform(
dag.yields["x2"],
f,
engine_conf={FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)},
)
assert isinstance(result, DataFrame)
assert result.as_array(type_safe=True) == [[1, 1]]
def test_out_transform(tmpdir):
pdf = pd.DataFrame([[1, 10], [0, 0], [1, 1], [0, 20]], columns=["a", "b"])
class T:
def __init__(self):
self.n = 0
def f(self, df: Iterable[Dict[str, Any]]) -> None:
self.n += 1
t = T()
out_transform(pdf, t.f)
assert 1 == t.n
t = T()
out_transform(pdf, t.f, partition=dict(by=["a"]))
assert 2 == t.n
dag = FugueWorkflow()
dag.df(pdf).yield_dataframe_as("x1")
dag.df(pdf).yield_dataframe_as("x2")
dag.run("", {FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)})
t = T()
out_transform(dag.yields["x1"], t.f)
assert 1 == t.n
t = T()
out_transform(
dag.yields["x2"],
t.f,
partition=dict(by=["a"]),
engine_conf={FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)},
)
assert 2 == t.n
# schema: *
def f3(df: pd.DataFrame, called: callable) -> pd.DataFrame:
called()
return df
cb = Callback()
result = out_transform(pdf, f3, callback=cb.called)
assert 1 == cb.ct
class Callback:
def __init__(self):
self.ct = 0
def called(self) -> None:
self.ct += 1
|
ofili/Wrangle-and-Analyze-Data
|
venv/Lib/site-packages/tests/fugue/test_interfaceless.py
|
test_interfaceless.py
|
py
| 3,306 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41348833415
|
# Configuration file for the Sphinx documentation builder.
#
root_doc = 'index'
master_doc = 'index'
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = ''
copyright = '2022, Aurelius Atlas Enterprise'
author = 'Americo'
release = '0.1'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autosectionlabel',
#'myst_parser',
]
source_suffix = ['.rst', '.md']
templates_path = ['_templates']
exclude_patterns = []
intersphinx_mapping = {
'sphinx': ('https://github.com/aureliusenterprise/helm-governance/blob/main/README.md', None),
}
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
#alabaster is the other theme
html_theme = "sphinx_rtd_theme"
html_static_path = ['_static']
html_logo = '_static/logo/logo1.png'
html_favicon = '_static/favicon/fav.png'
#
#html_style = 'theme1.css'
def setup(app):
    # add_css_file is the non-deprecated replacement for add_stylesheet (Sphinx >= 1.8)
    app.add_css_file('theme1.css')
|
aureliusenterprise/doc_technical_manual
|
DOc/docs/source/conf.py
|
conf.py
|
py
| 1,408 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8372518963
|
import pycosat
from pprint import pprint
# number of cells in sudoku
NUM_CELLS = 81
# this can be used to iterate over all 3x3 subsquares of the board
# V V V
# 1 2 3 4 5 6 7 8 9
#> 1 |0 0 0| 0 0 0| 0 0 0|
# 2 |0 0 0| 0 0 0| 0 0 0|
# 3 |0 0 0| 0 0 0| 0 0 0|
# ---------------------
#> 4 |0 0 0| 0 0 0| 0 0 0|
# 5 |0 0 0| 0 0 0| 0 0 0|
# 6 |0 0 0| 0 0 0| 0 0 0|
# --------------------
#> 7 |0 0 0| 0 0 0| 0 0 0|
# 8 |0 0 0| 0 0 0| 0 0 0|
# 9 |0 0 0| 0 0 0| 0 0 0|
SUBSQUARE_BOUNDARIES = [1, 4, 7]
# largest digit used in sudoku
NUM_DIGITS = 9
def get_cell_value(row, column, digit):
"""
    Build a unique id for a cell.
    The first digit is the row, the second the column and the third the digit value.
    Ex: the cells (1,3) and (3,1) with digit 7 get different ids:
        cell (1,3) = 137
        cell (3,1) = 317
"""
    return row * 100 + column * 10 + digit
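# Added sanity check (illustration, not part of the original file): the packing above is
# injective for rows, columns and digits in 1..9, matching the docstring examples.
assert get_cell_value(1, 3, 7) == 137
assert get_cell_value(3, 1, 7) == 317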
def get_base_clauses():
"""
    Build the clauses stating that every cell holds at least one digit,
    using the cell ids defined above.
    Ex:
        cell (1,1) with digit 7 is named 117
        cell (2,3) with digit 5 is named 235
"""
base_clauses = []
for row in range(1, NUM_DIGITS+1):
for column in range(1, NUM_DIGITS+1):
clauses = []
for digit in range(1, NUM_DIGITS+1):
clauses.append(get_cell_value(row,column,digit))
base_clauses.append(clauses)
return base_clauses
def get_unique_cells_clauses():
"""
    Build clauses that guarantee each cell holds at most one digit.
    For every pair of digits in the same cell the clause is:
        ~current_digit or ~next_digit
    Example:
        for cell literal 111 the clauses are:
        (-111,-112), (-111,-113), (-111,-114), ..., (-111,-119)
"""
unique_digits_clauses = []
for row in range(1, NUM_DIGITS+1):
for column in range(1,NUM_DIGITS+1):
for digit in range(1, NUM_DIGITS+1):
for next_digit in range(digit+1, NUM_DIGITS+1):
cell_id = -get_cell_value(row,column,digit)
next_cell_id = -get_cell_value(row,column,next_digit)
unique_digits_clauses.append([cell_id,next_cell_id])
return unique_digits_clauses
def get_unique_subset_clauses(board_subset):
"""
    Guarantee that each digit appears at most once within the given board subset.
"""
subset_clauses = []
for index, first_tuple in enumerate(board_subset):
for n_index, n_tuple in enumerate(board_subset):
if index < n_index:
for digit in range(1, NUM_DIGITS + 1):
clause = [-get_cell_value(
first_tuple[0], first_tuple[1], digit),
-get_cell_value(
n_tuple[0], n_tuple[1], digit)]
subset_clauses.append(clause)
return subset_clauses
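# Added illustration (not part of the original file): for the subset [(1, 1), (1, 2)] and
# digit 5 the generated clause is [-115, -125], i.e. "these two cells cannot both hold 5" -
# the pairwise at-most-once encoding reused below for rows, columns and subsquares.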
def get_row_unique_clauses():
"""
    Guarantee that each digit appears only once in every row.
"""
unique_clauses = []
for row in range(1,NUM_DIGITS +1):
subset = []
for column in range(1, NUM_DIGITS+1):
subset.append((row,column))
unique_clauses.extend(get_unique_subset_clauses(subset))
return unique_clauses
def get_columns_unique_clauses():
"""
    Guarantee that each digit appears only once in every column.
"""
unique_clauses = []
for row in range(1,NUM_DIGITS +1):
subset = []
for column in range(1, NUM_DIGITS+1):
subset.append((column,row))
unique_clauses.extend(get_unique_subset_clauses(subset))
return unique_clauses
def get_square_unique_clauses():
"""
    Guarantee that each digit appears only once in every 3x3 subsquare.
"""
subset_clauses = []
    for row in SUBSQUARE_BOUNDARIES:
        for column in SUBSQUARE_BOUNDARIES:
subset = [] # make subset 3x3
for k in range(9):
subset.append((row+k%3,column+k//3))
subset_clauses.extend(get_unique_subset_clauses(subset))
return subset_clauses
def get_sudoku_clauses():
"""
    Combine all the clause sets above into the full encoding of a valid sudoku.
"""
sudoku_clauses = []
sudoku_clauses.extend(get_base_clauses())
sudoku_clauses.extend(get_unique_cells_clauses())
sudoku_clauses.extend(get_row_unique_clauses())
sudoku_clauses.extend(get_columns_unique_clauses())
sudoku_clauses.extend(get_square_unique_clauses())
return sudoku_clauses
def get_single_clauses(sudoku_board):
"""
    Build unit clauses for the cells that are already filled in
    on the given sudoku board.
"""
single_clauses = []
for row in range(1, NUM_DIGITS+1):
for column in range(1,NUM_DIGITS+1):
cell_value = sudoku_board[row-1][column-1]
if cell_value:
single_clauses.append(
[get_cell_value(row,column,cell_value)])
return single_clauses
def get_cell_solution(sudoku_solution, row, column):
"""
    Return the digit assigned to (row, column) in the solver's solution set, or -1 if none.
"""
for digit in range(1, NUM_DIGITS+1):
if get_cell_value(row,column,digit) in sudoku_solution:
return digit
return -1
def solve_sudoku(sudoku_board):
"""
    Generate the sudoku clauses, run pycosat on them and write the solution back into the board.
"""
sudoku_clauses = get_sudoku_clauses()
single_clauses = get_single_clauses(sudoku_board)
sudoku_clauses.extend(single_clauses)
sudoku_solution = set(pycosat.solve(sudoku_clauses))
for row in range(1, NUM_DIGITS+1):
for column in range(1, NUM_DIGITS+1):
sudoku_board[row-1][column-1] = get_cell_solution(
sudoku_solution, row, column)
return sudoku_board
def main():
print ("Sudoku problem:")
sudoku_problem = [[0, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 6, 0, 0, 0, 0, 3],
[0, 7, 4, 0, 8, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 0, 0, 2],
[0, 8, 0, 0, 4, 0, 0, 1, 0],
[6, 0, 0, 5, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 7, 8, 0],
[5, 0, 0, 0, 0, 9, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4, 0]]
pprint(sudoku_problem)
print('\nGenerating solution:')
sudoku_solution = solve_sudoku(sudoku_problem)
pprint(sudoku_solution)
if __name__ == '__main__':
main()
|
macartur/programming_ai
|
sudoku.py
|
sudoku.py
|
py
| 6,456 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35430507994
|
# HOHMANN GUI
# A GUI version of HOHMANN BASIC.
# Basic program to calculate a Hohmann transfer between two coplanar, circular orbits.
# The altitudes of the two orbits are defined by user input.
# ---------------------------------------------------
# Imports
from orbit_toolbox import hohmann, Transfer
from bodies_toolbox import Planet
from guizero import App, Text, TextBox, PushButton
# ---------------------------------------------------
# Constants
Earth_radius = Planet["Earth"]["radius"]
# ---------------------------------------------------
# Functions
def hohmann_calc():
Hoh_1 = Transfer(1, "hohmann", "09012022")
hohmann_result = Hoh_1.hohmann("Earth", int(h_start_input.value)+Earth_radius, int(h_end_input.value)+Earth_radius)
result_dv1.value = hohmann_result[0]
result_dv2.value = hohmann_result[1]
result_dv_total.value = hohmann_result[2]
result_t_transfer.value = hohmann_result[3]
# ---------------------------------------------------
# App
# App and grid frameworks
app = App("Hohmann GUI", layout="grid")
# Input text and boxes
h_start_text = Text(app, text="Start altitude (km): ", grid=[0, 0])
h_start_input = TextBox(app, text="Insert start altitude (km) here", width="30", grid=[2, 0])
h_end_text = Text(app, text="End altitude (km): ", grid=[0, 1])
h_end_input = TextBox(app, text="Insert end altitude (km) here", width="30", grid=[2, 1])
# Calculation button
h_start_set = PushButton(app, command=hohmann_calc, text="Calculate transfer", grid=[1, 2])
# Output text and boxes
dv1_text = Text(app, text="dv1 (km.s^-1) =", grid=[0, 3])
result_dv1 = TextBox(app, text="dv1 =", width="30", grid=[2, 3])
dv2_text = Text(app, text="dv2 (km.s^-1) =", grid=[0, 4])
result_dv2 = TextBox(app, text="dv2 =", width="30", grid=[2, 4])
dv_total_text = Text(app, text="dv_total (km.s^-1) =", grid=[0, 5])
result_dv_total = TextBox(app, text="dv_total =", width="30", grid=[2, 5])
t_transfer_text = Text(app, text="t_transfer (s) =", grid=[0, 6])
result_t_transfer = TextBox(app, text="t_transfer =", width="30", grid=[2, 6])
app.display()
|
weasdown/orbit-toolbox
|
Hohmann_GUI.py
|
Hohmann_GUI.py
|
py
| 2,090 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18481259162
|
import time
from roman_integer import Roman
def check_for_higher_numerals(numeral):
if "_I_V" in numeral:
numeral = numeral.replace("_I_V", "MMMM")
return numeral
if __name__ == "__main__":
start = time.time()
roman = Roman()
numerals = [i.strip()
for i in open("Data/p089_roman.txt").readlines()]
old_count = 0
new_count = 0
for numeral in numerals:
old_count += len(numeral)
int_val = roman.convert_to_integer(numeral, disp=False)
# print(numeral, int_val)
proper_roman = roman.convert_to_numeral(int_val, disp=False)
corrected_roman = check_for_higher_numerals(proper_roman)
new_count += len(corrected_roman)
print("Default form count:", old_count)
print("Minimal form count:", new_count)
print("Characters saved:", old_count - new_count)
print("Calculated in:", time.time() - start)
|
BreadBug007/Project-Euler
|
Prob_89.py
|
Prob_89.py
|
py
| 857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25950565947
|
import sys
import fileinput
import csv
import operator
import numpy as np
import scipy.spatial.distance as sd
import pickle
#python compare_google.py [google_match-norm] [avg-$count-norm] states.p statescores$count
google = []
matrix = []
infile = sys.argv[2]
google_reader = csv.reader(open(sys.argv[1], 'rb'), delimiter=',')
matrix_reader = csv.reader(open(infile, 'rb'), delimiter=',')
states = pickle.load(open(sys.argv[3], 'rb'))
results = open(sys.argv[4], 'wb')
weeks = 52
def compute_score(m, g):
sum = 0
for i in range(m.shape[1]):
sum += sd.euclidean(m[:,i],g[:,i])
score = sum/float(m.shape[1])
return score
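# Added illustration (not part of the original script): compute_score averages the per-column
# Euclidean distance, e.g. for m = np.array([[0], [0]]) and g = np.array([[3], [4]]) the only
# column is at distance 5.0, so the score is 5.0.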
#store google data
for row in google_reader:
row = [(float(x) if x else 0) for x in row]
google.append(np.array(row))
google = np.array(google)
for row in matrix_reader:
row = [(float(x) if x else 0) for x in row]
matrix.append(np.array(row))
matrix = np.array(matrix)
scores = {}
for i in range(google.shape[1]):
scores[str(i) + ' ' + states[i]] = sd.euclidean(matrix[:,i],google[:,i])
sorted_scores = sorted(scores.iteritems(), key=operator.itemgetter(1))
for name,score in sorted_scores:
results.write(name+':'+str(score)+'\n')
results.close()
|
kris-samala/LBSN
|
data_analysis/compare_states.py
|
compare_states.py
|
py
| 1,233 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11816060482
|
import numpy as np
import torch
with open("pdtSEIKA.csv", "r") as f:
f_reader = np.loadtxt(f, delimiter=',', dtype=np.float32)
predict = f_reader
tensor = torch.from_numpy(np.load(r"C:\Users\cchen\PycharmProjects\LearnPyTorch/K05_excluded_xyz.npy")) # 101778,
# 15,10,10
with open("ai__K05_SEIKA2.csv", "w") as f:
for item, result in zip(tensor, predict):
pos = (item[0, 0, 0], item[5, 0, 0], item[10, 0, 0])
row = "{},{},{},{},{},{},{},{}\n".format(*pos, *result)
f.write(row)
|
cchenyixuan/Banira
|
utils/predict_map.py
|
predict_map.py
|
py
| 545 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72580436348
|
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from reconocer_form import analyze_general_documents
from base_datos import crear_registro
import asyncio
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials = True,
allow_methods = ["*"],
allow_headers = ["*"]
)
class Registro(BaseModel):
url: str
@app.get("/")
def verRegistros():
return "registros"
@app.post("/post")
async def crearRegistro(registro: Registro):
    respuesta = await analyze_general_documents(registro.url)
    registro_guardado = await crear_registro(registro.url, respuesta)
    if registro_guardado:
        return respuesta
return "no se pudo guardar"
@app.put("/post")
def crearRegistro():
return "hola"
@app.delete("/delete")
def crearRegistro():
return "hola"
|
jefryne/web_placas
|
ia/detectar documento/api.py
|
api.py
|
py
| 898 |
python
|
es
|
code
| 0 |
github-code
|
6
|
18882639437
|
#! /usr/bin/env python3
import sys
import json
from flask import Flask, request
app = Flask(__name__)
def is_browser(ua_string):
return ua_string.split('/')[0].lower() == 'mozilla'
@app.route("/")
def hello():
msg_content = "Hello World!"
if is_browser(request.headers['User-Agent']):
return "<html><body><h1>{}</body></html>".format(msg_content)
else:
response = dict()
response["msg"] = msg_content
return json.dumps(response)
@app.route("/name", methods=["POST"])
def greeting():
print(request.data, file=sys.stdout)
req = json.loads(request.data)
req["msg"] = "Hi, {}".format(req["name"])
return json.dumps(req)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=int(sys.argv[1]))
|
glennneiger/estate-backend
|
example/simple.py
|
simple.py
|
py
| 770 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71737201468
|
from typing import List
from project.appliances.appliance import Appliance
from project.people.child import Child
class Room:
def __init__(self, name: str, budget: float, members_count: int):
self.family_name = name
self.budget = budget
self.members_count = members_count
self.children: List[Child] = []
self.expenses = 0
@property
def expenses(self):
return self.__expenses
@expenses.setter
def expenses(self, value):
if value < 0:
raise ValueError("Expenses cannot be negative")
self.__expenses = value
def calculate_expenses(self, *args):
total_expenses = 0
for list_obj in args:
for obj in list_obj:
if isinstance(obj, Appliance):
total_expenses += obj.get_monthly_expense()
else:
total_expenses += obj.cost * 30
self.expenses = total_expenses
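    # Added illustration (hypothetical values, not part of the original class): calling
    # calculate_expenses([appliance], [child]) with one Appliance whose get_monthly_expense()
    # returns 30.0 and one Child whose cost is 2.0 sets self.expenses to 30.0 + 2.0 * 30 = 90.0.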
def room_info(self):
result_str = [
f"{self.family_name} with {self.members_count} members."
f" Budget: {self.budget:.2f}$, Expenses: {self.expenses:.2f}$"]
for idx, child in enumerate(self.children):
result_str.append(f"--- Child {idx + 1} monthly cost: {(child.cost * 30):.2f}$")
if hasattr(self, 'appliances'):
appliances_monthly_cost = sum([a.get_monthly_expense() for a in self.appliances])
result_str.append(f"--- Appliances monthly cost: {appliances_monthly_cost:.2f}$")
return '\n'.join(result_str)
|
tonytech83/Python-OOP
|
OOP_Exams/11_OOP_22_Aug_2020/hotel_everland/project/rooms/room.py
|
room.py
|
py
| 1,573 |
python
|
en
|
code
| 4 |
github-code
|
6
|
74291021627
|
import math
import numpy as np
from scipy.spatial import ConvexHull
class LOS_guidance():
def __init__(self, params):
self.ship_max_speed = params['ship_max_speed']
self.ship_ax_vel_lim = params['ship_ax_vel_lim']
self.ship_lat_acc_pos_lim = params['ship_lat_acc_pos_lim']
self.ship_lat_acc_neg_lim = params['ship_lat_acc_neg_lim']
self.ship_ax_acc_lim = params['ship_ax_acc_lim']
self.T_max_thrust = params['T_max_thrust']
self.T_min_thrust = params['T_min_thrust']
def ship_los_guidance(self, ship_phys_status, target_pos, dt=1 / 60):
ship_position = ship_phys_status['pose'][0]
heading_in = ship_phys_status['pose'][1]
ship_speed_in = np.linalg.norm(ship_phys_status['velocity'][0])
ship_rot_spd_in = ship_phys_status['velocity'][1]
def angle_between_vector_and_angle(vector, angle):
# Calculate the angle between the vector and the x-axis
vector_angle = math.atan2(vector[1], vector[0])
# Calculate the difference between the vector angle and the given angle
angle_diff = vector_angle - angle
# Ensure that the angle difference is within the range (-pi, pi]
while angle_diff > math.pi:
angle_diff -= 2 * math.pi
while angle_diff <= -math.pi:
angle_diff += 2 * math.pi
# Return the absolute value of the angle difference
return angle_diff
ship_heading = heading_in
ship_rot_spd = ship_rot_spd_in
ship_speed = 0
distance_to_target = math.sqrt(
(target_pos[0] - ship_position[0]) ** 2 + (target_pos[1] - ship_position[1]) ** 2)
if not (distance_to_target >= -1 and distance_to_target <= 10000):
angle_variance = 0
target_angle = (target_pos - ship_position)
angle_variance = angle_between_vector_and_angle(target_angle, ship_heading)
rotational_mul = angle_variance
if not (angle_variance >= -10 and angle_variance <= 10):
angle_variance = 0
if (angle_variance) > (math.pi / 4):
rotational_mul = abs((math.pi / 2))
elif (angle_variance) < -(math.pi / 2):
rotational_mul = - abs((math.pi / 2))
# rotational controller
if abs(angle_variance) > 0.01 and abs(ship_rot_spd) <= abs(self.ship_ax_vel_lim):
ship_rot_spd = -(rotational_mul / (math.pi / 2))*10
else:
ship_rot_spd = 0
# translational controller
if distance_to_target > 2:
ship_speed = self.ship_max_speed
elif distance_to_target >= 0.6:
ship_speed = self.ship_max_speed/3
elif distance_to_target >= 0.3:
ship_speed = self.ship_max_speed/8
else:
ship_speed = 0
ship_rot_spd = 0
ship_vel = ship_speed*np.array([math.cos(heading_in), math.sin(heading_in)])
cmd_vel = [ship_vel, ship_rot_spd]
#print("\rangle_variance, ship_rot_spd", np.degrees(heading_in), ship_rot_spd, end="")
#print("\rTLeft, TRight", rTLeft.TRight, end="")
return cmd_vel
class LOS_VO_guidance():
def __init__(self, params):
self.ship_max_speed = params['ship_max_speed']
self.ship_ax_vel_lim = params['ship_ax_vel_lim']
self.ship_lat_acc_pos_lim = params['ship_lat_acc_pos_lim']
self.ship_lat_acc_neg_lim = params['ship_lat_acc_neg_lim']
self.ship_ax_acc_lim = params['ship_ax_acc_lim']
self.T_max_thrust = params['T_max_thrust']
self.T_min_thrust = params['T_min_thrust']
self.detection_range = 10
self.spd_visual_multiplier = 2.5
self.los = LOS_guidance(params)
self.vo_polygons = []
#parameters
def ship_los_vo_guidance(self, phys_status, output_polygon, target_pos, sensor_data, dt=1 / 60):
# First, this receives a database of ships in the world. the database is a dictionary
# if there are no obstacles in range, it pursues the target using Line-of-sight guidance law
# if there are VO cones in sight, it tries to evade the obstacle by maintaining speed and turning.
# it prefers angles closer to the target.
# if no trajectory is valid, it tries to decelerate to half the maximum speed
# if there is still no trajectory, it tries to stop.
#setting up variables
self.phys_status = phys_status # updates phys status
self.output_polygon = output_polygon
self.sensor_data = sensor_data # get sensor_data
self.vo_lines = []
self.vo_circles = [] # position and radius
self.vo_cones = []
self.vo_polygons = []
ship_position = phys_status['pose'][0]
heading_in = phys_status['pose'][1]
ship_speed_in = np.linalg.norm(phys_status['velocity'][0])
ship_rot_spd_in = phys_status['velocity'][1]
ship_heading = heading_in
ship_rot_spd = ship_rot_spd_in
ship_speed = 0
        # receiving and processing the data
self.filtered_objects_angle = self.sensor_simulator() # process the data
self.vo_cone_generator() #generates VO cone
# target_information
distance_to_target = math.sqrt((target_pos[0] - ship_position[0]) ** 2 + (target_pos[1] - ship_position[1]) ** 2)
target_vector = (target_pos - ship_position)
target_angle = self.regularize_angle(math.atan2(target_vector[1], target_vector[0]))
# search for valid angle
target_speed = 2.0
self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
angle_rad = math.radians(3*self.index_with_lowest_number(self.angle_opacity))
angle_variance = self.regularize_angle(angle_rad - ship_heading)
if abs(angle_variance) > math.pi * (1 / 8):
target_speed = 1.5
self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
angle_rad = math.radians(3 * self.index_with_lowest_number(self.angle_opacity))
angle_variance = self.regularize_angle(angle_rad - ship_heading)
if abs(angle_variance) > math.pi * (1.5 / 8):
target_speed = 1.0
self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
angle_rad = math.radians(3 * self.index_with_lowest_number(self.angle_opacity))
angle_variance = self.regularize_angle(angle_rad - ship_heading)
if abs(angle_variance) > math.pi * (1 / 4):
target_speed = 0.5
self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
angle_rad = math.radians(3 * self.index_with_lowest_number(self.angle_opacity))
angle_variance = self.regularize_angle(angle_rad - ship_heading)
if abs(angle_variance) > math.pi * (5 / 8):
target_speed = -0.5
self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
angle_rad = math.radians(3 * self.index_with_lowest_number(self.angle_opacity))
angle_variance = self.regularize_angle(angle_rad - ship_heading)
for theta in range(0,120):
point = np.array(
[self.spd_visual_multiplier*target_speed * math.cos(math.radians(theta*3)), self.spd_visual_multiplier*target_speed * math.sin(math.radians(theta*3))])
if self.angle_opacity[theta] < 1: #np.percentile(self.angle_opacity,10):
self.vo_circles.append([self.phys_status["pose"][0]+point,0.05, (0,255,50), 1])
self.vo_lines.append([self.phys_status["pose"][0],[self.phys_status["pose"][0][0]+self.spd_visual_multiplier*target_speed*math.cos(angle_rad),self.phys_status["pose"][0][1]+self.spd_visual_multiplier*target_speed*math.sin(angle_rad)],(0,255,0)])
rotational_mul = angle_variance
if target_speed < 0:
rotational_mul = - rotational_mul
if not (angle_variance >= -10 and angle_variance <= 10):
angle_variance = 0
if (angle_variance) > (math.pi / 2):
rotational_mul = abs((math.pi / 2))
elif (angle_variance) < -(math.pi / 2):
rotational_mul = - abs((math.pi / 2))
# rotational controller
if abs(angle_variance) > 0.01 and abs(ship_rot_spd) <= abs(self.ship_ax_vel_lim):
ship_rot_spd = -(rotational_mul / (math.pi / 2))*10
else:
ship_rot_spd = 0
# translational controller
if distance_to_target > 2:
ship_speed = target_speed
elif distance_to_target >= 0.6:
ship_speed = target_speed/3
elif distance_to_target >= 0.3:
ship_speed = target_speed/8
else:
ship_speed = 0
ship_rot_spd = 0
ship_vel = ship_speed*np.array([math.cos(heading_in), math.sin(heading_in)])
cmd_vel = [ship_vel, ship_rot_spd]
return cmd_vel
def vo_cone_generator(self):
ship_pos = self.phys_status["pose"][0]
ship_vel = self.phys_status["velocity"][0]
ship_spd = abs(np.linalg.norm(self.phys_status["velocity"][0]))
#vessel velocity indicator
self.vo_lines.append([ship_pos,ship_pos+ship_vel*self.spd_visual_multiplier, (0, 0, 255)])
circle_color = (200, 200, 200, 255)
line_width = 1
circle = [ship_pos, self.detection_range, circle_color, line_width] # object circle position
self.vo_circles.append(circle) # circle representing collision distance of the object
if self.filtered_objects_angle != None:
for key, object in self.filtered_objects_angle.items():
object_pos = object[0].phys_status["pose"][0] # absolute object position
object_vel = object[0].phys_status["velocity"][0] # absolute object velocity
#object_radius = object[1] + object[2] # object VO_circle radius
circle_color = (200, 0, 0, 255)
line_width = 1
#circle = [object_pos, object_radius, circle_color, line_width] # object circle position
#self.vo_circles.append(circle) # circle representing collision distance of the object
pos_diff = object_pos - ship_pos
object_distance = object[3] # distance from the object to the circle
rel_spd = np.linalg.norm(object[0].phys_status["velocity"][0]-ship_vel)
if object_distance < 0.1: # for avoiding divide by zero error
object_distance = 0.1
start_rad = object[1]
end_rad = object[2]
object_velocity = object[0].phys_status["velocity"][0]
#self.vo_cones.append([ship_pos, [start_rad, end_rad], object_distance*math.cos(tangent_angle)])
self.vo_cones.append([ship_pos+object_velocity*self.spd_visual_multiplier, [start_rad, end_rad], object_distance])
#print(math.degrees(self.filtered_objects))
pass
def vo_theta_opacity(self, target_angle, spd_to_search):
# when search spd = max_spd
angle_opacity = np.linspace(start=0, stop=120, num=120, endpoint=False)
for theta in range(0,120):
delta_angle = target_angle - math.radians(theta*3)
while delta_angle >= 1 * math.pi:
delta_angle = delta_angle - 2 * math.pi
while delta_angle <= -1 * math.pi:
delta_angle = delta_angle + 2 * math.pi
angle_diff_opacity = (abs(delta_angle/(math.pi)))*(30/80)
angle_opacity[theta] = angle_diff_opacity
for vo_cone in self.vo_cones:
for theta in range(0, 120):
                point = np.array([self.spd_visual_multiplier*spd_to_search*math.cos(math.radians(theta*3)), self.spd_visual_multiplier*spd_to_search*math.sin(math.radians(theta*3))])
opacity = self.vo_cone_collision_detector(point + self.phys_status["pose"][0], vo_cone)
if opacity > 1/5:
opacity = 100
if opacity <1/10:
opacity = 0
angle_opacity[theta] = angle_opacity[theta]+(opacity**2)*10
return angle_opacity
def sensor_simulator_legacy(self):
# receives dict(self.ships_database, **self.obstacle_database), which consists of dictionaries of objects(ship or obstacle).
# extracts only {key, polygon} tuples using a for loop
# then it filters only the keys of which its polygon is inside the detection range.
# then it outputs {key: [polygon, phys_status]} to a dictionary.
ship_pose = self.phys_status["pose"]
ship_polygon = self.output_polygon
objects_database = self.sensor_data
origin_point = ship_pose[0]
if not(objects_database):
return
filtered_objects = {}
#print(objects_database)
for key, object in objects_database.items():
# calculate distance between origin and closest point of polygon
# print(key)
object_pose = object.phys_status["pose"]
object_vector = (object_pose[0] - ship_pose[0])
object_distance = (np.linalg.norm(object_vector))
#print(key, object_vector)
# check if polygon is within detection range
if object_distance <= self.detection_range:
# gets object polygonal information
poly_points = object.output_polygon
# if there are no polygons, return nothing
if poly_points == None:
return
#print(poly_points)
(max_angle, min_angle) = self.get_min_max_angles(poly_points)
FOV = (max_angle - min_angle)
while FOV > 1 * math.pi:
FOV = 2 * math.pi - FOV
while FOV <= 0:
FOV += 2 * math.pi
object_radius = (object_distance * math.tan(FOV / 2))
self_radius = (self.get_largest_inner_product(ship_polygon, object_vector))/2
filtered_objects[key] = [object, object_radius, self_radius ]
#print(" key, FOV object_radius self_radius : ", object_distance, math.degrees(FOV), object_radius, self_radius)
return filtered_objects
def sensor_simulator(self):
# receives dict(self.ships_database, **self.obstacle_database), which consists of dictionaries of objects(ship or obstacle).
# extracts only {key, polygon} tuples using a for loop
# then it filters only the keys of which its polygon is inside the detection range.
# then it outputs {key: [polygon, phys_status]} to a dictionary.
ship_pose = self.phys_status["pose"]
ship_polygon = self.output_polygon
objects_database = self.sensor_data
origin_point = ship_pose[0]
if not (objects_database):
return
filtered_objects_angle = {}
# print(objects_database)
for key, object in objects_database.items():
# calculate distance between origin and closest point of polygon
# print(key)
object_pose = object.phys_status["pose"]
object_vector = (object_pose[0] - ship_pose[0])
if object_pose[0][1] == ship_pose[0][1] and object_pose[0][0] == ship_pose[0][0]:
continue
object_distance = (np.linalg.norm(object_vector))
# print(key, object_vector)
# check if polygon is within detection range
if object_distance > self.detection_range*1.1:
continue
# print(poly_points)
N = self.inflate_obstacles(object)
if N == None:
continue
[max_angle_point, min_angle_point], [start_angle, end_angle], self.outer_polygon,closest_distance = N
self.vo_lines.append([self.phys_status["pose"][0], max_angle_point,(255, 153, 251)])
self.vo_lines.append([self.phys_status["pose"][0], min_angle_point,(255, 153, 251)])
if closest_distance <= self.detection_range:
filtered_objects_angle[key] = [object, start_angle, end_angle, closest_distance]
# print(" key, FOV object_radius self_radius : ", object_distance, math.degrees(FOV), object_radius, self_radius)
return filtered_objects_angle
def get_min_max_angles(self,polygon):
# Select the first point as the origin
origin = self.phys_status["pose"][0]
# Calculate the angle for each point relative to the origin
angles = []
for point in polygon:
x, y = point[0] - origin[0], point[1] - origin[1]
angle = math.atan2(y, x)
if angle < 0:
while angle < 0:
angle += 2 * math.pi
if angle > 2 * math.pi:
                while angle > 2 * math.pi:
angle -= 2 * math.pi
angles.append(angle)
# Return the maximum and minimum angles
max_angle = max(angles)
min_angle = min(angles)
#print("\n",math.degrees(max_angle), math.degrees(min_angle),end="")
#print("\n", polygon, end="")
return (max_angle, min_angle)
def inflate_obstacles(self,object):
# Select the first point as the origin
origin = self.phys_status["pose"][0]
ship_polygon = self.output_polygon
if object.output_polygon == None:
return
# Calculate the angle for each point relative to the origin
object_vector = (object.phys_status["pose"][0] - self.phys_status["pose"][0])
avg_angle = math.atan2(object_vector[1],object_vector[0])
if avg_angle < 0:
while avg_angle < 0:
avg_angle += 2 * math.pi
if avg_angle > 2 * math.pi:
            while avg_angle > 2 * math.pi:
avg_angle -= 2 * math.pi
#centers the ship polygon
ship_polygon_centered = []
for point in self.output_polygon:
ship_polygon_centered.append((point - self.phys_status["pose"][0])*2)
inflated_points = []
for point in object.output_polygon:
for point2 in ship_polygon_centered:
inflated_point = point + point2
inflated_points.append(inflated_point)
hull = ConvexHull(inflated_points)
outer_polygon = [inflated_points[i] for i in hull.vertices]
self.vo_polygons.append(outer_polygon)
angles = []
for point in outer_polygon:
x, y = point[0] - origin[0], point[1] - origin[1]
angle = math.atan2(y, x)
if angle < 0:
while angle < 0:
angle += 2 * math.pi
if angle > 2 * math.pi:
while angle > 2 * math.pi:
angle -= 2 * math.pi
delta_angle = angle-avg_angle
if delta_angle < - math.pi:
while delta_angle < -math.pi:
delta_angle += 2 * math.pi
if delta_angle > math.pi:
while delta_angle > math.pi:
delta_angle -= 2 * math.pi
angles.append(delta_angle)
# Return the maximum and minimum angles
max_angle = max(angles)
min_angle = min(angles)
max_angle_index = angles.index(max_angle)
min_angle_index = angles.index(min_angle)
max_angle = min(max(angles), math.pi*(3/4))+math.pi/20
min_angle = max(min(angles), -math.pi*(3/4))-math.pi/20
max_angle_point = outer_polygon[max_angle_index]
min_angle_point = outer_polygon[min_angle_index]
closest_point = self.closest_point_to_polygon(origin, outer_polygon)
self.vo_circles.append([closest_point[0], 0.1, (236, 232, 26), 1])
if closest_point[1] == -1:
closest_distance = 1
else:
closest_distance = np.linalg.norm(origin - closest_point[0])
return ([max_angle_point,min_angle_point],[avg_angle+min_angle,avg_angle+max_angle],outer_polygon,closest_distance)
def get_largest_inner_product(self,polygon, B):
# Rotate B by pi/2
B_rotated = np.array([-B[1], B[0]])
# Normalize B_rotated
A = B_rotated / np.linalg.norm(B_rotated)
max_inner_product = -float('inf')
for i, point1 in enumerate(polygon):
for j, point2 in enumerate(polygon[i + 1:], i + 1):
# Calculate the line vector between point1 and point2
line_vector = np.array(point2) - np.array(point1)
# Calculate the inner product of line_vector and A
inner_product = np.dot(line_vector / np.linalg.norm(line_vector), A)
# Update the max inner product if applicable
if inner_product > max_inner_product:
max_inner_product = inner_product
# Return the largest inner product
return max_inner_product
def tangent_lines(self, circle, origin):
# Unpack circle coordinates and radius
x_c, y_c = circle[0]
r = circle[1]
# Unpack origin coordinates
x_o, y_o = origin
# Calculate the distance between the origin and the center of the circle
d = math.sqrt((x_c - x_o) ** 2 + (y_c - y_o) ** 2)
# Check if the origin is inside the circle
if d < r:
print("Error: origin is inside the circle.")
return None
# Calculate the angle between the origin and the center of the circle
theta = math.atan2(y_c - y_o, x_c - x_o)
# Calculate the distance from the origin to the tangent point
a = math.sqrt(d ** 2 - r ** 2)
# Calculate the angles of the tangent lines
alpha = math.asin(r / d)
beta = theta - alpha
gamma = theta + alpha
# Return the two angles of the tangent lines
return beta, gamma
def vo_cone_collision_detector(self, point, cone):
"""
Determines whether a point is inside a circle section
"""
origin, [start_angle, end_angle], radius = cone
if radius < 0.5:
radius = 1
# Calculate angle of point relative to circle origin
point_angle = math.atan2(point[1] - origin[1], point[0] - origin[0])
start_angle = self.regularize_angle(start_angle)
end_angle = self.regularize_angle(end_angle)
point_angle = self.regularize_angle(point_angle)
if end_angle >= start_angle:
result = point_angle >= start_angle and point_angle <= end_angle
inside_angle = end_angle - start_angle
        elif end_angle <= start_angle:  # when the sector wraps past 0 radians
result = point_angle >= start_angle or point_angle <= end_angle
inside_angle = end_angle + 2*math.pi - start_angle
if result == True:
time_till_collision = radius / (math.dist(point, origin)/5)
print(1)
if radius < 4:
return 1
if inside_angle > math.pi:
return 1
return 1/time_till_collision
if result == False:
return 0
def angle_between_vector_and_angle(self, vector, angle):
# Calculate the angle between the vector and the x-axis
vector_angle = math.atan2(vector[1], vector[0])
# Calculate the difference between the vector angle and the given angle
angle_diff = vector_angle - angle
# Ensure that the angle difference is within the range (-pi, pi]
angle_diff = self.regularize_angle(angle_diff)
# Return the absolute value of the angle difference
return angle_diff
def index_with_lowest_number(self, numbers):
"""
Returns the index of the lowest number in a list
"""
lowest_index = 0
for i in range(1, len(numbers)):
if numbers[i] < numbers[lowest_index]:
lowest_index = i
return lowest_index
def regularize_angle(self,angle):
while angle > math.pi:
angle -= 2 * math.pi
while angle <= -math.pi:
angle += 2 * math.pi
return angle
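    # Added illustration (not part of the original class): regularize_angle maps any angle
    # into (-pi, pi], e.g. 3*pi -> pi and -pi -> pi, which keeps the heading errors computed
    # above from wrapping past +/-180 degrees.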
def point_polygon_distance(self, point, polygon):
if isinstance(point, np.ndarray):
point = tuple(point)
polygon_vertices = polygon
# Create a list of tuples containing the x,y coordinates of the polygon vertices
polygon = [(x, y) for (x, y) in polygon_vertices]
# Find the closest point on the polygon boundary to the target point
closest_point = None
min_distance = float('inf')
for i in range(len(polygon)):
x1, y1 = polygon[i]
x2, y2 = polygon[(i + 1) % len(polygon)]
dx = x2 - x1
dy = y2 - y1
dot = ((point[0] - x1) * dx + (point[1] - y1) * dy) / (dx ** 2 + dy ** 2)
closest_x = float(x1) + dot * dx
closest_y = float(y1) + dot * dy
distance = math.sqrt((point[0] - closest_x) ** 2 + (point[1] - closest_y) ** 2)
if distance < min_distance:
min_distance = distance
closest_point = (closest_x, closest_y)
print(distance)
# If the point is inside the polygon, return a negative distance
if self.is_inside_polygon(point, polygon_vertices):
return 0
else:
return min_distance
def closest_point_to_polygon(self, point, polygon_vertices):
# Convert numpy array to tuple if necessary
if isinstance(point, np.ndarray):
point = tuple(point)
# Find the closest point on the polygon boundary to the target point
closest_point = None
min_distance = float('inf')
polygon_vertices2 = self.subdivide_polygon(polygon_vertices)
for i in range(len(polygon_vertices2)):
(closest_x, closest_y) = polygon_vertices2[i]
distance = np.sqrt((point[0] - closest_x) ** 2 + (point[1] - closest_y) ** 2)
if distance < min_distance:
min_distance = distance
closest_point = np.array([closest_x, closest_y])
if self.is_inside_polygon(point, polygon_vertices):
return [closest_point, -1]
else:
return [closest_point, 1]
def is_inside_polygon(self, point, polygon_vertices):
# Create a list of tuples containing the x,y coordinates of the polygon vertices
polygon = [(x, y) for x, y in polygon_vertices]
# Use the winding number algorithm to check if the point is inside the polygon
wn = 0
for i in range(len(polygon)):
x1, y1 = polygon[i]
x2, y2 = polygon[(i + 1) % len(polygon)]
if y1 <= point[1]:
if y2 > point[1]:
if (point[0] - x1) * (y2 - y1) > (x2 - x1) * (point[1] - y1):
wn += 1
else:
if y2 <= point[1]:
if (point[0] - x1) * (y2 - y1) < (x2 - x1) * (point[1] - y1):
wn -= 1
return wn != 0
def subdivide_polygon(self, polygon, n=20):
"""Subdivides a polygon and divides each edge into n segments."""
new_polygon = []
# Iterate over the edges of the polygon
for i in range(len(polygon)):
# Add the current vertex to the new polygon
new_polygon.append(polygon[i])
# Calculate the difference vector between the current and next vertex
diff = [polygon[(i + 1) % len(polygon)][j] - polygon[i][j] for j in range(len(polygon[0]))]
# Calculate the size of each segment along the current edge
segment_size = [diff[j] / n for j in range(len(diff))]
# Iterate over the segments of the current edge
for j in range(1, n):
# Calculate the coordinates of the new vertex
x_new = polygon[i][0] + j * segment_size[0]
y_new = polygon[i][1] + j * segment_size[1]
new_vertex = [x_new, y_new]
# Add the new vertex to the new polygon
new_polygon.append(new_vertex)
return new_polygon
|
spacedoge2320/Ship-OA-sim
|
Ship-OA-sim/Guidance_algorithms.py
|
Guidance_algorithms.py
|
py
| 28,641 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8784244942
|
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.urls import reverse, reverse_lazy
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.list import ListView
from Venter import upload_to_google_drive
from Venter.models import Category, Profile
from Venter.forms import CSVForm, ProfileForm, UserForm
from .manipulate_csv import EditCsv
def upload_csv_file(request):
"""
View logic for uploading CSV file by a logged in user.
For POST request-------
1) The POST data, uploaded csv file and a request parameter are being sent to CSVForm as arguments
2) If form.is_valid() returns true, the user is assigned to the uploaded_by field
    3) csv_form is saved and currently returns a simple HttpResponse in place of prediction results
For GET request-------
The csv_form is rendered in the template
"""
if request.method == 'POST':
csv_form = CSVForm(request.POST, request.FILES, request=request)
if csv_form.is_valid():
file_uploaded = csv_form.save(commit=False)
file_uploaded.uploaded_by = request.user
csv_form.save()
return HttpResponse("<h1>Your csv file was uploaded, redirect user to prediction page (pie charts, tables..)</h1>")
else:
return render(request, './Venter/upload_file.html', {'csv_form': csv_form})
elif request.method == 'GET':
csv_form = CSVForm(request=request)
return render(request, './Venter/upload_file.html', {'csv_form': csv_form})
def handle_user_selected_data(request):
"""This function is used to handle the selected categories by the user"""
if not request.user.is_authenticated:
# Authentication security check
return redirect(settings.LOGIN_REDIRECT_URL)
else:
rows = request.session['Rows']
correct_category = []
company = request.session['company']
if request.method == 'POST':
file_name = request.session['filename']
user_name = request.user.username
for i in range(rows):
# We are getting a list of values because the select tag was multiple select
selected_category = request.POST.getlist(
'select_category' + str(i) + '[]')
if request.POST['other_category' + str(i)]:
# To get a better picture of what we are getting try to print "request.POST.['other_category' + str(i)]", request.POST['other_category' + str(i)
# others_list=request.POST['other_category' + str(i)]
# for element in others_list:
# print(element)
# tuple = (selected_category,element)
tuple = (selected_category,
request.POST['other_category' + str(i)])
# print(request.POST['other_category' + str(i)])
# print(tuple)
                    # So here the correct_category list needs a tuple, so the data will look like:
                    # [(selected_category1, selected_category2), (other_category1, other_category2)] - the output of the multi select
correct_category.append(tuple)
else:
                    # Here correct_category only needs the selected categories, so the data will look like:
                    # [(selected_category1, selected_category2)] - the output of the multi select
correct_category.append(selected_category)
csv = EditCsv(file_name, user_name, company)
csv.write_file(correct_category)
if request.POST['radio'] != "no":
# If the user want to send the file to Google Drive
path_folder = request.user.username + "/CSV/output/"
path_file = 'MEDIA/' + request.user.username + \
"/CSV/output/" + request.session['filename']
path_file_diff = 'MEDIA/' + request.user.username + "/CSV/output/Difference of " + request.session[
'filename']
upload_to_google_drive.upload_to_drive(path_folder,
'results of ' +
request.session['filename'],
"Difference of " +
request.session['filename'],
path_file,
path_file_diff)
return redirect("/download")
def file_download(request):
if not request.user.is_authenticated:
return redirect(settings.LOGIN_REDIRECT_URL)
else:
# Refer to the source: https://stackoverflow.com/questions/36392510/django-download-a-file/36394206
path = os.path.join(settings.MEDIA_ROOT, request.user.username,
"CSV", "output", request.session['filename'])
with open(path, 'rb') as csv:
response = HttpResponse(
                csv.read())  # Consider StreamingHttpResponse instead; this approach will cause problems with large numbers of rows like 25k+
response['Content-Type'] = 'application/force-download'
response['Content-Disposition'] = 'attachment;filename=results of ' + \
request.session['filename']
return response
def handle_uploaded_file(f, username, filename):
"""Just a precautionary step if signals.py doesn't work for any reason."""
data_directory_root = settings.MEDIA_ROOT
path = os.path.join(data_directory_root, username,
"CSV", "input", filename)
path_input = os.path.join(data_directory_root, username, "CSV", "input")
path_output = os.path.join(data_directory_root, username, "CSV", "output")
if not os.path.exists(path_input):
os.makedirs(path_input)
if not os.path.exists(path_output):
os.makedirs(path_output)
with open(path, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
def user_logout(request):
logout(request)
return redirect(settings.LOGIN_REDIRECT_URL)
class CategoryListView(LoginRequiredMixin, ListView):
"""
Arguments------
1) ListView: View to display the category list for the organisation to which the logged in user belongs
2) LoginRequiredMixin: Request to update profile details by non-authenticated users, will throw an HTTP 404 error
Functions------
1) get_queryset(): Returns a new QuerySet filtering categories based on the organisation name passed in the parameter.
"""
model = Category
def get_queryset(self):
return Category.objects.filter(organisation_name=self.request.user.profile.organisation_name)
class UpdateProfileView(LoginRequiredMixin, UpdateView):
"""
Arguments------
1) UpdateView: View to update the user profile details for the logged in user
2) LoginRequiredMixin: Request to update profile details by non-authenticated users, will throw an HTTP 404 error
"""
model = Profile
success_url = reverse_lazy('home')
def post(self, request, *args, **kwargs):
user_form = UserForm(request.POST, instance=request.user)
profile_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid(): # pylint: disable = R1705
user_form.save()
profile_form.save()
messages.success(request, 'Your profile was successfully updated!')
return HttpResponseRedirect(reverse_lazy('home'))
else:
messages.error(request, 'Please correct the error below.')
def get(self, request, *args, **kwargs):
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, './Venter/update_profile.html', {'user_form': user_form, 'profile_form': profile_form})
class CreateProfileView(CreateView):
"""
Arguments------
1) CreateView: View to create the user profile for a new user.
Note------
profile_form.save(commit=False) returns an instance of Profile that hasn't yet been saved to the database.
    profile.save() then writes the instance to the database.
    This happens only after the profile is linked to the new user via 'profile.user = user'.
"""
model = Profile
def post(self, request, *args, **kwargs):
user_form = UserForm(request.POST)
profile_form = ProfileForm(request.POST, request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
return HttpResponseRedirect(reverse('home', args=[]))
else:
messages.warning(
request, 'Something went wrong in Venter, please try again')
return HttpResponse("<h1>NO Profile created</h1>")
def get(self, request, *args, **kwargs):
user_form = UserForm()
profile_form = ProfileForm()
return render(request, './Venter/registration.html', {'user_form': user_form, 'profile_form': profile_form})
|
simranmadhok/Venter_CMS
|
Venter/views.py
|
views.py
|
py
| 9,673 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20867652240
|
from vizasm.analysis.asm.cpu.AsmRegEx import AsmRegEx
class AsmRegEx_arm(AsmRegEx):
'''
Contains regular expressions (mostly in verbose mode) used for analyzing the assembler file on the arm architecture.
Parameters
----------
RE_IVAR
match e.g. "IVAR_0x291c"
RE_CLASSREF
match e.g. "@bind__OBJC_CLASS_$_UIScreen"
RE_IMP_SYMBOLSTUB
        match e.g. "imp___symbolstub1__objc_msgSend"
RE_IMP_NL_SYMBOL_PTR
match e.g. "@imp___nl_symbol_ptr__objc_retain"
RE_ASSINGMENT_SPLIT
match e.g. "ldr r8, [sp], #0x4" or
"ldr r1, [r0] " or
"cvtss2sd xmm2, xmm2"
RE_OFFSET_ADDRESSING
        match e.g. "[sp, #0x8]"
RE_CALL_INSTRUCTION
match e.g. "blx imp___symbolstub1__objc_msgSend"
RE_ADD
match e.g. "add r7, sp, #0xc" or
"add r7, sp" or
"add r7, #0xc"
RE_REGISTER
match e.g. "ro"
RE_ANNOTATED_SUPERCALL
match e.g. "; call to super: 0xbb4e"
'''
RE_IMP_STUBS_GR_IMP_SYMBOLSTUB = 'imp_symbolstub'
RE_IMP_SYMBOLSTUB = r'''
(?:imp[_]{3}symbolstub1[_]{2,}) # imp___symbolstub1__
(?P<%s>.*) # objc_msgSend
''' % RE_IMP_STUBS_GR_IMP_SYMBOLSTUB
# @imp___nl_symbol_ptr__objc_retain
RE_IMP_NL_SYMBOL_PTR_GR_NAME = 'name'
RE_IMP_NL_SYMBOL_PTR = '''
@imp[_]{3}nl[_]symbol[_]ptr[_]{2} # @imp___nl_symbol_ptr__
(?P<%s>\S+) # objc_retain
''' % RE_IMP_NL_SYMBOL_PTR_GR_NAME
RE_CLASSREF_GR_CLASSREF = 'classref'
RE_CLASSREF = '''
@bind[_]{2}OBJC[_]CLASS[_][$][_] # @bind__OBJC_CLASS_$_
(?P<%s>\S+) # UIScreen
''' % RE_CLASSREF_GR_CLASSREF
RE_ASSINGMENT_SPLIT_GR_OP = 'op'
RE_ASSINGMENT_SPLIT_GR_VAL1 = 'val1'
RE_ASSINGMENT_SPLIT_GR_VAL2 = 'val2'
# ldr r8, [sp], #0x4
RE_ASSINGMENT_SPLIT = r'''
(?P<%s>(add|mov|ldr|.*cvt|st(m|r)).*?) # ldr
\s+
(?P<%s>.*?) # r8
,\s* # ,
=? # optional "=" for ldr
(?P<%s>.*) # [sp], #0x4
''' % (RE_ASSINGMENT_SPLIT_GR_OP, RE_ASSINGMENT_SPLIT_GR_VAL1, RE_ASSINGMENT_SPLIT_GR_VAL2)
RE_CALL_INSTRUCTION_GR_CALLED = 'called'
RE_CALL_INSTRUCTION = '''
blx? # bl or blx
\s* # whitespace
(?P<%s>\S+) # called
''' % RE_CALL_INSTRUCTION_GR_CALLED
RE_IVAR_GR_NAME = 'ivar_addr'
RE_IVAR = r'''
IVAR[_] # IVAR_
(?P<%s>\S+) # 0x291c
''' % (RE_IVAR_GR_NAME)
# [ro]
RE_REGISTER_GR_NAME = 'name'
RE_REGISTER = '''
\[? # "[" is optional
(?P<%s>
(
[^]] # no ]
\S # no whitespace
)+ # match all but whitespace and ]
)
\]? # "]" is optional
''' % (RE_REGISTER_GR_NAME)
# [sp, #0x8]
RE_OFFSET_ADDRESSING_GR_BASE_REGISTER = 'base_register'
RE_OFFSET_ADDRESSING_GR_OFFSET = 'offset'
RE_OFFSET_ADDRESSING = '''
\[ # [
(?P<%s>\w+) # sp
(
,\s* # ,
[#]? # #
(?P<%s>\w+) # 0x8
)?
\] # ]
''' % (RE_OFFSET_ADDRESSING_GR_BASE_REGISTER, RE_OFFSET_ADDRESSING_GR_OFFSET)
RE_ADD_GR_DEST = 'dest'
RE_ADD_GR_OPERAND1 = 'operand1'
RE_ADD_GR_OPERAND2 = 'operand2'
# add r7, sp, #0xc
RE_ADD = '''
add\w* # add
\s+ # whitespace
(?P<%s>\w+) # r7
, # ,
\s* # whitespace
(?P<%s>\#?\w+) # sp
(
,\s* # , whitespace
(?P<%s>\#?\w+) # #0xc
)*
''' % (RE_ADD_GR_DEST, RE_ADD_GR_OPERAND1, RE_ADD_GR_OPERAND2)
# ; call to super: 0xbb4e
RE_ANNOTATED_SUPERCALL_GR_SUPERREF_ADDR = 'superref_addr'
RE_ANNOTATED_SUPERCALL = '''
call\s+to\s+super[:]\s+ # "; call to super:"
(?P<%s>0x\w+) # 0xbb4e
''' % (RE_ANNOTATED_SUPERCALL_GR_SUPERREF_ADDR)
RE_STACK_PUSH_GR_REGISTERS = 'registers'
@staticmethod
def compiled_re_stack_push_via_stm(stack_pointer, asmline):
''' Read a stack push via "stm" like e.g. "stm.w sp, {r3, r11}"
and return the re match object.
Use `RE_STACK_PUSH_GR_REGISTERS` to get the list<str> of pushed registers '''
RE_STACK_PUSH = '''
stm.*? # stm
\s* # whitespace
%s # stack pointer
,\s* # ,
[{] # {
(?P<%s>.*) # r3, r11
[}] # }
''' % (stack_pointer, AsmRegEx_arm.RE_STACK_PUSH_GR_REGISTERS)
stack_push_match = AsmRegEx_arm.compiled_vre(RE_STACK_PUSH).search(asmline)
if stack_push_match is not None:
return stack_push_match
return None
RE_STACK_GR_STACK_POINTER = 'stack_pointer'
RE_STACK_GR_ADDRESS = 'stack_address'
RE_STACK_GR_OFFSET = 'stack_offset'
@staticmethod
def compiled_re_stack(stack_pointer_name, frame_pointer_name):
'''
Return a compiled regular expression that matches e.g. "[sp, #0x8]" or "[sp], #0x4"
Use `RE_STACK_GR_ADDRESS` group key to get the stack address.
Parameters
----------
stack_pointer_name: string
the name of stack pointer register (e.g. "sp")
'''
RE_STACK = r'''
\[ # [
(?P<%s>%s|%s) # stack pointer name or frame pointer name
(
(
\]
,\s+ # ,
[#] # #
(?P<%s>0x\w+) # 0x8
)
| # or
(
,\s+ # ,
[#]
(?P<%s>0x\w+) # #0x8
\] # ]
)
|
\] # ]
)
''' % (AsmRegEx_arm.RE_STACK_GR_STACK_POINTER, stack_pointer_name, frame_pointer_name, AsmRegEx_arm.RE_STACK_GR_OFFSET, AsmRegEx_arm.RE_STACK_GR_ADDRESS)
return AsmRegEx.compiled_vre(RE_STACK)
@staticmethod
def stack_pointer_sub(stack_pointer_str, asmline):
'''
Read a line like e.g. "sub sp, #0x14"
and return the value that is being subtracted from the stack pointer as string.
None if did not match.
'''
RE_SP_SUB_GR_SUBTRACTED = 'subtracted'
RE_SP_SUB = '''
sub\w*
\s*
%s,
\s*
\#(?P<%s>\w+)
''' % (stack_pointer_str, RE_SP_SUB_GR_SUBTRACTED)
re_sp_sub_match = AsmRegEx_arm.compiled_vre(RE_SP_SUB).search(asmline)
if re_sp_sub_match is not None:
return re_sp_sub_match.group(RE_SP_SUB_GR_SUBTRACTED)
return None
|
nachtmaar/vizasm
|
vizasm/analysis/asm/cpu/arm/AsmRegEx_arm.py
|
AsmRegEx_arm.py
|
py
| 7,381 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38907524035
|
"""Create category
Revision ID: bc8fb2b5aaaa
Revises: cf3388347129
Create Date: 2023-05-06 09:44:36.431462
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bc8fb2b5aaaa'
down_revision = 'cf3388347129'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('icon', sa.String(length=50), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('create_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
sa.Column('update_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_category_id'), 'category', ['id'], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_category_id'), table_name='category')
op.drop_table('category')
# ### end Alembic commands ###
|
rasimatics/excursio-backend
|
migrations/versions/bc8fb2b5aaaa_create_category.py
|
bc8fb2b5aaaa_create_category.py
|
py
| 1,266 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14052300522
|
# vim: set fileencoding=utf-8
from setuptools import setup
with open('README.rst') as f:
long_description = f.read()
setup(
name = 'AnthraxEplasty',
version = '0.0.3',
author = 'Szymon Pyżalski',
author_email = 'zefciu <[email protected]>',
description = 'Anthrax - generating forms from Elephantoplasty objects',
url = 'http://github.com/zefciu/Anthrax',
keywords = 'form web orm database',
long_description = long_description,
install_requires = ['anthrax', 'Elephantoplasty'],
tests_require = ['nose>=1.0', 'nose-cov>=1.0'],
test_suite = 'nose.collector',
package_dir = {'': 'src'},
namespace_packages = ['anthrax'],
packages = [
'anthrax', 'anthrax.eplasty'
],
classifiers = [
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
entry_points = """[anthrax.reflector]
eplasty = anthrax.eplasty.reflector:EplastyReflector
[anthrax.field_mixins]
eplasty_unique = anthrax.eplasty.field:UniqueMixin
""",
)
|
zefciu/anthrax-eplasty
|
setup.py
|
setup.py
|
py
| 1,242 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11838557066
|
import pickle
import os
import sys
import numpy as np
import json_tricks as json
import cv2
import torch
import copy
import random
import torchvision.transforms as transforms
from glob import glob
from tqdm import tqdm
from PIL import Image
from natsort import natsorted
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from torch.utils.data import Dataset
from utils.dataset_helper import get_affine_transform,affine_transform
from core.utils.helper_functions import load_matfile
class InfantDataSet(Dataset):
def __init__(self, root,pose_root,subjects,path=None, transform=transforms.ToTensor(),is_train=True):
"""
        root: the root of data, e.g. '/vol/biodata/data/human3.6m/training'
pose_root: the root of 2d pose;
subjects: subjects=['EMT4', 'EMT7', 'EMT38', 'EMT36', 'EMT31', 'EMT43',
'EMT5', 'EMT9', 'EMT47', 'EMT45', 'EMT29', 'EMT42',
'EMT23', 'EMT41', 'EMT37', 'EMT48', 'EMT44', 'EMT46',
'EMT20', 'EMT34', 'EMT11', 'EMT30', 'EMT39', 'EMT35',
'EMT14']
        transform: torchvision.transforms used to resize and crop frames to (256,256)
"""
self.root = root
self.pose_root=pose_root
self.indices = np.array([3, 2, 9, 8, 4, 1, 10, 7, 13, 12, 15, 14, 5, 0, 11, 6])
self.subjects=subjects
# load dataset
self.sequences = []
self.transform=transform
self.is_train=is_train
self.dataset=None
if path != None:
self.load_seq(path)
else:
for subject in tqdm(self.subjects): # actor is in the form of 'S1','S5'
if not os.path.isdir(os.path.join(root, subject)):
print(subject,"not exists")
continue
frames = natsorted(os.listdir(os.path.join(root, subject)))
frame_nums = [int(x[6:-4]) for x in frames]
frame_nums=np.array(sorted(frame_nums))-1
pred_path=os.path.join(pose_root,subject+'.pickle')
if not os.path.exists(pred_path):
print(pred_path,"not exists")
continue
with open(pred_path, "rb") as file:
pickle_file = pickle.load(file)
for frame in frames:
frame_num=self.find_index(frame,pickle_file)
frame2=random.sample(frames, 1)[0]
seq={'subject': subject,
'bbox':pickle_file[frame_num]['bounding_box'],
'center': pickle_file[frame_num]['center'],
'scale': pickle_file[frame_num]['scale'],
'frame_num':frame_num,
'frame1':pickle_file[frame_num]['frame_id'],
'frame2': frame2,
'pose_2d':pickle_file[frame_num]['predicted_keypoints']
}
self.sequences.append(seq)
def get_single(self, sequence):
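        # Warp both frames with the same affine transform derived from the detected
        # person's center and scale, then convert the frames and the 2D pose to tensors.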
bbox=sequence['bbox']
center=sequence['center']
scale=np.asarray([sequence['scale'],sequence['scale']])
image_size=(256,256)
frame1_path=os.path.join(self.root,sequence['subject'],sequence['frame1'])
frame2_path=os.path.join(self.root,sequence['subject'],sequence['frame2'])
pose_2d=sequence['pose_2d'][self.indices]
frame1=Image.open(frame1_path)
frame2=Image.open(frame2_path)
trans = get_affine_transform(center, scale, 0, image_size)
frame1=cv2.warpAffine(np.array(frame1), trans, (int(image_size[0]), int(image_size[1])), flags=cv2.INTER_LINEAR)
frame2=cv2.warpAffine(np.array(frame2), trans, (int(image_size[0]), int(image_size[1])), flags=cv2.INTER_LINEAR)
frame1_tensor=self.transform(frame1)
frame2_tensor=self.transform(frame2)
pose2d_tensor=torch.FloatTensor(pose_2d)
return frame1_tensor,frame2_tensor,pose2d_tensor
def find_index(self,s,file):
j=None
for i in range(len(file)):
if s == file[i]['frame_id']:
j=i
return j
def get_sequences(self):
return self.sequences
def save_seq(self,path):
if not os.path.exists(path):
os.makedirs(path)
seq=self.__dict__['sequences']
torch.save(seq, os.path.join(path,'sequences.tar'))
print("saved successfully!")
def load_seq(self,path):
seq_load=torch.load(path)
self.sequences=seq_load
print("load successfully!")
def __getitem__(self, index):
seq=self.sequences[index]
return self.get_single(seq)
def __len__(self):
return len(self.sequences)
class Infant3DPose(Dataset):
def __init__(self, path,load_path=None):
self.path = path
self.indices = np.array([15, 14, 10, 6, 3, 0, 11, 7, 4, 1, 12, 8, 5, 2, 13, 9])
self.keypoints = ['hips', 'shoulders', 'knees', 'elbows', 'ankles', 'wrists', 'feet', 'hands']
self.sequences=[]
if load_path != None:
self.load_seq(load_path)
else:
self.sequences=self.get_sequences()
def save_seq(self,path):
if not os.path.exists(path):
os.makedirs(path)
seq=self.__dict__['sequences']
torch.save(seq, os.path.join(path,'pose3d_sequences.tar'))
print("saved successfully!")
def load_seq(self,path):
seq_load=torch.load(path)
self.sequences=seq_load
print("load successfully!")
def get_sequences(self):
spatial_indices = np.array([1, 2, 0])
sequences = []
files = glob(os.path.join(self.path, '*.mat'))
sequences=[]
for file in tqdm(files):
# subj = os.path.splitext(os.path.split(file)[-1])[0]
data, timestamps = load_matfile(file)
# only xyz
data = data[:, 0:3]
data = data.transpose((0, 2, 1))
# tracker data is 120 Hz, camera is 30 Hz, so factor is 4
tracker_data = data
for i in range(len(tracker_data)):
pose = tracker_data[i]
# change keypoint order and spatial orientation
pose = pose[self.indices]
pose = pose[..., spatial_indices]
pose = self.align(pose)
first = pose
pelvis = first[0] + (first[1] - first[0]) / 2
pose -= pelvis
pose = self.normalize(pose)
pose1 = np.copy(pose)
pose1[..., 1] = pose[..., 2]
pose1[..., 2] = pose[..., 1]
sequences.append(pose1)
return sequences
def normalize(self, points):
first = points
# normalize to unit cube -1 to 1
max_ = np.abs(first.max())
min_ = np.abs(first.min())
if max_ >= min_:
points /= max_
else:
points /= min_
return points
def get_angle(self, vec1, vec2):
inv = vec1 @ vec2 / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
return np.arccos(inv)
def align(self, sequence):
"""remove y component of hip line,
align pelvis-neck line with z axis"""
points = sequence
hip_line = points[0] - points[1]
pelvis = points[0] + (points[1] - points[0]) / 2
neck = points[2] + (points[3] - points[2]) / 2
pelvis_neck_line = neck - pelvis
# pelvis neck
rot_axis1 = np.array([0, 0, 1])
angle = self.get_angle(pelvis_neck_line, rot_axis1)
cross_prod = np.cross(pelvis_neck_line, rot_axis1)
cross_prod /= np.linalg.norm(cross_prod)
R1 = R.from_rotvec(angle * cross_prod)
points = R1.apply(points)
# hip
hip_line = points[0] - points[1]
rot_axis2 = np.array([1, 0])
angle = self.get_angle(hip_line[0:2], rot_axis2)
cross_prod = np.cross(hip_line[0:2], rot_axis2)
cross_prod /= np.linalg.norm(cross_prod)
R2 = R.from_rotvec(angle * np.array([0, 0, cross_prod]))
points=R2.apply(points)
rot = R.from_rotvec(np.array([0., 0., np.pi]))
points=rot.apply(points)
return points
def __getitem__(self, idx):
pose = np.array(self.sequences[idx])
# p1 = np.copy(pose)
# p1[..., 1] = pose[..., 2]
# p1[..., 2] = pose[..., 1]
pose_tensor=torch.FloatTensor(pose)
return pose_tensor
def __len__(self):
return len(self.sequences)
|
Qingwei-Li98/PoseEstimation
|
core/utils/infant_dataset.py
|
infant_dataset.py
|
py
| 8,713 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73652411389
|
# Given a positive integer n, return the smallest positive integer that is a multiple of both 2 and n.
class Solution(object):
def smallestEvenMultiple(self, n):
"""
:type n: int
:rtype: int
"""
if n % 2 != 0:
return 2 * n
else:
return n
n = 5
a = Solution()
print(a.smallestEvenMultiple(n))
|
xxxxlc/leetcode
|
competition/单周赛/311/smallestEvenMultiple.py
|
smallestEvenMultiple.py
|
py
| 359 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
30364500871
|
import sys
import mock
import six
from okonomiyaki.errors import InvalidMetadataField
from ..python_implementation import PythonABI, PythonImplementation
from hypothesis import given
from hypothesis.strategies import sampled_from
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestPythonImplementation(unittest.TestCase):
@given(sampled_from((
('2', '7', 'cp27'), ('3', '8', 'cp38'),
('4', '15', 'cp415'), ('3', '11', 'cp311'))))
def test_creation(self, version):
# Given
kind = 'cpython'
major, minor, r_tag = version
# When
tag = PythonImplementation(kind, major, minor)
# Then
self.assertEqual(tag.abbreviated_implementation, 'cp')
self.assertEqual(str(tag), r_tag)
self.assertIsInstance(six.text_type(tag), six.text_type)
def test_from_running_python(self):
# When
with mock.patch(
"okonomiyaki.platforms.python_implementation."
"_abbreviated_implementation",
return_value="cp"):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"cp27")
# When
with mock.patch("sys.pypy_version_info", "pypy 1.9", create=True):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"pp27")
# When
with mock.patch("sys.platform", "java 1.7", create=True):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"jy27")
# When
with mock.patch("sys.platform", "cli", create=True):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"ip27")
@given(sampled_from((
("cpython", "cp"), ("python", "py"),
("pypy", "pp"), ("dummy", "dummy"))))
def test_abbreviations(self, kinds):
# Given
major = 2
minor = 7
kind, r_abbreviated = kinds
# When
tag = PythonImplementation(kind, major, minor)
# Then
self.assertEqual(tag.abbreviated_implementation, r_abbreviated)
@given(sampled_from((
(2, 7, 'cp27'), (3, 8, 'cp38'),
(3, 4, 'cpython34'), (4, 5, 'cp4_5'),
(24, 7, 'cp24_7'),
(4, 15, 'cp415'), (3, 11, 'cp311'))))
def test_from_string(self, data):
# Given
major, minor, tag_string = data
# When
tag = PythonImplementation.from_string(tag_string)
# Then
self.assertEqual(tag.kind, "cpython")
self.assertEqual(tag.major, major)
if minor is not None:
self.assertEqual(tag.minor, minor)
@given(sampled_from(('cp2', 'py3', 'cp', 'pp4567')))
def test_from_string_errors(self, data):
# When/Then
message = r"^Invalid value for metadata field 'python_tag': '{}'$"
with self.assertRaisesRegexp(
InvalidMetadataField, message.format(data)):
PythonImplementation.from_string(data)
class TestPythonABI(unittest.TestCase):
def test_pep425_tag_string_none(self):
# Given
abi_tag = None
# When
abi_tag_string = PythonABI.pep425_tag_string(abi_tag)
# Then
self.assertEqual(abi_tag_string, u"none")
self.assertIsInstance(abi_tag_string, six.text_type)
def test_pep425_tag_string(self):
# Given
abi_tag = PythonABI(u"cp27mu")
# When
abi_tag_string = PythonABI.pep425_tag_string(abi_tag)
# Then
self.assertEqual(abi_tag_string, u"cp27mu")
self.assertIsInstance(abi_tag_string, six.text_type)
|
enthought/okonomiyaki
|
okonomiyaki/platforms/tests/test_python_implementation.py
|
test_python_implementation.py
|
py
| 4,083 |
python
|
en
|
code
| 2 |
github-code
|
6
|
33548298905
|
import sys
import argparse
import tensorflow as tf
from keras.models import Model, load_model
from keras.layers import TimeDistributed, Conv1D, Dense, Embedding, Input, Dropout, LSTM, Bidirectional, MaxPooling1D, \
Flatten, concatenate
from keras.initializers import RandomUniform
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
from keras.models import load_model
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform, randint
from util.util import *
def data():
train_df = get_train_data()
#found = train_df['ner']
#hasValue = [True if 'VALUE' in x else False for i, x in enumerate(found)]
#train_df = train_df[np.array(hasValue)]
print(train_df.shape)
train_data, case2Idx, caseEmbeddings, word2Idx, wordEmbeddings, \
char2Idx, label2Idx, sentences_maxlen, words_maxlen = prepare_data(train_df)
val_df = get_val_data()
print(val_df.shape)
val_data = embed_sentences(add_char_information_in(tag_data(val_df)), INFOBOX_CLASS, PROPERTY_NAME)
X_train, Y_train = split_data(train_data)
X_val, Y_val = split_data(val_data)
return X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings, label2Idx, char2Idx, sentences_maxlen, words_maxlen
def model(X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings, label2Idx, char2Idx, sentences_maxlen,
words_maxlen):
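    # Note: the {{choice([...])}} expressions below are hyperas search-space templates;
    # hyperas rewrites this function's source before running it, so they are not plain Python.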
temp = []
for item in Y_train[0]:
flatten = [i for sublist in item for i in sublist]
for i in flatten:
temp.append(i)
temp = np.asarray(temp)
print('labels', np.unique(np.ravel(temp, order='C')))
# word-level input
words_input = Input(shape=(None,), dtype='int32', name='Words_input')
words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1],
weights=[wordEmbeddings], trainable=False)(words_input)
# case-info input
casing_input = Input(shape=(None,), dtype='int32', name='Casing_input')
casing = Embedding(input_dim=caseEmbeddings.shape[0], output_dim=caseEmbeddings.shape[1],
weights=[caseEmbeddings], trainable=False)(casing_input)
# character input
character_input = Input(shape=(None, words_maxlen,), name="Character_input")
embed_char_out = TimeDistributed(
Embedding(input_dim=len(char2Idx), output_dim=50,
embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)),
name="Character_embedding")(character_input)
dropout = Dropout(0.5)(embed_char_out)
# CNN
conv1d_out = TimeDistributed(
Conv1D(kernel_size={{choice([3, 5])}}, filters=10,
padding='same', activation='tanh', strides=1),
name="Convolution")(dropout)
maxpool_out = TimeDistributed(MaxPooling1D({{choice([10, 25, 50])}}), name="Maxpool")(conv1d_out)
char = TimeDistributed(Flatten(), name="Flatten")(maxpool_out)
char = Dropout(0.5)(char)
# concat & BLSTM
output = concatenate([words, casing, char])
output = Bidirectional(LSTM({{choice([100, 200, 300])}},
return_sequences=True,
dropout=0.5, # on input to each LSTM block
recurrent_dropout=0.25 # on recurrent input signal
), name="BLSTM")(output)
output = TimeDistributed(Dense(len(label2Idx), activation='softmax'), name="Softmax_layer")(output)
# set up model
model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='nadam', metrics=['accuracy'])
model.summary()
train_batch, train_batch_len = createBatches2CNN_BLSTM(X_train, Y_train)
val_batch, val_batch_len = createBatches2CNN_BLSTM(X_val, Y_val)
model.fit_generator(iterate_minibatches_CNN_BLSTM(train_batch, train_batch_len),
steps_per_epoch=len(train_batch),
# class_weight=class_weight_vect,
epochs=10, verbose=2, validation_steps=len(val_batch),
validation_data=iterate_minibatches_CNN_BLSTM(val_batch, val_batch_len))
# score, acc = model.evaluate(X_val, Y_val, verbose=0)
score, acc = model.evaluate_generator(generator=iterate_minibatches_CNN_BLSTM(val_batch, val_batch_len), steps=len(val_batch),
verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == "__main__":
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials(),
functions=[createBatches2CNN_BLSTM, iterate_minibatches_CNN_BLSTM])
plot_model(best_model, to_file='models/' + PROPERTY_NAME + '-CNN_BLSTM_best_model.png', show_shapes=True, show_layer_names=True)
best_model.save('models/'+INFOBOX_CLASS+'/dl/' + PROPERTY_NAME + '-CNN_BLSTM_best_model.h5')
print("Best performing model chosen hyper-parameters:")
print(best_run)
model = load_model('models/'+INFOBOX_CLASS+'/dl/' + PROPERTY_NAME + '-CNN_BLSTM_best_model.h5')
test_df = get_test_data()
print(test_df.shape)
test_data = embed_sentences(add_char_information_in(tag_data(test_df)), INFOBOX_CLASS, PROPERTY_NAME)
X_test, Y_test = split_data(test_data)
temp = []
for item in Y_test[0]:
flatten = [i for sublist in item for i in sublist]
for i in flatten:
temp.append(i)
temp = np.asarray(temp)
print('labels', np.unique(np.ravel(temp, order='C')))
test_batch, test_batch_len = createBatches2CNN_BLSTM(X_test, Y_test)
print("Evalutation of best performing model:")
score, acc = model.evaluate_generator(generator=iterate_minibatches_CNN_BLSTM(test_batch, test_batch_len),
steps=len(test_batch), verbose=0)
print("acc on test: ", acc)
|
guardiaum/DeepEx
|
CNN_BLSTM_fit_hyperparams.py
|
CNN_BLSTM_fit_hyperparams.py
|
py
| 6,299 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27054375172
|
import numpy as np
import pandas as pd
import config
import sys
import tensorflow as tf
from keras import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from service.Preprocess import Preprocess
from service.FeatureEngineer import FeatureEngineer
class TaxiFaresPredictionNYC:
def __init__(self):
self.df = None
self.x = None
self.y = None
self.x_train = None
self.y_test = None
self.x_test = None
self.y_train = None
self.df_prescaled = None
self.f_engineer = None
self.model = Sequential()
self.preprocessObj = Preprocess()
def feature_engineer(self):
self.f_engineer = FeatureEngineer(self.df)
self.f_engineer.create_date_columns()
self.f_engineer.create_dist_column()
self.f_engineer.create_airport_dist_features()
def preprocess(self):
self.preprocessObj.remove_missing_values()
self.preprocessObj.remove_fare_amount_outliers()
self.preprocessObj.replace_passenger_count_outliers()
self.preprocessObj.remove_lat_long_outliers()
self.df = self.preprocessObj.get_dataset()
self.feature_engineer()
self.df = self.f_engineer.get_dataset()
self.df_prescaled = self.df.copy()
self.df = self.preprocessObj.scale()
self.x = self.df.loc[:, self.df.columns != 'fare_amount']
self.y = self.df.fare_amount
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(self.x, self.y, test_size=0.2)
def create(self):
self.model.add(Dense(128, activation='relu', input_dim=self.x_train.shape[1]))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dense(32, activation='relu'))
self.model.add(Dense(8, activation='relu'))
self.model.add(Dense(1))
def compile(self):
self.model.compile(loss='mse', optimizer='adam', metrics=['mse'])
self.model.fit(self.x_train, self.y_train, epochs=1)
def predict(self):
train_predict = self.model.predict(self.x_train)
train_mrs_error = np.sqrt(mean_squared_error(self.y_train, train_predict))
print("Train RMSE: {:0.2f}".format(train_mrs_error))
test_predict = self.model.predict(self.x_test)
test_mrs_error = np.sqrt(mean_squared_error(self.y_test, test_predict))
print("Test RMSE: {:0.2f}".format(test_mrs_error))
def get_dataset(self):
return self.df
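# Example usage (sketch; assumes the Preprocess service loads its dataset internally):
#   predictor = TaxiFaresPredictionNYC()
#   predictor.preprocess()
#   predictor.create()
#   predictor.compile()
#   predictor.predict()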
|
kalinVn/new_york_city_taxi_fare_predicton
|
service/TaxiFaresPredictionNYC.py
|
TaxiFaresPredictionNYC.py
|
py
| 2,573 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32989650417
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SelectField
from wtforms.validators import InputRequired, Optional
sizes = ['XS','S', 'M', 'L', 'XL']
ratings = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5,
6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]
class AddCupcakeForm(FlaskForm):
"""Form for adding cupcakes"""
flavor = StringField("Flavor", validators=[InputRequired(message="Flavor cannot be blank")])
size = SelectField("Size", choices=[(size, size) for size in sizes], validators=[InputRequired(message="Size cannot be blank")])
rating = SelectField("Rating", coerce=float, choices=[(rating, rating) for rating in ratings], validators=[InputRequired(message="Rating cannot be blank")])
image = StringField("Image Link", validators=[Optional()])
|
BradButler96/flask-cupcakes
|
forms.py
|
forms.py
|
py
| 825 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24287653793
|
from random import random
import bisect
def get_new_state(current_state, transitions):
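    # Build, for each source state, a cumulative-probability list alongside the matching
    # target states, then draw a uniform sample and locate its bucket with bisect.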
transition_map = dict()
for source, target, probability in transitions:
if source not in transition_map:
transition_map[source] = ([], [])
if not transition_map[source][0]:
transition_map[source][0].append(probability)
else:
prev = transition_map[source][0][-1]
transition_map[source][0].append(probability + prev)
transition_map[source][1].append(target)
r = random()
index = bisect.bisect(transition_map[current_state][0], r)
print(transition_map)
return transition_map[current_state][1][index]
def markov(start, transitions):
current_state = start
states = dict()
for t in transitions:
states[t[0]] = 0
for i in range(5000):
states[current_state] += 1
current_state = get_new_state(current_state, transitions)
return states
def main():
visited = (markov('a', [
('a', 'a', 0.9),
('a', 'b', 0.075),
('a', 'c', 0.025),
('b', 'a', 0.15),
('b', 'b', 0.8),
('b', 'c', 0.05),
('c', 'a', 0.25),
('c', 'b', 0.25),
('c', 'c', 0.5)
]))
assert visited['a'] > visited['b']
assert visited['a'] > visited['c']
assert visited['b'] > visited['c']
if __name__ == '__main__':
main()
|
ckallum/Daily-Coding-Problem
|
solutions/#175.py
|
#175.py
|
py
| 1,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71375478588
|
# Import the utils
import math
import sys
import os
sys.path.append(os.path.abspath('../QUtils'))
from qutils import pprint, graph
# Import the QISKit SDK
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
# Create a Quantum Register with 2 qubits.
q = QuantumRegister(3)
# Create a Classical Register with 2 bits.
c = ClassicalRegister(3)
# Create a Quantum Circuit
qc = QuantumCircuit(q, c)
# move the qubits into a superposition such that, when an H gate and a Measure
# are applied, they are equally likely to collapse to 0 or 1
qc.h(q)
qc.s(q)
pi = math.pi
# add the gates to get the normal distribution
"""
# makes 111 less likely
qc.crz(-0.3*pi, q[2], q[1])
qc.crz(-0.3*pi, q[1], q[0])
# somehow encourages 100
qc.x(q[2])
qc.crz(-0.4*pi, q[0], q[1])
qc.crz(-0.4*pi, q[1], q[2])
qc.crz(-0.4*pi, q[0], q[2])
qc.x(q[2])
"""
# THE 2 QUBIT CIRCUIT
# I don't get why but this bumps up the ends and drops the middle
# aka the exact opposite of what we want
#qc.crz(-0.5*pi, q[1], q[0])
#qc.crz(-0.5*pi, q[0], q[1])
# this then flips it the right way up
#qc.rz(pi, q[0])
# ATTEMPT AT 3 QUBIT CIRCUIT - kinda works?
# ups 011 and 100, but also 000 and 111
qc.crz(-0.5*pi, q[1], q[0])
qc.crz(-0.5*pi, q[0], q[1])
qc.crz(-0.5*pi, q[2], q[1])
qc.rz(pi, q[2])
qc.h(q)
# Add a Measure gate to see the state.
qc.measure(q, c)
# Compile and run the Quantum circuit on a simulator backend
job_sim = execute(qc, "local_qasm_simulator", shots=7000)
sim_result = job_sim.result()
# Show the results
print("simulation: ", sim_result)
# Returns a dict
pprint(sim_result.get_counts(qc))
graph(sim_result.get_counts(qc))
|
maddy-tod/quantum
|
Code/MonteCarlo/NormalTest.py
|
NormalTest.py
|
py
| 1,675 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27673629381
|
import numpy as np
import math
import cv2
# input hsvColor: [h, s, v] with ranges (0-180, 0-255, 0-255),
# and output the corrected hsvColor: [h, s, v]
def get_right_V(hsvColor):
h = float(hsvColor[0])
s = float(hsvColor[1])
s1 = float(hsvColor[1])/255
v1 = float(hsvColor[2])
h60 = h / 60.0
h60f = math.floor(h60)
f = h60 - h60f
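    # f is the fractional position of the hue within its 60-degree sector; the V
    # correction below uses a different formula for each half of the sector.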
if f<0.5:
v = 3*v1*(1-f*s1)/(3-2*s1)
elif f>=0.5:
v = 3*v1*(1-s1+f*s1)/(3-2*s1)
return [h, s, min(v,255)]
if __name__=="__main__":
#input and ouput path for image.
PATH_TO_WRONG_IMAGE = './wrong_image.jpg'
PATH_TO_RIGHT_IMAGE = './right_image.jpg'
image_wrong = cv2.imread(PATH_TO_WRONG_IMAGE)
image_wrong_hsv = cv2.cvtColor(image_wrong, cv2.COLOR_BGR2HSV)
shape = image_wrong.shape
image_right_hsv = np.zeros(shape, dtype=np.uint8)
#iterate over every pixel to change the V value.
for row in range(shape[0]):
for col in range(shape[1]):
image_right_hsv[row][col] = get_right_V(image_wrong_hsv[row][col])
image_right = cv2.cvtColor(image_right_hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite(PATH_TO_RIGHT_IMAGE, image_right)
|
zznewclear13/Gradient_Colorizing_Fix
|
Gradient_Colorzing_Fix.py
|
Gradient_Colorzing_Fix.py
|
py
| 1,205 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35914525125
|
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
longest=""
i=1
j=0
if strs==[""]:
return ""
elif strs==["a"]:
return "a"
shortestword=10000
for word in strs:
if len(word) < shortestword:
shortestword = len(word)
while j < shortestword:
while i < len(strs):
if strs[i]=="" or strs[i-1]=="":
return longest
elif strs[i][j]==strs[i-1][j]:
i+=1
else: return longest
longest=longest+strs[i-1][j]
j+=1
i=1
return longest
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if not len(min(strs)) or len(strs)<2:
return min(strs)
return_string = self.comparator(strs[1],strs[0])
for string in strs:
return_string=self.comparator(return_string,string)
return return_string
def comparator(self,str1,str2):
if not str1 or not str2:
return ""
index=0
return_string=[]
while index<len(min(str1,str2)):
if str1[index]==str2[index]:
return_string.append(str1[index])
index+=1
else:
break
return "".join(return_string)
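# The second solution folds comparator() over the list: the running prefix is
# repeatedly intersected, character by character, with each remaining string.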
|
azbluem/LeetCode-Solutions
|
solutions/14.longest-common-prefix.py
|
14.longest-common-prefix.py
|
py
| 1,401 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16540523617
|
import os
import sys
from nuitka.utils.FileOperations import (
areSamePaths,
isFilenameBelowPath,
isFilenameSameAsOrBelowPath,
)
from nuitka.utils.Utils import (
isAndroidBasedLinux,
isFedoraBasedLinux,
isLinux,
isMacOS,
isPosixWindows,
isWin32Windows,
withNoDeprecationWarning,
)
from .PythonVersions import (
getInstalledPythonRegistryPaths,
getRunningPythonDLLPath,
getSystemPrefixPath,
isStaticallyLinkedPython,
python_version,
python_version_str,
)
def isNuitkaPython():
"""Is this our own fork of CPython named Nuitka-Python."""
# spell-checker: ignore nuitkapython
if python_version >= 0x300:
return sys.implementation.name == "nuitkapython"
else:
return sys.subversion[0] == "nuitkapython"
_is_anaconda = None
def isAnacondaPython():
"""Detect if Python variant Anaconda"""
# singleton, pylint: disable=global-statement
global _is_anaconda
if _is_anaconda is None:
_is_anaconda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
return _is_anaconda
def isApplePython():
if not isMacOS():
return False
# Python2 on 10.15 or higher
if "+internal-os" in sys.version:
return True
# Older macOS had that
if isFilenameSameAsOrBelowPath(path="/usr/bin/", filename=getSystemPrefixPath()):
return True
# Newer macOS has that
if isFilenameSameAsOrBelowPath(
path="/Library/Developer/CommandLineTools/", filename=getSystemPrefixPath()
):
return True
# Xcode has that on macOS, we consider it an Apple Python for now, it might
# be more usable than Apple Python, we but we delay that.
if isFilenameSameAsOrBelowPath(
path="/Applications/Xcode.app/Contents/Developer/",
filename=getSystemPrefixPath(),
):
return True
return False
def isHomebrewPython():
# spell-checker: ignore sitecustomize
if not isMacOS():
return False
candidate = os.path.join(
getSystemPrefixPath(), "lib", "python" + python_version_str, "sitecustomize.py"
)
if os.path.exists(candidate):
with open(candidate, "rb") as site_file:
line = site_file.readline()
if b"Homebrew" in line:
return True
return False
def isPyenvPython():
if isWin32Windows():
return False
return os.environ.get("PYENV_ROOT") and isFilenameSameAsOrBelowPath(
path=os.environ["PYENV_ROOT"], filename=getSystemPrefixPath()
)
def isMSYS2MingwPython():
"""MSYS2 the MinGW64 variant that is more Win32 compatible."""
if not isWin32Windows() or "GCC" not in sys.version:
return False
import sysconfig
if python_version >= 0x3B0:
return "-mingw_" in sysconfig.get_config_var("EXT_SUFFIX")
else:
return "-mingw_" in sysconfig.get_config_var("SO")
def isTermuxPython():
"""Is this Termux Android Python."""
# spell-checker: ignore termux
if not isAndroidBasedLinux():
return False
return "com.termux" in getSystemPrefixPath().split("/")
def isUninstalledPython():
# Debian package.
if isDebianPackagePython():
return False
if isStaticallyLinkedPython():
return False
if os.name == "nt":
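        # On Windows, the Python is considered installed only when the running python
        # DLL lives under the system directory; anything else counts as uninstalled.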
import ctypes.wintypes
GetSystemDirectory = ctypes.windll.kernel32.GetSystemDirectoryW
GetSystemDirectory.argtypes = (ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD)
GetSystemDirectory.restype = ctypes.wintypes.DWORD
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = GetSystemDirectory(buf, MAX_PATH)
assert res != 0
system_path = os.path.normcase(buf.value)
return not getRunningPythonDLLPath().startswith(system_path)
return isAnacondaPython() or "WinPython" in sys.version
_is_win_python = None
def isWinPython():
"""Is this Python from WinPython."""
if "WinPython" in sys.version:
return True
# singleton, pylint: disable=global-statement
global _is_win_python
if _is_win_python is None:
for element in sys.path:
if os.path.basename(element) == "site-packages":
if os.path.exists(os.path.join(element, "winpython")):
_is_win_python = True
break
else:
_is_win_python = False
return _is_win_python
def isDebianPackagePython():
"""Is this Python from a debian package."""
# spell-checker: ignore multiarch
if not isLinux():
return False
if python_version < 0x300:
return hasattr(sys, "_multiarch")
else:
with withNoDeprecationWarning():
try:
from distutils.dir_util import _multiarch
except ImportError:
return False
else:
return True
def isFedoraPackagePython():
"""Is the Python from a Fedora package."""
if not isFedoraBasedLinux():
return False
system_prefix_path = getSystemPrefixPath()
return system_prefix_path == "/usr"
def isCPythonOfficialPackage():
"""Official CPython download, kind of hard to detect since self-compiled doesn't change much."""
sys_prefix = getSystemPrefixPath()
# For macOS however, it's very knowable.
if isMacOS() and isFilenameBelowPath(
path="/Library/Frameworks/Python.framework/Versions/", filename=sys_prefix
):
return True
# For Windows, we check registry.
if isWin32Windows():
for registry_python_exe in getInstalledPythonRegistryPaths(python_version_str):
if areSamePaths(sys_prefix, os.path.dirname(registry_python_exe)):
return True
return False
def isGithubActionsPython():
return os.environ.get(
"GITHUB_ACTIONS", ""
) == "true" and getSystemPrefixPath().startswith("/opt/hostedtoolcache/Python")
def getPythonFlavorName():
"""For output to the user only."""
# return driven, pylint: disable=too-many-branches,too-many-return-statements
if isNuitkaPython():
return "Nuitka Python"
elif isAnacondaPython():
return "Anaconda Python"
elif isWinPython():
return "WinPython"
elif isDebianPackagePython():
return "Debian Python"
elif isFedoraPackagePython():
return "Fedora Python"
elif isHomebrewPython():
return "Homebrew Python"
elif isApplePython():
return "Apple Python"
elif isPyenvPython():
return "pyenv"
elif isPosixWindows():
return "MSYS2 Posix"
elif isMSYS2MingwPython():
return "MSYS2 MinGW"
elif isTermuxPython():
return "Android Termux"
elif isCPythonOfficialPackage():
return "CPython Official"
elif isGithubActionsPython():
return "GitHub Actions Python"
else:
return "Unknown"
|
Nuitka/Nuitka
|
nuitka/PythonFlavors.py
|
PythonFlavors.py
|
py
| 6,933 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
39972780174
|
#!/usr/bin/env python3
# ------------------------------------------------------------------------
# MIDI Control for SignalFlow
# ------------------------------------------------------------------------
from signalflow import *
import configparser
import logging
import mido
import os
logger = logging.getLogger(__name__)
class MIDIManager:
shared_manager = None
def __init__(self, device_name: str = None):
if device_name is None:
if os.getenv("SIGNALFLOW_MIDI_OUTPUT_DEVICE_NAME") is not None:
device_name = os.getenv("SIGNALFLOW_MIDI_OUTPUT_DEVICE_NAME")
else:
config_path = os.path.expanduser("~/.signalflow/config")
parser = configparser.ConfigParser()
parser.read(config_path)
try:
# --------------------------------------------------------------------------------
# configparser includes quote marks in its values, so strip these out.
# --------------------------------------------------------------------------------
device_name = parser.get(section="midi", option="input_device_name")
device_name = device_name.strip('"')
except configparser.NoOptionError:
pass
except configparser.NoSectionError:
pass
self.input = mido.open_input(device_name)
self.input.callback = self.handle_message
self.voice_class = None
self.voice_class_kwargs = None
self.notes = [None] * 128
self.note_handlers = [[] for _ in range(128)]
self.control_handlers = [[] for _ in range(128)]
self.control_values = [0] * 128
self.channel_handlers = [[] for _ in range(16)]
if MIDIManager.shared_manager is None:
MIDIManager.shared_manager = self
logger.info("Opened MIDI input device: %s" % self.input.name)
def handle_message(self, message):
if message.type == 'control_change':
logger.debug("Received MIDI control change: control %d, value %d" % (message.control, message.value))
self.on_control_change(message.control, message.value)
elif message.type == 'note_on':
logger.debug("Received MIDI note on: note %d, velocity %d" % (message.note, message.velocity))
if self.voice_class:
voice = self.voice_class(frequency=midi_note_to_frequency(message.note),
amplitude=message.velocity / 127,
**self.voice_class_kwargs)
voice.play()
voice.auto_free = True
self.notes[message.note] = voice
if self.note_handlers[message.note]:
self.note_handlers[message.note]()
elif message.type == 'note_off':
logger.debug("Received MIDI note off: note %d" % (message.note))
if self.notes[message.note]:
self.notes[message.note].set_input("gate", 0)
try:
channel = message.channel
for handler in self.channel_handlers[channel]:
handler.handle_message(message)
except AttributeError:
pass
@classmethod
def get_shared_manager(cls):
if MIDIManager.shared_manager is None:
MIDIManager.shared_manager = MIDIManager()
return MIDIManager.shared_manager
def set_voice_patch(self, cls, **kwargs):
self.voice_class = cls
self.voice_class_kwargs = kwargs
def add_note_handler(self, note, handler):
self.note_handlers[note] = handler
def add_control_handler(self, control, handler):
self.control_handlers[control].append(handler)
def on_control_change(self, control, value):
self.control_values[control] = value
for handler in self.control_handlers[control]:
handler.on_change(value)
def get_control_value(self, control):
return self.control_values[control]
def add_channel_handler(self, channel, handler):
self.channel_handlers[channel].append(handler)
def remove_channel_handler(self, channel, handler):
self.channel_handlers[channel].remove(handler)
class MIDIControl(Patch):
def __init__(self, control, range_min, range_max, initial=None, mode="absolute", manager=None, curve="linear"):
super().__init__()
assert mode in ["absolute", "relative"]
if manager is None:
manager = MIDIManager.get_shared_manager()
self.value = self.add_input("value")
self.value_smoothed = Smooth(self.value, 0.999)
self.set_output(self.value_smoothed)
self.control = control
self.range_min = range_min
self.range_max = range_max
self.curve = curve
if initial is not None:
if self.curve == "exponential":
self._value_norm = scale_exp_lin(initial, range_min, range_max, 0, 1)
elif self.curve == "linear":
self._value_norm = scale_lin_lin(initial, range_min, range_max, 0, 1)
else:
self._value_norm = 0.5
self.update()
self.mode = mode
manager.add_control_handler(control, self)
def on_change(self, value):
if self.mode == "absolute":
self._value_norm = value / 127.0
elif self.mode == "relative":
if value > 64:
value = value - 128
change = value / 127.0
self._value_norm += change
if self._value_norm < 0:
self._value_norm = 0
if self._value_norm > 1:
self._value_norm = 1
self.update()
def update(self):
if self.curve == "exponential":
value_scaled = scale_lin_exp(self._value_norm, 0, 1, self.range_min, self.range_max)
elif self.curve == "linear":
value_scaled = scale_lin_lin(self._value_norm, 0, 1, self.range_min, self.range_max)
self.set_input("value", value_scaled)
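# Example usage (sketch; the control number and ranges are assumptions, not part of this file):
#   cutoff = MIDIControl(control=1, range_min=100, range_max=10000, curve="exponential")
# The patch's output then tracks MIDI CC 1, smoothed and scaled into the 100-10000 range.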
|
ideoforms/signalflow
|
auxiliary/libs/signalflow_midi/signalflow_midi.py
|
signalflow_midi.py
|
py
| 6,131 |
python
|
en
|
code
| 138 |
github-code
|
6
|
36208434520
|
import multiDownload as dl
import pandas as pd
import sqlite3
import json
import os
from datetime import datetime, timedelta
import shutil
import argparse
import jieba
from jieba import analyse
from wordcloud import WordCloud
from opencc import OpenCC
import requests
import emoji
with open("scripts/config.json", "r") as file:
data = json.load(file)
# account = data["account"]
# nickName = data["nickName"]
Cookie = data["Cookie"]
picDriverPath = data["picDriverPath"]
dbpath = data["dbpath"]
storyPicLink = data["storyPicLink"]
storyPicType = data["storyPicType"]
# Create the ArgumentParser object
parser = argparse.ArgumentParser()
parser.add_argument("account", help="输入account")
parser.add_argument("password", help="输入password")
parser.add_argument("nickName", help="输入nickName")
# parser.add_argument("Cookie", help="输入Cookie")
parser.add_argument("repo", help="输入repo")
options = parser.parse_args()
account = options.account
password = options.password
nickName = options.nickName
# Cookie = options.Cookie
repo = options.repo
font_path = "./scripts/font.otf"
cn_path_svg = "./output/postcrossing_cn.svg"
en_path_svg = "./output/postcrossing_en.svg"
excel_file = "./template/postcardStory.xlsx"
if os.path.exists(dbpath):
shutil.copyfile(dbpath, f"{dbpath}BAK")
def replateTitle(type):
with open(f"./output/title.json", "r",encoding="utf-8") as f:
title = json.load(f)
value = title.get(type)
from_or_to, pageNum, Num, title = value
return title
# Get the total distance of sent/received postcards
def getUserHomeInfo(type):
distance_all = []
content = dl.readDB(dbpath,type,"Mapinfo")
#print("content:",content)
for item in content:
distance_all.append(int(item["distance"]))
total = sum(distance_all)
rounds = round((total/40076),2)
return total,len(content),rounds
def getUserSheet(tableName):
data = dl.readDB(dbpath, "", tableName)
countryCount = len(data)
new_data = []
for i, item in enumerate(data):
if item['sentMedian']:
sentMedian = f"{item['sentMedian']}天"
else:
sentMedian = "-"
if item['receivedMedian']:
receivedMedian = f"{item['receivedMedian']}天"
else:
receivedMedian = "-"
formatted_item = {
'国家': f"{item['name']} {emoji.emojize(item['flagEmoji'],language='alias')}",
'已寄出': item['sentNum'],
'已收到': item['receivedNum'],
'寄出-平均': f"{item['sentAvg']}天",
'收到-平均': f"{item['receivedAvg']}天",
'寄出-中间值': sentMedian,
'收到-中间值': receivedMedian,
}
new_data.append(formatted_item)
    # Convert the data list into a DataFrame
df = pd.DataFrame(new_data)
    # Re-index starting from 1
df.index = df.index + 1
    # Convert the DataFrame to an HTML table, adding Bootstrap CSS classes and centred alignment
html_table = df.to_html(classes="table table-striped table-bordered", escape=False)
html_table = html_table.replace('<th>', '<th class="text-center">')
html_table = html_table.replace('<td>', '<td class="text-center">')
    # Build the complete HTML document
html_content = f'''
<!DOCTYPE html>
<html>
<head>
<title>{tableName}</title>
<link rel="stylesheet" href="../src/bootstrap-5.2.2/package/dist/css/bootstrap.min.css">
<style>
.table-responsive {{
width: 100%;
overflow-x: auto;
}}
</style>
</head>
<body>
<div class="container-fluid">
<div class="table-responsive">
{html_table}
</div>
</div>
</body>
</html>
'''
    # Save the HTML table as a web page
with open(f'./output/{tableName}.html', 'w', encoding="utf-8") as file:
file.write(html_content)
return countryCount
def replaceTemplate():
stat,content_raw,types = dl.getAccountStat(Cookie)
title_all=""
desc_all=""
countryNum = getUserSheet("CountryStats")
travelingNum = getTravelingID(account,"traveling",Cookie)
countryCount = f"> 涉及国家[🗺️**{countryNum}**]\n\n"
travelingCount = f"> 待签收[📨**{travelingNum}**]\n\n"
for type in types:
distance,num,rounds = getUserHomeInfo(type)
distance_all = format(distance, ",")
summary = f"**{num}** 📏**{distance_all}** km 🌏**{rounds}** 圈]\n\n"
if type == "sent":
desc = f"> 寄出[📤{summary}"
elif type == "received":
desc = f"> 收到[📥{summary}"
else:
desc =""
desc_all += desc
for type in types:
title = replateTitle(type)
title_all += f"#### [{title}](/{nickName}/postcrossing/{type})\n\n"
title_final = f"{desc_all}\n{countryCount}\n{travelingCount}\n{title_all}"
#print("title_all:\n",title_all)
storylist,storyNum = getCardStoryList("received")
commentlist,commentNum = getCardStoryList("sent")
calendar,series,height = createCalendar()
with open(f"./template/信息汇总_template.md", "r",encoding="utf-8") as f:
data = f.read()
dataNew = data.replace('$account',account)
print(f"已替换account:{account}")
dataNew = dataNew.replace('$title',title_final)
print("已替换明信片墙title")
dataNew = dataNew.replace('$storylist',storylist).replace('$storyNum',storyNum)
print("已替换明信片故事list")
dataNew = dataNew.replace('$commentlist',commentlist).replace('$commentNum',commentNum)
print("已替换明信片评论list")
dataNew = dataNew.replace('$calendar',calendar)
dataNew = dataNew.replace('$series',series)
dataNew = dataNew.replace('$height',str(height))
print("已替换明信片日历list")
dataNew = dataNew.replace('$repo',repo)
print(f"已替换仓库名:{repo}")
with open(f"./output/信息汇总.md", "w",encoding="utf-8") as f:
f.write(dataNew)
blog_path = r"D:\web\Blog2\src\Arthur\Postcrossing\信息汇总.md"
    # Replace with the local path of your blog; the file will be synced there automatically
if os.path.exists(blog_path):
with open(blog_path, "w", encoding="utf-8") as f:
f.write(dataNew)
def StoryXLS2DB(excel_file):
df = pd.read_excel(excel_file)
content_all = []
for index, row in df.iterrows():
data = {
"id": row[0],
"content_en": row[1],
"content_cn": row[2],
"comment_en": row[3],
"comment_cn": row[4],
}
content_all.append(data)
tablename = "postcardStory"
dl.writeDB(dbpath, content_all,tablename)
def getCardStoryList(type):
list_all = ""
content =dl.readDB(dbpath, type,"postcardStory")
num = str(len(content))
for id in content:
postcardID = id["id"]
content_en = id["content_en"]
content_cn = id["content_cn"]
comment_en = id["comment_en"]
comment_cn = id["comment_cn"]
def remove_blank_lines(text):
if text:
return "\n".join(line for line in text.splitlines() if line.strip())
return text
        # Remove blank lines
content_en = remove_blank_lines(content_en)
content_cn = remove_blank_lines(content_cn)
comment_en = remove_blank_lines(comment_en)
comment_cn = remove_blank_lines(comment_cn)
if comment_en:
comment = f'@tab 回复\n' \
f'* 回复原文\n\n> {comment_en}\n\n* 翻译:\n\n> {comment_cn}\n\n:::'
else:
comment = ":::"
#print("comment:",comment)
userInfo = id["userInfo"]
picFileName = id["picFileName"]
contryNameEmoji = id["contryNameEmoji"] if id["contryNameEmoji"] is not None else ""
travel_time = id["travel_time"]
distanceNum = id["distance"]
distance = format(distanceNum, ",")
if type == "received":
list = f'### [{postcardID}](https://www.postcrossing.com/postcards/{postcardID})\n\n' \
f'> 来自 {userInfo} {contryNameEmoji}\n' \
f'> 📏 {distance} km\n⏱ {travel_time}\n\n' \
f':::tabs\n' \
f'@tab 图片\n' \
f'<div class="image-preview"> <img src="{picDriverPath}/{picFileName}" />' \
f' <img src="{storyPicLink}/{postcardID}.{storyPicType}" /></div>' \
f'\n\n' \
f'@tab 内容\n' \
f'* 卡片文字\n\n> {content_en}\n\n* 翻译:\n\n> {content_cn}\n\n' \
f'{comment}\n\n' \
f'---\n'
else:
list = f'### [{postcardID}](https://www.postcrossing.com/postcards/{postcardID})\n\n' \
f'> 寄往 {userInfo} {contryNameEmoji}\n' \
f'> 📏 {distance} km\n⏱ {travel_time}\n\n' \
f':::tabs\n' \
f'@tab 图片\n' \
f'\n\n' \
f'' \
f'{comment}\n\n' \
f'---\n'
list_all += list
return list_all,num
def createCalendar():
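    # Build one ECharts calendar row (plus a matching heatmap series) for every year
    # found in output/UserStats.json; rows are stacked 150 px apart.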
with open("output/UserStats.json", "r") as file:
a_data = json.load(file)
year_list = []
for data in a_data:
        timestamp = data[0]  # get the timestamp
        date = datetime.fromtimestamp(timestamp)  # convert the timestamp to a date
        year = date.strftime("%Y")  # extract the year (YYYY)
if year not in year_list:
year_list.append(year)
calendar_all=""
series_all=""
for i,year in enumerate(year_list):
calendar = f"""
{{
top: {i*150},
cellSize: ["auto", "15"],
range: {year},
itemStyle: {{
color: '#ccc',
borderWidth: 3,
borderColor: '#fff'
}},
splitLine: true,
yearLabel: {{
show: true
}},
dayLabel: {{
firstDay: 1,
}}
}},
"""
calendar_all+=calendar
series = f"""
{{
type: "heatmap",
coordinateSystem: "calendar",
calendarIndex: {i},
data: data
}},
"""
series_all+=series
height = len(year_list)*150
return calendar_all, series_all, height
def createWordCloud(type, contents):
contents = contents.replace("nan","")
    # Output as SVG
if type == "cn":
path = cn_path_svg
        # Extract keywords with jieba's textrank
keywords = jieba.analyse.textrank(contents, topK=100, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'))
#print(f"keywords={keywords}")
        # Create an OpenCC converter for Simplified-to-Traditional Chinese
converter = OpenCC('s2t.json')
        # Count how many times each keyword appears
keyword_counts = {}
for keyword in keywords:
count = contents.count(keyword)
            keyword = converter.convert(keyword)  # Simplified to Traditional
keyword_counts[keyword] = count
print(keyword_counts)
        # Create a WordCloud object and set the font path and mask image
wordcloud = WordCloud(width=1600, height=800, background_color="white", font_path=font_path)
        # Generate the word cloud
wordcloud.generate_from_frequencies(keyword_counts)
else:
path = en_path_svg
wordcloud = WordCloud(width=1600, height=800, background_color="white", font_path=font_path, max_words=100).generate(contents)
keywords = wordcloud.words_
print(keywords)
svg_image = wordcloud.to_svg(embed_font=True)
with open(path, "w+", encoding='UTF8') as f:
f.write(svg_image)
print(f"已保存至{path}")
def readStoryDB(dbpath):
result_cn = ""
result_en = ""
content =dl.readDB(dbpath, "sent","postcardStory")
for id in content:
postcardID = id["id"]
content_en = id["content_en"]
content_cn = id["content_cn"]
comment_en = id["comment_en"]
comment_cn = id["comment_cn"]
data_en = f"{content_en}\n{comment_en}\n"
data_cn = f"{content_cn}\n{comment_cn}\n"
result_en += data_en
result_cn += data_cn
return result_cn,result_en
# Fetch, in real time, the account's full sent/received postcard lists and the detailed data for each postcardID
def getTravelingID(account,type,Cookie):
headers = {
'Host': 'www.postcrossing.com',
'X-Requested-With': 'XMLHttpRequest',
'Sec-Fetch-Site': 'same-origin',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Sec-Fetch-Mode': 'cors',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0.1 Mobile/15E148 Safari/604.1',
'Connection': 'keep-alive',
'Referer': f'https://www.postcrossing.com/user/{account}/{type}',
'Cookie': Cookie,
'Sec-Fetch-Dest': 'empty'
}
url=f'https://www.postcrossing.com/user/{account}/data/{type}'
response = requests.get(url,headers=headers).json()
travelingCount = len(response)
data = sorted(response, key=lambda x: x[7])
#print(data)
new_data = []
for i,stats in enumerate(data):
baseurl = "https://www.postcrossing.com"
formatted_item = {
'ID号': f"<a href='{baseurl}/travelingpostcard/{stats[0]}'>{stats[0]}</a>",
'收件人': f"<a href='{baseurl}/user/{stats[1]}'>{stats[1]}</a>",
'国家': stats[3],
'寄出时间': datetime.fromtimestamp(stats[4]).strftime('%Y/%m/%d'),
'距离': f'{format(stats[6], ",")} km',
'天数': stats[7]
}
new_data.append(formatted_item)
df = pd.DataFrame(new_data)
    # Re-index starting from 1
df.index = df.index + 1
    # Drop the serial-number column
#df = df.drop(columns=['序号'])
    # Convert the DataFrame to an HTML table, adding Bootstrap CSS classes and centred alignment
html_table = df.to_html(classes="table table-striped table-bordered", escape=False)
html_table = html_table.replace('<th>', '<th class="text-center">')
html_table = html_table.replace('<td>', '<td class="text-center">')
    # Build the complete HTML document
html_content = f'''
<!DOCTYPE html>
<html>
<head>
<title>还在漂泊的明信片</title>
<link rel="stylesheet" href="../src/bootstrap-5.2.2/package/dist/css/bootstrap.min.css">
<style>
.table-responsive {{
width: 100%;
overflow-x: auto;
}}
</style>
</head>
<body>
<div class="container-fluid">
<div class="table-responsive">
{html_table}
</div>
</div>
</body>
</html>
'''
    # Save the HTML table as a web page
with open(f'./output/{type}.html', 'w', encoding="utf-8") as file:
file.write(html_content)
return travelingCount
dl.replaceTemplateCheck()
excel_file="./template/postcardStory.xlsx"
StoryXLS2DB(excel_file)
replaceTemplate()
if os.path.exists(f"{dbpath}BAK"):
dbStat = dl.compareMD5(dbpath, f"{dbpath}BAK")
if dbStat == "1":
print(f"{dbpath} 有更新")
print(f"正在生成中、英文词库")
result_cn,result_en = readStoryDB(dbpath)
createWordCloud("cn",result_cn)
createWordCloud("en",result_en)
os.remove(f"{dbpath}BAK")
else:
print(f"{dbpath} 暂无更新")
os.remove(f"{dbpath}BAK")
|
arthurfsy2/Postcrossing_map_generator
|
scripts/createPersonalPage.py
|
createPersonalPage.py
|
py
| 15,829 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10380773430
|
import torch.nn as nn
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
@BACKBONES.register_module()
class RMNet(BaseBackbone):
    def __init__(self, depth, frozen_stages=-1, norm_eval=False):
        super(RMNet, self).__init__()
        self.frozen_stages = frozen_stages
        # norm_eval is read in train(); without setting it here the original code raised AttributeError there.
        self.norm_eval = norm_eval
self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inplanes = 64
stages = [2,2,2,2] if depth == 18 else [3,4,6,3]
self.layer1 = self._make_layer(64, stages[0], 1)
self.layer2 = self._make_layer(128, stages[1], 2)
self.layer3 = self._make_layer(256, stages[2], 2)
self.layer4 = self._make_layer(512, stages[3], 2)
self._freeze_stages()
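    # Each stage built by _make_layer stacks plain 3x3 conv blocks that first widen the channels to
    # inplanes + planes and then project back down to planes, using only ReLU activations
    # (no BatchNorm or residual additions inside the blocks).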
def _make_layer(self, planes, blocks, stride=1):
layers = []
layers.append(
nn.Sequential(
nn.Conv2d(self.inplanes, self.inplanes+planes, kernel_size=3,stride=stride, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.inplanes+planes, planes, kernel_size=3,stride=1, padding=1),
nn.ReLU(inplace=True)
)
)
self.inplanes = planes
for _ in range(1, blocks):
layers.append(
nn.Sequential(
nn.Conv2d(self.inplanes, self.inplanes+planes, kernel_size=3,stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.inplanes+planes, planes, kernel_size=3,stride=1, padding=1),
nn.ReLU(inplace=True)
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
x = self.layer1(x)
outs.append(x)
x = self.layer2(x)
outs.append(x)
x = self.layer3(x)
outs.append(x)
x = self.layer4(x)
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(RMNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
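# Minimal usage sketch (assumes torch is importable; the input size is illustrative):
#   import torch
#   net = RMNet(depth=18, frozen_stages=1)
#   feats = net(torch.randn(1, 3, 224, 224))  # tuple of 4 feature maps from layer1..layer4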
|
fxmeng/mmclassification
|
mmcls/models/backbones/rmnet.py
|
rmnet.py
|
py
| 2,851 |
python
|
en
|
code
| null |
github-code
|
6
|
35228257702
|
#!/usr/bin/env python
"""Visualisation.py: Visualise data from simulation"""
__author__ = "Murray Ireland"
__email__ = "[email protected]"
__date__ = "22/10/2018"
__copyright__ = "Copyright 2017 Craft Prospect Ltd"
__licence__ = ""
import vtk
import numpy as np
from math import tan, sin, cos, atan, pi
# import msvcrt
import sys, os
import platform
from datetime import datetime
from time import sleep
if platform.system() == "Windows":
from win32api import GetSystemMetrics
screen_size = (GetSystemMetrics(0), GetSystemMetrics(1))
elif platform.system() == "Linux":
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
root = tk.Tk()
screen_size = (root.winfo_screenwidth(), root.winfo_screenheight())
# Get current and base directories
cur_dir = os.path.dirname(os.path.realpath(__file__))
if "\\" in cur_dir:
base_dir = "/".join(cur_dir.split("\\"))
else:
base_dir = cur_dir
class Visualisation(object):
"""Class for visualisation object"""
# Scale time
TIME_SCALE = 1
# Turn text overlay on/off
SHOW_TEXT = True
# Full-screen and window scaling
FULL_SCREEN = True
WIN_H_SCALE = 1.
WIN_V_SCALE = 1.
TEXT_SCALE = 1.
# Use lower resolution textures where appropriate
USE_SMALL_IMAGES = False
# Scale entire animation for troubleshooting
ANI_SCALE = 1.
# Scale satellite up for better visibility
SAT_SCALE = 500. # Breaks if too small
# Colours
COLOUR_BG = (0, 0, 0)
COLOUR_FONT = (0.871, 0.246, 0.246)
# Set solar panel angles [deg]
PANEL_ANGLE = 10
# Tile size for Earth textures [m]
TILE_SIZE = (177390, 183360)
# Anchor settings
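    # Each anchor maps to (horizontal, vertical) justification indices used when placing text:
    # 0 = left/bottom, 1 = centre, 2 = right/top.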
ANCHOR = {
"SW": (0, 0),
"NW": (0, 2),
"NE": (2, 2),
"SE": (2, 0),
"N": (1, 2),
"E": (2, 1),
"S": (1, 0),
"W": (0, 1),
"C": (1, 1)
}
def __init__(self, PROPS, data):
"""Class constructor"""
# Screen size and aspect ratio
self.SCREEN_SIZE = (screen_size[0], screen_size[1])
self.SCREEN_AR = float(self.SCREEN_SIZE[0])/float(self.SCREEN_SIZE[1])
# Initialise property dictionaries
self.SAT_PROPS = PROPS["Sat"]
self.CAM_PROPS = PROPS["Camera"]
self.IMG_PROPS = PROPS["Imagery"]
self.LSR_POS = PROPS["Laser"]
self.EARTH_PROPS = PROPS["Earth"]
# Initialise simulation data
self.DATA = data
# Initialise imagery
self.IMG_PROPS["Texture size"] = (
self.IMG_PROPS["Size"]["full"][0]*self.IMG_PROPS["Res"]["full"],
self.IMG_PROPS["Size"]["full"][1]*self.IMG_PROPS["Res"]["full"]
)
# Initialise index
self.index = 0
# Initialise render window and interactor
self.renWin, self.iren = self.init_renderer()
self.ren = {}
# Create scenes
self.actors, self.cameras, self.text, self.lights = self.create_scenes()
for key in self.lights.keys():
for light in self.lights[key]:
self.ren[key].AddLight(light)
# Render scenes
self.iren.Initialize()
self.renWin.Render()
# Initialise time
now = datetime.now()
self.init_time = [now.hour, now.minute, now.second]
# Create timer event
self.iren.AddObserver("TimerEvent", self.execute)
timerId = self.iren.CreateRepeatingTimer(int(1000*self.DATA["Time step"]))
# Start interactor and timer
self.iren.Start()
# Stop timer?
# self.movieWriter.End()
def init_renderer(self):
"""Initialise render window and interactor"""
# Initialise render window
renWin = vtk.vtkRenderWindow()
if self.FULL_SCREEN:
renWin.FullScreenOn()
else:
renWin.SetSize(
int(self.WIN_H_SCALE*self.SCREEN_SIZE[0]),
int(self.WIN_V_SCALE*self.SCREEN_SIZE[1])
)
class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
def __init__(self, parent=None):
return None
# Initialise interactor
iren = vtk.vtkRenderWindowInteractor()
iren.SetInteractorStyle(MyInteractorStyle())
# iren.AutoAdjustCameraClippingRangeOn()
iren.SetRenderWindow(renWin)
return renWin, iren
def init_video(self):
"""Initialise video recorder"""
# Set up filter
imageFilter = vtk.vtkWindowToImageFilter()
imageFilter.SetInput(self.renWin)
imageFilter.SetInputBufferTypeToRGB()
imageFilter.ReadFrontBufferOff()
imageFilter.Update()
return imageFilter, 0
def add_to_ren(self, name, actors, camera, viewport, text):
"""Add elements of scene to renderer window"""
# Create renderer for scene
self.ren[name] = vtk.vtkRenderer()
# Add renderer to render window
self.renWin.AddRenderer(self.ren[name])
# Add camera and viewport
if camera != []:
self.ren[name].SetActiveCamera(camera)
self.ren[name].SetViewport(viewport)
# Add actors
for key in actors:
if type(actors[key]) is list:
for actor in actors[key]:
self.ren[name].AddActor(actor)
else:
self.ren[name].AddActor(actors[key])
self.ren[name].ResetCameraClippingRange()
# Add text
if type(text) is dict:
for actor in text:
self.ren[name].AddActor(text[actor])
else:
self.ren[name].AddActor(text)
self.ren[name].SetBackground(self.COLOUR_BG)
def create_scenes(self):
"""Create scenes"""
# Initialise dictionaries
cameras = {}
text = {}
lights = {}
# Create scenes
actors, cameras["Main"], text["Main"], lights["Main"] = self.scene_main()
# Return actors and cameras
return actors, cameras, text, lights
def scene_main(self):
"""Create main scene"""
# Create viewport
viewport = [0, 0, 1, 1]
# Camera settings
# Focal point offset from sat centre
foffset = [200, -400, 0]
# Distance from sat
cam_dist = 5e3
# Angles
pitch = -65
yaw = 2
# Focal point
fpoint = np.array([0., 0., -self.SAT_PROPS["Alt"]]) + np.array(foffset)
# Transform camera position
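        # The camera sits cam_dist behind the focal point: the offset [-cam_dist, 0, 0] is rotated
        # by pitch about the y-axis, then by yaw about the z-axis, and added to fpoint.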
prad = pitch*pi/180
yrad = yaw*pi/180
Rpitch = np.matrix([
[cos(prad), 0, sin(prad)],
[0, 1, 0],
[-sin(prad), 0, cos(prad)]
])
Ryaw = np.matrix([
[cos(yrad), -sin(yrad), 0],
[sin(yrad), cos(yrad), 0],
[0, 0, 1]
])
cam_pos = Ryaw*Rpitch*np.matrix([-cam_dist, 0., 0.]).T
cam_pos = np.array(cam_pos).flatten()
cam_pos = cam_pos + fpoint
# cam_pos = [-10, 0., -self.SAT_PROPS["Alt"] - cam_dist]
# Create camera
camera = vtk.vtkCamera()
camera.SetPosition(cam_pos)
camera.SetViewUp(0, 0, -1)
camera.SetViewAngle(15)
camera.SetFocalPoint(fpoint)
camera.SetClippingRange(0.001, 100)
# Create lights
lights = []
lights.append(vtk.vtkLight())
lights[0].SetPosition(cam_pos)
lights[0].SetFocalPoint([0., 0., -self.SAT_PROPS["Alt"]])
lights[0].SetColor(1., 1., 1.)
lights.append(vtk.vtkLight())
lights[1].SetPosition(0., 0., 0)
lights[1].SetFocalPoint([0., 0., -self.SAT_PROPS["Alt"]])
# Create actors
actors = {
"Sat body": self.create_sat_body(),
"Sat panels": self.create_sat_panels(),
"Earth": self.create_earth("true", "small"),
"Forward cam": self.create_cam_fov("Forward"),
"Redline": self.create_line(self.LSR_POS["Red"], [1., 0., 0.]),
"Greenline": self.create_line(self.LSR_POS["Green"], [0., 1., 0.]),
"Blueline": self.create_line(self.LSR_POS["Blue"], [0., 0., 1.])
}
for T, pos in zip(range(self.DATA["Target info"]["Num"]), self.DATA["Target info"]["Pos"]):
actors["T{}".format(T+1)] = self.create_sphere(pos)
# Text actors
text = {}
if self.SHOW_TEXT:
# Craft text
text["Craft"] = self.create_text(
{
"String": "CRAFT PROSPECT",
"Size": 60,
"Font": "Montserrat-SemiBold",
"Y offset": 0.06
},
viewport
)
# Subtitle text
text["QKD"] = self.create_text(
{
"String": "Demo Simulation",
"Size": 40,
"Style": "Normal"
},
viewport
)
# Time text
text["Time"] = self.create_text(
{
"String": "",
"Size": 50,
"Anchor": "NE",
"Font": "7Segment"
},
viewport
)
# Info text
text["Info"] = self.create_text(
{
"String": """Altitude: {:.0f} km
Velocity: {:.2f} km/s
""".format(
self.SAT_PROPS["Alt"]/1000,
self.DATA["Vel"][0, 0]/1000,
),
"Size": 20,
"Anchor": "NE",
"Y offset": 0.06
},
viewport
)
# Render scene
self.add_to_ren("Main", actors, camera, viewport, text)
# Reset clipping range
# camera.SetClippingRange(1000, 1000e3)
# Return actors to animate
return actors, camera, text, lights
def create_sat_body(self):
"""Generate satellite body geometry"""
# Dimensions of body
SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS["Size"])/2
bx = SAT_SIZE[0]
by = SAT_SIZE[1]
bz = SAT_SIZE[2]
# Create vertices in body frame
ind = 0
V = []
for x in [-1, 1]:
for y in [-1, 1]:
for z in [-1, 1]:
V.append((bx*x, by*y, bz*z))
# Create faces
F = [
(0, 1, 3, 2),
(4, 5, 7, 6),
(0, 1, 5, 4),
(2, 3, 7, 6),
(0, 2, 6, 4),
(1, 3, 7, 5)
]
# Create building blocks of polydata
sat = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(len(V)):
points.InsertPoint(i, V[i])
for i in range(len(F)):
polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(len(V)):
scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
sat.SetPoints(points)
del points
sat.SetPolys(polys)
del polys
sat.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(sat)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 0.5, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecular(1.0)
actor.GetProperty().SetSpecularPower(5.0)
actor.GetProperty().SetDiffuse(0.2)
# Move to sat position
actor.SetPosition(0, 0, -self.SAT_PROPS["Alt"])
return actor
def create_sat_panels(self):
"""Create satellite solar panel geometry"""
# Dimensions of body
SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS["Size"])/2
bx = SAT_SIZE[0]
by = SAT_SIZE[1]
bz = SAT_SIZE[2]
# Panel length
L = bx
# Panels
theta = self.PANEL_ANGLE*pi/180
px1 = bx - L*sin(theta)
py1 = by + L*cos(theta)
pz1 = bz
px2 = px1 + L*sin(theta)
py2 = py1 + L*cos(theta)
pz2 = pz1
# Vertices
V = [
(-bx, by, -bz),
(-bx, by, bz),
(-px1, py1, pz1),
(-px1, py1, -pz1),
(-px1, py1, -pz1),
(-px1, py1, pz1),
(-px2, py2, pz2),
(-px2, py2, -pz2),
(-bx, -by, -bz),
(-bx, -by, bz),
(-px1, -py1, pz1),
(-px1, -py1, -pz1),
(-px1, -py1, -pz1),
(-px1, -py1, pz1),
(-px2, -py2, pz2),
(-px2, -py2, -pz2)
]
# Create faces
F = [
(0, 1, 2, 3),
(4, 5, 6, 7),
(8, 9, 10, 11),
(12, 13, 14, 15)
]
# Create building blocks of polydata
sat = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(len(V)):
points.InsertPoint(i, V[i])
for i in range(len(F)):
polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(len(V)):
scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
sat.SetPoints(points)
del points
sat.SetPolys(polys)
del polys
sat.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(sat)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0., 0., 0.8)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(10.0)
actor.GetProperty().SetDiffuse(0.2)
# Move to sat position
actor.SetPosition(0, 0, -self.SAT_PROPS["Alt"])
return actor
def create_earth(self, imtype, size):
"""Create tiles for Earth geometry"""
# Update properties for tile
tile_props = {
"Size": self.IMG_PROPS["Texture size"],
"Translate": (
-self.IMG_PROPS["Offset"][0],
-self.IMG_PROPS["Texture size"][1]/2,
0
)
}
# Texture for tile
texture = f"{cur_dir}/images/samp1_{size}.jpg"
# Create actors
actor = self.create_plane(tile_props, texture)
return actor
def create_plane(self, props, texture):
"""Create flat plane"""
# Pull and scale dimensions
SIZE = np.asarray(props["Size"])
POS = np.asarray(props["Translate"])
# Create texture reader
reader = vtk.vtkJPEGReader()
reader.SetFileName(texture)
# Create texture object
texture = vtk.vtkTexture()
texture.SetInputConnection(reader.GetOutputPort())
texture.InterpolateOn()
# Create plane model
plane = vtk.vtkPlaneSource()
plane.SetResolution(1, 1)
plane.SetPoint1(0, SIZE[1], 0)
plane.SetPoint2(SIZE[0], 0, 0)
# Translate to centre
transP = vtk.vtkTransform()
transP.Translate(
POS[0],
POS[1],
POS[2]
)
tpd = vtk.vtkTransformPolyDataFilter()
tpd.SetInputConnection(plane.GetOutputPort())
tpd.SetTransform(transP)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tpd.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.SetTexture(texture)
actor.GetProperty().SetAmbient(1.0)
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(5.0)
actor.GetProperty().SetDiffuse(0.2)
return actor
def create_cam_fov(self, name):
"""Create FOV actor for camera"""
# Vertices of FOV
V = [
(0, 0, -self.SAT_PROPS["Alt"]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 0]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 1]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 2]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 3])
]
# Faces of FOV
F = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]
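        # The four triangles share vertex 0 (the satellite position) as an apex, forming a pyramid
        # down to the ground-intercept corners of the camera footprint.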
# Create building blocks of polydata
cam = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(5):
points.InsertPoint(i, V[i])
for i in range(4):
polys.InsertNextCell( self.mkVtkIdList(F[i]))
for i in range(5):
scalars.InsertTuple1(i,i)
# Assign the pieces to the vtkPolyData.
cam.SetPoints(points)
del points
cam.SetPolys(polys)
del polys
cam.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(cam)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 1, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetOpacity(0.1)
return actor
def create_line(self, pos, colour):
"""Create line"""
# Absolute source position
pos_abs = np.array([0., 0., -self.SAT_PROPS["Alt"]]) + np.array(pos)*self.SAT_SCALE
# Create line
line = vtk.vtkLineSource()
line.SetPoint1(pos_abs)
line.SetPoint2(2*self.SAT_PROPS["Alt"], 0., -self.SAT_PROPS["Alt"])
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(line.GetOutputPort())
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colour)
actor.GetProperty().SetOpacity(0.5)
actor.GetProperty().SetLineWidth(4)
actor.SetOrigin(0., 0., -self.SAT_PROPS["Alt"])
return actor
def create_sphere(self, position):
"""Create sphere of specific size"""
# Create source
source = vtk.vtkSphereSource()
source.SetCenter(0, 0, 0)
source.SetRadius(1.e3)
source.SetPhiResolution(40)
source.SetThetaResolution(40)
# Mapper
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(source.GetOutput())
else:
mapper.SetInputConnection(source.GetOutputPort())
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1, 0.5, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetOpacity(0.8)
actor.SetPosition(position)
# Return actor
return actor
def create_text(self, settings, viewport):
"""Create text actor for view labels"""
viewport = np.array(viewport)
viewport[[0, 2]] = self.WIN_H_SCALE*viewport[[0, 2]]
viewport[[1, 3]] = self.WIN_V_SCALE*viewport[[1, 3]]
viewport = list(viewport)
# Set defaults if not specified
defaults = {
"Size": 20,
"Anchor": "SW",
"X offset": 0.02,
"Y offset": 0.02,
"Font": "Montserrat",
"Colour": self.COLOUR_FONT
}
for key in defaults:
try:
settings[key]
except KeyError:
settings[key] = defaults[key]
# Position
margin = (
self.TEXT_SCALE*settings["X offset"]*(self.ANCHOR[settings["Anchor"]][0] - 1),
self.TEXT_SCALE*settings["Y offset"]*(self.ANCHOR[settings["Anchor"]][1] - 1)
)
posx = int((viewport[0] + 0.5*self.ANCHOR[settings["Anchor"]][0]*(viewport[2] - viewport[0]) - margin[0])*self.SCREEN_SIZE[0])
posy = int((viewport[1] + 0.5*self.ANCHOR[settings["Anchor"]][1]*(viewport[3] - viewport[1]) - margin[1])*self.SCREEN_SIZE[1])
# Properties
props = vtk.vtkTextProperty()
props.SetFontFamily(vtk.VTK_FONT_FILE)
if settings["Font"] == "Montserrat-SemiBold":
props.SetFontFile("./fonts/Montserrat-SemiBold.ttf")
elif settings["Font"] == "Consolas":
props.SetFontFile("./fonts/consola.ttf")
        elif settings["Font"] == "7Segment":
props.SetFontFile("./fonts/digital-7 (mono).ttf")
else:
props.SetFontFile("./fonts/Montserrat.ttf")
props.SetFontSize(int(self.TEXT_SCALE*settings["Size"]))
props.SetColor(settings["Colour"])
props.SetJustification(self.ANCHOR[settings["Anchor"]][0])
props.SetVerticalJustification(self.ANCHOR[settings["Anchor"]][1])
# Create actor
actor = vtk.vtkTextActor()
actor.SetInput(settings["String"])
actor.SetDisplayPosition(posx, posy)
actor.SetTextProperty(props)
return actor
def execute(self, obj, event):
"""Execute timed event"""
# Reset clipping range
# self.cameras["Main"].SetClippingRange(1000, 3000e3)
# Simulation time
T = self.DATA["Time"][self.index]
# Visualisation time
Tvis = T*self.TIME_SCALE
# Modes
adcs_mode = self.DATA["ADCS mode names"][int(self.DATA["ADCS mode"][self.index])]
payload_mode = self.DATA["Payload mode names"][int(self.DATA["Payload mode"][self.index])]
# Update Earth position
self.actors["Earth"].SetPosition(
-self.DATA["Pos"][0, self.index],
0,
0
)
# Update target positions
for trgt, pos in zip(range(self.DATA["Target info"]["Num"]), self.DATA["Target info"]["Pos"]):
self.actors["T{}".format(trgt+1)].SetPosition(
-self.DATA["Pos"][0, self.index] + pos[0],
pos[1],
pos[2]
)
# Update sightline
att_des = tuple(np.array(self.DATA["Inputs"][:, self.index])*180/pi)
self.actors["Redline"].SetOrientation(att_des)
self.actors["Greenline"].SetOrientation(att_des)
self.actors["Blueline"].SetOrientation(att_des)
if payload_mode in ["Synchronise"]:
self.actors["Redline"].GetProperty().SetOpacity(0)
self.actors["Greenline"].GetProperty().SetOpacity(0.9)
self.actors["Blueline"].GetProperty().SetOpacity(0)
elif payload_mode in ["Authenticate"]:
self.actors["Redline"].GetProperty().SetOpacity(0)
self.actors["Greenline"].GetProperty().SetOpacity(0)
self.actors["Blueline"].GetProperty().SetOpacity(0.9)
elif payload_mode in ["Key delivery"]:
self.actors["Redline"].GetProperty().SetOpacity(0.9)
self.actors["Greenline"].GetProperty().SetOpacity(0)
self.actors["Blueline"].GetProperty().SetOpacity(0.9)
else:
self.actors["Redline"].GetProperty().SetOpacity(0)
self.actors["Greenline"].GetProperty().SetOpacity(0)
self.actors["Blueline"].GetProperty().SetOpacity(0)
# Update satellite attitude
att = tuple(np.array(self.DATA["Att"][:, self.index])*180/pi)
for key in ["Sat body", "Sat panels"]:
self.actors[key].SetOrientation(att)
# Update text actors
hh = self.init_time[0]
ss = int(self.init_time[2] + Tvis)
mm = self.init_time[1] + (ss // 60)
ss = ss % 60
hh = hh + (mm // 60)
mm = mm % 60
self.text["Main"]["Time"].SetInput(
"{:02d}:{:02d}:{:02d}".format(hh, mm, ss)
)
# Update render window interactor
self.iren = obj
self.iren.GetRenderWindow().Render()
# Increment index, loop if at end of data
if self.index < len(self.DATA["Time"]) - 1:
self.index += 1
else:
self.index = 0
def mkVtkIdList(self, it):
"""Makes a vtkIdList from a Python iterable"""
vil = vtk.vtkIdList()
for i in it:
vil.InsertNextId(int(i))
return vil
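# Usage sketch: constructing the class starts the VTK interactor, so it blocks until the window is
# closed, e.g. Visualisation(PROPS, data) with the property and simulation-data dictionaries
# described in __init__.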
|
Craft-Prospect/CubeSatVis
|
python/visualisation.py
|
visualisation.py
|
py
| 24,867 |
python
|
en
|
code
| 2 |
github-code
|
6
|
655237097
|
import os
from glob import glob
import numpy as np
import pandas as pd
try:
import imageio.v2 as imageio
except ImportError:
import imageio
from tqdm import tqdm
from xarray import DataArray
from elf.evaluation import dice_score
def run_prediction(input_folder, output_folder):
import bioimageio.core
os.makedirs(output_folder, exist_ok=True)
inputs = glob(os.path.join(input_folder, "*.tif"))
model = bioimageio.core.load_resource_description("10.5281/zenodo.5869899")
with bioimageio.core.create_prediction_pipeline(model) as pp:
for inp in tqdm(inputs):
fname = os.path.basename(inp)
out_path = os.path.join(output_folder, fname)
            image = imageio.imread(inp)  # the import alias above already points at the v2 API when available
input_ = DataArray(image[None, None], dims=tuple("bcyx"))
pred = bioimageio.core.predict_with_padding(pp, input_)[0].values.squeeze()
imageio.volwrite(out_path, pred)
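# evaluate() below compares the first channel of each prediction against the binarised ground-truth
# labels with a Dice score and reports the mean per LiveCELL cell type.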
def evaluate(label_folder, output_folder):
cell_types = ["A172", "BT474", "BV2", "Huh7",
"MCF7", "SHSY5Y", "SkBr3", "SKOV3"]
grid = pd.DataFrame(columns=["Cell_types"] + cell_types)
row = ["all"]
for i in cell_types:
label_files = glob(os.path.join(label_folder, i, "*.tif"))
this_scores = []
for label_file in label_files:
fname = os.path.basename(label_file)
pred_file = os.path.join(output_folder, fname)
label = imageio.imread(label_file)
pred = imageio.volread(pred_file)[0]
score = dice_score(pred, label != 0, threshold_gt=None, threshold_seg=None)
this_scores.append(score)
row.append(np.mean(this_scores))
grid.loc[len(grid)] = row
print("Cell type results:")
print(grid)
def main():
# input_folder = "/home/pape/Work/data/incu_cyte/livecell/images/livecell_test_images"
output_folder = "./predictions"
# run_prediction(input_folder, output_folder)
label_folder = "/home/pape/Work/data/incu_cyte/livecell/annotations/livecell_test_images"
evaluate(label_folder, output_folder)
if __name__ == "__main__":
main()
|
constantinpape/torch-em
|
experiments/unet-segmentation/livecell/check_cell_type_performance.py
|
check_cell_type_performance.py
|
py
| 2,148 |
python
|
en
|
code
| 42 |
github-code
|
6
|
72609945147
|
import requests
from parsel import Selector
url='http://www.porters.vip/verify/uas/index.html'
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
headers = {'User-Agent': 'PostmanRuntime/7.26.2',
'Host': 'www.porters.vip',
'Accept':'*/*',
'Connection':'keep-alive',
           'Accept-Encoding': 'gzip,deflate,br'}
r=requests.get(url,headers=headers)
sel = Selector(r.text)
print(r.status_code)
print(sel.css('.list-group-item::text').extract())
|
0xdeciverAngel/anti-web-crawler
|
user agent.py
|
user agent.py
|
py
| 569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43193724256
|
# -*- coding: utf-8 -*-
import streamlit as st
from topics import TopicModel
import pandas as pd
import numpy as np
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
import base64
@st.cache(allow_output_mutation=True)
def load_corpus(url):
return tm.load_corpus(url)
# check the cache if there is already a model for this url, stopwords and number_of_topics
@st.cache(allow_output_mutation=True, persist=True, show_spinner=False)
def lda_model(url, stopwords, number_of_topics):
corpus = load_corpus(url)
corpus.update_stopwords(stopwords)
with st.spinner("Training the topic model for {} topics ...".format(number_of_topics)):
print("*** Training the topic model: {}".format(number_of_topics))
return lda_model_no_cache(corpus, number_of_topics)
def lda_model_no_cache(corpus, number_of_topics):
if use_heuristic_alpha_value:
return tm.fit(corpus, number_of_topics, alpha="talley", number_of_chunks=number_of_chunks)
else:
return tm.fit(corpus, number_of_topics, number_of_chunks=number_of_chunks)
# check the cache if there are already runs for this url, stopwords and number_of_topics
@st.cache(allow_output_mutation=True, persist=True, show_spinner=False)
def lda_model_runs(url, stopwords, number_of_topics, n=4):
corpus = load_corpus(url)
corpus.update_stopwords(stopwords)
with st.spinner("Creating {} different topic models".format(n)):
lda_models = [lda_model_no_cache(corpus, number_of_topics) for run in range(n)]
return lda_models
def topic_alignment(n):
lda_models = lda_model_runs(url, stopwords, number_of_topics, n=n)
topics = pd.DataFrame([[" ".join([tw[0] for tw in lda.lda.show_topic(t, 10)]) for lda in lda_models]
for t in range(number_of_topics)])
diff = [lda_models[0].difference(lda_models[i]) for i in range(1, n)]
matches = pd.DataFrame()
matches[0] = range(number_of_topics)
for i in range(1, n):
_, cols = linear_sum_assignment(diff[i-1])
matches[i] = cols
return topics, matches, lda_models, diff
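# topic_alignment trains n LDA runs and aligns the topics of runs 1..n-1 to those of run 0 with the
# Hungarian algorithm (linear_sum_assignment) on the pairwise topic-difference matrices;
# matches[i][t] is the topic index in run i matched to topic t of run 0.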
def highlight_topic(x, topic, matches, color="lightgreen"):
color = "background-color: %s" % (color)
df = pd.DataFrame('', x.index, x.columns)
for run in range(len(x.columns)):
df[run].loc[matches[run][topic]] = color
return df
def topic_runs(lda_models, topic, matches):
keywords = pd.DataFrame()
weights = pd.DataFrame()
documents = pd.DataFrame()
for run in range(len(lda_models)):
keywords[run] = [tw[0] for tw
in lda_models[run].lda.show_topic(matches[run][topic], 10)]
weights[run] = [tw[1] for tw
in lda_models[run].lda.show_topic(matches[run][topic], 10)]
# todo: while correct, this is inefficient as the DTM is recomputed for each run
documents[run] = document_topics_matrix(lda_models[run])[matches[run][topic]]
return lda_models, keywords, weights, documents
# done: once we pass weights, use the relative weights to assign colors
# relative weight = weight / lowest weight in top 10
# for all keywords that are repeated across runs, color them yellow if all >= 2,
# green if only some >= 2, and blue if all < 2
def highlight_repeated_keywords(keywords, weights):
df = pd.DataFrame('', keywords.index, keywords.columns)
num_runs, num_words = len(keywords.columns), len(keywords.index)
# extract array from data frame
# we transpose the array so that each row represents one run
keywords = keywords.values.T
weights = weights.values.T
repeated_keywords = []
for keyword in keywords[0]:
i = 0
for run in range(1, num_runs):
if keyword in keywords[run]:
i = i + 1
# print("keyword {} occurs {} times".format(keyword, i))
if i == num_runs - 1:
repeated_keywords.append(keyword)
color = keyword_color(repeated_keywords, num_runs, num_words, keywords, weights)
for j in range(num_runs):
for i in range(num_words):
if keywords[j,i] in repeated_keywords:
df[j].loc[i] = "background-color: light%s" % (color[keywords[j,i]])
return df
def keyword_color(repeated_keywords, num_runs, num_words, keywords, weights):
color = {}
for keyword in repeated_keywords:
color[keyword] = None
for j in range(num_runs):
for i in range(num_words):
if keywords[j,i] in repeated_keywords:
ratio = weights[j,i]/weights[j,num_words-1]
if ratio >= 2.0:
if color[keywords[j,i]] is None:
color[keywords[j,i]] = 'yellow'
elif color[keywords[j,i]] == 'blue':
color[keywords[j,i]] = 'green'
else:
if color[keywords[j,i]] is None:
color[keywords[j,i]] = 'blue'
elif color[keywords[j,i]] == 'yellow':
color[keywords[j,i]] = 'green'
return color
def document_topics_matrix(lda):
dtm = []
for document_bow in corpus.bow():
dtm.append(topics_sparse_to_full(lda.get_document_topics(document_bow)))
return pd.DataFrame(dtm)
def topics_sparse_to_full(topics):
topics_full = [0] * number_of_topics # pythonic way of creating a list of zeros
for topic, score in topics:
topics_full[topic] = score
return topics_full
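# Example: with number_of_topics = 4, topics_sparse_to_full([(1, 0.7), (3, 0.3)]) returns [0, 0.7, 0, 0.3].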
def download_link_from_csv(csv, file_name, title="Download"):
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = "<a href='data:file/csv;base64,{}' download='{}'>{}</a>".format(b64, file_name, title)
st.markdown(href, unsafe_allow_html=True)
def download_link_from_html(html, file_name, title="Download"):
b64 = base64.b64encode(html.encode()).decode() # some strings <-> bytes conversions necessary here
href = "<a href='data:file/html;base64,{}' download='{}'>{}</a>".format(b64, file_name, title)
st.markdown(href, unsafe_allow_html=True)
st.sidebar.title("Topic Model Explorer")
tm = TopicModel()
url = st.sidebar.file_uploader("Corpus", type="csv")
stopwords = st.sidebar.text_area("Stopwords (one per line)")
update_stopwords = st.sidebar.button("Update stopwords")
if update_stopwords:
if url is not None:
corpus = load_corpus(url)
corpus.update_stopwords(stopwords)
show_documents = st.sidebar.checkbox("Show documents", value=True)
if show_documents:
st.header("Corpus")
if url is not None:
corpus = load_corpus(url)
if('name' not in corpus.documents or 'content' not in corpus.documents):
st.markdown('''
The corpus must have a *name* and a *content* column.
''')
st.dataframe(corpus.documents)
download_link_from_csv("\n".join(corpus.stopwords), "stopwords.txt",
"Download stopwords")
else:
st.markdown("Please upload a corpus.")
number_of_topics = st.sidebar.slider("Number of topics", 1, 50, 10)
use_heuristic_alpha_value = st.sidebar.checkbox("Use heuristic value for alpha", value=False)
number_of_chunks = st.sidebar.slider("Number of chunks", 1, 100, 100)
show_runs = st.sidebar.checkbox("Compare topic model runs", value=False)
if show_runs:
st.header("Topic Model Runs")
topic_to_highlight = st.sidebar.selectbox("Highlight topic", range(number_of_topics), 0)
show_runs_all_topics = st.sidebar.checkbox("Show all topics", value=True)
if url is None:
st.markdown("No corpus")
elif show_runs_all_topics:
topics, matches, lda_models, diff = topic_alignment(4)
st.table(topics.style
.apply(highlight_topic, topic=topic_to_highlight, matches=matches, axis=None))
download_link_from_csv(topics.to_csv(index=False),
"tm-{}-runs.csv".format(number_of_topics),
title="Download topics as CSV")
else:
# todo: topic_alignment to return weights as well
# then pass weights as argument to highlight_repeated_keywords
topics, matches, lda_models, diff = topic_alignment(4)
lda_models, keywords, weights, documents = topic_runs(lda_models, topic=topic_to_highlight, matches=matches)
st.table(keywords.style
.apply(highlight_repeated_keywords, weights=weights, axis=None))
download_link_from_csv(keywords.to_csv(index=False),
"tm-{}-{}-keywords.csv".format(number_of_topics, topic_to_highlight),
title="Download keywords as CSV")
download_link_from_html(keywords.style
.apply(highlight_repeated_keywords, weights=weights, axis=None)
.render(),
"tm-{}-{}-keywords.html".format(number_of_topics, topic_to_highlight),
title="Download keywords as HTML (with colors)")
st.table(weights)
        download_link_from_csv(weights.to_csv(index=False),
"tm-{}-{}-weights.csv".format(number_of_topics, topic_to_highlight),
title="Download weights as CSV")
st.dataframe(documents)
|
michaelweiss/topic-model-explorer
|
old/topic_model_explorer_stability.py
|
topic_model_explorer_stability.py
|
py
| 8,264 |
python
|
en
|
code
| 2 |
github-code
|
6
|
36264192726
|
from PyQt5.QtCore import QModelIndex, pyqtSignal, pyqtSlot, QVariant, QFile, QByteArray, QBuffer, QIODevice, QSize, \
QItemSelectionModel, QItemSelection
from PyQt5.QtGui import QPixmap, QFont, QIcon
from PyQt5.QtSql import QSqlQuery
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QDialog, QMessageBox
from TreeModel import TreeModel
from inputdialog import InputDialog
from connect1 import Connect1
from newForm import *
class Widget2(QWidget):
def __init__(self):
super().__init__()
self.db = None
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.setFixedSize(600, 400)
self.setWindowTitle("Лейман М.А.")
label = QLabel(self)
label.move(25, 25)
label.setFixedSize(550, 350)
label.setStyleSheet('background-color: rgb(180, 190, 200)')
label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
label.setFont(QFont('Arial', 15))
label.setWordWrap(True)
label.setText('Тестовая программа\n \nПостроение из базы данных иерархического списка\n в виде дерева,'
'с возможностью редактирования\n и добавления дочерних элементов. \n \n \n '
' выполнил: Лейман М.А.\n тел: +79613224543\n email: [email protected]')
        self.setWindowFlags(QtCore.Qt.Dialog)  # makes the window non-minimisable
class Widget1(QtWidgets.QWidget):
valueChangedSignal = pyqtSignal(list)
valueInsertSignal = pyqtSignal(list)
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
        self.idRowNew = None  # id of the newly inserted row
self.__iDialog = None
self.w = None
self.y = None
self.ui = Ui_Widget()
        pixmap = QPixmap(":/img2.png")  # load the window emblem image
self.ui.setupUi(self)
self.ui.label_2.setPixmap(pixmap)
self.ui.label.setPixmap(pixmap)
self.ui.treeView.setAttribute(QtCore.Qt.WA_StyledBackground, True)
        self.ui.treeView.setStyleSheet('background-color: rgb(170, 190,195)')  # background colour for the tree view
self.ui.button.setToolTip("<h3>Нажми меня</h3>")
self.ui.button.setStyleSheet("background-color : rgb(10, 120, 10)")
self.db = Connect1("task")
cursor = self.db.connect_to_data_base
treemodel = TreeModel()
self.ui.treeView.setModel(treemodel)
        self.ui.button.clicked.connect(self.window_open)  # opens the "About" window +
        self.ui.delRow.clicked.connect(self.removeRowTree)  # deletes a row (node) together with its child rows +
        self.ui.modifi.clicked.connect(self.modifiRow)  # edits the data of a row (node) -
        self.ui.addRow.clicked.connect(self.insertChildTree)  # adds a row (node) -
        self.ui.treeView.selectionModel().selectionChanged[QItemSelection, QItemSelection].connect(self.updateActions)
        self.ui.treeView.selectionModel().currentRowChanged[QModelIndex, QModelIndex].connect(self.slotCurrentPic)
        self.valueChangedSignal[list].connect(self.editDataBase)  # updates the selected item in the database
        self.valueInsertSignal[list].connect(self.insertDataBase)  # inserts new data into the database
        self.ui.treeView.setColumnHidden(2, True)  # hides columns 2, 3 and 4
self.ui.treeView.setColumnHidden(3, True)
self.ui.treeView.setColumnHidden(4, True)
header = self.ui.treeView.header()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.Fixed)
header.setSectionResizeMode(3, QtWidgets.QHeaderView.Fixed)
header.setSectionResizeMode(4, QtWidgets.QHeaderView.Fixed)
header.setStretchLastSection(False)
self.updateActions()
def window_open(self):
self.w = Widget2()
self.w.show()
# self.hide()
def closeEvent(self, event):
self.db.close_db()
for window in QApplication.topLevelWidgets():
window.close()
def slotCurrentPic(self, index: QModelIndex):
yurModel = self.ui.treeView.model()
item = yurModel.getItem(index)
pix = item.data(1)
if not isinstance(pix, QByteArray):
# sss = QByteArray(item.data(1).encode())
sss = ":/img2.png"
outPixmap = QPixmap(sss)
pixmap = outPixmap.scaledToWidth(200)
else:
sss = pix
outPixmap = QPixmap()
outPixmap.loadFromData(sss)
dd = outPixmap.width()
pixmap = outPixmap.scaledToWidth(200)
self.ui.label.setPixmap(pixmap)
@pyqtSlot()
def insertChildTree(self):
pInputDialog = InputDialog()
if pInputDialog.flag:
            name = pInputDialog.name()  # read the entered data
image = pInputDialog.image()
state = pInputDialog.state()
var = pInputDialog.destroyed
            index = self.ui.treeView.selectionModel().currentIndex()  # model index of the selected item
            model = self.ui.treeView.model()  # the tree model
            colCount = model.columnCount(index)
            itemParent = model.getItem(index)  # the selected item becomes the parent of the inserted element
            idParentRow = int(itemParent.data(2))  # id of the selected row becomes id_parent of the inserted element
newValue = list()
newValue.append(name)
newValue.append(image)
newValue.append(state)
newValue.append(idParentRow)
            self.valueInsertSignal.emit(newValue)  # emit the signal that writes the data to the DB
            newValue.clear()
            query2 = QSqlQuery()  # fetch the stored image back
query2.prepare("SELECT * FROM hierarhy WHERE id =?;")
query2.addBindValue(self.idRowNew)
query2.exec()
query2.next()
image2 = query2.value(3)
query2.clear()
newValue.append(name)
newValue.append(image2)
newValue.append(self.idRowNew)
newValue.append(idParentRow)
newValue.append(state)
rowNew = model.rowCount(index)
if not model.insertRow(rowNew, index):
return
dictRole = (0, 1, 0, 0, 0)
for column in range(colCount):
                indexChild = model.index(rowNew, column, index)  # model index of the inserted element
                model.setData(indexChild, newValue[column], dictRole[column])  # write the data into the model column at that index
self.ui.treeView.selectionModel().reset()
self.updateActions()
def updateActions(self):
hasSelection = not self.ui.treeView.selectionModel().selection().isEmpty()
self.ui.delRow.setEnabled(hasSelection)
self.ui.modifi.setEnabled(hasSelection)
@pyqtSlot()
def modifiRow(self):
pInputDialog = InputDialog()
if pInputDialog.flag:
            name = pInputDialog.name()  # read the entered data
image = pInputDialog.image()
state = pInputDialog.state()
var = pInputDialog.destroyed
            index = self.ui.treeView.selectionModel().currentIndex()  # model index of the item
            model = self.ui.treeView.model()
            item2 = model.getItem(index)  # the selected item
            rowItem = item2.rowNumber()  # row number of the item within its parent node
            idRow = int(item2.data(2))  # id of the selected row
            idRowParent = int(item2.data(3))  # id_parent of the selected row
            parent = model.parent(index)  # index of the parent
newValue = list()
newValue.append(name)
newValue.append(image)
newValue.append(state)
newValue.append(idRow)
            self.valueChangedSignal.emit(newValue)  # emit the signal that writes the data to the DB
            newValue.clear()
            query2 = QSqlQuery()  # fetch the stored image back
query2.prepare("SELECT * FROM hierarhy WHERE id =?;")
query2.addBindValue(idRow)
query2.exec()
query2.next()
image2 = query2.value(3)
query2.clear()
newValue.append(name)
newValue.append(image2)
newValue.append(idRow)
newValue.append(idRowParent)
newValue.append(state)
            model.beginResetModel1()  # update the data in the row
colCount = model.columnCount(index)
dictRole = (0, 1, 0, 0, 0)
for column in range(colCount):
                indexInsert = model.index(rowItem, column, parent)  # index of the row being edited (current element)
model.setData(indexInsert, newValue[column], dictRole[column])
model.endResetModel1()
newValue.clear()
self.updateActions()
var = pInputDialog.destroyed
    def removeRowTree(self):
        """Deletes a row together with all of its dependent rows."""
model = self.ui.treeView.model()
        index = self.ui.treeView.selectionModel().currentIndex()  # model index of the selected item
self.remoweItemRows(index, model)
self.ui.treeView.selectionModel().reset()
self.updateActions()
    def remoweItemRows(self, index: QModelIndex, model: TreeModel):  # removes items from the list of children
item = model.getItem(index)
        childCountItem = item.childCount()  # number of children of the item
        numRow = item.rowNumber()  # row number of the item
        indexParent = model.parent(index)  # index of the item's parent
if childCountItem > 0:
for numRowChild in range(childCountItem - 1, - 1, -1):
indexChild = model.index(numRowChild, 0, index)
                self.remoweItemRows(indexChild, model)  # cascade-delete the descendants of each child
        idRow = int(item.data(2))  # id of the row
        if not model.hasChildren(index):  # if there are no descendants left, delete the node
query2 = QSqlQuery()
query2.prepare("DELETE FROM hierarhy WHERE id =?;")
query2.addBindValue(idRow)
query2.exec()
query2.clear()
        model.removeRow(numRow, indexParent)  # remove the current node after all of its children have been removed
@pyqtSlot(list)
    def insertDataBase(self, newValue: list):
        """Inserts new data into the database."""
strName = str(newValue[0])
if strName == '':
return
strImg1 = str(newValue[1])
        file = QFile(strImg1)  # create a QFile object
        dataImg = QByteArray()  # buffer the data will be read into
        # inBuffer = QBuffer(dataImg)
        if file.open(QIODevice.ReadOnly):  # check whether the file can be opened for reading
            dataImg = file.readAll()  # read the data
query3 = QSqlQuery()
query3.exec("INSERT INTO hierarhy (id_parent,name,image,state) VALUES (?, ?, ?, ?)")
strIdParent = int(newValue[3])
query3.addBindValue(strIdParent)
query3.addBindValue(strName)
query3.addBindValue(dataImg)
strState = str(newValue[2])
query3.addBindValue(strState)
query3.exec()
self.idRowNew = int(query3.lastInsertId())
query3.clear()
@pyqtSlot(list)
def editDataBase(self, newValue: list):
strName = str(newValue[0])
if strName == '':
return
strImg1 = newValue[1]
        file = QFile(strImg1)  # create a QFile object
        dataImg = QByteArray()  # buffer the data will be read into
        if file.open(QIODevice.ReadOnly):  # check whether the file can be opened for reading
dataImg = file.readAll()
query3 = QSqlQuery()
query3.prepare("UPDATE hierarhy SET name=?, image=?, state=? WHERE id =?;")
strName = str(newValue[0])
query3.addBindValue(strName)
query3.addBindValue(dataImg)
strState = str(newValue[2])
query3.addBindValue(strState)
idRow = int(newValue[3])
query3.addBindValue(idRow)
query3.exec()
query3.next()
if not query3.isActive():
QMessageBox.warning(self, "Database Error", query3.lastError().text())
|
drug173/Python
|
applications/Tree1/widget1.py
|
widget1.py
|
py
| 13,903 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
19621184365
|
#import libraries
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
"""
This tutorial from the AI at UCLA's technical blog post:
https://uclaacmai.github.io/Linear-Regression
"""
testlines = []
testans = []
trainlines = []
trainans = []
for line in open("boston2.txt", 'r').readlines()[0:100]:
tl = line.strip('\n').strip(' ').split(' ')
    testlines.append(list(map(lambda x: float(x.strip(' ')), tl[0:13])))  # list() so NumPy gets values, not a map object
testans.append([float(tl[13].strip(' '))])
for line in open("boston2.txt", 'r').readlines()[100:]:
tl = line.strip('\n').strip(' ').split(' ')
    trainlines.append(list(map(lambda x: float(x.strip(' ')), tl[0:13])))
trainans.append([float(tl[13].strip(' '))])
X_train = np.array(trainlines, dtype=np.float32)
X_test = np.array(testlines, dtype=np.float32)
Y_train = np.array(trainans, dtype=np.float32)
Y_test = np.array(testans, dtype=np.float32)
print(",".join([str(t.shape) for t in (X_train, X_test, Y_train, Y_test)]))
prices = Y_train.tolist()
student_teacher_ratios = [X_train[i][10] for i in range(X_train.shape[0])]
plt.scatter(student_teacher_ratios,prices)
plt.show()
X = tf.placeholder(tf.float32,shape=[None,13])
Y = tf.placeholder(tf.float32, shape = [None,1])
W = tf.Variable(tf.constant(0.1,shape=[13,1]))
b = tf.Variable(tf.constant(0.1))
y_pred = tf.matmul(X,W) + b
loss = tf.reduce_mean(tf.square(y_pred - Y))
opt = tf.train.GradientDescentOptimizer(learning_rate = 0.5).minimize(loss)
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
initial_loss = loss.eval(feed_dict = {X:X_train, Y:Y_train})
print("initial loss: {}".format(initial_loss))
for i in range(100):
#Run the optimization step with training data
sess.run(opt, feed_dict = {X:X_train, Y:Y_train})
print("epoch "+str(i)+"loss:{}".format(loss.eval(feed_dict = {X:X_train, Y:Y_train})))
final_test_loss = loss.eval(feed_dict = {X:X_test,Y:Y_test})
print("final loss (test): {}".format(final_test_loss))
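# Note: this script uses the TensorFlow 1.x graph API (tf.placeholder, tf.train, InteractiveSession);
# running it under TensorFlow 2 would require `import tensorflow.compat.v1 as tf` plus
# `tf.disable_v2_behavior()`, or a rewrite to tf.keras.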
|
canders1/COMSC343
|
_site/pdf/regression_class.py
|
regression_class.py
|
py
| 1,961 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16543939917
|
import functools
def makeTable(grid):
"""Create a REST table."""
def makeSeparator(num_cols, col_width, header_flag):
if header_flag == 1:
return num_cols * ("+" + (col_width) * "=") + "+\n"
else:
return num_cols * ("+" + (col_width) * "-") + "+\n"
def normalizeCell(string, length):
return string + ((length - len(string)) * " ")
cell_width = 2 + max(
functools.reduce(
lambda x, y: x + y, [[len(item) for item in row] for row in grid], []
)
)
num_cols = len(grid[0])
rst = makeSeparator(num_cols, cell_width, 0)
header_flag = 1
for row in grid:
rst = (
rst
+ "| "
+ "| ".join([normalizeCell(x, cell_width - 1) for x in row])
+ "|\n"
)
rst = rst + makeSeparator(num_cols, cell_width, header_flag)
header_flag = 0
return rst
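# Example: makeTable([["a", "b"], ["1", "2"]]) renders a small grid table whose first row is treated
# as the header (its separator row uses "=" instead of "-").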
|
Nuitka/Nuitka
|
nuitka/utils/Rest.py
|
Rest.py
|
py
| 927 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
75189166908
|
import pygame, threading
pygame.init()
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 128)
X = 400
Y = 400
display_surface = pygame.display.set_mode((X, Y ))
pygame.display.set_caption('Show Text')
font = pygame.font.Font('freesansbold.ttf', 32)
text = font.render('GeeksForGeeks', True, green, blue)
textRect = text.get_rect()
textRect.center = (X // 2, Y // 2)
def updateGUI():
while True:
display_surface.fill(white)
display_surface.blit(text, textRect)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
pygame.quit()
quit()
pygame.display.update()
GUI = threading.Thread(target = updateGUI, args = ())
GUI.daemon = True
GUI.start()
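# Note: GUI is a daemon thread, so the interpreter exits (and the window disappears) as soon as the
# main thread finishes; in a larger program, code after GUI.start() must keep the main thread alive.
# Handling pygame events outside the main thread can also be unreliable on some platforms.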
|
ger534/Proyecto2Arqui2
|
examplePygame.py
|
examplePygame.py
|
py
| 779 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6497432832
|
from stock_util import StockUtil
from logger import Logger
from stock_db import StockDb
import time
import threading
import requests
from pandas import DataFrame
import pandas as pd
class StockMon():
def __init__(self):
self.logger = Logger("StockMon")
self.util = StockUtil()
def get_xueqiu_info(self,url):
cookie_url = "https://xueqiu.com"
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}
r = requests.get(cookie_url,headers=headers)
cookies = r.cookies
r1 = requests.get(url,headers=headers,cookies=cookies)
#self.logger.info(r1.text)
stock_list = eval(r1.text)['stocks']
return DataFrame(stock_list)
def get_market_status_from_xueqiu(self,direction,page_number,page_size):
        # direction = "asc": biggest losers first; direction = "desc": biggest gainers first
url = "https://xueqiu.com/stock/cata/stocklist.json?page=%s&size=%s&order=%s&orderby=percent&type=11%%2C12&_=1541985912951"%(page_number,page_size,direction)
#self.logger.info(url)
return self.get_xueqiu_info(url)
def get_market_status(self,direction,page_number,page_size,use_proxy=0):
#direction=0 means top n, direction=1 means bottom n
proxies = {'http': 'http://18.197.117.119:8080', 'https': 'http://18.197.117.119:8080'}
detail_url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?\
page=%s&num=%s&sort=changepercent&asc=%s&node=hs_a&symbol=&_s_r_a=init"%(page_number,page_size,direction)
if use_proxy==1:
resp = requests.get(detail_url,proxies=proxies)
else:
resp = requests.get(detail_url)
#self.logger.info(resp.text)
if resp.text=='null':
return ''
elif '?xml' in resp.text:
self.logger.info(resp.text)
else:
return eval(resp.text.replace('symbol','"symbo"').replace('code','"code"').replace('name','"name"').replace('trade','"trade"').\
replace('pricechange','"pricechange"').replace('changepercent','"changepercent"').replace('buy','"buy"').replace('sell','"sell"').\
replace('settlement','"settlement"').replace('open','"open"').replace('high','"high"').replace('low','"low"').\
replace('volume','"volume"').replace('amount','"amount"').replace('ticktime','"ticktime"').replace('per:','"per":').\
replace('pb','"pb"').replace('mktcap','"mktcap"').replace('nmc','"nmc"').replace('turnoverratio','"turnoverratio"'))
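    # The Sina endpoint returns pseudo-JSON with unquoted keys, so the chain of replace() calls
    # above quotes each known key before eval(); a stricter JSON parser would reject the raw text.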
def get_zt_number(self):
#Get zt number
market_status = self.get_market_status(0,1,100)
for i in range(100):
if float(market_status[i]['changepercent'])<9.7:
self.logger.info("涨停个数:%s"%i)
return i
def get_dt_number(self):
#Get dt number
market_status = self.get_market_status(1,1,100)
for i in range(100):
if float(market_status[i]['changepercent'])>-9.7:
self.logger.info("跌停个数:%s"%i)
return i
def monitor_bid(self,stock_list,refresh_interval):
sample = {}
for s in stock_list:
aoi = self.util.get_live_aoi_bid(s)
sample[s] = aoi
while True:
self.logger.info("================Monitor==============")
self.logger.info("股票名称(股票ID)| 涨幅 | 竞买价 | 竞买量(万手)")
for s in stock_list:
status = self.util.get_live_mon_items_bid(s)
aoi = self.util.get_live_aoi_bid(s)
if aoi-sample[s]>1:
plus_icon = "[↑+%s]"%(round(aoi-sample[s],2))
self.logger.info("*%s %s"%(status,plus_icon))
elif aoi-sample[s]<-1:
plus_icon = "[↓%s]"%(round(aoi-sample[s],2))
self.logger.info("*%s %s"%(status,plus_icon))
else:
self.logger.info(status)
'''
if aoi-sample[s]>2:
self.logger.info("Stock %s aoi increased from %s to %s"%(s,sample[s],aoi))
elif aoi-sample[s]<-2:
self.logger.info("Stock %s aoi dropped from %s to %s"%(s,sample[s],aoi))
'''
sample[s] = aoi
time.sleep(refresh_interval)
def check_stock_list(self,stock_list):
self.logger.info("================Monitor==============")
status = '\n'.join(self.util.get_summary_status(stock_list))
self.logger.info(status)
def monitor_after_bid(self,stock_list,refresh_interval):
while True:
self.logger.info("===============Monitor===============")
self.logger.info("股票名称(股票ID)| 开盘涨幅 | 当前涨幅 | 当前价格 | 成交量(万手)| 成交金额(亿)")
for s in stock_list:
self.logger.info(self.util.get_live_mon_items(s))
time.sleep(refresh_interval)
def check_top_and_bottom(self,n):
status = self.get_market_status(0,1,n)
df = DataFrame(status)
df1 = df[['symbo','name','changepercent','trade','open','high','low','volume','turnoverratio']]
print(df1)
status = self.get_market_status(1,1,n)
df = DataFrame(status)
df1 = df[['symbo','name','changepercent','trade','open','high','low','volume','turnoverratio']]
print(df1)
def get_top_n_df(self,direction,n):
#direction=0 means top n, direction=1 means bottom n
status = self.get_market_status(direction,1,n)
df = DataFrame(status)
ret = df[['symbo','name','changepercent','trade','open','turnoverratio']]
print(ret)
return ret
def sum_top_n_list(self,direction,n):
'''
tmp_csv = 'tmp.csv'
df = self.get_top_n_df(direction,n)
df.to_csv(tmp_csv,index=False)
f = open(tmp_csv,'r')
out = open('out.csv','w')
line_number = 0
sample_count = 3
for line in f.readlines():
item = line.replace('\n','')
if line_number==0:
target_line = ",%s,"%(item)
else:
s = item.split(',')[0]
s_name = item.split(',')[1]
#self.logger.info(s)
if s_name.startswith('N'):
target_line = "%s,%s,"%(line_number,item)
else:
db = StockDb()
tmp = []
turn_over_list = db.get_last_n_turnover(s,sample_count)
for t in turn_over_list:
tmp.append(str(t))
turn_over_sample = ','.join(tmp)
pchg_list = db.get_last_n_pchg(s,sample_count)
for t in pchg_list:
tmp.append(str(t))
pchg_sample = ','.join(tmp)
target_line = ("%s,%s,%s,%s"%(line_number,item,turn_over_sample,pchg_sample))
line_number = line_number+1
out.write("%s\n"%(target_line))
f.close()
out.close()
'''
df1 = pd.read_csv('out.csv',index_col=0)
with open('output.html','w',encoding="gb2312") as f:
f.write(df1.to_html())
def get_bid_sample_list(self,top_n=100): #run on 9:20, get stock_list which is in top n
url = 'https://xueqiu.com/stock/cata/stocklist.json?page=1&size=%s&order=desc&orderby=percent&type=11%%2C12&_=1541985912951'%(top_n)
df = self.get_xueqiu_info(url)
df1 = df[['symbol','name','current','percent','volume']]
#print(df1)
s_list = df1['symbol'].values.tolist()
#print(s_list)
return s_list
def mon_bid(self):
sample_list = self.get_bid_sample_list()
f = open('bid.txt','w')
while True:
time.sleep(20) #every 20 seconds, check diff(new_list,sample_list)...
new_list = self.get_bid_sample_list()
check_list = []
for s in new_list:
if s not in sample_list:
check_list.append(s)
for s in check_list:
self.logger.info("================Please check the following==============")
status = self.util.get_live_status(s)
self.logger.info(s)
self.logger.info(status)
f.write(s)
f.close()
if __name__ == '__main__':
t = StockMon()
t.mon_bid()
#df = t.get_bid_sample_list()
#stock_list = t.get_top_n_list(100)
#print(stock_list)
#t.sum_top_n_list(0,100)
#check(50)
#df = DataFrame(t.get_market_status(0,1,50))
#df1 = df.iloc[:,10:20]
#df1 = df.iloc[:,0:10]
#print(df1)
#t.get_bid_sample_list()
#t.mon_bid()
'''
f = open('t1.csv','w')
for i in range(1,40):
status = t.get_market_status(0,i,100)
if status == '':
print('No data in this %s page!'%i)
break
else:
df = DataFrame(status)
csv_file = 't.csv'
#df1 = df.loc[df.turnoverratio>5]
#df1 = df.iloc[:,10:20]
df1 = df[['code','open','high','low','trade','volume','turnoverratio','changepercent']]
print(df1)
df1.to_csv(f,header=False,index=False)
f.close()
'''
|
jia-zhang/fp-client
|
lib/stock_mon.py
|
stock_mon.py
|
py
| 9,923 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3477657730
|
import logging
import posixpath
from collections import defaultdict
from typing import TYPE_CHECKING, Callable, Dict, Generator, Optional, Tuple
from ...stash import ExpStashEntry
from ..base import BaseExecutor
from ..ssh import SSHExecutor, _sshfs
from .base import BaseExecutorManager
if TYPE_CHECKING:
from scmrepo.git import Git
from dvc.repo import Repo
logger = logging.getLogger(__name__)
class SSHExecutorManager(BaseExecutorManager):
EXECUTOR_CLS = SSHExecutor
def __init__(
self,
scm: "Git",
wdir: str,
host: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
fs_factory: Optional[Callable] = None,
**kwargs,
):
assert host
super().__init__(scm, wdir, **kwargs)
self.host = host
self.port = port
self.username = username
self._fs_factory = fs_factory
def _load_infos(self) -> Generator[Tuple[str, "BaseExecutor"], None, None]:
# TODO: load existing infos using sshfs
yield from []
@classmethod
def from_stash_entries(
cls,
scm: "Git",
wdir: str,
repo: "Repo",
to_run: Dict[str, ExpStashEntry],
**kwargs,
):
machine_name: Optional[str] = kwargs.get("machine_name", None)
manager = cls(
scm, wdir, **repo.machine.get_executor_kwargs(machine_name)
)
manager._enqueue_stash_entries(scm, repo, to_run, **kwargs)
return manager
def sshfs(self):
return _sshfs(self._fs_factory, host=self.host, port=self.port)
def get_infofile_path(self, name: str) -> str:
return f"{name}{BaseExecutor.INFOFILE_EXT}"
def _exec_attached(self, repo: "Repo", jobs: Optional[int] = 1):
from dvc.exceptions import DvcException
from dvc.stage.monitor import CheckpointKilledError
assert len(self._queue) == 1
result: Dict[str, Dict[str, str]] = defaultdict(dict)
rev, executor = self._queue.popleft()
info = executor.info
infofile = posixpath.join(
info.root_dir,
info.dvc_dir,
"tmp",
self.get_infofile_path(rev),
)
try:
exec_result = executor.reproduce(
info=executor.info,
rev=rev,
infofile=infofile,
log_level=logger.getEffectiveLevel(),
fs_factory=self._fs_factory,
)
if not exec_result.exp_hash:
raise DvcException(
f"Failed to reproduce experiment '{rev[:7]}'"
)
if exec_result.ref_info:
result[rev].update(
self._collect_executor(repo, executor, exec_result)
)
except CheckpointKilledError:
# Checkpoint errors have already been logged
return {}
except DvcException:
raise
except Exception as exc:
raise DvcException(
f"Failed to reproduce experiment '{rev[:7]}'"
) from exc
finally:
self.cleanup_executor(rev, executor)
return result
def cleanup_executor(self, rev: str, executor: "BaseExecutor"):
executor.cleanup()
|
gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs
|
myenve/Lib/site-packages/dvc/repo/experiments/executor/manager/ssh.py
|
ssh.py
|
py
| 3,336 |
python
|
en
|
code
| 3 |
github-code
|
6
|
11557761416
|
import sys
from classes import *
import pprint
import os
# Automatically builds a DFA for a given system keyword.
def make_system_dfa(name, keyword):
digit = "1234567890"
char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
length = len(keyword)
dfa = Dfa(name)
dfa.set_final_states([length])
for i in range(0, length):
dfa.add_rule(i,i+1, keyword[i])
return dfa
# Builds a DFA that recognises a single character.
def make_single_dfa(name, char):
dfa = Dfa(name)
dfa.set_final_states([1])
dfa.add_rule(0, 1, char)
return dfa
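# For example, make_single_dfa(Token.ASSIGN, "=") builds a two-state DFA that accepts exactly "=".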
# The DFA construction code is long, so it is split out into a separate function.
def set_dfa(token_scanner):
nz = "123456789"
digit = "1234567890"
char = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
# system keyword (Keyword stmt)
token_scanner.add_dfa(make_system_dfa(Token.IF, "if"))
token_scanner.add_dfa(make_system_dfa(Token.ELSE, "else"))
token_scanner.add_dfa(make_system_dfa(Token.WHILE, "while"))
token_scanner.add_dfa(make_system_dfa(Token.FOR, "for"))
token_scanner.add_dfa(make_system_dfa(Token.RETURN, "return"))
# system keyword (Vtype)
vtype_dfa = Dfa(Token.V_TYPE)
vtype_dfa.set_final_states([3, 7, 11, 16])
vtype_dfa.add_rule(0, 1, "i")
vtype_dfa.add_rule(1, 2, "n")
vtype_dfa.add_rule(2, 3, "t")
vtype_dfa.add_rule(0, 4, "c")
vtype_dfa.add_rule(4, 5, "h")
vtype_dfa.add_rule(5, 6, "a")
vtype_dfa.add_rule(6, 7, "r")
vtype_dfa.add_rule(0, 12, "f")
vtype_dfa.add_rule(12, 13, "l")
vtype_dfa.add_rule(13, 14, "o")
vtype_dfa.add_rule(14, 15, "a")
vtype_dfa.add_rule(15, 16, "t")
token_scanner.add_dfa(vtype_dfa)
    # arithmetic operators
addsub_dfa = Dfa(Token.ADDSUB)
addsub_dfa.set_final_states([1])
addsub_dfa.add_rule(0, 1, "-")
addsub_dfa.add_rule(0, 1, "+")
token_scanner.add_dfa(addsub_dfa)
multdiv_dfa = Dfa(Token.MULTDIV)
multdiv_dfa.set_final_states([1])
multdiv_dfa.add_rule(0, 1, "*")
multdiv_dfa.add_rule(0, 1, "/")
token_scanner.add_dfa(multdiv_dfa)
# comparison operators
comp_dfa = Dfa(Token.COMP)
comp_dfa.set_final_states([1, 2, 3, 4, 6, 8])
comp_dfa.add_rule(0, 1, "<")
comp_dfa.add_rule(1, 2, "=")
comp_dfa.add_rule(0, 3, ">")
comp_dfa.add_rule(3, 4, "=")
comp_dfa.add_rule(0, 5, "=")
comp_dfa.add_rule(5, 6, "=")
comp_dfa.add_rule(0, 7, "!")
comp_dfa.add_rule(7, 8, "=")
token_scanner.add_dfa(comp_dfa)
    # white space
ws_dfa = Dfa(Token.WHITE_SPACE)
ws_dfa.set_final_states([1])
ws_dfa.add_rule(0, 1, "\t")
ws_dfa.add_rule(0, 1, "\n")
ws_dfa.add_rule(0, 1, " ")
ws_dfa.add_rule(1, 1, "\t")
ws_dfa.add_rule(1, 1, "\n")
ws_dfa.add_rule(1, 1, " ")
token_scanner.add_dfa(ws_dfa)
#assign
token_scanner.add_dfa(make_single_dfa(Token.ASSIGN, "="))
#semicolon
semi_dfa = Dfa(Token.SEMI)
semi_dfa.set_final_states([1])
semi_dfa.add_rule(0, 1, ";")
token_scanner.add_dfa(semi_dfa)
# brackets
token_scanner.add_dfa(make_single_dfa(Token.L_PAREN, "("))
token_scanner.add_dfa(make_single_dfa(Token.R_PAREN, ")"))
token_scanner.add_dfa(make_single_dfa(Token.L_BRACE, "{"))
token_scanner.add_dfa(make_single_dfa(Token.R_BRACE, "}"))
# comma
token_scanner.add_dfa(make_single_dfa(Token.COMMA, ","))
# integer
integer_dfa = Dfa(Token.NUM)
integer_dfa.set_final_states([1, 3])
integer_dfa.add_rule(0, 1, "0")
integer_dfa.add_rule(0, 2, "-")
integer_dfa.add_rule(0, 3, nz)
integer_dfa.add_rule(2, 3, nz)
integer_dfa.add_rule(3, 3, digit)
token_scanner.add_dfa(integer_dfa)
# literal
literal_dfa = Dfa(Token.LITERAL)
literal_dfa.set_final_states([2])
literal_dfa.add_rule(0, 1, "\"")
literal_dfa.add_rule(1, 1, digit)
literal_dfa.add_rule(1, 1, char)
literal_dfa.add_rule(1, 1, " ")
literal_dfa.add_rule(1, 2, "\"")
token_scanner.add_dfa(literal_dfa)
#float
float_dfa = Dfa(Token.FLOAT)
float_dfa.set_final_states([5])
float_dfa.add_rule(0, 1, "-")
float_dfa.add_rule(0, 2, nz)
float_dfa.add_rule(0, 3, "0")
float_dfa.add_rule(1, 2, nz)
float_dfa.add_rule(1, 3, "0")
float_dfa.add_rule(2, 2, digit)
float_dfa.add_rule(2, 4, ".")
float_dfa.add_rule(3, 4, ".")
float_dfa.add_rule(4, 5, digit)
float_dfa.add_rule(5, 5, nz)
float_dfa.add_rule(5, 6, "0")
float_dfa.add_rule(6, 5, nz)
float_dfa.add_rule(6, 6, "0")
token_scanner.add_dfa(float_dfa)
#id
id_dfa = Dfa(Token.ID)
id_dfa.set_final_states([1])
id_dfa.add_rule(0, 1, char)
id_dfa.add_rule(0, 1, "_")
id_dfa.add_rule(1, 1, char)
id_dfa.add_rule(1, 1, "_")
id_dfa.add_rule(1, 1, digit)
token_scanner.add_dfa(id_dfa)
def main(file_path):
with open(file_path, mode="r") as f:
literal_list = f.read()
# print(literal_list)
    # Pass the source code that was read in to the Token Scanner for parsing.
token_scanner = TokenScanner(literal_list)
    # Create the DFAs used to recognize tokens and register them with the token_scanner.
set_dfa(token_scanner)
    # Create a list to store the parsed tokens.
token_list = []
while True:
        # Try to parse a single token.
ret = token_scanner.parse_token()
        # ret is None -> parsing failed or parsing finished.
filename, file_extension = os.path.splitext(file_path)
new_filename = f"{filename}.out"
if ret is None:
if token_scanner.parse_end() is True:
# print("성공")
# 성공했을 때의 출력
# pprint.pprint(token_list)
with open(new_filename, "w") as f:
import json
f.write(json.dumps({"body": token_list, "original": literal_list}))
# f.writelines(map(lambda t: f"{t}\n", token_list))
else:
end_pos = token_scanner.start_pos
all_lines = literal_list[0:end_pos + 1]
line_number = len(all_lines.splitlines())
literal_list_lines = literal_list.splitlines(keepends=True)
print(literal_list_lines, literal_list_lines[0:line_number])
length_line_before = len(''.join(literal_list_lines[0:line_number - 1]))
print(length_line_before)
local_pos = end_pos - length_line_before + 1
print(f"local_pos {local_pos} = end_pos {end_pos} - {length_line_before} + 1")
str = ""
str = str + f"error at line number {line_number}, column {local_pos}.\n\n"
original_line = literal_list_lines[line_number - 1]
str = str + f"{original_line}\n"
print(str)
with open(new_filename, "w") as f:
f.write(str)
pass
break
token_list.append(ret)
if len(token_list) > 1 \
and (token_list[-1][0] in [Token.NUM, Token.FLOAT] and "-" in token_list[-1][1]):
# print(1)
            # If the immediately preceding token is a number, split off the minus sign
            # Otherwise keep the token as-is
finding_token = None
for i in range(len(token_list) - 1, 0, -1):
                i = i - 1 # adjust the index from the reversed range iteration.
                # Skip whitespace tokens while searching.
if token_list[i][0] == Token.WHITE_SPACE:
continue
finding_token = token_list[i]
break
if (finding_token is not None) and finding_token[0] in [Token.NUM, Token.FLOAT]:
# print(f"split {token_list[-1]}")
token_list[-1] = (token_list[-1][0], token_list[-1][1].replace("-", ""))
token_list.insert(-1, (Token.ADDSUB, "-"))
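            # Example of the splitting above (illustration only): for the input "3-2" the scanner
            # first emits NUM "3" and then NUM "-2"; because a number precedes it, "-2" is rewritten
            # as ADDSUB "-" followed by NUM "2".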
if __name__ == "__main__":
if(len(sys.argv) < 2):
print("plaese pass file path")
sys.exit()
file_path = sys.argv[1]
print("File path : " + file_path)
main(file_path)
|
pula39/compiler_assignment1
|
lexical.py
|
lexical.py
|
py
| 8,178 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5073225846
|
import asyncio
import logging
import aiohttp
import aiohttp.server
logger = logging.getLogger(__name__)
class ProxyRequestHandler(aiohttp.server.ServerHttpProtocol):
"""
Inspired by https://github.com/jmehnle/aiohttpproxy
Copyright Julian Mehnle, Apache License 2.0
"""
def __init__(self):
super(ProxyRequestHandler, self).__init__(keep_alive_on=False)
self.logger = logger
@asyncio.coroutine
def handle_request(self, message, payload):
url = message.path
logger.info('{0} {1}'.format(message.method, url))
if message.method in ('POST', 'PUT', 'PATCH'):
data = yield from payload.read()
else:
data = None
message, data = self.intercept_request(message, data)
if not message:
return
response = yield from aiohttp.request(message.method, url, headers=message.headers,
data=data)
response_content = yield from response.content.read()
response, response_content = self.intercept_response(response, response_content)
yield from self.response_to_proxy_response(response, response_content)
def response_to_proxy_response(self, response, response_content):
proxy_response = aiohttp.Response(self.writer, response.status, http_version=response.version)
# Copy response headers, except for Content-Encoding header,
# since unfortunately aiohttp transparently decodes content.
proxy_response_headers = [(name, value)
for name, value
in response.headers.items() if name not in ('CONTENT-ENCODING',)]
proxy_response.add_headers(*proxy_response_headers)
proxy_response.send_headers()
proxy_response.write(response_content)
yield from proxy_response.write_eof()
def intercept_request(self, message, data):
return message, data
def intercept_response(self, response, content):
return response, content
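# A minimal interceptor sketch (illustration only; LoggingProxy is a hypothetical subclass,
# not part of the original project). It shows how the intercept_* hooks can be overridden
# to observe traffic while passing it through unchanged.
class LoggingProxy(ProxyRequestHandler):
    def intercept_response(self, response, content):
        # Log the upstream status and body size before forwarding the response as-is.
        self.logger.info('upstream returned %s (%d bytes)', response.status, len(content))
        return response, content
# Usage sketch: run(8080, cls=LoggingProxy)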
def run(port, cls=None):
cls = cls or ProxyRequestHandler
loop = asyncio.get_event_loop()
logging.basicConfig(
format='[proxy] {asctime} {levelname} {name}: {message}',
style='{',
level=logging.DEBUG
)
server_future = loop.create_server(lambda: cls(), '', port)
server = loop.run_until_complete(server_future)
logger.info('Accepting HTTP proxy requests on {0}:{1} ...'.format(*server.sockets[0].getsockname()))
loop.run_forever()
if __name__ == '__main__':
run(8080)
|
raphaelm/cockatiel
|
functional_tests/utils_proxy.py
|
utils_proxy.py
|
py
| 2,602 |
python
|
en
|
code
| 4 |
github-code
|
6
|
41058208626
|
import re
class Poly:
def __init__(self,*terms):
# __str__ uses the name self.terms for the dictionary of terms
# So __init__ should build this dictionary from terms
self.terms = {}
for coeff,power in terms:
assert type(coeff) in [int,float]
assert type(power) in [int]
assert power >=0, 'Power must be greater than or equal to 0.'
assert power not in self.terms.keys(), 'Power already exists.'
if coeff == 0:
pass
else:
self.terms[power] = coeff
# Fill in the rest of this method, using *terms to intialize self.terms
# I have written str(...) because it is used in the bsc.txt file and
# it is a bit subtle to get correct. Notice that it assumes that
# every Poly object stores a dict whose keys are powers and whose
# associated values are coefficients. This function does not depend
# on any other method in this class being written correctly.
def __str__(self):
def term(c,p,var):
return (str(c) if p == 0 or c != 1 else '') +\
('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
if len(self.terms) == 0:
return '0'
else:
return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
def __repr__(self):
return 'Poly(' + ','.join([str(item) for item in self.terms.items()]) + ')'
def __len__(self):
return max(self.terms.keys())
def __call__(self,arg):
result = 0
for power,coeff in self.terms.items():
result += coeff*(arg**power)
return result
def __iter__(self):
ordered = sorted(self.terms, reverse=True)
for power in ordered:
yield (self.terms[power],power)
def __getitem__(self,index):
if type(index) != int or index < 0:
raise TypeError
if index not in self.terms.keys():
return 0
return self.terms[index]
def __setitem__(self,index,value):
power = index
coeff = value
if type(power) != int or power < 0:
raise TypeError
else:
if coeff == 0:
                if power in self.terms.keys():
del self.terms[power]
else:
self.terms[power] = coeff
def __delitem__(self,index):
if type(index) != int or index < 0:
raise TypeError
if index in self.terms.keys():
del self.terms[index]
def _add_term(self,c,p):
assert type(c) in [int,float]
assert type(p) == int and p >= 0
if p not in self.terms.keys():
self.terms[p] = c
elif p in self.terms.keys():
self.terms[p] = self.terms[p] + c
if self.terms[p] == 0:
del self.terms[p]
def __add__(self,right):
for r_power,r_coeff in right.terms.items():
self._add_term(r_coeff,r_power)
return self
def __radd__(self,left):
self.__add__(left)
return self
def __mul__(self,right):
return self
def __rmul__(self,left):
self.__mul__(left)
return self
    def __eq__(self,right):
        return str(self) == str(right)
if __name__ == '__main__':
# Some simple tests; you can comment them out and/or add your own before
# the driver is called.
# print('Start simple tests')
# p = Poly((3,2),(-2,1), (4,0))
# print(' For Polynomial: 3x^2 - 2x + 4')
# print(' str(p):',p)
# print(' repr(p):',repr(p))
# print(' len(p):',len(p))
# print(' p(2):',p(2))
# print(' list collecting iterator results:',[t for t in p])
# print(' p+p:',p+p)
# print(' p+2:',p+2)
# print(' p*p:',p*p)
# print(' p*2:',p*2)
# print('End simple tests\n')
import driver
#driver.default_show_exception=True
#driver.default_show_exception_message=True
#driver.default_show_traceback=True
driver.driver()
|
solomc1/python
|
ics 33/solutions/ile2 solutions/Lab 1/ZhongAaron/poly.py
|
poly.py
|
py
| 4,707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70911492987
|
print('zohair hashmi - 18b-127-cs - Section A')
print('Practice Problem - 3.39')
def collision(x1,y1,r1,x2,y2,r2):
import math
c_diff = ((x2-x1)**2+(y2-y1)**2)
if c_diff <= (r1+r2)**2:
return True
else:
return False
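# Note (explanatory): the test compares squared distances, so the circles touch or overlap
# exactly when (x2-x1)**2 + (y2-y1)**2 <= (r1+r2)**2, and no square root is needed.
# E.g. centers (0,1) and (3,2) are sqrt(10) apart and the radii sum to 4, so they collide.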
x1 = collision(0,1,2,3,2,2)
print(x1)
x2 = collision(2,5,2,1,0,2)
print(x2)
|
zohairhashmi17/Programming-Fundamentals-Detailed-Assignment
|
3.39.py
|
3.39.py
|
py
| 347 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41495871576
|
from rdkit import Chem
import argparse
import math
import os
from sklearn.metrics import mean_squared_error, r2_score
from statistics import stdev
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate the RMSD value for a molecular property.')
parser.add_argument('--original', '-o', metavar='ORIGINAL', required=True, type=str, help='File with original property values. SDF or SMILES format.')
parser.add_argument('--prediction', '-p', metavar='PREDICTION', required=True, type=str, help='File with predicted property values. SDF or SMILES format.')
parser.add_argument('--optimized', '-q', metavar="OPTIMIZED", type=str, help="File with optimized property predictions. SDF or SMILES format.")
parser.add_argument('-i', '--id', type=str, default="_Name", help="ID tag for files in SD format.")
parser.add_argument('-l', '--logfile', type=str, default="evaluation_result.log")
parser.add_argument('--label_predicted', type=str, required=True, help="Label of the prediction.")
parser.add_argument('--label_optimized', type=str, required=True, help="Label of the optimized prediction.")
    parser.add_argument('property', metavar='Property', type=str, help='Property of interest (should be the same tag in both files).')
args = parser.parse_args()
original_dict = {}
pred_dict = {}
opti_dict = {}
orig_ext = os.path.splitext(args.original)[-1].lower()
pred_ext = os.path.splitext(args.prediction)[-1].lower()
id_label = args.id
# collect original values
if orig_ext == ".sdf":
original_mols = Chem.SDMolSupplier(args.original)
for mol in original_mols:
original_dict[mol.GetProp(id_label)] = float(mol.GetProp(args.property))
else:
for line in open(args.original, "r"):
line_arr = line.split("\t")
original_dict[line_arr[1]] = float(line_arr[2])
    # collect predicted values
no_deviation = False
if pred_ext ==".sdf":
pred_mols = Chem.SDMolSupplier(args.prediction)
for mol in pred_mols:
pred_dict[mol.GetProp(id_label)] = float(mol.GetProp(args.property))
else:
for line in open(args.prediction, "r"):
line_arr = line.split("\t")
pred_dict[line_arr[1]] = float(line_arr[2])
if args.optimized:
if os.path.splitext(args.optimized)[-1].lower() == ".sdf":
pred_mols = Chem.SDMolSupplier(args.optimized)
for mol in pred_mols:
opti_dict[mol.GetProp(id_label)] = float(mol.GetProp(args.label_optimized))
else:
for line in open(args.prediction, "r"):
line_arr = line.split("\t")
pred_dict[line_arr[1]] = float(line_arr[2]) + float(line_arr[3])
sum_sq = 0
preds = []
orgs = []
optis_all = [] # including unoptimizable prediction values
optis_only = []
orgs_only = []
unoptimizable_ids = []
unopt_values = []
unopt_values_for_ori = []
pred_in_net = []
pred_not_in_net = []
for id in pred_dict.keys():
preds.append(pred_dict[id])
orgs.append(original_dict[id])
diff = (pred_dict[id] - original_dict[id])
if args.optimized:
if id in opti_dict.keys():
optis_only.append(opti_dict[id])
orgs_only.append(original_dict[id])
optis_all.append(opti_dict[id])
pred_in_net.append(pred_dict[id])
else:
unoptimizable_ids.append(id)
optis_all.append(pred_dict[id])
unopt_values.append(pred_dict[id])
unopt_values_for_ori.append(original_dict[id])
pred_not_in_net.append(pred_dict[id])
with open(args.logfile, "w") as f:
stdDev_orig_all = stdev(orgs)
stdDev_orig_only_in_net = stdev(orgs_only)
stdDev_orig_only_not_in_net = stdev(unopt_values_for_ori)
stdDev_opt_all = stdev(optis_all)
stdDev_opt_only_in_net = stdev(optis_only)
f.write("StdDevs:\n")
f.write(f"original all values: {stdDev_orig_all}\n")
f.write(f"original only in net values: {stdDev_orig_only_in_net}\n")
f.write(f"original only NOT in net values: {stdDev_orig_only_not_in_net}\n")
f.write(f"optimized all values: {stdDev_opt_all}\n")
f.write(f"optimized only in net values: {stdDev_opt_only_in_net}\n\n")
f.write(f" Root Mean Square Deviation R² Num_of_Samples\n")
f.write(f"Predictions(all): {mean_squared_error(orgs, preds, squared=False):.6f} {r2_score(orgs, preds):.6f} {len(orgs)}\n")
f.write(f"Predictions(only in net): {mean_squared_error(orgs_only, pred_in_net, squared=False):.6f} {r2_score(orgs_only, pred_in_net):.6f} {len(orgs_only)}\n")
f.write(f"Predictions(only NOT in net): {mean_squared_error(unopt_values_for_ori, pred_not_in_net, squared=False):.6f} {r2_score(unopt_values_for_ori, pred_not_in_net):.6f} {len(unopt_values_for_ori)}\n\n")
if len(optis_all) > 0:
f.write(f"Optimized (all): {mean_squared_error(orgs, optis_all, squared=False):.6f} {r2_score(orgs, optis_all):.6f} {len(orgs)}\n")
f.write(
f"Optimized (only): {mean_squared_error(orgs_only, optis_only, squared=False):.6f} {r2_score(orgs_only, optis_only):.6f} {len(orgs_only)}\n\n")
if len(unopt_values) > 0:
f.write(f"Scores (un_opt): {mean_squared_error(unopt_values_for_ori, unopt_values, squared=False):.6f} {r2_score(unopt_values_for_ori, unopt_values):.6f} {len(unopt_values)}\n")
f.write(f"\nUnoptimizable molecules (IDs) ({len(unoptimizable_ids)} mols):\n")
for id in unoptimizable_ids:
f.write(f"{id}\n")
|
sophiahoenig/NetworkBalanceScaling
|
utils/evaluate_results.py
|
evaluate_results.py
|
py
| 6,038 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32500175374
|
from random import randint
"""
כתבו תוכנית הבוחרת באקראי מספר בין 1 ל-100. על המשתמש לנחש את המספר שנבחר ואחרי כל ניחוש יש להדפיס ״גדול מדי״ או ״קטן מדי״ לפי היחס בין המספר שנבחר לניחוש.
בונוס: כדי שיהיה מעניין דאגו שמדי פעם התוכנית תדפיס את ההודעה הלא נכונה.
"""
def check_user_input(user_number, number):
    if user_number > number:
        print("The number you chose is higher than the number that was raffled")
    elif user_number < number:
        print("The number you chose is lower than the number that was raffled")
    else:
        print("YOU GUESSED THE NUMBER --> WELL DONE ")
        return True
    return False
number = randint(1, 100)
while True:
    try:
        print("Hello User, guess the raffled number:")
        if check_user_input(int(input()), number):
            break
    except ValueError:
        print("Please enter a number")
|
eehud738/python-
|
Section 2/HW6.py
|
HW6.py
|
py
| 1,025 |
python
|
he
|
code
| 0 |
github-code
|
6
|
8834313838
|
import mysql.connector
from mysql.connector import Error
class MySQL:
def __init__(self, host='localhost', database=None, user=None, password=None):
if database == None:
return print("Please, enter your Database name!")
elif user == None:
return print("Please, enter your user name!")
elif password == None:
return print("Please, enter your password!")
else:
try:
self.connection = mysql.connector.connect(
host=host,
database=database,
user=user,
password=password
)
if self.connection.is_connected():
db_Info = self.connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
cursor = self.connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
print("You're connected to database:", record)
except Error as e:
print("Error while connecting to MySQL:", e)
def create_table(self, sql_script=None):
if sql_script == None:
return print("Please, enter your SQL Script to Create Table.")
else:
try:
cursor = self.connection.cursor()
result = cursor.execute(sql_script)
print("Table created successfully!")
except mysql.connector.Error as error:
print("Failed to create table in MySQL: {}".format(error))
finally:
cursor.close()
def insert_data(self, insert_query=None, records_to_insert=None):
if insert_query == None:
return print("Please, enter your SQL Script to insert data in the table.")
elif records_to_insert == None:
return print("Please, enter your records data to insert data in the table.")
else:
try:
cursor = self.connection.cursor()
cursor.executemany(insert_query, records_to_insert)
self.connection.commit()
print(cursor.rowcount, "Record inserted successfully into table")
except mysql.connector.Error as error:
print("Failed to insert records into MySQL table: {}".format(error))
finally:
cursor.close()
def get_hospital_detail(self, hospital_id=None):
if hospital_id == None:
return print("Please, enter Hospital ID.")
else:
try:
cursor = self.connection.cursor()
select_query = """select * from Hospital where Hospital_Id = %s"""
cursor.execute(select_query, (hospital_id,))
records = cursor.fetchall()
print("Printing Hospital record")
for row in records:
print("Hospital Id:", row[0], )
print("Hospital Name:", row[1])
print("Bed Count:", row[2])
except (Exception, mysql.connector.Error) as error:
print("Error while getting data", error)
finally:
cursor.close()
def get_doctor_detail(self, doctor_id=None):
if doctor_id == None:
return print("Please, enter Doctor ID.")
else:
try:
cursor = self.connection.cursor()
select_query = """select * from Doctor where Doctor_Id = %s"""
cursor.execute(select_query, (doctor_id,))
records = cursor.fetchall()
print("Printing Doctor record")
for row in records:
print("Doctor Id:", row[0])
print("Doctor Name:", row[1])
print("Hospital Id:", row[2])
print("Joining Date:", row[3])
print("Specialty:", row[4])
print("Salary:", row[5])
print("Experience:", row[6])
except (Exception, mysql.connector.Error) as error:
print("Error while getting data", error)
finally:
cursor.close()
def close_connection(self):
if self.connection.is_connected():
self.connection.close()
print("MySQL connection is closed.")
|
drigols/studies
|
modules/python-codes/modules/mysql/modules/exercises/hospital/hospital/database.py
|
database.py
|
py
| 3,910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30543923938
|
import sys
import numpy as np
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
from Preprocessor import Preprocessor
"""
Dataset Snapshot:
Dataset A:
Normal
Murmur
Extra Heart Sound
Artifact
Dataset B:
Normal
Murmur
Extrasystole
"""
class PASCAL(Preprocessor):
def __init__(self):
super().__init__()
self.dataset_dir = {"normal": ["./data/PASCAL/Atraining_normal/", "./data/PASCAL/Training B Normal/"],
"murmur": ["./data/PASCAL/Atraining_murmur/", "./data/PASCAL/Btraining_murmur/"],
"extra-heart-sounds": ["./data/PASCAL/Atraining_extrahls/", "./data/PASCAL/Btraining_extrastole/"],
"artifact": ["./data/PASCAL/Atraining_artifact/"]}
self.lbls = {"normal": 0, "murmur": 1,
"extra-heart-sounds": 2, "artifact": 3}
self.data = []
self.data_lbls = []
def traverseDataset(self, location):
for label in tqdm(self.dataset_dir):
data_lbl = self.lbls[label]
for dir in self.dataset_dir[label]:
files = self.getFiles(dir)
for file in files:
raw_signal = self.getAudioSignal(f"{dir}{file}", 500)
segmented_signal = self.signalPreprocess(
raw_signal, length=5, sampleRate=500, includeLast=False)
for segment in segmented_signal:
self.data.append(segment.flatten()[:2500])
self.data_lbls.append(data_lbl)
self.data = torch.tensor(self.data).float()
self.data_lbls = torch.tensor(self.data_lbls).long()
print(self.data.shape)
print(self.data_lbls.shape)
torch.save({'data': self.data, 'labels': self.data_lbls}, location)
def signalPreprocess(self, data, **kargs):
segmented_signal = self.timeSegmentation(
data, length=kargs["length"], sampleRate=kargs["sampleRate"], includeLast=kargs["includeLast"])
return segmented_signal
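    # Note: with length=5 seconds at a 500 Hz sample rate each segment holds 5 * 500 = 2500
    # samples, which is why traverseDataset keeps segment.flatten()[:2500] above.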
dataset = PASCAL()
dataset.traverseDataset("./data/preprocessed/PASCAL.pt")
|
kendreaditya/heart-auscultation
|
src/preprocess/PASCAL-dataset.py
|
PASCAL-dataset.py
|
py
| 2,173 |
python
|
en
|
code
| 2 |
github-code
|
6
|
75189172348
|
import threading, time
#SCALAR REGISTERS
Scalar_Reg = [ "FUC1K", "", "11110000", "00001111", "0000000A" ]
#VECTOR REGISTERS
Vector_Reg = [ ["0", "", "", "","", "", "", ""], ["1", "", "", "","", "", "", ""], ["2", "", "", "","", "", "", ""], ["3", "", "", "","", "", "", ""], ["4", "", "", "","", "", "", ""] ]
##Register bank
class REG_BANK(threading.Thread):
def __init__(self, CLK, DIR_W, DIR_A, DIR_B,Control,MemOrRegW):
self.MyCLK = CLK
self.DIR_A = DIR_A
self.DIR_B = DIR_B
self.DIR_W = DIR_W
self.MyControl = Control
self.MemOrRegW = MemOrRegW
self.RegA = self.getReg(DIR_A)
self.RegB = self.getReg(DIR_B)
self.RegW = self.getReg(DIR_W)
threading.Thread.__init__(self,target = self.listening, args = ())
def listening(self):
while True:
if(self.MyCLK.running):
self.MemOrRegW = self.MyControl.MemOrRegW
#print("loop en bank dirA" + str(self.DIR_A) + " dirB "+str(self.DIR_B))
#self.RegW = self.getReg(self.DIR_W)
self.RegA = self.getReg(self.DIR_A)
if(self.MemOrRegW == 1):
self.RegW = self.getReg(self.DIR_W)
if(self.MyControl.Imm != 1):
self.RegB = self.getReg(self.DIR_B)
else:
self.RegB = self.DIR_B
"""if(Reg_W and Imm):
self.RegA = self.getReg(int(DIR_W,16))
self.RegB = int(DIR_B,16)
self.DIR_W = self.DIR_A
                #this should be done by the ALU
#self.DIR_W = self.RegW + self.DIR_B
if(Reg_D):
self.DIR_W = int(DIR_W,16)
self.RegA = self.getReg(int(DIR_A,16))
self.RegB = self.getReg(int(DIR_B,16))"""
def getReg(self,Reg):
print("este es Reg en Bank "+str(Reg))
Reg = int(Reg,2)
if(Reg<=10):
return Scalar_Reg[Reg]
if(Reg>10):
return Vector_Reg[Reg]
def setReg(self,Reg,DI):
Reg = int(Reg,2);
if(Reg<=10):
Scalar_Reg[Reg] = DI
if(Reg>10):
Vector_Reg[Reg] = DI
|
ger534/Proyecto2Arqui2
|
procesador/REG_BANK.py
|
REG_BANK.py
|
py
| 2,353 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32469600718
|
from collections import defaultdict
from typing import List
class Solution:
def findJudge(self, n: int, trust: List[List[int]]) -> int:
trust_counts = defaultdict(lambda: [0,0])
for truster, trusted in trust:
trust_counts[truster][0] += 1
trust_counts[trusted][1] += 1
candidates = [person for person in range(1,n+1) if trust_counts[person] == [0,n-1]]
return candidates[0] if len(candidates) == 1 else -1
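    # Example (LeetCode 997): n = 3, trust = [[1, 3], [2, 3]] -> person 3 trusts nobody and is
    # trusted by the other two (counts [0, 2] == [0, n-1]), so findJudge returns 3.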
|
MdAbedin/leetcode
|
0901 - 1000/0997 Find the Town Judge.py
|
0997 Find the Town Judge.py
|
py
| 437 |
python
|
en
|
code
| 7 |
github-code
|
6
|
26948867994
|
# Python for Everyone
# Chapter 3 exercise 2
try:
hrs = float(input('Enter Hours: '))
rate = float(input('Enter Rate: '))
except:
print("Please enter a number")
quit()
if hrs <= 40:
# no overtime
pay = rate * hrs
else:
# calculate overtime
pay = (hrs - 40) * rate * 1.5 + 40 * rate
print("Pay:", pay)
|
dansdevelopments/py4e
|
py4e/chapter03/ex03_02.py
|
ex03_02.py
|
py
| 334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40696691343
|
import asyncio
import re
from collections import namedtuple
from magma.magmad.check import subprocess_workflow
DEFAULT_NUM_PACKETS = 4
DEFAULT_TIMEOUT_SECS = 20
PingCommandParams = namedtuple(
'PingCommandParams',
['host_or_ip', 'num_packets', 'timeout_secs'],
)
PingInterfaceCommandParams = namedtuple(
'PingInterfaceCommandParams',
['host_or_ip', 'num_packets', 'interface', 'timeout_secs'],
)
PingCommandResult = namedtuple(
'PingCommandResult',
['error', 'host_or_ip', 'num_packets', 'stats'],
)
ParsedPingStats = namedtuple(
'ParsedPingStats', [
'packets_transmitted',
'packets_received',
'packet_loss_pct',
'rtt_min',
'rtt_avg',
'rtt_max',
'rtt_mdev',
],
)
# regexp's for parsing
dec_re = r'\d+(\.\d+)?'
packet_line_re = re.compile(
r'^(?P<packets_transmitted>\d+) packets transmitted, '
+ r'(?P<packets_received>\d+) received, '
+ r'(?P<packet_loss_pct>{d})% packet loss, '.format(d=dec_re)
+ r'time .+$',
)
rtt_line_re = re.compile(
r'^rtt min/avg/max/mdev = '
+ r'(?P<rtt_min>{d})/(?P<rtt_avg>{d})/'.format(d=dec_re)
+ r'(?P<rtt_max>{d})/(?P<rtt_mdev>{d}) ms$'.format(d=dec_re),
)
def ping(ping_params):
"""
Execute ping commands via subprocess. Blocks while waiting for output.
Args:
ping_params ([PingCommandParams]): params for the pings to execute
Returns:
[PingCommandResult]: stats from the executed ping commands
"""
return subprocess_workflow.exec_and_parse_subprocesses(
ping_params,
_get_ping_command_args_list,
parse_ping_output,
)
@asyncio.coroutine
def ping_async(ping_params, loop=None):
"""
Execute ping commands asynchronously.
Args:
ping_params ([PingCommandParams]): params for the pings to execute
loop: asyncio event loop (optional)
Returns:
[PingCommandResult]: stats from the executed ping commands
"""
return subprocess_workflow.exec_and_parse_subprocesses_async(
ping_params,
_get_ping_command_args_list,
parse_ping_output,
loop,
)
@asyncio.coroutine
def ping_interface_async(ping_params, loop=None):
"""
Execute ping commands asynchronously through specified interface.
Args:
ping_params ([PingCommandParams]): params for the pings to execute
loop: asyncio event loop (optional)
Returns:
[PingCommandResult]: stats from the executed ping commands
"""
return subprocess_workflow.exec_and_parse_subprocesses_async(
ping_params,
_get_ping_command_interface_args_list,
parse_ping_output,
loop,
)
def _get_ping_command_args_list(ping_param):
return [
'ping', ping_param.host_or_ip,
'-c', str(ping_param.num_packets or DEFAULT_NUM_PACKETS),
'-w', str(ping_param.timeout_secs or DEFAULT_TIMEOUT_SECS),
]
def _get_ping_command_interface_args_list(ping_param):
return [
'ping', ping_param.host_or_ip,
'-c', str(ping_param.num_packets or DEFAULT_NUM_PACKETS),
'-I', str(ping_param.interface),
'-w', str(ping_param.timeout_secs or DEFAULT_TIMEOUT_SECS),
]
def parse_ping_output(stdout, stderr, param):
"""
Parse stdout output from a ping command.
Raises:
ValueError: If any errors are encountered while parsing ping output.
"""
def create_error_result(error_msg):
return PingCommandResult(
error=error_msg,
host_or_ip=param.host_or_ip,
num_packets=param.num_packets or DEFAULT_NUM_PACKETS,
stats=None,
)
def find_statistic_line_idx(ping_lines):
line_re = re.compile('^--- .+ statistics ---$')
for i, line in enumerate(ping_lines):
if line_re.match(line):
return i
raise ValueError('Could not find statistics header in ping output')
def match_ping_line(line, line_re, line_name='ping'):
line_match = line_re.match(line)
if not line_match:
raise ValueError(
'Could not parse {name} line:\n{line}'.format(
name=line_name,
line=line,
),
)
return line_match
def str_to_num(s_in):
try:
return int(s_in)
except ValueError:
return float(s_in)
if stderr:
return create_error_result(stderr)
else:
try:
stdout_lines = stdout.decode('ascii').strip().split('\n')
stat_header_line_idx = find_statistic_line_idx(stdout_lines)
if len(stdout_lines) <= stat_header_line_idx + 2:
raise ValueError(
'Not enough output lines in ping output. '
'The ping may have timed out.',
)
packet_match = match_ping_line(
stdout_lines[stat_header_line_idx + 1],
packet_line_re,
line_name='packet',
)
rtt_match = match_ping_line(
stdout_lines[stat_header_line_idx + 2],
rtt_line_re,
line_name='rtt',
)
match_dict = {}
match_dict.update(packet_match.groupdict())
match_dict.update(rtt_match.groupdict())
match_dict = {k: str_to_num(v) for k, v in match_dict.items()}
return PingCommandResult(
error=None,
host_or_ip=param.host_or_ip,
num_packets=param.num_packets or DEFAULT_NUM_PACKETS,
stats=ParsedPingStats(**match_dict),
)
except ValueError as e:
return create_error_result(str(e.args[0]))
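# Example of the parsing above (illustration only; the text mimics a typical Linux
# `ping -c 4` summary rather than output captured for this project):
#
#   sample = b'\n'.join([
#       b'--- 8.8.8.8 ping statistics ---',
#       b'4 packets transmitted, 4 received, 0% packet loss, time 3004ms',
#       b'rtt min/avg/max/mdev = 9.153/9.882/10.569/0.507 ms',
#   ])
#   result = parse_ping_output(sample, b'', PingCommandParams('8.8.8.8', 4, 20))
#   # result.stats.packets_received == 4 and result.stats.rtt_avg == 9.882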
|
magma/magma
|
orc8r/gateway/python/magma/magmad/check/network_check/ping.py
|
ping.py
|
py
| 5,786 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
3440116021
|
class Node:
def __init__(self, val):
self.val = val
self.freq = 1
self.next = None
self.last = None
self.seq_next = None
class FreqStack:
def __init__(self):
self.freq_to_root = {}
self.val_to_node = {}
self.max_freq = 0
def push(self, val: int) -> None:
if val not in self.val_to_node:
node = self.val_to_node[val] = Node(val)
else:
old_node = self.val_to_node[val]
node = Node(val)
node.seq_next = old_node
self.val_to_node[val] = node
node.freq = old_node.freq + 1
freq = node.freq
self.max_freq = max(self.max_freq, freq)
if freq not in self.freq_to_root:
root = self.freq_to_root[freq] = Node(None)
root.next, root.last = root, root
else:
root = self.freq_to_root[freq]
root.last.next, root.last, node.last, node.next = node, node, root.last, root
def pop(self) -> int:
root = self.freq_to_root[self.max_freq]
node = root.last
node.next.last, node.last.next = node.last, node.next
res = node.val
old_node = node.seq_next
if old_node is None:
del self.val_to_node[node.val]
else:
self.val_to_node[node.val] = old_node
while root.last == root:
self.max_freq -= 1
if self.max_freq == 0:
break
root = self.freq_to_root[self.max_freq]
return res
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(val)
# param_2 = obj.pop()
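# Example (LeetCode 895): after push(5), push(7), push(5), push(7), push(4), push(5),
# four pops return 5 (most frequent), 7 (tie broken by recency), 5, then 4.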
|
cuiy0006/Algorithms
|
leetcode/895. Maximum Frequency Stack.py
|
895. Maximum Frequency Stack.py
|
py
| 1,718 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26857827694
|
import os
import numpy as np
import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, latent_dim, img_shape):
super().__init__()
self.img_shape = img_shape
self.label_embed = nn.Embedding(10, 10)
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(latent_dim+10, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, int(np.prod(img_shape))),
nn.Tanh(),
)
def forward(self, z, labels):
c = self.label_embed(labels)
z = torch.cat([z, c], dim=1)
img = self.model(z)
img = img.view(img.shape[0], *self.img_shape)
return img
class Discriminator(nn.Module):
def __init__(self, img_shape):
super().__init__()
self.label_embed = nn.Embedding(10, 10)
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape))+10, 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid(),
)
def forward(self, img, labels):
img_flat = img.view(img.size(0), -1)
c = self.label_embed(labels)
x = torch.cat([img_flat, c], dim=1)
validity = self.model(x)
return validity
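# Shape-check sketch (illustration only; the 100-dim latent size and the 1x28x28 MNIST image
# shape are assumptions, not values taken from this repo's training script):
#
#   gen = Generator(latent_dim=100, img_shape=(1, 28, 28))
#   disc = Discriminator(img_shape=(1, 28, 28))
#   z = torch.randn(16, 100)
#   labels = torch.randint(0, 10, (16,))
#   fake = gen(z, labels)          # -> (16, 1, 28, 28)
#   validity = disc(fake, labels)  # -> (16, 1)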
|
zeroone-universe/GM4MNIST
|
models/cGAN/model.py
|
model.py
|
py
| 1,673 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30866831466
|
import uuid
class CustomerGenerator:
def __init__(self, client):
self.client = client
self.customers_api = client.customers
def create_customer(self, **kwargs):
print('create_customer', kwargs)
result = self.customers_api.create_customer(body=kwargs)
if result.is_success():
print(result.body)
return result.body.get('customer', {})
elif result.is_error():
print(result.errors)
return None
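# Usage sketch (illustration only; the Client constructor and the customer fields below follow
# the public Square Python SDK and are assumptions, not values taken from this repo):
#
#   from square.client import Client
#   client = Client(access_token='SANDBOX_ACCESS_TOKEN', environment='sandbox')
#   generator = CustomerGenerator(client)
#   customer = generator.create_customer(given_name='Jane', family_name='Doe')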
|
cbonoz/square21
|
customer_generator.py
|
customer_generator.py
|
py
| 502 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25867921082
|
#import libraries
import pandas as pd
import numpy as np
import bokeh
from bokeh.plotting import figure, output_file, show
from bokeh.models.tools import HoverTool
from bokeh.core.properties import value
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.plotting import figure
import math
from bokeh.models import Range1d, LabelSet, Label
class Flight_Arrivals():
def __init__(self):
pass
def flights(self):
avion=pd.read_csv("fly_mia.csv",encoding="latin-1")
a=avion
a['est_arr_time'] = a['est_arr_time'].str.replace('?', '')
a['est_arr_time']=a['est_arr_time'].str.replace(r"\(.*\)","")
a=a[a.est_arr_time.str.contains('0')]
sun1=a[a.est_arr_time.str.contains('Sun')]
sun1['est_arr_time'] = sun1['est_arr_time'].str.replace('Sun', '2019-08-18')
sun1['dep_time'] = sun1['dep_time'].str.replace('Sun', '2019-08-18')
sat1=a[a.est_arr_time.str.contains('Sat')]
sat1['est_arr_time'] = sat1['est_arr_time'].str.replace('Sat', '2019-08-17')
sat1['dep_time'] = sat1['dep_time'].str.replace('Sat', '2019-08-17')
fri1=a[a.est_arr_time.str.contains('Fri')]
fri1['est_arr_time'] =fri1['est_arr_time'].str.replace('Fri', '2019-08-16')
fri1['dep_time'] =fri1['dep_time'].str.replace('Fri', '2019-08-16')
ok2=pd.concat([sun1,sat1,fri1],axis=0)
ok2['dep_time'] =ok2['dep_time'].str.replace('Fri', '2019-08-16')
ok2['dep_time'] =ok2['dep_time'].str.replace('Sat', '2019-08-17')
ok2['dep_time']=pd.to_datetime(ok2['dep_time'])
ok2['est_arr_time']=pd.to_datetime(ok2['est_arr_time'])
ok2['flight_time']=ok2['est_arr_time']-ok2['dep_time']
ok2['flight_time']=ok2['flight_time'].dt.total_seconds()
ok2['flight_time']=ok2['flight_time']/60 #to minutes
#airport time zones (departure zones)
#1. cest
cest=ok2[ok2.origin.str.contains('MAD|ZRH|BRU|MXP|CDG|DUS|FCO|VIE|FRA|Pisa|BCN|ZAZ|WAW|ORY|AMS')]
cest['flight_time']=cest['flight_time']+360
cest['flight_time'] = cest['flight_time'].apply(lambda x: 561 if x < 400 else x)
#2.south american flights
sa=ok2[ok2.origin.str.contains("GIG|FOR|COR|EZE|Dois de|BSB|GRU|REC|MVD|BEL|SNU")]
sa['flight_time']=sa['flight_time']+60
sa['flight_time']=sa['flight_time'].apply(lambda x: 451.5 if x<350 else x)
otro=ok2[~ok2.origin.str.contains('MAD|ZRH|BRU|MXP|CDG|DUS|FCO|VIE|FRA|Pisa|BCN|ZAZ|WAW|ORY|AMS|GIG|FOR|COR|EZE|Dois de|BSB|GRU|REC|MVD|BEL|SNU')]
todos=pd.concat([cest,sa,otro],axis=0)
        # percent of flights in each one-hour flight-time bin
bins=[0,60,120,180,240,300,360,420,480,540,600,660]
todos['flight_bins']=pd.cut(todos['flight_time'], bins)
pct_time=todos['flight_bins'].value_counts()
pct_time=pd.DataFrame(pct_time)
pct_time.reset_index(level=0,inplace=True)
pct_time['pct']=pct_time['flight_bins']/todos.shape[0]
#ii. variance by origin
vaR=todos.groupby('origin')['flight_time'].var()
vaR.sort_values()
#iii. arrives by part of the day
tiempo=todos[["origin","est_arr_time"]]
t=tiempo
t['hours']=t['est_arr_time'].dt.hour
t['minutes']=t['est_arr_time'].dt.minute
mid_six=t[(t.hours>=0) & (t.hours<=6)]
seven_twelve=t[(t.hours>=7) & (t.hours<=12)]
one_six=t[(t.hours>=13) & (t.hours<=18)]
seven_twelve1=t[(t.hours>=19) & (t.hours<=23)]
#percent arrivals by time of the day
mid_sixP=mid_six.shape[0]/t.shape[0]
seven_twelveP=seven_twelve.shape[0]/t.shape[0]
one_sixP=one_six.shape[0]/t.shape[0]
seven_twelveP1=seven_twelve1.shape[0]/t.shape[0]
#origin counts
ori=t['origin'].value_counts()
ori=pd.DataFrame(ori)
ori.reset_index(level=0,inplace=True)
ori.columns=['origin','total']
#time between flights
tX=todos
tX.sort_values(['origin','dep_time'],inplace=True)
tX['diff_dep']=tX['dep_time'].diff()
mask=tX.origin !=tX.origin.shift(1)
tX['diff_dep'][mask]=np.nan
tX['diff_dep']=tX['diff_dep'].dt.total_seconds()
tX['diff_dep']=tX['diff_dep']/60 #to minutes
tX.iloc[0:10]
tX=tX[~(tX.diff_dep==0)]
takeoffs=tX.groupby('origin')['diff_dep'].median()
takeoffs=takeoffs.sort_values()
takeoffs=pd.DataFrame(takeoffs)
take=takeoffs
take=take[take.diff_dep>=1]
take1=take[take.diff_dep<=80]
s=t
s=s.set_index('est_arr_time')
s=s.loc['2019-08-17 00:00:00':'2019-08-17 23:59:59']
#VIZ I
#east coast time vs. cst,pdt, and mdt (comparing flight times)
west_cent=tX[tX.origin.str.contains('LAX|SFO|LAS|SEA|SAN|SNU|DFW|MEX|MDW|MSY|CMW|MEM|ORD|TUL|MSP|MCI|STL|MID|IAH|VRA|PNS|GDL|MTY|KSAT|BHM|SCU|HOG|TLC|HSV')]
east=tX[tX.origin.str.contains('NAS|PHI|Toron|Bahama|DCA|HAV|ORF|TPA|LGA|JAX|SAV|SDF|PIE|GGT|PLS|CVG|PIT|CHS|CLE|JFK|CAP|IND|DTW|KEY|CMH|BUF|RDU|SFB|MYEH|MYAM|CYUL|GSP|PBI|RIC|GSO|FMY|BDL|BWI|KTEB|ZSA|KMLB|KAPF|SGJ')]
#length of flights
wc=west_cent['flight_bins'].value_counts()
wc=pd.DataFrame(wc)
wc.columns=['flight_time']
wc.reset_index(level=0,inplace=True)
wc=wc.sort_values(by="index")
wc=wc.set_index('index')
ea=east['flight_bins'].value_counts()
ea=pd.DataFrame(ea)
ea.columns=['flight_time']
ea.reset_index(level=0,inplace=True)
ea=ea.sort_values(by="index")
ea=ea.set_index('index')
factors=[("0-60"),("60-120"),("120-180"), ("180-240"),("240-300"),("300-360"),("360-420"),("420-480"),("480-540"),("540-600"),("600-660")]
regions=['east_time_zone','other_time_zone']
east_data=ea.flight_time.tolist()
west_data=wc.flight_time.tolist()
source=ColumnDataSource(data=dict(x=factors,east_time_zone=east_data,other_time_zone=west_data,))
p = figure(x_range=FactorRange(*factors), plot_height=250,toolbar_location=None, tools="")
p.vbar_stack(regions, x='x', width=0.9, alpha=0.5, color=["orange", "purple"], source=source,legend=[value(x) for x in regions])
p.y_range.start = 0
p.y_range.end = 120
p.x_range.range_padding = 0.1
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.xaxis.axis_label='Flight Time (Minutes)'
p.yaxis.axis_label='Frequency'
p.legend.location = "top_right"
p.legend.orientation = "horizontal"
output_file("mia1.html")
#show(p)
#VIZ II (time between departures)
source1=ColumnDataSource(take1)
airports=source1.data['origin'].tolist()
p1=figure(x_range=airports)
p1.vbar_stack(stackers=['diff_dep'],x='origin',source=source1,width=0.5)
p1.title.text='Time Between Flight Departures'
p1.title.align="center"
p1.title.text_color="orange"
p1.xaxis.major_label_orientation = math.pi/4.25
p1.xaxis.axis_label=''
p1.yaxis.axis_label='Minutes'
hover=HoverTool()
hover.tooltips=[("Time Between Flights","@diff_dep minutes")]
hover.mode='vline'
p1.add_tools(hover)
output_file("mia2.html")
#show(p1)
#VIZ III (what time of the day do flights arrive?)
time_arr=['Midnight to 7 AM','7 AM to 1 PM','1 PM to 7 PM','7 PM to Midnight']
        counts=[mid_sixP,seven_twelveP,one_sixP,seven_twelveP1]
palette=['lavender','plum','darkviolet','indigo']
source = ColumnDataSource(data=dict(time_arr=time_arr, counts=counts))
p = figure(x_range=time_arr, plot_height=250, toolbar_location=None, title="When Do Flights to X Arrive?")
p.vbar(x='time_arr', top='counts', width=0.5, source=source, color="teal",
line_color='white')
p.xgrid.grid_line_color = None
p.y_range.start = 0.0
p.y_range.end = 0.6
p.xaxis.axis_label=""
p.yaxis.major_label_overrides = {0:'0',0.1:'10%',0.2:'20%',0.3:'30%',0.4:'40%',0.5:'50%'}
p.yaxis.axis_label="Total Flights"
p.legend.orientation = "horizontal"
p.legend.location = "top_center"
p.title.align="center"
output_file("mia3.html")
#show(p)
#VIZ IV (outlier flights time plot)
top_diez=tX['origin'].value_counts()
top_diez=pd.DataFrame(top_diez)
top_diez.reset_index(level=0,inplace=True)
air_names=top_diez.iloc[0:10]["index"]
an=air_names
an0=an.iloc[0]
an1=an.iloc[1]
an2=an.iloc[2]
an3=an.iloc[3]
an4=an.iloc[4]
an5=an.iloc[5]
an6=an.iloc[6]
an7=an.iloc[7]
an8=an.iloc[8]
an9=an.iloc[9]
sub_air=tX[(tX.origin==an0) | (tX.origin==an1) | (tX.origin==an2) | (tX.origin==an3) | (tX.origin==an4) | (tX.origin==an5) | (tX.origin==an6) | (tX.origin==an7) | (tX.origin==an8) | (tX.origin==an9)]
df=pd.DataFrame(dict(flight_time=sub_air['flight_time'],group=sub_air['origin']))
originS=df['group'].unique().tolist()
groups=df.groupby('group')
q1=groups.quantile(q=0.25)
q2=groups.quantile(q=0.50)
q3=groups.quantile(q=0.75)
iqr=q3-q1
upper=q3+1.5*iqr
lower=q1-1.5*iqr
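        # Tukey's rule: whiskers extend 1.5 * IQR beyond the quartiles; points outside are outliers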
#find outliers in each group
def outliers(group):
originS=group.name
return group[(group.flight_time > upper.loc[originS]['flight_time']) | (group.flight_time < lower.loc[originS]['flight_time'])]['flight_time']
out=groups.apply(outliers).dropna()
#prepare outlier data for plotting
if not out.empty:
outx=[]
outy=[]
for keys in out.index:
outx.append(keys[0])
outy.append(out.loc[keys[0]].loc[keys[1]])
p = figure(tools="", background_fill_color="#efefef", x_range=originS, toolbar_location=None)
#if no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin=groups.quantile(q=0.00)
qmax=groups.quantile(q=1.00)
        upper.flight_time=[min([x,y]) for (x,y) in zip(list(qmax.loc[:,'flight_time']),upper.flight_time)]
        lower.flight_time = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,'flight_time']),lower.flight_time)]
# stems
p.segment(originS, upper.flight_time, originS, q3.flight_time, line_color="black")
p.segment(originS, lower.flight_time, originS, q1.flight_time, line_color="black")
# boxes
p.vbar(originS, 0.7, q2.flight_time, q3.flight_time, fill_color="aqua", line_color="black")
p.vbar(originS, 0.7, q1.flight_time, q2.flight_time, fill_color="maroon", line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(originS, lower.flight_time, 0.2, 0.01, line_color="black")
p.rect(originS,upper.flight_time, 0.2, 0.01, line_color="black")
# outliers
if not out.empty:
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
p.xaxis.major_label_orientation = 3.5/2
p.xaxis.axis_label = ''
p.yaxis.axis_label = 'Flight Time (minutes)'
p.title.text='Flights That Are Shorter or Longer Than Average'
p.title.align="center"
output_file('mia4x.html')
#show(p)
#VIZ V
dep=tX['diff_dep'].tolist()
time=tX['flight_time'].tolist()
airports=tX['origin'].tolist()
source=ColumnDataSource(data=dict(dep=dep,time=time,airports=airports))
p=figure(title="Flight Time Vs. Time Between Departures",x_range=Range1d(0,1000))
p.scatter(x="dep",y="time",size=4,source=source)
p.xaxis[0].axis_label="Time Between Flights (Minutes)"
p.yaxis[0].axis_label="Flight Time (Minutes)"
labels = LabelSet(x='dep', y='time', text='airports', level='glyph',x_offset=5, y_offset=5, source=source, render_mode='canvas')
p.add_layout(labels)
show(p)
if __name__=='__main__':
flights=Flight_Arrivals()
flights.flights()
|
Fremont28/miami_flights-
|
flights_viz1.py
|
flights_viz1.py
|
py
| 12,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39690983841
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="visbeat",
version="0.0.9",
author="Abe Davis",
author_email="[email protected]",
description="Code for 'Visual Rhythm and Beat' SIGGRAPH 2018",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/abedavis/visbeat",
project_urls={
'Abe Davis': 'http://www.abedavis.com/',
'Visual Rhythm and Beat': 'http://www.abedavis.com/visualbeat/',
'Source': 'https://github.com/abedavis/visbeat',
'Demo': 'http://www.abedavis.com/visualbeat/demo/',
},
install_requires=[
'numpy',
'scipy',
'bs4',
'librosa',
'imageio',
'requests',
'moviepy',
'termcolor',
'youtube-dl',
'matplotlib',
],
scripts=['bin/dancefer'],
packages=setuptools.find_packages(exclude=['contrib', 'docs', 'tests*']),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 2.7",
"Operating System :: OS Independent",
],
)
|
abedavis/visbeat
|
setup.py
|
setup.py
|
py
| 1,170 |
python
|
en
|
code
| 220 |
github-code
|
6
|
73674519546
|
'''This script contains the functions used to construct and train the GAN.'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import tensorflow as tf
# Change the environment variable TF_CPP_MIN_LOG_LEVEL to 2 to avoid the messages about the compilation of the CUDA code
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from data_utils import *
import argparse
import logging
# from tensorflow.keras.utils import plot_model
from model_utils import *
import math
import gc
from scipy.stats import wasserstein_distance
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='''Main script used to train the GAN.''')
parser.add_argument("-l", "--log", default="info",
help=("Provide logging level. Example --log debug', default='info'"))
parser.add_argument('-N', '--N_days', type=int, help='Number of the day to consider')
parser.add_argument('-d', '--depth', help='Depth of the orderbook', type=int)
parser.add_argument('-bs', '--batch_size', help='Batch size', type=int)
parser.add_argument('-ld', '--latent_dim', help='Latent dimension', type=int)
parser.add_argument('-nlg', '--n_layers_gen', help='Number of generator layers', type=int)
parser.add_argument('-nld', '--n_layers_disc', help='Number of discriminator layers', type=int)
parser.add_argument('-tg', '--type_gen', help='Type of generator model (conv, lstm, dense)', type=str)
parser.add_argument('-td', '--type_disc', help='Type of discriminator model (conv, lstm, dense)', type=str)
parser.add_argument('-sc', '--skip_connection', action='store_true', help='Use or not skip connections')
parser.add_argument('-Tc', '--T_condition', help='Number of time steps to condition on', type=int, default=2)
parser.add_argument('-Tg', '--T_gen', help='Number of time steps to generate', type=int, default=1)
parser.add_argument('-ls', '--loss', help='Loss function (original, wasserstein)', type=str, default='original')
parser.add_argument('-lo', '--load', help='Load a model. The job_id must be provided', type=int, default=0)
args = parser.parse_args()
levels = {'critical': logging.CRITICAL,
'error': logging.ERROR,
'warning': logging.WARNING,
'info': logging.INFO,
'debug': logging.DEBUG}
if os.getenv("PBS_JOBID") != None:
job_id = os.getenv("PBS_JOBID")
else:
job_id = os.getpid()
logging.basicConfig(filename=f'train_{job_id}.log', format='%(message)s', level=levels[args.log])
logger = tf.get_logger()
logger.setLevel('ERROR')
# Set the seed for TensorFlow to the number of the beast
tf.random.set_seed(666)
# Print the current date and time
current_datetime = pd.Timestamp.now()
formatted_datetime = current_datetime.strftime("%Y-%m-%d %H:%M:%S")
logging.info(f"Current Date and Time:\n\t {formatted_datetime}")
# Enable device placement logging
tf.debugging.set_log_device_placement(True)
# Load the data
stock = 'MSFT'
date = '2018-04-01_2018-04-30_5'
total_depth = 5
N = args.N_days
depth = args.depth
logging.info(f'Stock:\n\t{stock}')
logging.info(f'Number of days:\n\t{N}')
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) == 0:
logging.info("No GPUs available.")
else:
logging.info("Available GPUs:")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
for device in physical_devices:
logging.info(f'\t{device}\n')
# Folders creation
os.mkdir(f'plots/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}') # Model architecture plots, metrics plots
os.mkdir(f'generated_samples/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}') # Generated samples
os.mkdir(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}') # Models
# Create the orderbook dataframe
orderbook_df = create_orderbook_dataframe(N, previos_days=False)
# Define the parameters of the GAN. Some of them are set via argparse
T_condition = args.T_condition
T_gen = args.T_gen
window_size = T_condition + T_gen
n_features_input = orderbook_df.shape[1]
n_features_gen = 2*depth
latent_dim = args.latent_dim
n_epochs = 5000
batch_size = args.batch_size
# Define the parameters for the early stopping criterion
best_gen_weights = None
best_disc_weights = None
best_wass_dist = float('inf')
patience_counter = 0
patience = 200
num_pieces = 5
if not os.path.exists(f'../data/input_train_{stock}_{window_size}_day{N}_orderbook.npy'):
logging.info('\n[Input] ---------- PREPROCESSING ----------')
data_input = orderbook_df.values
# data_input = np.load(f'anomaly_data_{N}.npy')
# logging.info(f'\nAre anomaly_data and normal_data the same?\n\t{np.all(data_input == data_input_a)}')
# exit()
# Divide input data into overlapping pieces
sub_data, length = divide_into_overlapping_pieces(data_input, window_size, num_pieces)
if sub_data[-1].shape[0] < window_size:
raise ValueError(f'The last piece has shape {sub_data[-1].shape} and it is smaller than the window size {window_size}.')
logging.info(f'Number of windows: {length}')
# Create a memmap to store the scaled data.
final_shape = (length-num_pieces*(window_size-1), window_size, n_features_input)
fp = np.memmap("final_data.dat", dtype='float32', mode='w+', shape=final_shape)
start_idx = 0
logging.info(f'\nStart scaling the data...')
for piece_idx, data in enumerate(sub_data):
logging.info(f'\t{piece_idx+1}/{num_pieces}')
windows = np.array(divide_into_windows(data, window_size))
logging.info(f'\twindows shape: {windows.shape}')
end_idx = start_idx + windows.shape[0]
fp[start_idx:end_idx] = windows
start_idx = end_idx
del windows # Explicit deletion
logging.info('Done.')
np.save(f'normal_data_{N}.npy', fp)
logging.info('\nDividing each window into condition and input...')
condition_train, input_train = fp[:, :T_condition, :], fp[:, T_condition:, :n_features_gen]
logging.info('Done.')
logging.info(f'input_train shape:\n\t{input_train.shape}')
logging.info(f'condition_train shape:\n\t{condition_train.shape}')
logging.info('\nSave the files...')
np.save(f'../data/condition_train_{stock}_{window_size}_day{N}_orderbook.npy', condition_train)
np.save(f'../data/input_train_{stock}_{window_size}_day{N}_orderbook.npy', input_train)
logging.info('Done.')
logging.info('\n[Input] ---------- DONE ----------')
else:
logging.info('Loading input_train, input_validation and input_test sets...')
        input_train = np.load(f'../data/input_train_{stock}_{window_size}_day{N}_orderbook.npy', mmap_mode='r')
        condition_train = np.load(f'../data/condition_train_{stock}_{window_size}_day{N}_orderbook.npy', mmap_mode='r')
logging.info(f'input_train shape:\n\t{input_train.shape}')
logging.info(f'condition_train shape:\n\t{condition_train.shape}')
logging.info(f"\nHYPERPARAMETERS:\n"
f"\tstock: {stock}\n"
f"\tdepth: {depth}\n"
f"\tgenerator: {args.type_gen}\n"
f"\tdiscriminator: {args.type_disc}\n"
f"\tn_layers_gen: {args.n_layers_gen}\n"
f"\tn_layers_disc: {args.n_layers_disc}\n"
f"\tskip_connection: {args.skip_connection}\n"
f"\tlatent_dim per time: {latent_dim}\n"
f"\tn_features_input: {n_features_input}\n"
f"\tn_features_gen: {n_features_gen}\n"
f"\tfeatures: {orderbook_df.columns}\n"
f"\tn_epochs: {n_epochs}\n"
f"\tT_condition: {T_condition}\n"
f"\tT_gen: {T_gen}\n"
f"\tbatch_size: {batch_size} (num_batches: {input_train.shape[0]//batch_size})\n"
f"\tloss: {args.loss}\n"
f"\tpatience: {patience}\n"
f"\tjob_id: {job_id}\n"
f"\tLoaded model: {None if args.load==0 else args.load}\n")
# Define the optimizers
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
optimizer = [generator_optimizer, discriminator_optimizer]
if args.load == 0:
# Build the models
generator_model = build_generator(args.n_layers_gen, args.type_gen, args.skip_connection, T_gen, T_condition, n_features_input, n_features_gen, latent_dim, True)
discriminator_model = build_discriminator(args.n_layers_disc, args.type_disc, args.skip_connection, T_gen, T_condition, n_features_input, n_features_gen, True, args.loss)
feature_extractor = build_feature_extractor(discriminator_model, [i for i in range(1, args.n_layers_disc)])
else:
prev_job_id = args.load
# Load the models
generator_model = tf.keras.models.load_model(f'models/{prev_job_id}.pbs01_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5')
discriminator_model = tf.keras.models.load_model(f'models/{prev_job_id}.pbs01_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/discriminator_model.h5')
feature_extractor = build_feature_extractor(discriminator_model, [i for i in range(1, args.n_layers_disc)])
logging.info('\n[Model] ---------- MODEL SUMMARIES ----------')
generator_model.summary(print_fn=logging.info)
logging.info('\n')
discriminator_model.summary(print_fn=logging.info)
logging.info('[Model] ---------- DONE ----------\n')
# Define a dictionary to store the metrics
metrics = {'discriminator_loss': [], 'gen_loss': [], 'real_disc_out': [], 'fake_disc_out': []}
# Train the GAN.
logging.info('\n[Training] ---------- START TRAINING ----------')
dataset_train = tf.data.Dataset.from_tensor_slices((condition_train, input_train)).batch(batch_size)
num_batches = len(dataset_train)
logging.info(f'Number of batches:\n\t{num_batches}\n')
# Initialize a list to store the mean over all the features of the wasserstein distances at each epoch
wass_to_plot = []
for epoch in range(n_epochs):
j = 0
W_batch = [] # W_batch will have num_batches elements
        noises = [[] for _ in range(num_batches)] # noises will have num_batches elements. Each element is a list containing the noises used for each batch in that epoch
for batch_condition, batch_real_samples in dataset_train:
j += 1
batch_size = batch_real_samples.shape[0]
generator_model, discriminator_model, generated_samples, noise = train_step(batch_real_samples, batch_condition, generator_model, discriminator_model, feature_extractor, optimizer, args.loss, T_gen, T_condition, latent_dim, batch_size, num_batches, j, job_id, epoch, metrics, args)
# Append the noise
noises[j-1] = noise
W_features = [] # W_features will have n_features_gen elements
for feature in range(n_features_gen): # Iteration over the features
W_samples = [] # W_samples will have batch_size elements
for i in range(generated_samples.shape[0]): # Iteration over the samples
w = wasserstein_distance(batch_real_samples[i, :, feature], generated_samples[i, :, feature])
W_samples.append(w)
W_features.append(np.mean(np.array(W_samples))) # averaged over the samples in a batch
W_batch.append(np.mean(np.array(W_features))) # averaged over the features
overall_W_mean = np.mean(np.array(W_batch)) # averaged over the batches
wass_to_plot.append(overall_W_mean)
logging.info(f'Wasserstein distance: {overall_W_mean}')
if epoch % 150 == 0:
logging.info('Creating a time series with the generated samples...')
features = orderbook_df.columns[:n_features_gen]
plot_samples(dataset_train, generator_model, noises, features, T_gen, n_features_gen, job_id, epoch, args)
logging.info('Saving the models...')
generator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5')
discriminator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/discriminator_model.h5')
logging.info('Done')
logging.info('Check Early Stopping Criteria...')
if epoch > 2500:
if overall_W_mean + 5e-4 < best_wass_dist:
logging.info(f'Wasserstein distance improved from {best_wass_dist} to {overall_W_mean}')
best_wass_dist = overall_W_mean
best_gen_weights = generator_model.get_weights()
best_disc_weights = discriminator_model.get_weights()
patience_counter = 0
np.save(f'generated_samples/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/noise_{epoch}.npy', noises)
else:
logging.info(f'Wasserstein distance did not improve from {best_wass_dist}')
patience_counter += 1
if patience_counter >= patience:
best_epoch = epoch - patience
logging.info(f"Early stopping on epoch {epoch}. Restoring best weights of epoch {best_epoch}...")
generator_model.set_weights(best_gen_weights) # restore best weights
discriminator_model.set_weights(best_disc_weights)
logging.info('Saving the models...')
generator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5')
discriminator_model.save(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/discriminator_model.h5')
logging.info('Done')
else:
logging.info(f'Early stopping criterion not met. Patience counter:\n\t{patience_counter}')
# Plot the wasserstein distance
plt.figure(figsize=(10, 6))
plt.plot(wass_to_plot)
plt.xlabel('Epoch')
plt.ylabel('Wasserstein distance')
plt.title(f'Mean over the features of the Wasserstein distances')
# add a vertical line at the best epoch
plt.axvline(x=epoch-patience_counter, color='r', linestyle='--', alpha=0.8, label=f'Best epoch: {epoch-patience_counter}')
plt.legend()
plt.savefig(f'plots/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/0_wasserstein_distance.png')
plt.close()
if patience_counter >= patience:
break
logging.info('[Training] ---------- DONE ----------\n')
logging.info('Plotting the first 2 principal components of the generated and real samples...')
# Plot the first 2 principal components of the generated and real samples
# Load the best generator
generator_model = tf.keras.models.load_model(f'models/{job_id}_{args.type_gen}_{args.type_disc}_{args.n_layers_gen}_{args.n_layers_disc}_{args.T_condition}_{args.loss}/generator_model.h5')
generated_samples = []
real_samples = []
k = 0
for batch_condition, batch in dataset_train:
gen_sample = generator_model([noises[k], batch_condition])
for i in range(gen_sample.shape[0]):
# All the appended samples will be of shape (T_gen, n_features_gen)
generated_samples.append(gen_sample[i, -1, :])
real_samples.append(batch[i, -1, :])
k += 1
plot_pca_with_marginals(generated_samples, real_samples, job_id, args)
logging.info('Done.')
logging.info('Computing the errors on the correlation matrix using bootstrap...')
# At the end of the training, compute the errors on the correlation matrix using bootstrap.
# In order to do so, I need the best generator and the noises used.
correlation_matrix(dataset_train, generator_model, noises, T_gen, n_features_gen, job_id)
logging.info('Done.')
# Maybe it is not necessary, but I prefer to clear all the memory and exit the script
gc.collect()
tf.keras.backend.clear_session()
sys.exit()
|
DanieleMDiNosse/GAN_Anomaly_Detection
|
train.py
|
train.py
|
py
| 17,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33138124964
|
#mass import
import pandas as pd
import urllib.request
import json
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from sklearn.cluster import KMeans
pd.options.display.max_rows = 999
#for getting the date 5 days ago
daydif = str(datetime.today() - timedelta(days=5))
dayref = str(daydif[0:10])
today = str(datetime.today().strftime('%Y-%m-%d'))
#for retrieving the data from the APIs
link = ["https://environment.data.gov.uk/flood-monitoring/id/stations/46160/readings?since=",dayref, "&_sorted¶meter=rainfall"]
final = ""
final = final.join(link)
#print(final)
web = urllib.request.Request(final)
response = urllib.request.urlopen(web)
the_page = response.read()
jason = json.loads(the_page)
link1 = ["http://environment.data.gov.uk/flood-monitoring/id/measures/46126-level-stage-i-15_min-m/readings?since=",dayref]
final1 = ""
final1 = final1.join(link1)
#print(final1)
web1 = urllib.request.Request(final1)
response1 = urllib.request.urlopen(web1)
the_page1 = response1.read()
jason1 = json.loads(the_page1)
#creates dataframes for each API
df1 = pd.DataFrame(jason1["items"])
df1 = df1.sort_values('dateTime', ascending = True)
df = pd.DataFrame(jason["items"])
df = df.sort_values('dateTime', ascending = True)
#merged table containing level and fall, plots a graph
a = pd.merge(df, df1, on = 'dateTime', how = 'left')
b = a[['dateTime', 'value_x', 'value_y']].copy()
b = b.rename(columns = {"dateTime" : "Date/Time", "value_x" : "Rainfall", "value_y" : "River Level"})
#Calculates hourly results
c = b[['Rainfall', 'River Level']]
d = c['River Level'].groupby(c.index//4).mean()
d = d.diff()
e = c.groupby(c.index//4)['Rainfall'].sum()
hourly = pd.concat([d, e], axis = 1)
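#Index//4 groups four consecutive rows per bucket, which assumes the readings arrive at
#15-minute intervals: river level is averaged per hour and then differenced, rainfall is summed per hour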
drip = hourly['Rainfall'].max()
drip = int(drip * 10)
calc = []
tester = pd.DataFrame()
for i in range (0, drip+1):
x = i/10
s = hourly.Rainfall.eq(x)
out = pd.DataFrame()
out['River'] = hourly.loc[s.shift(1, axis = 0) | s.shift(2, axis = 0), 'River Level']
runner = len(out)
out['Rain'] = x
tester = pd.concat([tester, out])
tester = tester.dropna()
#Machine learning : Kmeans clustering
reg = KMeans(n_clusters = 2, random_state = 0).fit(tester[['River']])
#Producing a graph of the clustering output against rainfall
plt.scatter(tester['Rain'], tester['River'], color = 'Blue', marker = '+')
plt.plot(tester['Rain'], reg.predict(tester[['Rain']]), color = 'Red')
plt.show()
#Producing an example prediction
pred = pd.DataFrame()
#print(pred)
end = len(b.index)
for i in range(1, end):
prod = pd.DataFrame()
#print(b)
for row in b.itertuples():
temp = pd.DataFrame()
temp['RDelta'] = reg.predict([[row.Rainfall]])
temp['Rainfall'] = row.Rainfall
pred = pd.concat([pred, temp])
endriver = pd.DataFrame()
riverstart = b['River Level'].iloc[0]
for row in pred.itertuples():
rtemp = pd.DataFrame({'RPred': [riverstart]})
endriver = pd.concat([endriver, rtemp], ignore_index = True)
riverstart = riverstart + row.RDelta
#Producing a dataframe with the prediction and all other data together
fin = pd.merge(b, endriver, left_index = True, right_index = True)
print(fin)
#Plotting a graph of expected riverlevel and the actual river level
plt.plot(fin['Date/Time'],fin['River Level'],label = 'River Level')
plt.plot(fin['Date/Time'],fin['Rainfall'], label = 'Rainfall')
plt.plot(fin['Date/Time'],fin['RPred'], label = 'Predicted')
plt.locator_params(axis = 'x', nbins = 10) #Date/Time is plotted on the x axis
plt.xticks(rotation = 'vertical')
ax = plt.gca()
ax.set_xticks(ax.get_xticks()[::48])
plt.show()
|
nicr0ss/RainDance
|
KMeans_model.py
|
KMeans_model.py
|
py
| 3,524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8870924854
|
from collections import deque
import sys
while True:
try:
a=str(input('Input final configuration: '))
N=int(a.replace(' ',''))
if ((N>87654321)|(N<12345678)):
raise ValueError('Incorrect configuration, giving up...')
break
except ValueError:
print('Incorrect configuration, giving up...')
sys.exit()
final=[int(i) for i in str(N)]
def initial_state(N):
    #equivalent to the original locals()/vars() construction of d1..dN
    return [i for i in range(1, N + 1)]
def row_exchange(L):
r_L = list(L)
d8, d7, d6, d5, d4, d3, d2, d1 = r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7]
r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] = d1, d2, d3, d4, d5, d6, d7, d8
return r_L
def right_circular_shift(L):
r_L = list(L)
d1, d2, d3, d4, d5, d6, d7, d8 = r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7]
r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] = d4, d1, d2, d3, d6, d7, d8, d5
return r_L
def middle_clockwise_rotation(L):
r_L = list(L)
d1, d2, d3, d4, d5, d6, d7, d8 = r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7]
r_L[0], r_L[1], r_L[2], r_L[3], r_L[4], r_L[5], r_L[6], r_L[7] = d1, d7, d2, d4, d5, d3, d6, d8
return r_L
def Rubik_Rectangle(initial_list, final_conf_list):
steps_need = 0
created_list = list(initial_list)
check_list = []
created_total = []
created_all=deque()
created_all.append([1, 2, 3, 4, 5, 6, 7, 8])
temp_created_all=[]
Flag = False
temp_created_all=[]
temp_created_all.append([1, 2, 3, 4, 5, 6, 7, 8])
don_t_know=set()
don_t_know.add((1, 2, 3, 4, 5, 6, 7, 8))
def List_compare(list_1, list_2, i):
if list_1 == list_2:
return True
else:
return False
for i in range(23):
#print('I',i)
#print('LENTH OF ALL',len(created_all))
steps_need = i
if initial_list == final_conf_list:
return steps_need
else:
for element in range(0, len(created_all)):
created_list = created_all[element]
if tuple(row_exchange(created_list)) not in don_t_know:
created_total.append(row_exchange(created_list))
check_list = row_exchange(created_list)
Flag = List_compare(check_list, final_conf_list, steps_need)
if Flag == True:
#print(check_list)
return steps_need + 1
for element in range(0, len(created_all)):
created_list = created_all[element]
if tuple(right_circular_shift(created_list)) not in don_t_know:
created_total.append(right_circular_shift(created_list))
check_list = right_circular_shift(created_list)
Flag = List_compare(check_list, final_conf_list, steps_need)
if Flag == True:
#print(check_list)
return steps_need + 1
for element in range(0, len(created_all)):
created_list = created_all[element]
if tuple(middle_clockwise_rotation(created_list)) not in don_t_know:
created_total.append(middle_clockwise_rotation(created_list))
check_list = middle_clockwise_rotation(created_list)
Flag = List_compare(check_list, final_conf_list, steps_need)
if Flag == True:
#print(check_list)
return steps_need + 1
#print('------------------------------------------')
#bug
bug_L=[1,3,7,14,26,51,92,159,274,453,720,1115,1727,2603,3701,4729,5620,6240,5840,4492,2120,328,5,0]
created_all1=deque(maxlen=bug_L[steps_need+1])
for i in range(len(created_total)):
if tuple(created_total[i]) not in don_t_know:
created_all1.append(created_total[i])
don_t_know.add(tuple(created_total[i]))
temp_created_all=list(created_all1)
created_all=list(created_all1)
for num in range(len(created_all)):
check_list = created_all[num]
Flag = List_compare(check_list, final_conf_list, steps_need)
if Flag == True:
#print(check_list)
return steps_need + 1
created_total = list()
initial_list=[1,2,3,4,5,6,7,8]
#final = [1,5,3,2,4,6,7,8]
#print('step',Rubik_Rectangle(initial_list, final))
nb_of_steps=Rubik_Rectangle(initial_list, final)
step_or_steps = 'step is' if nb_of_steps <= 1 else 'steps are'
print(f'{nb_of_steps} {step_or_steps} needed to reach the final configuration.')
|
hanxuwu/Learning-Python
|
Principles of Programming/Assignment/Assignment1/ASS question2.files/rubiks_rectangle.py
|
rubiks_rectangle.py
|
py
| 5,218 |
python
|
en
|
code
| 3 |
github-code
|
6
|
74413558908
|
class Solution:
def reverse(self, x: int) -> int:
result = 0
x_abs = abs(x)
limit = [-2**31, 2**31 - 1]
while x_abs:
modulo = x_abs % 10
result = result * 10 + modulo
x_abs = x_abs // 10
if x < 0:
result = -result
if result < limit[0] or result > limit[1]:
return 0
return result
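# Illustrative results, assuming the method above is called on a Solution instance:
#   Solution().reverse(123)        -> 321
#   Solution().reverse(-120)       -> -21
#   Solution().reverse(1534236469) -> 0   (reversed value 9646324351 exceeds the 32-bit limit)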
|
garimaarora1/LeetCode-2023
|
reverse-integer/reverse-integer.py
|
reverse-integer.py
|
py
| 436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28381664206
|
import socket
''' Send given value to the given address. '''
def send_value(to_addr, value):
if not to_addr:
print("Remote Address Not Set!")
return
msg = "VALUE:%d" % value
print("Sending '%s' to %s:%d" % (msg, to_addr[0], to_addr[1]))
s = socket.socket()
try:
s.connect(to_addr)
s.send(bytes(msg+"\r\n\r\n", 'utf8'))
return_msg = str(s.recv(64), 'utf8')
if return_msg == "OK":
print(" Message Received!")
else:
print(" Error: '%s'" % return_msg)
except Exception as e:
print(" Problem connecting: %s" % str(e))
finally:
s.close()
''' Watch network for incoming value and send it to callback function. '''
def watch_for_value(my_addr, callback):
if not my_addr:
print("Local Address Not Set!")
return
print("Watching network at %s:%d" % my_addr)
s = socket.socket()
s.bind(my_addr)
s.listen(1)
value = -1
while True:
cl, remote_address = s.accept()
print("Client connected from %s:%d" % remote_address)
cl_file = cl.makefile('rwb', 0)
while True:
line = cl_file.readline()
if not line or line == b'\r\n':
break
if line[:6] == b'VALUE:':
value = int(line[6:])
break
cl.send("OK")
cl.close()
# print("Value = %d" % value)
if value >= 0:
callback(value)
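# Minimal usage sketch, assuming a listener on 127.0.0.1:8000 (a placeholder address);
# without one, send_value simply reports the connection problem and returns.
if __name__ == "__main__":
    send_value(("127.0.0.1", 8000), 42)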
|
jsayles/Thing1and2
|
src/utils.py
|
utils.py
|
py
| 1,489 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13859758386
|
import bs4
import requests
from bs4 import BeautifulSoup
SUPPORTED_LANGUAGES = ("EN", "__test__")
def scrape_oxford_learners_dictionary(word: str) -> list[str]:
def url(i: int) -> str:
return (
f"https://www.oxfordlearnersdictionaries.com"
f"/definition/english/{word}_{i}"
)
# The website filters out requests without a proper User-Agent
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) "
"Gecko/20100101 Firefox/42.0",
}
list_of_definitions = []
for i in range(1, 5, 1):
response = requests.get(url(i), headers=headers)
if response.status_code != 200:
break
page = response.text
soup = BeautifulSoup(page, "html.parser")
find_pos = soup.find("span", class_="pos")
if isinstance(find_pos, bs4.Tag):
pos = find_pos.text
else:
pos = "-"
find_senses = soup.find("ol", class_="senses_multiple")
if isinstance(find_senses, bs4.Tag):
list_of_senses = find_senses.find_all(
"li", class_="sense", recursive=False
)
else:
find_senses = soup.find("ol", class_="sense_single")
if isinstance(find_senses, bs4.Tag):
list_of_senses = find_senses.find_all(
"li", class_="sense", recursive=False
)
else:
break
for sense in list_of_senses:
definition = sense.find("span", class_="def")
list_of_definitions.append(f"({pos}) " + definition.text)
return list_of_definitions
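# Minimal usage sketch; "python" is just an assumed example word and the call
# needs network access to oxfordlearnersdictionaries.com.
if __name__ == "__main__":
    for entry in scrape_oxford_learners_dictionary("python"):
        print(entry)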
|
pavelkurach/vocab-builder
|
src/dict_scrapers.py
|
dict_scrapers.py
|
py
| 1,687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29147674060
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import bs4
# In[2]:
'http://books.toscrape.com/catalogue/page-2.html'
# In[3]:
'http://books.toscrape.com/catalogue/page-3.html'
# In[4]:
base_url = 'http://books.toscrape.com/catalogue/page-{}.html'
# In[5]:
base_url.format('20')
# In[9]:
page_num = 18
# In[7]:
'http://books.toscrape.com/catalogue/page-{page_num}.html'
# In[10]:
base_url.format(page_num)
# In[12]:
res = requests.get(base_url.format(1))
# In[13]:
soup = bs4.BeautifulSoup(res.text,'lxml')
# In[14]:
soup
# In[15]:
soup.select(".product_pod")
# In[17]:
len(soup.select(".product_pod"))
# In[18]:
products = soup.select(".product_pod")
# In[19]:
example = products[0]
# In[20]:
example
# In[21]:
'star-rating Thr' in str(example)
# In[22]:
example.select(".star-rating.Two")
# In[23]:
example.select('a')
# In[24]:
example.select('a')[1]
# In[25]:
example.select('a')[1]['title']
# In[28]:
two_star_titles = []
for n in range(1,51):
scrape_url = base_url.format(n)
res = requests.get(scrape_url)
soup = bs4.BeautifulSoup(res.text, 'lxml')
books = soup.select(".product_pod")
for book in books:
if len(book.select('.star-rating.Two')) != 0:
book_title = book.select('a')[1]['title']
two_star_titles.append(book_title)
# In[29]:
two_star_titles
# In[ ]:
|
maylinaung/python-learning
|
web_scrabing_book_example.py
|
web_scrabing_book_example.py
|
py
| 1,427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15304075993
|
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import pandas
#names=['Total no of questions','Average time to solve each questions','No of questions marked for review','No of questions right','Average no of clicks in each page']
dataset = pandas.read_csv("dataset.csv")
#dataset = dataset.apply(pandas.to_numeric,errors='ignore')
##cols.remove('Index')
##cols = dataset.columns
##for col in cols:
## try:
## dataset[col] = float(dataset[col])
## except:
## pass
array = dataset.values
array=array[1:]
X = array[:,0:5]
Y = array[:,5]
print(X)
print(Y)
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
print("X_train",X_train)
print("Y_train",Y_train)
print("X_validation",X_validation)
print("Y_validation",Y_validation)
seed = 7
scoring = 'accuracy'
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print("Actual Validators",Y_validation)
print("predictions",predictions)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
model = DecisionTreeClassifier()
model.fit(X_train, Y_train)
# make predictions
predictions = model.predict(X_validation)
#prediction = model.predict([[2.8,15,18,180]])
#print("prediction",prediction)
# summarize the fit of the model
print("predictions",predictions)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
|
tunir27/ICDCN-2019
|
Chennai_Floods_code/ML.py
|
ML.py
|
py
| 2,819 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21907313029
|
from heapq import *
import heapq
class SlidingWindowMedian:
def __init__(self):
self.maxHeap, self.minHeap = [], []
def find_sliding_window_median(self, nums, k):
result = [0.0 for x in range(len(nums) - k + 1)]
for i in range(0, len(nums)):
if not self.maxHeap or nums[i] <= -self.maxHeap[0]:
heappush(self.maxHeap, -nums[i])
else:
heappush(self.minHeap, nums[i])
self.rebalance_heaps()
if i - k + 1 >= 0:
if len(self.maxHeap) == len(self.minHeap):
result[i - k + 1] = -self.maxHeap[0] / 2.0 + self.minHeap[0] / 2.0
else:
result[i - k + 1] = -self.maxHeap[0] / 1.0
elementToBeRemoved = nums[i - k + 1]
if elementToBeRemoved <= -self.maxHeap[0]:
self.remove(self.maxHeap, -elementToBeRemoved)
else:
self.remove(self.minHeap, elementToBeRemoved)
self.rebalance_heaps()
return result
def remove(self, heap, element):
ind = heap.index(element)
heap[ind] = heap[-1]
del heap[-1]
if ind < len(heap):
heapq._siftup(heap, ind)
heapq._siftdown(heap, 0, ind)
def rebalance_heaps(self):
if len(self.maxHeap) > len(self.minHeap) + 1:
heappush(self.minHeap, -heappop(self.maxHeap))
elif len(self.maxHeap) < len(self.minHeap):
heappush(self.maxHeap, -heappop(self.minHeap))
def main():
slidingWindowMedian = SlidingWindowMedian()
result = slidingWindowMedian.find_sliding_window_median([1, 2, -1, 3, 5], 2)
print("Sliding window medians are: " + str(result))
main()
#time complexity: O(N*K) N: total number of elements, K: size of the sliding window
# inserting/removing numbers from heaps of size K: O(logK)
# removing element going out the sliding window will take O(K)
#space complexity: O(K): storing numbers in sliding window
|
justinyoonsk/GTCI
|
09_Two_Heaps/02_sliding_window_median.py
|
02_sliding_window_median.py
|
py
| 2,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3240628422
|
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from entrepreneur.models import Usuario
from authentication.serializers import UserListSerializers, UserSerializer
@api_view(['GET', 'POST'])
def user_api_view(request):
    # List the active users registered in the system
if request.method == 'GET':
users = Usuario.objects.filter(is_active=True).values('id', 'username', 'email', 'password', 'first_name')
user_serializer = UserListSerializers(users, many=True)
return Response(user_serializer.data, status=status.HTTP_200_OK)
elif request.method == 'POST':
"""
Habilita la creación de usuarios solicitando
"""
user_serializer = UserSerializer(data=request.data)
if user_serializer.is_valid():
user_serializer.save()
return Response({'message': 'Usuario creado correctamente!'}, status=status.HTTP_201_CREATED)
return Response(user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def user_detail_api_view(request, pk=None):
user = Usuario.objects.filter(id=pk).first()
# Retrieve
if user:
if request.method == 'GET':
user_serializer = UserSerializer(user)
return Response(user_serializer.data, status=status.HTTP_200_OK)
# Update
elif request.method == 'PUT':
user_serializer = UserSerializer(user, data=request.data)
if user_serializer.is_valid():
user_serializer.save()
return Response(user_serializer.data, status=status.HTTP_200_OK)
return Response(user_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Delete
elif request.method == 'DELETE':
user.is_active = False
user.save()
return Response({'message': 'Usuario eliminado correctamente!'}, status=status.HTTP_201_CREATED)
return Response({'message': 'Sin datos para la consulta, favor corregir y reintentar!'},
status=status.HTTP_400_BAD_REQUEST)
|
DevApa/auth_em
|
register/api.py
|
api.py
|
py
| 2,157 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72531926909
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import os
import shutil
from pathlib import Path
from typing import Callable
import pytest
import yaml
@pytest.fixture
def tmp_compose_spec(tests_data_dir: Path, tmp_path: Path):
src = tests_data_dir / "docker-compose-meta.yml"
dst = tmp_path / "docker-compose-meta.yml"
shutil.copyfile(src, dst)
return dst
def test_create_new_osparc_config(
run_program_with_args: Callable, tmp_compose_spec: Path
):
osparc_dir = tmp_compose_spec.parent / ".osparc"
assert not osparc_dir.exists()
result = run_program_with_args(
"config",
"--from-spec-file",
str(tmp_compose_spec),
)
assert result.exit_code == os.EX_OK, result.output
assert osparc_dir.exists()
meta_cfgs = set(osparc_dir.glob("./*/metadata.y*ml"))
runtime_cfgs = set(osparc_dir.glob("./*/runtime.y*ml"))
assert len(runtime_cfgs) == len(meta_cfgs)
assert {f.parent for f in meta_cfgs} == {f.parent for f in runtime_cfgs}
service_names = set(yaml.safe_load(tmp_compose_spec.read_text())["services"].keys())
assert service_names == set({f.parent.name for f in meta_cfgs})
|
ITISFoundation/osparc-simcore
|
packages/service-integration/tests/test_command_config.py
|
test_command_config.py
|
py
| 1,231 |
python
|
en
|
code
| 35 |
github-code
|
6
|
19581541837
|
import textract
import re
import os
import requests
from bs4 import BeautifulSoup
import time
import random
# ===================================== get paper url =====================================
urls = [
'https://sj.ctu.edu.vn/ql/docgia/nam-2015/loaichuyensan-2/xuatban-782.html',
'https://sj.ctu.edu.vn/ql/docgia/nam-2017/loaichuyensan-2/xuatban-1222/chuyensan-250.html',
'https://sj.ctu.edu.vn/ql/docgia/nam-2011/loaichuyensan-2/xuatban-182.html',
'https://sj.ctu.edu.vn/ql/docgia/nam-2013/loaichuyensan-2/xuatban-442/chuyensan-250.html',
    'https://sj.ctu.edu.vn/ql/docgia/nam-2020/loaichuyensan-2/xuatban-2002.html',
'https://sj.ctu.edu.vn/ql/docgia/nam-2018/loaichuyensan-2/xuatban-1402.html',
'https://sj.ctu.edu.vn/ql/docgia/nam-2018/loaichuyensan-2/xuatban-1522.html'
]
paper_url = []
for url in urls:
page = requests.get(url)
data = BeautifulSoup(page.content, 'html.parser')
elements = data.select('.div-left.chitiet.grid_05')
for e in elements:
paper_url.append(str(e.parent.get("href")))
# ===================================== download pdf file =====================================
def download_file(writefile, url):
r = requests.get(url, allow_redirects=True)
with open(writefile, 'wb') as f:
f.write(r.content)
pdf_folder = '/content/drive/MyDrive/data/dhct'
raw_text_folder = '/content/drive/MyDrive/data/dhct_raw_txt'
clean_text_folder = '/content/drive/MyDrive/data/dhct_clean'
for i, url in enumerate(paper_url):
sleep_time = random.randint(1, 5)
print(f'file {i} -- sleep in {sleep_time}s')
file = os.path.join(pdf_folder, f'paper_{i}.pdf')
download_file(file, url)
time.sleep(sleep_time)
# ===================================== convert pdf to raw txt =====================================
def convert_pdf_to_raw_txt(pdf_file, txt_file):
text = textract.process(pdf_file, language='eng')
text = text.decode('utf-8')
with open(txt_file, 'w', encoding='utf-8') as f:
f.write(text)
# convert pdf to raw txt file. Raw mean there are still invalid characters
for file in os.listdir(pdf_folder):
file_name = file.split('.')[0] + '.txt'
pdf_file = os.path.join(pdf_folder, file)
txt_file = os.path.join(raw_text_folder, file_name)
convert_pdf_to_raw_txt(pdf_file, txt_file)
# ===================================== clean raw data =====================================
from util.shared import read_file, write_to_file
from .text_preprocessor import TextPreprocessor
processor = TextPreprocessor()
# clean raw text
for file in os.listdir(raw_text_folder):
text = read_file(os.path.join(raw_text_folder, file))
text = processor.remove_invalid_unicode(text)
text = re.sub('Tap chi Khoa hoc Trương Đai hoc Cân Thơ', 'Tạp chí Khoa học Trường Đại học Cần Thơ', text)
text = re.sub('Trương Đai hoc Cân Thơ', 'Trường Đại học Cần Thơ', text)
text = re.sub('Trương Đai hoc', 'Trường Đại học', text)
text = re.sub('Tap chı Khoa hoc Trươ ng Đai hoc Cân Thơ', 'Tạp chí Khoa học Trường Đại học Cần Thơ', text)
write_to_file(os.path.join(clean_text_folder, file), text)
print(file)
|
oldguard69/lvtn
|
server/core/archive/crawl_data_1.py
|
crawl_data_1.py
|
py
| 3,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2737490777
|
import wikipedia
import pyfiglet
word = pyfiglet.figlet_format("KAREN")
print(word)
while True:
engine=input ("Search: ")
def my_summary():
summ=wikipedia.summary(engine)
return summ
val = my_summary()
print(val)
|
Shuklabrother/Search-engine.py
|
Index.py
|
Index.py
|
py
| 272 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13015171086
|
import os
import pickle
import torch
import argparse
from foresight.models import *
from foresight.pruners import *
from foresight.dataset import *
from foresight.weight_initializers import init_net
def get_num_classes(args):
return 100 if args.dataset == 'cifar100' else 10 if args.dataset == 'cifar10' else 120
def parse_arguments():
parser = argparse.ArgumentParser(description='Zero-cost Metrics for NAS-Bench-201')
parser.add_argument('--api_loc', default='data/NAS-Bench-201-v1_0-e61699.pth',
type=str, help='path to API')
parser.add_argument('--outdir', default='./',
type=str, help='output directory')
parser.add_argument('--init_w_type', type=str, default='none', help='weight initialization (before pruning) type [none, xavier, kaiming, zero]')
parser.add_argument('--init_b_type', type=str, default='none', help='bias initialization (before pruning) type [none, xavier, kaiming, zero]')
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use [cifar10, cifar100, ImageNet16-120]')
parser.add_argument('--gpu', type=int, default=0, help='GPU index to work on')
parser.add_argument('--num_data_workers', type=int, default=2, help='number of workers for dataloaders')
parser.add_argument('--dataload', type=str, default='random', help='random or grasp supported')
parser.add_argument('--dataload_info', type=int, default=1, help='number of batches to use for random dataload or number of samples per class for grasp dataload')
parser.add_argument('--seed', type=int, default=42, help='pytorch manual seed')
parser.add_argument('--write_freq', type=int, default=1, help='frequency of write to file')
parser.add_argument('--start', type=int, default=0, help='start index')
parser.add_argument('--end', type=int, default=0, help='end index')
    parser.add_argument('--noacc', default=False, action='store_true', help='avoid loading NASBench2 api and instead load a pickle file with tuple (index, arch_str)')
args = parser.parse_args()
args.device = torch.device("cuda:"+str(args.gpu) if torch.cuda.is_available() else "cpu")
return args
if __name__ == '__main__':
args = parse_arguments()
if args.noacc:
api = pickle.load(open(args.api_loc,'rb'))
else:
from nas_201_api import NASBench201API as API
api = API(args.api_loc)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
train_loader, val_loader = get_cifar_dataloaders(args.batch_size, args.batch_size, args.dataset, args.num_data_workers)
cached_res = []
pre='cf' if 'cifar' in args.dataset else 'im'
pfn=f'nb2_{pre}{get_num_classes(args)}_seed{args.seed}_dl{args.dataload}_dlinfo{args.dataload_info}_initw{args.init_w_type}_initb{args.init_b_type}.p'
op = os.path.join(args.outdir,pfn)
args.end = len(api) if args.end == 0 else args.end
#loop over nasbench2 archs
for i, arch_str in enumerate(api):
if i < args.start:
continue
if i >= args.end:
break
res = {'i':i, 'arch':arch_str}
net = nasbench2.get_model_from_arch_str(arch_str, get_num_classes(args))
net.to(args.device)
init_net(net, args.init_w_type, args.init_b_type)
arch_str2 = nasbench2.get_arch_str_from_model(net)
if arch_str != arch_str2:
print(arch_str)
print(arch_str2)
raise ValueError
measures = predictive.find_measures(net,
train_loader,
(args.dataload, args.dataload_info, get_num_classes(args)),
args.device)
res['logmeasures']= measures
if not args.noacc:
info = api.get_more_info(i, 'cifar10-valid' if args.dataset=='cifar10' else args.dataset, iepoch=None, hp='200', is_random=False)
trainacc = info['train-accuracy']
valacc = info['valid-accuracy']
testacc = info['test-accuracy']
res['trainacc']=trainacc
res['valacc']=valacc
res['testacc']=testacc
#print(res)
cached_res.append(res)
#write to file
if i % args.write_freq == 0 or i == len(api)-1 or i == 10:
print(f'writing {len(cached_res)} results to {op}')
pf=open(op, 'ab')
for cr in cached_res:
pickle.dump(cr, pf)
pf.close()
cached_res = []
|
SamsungLabs/zero-cost-nas
|
nasbench2_pred.py
|
nasbench2_pred.py
|
py
| 4,715 |
python
|
en
|
code
| 137 |
github-code
|
6
|
70808003389
|
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
model = load_model("/home/yash/Desktop/PyImageSearch/checkpoints/emotion1.h5")
classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img_size = 48
validation_dir = "/home/yash/Desktop/PyImageSearch/deep_survelliance_detector/fer2013/validation/"
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
color_mode="grayscale",
target_size=(img_size,img_size),
batch_size=32,
class_mode="categorical",
shuffle=False)
#getting class labels
class_labels = validation_generator.class_indices
class_labels = {v:k for v,k in class_labels.items()}
#predicting
test_img = "/home/yash/Downloads/happy1.jpg"
def get_label(prediction):
for key, val in class_labels.items():
if prediction == val:
return key
return -1
def predict(test_img):
img = cv2.imread(test_img,cv2.IMREAD_GRAYSCALE)
faces = classifier.detectMultiScale(img,scaleFactor=1.2,minNeighbors=7)
face = []
for (x,y,w,h) in faces:
roi_gray = img[y:y+h,x:x+w]
roi = cv2.resize(roi_gray, (img_size,img_size), interpolation=cv2.INTER_AREA)
face.append(roi)
num_image = np.array(face, dtype=np.float32)
num_image /= 255.0
num_image = num_image.reshape(1,48,48,1)
predicted = model.predict(num_image)[0] #returns a list of probabilities of diff classes
pred = predicted.argmax() #getting the max value in the list
label = get_label(pred)
return label
pred_class = predict(test_img)
original_image = mpimg.imread(test_img)
plt.xlabel("Predicted: {0}".format(str(pred_class)))
plt.imshow(original_image)
plt.show()
|
theartificialguy/Deep-Learning-Projects
|
Emotion and Gender Classification/Emotion Classification/recognition.py
|
recognition.py
|
py
| 1,841 |
python
|
en
|
code
| 2 |
github-code
|
6
|
35154021664
|
import json
import os
import cherrypy
from jinja2 import Environment, FileSystemLoader
# GET CURRENT DIRECTORY
from helper import get_redis_connection, get_sorted_list
from scrapper import main1
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(CUR_DIR), trim_blocks=True)
class Index(object):
@cherrypy.expose
def index(self):
template = env.get_template('templates/home.html')
r = get_redis_connection()
try:
users = json.loads(r.get('ten_users'))
except:
main1() # If file was not downloaded then download the latest file.
users = json.loads(r.get('ten_users'))
return template.render(users=users, name='JAYANTH')
@cherrypy.expose
class UserService(object):
@cherrypy.tools.json_out()
def POST(self, name):
r = get_redis_connection()
users = json.loads(r.get('users'))
response = {}
try:
res = [user for user in users if name.lower() in user["name"].lower()] # search by name (substring match)
result = get_sorted_list(res, 'dict')
response['success'] = True
response['users'] = result[:10]
response['length'] = res.__len__()
return response
except:
response['success'] = False
return response
if __name__ == '__main__':
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd())
},
'/get_users': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': './static'
}
}
webapp = Index()
webapp.get_users = UserService()
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.quickstart(webapp, '/', conf)
|
jayanthns/bseproect
|
run.py
|
run.py
|
py
| 1,974 |
python
|
en
|
code
| 0 |
github-code
|
6
|
130544530
|
from apirequests.stratz import search_match, get_global_variables, get_items_per_time
def inicia_overlay(imagem_path, root, label, hero_id, players, heroes_against):
regiao_busca = (580, 492, 62, 25)
#posicao = pyautogui.locateOnScreen(imagem_path, region=regiao_busca, grayscale=True)
label['text'] = "Aguarde..."
posicao = 1
if posicao:
response_strataz = search_match(players, hero_id, heroes_against)
while response_strataz is None and len(heroes_against) > 0:
heroes_against.pop(-1)
response_strataz = search_match(players, hero_id, heroes_against)
if response_strataz is not None:
label['text'] = response_strataz
else:
label['text'] = get_items_per_time(matchid=get_global_variables()[0]['id'], playerid=get_global_variables()[1])
else:
        # pass a callable so after() schedules the retry instead of calling it immediately
        root.after(1, lambda: inicia_overlay(imagem_path, root, label, hero_id, players, heroes_against))
|
caiorodrig0/dotahelper1
|
overlay/overlayfunctions.py
|
overlayfunctions.py
|
py
| 950 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38048663522
|
from constants import API_STACK_BASE_URL
import requests
import sys
class IpStack:
def __init__(self, api_token: str, base_url: str):
if base_url is None or base_url == '':
base_url = API_STACK_BASE_URL
self.api_token = api_token
self.base_url = base_url
def get_ip_location(self, ip_address: str) -> tuple[str,str]:
endpoint = f"{self.base_url}/{ip_address}?access_key={self.api_token}"
try:
response = requests.get(endpoint, timeout=60)
json_response = response.json()
except requests.exceptions.RequestException as error:
print(f"Error: The request could not be resolved")
print(f"Provided base url: {self.base_url}")
if 'doc' in error.__dict__:
print(error.__dict__['doc'])
sys.exit(1)
if 'error' in json_response:
error_code = json_response['error']['code']
            error_message = json_response['error']['info']
            print(f"Error {error_code}: {error_message}")
sys.exit(1)
latitude = json_response['latitude']
longitude = json_response['longitude']
if latitude == 0 and longitude == 0:
print("Location not found")
sys.exit(1)
return latitude, longitude
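# Minimal usage sketch, assuming a valid ipstack API key (the token and IP below are
# placeholders); on any error the method above exits via sys.exit(1).
if __name__ == "__main__":
    client = IpStack("YOUR_API_TOKEN", "")
    lat, lon = client.get_ip_location("134.201.250.155")
    print(lat, lon)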
|
AlejandroGC-SS/ip_stack_interface
|
ip_stack.py
|
ip_stack.py
|
py
| 1,318 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37946645164
|
import ServerModule
class StateMachine():
current_state = None
nickname = None
def __init__(self, start_state):
self.current_state = start_state
def login(self, nickname):
self.nickname = nickname
return ServerModule.login(nickname)
def view_profile(self, nickname):
ServerModule.show_user_data(nickname)
self.choose_stage(['BC','welcome', 'go back'], ['SL', 'sell_item', 'sell item'])
def sell_item(self, item_id):
ServerModule.show_user_data(nickname)
ServerModule.sell_item_by_id(input('Press item id\n'))
pass
def show_items_list(self):
ServerModule.show_items_list()
self.choose_stage(['BC','welcome', 'go back'], ['BY', 'buy_item', 'buy item'])
def welcome(self):
        print(f'\nNice to see you {self.nickname}!\nWhat do you want to do?\n')
self.choose_stage(['LO','leave_or_stay', 'logout'], ['IT', 'show_items_list', 'show items list'], ['IN', 'view_profile', 'show inventory'])
def choose_stage(self, stage1, stage2, stage3 = ['', '', '']):
print(f"Press '{stage1[0]}' to {stage1[2]}\nPress '{stage2[0]}' to {stage2[2]}")
if stage3[0] != '':
print(f"Press '{stage3[0]}' to {stage3[2]}\n")
stage = input().upper()
if stage == stage1[0]:
self.current_state = stage1[1]
elif stage == stage2[0]:
self.current_state = stage2[1]
elif stage3[0] != '' and stage == stage3[0]:
self.current_state = stage3[1]
else:
print('Something went wrong. Try again.')
self.choose_stage(stage1, stage2, stage3)
|
8Air/server-client_app
|
StateMachine.py
|
StateMachine.py
|
py
| 1,678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5390312333
|
class Node:
def __init__(self, val):
self.val = val
self.next = None
def deleteNode(root, target):
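    # Deletes target in O(1) when it has a successor: copy the next node's value into target
    # and bypass that node; only a tail target forces the O(n) walk from root below.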
if not root or not target:
return "Error"
    if target.next:
        print(target.val)
        target.val = target.next.val
        target.next = target.next.next
    else:
        # target is the tail, so walk from root to unlink the last node
        while root and root.next:
            if not root.next.next:
                root.next = None
            root = root.next
def main():
root = Node(10)
point = root
for i in range(10):
point.next = Node(i)
point = point.next
target = point
deleteNode(root,target)
while root:
print(root.val)
root = root.next
if __name__ == "__main__":
main()
|
JarvisFei/leetcode
|
剑指offer代码/算法和数据操作/面试题18:删除链表的节点.py
|
面试题18:删除链表的节点.py
|
py
| 764 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41295805453
|
import pygame
from pygame.locals import *
import numpy as np
from conway import Life
class GameOfLife(Life):
def __init__(self, width = 1600, height = 1000, cell_size = 5, speed = 10):
Life.__init__(self, width // 10, height // 10)
self.width = width
self.height = height
self.cell_size = cell_size
        # Set the window size
        self.screen_size = width, height
        # Create a new window
        self.screen = pygame.display.set_mode(self.screen_size)
        # Compute the number of cells vertically and horizontally
        self.cell_width = self.width // self.cell_size
        self.cell_height = self.height // self.cell_size
        # Game speed
self.speed = speed
self.Mboard()
def draw_grid(self):
for x in range(0, self.width, self.cell_size):
pygame.draw.line(self.screen, pygame.Color('black'),
(x, 0), (x, self.height))
for y in range(0, self.height, self.cell_size):
pygame.draw.line(self.screen, pygame.Color('black'),
(0, y), (self.width, y))
def cells(self):
for row in range(self.rows):
for column in range(self.columns):
if self.board[row, column] == 1:
pygame.draw.rect(self.screen, (57, 255, 20), ((row * self.cell_size) + 1, (column * self.cell_size) +1, self.cell_size -1,
self.cell_size -1))
else:
pygame.draw.rect(self.screen, (255, 255, 255), ((row * self.cell_size) + 1, (column * self.cell_size) + 1, self.cell_size -1,
self.cell_size -1))
def run(self):
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption('Game of Life')
self.screen.fill(pygame.Color('white'))
running = True
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
self.draw_grid()
self.cells()
self.cell()
pygame.display.flip()
clock.tick(self.speed)
pygame.quit()
if __name__ == '__main__':
game = GameOfLife()
game.run()
|
hatiff/GameLife
|
Life.py
|
Life.py
|
py
| 2,437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29696388258
|
# Sales Program
# Written by Tanner McCarthy
# 9/20/21
# Prof Fried
packagePrice = 99
discount = 0
#used a while loop so that it will keep running until I am satisfied with the input given to me
#by the user
#use True so that it will always run
while True:
#used try and except so that if an error comes up that would crash the program occurs, this keeps
#the program running and ask the user again for a valid input
try:
numPack = int(input("How many packages did you purchase?\nPackages Purchased: "))
print("")
#except ValueError is to catch any error thrown so the program doesn't stop
except ValueError:
print("\nINVALID, Please try again\n")
#continue will continue the program and reask the user for a valid input
continue
#this if statement is to make sure that the user does not enter 0 or any negative numbers
#since it would not make sense to have purchased 0 or a negative amount of packages
if numPack < 0 or numPack == 0:
print("\nINVALID, Please try again\n")
#continue will continue the program and reask the user for a valid input
continue
#this else is so that when we have a valid input we can break out of this forever running while loop
else:
#break will simply break us out of the while loop and continue the rest of the program
break
#made it its own if statement bc it will read "...purchased 1 package..." and not
#"...purchased 1 packages...". All of this so that it has correct grammar
if numPack == 1:
print("Since you purchased", numPack, "package, you are NOT entitled to any discount.")
#elif statements for the rest to check what discount they fall into
elif numPack <= 9:
print("Since you purchased", numPack, "packages, you are NOT entitled to any discount.")
elif numPack >= 10 and numPack <= 19:
print("Since you purchased", numPack, "packages, you are entitled to a 10% discount.")
discount = .10
elif numPack >= 20 and numPack <= 49:
print("Since you purchased", numPack, "packages, you are entitled to a 20% discount.")
discount = .20
elif numPack >= 50 and numPack <= 99:
print("Since you purchased", numPack, "packages, you are entitled to a 30% discount.")
discount = .30
#else statement and not a elif bc it is the last to run and I do not need to check anything since
#everything I needed to check has already been checked
else:
print("Since you purchased", numPack, "packages, you are entitled to a 40% discount.")
discount = .40
#math formula to apply the discount to get the total price
totalAmt = packagePrice * numPack*(1 - discount)
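#worked example of the formula above: 20 packages fall in the 20% bracket,
#so totalAmt = 99 * 20 * (1 - 0.20) = 1980 * 0.80 = 1584.00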
#commented this print line out bc the print(f) can format a nonconstant variable
#print("The total cost after applying your discount is", totalAmt )
print(f'The total cost after applying your discount is ${totalAmt:,.2f}.\n' )
#end of the first part of the program
#Start of the second part of the program
#The Fast Freight Shipping Company program
#use True so that it will always run
while True:
#used try and except so that if an error comes up that would crash the program occurs, this keeps
#the program running and ask the user again for a valid input
try:
weight = float(input("Please enter the weight of the package.\nPackage weight: "))
print("")
#except ValueError is to catch any error thrown so the program doesn't stop
except ValueError:
print("\nINVALID, Please try again.\n")
#continue will continue the program and reask the user for a valid input
continue
#this if statement is to make sure that the user does not enter 0 or any negative numbers
#since it would not make sense to have a weight of 0 or a negative weight
if weight < 0 or weight == 0:
print("\nINVALID, Please try again.\n")
#continue will continue the program and reask the user for a valid input
continue
#this else is so that when we have a valid input we can break out of this forever running while loop
else:
#break will simply break us out of the while loop and continue the rest of the program
break
#created the variable finalCost
finalCost = float(0)
#test to see if weight is less than 2lbs it applies the correct rpp
if weight <= 2.00:
finalCost = float(weight * 1.50)
print(f'The total cost after applying our rate per pound, $1.50, the shipping cost is ${finalCost:,.2f}')
#test to see if weight is between 2lbs and 6lbs and applies the correct rpp
elif weight > 2.00 and weight <= 6.00:
finalCost = float(weight * 3.00)
print(f'The total cost after applying our rate per pound, $3.00, the shipping cost is ${finalCost:,.2f}')
#test to see if weight is between 6lbs and 10lbs and applies the correct rpp
elif weight > 6.00 and weight <= 10.00:
finalCost = float(weight * 4.00)
print(f'The total cost after applying our rate per pound, $4.00, the shipping cost is ${finalCost:,.2f}')
#else statement and not a elif bc it is the last to run and I do not need to check anything since
#everything I needed to check has already been checked
else:
finalCost = float(weight * 4.75)
print(f'The total cost after applying our rate per pound, $4.75, the shipping cost is ${finalCost:,.2f}')
#end of the second part of the program
#end of the entire program
|
TannerMcCarthy/SalesHW
|
sales.py
|
sales.py
|
py
| 5,233 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42591021729
|
import os
import numpy as np
import pickle
import argparse
from implicit.bpr import BayesianPersonalizedRanking
from implicit.nearest_neighbours import CosineRecommender
from scipy.sparse import csr_matrix
from methods import consul, oracle, PrivateRank, PrivateWalk
np.random.seed(0)
def recall(li, gt):
if gt in li:
return 1
return 0
def nDCG(li, gt):
if gt in li:
return 1 / np.log2(li.tolist().index(gt) + 2)
return 0
def list_minimum_group(li, sensitive):
return np.bincount(sensitive[li], minlength=sensitive.max() + 1).min()
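# Illustrative example: with sensitive = [1, 0, 0, 0, ...] and li = [0, 1, 2, 3], the groups
# appear 1 and 3 times respectively, so the least-represented-group count returned is 1.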
parser = argparse.ArgumentParser()
parser.add_argument('--data', choices=['100k', '1m', 'home', 'hetrec'], default='100k')
parser.add_argument('--prov', choices=['cosine', 'bpr'], default='cosine')
parser.add_argument('--sensitive', choices=['popularity', 'old'], default='popularity', help='`old` is valid only for MovieLens')
parser.add_argument('--split', type=int, default=1, help='Total number of parallel execusion (only for parallel execusion, set 1 otherwise)')
parser.add_argument('--block', type=int, default=0, help='Id of the current execusion (only for parallel excecusion, set 0 otherwise)')
args = parser.parse_args()
assert(args.sensitive == 'popularity' or args.data in ['100k', '1m'])
assert(0 <= args.block and args.block < args.split)
#
# Load Data
#
if args.data == '100k':
n = 943
m = 1682
filename = 'ml-100k/u.data'
delimiter = '\t'
elif args.data == '1m':
n = 6040
m = 3952
filename = 'ml-1m/ratings.dat'
delimiter = '::'
K = 10
if args.data == '100k' or args.data == '1m':
raw_R = np.zeros((n, m))
history = [[] for i in range(n)]
with open(filename) as f:
for r in f:
user, movie, r, t = map(int, r.split(delimiter))
user -= 1
movie -= 1
raw_R[user, movie] = r
history[user].append((t, movie))
elif args.data == 'hetrec':
raw_R = np.log2(np.load('hetrec.npy') + 1)
n, m = raw_R.shape
history = [[] for i in range(n)]
for i in range(n):
for j in np.nonzero(raw_R[i] > 0)[0]:
history[i].append((np.random.rand(), j))
elif args.data == 'home':
raw_R = np.load('Home_and_Kitchen.npy')
n, m = raw_R.shape
with open('Home_and_Kitchen_history.pickle', 'br') as f:
history = pickle.load(f)
if args.sensitive == 'popularity':
mask = raw_R > 0
if args.data == '100k':
sensitive = mask.sum(0) < 50
elif args.data == '1m':
sensitive = mask.sum(0) < 300
elif args.data == 'hetrec':
sensitive = mask.sum(0) < 50
elif args.data == 'home':
sensitive = mask.sum(0) < 50
sensitive = sensitive.astype('int')
elif args.sensitive == 'old':
sensitive = np.zeros(m, dtype='int')
if args.data == '100k':
filename = 'ml-100k/u.item'
delimiter = '|'
elif args.data == '1m':
filename = 'ml-1m/movies.dat'
delimiter = '::'
with open(filename, encoding='utf8', errors='ignore') as f:
for r in f:
li = r.strip().split(delimiter)
if '(19' in li[1]:
year = 1900 + int(li[1].split('(19')[1].split(')')[0])
elif '(20' in li[1]:
year = 2000 + int(li[1].split('(20')[1].split(')')[0])
sensitive[int(li[0]) - 1] = year < 1990
#
# Data Loaded
#
damping_factor = 0.01
tau = 5
provider_recall = 0
provider_nDCG = 0
provider_minimum = 0
oracle_recall = 0
oracle_nDCG = 0
oracle_minimum = 0
PR_recall = 0
PR_nDCG = 0
PR_minimum = 0
PW_recall = 0
PW_nDCG = 0
PW_minimum = 0
random_recall = 0
random_nDCG = 0
random_minimum = 0
consul_recall = 0
consul_nDCG = 0
consul_minimum = 0
PW_cnt = np.array(0)
consul_cnt = np.array(0)
start_index = int(n * args.block / args.split)
end_index = int(n * (args.block + 1) / args.split)
for i in range(start_index, end_index):
gt = sorted(history[i])[-1][1]
source = sorted(history[i])[-2][1]
used = [y for x, y in history[i] if y != gt]
R = raw_R.copy()
R[i, gt] = 0
mask = R > 0
if args.prov == 'bpr':
model = BayesianPersonalizedRanking(num_threads=1, random_state=0)
elif args.prov == 'cosine':
model = CosineRecommender()
sR = csr_matrix(mask.T)
model.fit(sR, show_progress=False)
if args.prov == 'bpr':
score = model.item_factors @ model.item_factors.T
else:
score = np.zeros((m, m))
for item in range(m):
for j, v in model.similar_items(item, m):
score[item, j] = v
score_remove = score.copy()
score_remove[:, used] -= score.max() + 1
score_remove -= np.eye(m) * (score.max() + 1)
list_provider = np.argsort(-score_remove[source])[:K]
provider_recall += recall(list_provider, gt)
provider_nDCG += nDCG(list_provider, gt)
provider_minimum += list_minimum_group(list_provider, sensitive)
oracle_list = oracle(score_remove[source], sensitive, tau, used, K)
oracle_recall += recall(oracle_list, gt)
oracle_nDCG += nDCG(oracle_list, gt)
oracle_minimum += list_minimum_group(oracle_list, sensitive)
# Construct the recsys graph
A = np.zeros((m, m))
rank = np.argsort(-score_remove, 1)[:, :K]
weight = 1 / np.log2(np.arange(K) + 2)
weight /= weight.sum()
A[np.arange(m).repeat(K), rank.reshape(-1)] += weight.repeat(m).reshape(K, m).T.reshape(-1)
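    # Each item gets K outgoing edges to its top-K recommendations, weighted by the
    # normalized 1/log2(position + 2) weights above, so every row of A sums to 1.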
# Consul
consul_list = consul(rank, sensitive, tau, source, used, K, access_conuter=consul_cnt)
consul_recall += recall(consul_list, gt)
consul_nDCG += nDCG(consul_list, gt)
consul_minimum += list_minimum_group(consul_list, sensitive)
# PrivateRank
PR_list = PrivateRank(A, sensitive, tau, source, used, K, damping_factor)
PR_recall += recall(PR_list, gt)
PR_nDCG += nDCG(PR_list, gt)
PR_minimum += list_minimum_group(PR_list, sensitive)
# PrivateWalk
PW_list = PrivateWalk(rank, sensitive, tau, source, used, K, access_conuter=PW_cnt)
PW_recall += recall(PW_list, gt)
PW_nDCG += nDCG(PW_list, gt)
PW_minimum += list_minimum_group(PW_list, sensitive)
# Random
np.random.seed(0)
random_score = np.random.rand(m)
random_list = oracle(random_score, sensitive, tau, used, K)
random_recall += recall(random_list, gt)
random_nDCG += nDCG(random_list, gt)
random_minimum += list_minimum_group(random_list, sensitive)
print('#')
print('# User {} - {}'.format(start_index, i))
print('#')
print('-' * 30)
print('provider recall {:.2f}'.format(provider_recall))
print('oracle recall ', oracle_recall)
print('consul recall ', consul_recall)
print('PrivateRank recall', PR_recall)
print('PrivateWalk recall', PW_recall)
print('random recall ', random_recall)
print('-' * 30)
print('provider nDCG {:.2f}'.format(provider_nDCG))
print('oracle nDCG ', oracle_nDCG)
print('consul nDCG ', consul_nDCG)
print('PrivateRank nDCG', PR_nDCG)
print('PrivateWalk nDCG', PW_nDCG)
print('random nDCG ', random_nDCG)
print('-' * 30)
print('provider least count {:.2f}'.format(provider_minimum))
print('oracle least count ', oracle_minimum)
print('consul least count ', consul_minimum)
print('PrivateRank least count', PR_minimum)
print('PrivateWalk least count', PW_minimum)
print('random least count ', random_minimum)
print('-' * 30)
print('consul access ', consul_cnt)
print('PrivateWalk access', PW_cnt)
print('-' * 30)
if not os.path.exists('out'):
os.mkdir('out')
with open('out/{}-{}-{}-{}.txt'.format(args.data, args.prov, args.sensitive, args.block), 'w') as f:
print(provider_recall, file=f)
print(provider_nDCG, file=f)
print(provider_minimum, file=f)
print(oracle_recall, file=f)
print(oracle_nDCG, file=f)
print(oracle_minimum, file=f)
print(consul_recall, file=f)
print(consul_nDCG, file=f)
print(consul_minimum, file=f)
print(consul_cnt, file=f)
print(PR_recall, file=f)
print(PR_nDCG, file=f)
print(PR_minimum, file=f)
print(PW_recall, file=f)
print(PW_nDCG, file=f)
print(PW_minimum, file=f)
print(PW_cnt, file=f)
print(random_recall, file=f)
print(random_nDCG, file=f)
print(random_minimum, file=f)
|
joisino/consul
|
evaluate.py
|
evaluate.py
|
py
| 8,347 |
python
|
en
|
code
| 5 |
github-code
|
6
|