seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
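Each row below stores one source file in the `text` column together with its repository metadata. As a minimal loading sketch (assuming the table has been exported to a Parquet file named `code_corpus.parquet`; that filename and the filter thresholds are illustrative, not part of the dataset), the columns above map directly onto a pandas DataFrame:

import pandas as pd

# Hypothetical export path; replace with wherever this table is actually stored.
df = pd.read_parquet("code_corpus.parquet")

# Example filter using the schema above: small Python files from starred repositories.
subset = df[
    (df["program_lang"] == "python")
    & (df["file_size_in_byte"] < 10_000)
    & (df["stars"].fillna(0) >= 1)
]
print(subset[["repo_name", "sub_path", "file_name", "stars"]].head())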
8224649664
|
import json, requests, io
import pandas as pd
import numpy as np
import sys, argparse
#Managing input
parser = argparse.ArgumentParser(description='Script to download lung cancer data from TCGA')
parser.add_argument('-t', '--type',
                    help='Sample type, e.g. NAD',
                    required=True,
                    choices=['NAD', 'TAD', 'NSC', 'TSC'],
                    default='NAD')
results = parser.parse_args(sys.argv[1:])
filename = results.type
if filename == "NAD":
dirname = "Adeno"
dirname2 = "NAD"
elif filename == "TAD":
dirname = "Adeno"
dirname2 = "TAD"
elif filename == "NSC":
dirname = "Squamous"
dirname2 = "NSC"
elif filename == "TSC":
dirname = "Squamous"
dirname2 = "TSC"
# Beginning of the program
cases = pd.read_csv("Data/" + dirname + "/" + dirname2 + "/" + filename + "-cases.tsv", sep='\t')
mirna_fid = []
mirna_fname = []
mirna_count = []
for index, row in cases.iterrows():
print(row['case'])
with open("json/qbyMIRNA.json", 'r') as f:
filters = json.load(f)
filters['content'][0]['content']['value'] = row['case']
cases_endpt = "https://api.gdc.cancer.gov/files"
params = {
"filters": json.dumps(filters),
"fields": "file_name,data_format,file_id",
"format": "TSV",
"size": "10000" #HACK: modificar si los casos superan los hints
}
response = requests.get(cases_endpt, params = params)
try:
df = pd.read_csv(io.StringIO(response.text), sep='\t', header=0)
mirna_count.append(df.shape[0])
mirna_fid.append(df.loc[0, "file_id"])
mirna_fname.append(df.loc[0, 'file_name'])
    except Exception:
        # No miRNA file was found for this case
        mirna_count.append(0)
        mirna_fid.append(np.nan)
        mirna_fname.append(np.nan)
cases['mirna_count'] = mirna_count
cases['mirna_fname'] = mirna_fname
cases['mirna_fid'] = mirna_fid
cases.rename(columns={'fid':'rnaseq_fid'}, inplace = True)
cases.to_csv("Data/" + dirname + "/" + dirname2 + "/" + filename + "-mirna.tsv", sep="\t", index = False)
|
josemaz/lung-mirnas
|
py/casemirna.py
|
casemirna.py
|
py
| 1,900 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27655143629
|
from db.run_sql import run_sql
from models.slot import Slot
# CREATE TABLE slots (
# id SERIAL PRIMARY KEY,
# slot_num INT,
# time_stamp VARCHAR(255),
# turbo_slot BOOLEAN
# );
# CREATE
def save(slot):
sql = "INSERT INTO slots (slot_num, time_stamp, turbo_slot) VALUES (%s, %s, %s) RETURNING id"
values = [slot.slot_num, slot.time_stamp, slot.turbo_slot]
results = run_sql(sql, values)
slot.id = results[0]['id']
return slot
# READ
def select(id):
    slot = None
    sql = "SELECT * FROM slots WHERE id = %s"
    values = [id]
    results = run_sql(sql, values)
    if results:
        result = results[0]
        slot = Slot(result['slot_num'], result['time_stamp'], result['turbo_slot'], result['id'])
    return slot
def select_all():
slots = []
sql = "SELECT * FROM slots"
results = run_sql(sql)
for row in results:
slot = Slot(row['slot_num'], row['time_stamp'], row['turbo_slot'], row['id'])
slots.append(slot)
return slots
# UPDATE
def update(slot):
sql = "UPDATE slots SET (slot_num, time_stamp, turbo_slot) = (%s, %s, %s) WHERE id = %s"
values = [slot.slot_num, slot.time_stamp, slot.turbo_slot, slot.id]
run_sql(sql, values)
# DELETE
def delete(id):
sql = "DELETE * FROM slots WHERE id = %s"
values = [id]
run_sql(sql, values)
def delete_all():
sql = "DELETE FROM slots"
run_sql(sql)
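# Hypothetical usage sketch (not part of the original repository module; it assumes a
# configured run_sql connection and that Slot can be built as Slot(slot_num, time_stamp, turbo_slot)):
#   slot = Slot(3, "2022-01-01 09:00", True)
#   save(slot)               # INSERT ... RETURNING id, stores the new id on slot
#   fetched = select(slot.id)
#   update(slot)
#   delete(slot.id)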
|
MistaRae/TurboGym
|
turbo_gym/repositories/slot_repository.py
|
slot_repository.py
|
py
| 1,401 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25070333505
|
from django.db import models
from django.contrib import admin
from django import forms
import purplship.server.providers.models as carriers
def model_admin(model):
class _Admin(admin.ModelAdmin):
list_display = ("__str__", "test", "active")
exclude = ["active_users", "metadata"]
formfield_overrides = {
models.CharField: {
"widget": forms.TextInput(
attrs={
"type": "text",
"readonly": "true",
"class": "vTextField",
"data - lpignore": "true",
"autocomplete": "keep-off",
"onfocus": "this.removeAttribute('readonly');",
}
)
}
}
def get_queryset(self, request):
query = super().get_queryset(request)
return query.filter(created_by=None)
return type(f"{model.__class__.__name__}Admin", (_Admin,), {})
for name, model in carriers.MODELS.items():
admin.site.register(model, model_admin(model))
|
danh91/purplship
|
server/modules/core/purplship/server/providers/admin.py
|
admin.py
|
py
| 1,114 |
python
|
en
|
code
| null |
github-code
|
6
|
30281915177
|
def near_ten(n):
# nearten = 10, 11, 12, 18, 19, 20, 21, 22, etc
satuan = n % 10
# if satuan == 0 or satuan == 1 or satuan == 2 or satuan == 8 or satuan == 9:
    if satuan in [0, 1, 2, 8, 9]:
        return True
    else:
        return False
bilangan = int(input('Enter a number: '))
print(near_ten(bilangan))
|
TIxKostan/latihan_Python
|
near_ten.py
|
near_ten.py
|
py
| 338 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39005103669
|
#!/usr/local/bin/python
# The previous line (which must be the first one to work) makes the script self-executing,
# assuming that the system has the Python interpreter at path /usr/local/bin/python.
# This wants to be run in Python 3.
# Reference Pre-Processor
# Given: A string reference
# An integer horizon, which tells us how far to look ahead if we do look ahead
# Return: A suffix array
#
# by Moyaccercchi, 19th of Apr 2015
#
# version 2:
# allowing two-SNIPs for non-nested graphs as long as they are not so close to each other
# that several two-SNIPs appear in the same virtual read
import collections
fo = open('in.txt', 'r')
referencepath = fo.readline().strip()
horizon = int(fo.readline().strip())
fo.close()
def loadTextFromFile(filepath):
fhandle = open(filepath, 'r')
lastread = fhandle.readline().strip()
if (filepath[-6:].lower() == '.fasta'):
# We ignore the first read, as it is a comment line.
res = ''
while (lastread != ""):
lastread = fhandle.readline().strip()
res += lastread
else:
res = lastread
fhandle.close()
return res
def generate_suffix_array(referencepath, horizon):
ret = collections.defaultdict(lambda: '')
reference = loadTextFromFile(referencepath)
if (referencepath[-6:].lower() == '.fasta'):
# string reference in FASTA
for i in range(0, len(reference)):
ret[reference[i:i+horizon]] += str(i) + ","
else:
if '(' in reference:
# (two-SNIP, non-nested) graph reference in STPU
i = 0;
lr = len(reference);
while i < lr:
# ATTENTION!
                # So far, this assumes that there only ever is one two-SNIP per read;
                # having multiple two-SNIPs in the same read would require a somewhat
                # more elaborate approach. =)
# imagine the following reads as [ ] and the following reference:
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read AGAGA without graph
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read GAGA, find '(' in last position, expand by 4
# [ ] <- read GAGAT and GAGAC
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read AGATA and AGACA
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read GATAG and GACAG
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read ATAGA and ACAGA
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read TAGAG and CAGAG
#
# instead of i+1, do i+5 (or i+4 and then i+1 due to the loop)
# also, unexpand
#
# ...AGAGA(T|C)AGAGA...
# [ ] <- read AGAGA, unexpanded
#
rf = reference[i:i+horizon]
if rf[len(rf)-1] == '(':
horizon += 4
rf = reference[i:i+horizon]
if '(' in rf:
rfs = [];
grStart = rf.find("(", 0, len(rf));
rfs.append(rf[0:grStart] + rf[grStart+1] + rf[grStart+5:len(rf)]);
rfs.append(rf[0:grStart] + rf[grStart+3] + rf[grStart+5:len(rf)]);
for rfline in rfs:
ret[rfline] += str(i) + ","
else:
ret[rf] += str(i) + ","
if rf[0] == '(':
horizon -= 4
i += 4
i += 1
else:
# string reference in STPU
for i in range(0, len(reference)):
ret[reference[i:i+horizon]] += str(i) + ","
return ret
def dicttoadjacency(ourdict):
ret = []
for fromnode, tonode in ourdict.items():
ret.append(fromnode + ' -> ' + tonode[:-1])
return '\n'.join(sorted(ret))
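# Illustrative trace (hypothetical input, not from in.txt): for a plain STPU string
# reference "ABAB" with horizon 2, generate_suffix_array maps "AB" -> "0,2,", "BA" -> "1,"
# and "B" -> "3,", and dicttoadjacency renders the sorted adjacency lines
# "AB -> 0,2", "B -> 3", "BA -> 1".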
res = dicttoadjacency(generate_suffix_array(referencepath, horizon))
fo = open('out.txt', 'w')
fo.write(res)
fo.close()
|
Moyaccercchi/bio-info-graph
|
python/2_reference_preprocessor/e2.py
|
e2.py
|
py
| 4,348 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71455781947
|
from flask import Flask,flash, render_template, url_for, request, redirect
import googleapiclient.discovery
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from gtts import gTTS
import heapq
import nltk
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
from nltk import pos_tag
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = "AIzaSyDp9zqixqm846mM_kH9LyNsUp95IMNMfiM"
import numpy as np
import pandas as pd
import re
import os
import tensorflow as tf
import tensorflow_hub as hub
import re
from numpy import array
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.models import load_model
from keras.models import Sequential
import demoji
import urllib.parse as urlparse  # needed by the urlparse-based videoidfetcher below
from textblob import TextBlob
lemmatizer=WordNetLemmatizer()
word_to_id = imdb.get_word_index()
def videoidfetcher(link):
url_data = urlparse.urlparse(link)
query = urlparse.parse_qs(url_data.query)
video = query["v"][0]
return video
def init():
global model,graph
# load the pre-trained Keras model
model = load_model('sentiment_analysis.h5')
graph = tf.compat.v1.get_default_graph()
youtube = googleapiclient.discovery.build(api_service_name, api_version, developerKey = DEVELOPER_KEY)
stop=stopwords.words("english")
punc=[pun for pun in string.punctuation]
stop+=punc
print(stop)
import warnings
warnings.filterwarnings("ignore")
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route('/')
def hello():
return render_template('index.html')
def videoidfetcher(link):
match = re.search("(?:\/|%3D|v=|vi=)([0-9A-z-_]{11})(?:[%#?&]|$)", link)
if match:
result = match.group(1)
else:
result = ""
return result
def sent_anly_prediction(comment):
words = comment.split()
x_test = [[word_to_id[word] if (word in word_to_id and word_to_id[word]<=5000) else 0 for word in words]]
x_test = sequence.pad_sequences(x_test, maxlen=500)
vector = np.array([x_test.flatten()])
with graph.as_default():
probability = model.predict(array([vector][0]))[0][0]
print(probability)
class1 = model.predict_classes(array([vector][0]))[0][0]
if class1 == 0:
return "Negative"
else:
return "Positive"
@app.route('/text', methods = ['POST'])
def predict_text():
if request.method == 'POST':
text = request.form['text']
strip_special_chars = re.compile("[^A-Za-z0-9 ]+")
comment = text.lower().replace("<br />", " ")
comment=re.sub(strip_special_chars, "", comment.lower())
g=TextBlob(comment).sentiment.polarity
g=int((g+1)*50)
result_dic = {
'positive':g,
'negative':100-g,
'text':text,
}
print(g)
return render_template('index.html',prediction=result_dic)
@app.route('/', methods = ['POST'])
def upload_file():
if request.method == 'POST':
my_colors={}
my_colors[1]="primary"
my_colors[2]="secondary"
my_colors[3]="success"
my_colors[4]="danger"
my_colors[5]="warning"
text = request.form['youtube_video_url']
video_id= videoidfetcher(text)
if(video_id==""):
flash('Looks like you have entered invalid youtube link!!!')
return render_template('index.html')
print(video_id)
heap_of_good_likes=[]
most_liked_comments=[]
query_results =youtube.commentThreads().list(part="snippet",maxResults=100,textFormat="plainText",order="relevance",videoId=video_id).execute()
negative=0
positive=0
for x in (query_results['items']):
comment=x['snippet']['topLevelComment']['snippet']['textDisplay']
strip_special_chars = re.compile("[^A-Za-z0-9 ]+")
comment = comment.lower().replace("<br />", " ")
comment=re.sub(strip_special_chars, "", comment.lower())
cleaned_comment=comment
if(TextBlob(cleaned_comment).sentiment.polarity<0):
print(cleaned_comment)
print(TextBlob(cleaned_comment).sentiment.polarity)
negative=negative+1
else:
print(cleaned_comment)
print(TextBlob(cleaned_comment).sentiment.polarity)
positive=positive+1
get_like_count=x['snippet']['topLevelComment']['snippet']['likeCount']
if len(heap_of_good_likes)<5:
heapq.heappush(heap_of_good_likes,(get_like_count,comment));
else:
top=heapq.heappop(heap_of_good_likes)
if(top[0]<get_like_count):
heapq.heappush(heap_of_good_likes,(get_like_count,comment));
else:
heapq.heappush(heap_of_good_likes,top)
while heap_of_good_likes:
most_liked_comments.append(heapq.heappop(heap_of_good_likes))
most_liked_comments.reverse()
my_positive=int((positive/(positive+negative))*100)
my_negative=100-my_positive
result_dic = {
'positive':my_positive,
'negative':my_negative,
'youtube_video':video_id,
'most_liked_comments':most_liked_comments,
'mycolors':my_colors
}
return render_template('index.html',results=result_dic)
def get_simple_POS(tag):
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
def deEmojify(text):
regrex_pattern = re.compile(pattern = "["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags = re.UNICODE)
return regrex_pattern.sub(r'',text)
def cleanwords(sentence):
sentence_emogis=demoji.findall(sentence)
sentence_emogis_short=" "
for value in sentence_emogis.values():
sentence_emogis_short=sentence_emogis_short+(str(value)+" ")
sentence=deEmojify(sentence)
words=word_tokenize(sentence)
words=[lemmatizer.lemmatize(word,pos=get_simple_POS(pos_tag(word)[0][1])).lower() for word in words if not word.lower() in stop and not word.isdigit()]
return " ".join(words)
if __name__ == '__main__':
init()
app.config['TEMPLATES_AUTO_RELOAD']=True
app.run(debug = False)
|
Anmol567/Youtube_Comment_Reviewer
|
my_new_flask_app.py
|
my_new_flask_app.py
|
py
| 6,600 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41207274862
|
"""Environment using Gymnasium API for Franka robot.
The code is inspired by the D4RL repository hosted on GitHub (https://github.com/Farama-Foundation/D4RL), published in the paper
'D4RL: Datasets for Deep Data-Driven Reinforcement Learning' by Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, Sergey Levine.
This code was also implemented over the repository relay-policy-learning on GitHub (https://github.com/google-research/relay-policy-learning),
published in Relay Policy Learning: Solving Long-Horizon Tasks via Imitation and Reinforcement Learning, by
Abhishek Gupta, Vikash Kumar, Corey Lynch, Sergey Levine, Karol Hausman
Original Author of the code: Abhishek Gupta & Justin Fu
The modifications made involve separating the Kitchen environment from the Franka environment and adding support for compatibility with
the Gymnasium and Multi-goal APIs.
This project is covered by the Apache 2.0 License.
"""
from os import path
import mujoco
import numpy as np
from gymnasium import spaces
from gymnasium.envs.mujoco.mujoco_env import MujocoEnv
from gymnasium_robotics.envs.franka_kitchen.ik_controller import IKController
from gymnasium_robotics.utils.mujoco_utils import MujocoModelNames, robot_get_obs
from gymnasium_robotics.utils.rotations import euler2quat
MAX_CARTESIAN_DISPLACEMENT = 0.2
MAX_ROTATION_DISPLACEMENT = 0.5
DEFAULT_CAMERA_CONFIG = {
"distance": 2.2,
"azimuth": 70.0,
"elevation": -35.0,
"lookat": np.array([-0.2, 0.5, 2.0]),
}
class FrankaRobot(MujocoEnv):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 10,
}
def __init__(
self,
model_path="../assets/kitchen_franka/franka_assets/franka_panda.xml",
frame_skip=50,
ik_controller: bool = True,
control_steps=5,
robot_noise_ratio: float = 0.01,
default_camera_config: dict = DEFAULT_CAMERA_CONFIG,
**kwargs,
):
xml_file_path = path.join(
path.dirname(path.realpath(__file__)),
model_path,
)
self.control_steps = control_steps
self.robot_noise_ratio = robot_noise_ratio
observation_space = (
spaces.Box(low=-np.inf, high=np.inf, shape=(9,), dtype=np.float32),
)
super().__init__(
xml_file_path,
frame_skip,
observation_space,
default_camera_config=default_camera_config,
**kwargs,
)
self.init_ctrl = np.array([0, 0, 0, -1.57079, 0, 1.57079, 0, 255])
if ik_controller:
self.controller = IKController(self.model, self.data)
action_size = 7 # 3 translation + 3 rotation + 1 gripper
else:
self.controller = None
action_size = 8 # 7 joint positions + 1 gripper
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=(action_size,)
)
# Actuator ranges
ctrlrange = self.model.actuator_ctrlrange
self.actuation_range = (ctrlrange[:, 1] - ctrlrange[:, 0]) / 2.0
self.actuation_center = (ctrlrange[:, 1] + ctrlrange[:, 0]) / 2.0
self.model_names = MujocoModelNames(self.model)
def step(self, action):
action = np.clip(action, -1.0, 1.0)
if self.controller is not None:
current_eef_pose = self.data.site_xpos[
self.model_names.site_name2id["EEF"]
].copy()
target_eef_pose = current_eef_pose + action[:3] * MAX_CARTESIAN_DISPLACEMENT
quat_rot = euler2quat(action[3:6] * MAX_ROTATION_DISPLACEMENT)
current_eef_quat = np.empty(
4
) # current orientation of the end effector site in quaternions
target_orientation = np.empty(
4
) # desired end effector orientation in quaternions
mujoco.mju_mat2Quat(
current_eef_quat,
self.data.site_xmat[self.model_names.site_name2id["EEF"]].copy(),
)
mujoco.mju_mulQuat(target_orientation, quat_rot, current_eef_quat)
ctrl_action = np.zeros(8)
# Denormalize gripper action
ctrl_action[-1] = (
self.actuation_center[-1] + action[-1] * self.actuation_range[-1]
)
for _ in range(self.control_steps):
delta_qpos = self.controller.compute_qpos_delta(
target_eef_pose, target_orientation
)
ctrl_action[:7] = self.data.ctrl.copy()[:7] + delta_qpos[:7]
                # Do not use the `do_simulation` method from MujocoEnv: it raises a value error due to the discrepancy
                # between the action space and the simulation control input when using the IK controller.
                # TODO: eliminate error check in MujocoEnv (action space can be different from simulation control input).
self.data.ctrl[:] = ctrl_action
mujoco.mj_step(self.model, self.data, nstep=self.frame_skip)
if self.render_mode == "human":
self.render()
else:
# Denormalize the input action from [-1, 1] range to the each actuators control range
action = self.actuation_center + action * self.actuation_range
self.do_simulation(action, self.frame_skip)
if self.render_mode == "human":
self.render()
obs = self._get_obs()
return obs, 0.0, False, False, {}
def _get_obs(self):
# Gather simulated observation
robot_qpos, robot_qvel = robot_get_obs(
self.model, self.data, self.model_names.joint_names
)
# Simulate observation noise
robot_qpos += self.robot_noise_ratio * self.np_random.uniform(
low=-1.0, high=1.0, size=robot_qpos.shape
)
robot_qvel += self.robot_noise_ratio * self.np_random.uniform(
low=-1.0, high=1.0, size=robot_qvel.shape
)
return np.concatenate((robot_qpos.copy(), robot_qvel.copy()))
def reset_model(self):
qpos = self.init_qpos
qvel = self.init_qvel
self.data.ctrl[:] = self.init_ctrl
self.set_state(qpos, qvel)
obs = self._get_obs()
return obs
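# Hypothetical usage sketch (not part of the original module; it assumes the MuJoCo assets
# referenced by model_path are available and gymnasium's standard reset/step loop is used):
#   env = FrankaRobot(render_mode="rgb_array")
#   obs = env.reset_model()
#   obs, reward, terminated, truncated, info = env.step(env.action_space.sample())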
|
Skeli9989/Gymnasium-Robotics
|
gymnasium_robotics/envs/franka_kitchen/franka_env.py
|
franka_env.py
|
py
| 6,370 |
python
|
en
|
code
| null |
github-code
|
6
|
6193285358
|
#! /usr/bin/env python
"""
Clean a text file containing tabular data.
Leon Hostetler, June 9, 2018
USAGE: python clean.py
"""
from __future__ import division, print_function
import numpy as np
import sys
# Filenames
inputfile = 'inputdata.txt'
outputfile = 'outputdata.txt'
# Import the data from the file. Commented rows are not imported.
# Only import the first five columns
data = np.genfromtxt(inputfile, comments='#', usecols=(0,1,2,3,4), dtype=str)
# Clean the first two columns by replacing each 'p' with '.'
# Then recombine the columns
cleancol1 = [s.replace('p' , '.') for s in data[:,0]]
cleancol2 = [s.replace('p' , '.') for s in data[:,1]]
data1 = np.column_stack([cleancol1,cleancol2,data[:,2],data[:,3],data[:,4]])
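# For example (illustrative values, not taken from the actual input file): a first-column
# entry such as "0p125" becomes "0.125" after the replacement above.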
# Remove the duplicate rows
# NOTE: This changes the order of the rows in the array
data2 = np.vstack(list({tuple(row) for row in data1}))
# Write the first row to the output file
with open(outputfile, "w") as text_file:
text_file.write("#col1 col2 col3 col4 col5\n")
# Append the cleaned data to the output file
f = open(outputfile, "a")
np.savetxt(f, data2, fmt='%s')
f.close()
|
leonhostetler/sample-programs
|
python/data_cleaning/clean.py
|
clean.py
|
py
| 1,127 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25010873745
|
"""
В одномерном массиве найти сумму элементов, находящихся между минимальным и максимальным элементами.
Сами минимальный и максимальный элементы в сумму не включать.
"""
from random import randint
arr_spread = 10
arr_size = 10
arr = [randint(1, arr_spread) for _ in range(arr_size)]
print(arr)
lim1, lim2 = 0, arr_spread+1
lower_index, upper_index = 0, 0
for i in arr:
lim1 = i if lim1 < i else lim1
lim2 = i if lim2 > i else lim2
lower_index, upper_index = (arr.index(lim2), arr.index(lim1)) if arr.index(lim1) > arr.index(lim2) \
else (arr.index(lim1), arr.index(lim2))
if lower_index == upper_index-1:
    print(f"Max and min elements are adjacent ({lower_index} and {upper_index}), sum will be zero")
else:
summ = 0
for j in arr[lower_index+1:upper_index]:
summ += j
    print(f'Sum of elements between arr[{lower_index}] and arr[{upper_index}] = {summ}')
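# Worked example (illustrative): for arr = [3, 1, 4, 5, 9, 2] the minimum 1 sits at index 1
# and the maximum 9 at index 4, so the elements strictly between them are 4 and 5 and the
# printed sum is 9.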
|
the-nans/py2-repo_gb
|
lesson3_hw/l3_task6.py
|
l3_task6.py
|
py
| 1,028 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
5556252937
|
from data.legacy_datagen import eddy_forcing,spatial_filter_dataset
from data.high_res_dataset import HighResCm2p6
from constants.paths import FINE_CM2P6_PATH,TEMPORARY_DATA
from utils.xarray_oper import plot_ds,fromtorchdict
from data.load import load_grid,load_xr_dataset
import xarray as xr
import os
import numpy as np
def just_filtering(u_v_dataset, grid_data, scale_filter):
return spatial_filter_dataset(u_v_dataset, grid_data, scale_filter)
def main():
path = FINE_CM2P6_PATH(True,False)
# ds = xr.open_zarr(path).isel(time = [0,])
sigma = 4
# ds = ds.drop('surface_temp').drop('xt_ocean yt_ocean'.split())
# grid_data = load_grid(ds.copy(),spacing = "asdf")
isel_dict = {
key + v :slice(1500,2500) for key in 'u t'.split() for v in 'lon lat'.split()
}
# ds = ds#.isel(**isel_dict)
# grid_data = grid_data#.isel(**isel_dict)
ds,_ = load_xr_dataset('--spacing long_flat --mode data'.split())
ds = ds.isel(**isel_dict)
grid_data = ds.rename(
{'ulat':'yu_ocean','ulon':'xu_ocean','u':'usurf','v':'vsurf'}
).isel(depth = 0,time = [0]).drop(['temp','dxt','dyt']).drop(['tlat','tlon'])
ds1 = grid_data.drop('dxu dyu'.split())
forces = eddy_forcing(ds1,grid_data,sigma)
rename = {'yu_ocean':'lat','xu_ocean':'lon',\
'usurf':'u','vsurf':'v','S_x':'Su','S_y':'Sv'}
# rename1 = {'yu_ocean':'ulat','xu_ocean':'ulon',\
# 'yt_ocean':'tlat','xt_ocean':'tlon',\
# 'usurf':'u','vsurf':'v','surface_temp':'temp'}
forces = forces.rename(
rename
).isel(time = 0)
# path1 = os.path.join(TEMPORARY_DATA,'arthur_data.nc')
# forces.to_netcdf(path1)
# ds = grid_data.rename(rename1)
# ds['depth'] = [0]
hrcm = HighResCm2p6(ds,sigma,filtering = 'gaussian')
data_vars,coords = hrcm[0]
x = xr.Dataset(data_vars = data_vars,coords = coords)
x = x.isel(time = 0,depth = 0).drop('time depth'.split())
# print(x)
# return
plot_ds(np.log10(np.abs(x)),'cem_forces.png',ncols = 3,)
x = x.drop('Stemp temp'.split())
x1 = x.rename(
{key:'cem'+key for key in x.data_vars.keys()}
)
f = xr.merge([x1,forces])
plot_ds(f,'arthur_forces.png',ncols = 3,)
err = np.log10(np.abs(x - forces))
plot_ds(err,'arthur_forces_err.png',ncols = 3,)
if __name__ == '__main__':
main()
|
CemGultekin1/cm2p6
|
temp/data_comparison.py
|
data_comparison.py
|
py
| 2,408 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13383663470
|
# Alex Zaremba
# October 11th, 2022
file_name = input('Please enter name of the file: ') # Ask user for file name
out_file = file_name + ".html" # Convert file to html
body_h1 = input('Please enter character\'s name: ') # Request name of character
body_h1 = "<h1>" + body_h1 + "</h1>\n"
body_h2 = input('Please enter the actor\'s name: ') # Request name of actor
body_h2 = "<h2>Played by " + body_h2 + "</h2>\n"
body_description = input('Please enter a description of the character: ') # Request a description of the character
body_description = "<p>" + body_description + "</p>\n"
body_img = input('Please enter location of an image file of the character, either on your PC or through a url: ') # Request location of character image
body_img = '<img src="' + body_img + '" alt="' + file_name + '">\n'
combined_body = file_name + body_h1 + body_h2 + body_description + body_img # Merge user inputs into one variable
# Define template pages
page_head = ()
page_footer = ()
# Read template files and put into variables
f = open('assets/page-head.txt', "r")
combined_head = ''.join(f.readlines())
f.close()
f = open('assets/page-footer.txt', "r")
combined_footer = ''.join(f.readlines())
f.close()
# Write HTML file
f = open(out_file, "w")
f.write(combined_head + combined_body + combined_footer)
f.close()
print("Created {}".format(out_file))
|
abzaremba97/Character-Generator
|
create-webpage.py
|
create-webpage.py
|
py
| 1,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22895124073
|
import serial
import getch
serialport = serial.Serial("/dev/ttyS0")
serialport.baudrate = 115200
while True:
x = getch.getch()
if "W" == x.upper():
# Forwards
command = "+100+10015+00"
elif "S" == x.upper():
# Backwards
command = "-250-25015+00"
elif x == "A" or x == "a":
# Left
command = "-150+15015+00"
elif x == "D" or x == "d":
# Right
command = "+150-15015+00"
elif x == "h" or x == "H":
# Stop
command = "+000+00015+00"
else:
break
serialport.write(command.encode())
|
SinaRabiee/Digital_LAB_SSH
|
ssh-raspberry.py
|
ssh-raspberry.py
|
py
| 597 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2839743102
|
'''
Given an array nums of n integers and an integer target, are there elements a, b, c, and d in nums such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note:
The solution set must not contain duplicate quadruplets.
Example:
Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
'''
# time complexity O(n^3)
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
res = []
for i in range(len(nums)-3):
if i > 0 and nums[i] == nums[i-1]:
continue
            if nums[i]*4 > target:
break
for j in range(i+1, len(nums)-2):
if j > i+1 and nums[j] == nums[j-1]:
continue
l, r = j+1, len(nums) - 1
while l < r:
result = nums[i] + nums[j] + nums[l] + nums[r]
if result == target:
res.append([nums[i], nums[j], nums[l], nums[r]])
while l < r and nums[l] == nums[l+1]: l+=1
while l < r and nums[r] == nums[r-1]: r-=1
l += 1
r -= 1
elif result < target:
l += 1
else:
r -= 1
return res
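# Hypothetical quick check (not part of the original solution file), using the example
# from the problem statement above:
if __name__ == "__main__":
    print(Solution().fourSum([1, 0, -1, 0, -2, 2], 0))
    # prints [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]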
|
sgmzhou4/leetcode_problems
|
Array/18_4Sum.py
|
18_4Sum.py
|
py
| 1,549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36065549678
|
from dotenv import load_dotenv
import os
import requests
from pprint import pprint
from datetime import datetime, timedelta
from flight_data import FlightData
load_dotenv()
API_KEY = os.getenv("flight_search_api")
KIWI_ENDPOINT = "https://api.tequila.kiwi.com"
class FlightSearch:
#This class is responsible for talking to the Flight Search API.
def __init__(self):
self.header = {
"apikey":API_KEY,
}
def iata_code(self, city_name):
params = {
'term': city_name,
'location_types': 'city',
'limit': 1,
}
response = requests.get(url=f'{KIWI_ENDPOINT}/locations/query', headers=self.header, params=params)
print(response.status_code)
# Check response status code
if response.status_code == 200:
# Parse JSON response
data = response.json()
# Extract city code from the response
if 'locations' in data and len(data['locations']) > 0:
city_code = data['locations'][0]['code']
                if city_code is None and len(data['locations'][0]['alternative_departure_points']) > 0:
distance = 200
for nearby in data['locations'][0]['alternative_departure_points']:
if nearby['distance'] < distance:
distance = nearby['distance']
nearby_citycode = nearby['id']
return nearby_citycode
                elif city_code is None:
return "City not found"
return city_code
else:
return "City not found"
else:
return f"Error occurd: {response.raise_for_status}"
def find_flights(self, origin_city_code,destination_city_code):
presentday = datetime.now()
tomorrow_date = presentday + timedelta(1)
tomorrow_date = (tomorrow_date).strftime('%d/%m/%Y')
six_months_date = presentday + timedelta(180)
six_months_date = (six_months_date).strftime('%d/%m/%Y')
params={
"fly_from": origin_city_code,
"fly_to": destination_city_code,
"date_from": tomorrow_date,
"date_to": six_months_date,
"nights_in_dst_from": 7,
"nights_in_dst_to": 28,
"flight_type": "round",
"one_for_city": 1,
"max_stopovers": 0,
"curr": "INR"
}
response = requests.get(url=f'{KIWI_ENDPOINT}/v2/search', headers=self.header,params=params)
try:
data = response.json()["data"][0]
except IndexError:
print(f"No flights found for {destination_city_code}.")
return None
flight_data = FlightData(
price=data["price"],
origin_city=data["route"][0]["cityFrom"], #return city name
origin_airport=data["route"][0]["flyFrom"], # return iata code of airport
destination_city=data["route"][0]["cityTo"], #return city name
destination_airport=data["route"][0]["flyTo"], # return iata code of airport
out_date=data["route"][0]["local_departure"].split("T")[0],
return_date=data["route"][1]["local_departure"].split("T")[0]
)
print(f"{flight_data.destination_city}: ₹{flight_data.price}")
return flight_data
# print(f"{arrival_city}: {}")
|
Shivam29k/Python_Projects
|
flight_deals_alert/flight_search.py
|
flight_search.py
|
py
| 3,453 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12712406140
|
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster.hierarchy as scp
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
text_model = hub.load(module_url)
def embed_compare(sentence):
text_embedding = text_model(sentence)
sim_mat = np.inner(text_embedding,text_embedding)
return sim_mat
def plot_sim(sim_mat, labels):
im = plt.imshow(sim_mat)
plt.colorbar(im,fraction=0.046, pad=0.04)
plt.xticks(np.arange(len(labels)), labels, rotation=90)
plt.yticks(np.arange(len(labels)), labels)
plt.title('Semantic similarity')
for i in range(len(labels)):
for j in range(len(labels)):
plt.text(i, j, np.round(sim_mat[i, j], 2), color='black', ha='center', va='center', fontsize=8)
words = ['An','All','My','Your','This','That','These','Those']
sim_word = embed_compare(words)
phrases = ['An apple','All apples','My apples','Your apples','This apple','That apple','These apples','Those apples']
sim_phrase = embed_compare(phrases)
f = plt.figure(figsize=(10,8))
plt.suptitle('Universal Sentence Encoder')
plt.subplot(221)
plot_sim(sim_word, words)
plt.subplot(222)
plot_sim(sim_phrase, phrases)
plt.subplot(223)
Zw = scp.linkage(1-sim_word)
dendrow = scp.dendrogram(Zw, labels=words,leaf_font_size=8, leaf_rotation=90)
plt.subplot(224)
Zp = scp.linkage(1-sim_phrase)
dendrop = scp.dendrogram(Zp, labels=phrases,leaf_font_size=8, leaf_rotation=90)
plt.tight_layout()
plt.show()
f.savefig('../Fig/USE_analysis.png')
f.savefig('../Fig/USE_analysis.svg')
nouns = ['apple','apples','onion','onions','carrot','carrots','orange','oranges']
sim_nouns = embed_compare(nouns)
Zn = scp.linkage(1-sim_nouns)
f2 = plt.figure()
plt.suptitle('Universal Sentence Encoder')
plt.subplot(121)
plot_sim(sim_nouns, nouns)
plt.subplot(122)
dendron = scp.dendrogram(Zn, labels=nouns,leaf_font_size=8, leaf_rotation=90)
plt.tight_layout()
plt.show()
f2.savefig('../Fig/USE_noun_analysis.png')
f2.savefig('../Fig/USE_noun_analysis.svg')
|
mgkumar138/determiners-objdet
|
submodels/universal_sentence_encoder_analysis.py
|
universal_sentence_encoder_analysis.py
|
py
| 2,035 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11454255593
|
import torch
import os
import chartmodel
from torch.utils.data import Dataset
import albumentations
from albumentations.pytorch import ToTensorV2 as AT
from charttype import dataset
batch_size = 32
num_workers = 4
if __name__ == '__main__':
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
img_size = 256
data_transforms_test = albumentations.Compose([
albumentations.Resize(img_size, img_size),
albumentations.Normalize(),
AT()
])
test_list = list()
    test_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data/chart_type/test/images/")
for pic in os.listdir(test_path):
test_list.append(test_path + pic)
testset = dataset.ChartsDataset('/', test_list, transform=data_transforms_test, mode="test")
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, num_workers=num_workers)
model = chartmodel.get_model()
model.load_state_dict(torch.load('../data/chart_type/model.pt', map_location=device))
model.eval()
for img, img_filename in testloader:
with torch.no_grad():
img = img.to(device)
output = model(img)
pred = torch.argmax(output, dim=1).cpu().numpy()
types = dataset.ChartsDataset.get_class_names(pred)
for i in range(len(img_filename)):
print(f'filename: {os.path.basename(img_filename[i])}; type: {types[i]}; label: {pred[i]}')
|
ksvyatov/chart-recognizer
|
charttype/test.py
|
test.py
|
py
| 1,473 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16502029718
|
#! /usr/bin/python3
import logging
import os
from urllib3 import make_headers
from telegram import (InlineKeyboardButton, InlineKeyboardMarkup, InputTextMessageContent,
ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, CallbackQueryHandler,
Filters, RegexHandler, ConversationHandler)
from selects import *
from bot.states import *
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def add_location(bot, update):
user = update.message.from_user
add_user_message(update)
logger.info("user %s. adding new location to db", user.first_name)
update.message.reply_text(
'Okay, let\'s see... Tell me the name of workspace in which new location is .. hm.. located!')
return LOCATION
def add_location_name(bot, update):
user = update.message.from_user
add_user_message(update)
workspace_name = update.message.text.lower().strip()
workspace = get_workspace(workspace_name)
if workspace is not None:
logger.info("user %s. adding location for workspace %s",
user.first_name, update.message.text)
update.message.reply_text('Great! Now tell me the name of your location!')
return LOCATION_NAME
else:
logger.info("user %s. adding location for non-existing workspace %s",
user.first_name, update.message.text)
update.message.reply_text('Sorry, mate. I don\'t know this workspace.\
Please, create one in the main menu and try again.')
reply_keyboard = [['Check meetings', 'Add meeting'],
['Add workspace', 'Add location'],
['Cancel meeting']]
reply_markup = ReplyKeyboardMarkup(reply_keyboard)
update.message.reply_text('Please choose:', reply_markup=reply_markup)
return ACTION
def added_location(bot, update):
user = update.message.from_user
workspace_name = last_message(user.id).text
add_user_message(update)
workspace = get_workspace(workspace_name)
add_location_to_workspace(update.message.text.lower().strip(), workspace.id)
logger.info("user %s. location %s added.", user.first_name, update.message.text)
update.message.reply_text(
'Great! Now you can hold meetings at %s in workspace %s' % (
update.message.text, workspace_name
))
reply_keyboard = [['Check meetings', 'Add meeting'],
['Add workspace', 'Add location'],
['Cancel meeting']]
reply_markup = ReplyKeyboardMarkup(reply_keyboard)
update.message.reply_text('Please choose:', reply_markup=reply_markup)
return ACTION
location_states = {
LOCATION: [MessageHandler(Filters.text, add_location_name)],
LOCATION_NAME: [MessageHandler(Filters.text, added_location)]
}
|
oleges1/meet_bot
|
bot/add_location.py
|
add_location.py
|
py
| 2,998 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11899942747
|
import requests
import json
import datetime
# Load the list of days for which we already have the data
with open("days.txt", "r") as days:
completed_days = days.read()
days.close()
# Get today's date and format it
today = datetime.datetime.now()
today = str(today).split(" ")
today = today[0]
# Check whether we already have today's data
if today not in completed_days:
auth_key = input("Enter your own API key: ")
    # If we don't have today's data yet,
    # make a request to the Stormglass API
response = requests.get(
"https://api.stormglass.io/v2/weather/point",
params={
# Villers-sur-Mer : 49.32195479823806, -0.011785196637717673
"lat": 49.322,
"lng": -0.012,
"params": "windSpeed",
},
headers={
"Authorization": auth_key
}
)
    # Save the results, formatted as indented JSON
json_data = response.json()
filename = "wind_villers_"+str(datetime.datetime.now())[0:10]+".json"
with open(filename, "w") as data:
json.dump(json_data, data, indent=4)
data.writelines("\n")
data.close()
with open("days.txt", "a") as days:
days.writelines(today)
days.writelines("\n")
days.close()
|
Aarrn33/auto-wind-importer
|
get_today.py
|
get_today.py
|
py
| 1,312 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71239238267
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
import numpy
import json
import sys
import os.path
def readXYZ(filename):
# read molecular coordinates from .xyz file
# return list of symbols and list of coordinate
geom = []
with open(filename, "r") as f:
for line in f:
tmp=line.split()
if len(tmp)==4:
atom = OrderedDict()
atom["atom"] = tmp[0]
atom["xyz"] = list(map(float,tmp[1:]))
geom.append(atom)
return geom
if len(sys.argv) < 4:
print( "Usage: %s template.json molecule.xyz molecule.json" % os.path.basename(sys.argv[0]) )
print( " " )
print( " create JSON input file for BAGEL" )
print( " All sections are copied from 'template.json' except for the molecule" )
print( " section, which is taken from 'molecule.xyz'." )
print( " " )
exit(-1)
args = sys.argv[1:]
# load input from template
with open(args[0], "r") as f:
input_sec = json.load(f)
# find molecule section
for sec in input_sec["bagel"]:
if sec["title"] == "molecule":
molecule_sec = sec
break
else:
raise RuntimeError("Molecule section not found in JSON template!")
# The geometry in the 'molecule' section is replaced with the one read from the xyz-file.
geom = readXYZ(args[1])
molecule_sec["angstrom"] = True
molecule_sec["geometry"] = geom
# The modified JSON is written to the new input file
input_filename = args[2]
def to_json(o, level=0, nflag=0):
"""
serialize an object in the JSON format
"""
INDENT = 2
SPACE = " "
NEWLINE = "\n"
ret = ""
if isinstance(o, dict):
if len(o) == 2 and "atom" in o:
ret += NEWLINE + SPACE * INDENT * (level+1) + "{"
else:
ret += "{" + NEWLINE
comma = ""
for k,v in o.items():
ret += comma
if k == "atom":
comma = ","
else:
comma = ",\n"
if k != "xyz" and k != "atom":
ret += SPACE * INDENT * (level+1)
ret += '"' + str(k) + '":' + SPACE
ret += to_json(v, level + 1, nflag=nflag)
if k == "xyz":
ret += " }"
else:
nflag = 0
ret += NEWLINE + SPACE * INDENT * level + "}"
elif isinstance(o, str):
ret += '"' + o + '"'
elif isinstance(o, list):
ret += "[" + ",".join([to_json(e, level+1) for e in o]) + "]"
elif isinstance(o, bool):
ret += "true" if o else "false"
elif isinstance(o, int):
ret += str(o)
elif isinstance(o, float):
ret += '%12.8f' % o
elif isinstance(o, numpy.ndarray) and numpy.issubdtype(o.dtype, numpy.integer):
ret += "[" + ','.join(map(str, o.flatten().tolist())) + "]"
elif isinstance(o, numpy.ndarray) and numpy.issubdtype(o.dtype, numpy.inexact):
ret += "[" + ','.join(map(lambda x: '%.7g' % x, o.flatten().tolist())) + "]"
else:
raise TypeError("Unknown type '%s' for json serialization" % str(type(o)))
return ret
#print to_json(input_sec)
with open(input_filename, "w") as f:
f.writelines(to_json(input_sec))
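# Illustrative example (hypothetical call, not part of the original script):
#   to_json({"bagel": [{"title": "molecule", "geometry": [{"atom": "H", "xyz": [0.0, 0.0, 0.74]}]}]})
# renders each {"atom": ..., "xyz": [...]} entry on its own single line, unlike plain json.dumps.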
|
humeniuka/chem-queue
|
scripts/bagel_template.py
|
bagel_template.py
|
py
| 3,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8412882304
|
from psiturk.psiturk_config import PsiturkConfig
import subprocess
CONFIG = PsiturkConfig()
CONFIG.load_config()
sections = ['psiTurk Access','AWS Access']
for section in sections:
for item in CONFIG.items(section):
#print 'heroku config:set ' + '='.join(item)
subprocess.call('heroku config:set ' + '='.join(item), shell=True)
subprocess.call('heroku config:set ON_HEROKU=true', shell=True)
|
markkho/value-guided-construal
|
experiment.psiturkapp/set-heroku-settings.py
|
set-heroku-settings.py
|
py
| 423 |
python
|
en
|
code
| 20 |
github-code
|
6
|
31684540429
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class DouluoSpider(CrawlSpider):
name = 'douluo'
allowed_domains = ['tycqxs.com']
start_urls = ['http://www.tycqxs.com/54_54196/']
custom_settings = {'ITEM_PIPELINES': {'shop.pipelines.DoupoPipeline': 300}}
rules = (
Rule(LinkExtractor(restrict_xpaths=r'//div[@id="list"]//dd[10]/a'),callback='parse_item', follow=True),
Rule(LinkExtractor(restrict_xpaths=r'//div[@class="bottem1"]/a[4]'), callback='parse_item', follow=True),
)
def parse_item(self, response):
zhangjie = response.xpath('//h1/text()').extract_first()
neirong = response.xpath('//div[@id="content"]/text()').extract()
next_url = response.xpath('//div[@class="bottem1"]/a[4]/@href').extract_first()
print(zhangjie)
yield {
'zhangjie': zhangjie,
'neirong': neirong,
'next_url': response.urljoin(next_url)
}
|
zhangcq1/MyCrawler
|
爬虫基础梳理/shop/shop/spiders/douluo.py
|
douluo.py
|
py
| 1,034 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42663404209
|
import copy
import math #needed for calculation of weight and bias initialization
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch, torch.nn as nn, torch.nn.functional as F
import torchvision
from torchvision import transforms, models, utils
#Set seeds
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
#Import components
from . import components as cts
#TODOS FOR ALL MODELS:
#TODO try with Sigmoid on the attention instead of Softmax
#TODO try with self-attention instead of fixed learned attention weights
class BodyAvgDiseaseFeatureAttn(nn.Module): #7/1/2020
"""(1) ResNet18 [slices, 512, 14, 14]
(2) conv_final to [slices, 16, 6, 6]
(3) Make a 'copy representation': create n_outputs number of copies
by tiling: [slices, n_outputs, 16, 6, 6]
(4) Element wise multiply the 'copy representation' by a learned
weight vector of shape [1, n_outputs, 16, 1, 1]. This learned
weight vector re-weights the features for each disease separately.
Out shape: [slices, n_outputs, 16, 6, 6] (unchanged because we used
element-wise multiplication with broadcasting).
(5) Apply disease-specific FC layers which for each of the n_outputs
diseases will transform the 16*6*6 representation into a single
disease score. This step is analogous to the final FC layer in
the baseline model, except that in the baseline model we can
implement it easily with Conv2d whereas here because we have
separate disease representations we have to do something
trickier to implement disease-specific FC layers.
Out shape: [slices, n_outputs]
(6) Avg pooling over slices to get [n_outputs]"""
def __init__(self, n_outputs):
super(BodyAvgDiseaseFeatureAttn, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.dzfeatweights = nn.Parameter(torch.ones((1,n_outputs,16,1,1), dtype=torch.float32),requires_grad=True)
self.softmax = nn.Softmax(dim=2) #make the 16 feature weights per disease add to 1
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Copy the representation n_outputs number of times, so that we can
#calculate disease-specific intermediate representations, in which
#the features have been reweighted for each disease separately:
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Element wise multiply the copy representation by the learned weights
#The learned weights perform the feature reweighting per disease.
#The softmax makes the features for one disease "compete against each other"
x = torch.mul(x,self.softmax(self.dzfeatweights)) #out shape [slices, 83, 16, 6, 6]
#Flatten
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
#Apply disease-specific FC layers
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
#Final steps are the same as for baseline model:
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
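# Minimal shape-check sketch (hypothetical helper, not part of the original module): it walks
# through steps (3)-(6) of the BodyAvgDiseaseFeatureAttn docstring with random tensors,
# assuming 15 slices, 83 outputs and a [slices, 16, 6, 6] feature map.
def _disease_feature_attn_shape_sketch():
    slices, n_outputs, c, h, w = 15, 83, 16, 6, 6
    x = torch.randn(slices, c, h, w)                                     # step (2) output
    x = x.repeat(n_outputs, 1, 1, 1, 1).transpose(0, 1)                  # step (3): [slices, 83, 16, 6, 6]
    featweights = torch.softmax(torch.ones(1, n_outputs, c, 1, 1), dim=2)
    x = torch.mul(x, featweights).flatten(start_dim=2)                   # step (4): [slices, 83, 576]
    fc_w, fc_b = torch.randn(n_outputs, c * h * w), torch.randn(n_outputs)
    slice_preds = torch.sum(x * fc_w, dim=2) + fc_b                      # step (5): [slices, 83]
    return slice_preds.mean(dim=0)                                       # step (6): [83]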
class BodyAvg_Testing(nn.Module): #7/2/2020
"""BodyAvg model, implemented using the 'copy representation' and
disease-specific FC layers of BodyAvgDiseaseFeatureAttn. The only purpose
of this model is code testing: to figure out if the performance is exactly
the same as for the BodyAvg model."""
def __init__(self, n_outputs):
super(BodyAvg_Testing, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyAvgDiseaseFeatureAttn2(nn.Module): #7/2/2020, updated 7/7/2020
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
    Difference from BodyAvgDiseaseFeatureAttn: in step (4) this model shares
    the learned feature weights between the right lung and the left lung."""
def __init__(self, n_outputs_lung, n_outputs_heart, nonlinearity):
super(BodyAvgDiseaseFeatureAttn2, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = (2*n_outputs_lung)+n_outputs_heart
self.n_outputs_lung = n_outputs_lung
self.n_outputs_heart = n_outputs_heart
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
self.dzfeatweights_lung = nn.Parameter(torch.ones((1,n_outputs_lung,16,1,1), dtype=torch.float32),requires_grad=True)
self.dzfeatweights_heart = nn.Parameter(torch.ones((1,n_outputs_heart,16,1,1), dtype=torch.float32),requires_grad=True)
#Nonlinearity that gets applied to the feature weighting:
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2) #make the 16 feature weights per disease add to 1
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Apply the feature weights.
#Must follow ground truth label order, which is heart, left_lung, right_lung
x_heart = torch.mul(x[:,0:self.n_outputs_heart,:,:,:],self.nonlinearity(self.dzfeatweights_heart))
x_left_lung = torch.mul(x[:,self.n_outputs_heart:self.n_outputs_heart+self.n_outputs_lung,:,:,:],self.nonlinearity(self.dzfeatweights_lung))
x_right_lung = torch.mul(x[:,-1*self.n_outputs_lung:,:,:,:],self.nonlinearity(self.dzfeatweights_lung))
x = torch.cat((x_heart,x_left_lung,x_right_lung),dim=1) #out shape [slices, 83, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyLocationAttn3(nn.Module): #7/2/2020, updated 7/7/2020
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyAvgDiseaseFeatureAttn: uses spatial attention instead of
feature attention. Specifically there is right lung, heart, and left lung
spatial attention. Also, instead of being fixed weights every time, the
weights are learned based on using the center slices (since the center
slices are most indicative of where the right lung, heart, and left
lung are located.) So this is trainable soft self-attention."""
def __init__(self, n_outputs_lung, n_outputs_heart, nonlinearity):
super(BodyLocationAttn3, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = (2*n_outputs_lung)+n_outputs_heart
self.n_outputs_lung = n_outputs_lung
self.n_outputs_heart = n_outputs_heart
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
chosen_nonlinearity = nn.Softmax()
elif nonlinearity == 'sigmoid':
chosen_nonlinearity = nn.Sigmoid()
self.heart_attn_fc = nn.Sequential(nn.Linear(3*16*6*6, 6*6),
chosen_nonlinearity)
self.left_lung_attn_fc = nn.Sequential(nn.Linear(3*16*6*6, 6*6),
chosen_nonlinearity)
self.right_lung_attn_fc = nn.Sequential(nn.Linear(3*16*6*6, 6*6),
chosen_nonlinearity)
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = self.n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Calculate the attention maps based on the center slices
#Use slices 6, 7, and 8 because these are in the exact center and
#also have the highest attention weight when you do height attention.
center_slices = x[6:9,:,:,:] #out shape [3, 16, 6, 6]
center_slices_flat = center_slices.flatten().unsqueeze(dim=0) #out shape [1,1728]
self.heart_spatial = self.heart_attn_fc(center_slices_flat).reshape(1,1,1,6,6) #out shape [1,1,1,6,6]
self.left_lung_spatial = self.left_lung_attn_fc(center_slices_flat).reshape(1,1,1,6,6) #out shape [1,1,1,6,6]
self.right_lung_spatial = self.right_lung_attn_fc(center_slices_flat).reshape(1,1,1,6,6) #out shape [1,1,1,6,6]
#Repeat x
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Apply the attention maps
#Must follow ground truth label order, which is heart, left_lung, right_lung
x_heart = torch.mul(x[:,0:self.n_outputs_heart,:,:,:],self.heart_spatial)
x_left_lung = torch.mul(x[:,self.n_outputs_heart:self.n_outputs_heart+self.n_outputs_lung,:,:,:],self.left_lung_spatial)
x_right_lung = torch.mul(x[:,-1*self.n_outputs_lung:,:,:,:],self.right_lung_spatial)
x = torch.cat((x_heart,x_left_lung,x_right_lung),dim=1) #out shape [slices, 83, 16, 6, 6]
x = x.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyDiseaseSpatialAttn4(nn.Module): #7/7/2020 #TODO test this
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyLocationAttn3: while 4 also uses spatial
attention (like 3), 4 does spatial attention per disease instead of per
location."""
def __init__(self, n_outputs, nonlinearity):
super(BodyDiseaseSpatialAttn4, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2)
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
#FC layers for calculating the disease-specific spatial attention
#For each disease and each element of the 6x6 I learn a different FC layer:
self.fcattns_weights, self.fcattns_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs*6*6, in_features = 16)
#FC layers for calculating the final disease predictions
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
#Calculate the disease-specific spatial attention:
attn_raw_list = []
for slice_num in range(self.slices):
slice_data = x[slice_num,:,:,:,:] #out shape [83, 16, 6, 6]
slice_data = slice_data.flatten(start_dim=2,end_dim=3).transpose(1,2) #out shape [83, 6*6, 16]
slice_data = slice_data.flatten(start_dim=0,end_dim=1) #out shape [83*6*6, 16]
temp1 = torch.mul(slice_data,self.fcattns_weights) #out shape [83*6*6, 16]
temp2 = torch.sum(temp1,dim=1) #out shape [83*6*6]
temp3 = (temp2+self.fcattns_biases).unsqueeze(0) #out shape [83*6*6]
attn_raw_list.append(temp3)
attn_raw = torch.cat(attn_raw_list,dim=0) #out shape [slices, 83*6*6]
attn_raw = torch.reshape(attn_raw,(self.slices,self.n_outputs,6*6)) #out shape [slices, 83, 6*6]
attn = self.nonlinearity(attn_raw) #out shape [slices, 83, 6*6]
attn = torch.reshape(attn,(self.slices,self.n_outputs,6,6)).unsqueeze(2) #out shape [slices, 83, 1, 6, 6]
#Apply the attention
x_times_attn = torch.mul(x, attn) #out shape [slices, 83, 16, 6, 6]
#Disease predictions
        x = x_times_attn.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
class BodyDiseaseSpatialAttn5(nn.Module): #7/7/2020 #TODO test this
"""See BodyAvgDiseaseFeatureAttn for more documentation including code comments.
Difference from BodyDiseaseSpatialAttn4: whereas 4 learns a different
mapping of 16 features -> 1 spatial attn value for each element of the 6x6
square, 5 uses a convolution layer such that the mapping of 16 -> 1 is
the same for each element of the 6x6 square"""
def __init__(self, n_outputs, nonlinearity):
super(BodyDiseaseSpatialAttn5, self).__init__()
self.slices = 15 #9 projections
self.n_outputs = n_outputs
self.features = cts.resnet_features()
self.conv2d = cts.final_conv()
#Calculate the spatial attention based on center slices
if nonlinearity == 'softmax':
self.nonlinearity = nn.Softmax(dim=2)
elif nonlinearity == 'sigmoid':
self.nonlinearity = nn.Sigmoid()
#Conv layer for calculating the disease-specific spatial attention
#For each disease and each element of the 6x6 I learn a different FC layer:
self.attn_conv = nn.Sequential(
nn.Conv2d(16, 83, kernel_size = (1,1), stride=(1,1), padding=0),
self.nonlinearity)
#FC layers for calculating the final disease predictions
self.fclayers_weights, self.fclayers_biases = init_stacked_fc_layers(total_independent_fc_layers = n_outputs, in_features = 16*6*6)
self.avgpool_1d = nn.AvgPool1d(kernel_size=self.slices)
def forward(self, x):
x = cts.reshape_x(x, self.slices)
x = self.features(x) #out shape [slices,512,14,14]
x = self.conv2d(x) #out shape [slices, 16, 6, 6]
#Calculate the disease-specific spatial attention:
attn = self.attn_conv(x).unsqueeze(2) #out shape [slices, 83, 1, 6, 6]
#Apply the attention
x = x.repeat(self.n_outputs,1,1,1,1) #out shape [83, slices, 16, 6, 6]
x = x.transpose(0,1) #out shape [slices, 83, 16, 6, 6]
        x_times_attn = torch.mul(x, attn) #out shape [slices, 83, 16, 6, 6]
        #Disease predictions, computed from the attention-weighted features
        x = x_times_attn.flatten(start_dim=2,end_dim=4) #out shape [slices, 83, 16*6*6] = [slices, 83, 576]
slice_preds = apply_disease_fc_layers(x, self.fclayers_weights, self.fclayers_biases)
x = slice_preds.transpose(0,1).unsqueeze(0) #out shape [1, 83, slices]
x = self.avgpool_1d(x) #out shape [1, 83, 1]
x = torch.squeeze(x, dim=2) #out shape [1, 83]
return x
#############
# Functions #-------------------------------------------------------------------
#############
def init_stacked_fc_layers(total_independent_fc_layers, in_features):
"""Return the weights and biases of <total_independent_fc_layers>
fully connected layers.
Let's say there are 83 <total_independent_fc_layers> and there are
16*6*6 in_features. Then the produced fclayers_weights will have shape
83 x 576 and the produced fclayers_biases will have shape 83.
Each row corresponds to one FC layer that goes from a 1 x 576 representation
to a 1."""
#dzfclayers_weights holds the weights for each disease-specific fc layer.
#https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/conv.py#L40
fclayers_weights_list = []
fclayers_biases_list = []
out_features = 1
for layernum in range(total_independent_fc_layers):
#kaiming uniform init following https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py
#for the case where we are doing disease-specific FC layers (i.e.
#where total_independent_fc_layers = 83 and in_features = 16*6*6)
#in order to be equivalent to the initialization of the final
#conv2d layer in the baseline model, the fan_in used should be 576.
#That is what we'll get in the calculation because in_features
#is 16*6*6=576, and the weights are defined as weight = Parameter(torch.Tensor(out_features, in_features))
#>>> nn.init._calculate_fan_in_and_fan_out(torch.rand(1,16*6*6))
#(576, 1)
#weight:
weight = torch.Tensor(out_features, in_features)
nn.init.kaiming_uniform_(weight, a=math.sqrt(5))
#bias:
bias = torch.Tensor(out_features)
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weight)
assert fan_in == in_features #e.g. 576 for in_features = 16*6*6. sanity check based on my calculations
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)
fclayers_weights_list.append(weight)
fclayers_biases_list.append(bias)
fclayers_weights = nn.Parameter(torch.cat(fclayers_weights_list,dim=0)) #e.g. shape [83, 576]
fclayers_biases = nn.Parameter(torch.cat(fclayers_biases_list,dim=0)) #e.g. shape [83]
return fclayers_weights, fclayers_biases
def apply_disease_fc_layers(x, fclayers_weights, fclayers_biases):
"""Apply the disease-specific fully connected layers"""
slice_preds_list = []
for slice_num in range(x.shape[0]):
slice_data = x[slice_num,:,:] #out shape [83, 576]
#apply all the disease-specific FC layers at once
#Weight multiplication
#element-wise multiply and then sum over the columns (because this
#is equivalent to doing vector-vector multiplication between
#the rows of slice_data and the corresponding rows of self.fclayers_weights)
temp1 = torch.mul(slice_data,fclayers_weights) #out shape [83, 576]
temp2 = torch.sum(temp1,dim=1) #out shape [83]
#Bias addition
temp3 = (temp2+fclayers_biases).unsqueeze(0) #out shape [1,83]
#Now we have our 83 disease predictions for this slice.
#Append these slice predictions to our list:
slice_preds_list.append(temp3)
slice_preds = torch.cat(slice_preds_list,dim=0) #out shape [slices, 83]
return slice_preds
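
# Hedged shape-check sketch (added for illustration; not part of the original
# training code). It exercises the two helper functions above on random tensors
# and only runs when this file is executed directly, assuming this module's own
# imports (torch, etc.) resolve in that environment. The numbers follow the
# docstrings: 83 diseases, 16*6*6 flattened features, 15 slices.
if __name__ == '__main__':
    demo_weights, demo_biases = init_stacked_fc_layers(
        total_independent_fc_layers=83, in_features=16*6*6)
    print(demo_weights.shape, demo_biases.shape)  # torch.Size([83, 576]) torch.Size([83])
    demo_x = torch.rand(15, 83, 16*6*6)  # [slices, diseases, flattened features]
    demo_preds = apply_disease_fc_layers(demo_x, demo_weights, demo_biases)
    print(demo_preds.shape)  # torch.Size([15, 83]), i.e. per-slice disease predictions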
|
rachellea/explainable-ct-ai
|
src/models/custom_models_diseasereps.py
|
custom_models_diseasereps.py
|
py
| 21,347 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29017166061
|
from django.forms import model_to_dict
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from api.models.facility import Facility
from api.serializers.facility_serializer import FacilitySerializer
@api_view(['GET'])
def facility(request, facility_id):
    # filter().first() returns None for a missing id instead of raising DoesNotExist
    found = Facility.objects.filter(id=facility_id).first()
if found is not None:
return JsonResponse(model_to_dict(found, fields=[field.name for field in found._meta.fields]), status=200)
else:
return JsonResponse({
"message": "failure"
}, status=404)
@api_view(['POST'])
def reset_tests(request):
Facility.objects.all().delete()
return JsonResponse({
"message": "success"
}, status=200)
@api_view(['POST', 'GET'])
def facilitys(request, *args, **kwargs):
if request.method == 'POST':
newFacility = JSONParser().parse(request)
already = Facility.objects.filter(name=newFacility['name']).first()
if already is None:
serializer = FacilitySerializer(data=newFacility)
if serializer.is_valid():
created = Facility.objects.create(**serializer.validated_data)
return JsonResponse({
"message": "success",
"created": model_to_dict(created, fields=[field.name for field in created._meta.fields])
}, status=201)
else:
return JsonResponse({
"message": "failure"
}, status=400)
else:
return JsonResponse({
"message": "previously created",
"created": model_to_dict(already, fields=[field.name for field in already._meta.fields])
}, status=200)
if request.method == 'GET':
name = request.query_params['name'] if 'name' in request.query_params else None
org_id = request.query_params['org_id'] if 'org_id' in request.query_params else None
if name is not None:
found = Facility.objects.filter(name=name).first()
if found is not None:
return JsonResponse({
"message": "success",
"matched": model_to_dict(found, fields=[field.name for field in found._meta.fields])
}, status=200)
if org_id is not None:
founds = Facility.objects.filter(org_id=org_id)
if founds is not None:
return JsonResponse({
"message": "success",
"matched": [model_to_dict(found, fields=[field.name for field in found._meta.fields]) for found in founds]
}, status=200)
else:
return JsonResponse({
"message": "require name or org_id for facility query"
}, status=400)
|
jazzhammer/jitgurup
|
api/views/facilitys_view.py
|
facilitys_view.py
|
py
| 2,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74358331067
|
TITLE = 2
NAME = 0
PID = 1
SALARY = 3
with open('hr_system.txt', 'r') as f:
lines = f.readlines()
persons = [line.split() for line in lines]
for person in persons:
print(f'Name: {person[NAME].strip()}, Title: {person[TITLE].strip()}')
for person in persons:
salary = float(person[SALARY])
salary = salary / 24
if person[TITLE].lower() == 'engineer':
salary += 1000
print(f'{person[NAME]} (ID: {person[PID]}), {person[TITLE]} - ${salary:.2f}')
|
gabrielchboff/CSE110
|
src/Week06/team_activity.py
|
team_activity.py
|
py
| 482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14699326224
|
# -*- coding: utf-8 -*-
import sys
from os import listdir
from os.path import isfile, join
import io
def extract_files(path):
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
return onlyfiles
def get_text(onlyfiles,path):
text = ''
for file in onlyfiles:
f = io.open(join(path,file), mode="r", encoding="utf-8")
text = text + f.read() + '\n'
return text
def main(argv):
path = argv[0]
output_file = argv[1]
onlyfiles= extract_files(path)
text= get_text(onlyfiles,path)
f = io.open(output_file, mode="w", encoding="utf-8")
f.write(text)
if __name__ == '__main__':
main(sys.argv[1:])
|
SerPablo/redib_extractor
|
src/text_extractor.py
|
text_extractor.py
|
py
| 683 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2140571757
|
from tkinter import ttk, constants, messagebox, font
from services.study_app_service import study_app_service
from ui.navigation import Navigation
class CourseView:
"""Kurssinäkymästä vastaava luokka. Näyttää yksittäisen kurssin tehtävät."""
def __init__(self, root, previous_view, create_task, show_task, logout):
"""Luokan konstruktori. Luo uuden kurssinäkymän.
Args:
root: TKinter-elementti, jonka sisään näkymä alustetaan.
previous_view:
Kutsuttava arvo, jota kutsutaan, kun palataan kaikkien kurssien näkymään.
create_task:
Kutsuttava arvo, jota kutsutaan, kun siirrytään tehtävän luomisnäkymään.
show_task:
Kutsuttava arvo, jota kutsutaan, kun siirrytään tehtävänäkymään.
logout:
Kutsuttava arvo, jota kutsutaan, kun käyttäjä kirjautuu ulos.
"""
self._root = root
self._frame = None
self._previous_view = previous_view
self._create_task = create_task
self._show_task = show_task
self._logout = logout
self._course = study_app_service.get_current_course()
self._course.tasks = study_app_service.get_tasks_by_course(
self._course)
self._initialize()
def pack(self):
"""Näyttää näkymän."""
self._frame.pack(fill=constants.X)
def destroy(self):
"""Tuhoaa näkymän."""
self._frame.destroy()
def _initialize_task_entity(self, task):
"""Alustaa painikkeen yksittäiselle tehtävälle.
Args:
task: Tehtävä Task-oliona.
"""
task_button = ttk.Button(
master=self._frame,
text=f"{task.title}",
command=lambda: [study_app_service.set_current_task(
task), self._show_task()]
)
task_button.grid(padx=5, pady=5, sticky=constants.EW)
def _initialize_remove_course_popup(self):
"""Alustaa viesti-ikkunan kurssin poistamiselle.
"""
msg_box = messagebox.askyesno(
title="Remove course",
message=f"Are you sure you want to remove {self._course.name} from your courses?",
)
if msg_box == True:
study_app_service.remove_course(self._course)
self._previous_view()
def _initialize(self):
self._frame = ttk.Frame(master=self._root)
self._frame.grid_columnconfigure(0, weight=1, minsize=400)
navigation = Navigation(self._frame, self._previous_view, self._logout)
navigation.initialize(1)
label = ttk.Label(
master=self._frame,
text=f"{self._course.name}",
font=font.Font(weight='bold')
)
label.grid(row=1, padx=5, pady=5, sticky=constants.W)
add_task_button = ttk.Button(
master=self._frame,
text="Add task",
command=self._create_task
)
add_task_button.grid(sticky=constants.N)
todo_label = ttk.Label(
master=self._frame,
text="To-do:",
font=font.Font(weight='bold')
)
todo_label.grid(padx=5, pady=5, sticky=constants.W)
for task in self._course.tasks:
if task.state == 1:
self._initialize_task_entity(task)
done_label = ttk.Label(
master=self._frame,
text="\nCompleted tasks:",
font=font.Font(weight='bold')
)
done_label.grid(padx=5, pady=5, sticky=constants.W)
for task in self._course.tasks:
if task.state == 0:
self._initialize_task_entity(task)
remove_button = ttk.Button(
master=self._frame,
text="Remove course",
command=self._initialize_remove_course_popup
)
remove_button.grid(padx=10, pady=10, sticky=constants.E)
|
erjavaskivuori/ot-harjoitustyo
|
study-app/src/ui/course_view.py
|
course_view.py
|
py
| 3,973 |
python
|
fi
|
code
| 0 |
github-code
|
6
|
24994085411
|
"""
constants definition
"""
# definition for resourcetype
COLLECTION=1
OBJECT=None
DAV_PROPS=['creationdate', 'displayname', 'getcontentlanguage', 'getcontentlength', 'getcontenttype', 'getetag', 'getlastmodified', 'lockdiscovery', 'resourcetype', 'source', 'supportedlock']
# Request classes in propfind
RT_ALLPROP=1
RT_PROPNAME=2
RT_PROP=3
|
factorlibre/openerp-extra-6.1
|
document_webdav_old/webdav/DAV/constants.py
|
constants.py
|
py
| 350 |
python
|
en
|
code
| 9 |
github-code
|
6
|
15764585120
|
"""
Anisha Kadri 2017
[email protected]
A Module containing methods to create networks from different models.
1) For pure preferential attachement:-
pref_att(N, m)
2) For random attachment:-
rand_att(N,m)
3) For a mixture of the two, attachment via random walk:-
walk_att(N,m,L)
References
----------
[1] A. L. Barabási and R. Albert "Emergence of scaling in
random networks", Science 286, pp 509-512, 1999.
"""
import networkx as nx
import random
import math
def pref_att(N, m, seed=None):
"""Returns a graph that is created using the Barabasi-Albert Model,
of N nodes in total and a node with m edges added at each time increment.
Parameters
----------
n = total number of nodes
m = number of edges attached to each new node, or degree of new node.
(value must be < N)
seed = optional argument, initialises random number generator to a starting state.
Returns
-------
A Barabasi Albert Graph, with pure preferential attachment.
"""
#this ensures that the maximum degree is always less than number of nodes
if m >= N:
raise Exception("m-value must be less than N")
if m < 1:
raise Exception("graph gowth is sub-critical.Degree of new node cannot be 0")
# Intialises the pseudo-random number generator, allowing result replication.
random.seed(seed)
# Creates new graph of m nodes, of equal degree
nodes = list(range(m))
G = nx.complete_graph(m)
G.name = "Graph with N = %s, m = %s"%(N,m)
# Target nodes for new edges
attach_list = nodes
# Maintains a list of nodes for random sampling,
    # a concatenated edge list
# thus, number of instances of each node in the list is proportional to it's degree
# (i.e. the list has k_i instances of node i)
node_list=[]
for i in nodes:
node_list.extend([i]*m)
N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
while N_tot < N:
new_stubs = [N_tot]*m #create new stubs
new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
G.add_edges_from(new_edges)
#add new edges to the list
node_list.extend(attach_list)
node_list.extend(new_stubs)
# m nodes are chosen from the edge_list to form new targets.
attach_list = set() # making this a set ensures that edges added are all unique (not a multigraph)
while len(attach_list)< m:
random_node =random.choice(node_list)
attach_list.add(random_node)
N_tot += 1
attach_list = list(attach_list)
return G
def rand_att(N,m, seed=None):
if m >= N:
raise Exception("m-value must be less than N")
if m < 1:
raise Exception("graph gowth is sub-critical.Degree of new node cannot be 0")
# Intialises the pseudo-random number generator, allowing result replication.
random.seed(seed)
# Creates new graph of m nodes, and no edges
G = nx.generators.classic.empty_graph(m)
G.name = "Graph with N = %s, m = %s"%(N,m)
# Target nodes for new edges
    attach_list = list(G.nodes())
N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
while N_tot < N:
new_stubs = [N_tot]*m #create new stubs
new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
G.add_edges_from(new_edges)
        node_list = list(G.nodes())
        # m nodes are chosen at random from the node_list to form new targets.
        attach_list = random.sample(node_list, m)
N_tot += 1
return G
def random_walk(N,m, L, seed = None):
if m >= N:
raise Exception("m-value must be less than N")
if m < 1:
raise Exception("graph gowth is sub-critical.Degree of new node cannot be 0")
# Intialises the pseudo-random number generator, allowing result replication.
random.seed(seed)
# Creates new graph of m nodes, of equal degree
G = nx.complete_graph(m)
nodes = list(range(m))
# Target nodes for new edges
attach_list = nodes
N_tot = m # N_tot = No. of nodes in network, also index numbering for new node
while N_tot < N:
new_stubs = [N_tot]*m #create new stubs
new_edges = zip(new_stubs,attach_list) #create new edges between chosen nodes
G.add_edges_from(new_edges)
        node_list = list(G.nodes())
        # m walk endpoints are chosen to form new targets.
        attach_list = set() # making this a set ensures that edges added are all unique (not a multigraph)
        random_list = set()
        #uniformly choose the start points of the walks
        while len(random_list) < m:
            random_node = random.choice(node_list)
            random_list.add(random_node)
        #take a random walk of length L from each start point
        for i in random_list:
            node = i
            steps = 0
            while steps < L:
                neighbours = list(G.neighbors(node))
                node = random.choice(neighbours)
                steps += 1
            attach_list.add(node)
        attach_list = list(attach_list)
        N_tot += 1
return G
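
# Hedged usage sketch (added for illustration, not part of the original module):
# builds a small graph with each generator and prints its size. The parameter
# values (N=100, m=3, L=2, seed=42) are arbitrary choices for the demo.
if __name__ == '__main__':
    for name, G in [('preferential', pref_att(100, 3, seed=42)),
                    ('random', rand_att(100, 3, seed=42)),
                    ('random walk', random_walk(100, 3, L=2, seed=42))]:
        print(name, G.number_of_nodes(), G.number_of_edges())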
|
anishakadri/barabasialbert
|
model.py
|
model.py
|
py
| 5,291 |
python
|
en
|
code
| 0 |
github-code
|
6
|
312775884
|
from .models import Cart
from django.contrib.auth.models import User
def cart_count(request):
    if request.user.is_authenticated:
accountuser = request.user
products = Cart.get_all_by_user(accountuser)
count = products.count()
return {'cart_count': count}
else:
return {'cart_count': 0}
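
# Hedged usage note (an assumption based on standard Django practice, not taken
# from this repository): a context processor like cart_count is normally enabled
# by adding its dotted path to the template settings, after which templates can
# use {{ cart_count }} directly.
#
# TEMPLATES[0]['OPTIONS']['context_processors'].append(
#     'ecom.context_processors.cart_count',   # hypothetical dotted path
# )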
|
Yashraj098/ShopIt
|
ecom/context_processors.py
|
context_processors.py
|
py
| 330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3225854902
|
#!/usr/bin/python2.7
from functools import wraps
from flask import Flask, request, jsonify, Response, abort, json
import MySQLdb, collections
app = Flask(__name__)
MYSQL_DATABASE_HOST = "127.0.0.1"
MYSQL_DATABASE_USER = "twitter"
MYSQL_DATABASE_PASSWORD = "DF7U7q2yy6pUPSn3"
MYSQL_DATABASE_DB = "twitter"
db = MySQLdb.connect(host=MYSQL_DATABASE_HOST, user=MYSQL_DATABASE_USER, passwd=MYSQL_DATABASE_PASSWORD, db=MYSQL_DATABASE_DB)
def run_query(q):
c = db.cursor()
size = c.execute(q)
return size, c
@app.route('/twitter.py')
def ret_twitter_source():
return "<pre>" + open('twitter.py', 'r').read() + "</pre>"
@app.route('/testsuite.py')
def ret_testsuite_source():
return "<pre>" + open('testsuite.py', 'r').read() + "</pre>"
@app.route('/schema.sql')
def ret_schema_source():
return "<pre>" + open('schema.sql', 'r').read() + "</pre>"
@app.route('/')
def hello():
out = """
<pre>URLS
<a href=/user_timeline.json>User Timeline</a> params: token, username (optional, defaults to users token)
<a href=/friendslist.json>Friends List</a> params: token, username (optional, defaults to users token)
<a href=/followerslist.json>Followers List</a> params: token, username (optional, defaults to users token)
<a href=/createfriend.json>Add Friend</a> params: token, username
<a href=/destroyfriend.json>Destroy Friend</a> params: token, username
<a href=/tweet.json>Add tweet</a> (not tested) params: token, message
append query string token=<token> for user-context token
append query string username=<desired user> to query parameters about
i.e. /friendslist.json?token=1b43ef1e0618de6d&username=brian
<a href=/twitter.py>twitter.py source</a>
<a href=/testsuite.py>testsuite.py source</a>
<a href=/schema.sql>database schema</a>
"""
query = "SELECT username, token FROM users"
size, ret = run_query(query)
rows = ret.fetchall()
for row in rows:
out += "User: %s, Token: %s\n" % (row[0], row[1])
out += "</pre>"
return out
@app.route('/user_timeline.json')
def get_tweets():
"""Returns JSON-encoded list of tweets belonging to the specified username, and their friends
If no username is specified, default to the authenticating user
Returns HTTP Error 400 if given username doesn't exist"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = ""
if check_None_or_empty(request.args.get('username', None)):
target_user = auth_user
else:
target_user = get_req_args_or_fail('username')
get_userid(target_user)
query = "SELECT timestamp, users.username, messageId, message FROM tweets LEFT JOIN users ON users.id = tweets.userId WHERE tweets.userId = ANY (SELECT id FROM users WHERE username = '%s' UNION SELECT friends.followingId FROM users JOIN friends ON friends.userId = users.id WHERE users.username = '%s') ORDER BY timestamp DESC" % (target_user, target_user)
size, ret = run_query(query)
tweets = []
rows = ret.fetchall()
for row in rows:
d = collections.OrderedDict()
d['timestamp'] = row[0].isoformat()
d['username'] = row[1]
d['messageId'] = row[2]
d['tweet'] = row[3]
tweets.append(d)
return jsonify(tweets=tweets)
@app.route('/friendslist.json')
def get_friends():
"""Returns a list of users the specified username is friends with
If no username is specified, default to the authenticating user
Returns HTTP Error 400 if given username doesn't exist"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = ""
if check_None_or_empty(request.args.get('username', None)):
target_user = auth_user
else:
target_user = get_req_args_or_fail('username')
get_userid(target_user)
query = "SELECT id, username FROM users WHERE id IN (SELECT friends.followingId FROM users JOIN friends ON friends.userId = users.id WHERE users.username = '%s')" % target_user
size, ret = run_query(query)
friends = []
rows = ret.fetchall()
for row in rows:
d = collections.OrderedDict()
d['id'] = row[0]
d['username'] = row[1]
friends.append(d)
return jsonify(users=friends)
@app.route('/followerslist.json')
def get_followers():
"""Returns a list of users who follow the specified username
If no username is specified, default to the authenticating user
Returns HTTP Error 400 if given username doesn't exist"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = ""
if check_None_or_empty(request.args.get('username', None)):
target_user = auth_user
else:
target_user = get_req_args_or_fail('username')
get_userid(target_user)
query = "SELECT id, username FROM users WHERE id IN (SELECT friends.userId FROM users JOIN friends ON friends.followingId = users.id WHERE users.username = '%s')" % target_user
size, ret = run_query(query)
followers = []
rows = ret.fetchall()
for row in rows:
d = collections.OrderedDict()
d['id'] = row[0]
d['username'] = row[1]
followers.append(d)
return jsonify(users=followers)
@app.route('/tweet.json')
def add_tweet():
"""EXPERIMENTAL Add tweet for the authenticating user"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
message = get_req_args_or_fail('message')
userid = get_userid(auth_user)
query = "INSERT into tweets (userId, message) VALUES ('%i', '%s')" % (userid, message)
try:
size, ret = run_query(query)
except:
abort(400)
return jsonify(tweet={'status': "Success!"})
@app.route('/createfriend.json')
def add_friend():
"""Adds the specified username to the authenticating users friends list
Returns HTTP Erorr 400 if username is None, an empty string, the authenticating user or a non-existant user
Returns the user id and username upon successful friending"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = get_req_args_or_fail('username')
if target_user == auth_user: abort(400)
userid = get_userid(auth_user)
target_userid = get_userid(target_user)
query = "INSERT into friends (userId, followingId) VALUES ('%i', '%i')" % (userid, target_userid)
try:
size, ret = run_query(query)
except:
abort(400)
d = collections.OrderedDict()
d['id'] = target_userid
d['username'] = target_user
return jsonify(user=d)
@app.route('/destroyfriend.json')
def remove_friend():
"""Removes the specified username from the authenticating users friends list
Returns HTTP Erorr 400 if username is None, an empty string, the authenticating user or a non-existant user
Returns the user id and username upon successful removing"""
auth_user = verify_token(get_req_args_or_fail('token', 401))
target_user = get_req_args_or_fail('username')
if target_user == auth_user: abort(400)
userid = get_userid(auth_user)
target_userid = get_userid(target_user)
query = "DELETE FROM friends where userId = '%i' AND followingId = '%i'" % (userid, target_userid)
size, ret = run_query(query)
if size == 0: abort(400)
d = collections.OrderedDict()
d['id'] = target_userid
d['username'] = target_user
return jsonify(user=d)
def get_userid(user):
"""Simple statement to retrieve a given users id
Throw an HTTP Error 400 if the user doesn't exist"""
query = "SELECT id FROM users WHERE username = '%s'" % user
size, ret = run_query(query)
if size != 1:
abort(400)
uid = ret.fetchone()[0]
return uid
def get_req_args_or_fail(attribute, abortCode=400):
"""Retrieves query string parameters and verifies they are not None/empty
Returns the value if succesful, or throws an HTTP Error code (400 by default)"""
value = request.args.get(attribute, None)
if check_None_or_empty(value): abort(abortCode)
return MySQLdb.escape_string(value)
def check_None_or_empty(string):
"""Returns True if the string is None or empty, False otherwise"""
if string is None or string == "": return True
return False
def verify_token(token):
"""Verifies a user API token is valid
Returns the authenticating username, or throws an HTTP Error 401 Authorization Denied"""
query = "SELECT username FROM users WHERE token = '%s'" % token
size, ret = run_query(query)
if size == 0: abort(401)
if size == 1:
return ret.fetchone()[0]
abort(401)
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0")
|
brianfife/twitterapi
|
twitter.py
|
twitter.py
|
py
| 8,268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28514991368
|
"""
This module transforms the corpus into the format required by each benchmarked tool
"""
import json
def liwc(senders, data):
ds = dict()
for hashed_addr in senders:
try:
emails = '. '.join(data[hashed_addr])
ds[hashed_addr] = emails
except KeyError:
continue
with open(file="dataset/LIWC/data/dataset.csv", mode='w') as csv_file:
for key in ds:
csv_file.write("\"\"\"{}\"\"\",\"\"\"{}\"\"\"\n".format(key, ds[key]))
def personality_recognizer(senders, data):
for hashed_addr in senders:
try:
emails = '. '.join(data[hashed_addr])
with open("dataset/PersonalityRecognizer/data/{}.txt".format(hashed_addr), 'w') as f:
f.write("%s\n" % emails)
except KeyError:
continue
def twitpersonality(senders, data):
for hashed_addr in senders:
try:
emails = '. '.join(data[hashed_addr])
with open("dataset/twitpersonality/Data/{}.txt".format(hashed_addr), 'w') as f:
f.write("%s\n" % emails)
except KeyError:
continue
if __name__ == '__main__':
"""
The file mailcorpus-sha.json contains the emails written by the developers.
"""
with open("dataset/goldstandard/mailcorpus-sha.json", mode="r", encoding="utf-8") as f:
email_corpus = json.load(f)
"""
    Here we retrieve the list of developers in order to merge their emails.
"""
with open(file="dataset/goldstandard/address_list_sha.txt", mode="r") as f:
hashed_senders = [line.strip() for line in f.readlines()]
liwc(hashed_senders, email_corpus)
personality_recognizer(hashed_senders, email_corpus)
twitpersonality(hashed_senders, email_corpus)
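
# Hedged illustration of the expected input shapes (inferred from the code above,
# not from the actual dataset): mailcorpus-sha.json maps a hashed sender address
# to the list of that sender's email bodies, and address_list_sha.txt holds one
# hashed address per line, e.g.
#
#   {"3fa9c0d1...": ["first email text", "second email text"], ...}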
|
collab-uniba/tosem2021-personality-rep-package
|
src/data_preparation.py
|
data_preparation.py
|
py
| 1,782 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7935100756
|
# -*- coding: utf-8 -*-
#import sys
#reload(sys)
#sys.setdefaultencoding('utf-8') #gb2312
import codecs
import random
import numpy as np
from tflearn.data_utils import pad_sequences
from collections import Counter
import os
import pickle
import json
import jieba
from predictor.data_util_test import pad_truncate_list
PAD_ID = 0
UNK_ID=1
_PAD="_PAD"
_UNK="UNK"
def load_data_multilabel(traning_data_path, valid_data_path, test_data_path, vocab_word2index, accusation_label2index,
sentence_len, name_scope='cnn', test_mode=False):
"""
convert data as indexes using word2index dicts.
:param traning_data_path:
:param vocab_word2index:
:param vocab_label2index:
:return:
"""
# 1. use cache file if exist
cache_data_dir = 'cache' + "_" + name_scope
cache_file =cache_data_dir+"/"+'train_valid_test_shiwan_3w_high.pik'
print("cache_path:",cache_file,"train_valid_test_file_exists:",os.path.exists(cache_file))
if os.path.exists(cache_file):
with open(cache_file, 'rb') as data_f:
print("going to load cache file from file system and return")
return pickle.load(data_f)
# 2. read source file
train_file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')
valid_file_object = codecs.open(valid_data_path, mode='r', encoding='utf-8')
test_data_obejct = codecs.open(test_data_path, mode='r', encoding='utf-8')
train_lines = train_file_object.readlines()
valid_lines=valid_file_object.readlines()
test_lines=test_data_obejct.readlines()
random.shuffle(train_lines)
random.shuffle(valid_lines)
random.shuffle(test_lines)
if test_mode:
train_lines=train_lines[0:1000]
# 3. transform to train/valid data to standardized format
train = transform_data_to_index(train_lines, vocab_word2index, accusation_label2index, sentence_len,'train',name_scope)
valid = transform_data_to_index(valid_lines, vocab_word2index, accusation_label2index, sentence_len,'valid',name_scope)
test = transform_data_to_index(test_lines, vocab_word2index, accusation_label2index, sentence_len,'test',name_scope)
# 4. save to file system if vocabulary of words not exists
if not os.path.exists(cache_file):
with open(cache_file, 'ab') as data_f:
print("going to dump train/valid/test data to file system.")
pickle.dump((train,valid,test),data_f, protocol=4)
return train, valid, test
splitter = ':'
num_mini_examples=1900
def transform_data_to_index(lines, vocab_word2index, accusation_label2index, sentence_len, data_type, name_scope):
"""
transform data to index using vocab and label dict.
:param lines:
:param vocab_word2index:
:param accusation_label2index:
:param article_label2index:
:param deathpenalty_label2index:
:param lifeimprisonment_label2index:
:param sentence_len: max sentence length
:return:
"""
X = []
Y_accusation = [] # discrete
accusation_label_size=len(accusation_label2index)
# load frequency of accu and relevant articles, so that we can copy those data with label are few. ADD 2018-05-29
accusation_freq_dict = load_accusation_freq_dict(accusation_label2index, name_scope)
for i, line in enumerate(lines):
if i % 10000 == 0:
print("i:", i)
json_string = json.loads(line.strip())
# 1. transform input x.discrete
facts = json_string['fact']
input_list = token_string_as_list(facts) # tokenize
x = [vocab_word2index.get(x, UNK_ID) for x in input_list] # transform input to index
x = pad_truncate_list(x, sentence_len)
# 2. transform accusation.discrete
accusation_list = json_string['meta']['accusation']
accusation_list = [accusation_label2index[label] for label in accusation_list]
y_accusation = transform_multilabel_as_multihot(accusation_list, accusation_label_size)
# OVER-SAMPLING:if it is training data, copy labels that are few based on their frequencies.
# num_copy = 1
# if data_type == 'train': #set specially weight and copy some examples when it is training data.
# freq_accusation = accusation_freq_dict[accusation_list[0]]
# if freq_accusation <= num_mini_examples:
# freq = freq_accusation
# num_copy=max(1,num_mini_examples/freq)
# if i%1000==0:
# print("####################freq_accusation:", freq_accusation, ";num_copy:", num_copy)
#
# for k in range(int(num_copy)):
# X.append(x)
# Y_accusation.append(y_accusation)
#### no oversampling
X.append(x)
Y_accusation.append(y_accusation)
#shuffle
number_examples = len(X)
X_ = []
Y_accusation_ = []
permutation = np.random.permutation(number_examples)
for index in permutation:
X_.append(X[index])
Y_accusation_.append(Y_accusation[index])
X_ = np.array(X_)
data = (X_, Y_accusation_)
return data
def transform_multilabel_as_multihot(label_list,label_size):
"""
convert to multi-hot style
:param label_list: e.g.[0,1,4], here 4 means in the 4th position it is true value(as indicate by'1')
:param label_size: e.g.199
:return:e.g.[1,1,0,1,0,0,........]
"""
result=np.zeros(label_size)
#set those location as 1, all else place as 0.
result[label_list] = 1
return result
def transform_mulitihot_as_dense_list(multihot_list):
length = len(multihot_list)
result_list = [i for i in range(length) if multihot_list[i] > 0]
return result_list
#use pretrained word embedding to get word vocabulary and labels, and its relationship with index
def create_or_load_vocabulary(data_path, predict_path, training_data_path, vocab_size, name_scope='cnn', test_mode=False):
"""
create vocabulary
:param training_data_path:
:param vocab_size:
:param name_scope:
:return:
"""
cache_vocabulary_label_pik='cache'+"_"+name_scope # path to save cache
if not os.path.isdir(cache_vocabulary_label_pik): # create folder if not exists.
os.makedirs(cache_vocabulary_label_pik)
#0.if cache exists. load it; otherwise create it.
cache_path =cache_vocabulary_label_pik+"/"+'vocab_label_shiwan_high.pik'
print("cache_path:",cache_path,"file_exists:",os.path.exists(cache_path))
if os.path.exists(cache_path):
with open(cache_path, 'rb') as data_f:
print("going to load cache file.vocab of words and labels")
return pickle.load(data_f)
else:
vocab_word2index = {}
vocab_word2index[_PAD] = PAD_ID
vocab_word2index[_UNK] = UNK_ID
accusation_label2index = {}
#1.load raw data
file_object = codecs.open(training_data_path, mode='r', encoding='utf-8')
lines = file_object.readlines()
random.shuffle(lines)
if test_mode:
lines=lines[0:10000]
#2.loop each line,put to counter
c_inputs = Counter()
c_accusation_labels = Counter()
for i,line in enumerate(lines):
if i % 10000 == 0:
print(i)
json_string = json.loads(line.strip())
facts = json_string['fact']
input_list = token_string_as_list(facts)
c_inputs.update(input_list)
accusation_list = json_string['meta']['accusation']
c_accusation_labels.update(accusation_list)
#3.get most frequency words
vocab_list = c_inputs.most_common(vocab_size)
word_vocab_file = predict_path+"/"+'word_freq_shiwan_3w_high.txt'
if os.path.exists(word_vocab_file):
print("word vocab file exists.going to delete it.")
os.remove(word_vocab_file)
word_freq_file = codecs.open(word_vocab_file,mode='a',encoding='utf-8')
for i, tuplee in enumerate(vocab_list):
word,freq = tuplee
word_freq_file.write(word+":"+str(freq)+"\n")
vocab_word2index[word] = i+2
#4.1 accusation and its frequency.
accusation_freq_file = codecs.open(cache_vocabulary_label_pik+"/"+'accusation_freq_shiwan_3w_high.txt',mode='a',encoding='utf-8')
accusation_label_list = c_accusation_labels.most_common()
for i, tuplee in enumerate(accusation_label_list):
label,freq = tuplee
accusation_freq_file.write(label+":"+str(freq)+"\n")
#4.2 accusation dict, code the accusation with number
accusation_voc_file = data_path+"/accu.txt"
accusation_voc_object = codecs.open(accusation_voc_file,mode='r',encoding='utf-8')
accusation_voc_lines = accusation_voc_object.readlines()
for i, accusation_name in enumerate(accusation_voc_lines):
accusation_name=accusation_name.strip()
accusation_label2index[accusation_name] = i
#6.save to file system if vocabulary of words not exists.
if not os.path.exists(cache_path):
with open(cache_path, 'ab') as data_f:
print("going to save cache file of vocab of words and labels")
pickle.dump((vocab_word2index, accusation_label2index), data_f, protocol=4)
#7.close resources
word_freq_file.close()
accusation_freq_file.close()
print("create_vocabulary.ended")
return vocab_word2index, accusation_label2index
def token_string_as_list(string,tokenize_style='word'):
#string=string.decode("utf-8")
string = replace_money_value(string) #TODO add normalize number ADD 2018.06.11
length = len(string)
if tokenize_style == 'char':
listt = [string[i] for i in range(length)]
elif tokenize_style == 'word':
listt = jieba.lcut(string)
listt = [x for x in listt if x.strip()]
return listt
def get_part_validation_data(valid, num_valid=6000):
valid_X, valid_Y_accusation = valid
number_examples = len(valid_X)
permutation = np.random.permutation(number_examples)[0:num_valid]
valid_X2, valid_Y_accusation2 = [], []
for index in permutation:
valid_X2.append(valid_X[index])
valid_Y_accusation2.append(valid_Y_accusation[index])
return valid_X2, valid_Y_accusation2
def load_accusation_freq_dict(accusation_label2index, name_scope):
cache_vocabulary_label_pik = 'cache'+"_"+name_scope # path to save cache
#load dict of accusations
accusation_freq_file = codecs.open(cache_vocabulary_label_pik + "/" + 'accusation_freq_shiwan_3w_high.txt', mode='r',encoding='utf-8')
accusation_freq_lines = accusation_freq_file.readlines()
accusation_freq_dict = {}
for i, line in enumerate(accusation_freq_lines):
acc_label, freq = line.strip().split(splitter) #编造、故意传播虚假恐怖信息:122
accusation_freq_dict[accusation_label2index[acc_label]] = int(freq)
return accusation_freq_dict
import re
def replace_money_value(string):
#print("string:")
#print(string)
    money_list = [1,2,5,7,10, 20, 30,50, 100, 200, 500, 800,1000, 2000, 5000,7000, 10000, 20000, 50000, 80000,100000,200000, 500000, 1000000,3000000,5000000,1000000000]
double_patten = r'\d+\.\d+'
int_patten = r'[\u4e00-\u9fa5,,.。;;]\d+[元块万千百十余,,。.;;]'
doubles=re.findall(double_patten,string)
ints=re.findall(int_patten,string)
ints=[a[1:-1] for a in ints]
#print(doubles+ints)
sub_value=0
for value in (doubles+ints):
        for money in money_list:
if money >= float(value):
sub_value=money
break
string=re.sub(str(value),str(sub_value),string)
return string
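
# Hedged demo (added for illustration; not part of the original pipeline). It
# exercises the small helpers above on synthetic inputs and only runs when this
# file is executed directly, assuming the module's own imports (jieba, tflearn,
# predictor.data_util_test) resolve in that environment.
if __name__ == '__main__':
    print(transform_multilabel_as_multihot([0, 3], 5))         # [1. 0. 0. 1. 0.]
    print(transform_mulitihot_as_dense_list([1, 0, 0, 1, 0]))  # [0, 3]
    # 3500 is rounded up to the next bucket (5000) defined in replace_money_value
    print(replace_money_value(u'被告人盗窃现金3500元'))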
|
201520815029009/Text-classification-augmented-with-label-definitions
|
cnn_classification/data_util.py
|
data_util.py
|
py
| 11,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38705340896
|
## Script (Python) "getPautasPanoramaIpea"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=Retorna a lista de pautas do Panorama IPEA
pautas = context.portal_catalog.searchResults(portal_type='Pauta', \
getMidias='ipea', \
sort_on='getData', sort_order='reverse')
return pautas
|
lflrocha/ebc.pauta
|
ebc/pauta/skins/ebc_pauta_custom_templates/getPautasPanoramaIpea.py
|
getPautasPanoramaIpea.py
|
py
| 468 |
python
|
fi
|
code
| 0 |
github-code
|
6
|
30366634041
|
""" Simple polygon plot.
The UI allows you to change some of the attributes of the plot.
"""
import numpy as np
from traits.api import HasTraits, Instance, Range
from traitsui.api import View, UItem, Item, Group, HGroup, VGroup, spring
from chaco.api import Plot, ArrayPlotData, PolygonPlot
from enable.api import ComponentEditor, LineStyle
class PolygonPlotDemo(HasTraits):
# The main plot container.
plot = Instance(Plot)
# Data holder for `plot`.
apd = Instance(ArrayPlotData)
# The polygon plot renderer.
polygon_plot = Instance(PolygonPlot)
# Assorted styles that will be set on `polygon_plot`.
edge_style = LineStyle
edge_width = Range(value=1, low=0, high=8)
edge_alpha = Range(value=1.0, low=0.0, high=1.0)
face_alpha = Range(value=0.4, low=0.0, high=1.0)
alpha = Range(value=1.0, low=0.0, high=1.0)
traits_view = View(
VGroup(
Group(
UItem("plot", editor=ComponentEditor(), style="custom"),
),
VGroup(
HGroup(
Item("edge_style"),
spring,
),
Item("edge_width"),
Item("edge_alpha"),
Item("face_alpha"),
Item("alpha"),
),
),
resizable=True,
)
# ----------------------------------------------------------------------
# Default values
# ----------------------------------------------------------------------
def _apd_default(self):
# Create the data to plot.
px = np.array([0.5, 1.0, 2.0, 2.5, 2.0, 1.5, 0.5, 0.0])
py = np.array([0.0, 0.8, 0.5, 3.0, 3.5, 2.0, 3.0, 0.5])
# Create the ArrayPlotData container used by the Plot.
apd = ArrayPlotData(px=px, py=py)
return apd
def _plot_default(self):
plot = Plot(self.apd, title="PolygonPlot Demo")
return plot
def _polygon_plot_default(self):
p = self.plot.plot(
("px", "py"),
type="polygon",
face_color=(0, 0.8, 1) + (self.face_alpha,),
edge_color=(0, 0, 0) + (self.edge_alpha,),
edge_style=self.edge_style,
alpha=self.alpha,
)
return p[0]
# ----------------------------------------------------------------------
# Trait change handlers
# ----------------------------------------------------------------------
def _edge_style_changed(self):
self.polygon_plot.edge_style = self.edge_style
def _edge_width_changed(self):
self.polygon_plot.edge_width = self.edge_width
def _edge_alpha_changed(self):
self.polygon_plot.edge_color = self.polygon_plot.edge_color[:3] + (
self.edge_alpha,
)
def _face_alpha_changed(self):
self.polygon_plot.face_color = self.polygon_plot.face_color[:3] + (
self.face_alpha,
)
def _alpha_changed(self):
self.polygon_plot.alpha = self.alpha
demo = PolygonPlotDemo()
# Hack to force initial rendering of the plot.
demo.face_alpha = 0.5
if __name__ == "__main__":
demo.configure_traits()
|
enthought/chaco
|
chaco/examples/demo/basic/polygon_plot_demo.py
|
polygon_plot_demo.py
|
py
| 3,167 |
python
|
en
|
code
| 286 |
github-code
|
6
|
38760601495
|
import sys
import netCDF4
import math
import itertools
from functools import reduce
def array_pieces(ndarray, max_bytes=None, overlap=0):
'''
Generator to return a series of numpy arrays less than max_bytes in size and the offset within the complete data from a NetCDF variable
Parameters:
ndarray: Numpy array or NetCDF array variable
overlap: number of pixels to add to each edge
max_bytes: Maximum number of bytes to retrieve. Defaults to 500,000,000 for NCI's OPeNDAP
Yields:
piece_array: array subset less than max_bytes in size
array_offset: start indices of subset in whole array
'''
max_bytes = max_bytes or 500000000 # Defaults to 500MB for NCI's OPeNDAP
array_shape = ndarray.shape
array_dimensions = len(array_shape)
# Determine overall array size in bytes
array_bytes = ndarray.dtype.itemsize * \
reduce(lambda x, y: x * y, array_shape)
if array_bytes > max_bytes: # Multiple pieces required
# Determine number of divisions in each axis required to keep pieces
# under max_bytes in size
axis_divisions = int(math.ceil(
math.pow(math.ceil(array_bytes / float(max_bytes)), 1.0 / array_dimensions)))
# Determine chunk size for pieces or default to natural divisions if no
# chunking set
try:
chunking = ndarray.chunking() or (1, 1)
except: # Numpy arrays don't have chunking
chunking = (1, 1)
# Disregard chunking if it's too big to be useful
chunking = [chunking[index] if chunking[index] < array_shape[index] // axis_divisions else 1
for index in range(array_dimensions)]
# Determine piece shape rounded down to chunk sizes
piece_shape = [array_shape[index] // axis_divisions // chunking[index]
* chunking[index] for index in range(array_dimensions)]
# Determine total number of pieces in each axis
axis_pieces = [int(math.ceil(float(array_shape[index]) // piece_shape[index]))
for index in range(array_dimensions)]
# Iterate over every piece of array
for piece_indices in itertools.product(*[range(axis_pieces[dimension_index])
for dimension_index in range(array_dimensions)]):
# Compute base start indices with no overlap
start_indices = [piece_indices[dimension_index] * piece_shape[dimension_index]
for dimension_index in range(array_dimensions)]
# Compute end indices plus overlap from start indices
end_indices = [min(start_indices[dimension_index] + piece_shape[dimension_index] + overlap,
array_shape[dimension_index])
for dimension_index in range(array_dimensions)]
# Subtract overlap from base start indices
start_indices = [max(0, start_indices[dimension_index] - overlap)
for dimension_index in range(array_dimensions)]
array_slices = [slice(start_indices[dimension_index],
end_indices[dimension_index])
for dimension_index in range(array_dimensions)]
            piece_array = ndarray[tuple(array_slices)]
yield piece_array, tuple(start_indices)
else: # Only one piece required
yield ndarray[...], (0, 0)
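
# Hedged usage sketch (added for illustration; not part of the original module):
# array_pieces also works on a plain numpy array, e.g.
#
#   import numpy as np
#   data = np.zeros((1000, 1000), dtype=np.float32)   # ~4 MB
#   for piece, offset in array_pieces(data, max_bytes=1000000, overlap=2):
#       print(piece.shape, offset)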
def main():
'''
Main function for testing
'''
netcdf_path = sys.argv[1]
netcdf_dataset = netCDF4.Dataset(netcdf_path)
# Find variable with "grid_mapping" attribute - assumed to be 2D data
# variable
try:
data_variable = [variable for variable in netcdf_dataset.variables.values(
) if hasattr(variable, 'grid_mapping')][0]
except:
raise Exception(
'Unable to determine data variable (must have "grid_mapping" attribute')
piece_count = 0
for piece_array, array_offset in array_pieces(data_variable, overlap=0):
piece_count += 1
piece_bytes = data_variable.dtype.itemsize * \
reduce(lambda x, y: x * y, piece_array.shape)
        print('piece_array.shape = %s, array_offset = %s, piece_bytes = %d' % (piece_array.shape, array_offset, piece_bytes))
    print('piece_count = %s' % piece_count)
if __name__ == '__main__':
main()
|
GeoscienceAustralia/geophys_utils
|
geophys_utils/_array_pieces.py
|
_array_pieces.py
|
py
| 4,493 |
python
|
en
|
code
| 22 |
github-code
|
6
|
75234645628
|
from odoo import models, fields, api, exceptions, _
from odoo.exceptions import Warning, ValidationError
import datetime
from dateutil.relativedelta import relativedelta
class ExtraContractInherit(models.Model):
_inherit = 'hr.contract'
date_of_birth = fields.Date(string='تاريخ ميلاد الموظف', compute='cal_contract_birth_from_emp', store=True)
# current_emp_age = fields.Integer(string='عمر الموظف', compute='get_age_for_alloc_by_birth')
current_emp_age = fields.Integer(string='عمر الموظف', )
now_date = fields.Date(default=fields.Date.today())
form_registration_date = fields.Date(string='تاريخ تسجيل الاستمارة')
form_six_date = fields.Date(string='تاريخ استمارة 6')
social_insurances = fields.Selection([
('insured', "مؤمن عليه"),
('not_insured', "غير مؤمن عليه"),
], string='التأمينات الاجتماعية', default='not_insured', related='employee_id.social_insurances', readonly=False)
non_insurance_reason = fields.Char(string='سبب عدم التأمين')
insurance_number = fields.Char(string='الرقم التأميني', related='employee_id.insurance_number', readonly=False)
insurances_calculation = fields.Selection([
('insurance_salary', "الراتب التأميني"),
('modified_salary', "راتب معدل"),
], string='طريقة احتساب التأمينات', default='insurance_salary')
register_method = fields.Selection([
('token', "Token"),
('office', "Office"),
], string='طريقة التسجيل', default='token', related='employee_id.register_method', readonly=False)
insurance_status = fields.Selection([
('open', "Open"),
('paid', "Paid"),
], string='حالة التأمين', default='open')
modified_salary = fields.Float(string='الراتب المعدل')
company_percentage = fields.Float(string='نسبة الشركة', readonly=True, compute='calc_emp_co_percentage')
employee_percentage = fields.Float(string='نسبة الموظف', readonly=True, compute='calc_emp_co_percentage')
over_age = fields.Float(string='عمر فوق السن', compute='calc_emp_co_percentage')
insurance_date_start = fields.Date('تاريخ بداية احتساب التأمينات', default=fields.Date.today, copy=True)
total_insurance = fields.Float(string='Total Insurance', )
total_insurance_company = fields.Float()
# total_insurance = fields.Float(string='Total Insurance', compute='cal_total_insurance')
# total_insurance_company = fields.Float(compute='cal_total_insurance')
insurance_table = fields.One2many('insurance.monthly', 'inv_history')
struct_id = fields.Many2one('hr.payroll.structure', string='Salary Structure', compute='cal_all_struct')
work_overtime = fields.Float()
bounce = fields.Float()
annual_raise = fields.Float()
retroactive_raise = fields.Float()
total_salary = fields.Float(compute='calculate_basic_salary', store=True, readonly=False)
@api.depends('wage', 'work_overtime', 'bounce', 'annual_raise', 'retroactive_raise')
def calculate_basic_salary(self):
for rec in self:
rec.total_salary = rec.wage + rec.work_overtime + rec.annual_raise + rec.bounce + rec.retroactive_raise
@api.depends('social_insurances')
def cal_all_struct(self):
for rec in self:
if rec.social_insurances == 'insured':
asd = self.env['hr.payroll.structure'].search([('is_insured', '=', True)], limit=1)
if asd:
rec.struct_id = asd.id
else:
rec.struct_id = False
elif rec.social_insurances == 'not_insured':
asd = self.env['hr.payroll.structure'].search([('not_insured', '=', True)], limit=1)
if asd:
rec.struct_id = asd.id
else:
rec.struct_id = False
else:
rec.struct_id = False
@api.depends('employee_id.birthday')
def cal_contract_birth_from_emp(self):
for rec in self:
if rec.employee_id:
print('heloo emp')
if rec.employee_id.birthday:
print('hello birth')
print(rec.date_of_birth)
rec.date_of_birth = rec.employee_id.birthday
print(rec.date_of_birth)
else:
print('no birth')
else:
print('no emp')
@api.onchange('social_insurances', 'wage')
def check_insuurance_range(self):
asd = self.env['emp.insurance'].search([('active', '=', True)])
for line in self:
if line.social_insurances == 'insured':
if line.wage:
if (line.wage < asd.min_insurance_salary):
raise ValidationError('Wage of this employee out of insurance range')
# (line.wage > asd.max_insurance_salary) or
@api.depends('wage', 'modified_salary', 'insurances_calculation', 'over_age')
def calc_emp_co_percentage(self):
asd = self.env['emp.insurance'].search([('active', '=', True)])
if asd:
for rec in self:
rec.over_age = asd.over_age
if rec.current_emp_age <= rec.over_age:
if rec.insurances_calculation == 'insurance_salary':
rec.company_percentage = (asd.company_percentage / 100) * rec.wage
rec.employee_percentage = (asd.employee_percentage / 100) * rec.wage
elif rec.insurances_calculation == 'modified_salary':
rec.company_percentage = (asd.company_percentage / 100) * rec.modified_salary
rec.employee_percentage = (asd.employee_percentage / 100) * rec.modified_salary
else:
if asd.is_over_age == True:
if rec.insurances_calculation == 'insurance_salary':
rec.company_percentage = (asd.over_age_company_percentage / 100) * rec.wage
rec.employee_percentage = (asd.over_age_employee_percentage / 100) * rec.wage
elif rec.insurances_calculation == 'modified_salary':
rec.company_percentage = (asd.over_age_company_percentage / 100) * rec.modified_salary
rec.employee_percentage = (asd.over_age_employee_percentage / 100) * rec.modified_salary
else:
raise ValidationError(
'there is no insurance configuration for over age employees please configur it and try again')
else:
raise ValidationError('there is no insurance configuration for employees please configur it and try again')
@api.depends("date_of_birth", "now_date")
def get_age_for_alloc_by_birth(self):
for rec in self:
if rec.now_date and rec.date_of_birth:
fmt = '%Y-%m-%d'
d1 = datetime.datetime.strptime(str(rec.now_date).strip(' \t\r\n').split(".")[0], fmt)
d2 = datetime.datetime.strptime(str(rec.date_of_birth).strip(' \t\r\n').split(".")[0], fmt)
years_between_dates = str((d1 - d2).days / 365)
rec.current_emp_age = int(float(years_between_dates))
print(years_between_dates)
@api.depends('insurance_table')
def cal_total_insurance(self):
for line in self:
if line.insurance_table:
for rec in line.insurance_table:
line.total_insurance += rec.emp_amount
line.total_insurance_company += rec.company_amount
@api.onchange('name', 'employee_id')
def cal_name_from_emp_number(self):
for rec in self:
if rec.employee_id and rec.employee_id.hiring_date:
rec.name = str(rec.employee_id.internal_number)
rec.date_start = rec.employee_id.hiring_date
rec.date_end = rec.date_start + relativedelta(years=1)
rec.trial_date_end = rec.date_start + relativedelta(months=3)
@api.onchange('name', 'state', 'form_registration_date', 'insurance_number', 'wage', 'company_percentage',
'employee_percentage', 'insurance_status',
'social_insurances', 'register_method')
def move_employee_fields(self):
for rec in self:
if rec.state == 'open':
check_emp = self.env['hr.employee'].search([('id', '=', rec.employee_id.id)])
if check_emp:
check_emp.write(
{
'contract_end_date': rec.date_end,
'form_registration_date': rec.form_registration_date,
'social_insurances': rec.social_insurances,
'insurance_number': rec.insurance_number,
'register_method': rec.register_method,
'insurance_status': rec.insurance_status,
'company_percentage': rec.company_percentage,
'employee_percentage': rec.employee_percentage,
})
class InsuranceMonthlyRecords(models.Model):
_name = 'insurance.monthly'
date = fields.Date('Date')
emp_amount = fields.Float('Employee Percentage')
company_amount = fields.Float('Company Percentage')
inv_history = fields.Many2one('hr.contract')
class HREmployee(models.Model):
_inherit = 'hr.employee.public'
    hiring_date = fields.Date(string='Hiring Date', store=True, copy=True)
internal_number = fields.Char(string="Tawzef Number")
employee_number = fields.Char(string="Client Number", store=True)
contract_end_date = fields.Date('Contract End Date')
medic_exam = fields.Char()
form_registration_date = fields.Date(string='تاريخ تسجيل الاستمارة', )
social_insurances = fields.Selection([
('insured', "مؤمن عليه"),
('not_insured', "غير مؤمن عليه"),
], string='التأمينات الاجتماعية', default='not_insured')
insurance_number = fields.Char(string='الرقم التأميني')
register_method = fields.Selection([
('token', "Token"),
('office', "Office"),
], string='طريقة التسجيل', default='token')
insurance_status = fields.Selection([
('open', "Open"),
('paid', "Paid"),
], string='حالة التأمين', default='open')
company_percentage = fields.Float(string='نسبة الشركة', readonly=True)
employee_percentage = fields.Float(string='نسبة الموظف', readonly=True)
company_period = fields.Float(string='نسبة الشركة خلال الفترة', readonly=True, store=True)
employee_period = fields.Float(string='نسبة الموظف خلال الفترة', readonly=True, store=True)
working_schedule = fields.Many2one('work.schedule')
service_id = fields.Many2one('product.product', domain="[('type','=','service')]", string="Current Service",
tracking=True)
branch_id = fields.Many2one('res.branch')
class HREmployeeContractInsurance(models.Model):
_inherit = 'hr.employee'
hiring_date = fields.Date(string='Hiring Date', store=True, copy=True)
internal_number = fields.Char(string="Tawzef Number")
employee_number = fields.Char(string="Client Number", store=True)
contract_end_date = fields.Date('Contract End Date')
medic_exam = fields.Char()
form_registration_date = fields.Date(string='تاريخ تسجيل الاستمارة', )
social_insurances = fields.Selection([
('insured', "مؤمن عليه"),
('not_insured', "غير مؤمن عليه"),
], string='التأمينات الاجتماعية', default='not_insured')
insurance_number = fields.Char(string='الرقم التأميني')
register_method = fields.Selection([
('token', "Token"),
('office', "Office"),
], string='طريقة التسجيل', default='token')
insurance_status = fields.Selection([
('open', "Open"),
('paid', "Paid"),
], string='حالة التأمين', default='open')
company_percentage = fields.Float(string='نسبة الشركة', readonly=True)
employee_percentage = fields.Float(string='نسبة الموظف', readonly=True)
company_period = fields.Float(string='نسبة الشركة خلال الفترة', readonly=True, store=True)
employee_period = fields.Float(string='نسبة الموظف خلال الفترة', readonly=True, store=True)
@api.onchange('name', 'insurance_number', 'social_insurances', 'register_method')
def cal_emp_insurance_data_to_contract(self):
for rec in self:
print('hello everybody')
print(self._origin.id)
contr = self.env['hr.contract'].search([('state', '=', 'open'), ('employee_id', '=', self._origin.id)])
if contr:
contr.write(
{
'social_insurances': rec.social_insurances,
'insurance_number': rec.insurance_number,
'register_method': rec.register_method,
'name': rec.internal_number,
})
# print('yes')
# for line in contr:
# line.insurance_number = rec.insurance_number
# line.social_insurances = rec.social_insurances
# line.register_method = rec.register_method
# print(line.social_insurances,rec.social_insurances)
# print(line.insurance_number, rec.insurance_number)
# print(line.register_method, rec.register_method)
else:
print('nooo')
class HRPayrollContractInsurance(models.Model):
_inherit = 'hr.payroll.structure'
is_insured = fields.Boolean('مؤمن عليه')
not_insured = fields.Boolean('غيرمؤمن عليه')
|
emadraafatgad/visoneer
|
hr_insurance/models/employee_contract.py
|
employee_contract.py
|
py
| 14,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6836039340
|
from fastapi import APIRouter, Depends, Response
from queries.games import GamesQueries
from typing import Union
router = APIRouter()
@router.get("/api/games/{game_id}")
def get_game(
game_id: int,
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_game_by_id(game_id)
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/games/{game_id}/screenshots")
def get_game_screenshots(
game_id: int,
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_screenshots_by_id(game_id)
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/games")
def get_games(
response: Response,
queries: GamesQueries = Depends(),
search: Union[str, None] = None,
):
if search is not None:
data = queries.get_games_by_search(search)
if data is None:
response.status_code = 404
else:
return data
else:
data = queries.get_all_games()
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/genres")
def get_genres(
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_all_genres()
if data is None:
response.status_code = 404
else:
return data
@router.get("/api/games/genres/{genre_id}")
def get_games_by_genre(
genre_id: int,
response: Response,
queries: GamesQueries = Depends(),
):
data = queries.get_games_by_genre(genre_id)
if data is None:
response.status_code = 404
else:
return data
|
tinatran079/netstix
|
games/routers/games.py
|
games.py
|
py
| 1,682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9136295998
|
# 1. Create a folder
import os
from multiprocessing import Pool,Manager
def copyFileTask(name,oldFolderName,newFolderName,queue):
fr=open(oldFolderName+'/'+name)
fw=open(newFolderName+'/'+name,'w')
content=fr.read()
fw.write(content)
fr.close()
fw.close()
queue.put(name)
def main():
    # 0. Get the name of the folder the user wants to copy
oldFolderName = input("请输入文件夹的名字:")
newFloderName = oldFolderName + "-复件"
os.mkdir(newFloderName)
    # 2. Get the names of all the files in the old folder
fileName = os.listdir(oldFolderName)
    # 3. Copy the files using a pool of processes
pool = Pool(5)
queue=Manager().Queue()
for name in fileName:
pool.apply_async(copyFileTask, args=(name,oldFolderName,newFloderName,queue))
num=0
allnum=len(fileName)
while num!=allnum:
queue.get()
num += 1
copyRate=num / allnum
print('\r copy的进度是:%.2f%%'%(copyRate*100),end='')
print("\n 已完成copy")
if __name__=="__main__":
main()
| repo_name: pgg-pgg/pythonTest | sub_path: 19-多进程文件copy.py | file_name: 19-多进程文件copy.py | file_ext: py | file_size_in_byte: 1,051 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 70097864829 |
import pygame as pg
import sys
from pygame.sprite import Group
from constants import *
from settings import Settings
from ship import Ship
import functions as funcs
from stats import Stats
from button import Button
from score import Score
from sound import Sound
def main():
sound = Sound()
pg.init()
clock = pg.time.Clock()
game_settings = Settings() # create an object from a class
game_stats = Stats(game_settings)
screen = pg.display.set_mode((game_settings.screen_width, game_settings.screen_height))
pg.display.set_caption("Alien Invasion")
spaceShip = Ship(screen, game_settings)
bullets = Group()
aliens = Group()
funcs.create_fleet(screen, game_settings, aliens, spaceShip)
play_btn = Button(game_settings, screen, "Play!")
score = Score(game_settings, screen, game_stats)
pg.mouse.set_visible(False)
# sound.bgm.play(loops=-1)
# pg.mixer.Channel(0).play(sound.bgm, loops=-1)
font = pg.font.Font(None, 40)
font_img = font.render("WELCOME", True, (50, 50, 50), (200, 200, 200))
# main game loop
while True:
funcs.check_input_events(spaceShip, game_settings, screen, bullets, aliens, game_stats, play_btn, score, sound)
if game_stats.game_state == GAME_STATE_MENU:
screen.fill((100, 100, 100))
screen.blit(font_img, (200, 200))
pg.display.flip()
elif game_stats.game_state == GAME_STATE_PLAY:
if game_stats.game_over == False:
spaceShip.update()
funcs.update_bullets(bullets, aliens, game_settings, screen, spaceShip, game_stats, score, sound)
funcs.update_fleet(game_settings, screen, game_stats, aliens, spaceShip, bullets, score)
funcs.update_screen(screen, game_settings, game_stats, spaceShip, bullets, aliens, play_btn, score)
clock.tick(60)
if __name__ == '__main__':
main()
| repo_name: hoangdesu/Alien-Invasion-Pygame | sub_path: main.py | file_name: main.py | file_ext: py | file_size_in_byte: 2,009 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |
| seq_id: 37553859431 |
# Cleans peers.txt of duplicates and nodes on same first network octet
# thus improving geographic spread of peers
import re
global netlist #so it can be accessed in multiple places
netlist = []
def clean_tuples(sent_tuples): # sent_tuples is the peer tuple list from local or remote
cleaned = [] # holder for the adjusted peer list
for tuple in sent_tuples:
HOST = tuple[0]
mnet = HOST.split(".")
tnet = mnet[0] # get the first octet - normally the network id (e.g. google cloud uses 104.x.x.x so we need the 104 bit)
match = False # we set false as we are testing for a match
for item in netlist:
if tnet == item: # if the network id is already in netlist then it's a match - we don't want any more this session
match = True
if not match:
netlist.append(tnet) # if we don't have this yet then we want to add it
cleaned.append(tuple) # as it is good we append to the returned tuple
return cleaned # this is sent back to the caller
# example below
with open("peers.txt", "r") as peer_list:
peers = peer_list.read()
raw_tuples = re.findall("'([\d\.]+)', '([\d]+)'", peers)
print(raw_tuples)
print("\n")
peer_tuples = clean_tuples(raw_tuples)
print(peer_tuples)
output = open("peers.txt", 'w')
for x in peer_tuples:
output.write(str(x) + "\n")
output.close()
# the peer_tuples are now clean !! so you can do things with it
| repo_name: Stoner19/stuff | sub_path: peer_clean.py | file_name: peer_clean.py | file_ext: py | file_size_in_byte: 1,377 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 20629891320 |
import argparse
import json
from collections import OrderedDict
import kernel_tuner as kt
import common
# Parse command line arguments
def parse_command_line():
parser = argparse.ArgumentParser(description='Tuning script for add_fluxes_kernel kernel')
parser.add_argument('--tune', default=False, action='store_true')
parser.add_argument('--run', default=False, action='store_true')
parser.add_argument('--best_configuration', default=False, action='store_true')
parser.add_argument('--block_size_x', type=int, default=96)
parser.add_argument('--block_size_y', type=int, default=1)
parser.add_argument('--block_size_z', type=int, default=1)
return parser.parse_args()
# Run one instance of the kernel and test output
def run_and_test(params: OrderedDict):
print(f"Running {kernel_name} [{params['block_size_x']}, {params['block_size_y']}, {params['block_size_z']}]")
result = kt.run_kernel(kernel_name, kernels_src, problem_size, args, params, compiler_options=common.cp)
common.compare_fields(flux_up + radn_up, result[6], "flux_up")
common.compare_fields(flux_dn + radn_dn, result[7], "flux_dn")
common.compare_fields(flux_up_jac + radn_up_jac, result[8], "flux_up_jac")
# Tuning
def tune():
tune_params = OrderedDict()
tune_params["block_size_x"] = [2**i for i in range(0, 11)]
tune_params["block_size_y"] = [2**i for i in range(0, 11)]
tune_params["block_size_z"] = [2**i for i in range(0, 7)]
restrictions = [f"block_size_x <= {ncol}", f"block_size_y <= {nlev}", f"block_size_z <= {ngpt}"]
print(f"Tuning {kernel_name}")
answer = [None for _ in range(0, len(args))]
answer[6] = flux_up + radn_up
answer[7] = flux_dn + radn_dn
answer[8] = flux_up_jac + radn_up_jac
result, env = kt.tune_kernel(kernel_name, kernels_src, problem_size, args, tune_params, answer=answer,
compiler_options=common.cp, verbose=True, restrictions=restrictions)
with open("timings_add_fluxes_kernel.json", "w") as fp:
json.dump(result, fp)
if __name__ == '__main__':
command_line = parse_command_line()
kernels_src = common.dir_name + "../src_kernels_cuda/rte_solver_kernels.cu"
# Input
ncol = common.type_int(512)
nlay = common.type_int(140)
nlev = common.type_int(nlay + 1)
ngpt = common.type_int(224)
flux_size = ncol * nlev * ngpt
radn_up = common.random(flux_size, common.type_float)
radn_dn = common.random(flux_size, common.type_float)
radn_up_jac = common.random(flux_size, common.type_float)
# Output
flux_up = common.random(flux_size, common.type_float)
flux_dn = common.random(flux_size, common.type_float)
flux_up_jac = common.random(flux_size, common.type_float)
kernel_name = f"add_fluxes_kernel<{common.str_float}>"
problem_size = (ncol, nlev, ngpt)
args = [ncol, nlev, ngpt, radn_up, radn_dn, radn_up_jac, flux_up, flux_dn, flux_up_jac]
if command_line.tune:
tune()
elif command_line.run:
parameters = OrderedDict()
if command_line.best_configuration:
best_configuration = common.best_configuration("timings_add_fluxes_kernel.json")
parameters["block_size_x"] = best_configuration["block_size_x"]
parameters["block_size_y"] = best_configuration["block_size_y"]
parameters["block_size_z"] = best_configuration["block_size_z"]
else:
parameters["block_size_x"] = command_line.block_size_x
parameters["block_size_y"] = command_line.block_size_y
parameters["block_size_z"] = command_line.block_size_z
run_and_test(parameters)
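# Example invocations, derived from the argparse flags defined in parse_command_line
# (the block sizes below are illustrative values, not tuned results):
#   python add_fluxes_kernel.py --tune
#   python add_fluxes_kernel.py --run --best_configuration
#   python add_fluxes_kernel.py --run --block_size_x 128 --block_size_y 2 --block_size_z 1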
| repo_name: earth-system-radiation/rte-rrtmgp-cpp | sub_path: tuning_kernels_cuda/add_fluxes_kernel.py | file_name: add_fluxes_kernel.py | file_ext: py | file_size_in_byte: 3,673 | program_lang: python | lang: en | doc_type: code | stars: 3 | dataset: github-code | pt: 6 |
| seq_id: 30461892433 |
import csv
import base64
import pprint
import mysql.connector
from time import sleep as s
from functions import files, getLinesFromFile, getIPs, nmapScan, toLogFile
#fromLogs
#+---------------+--------------+------+-----+-------------------+-------------------+
#| Field | Type | Null | Key | Default | Extra |
#+---------------+--------------+------+-----+-------------------+-------------------+
#| id | int | NO | PRI | NULL | auto_increment |
#| logFile | varchar(100) | NO | | NULL | |
#| ipAddr | varchar(30) | NO | | NULL | |
#| timeSubmitted | timestamp | NO | | CURRENT_TIMESTAMP | DEFAULT_GENERATED |
#| dateSubmitted | datetime | NO | | CURRENT_TIMESTAMP | DEFAULT_GENERATED |
#+---------------+--------------+------+-----+-------------------+-------------------+
def c1():
file = "/home/sam/pull/test.txt"
with open(file, 'r') as f:
coded = f.readline()
f.close()
temp1 = base64.b64decode(coded)
temp2 = temp1.decode('utf-8')
db = mysql.connector.connect(
host="localhost",
passwd = temp2,
user="localUser1",
database="main",
auth_plugin='mysql_native_password'
)
return db
def getAllIPs():
db = c1()
cursor = db.cursor()
cursor.execute("SELECT ipAddr FROM fromLogs;")
temp1 = cursor.fetchall()
x = 0
finial = []
while(x != len(temp1)):
temp2 = str(temp1[x])
temp2 = temp2.strip("[(',')]")
finial.append(temp2)
x += 1
db.close()
return finial
def getMultiples(list1):
db = c1()
cursor = db.cursor()
size = len(list1)
dups = []
x = 0
while(x != size):
temp1 = str(list1[x])
cursor.execute("SELECT ipAddr FROM fromLogs WHERE ipAddr = (%s);", (temp1,))
t2 = cursor.fetchall()
if(len(t2) != 1):
print("is a dup ", t2)
getFiles(str(t2.pop()))
else:
print("single entry ", t2)
x += 1
def getFiles(ip):
db = c1()
cursor = db.cursor()
ip = ip.strip("[(',')]")
cursor.execute("SELECT logFile FROM fromLogs WHERE ipAddr = (%s);", (ip,))
t2 = cursor.fetchall()
pprint.pprint(t2)
def toCSV(ips):
with open("/home/sam/Documents/ips.csv", 'w') as f:
wr = csv.writer(f,delimiter=',')
        wr.writerows([[ip] for ip in ips])  # wrap each address in its own row; writerows over bare strings splits them into characters
###########################################
# created by Samuel Schatz #
# github: https://github.com/sschatz1997 #
# email: [email protected] #
###########################################
| repo_name: sschatz1997/Sams_website | sub_path: py_MySQL/IPcount.py | file_name: IPcount.py | file_ext: py | file_size_in_byte: 2,677 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |
| seq_id: 8522443313 |
import requests
from io import BytesIO
import time
from PIL import UnidentifiedImageError
import warnings
class PlateClient:
def __init__(self, url: str):
self.url = url
def readNumber(self, im) -> str:
res = requests.post(
f'{self.url}/readNumber',
headers={'Content-Type': 'application/x-www-form-urlencoded'},
data=im)
return res.json()['name']
def getNumber(self, id) -> str:
res = requests.get(f'{self.url}/getNumber?id={id}')
return res.json()
def getNumbers(self, ids) -> str:
res = requests.get(f'{self.url}/getNumbers?ids={ids}')
return res.json()
if __name__ == '__main__':
client = PlateClient('http://127.0.0.1:8080/')
res = client.getNumbers('10022-9965')
print(res)
# if __name__ == '__main__':
# client = PlateClient('http://127.0.0.1:8080/')
# res = client.getNumber('10022')
# print(res)
# if __name__ == '__main__':
# client = PlateClient('http://127.0.0.1:8080/')
# with open('images/10022.jpg', 'rb') as im:
# res = client.getNumber(im)
# print(res)
| repo_name: alexej-anosov/aaa_backend_hw | sub_path: src/plate_client.py | file_name: plate_client.py | file_ext: py | file_size_in_byte: 1,142 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 29010586279 |
import logging
import dposutils
def _get_url():
"""
"""
if __pillar__.get('app_port'):
url = 'http://localhost:{}'.format(__pillar__.get('app_port'))
else:
return None
return url
def _get_api():
"""
"""
url = _get_url()
if not url:
return None
return dposutils.dposAPI(url)
def enable_forging():
"""
Enable forging on a node based on delegate name
CLI Example::
salt 'dpos-delegate1' salty_dpos_post.enable_forging
"""
if not __pillar__.get('secret'):
return "No secret set in pillar data"
else:
secret = __pillar__.get('secret').strip()
payload = {'secret': secret}
return _get_api().delegates('enable_forging',
payload)
def disable_forging():
"""
Disable forging on a node based on delegate name
CLI Example::
salt 'dpos-delegate1' salty_dpos_post.disable_forging
"""
if not __pillar__.get('secret'):
return "No secret set in pillar data"
else:
secret = __pillar__.get('secret').strip()
payload = {'secret': secret}
return _get_api().delegates('disable_forging',
payload)
| repo_name: treverson/salty-dpos | sub_path: salt/_modules/salty_dpos_post.py | file_name: salty_dpos_post.py | file_ext: py | file_size_in_byte: 1,262 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |
| seq_id: 20800563712 |
import math
tris = [int(i) for i in input().split()]
def angle(a, b, c): return math.degrees(math.acos((a**2 + b**2 - c**2)/(2.0 * a * b)))
def equal(a,b,c, a1, b1, c1):
    aa = angle(a, b, c)          # angle opposite side c
    bb = angle(a, c, b)          # angle opposite side b (angle(b, a, c) would just repeat aa)
    cc = 180 - aa - bb
    aa1 = angle(a1, b1, c1)
    bb1 = angle(a1, c1, b1)
    cc1 = 180 - aa1 - bb1
return sorted([aa, bb, cc]) == sorted([aa1, bb1, cc1])
print(int(equal(tris[0], tris[1], tris[2], tris[3], tris[4], tris[5])))
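# Worked example (values chosen for illustration): for sides 3 4 5 and 6 8 10 the
# law-of-cosines angles are 90, 53.13 and 36.87 degrees in both triangles, so
# equal(3, 4, 5, 6, 8, 10) is True and the program prints 1 for the input "3 4 5 6 8 10".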
| repo_name: michbogos/olymp | sub_path: 50/17_simmilar_tris.py | file_name: 17_simmilar_tris.py | file_ext: py | file_size_in_byte: 458 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 6358696084 |
#!/usr/bin/env python3
import json
from statistics import median
from datetime import datetime, timedelta
SENT_MESSAGE = "Sent message"
RECEIVED_MESSAGE = "Received message"
TRANSACTION_INIT = "Initialising transaction"
TRANSACTION_COMMIT = "Delivered transaction"
WITNESS_SET_SELECTED = "Witness set selected"
WITNESS_SET_SELECTION = "Witness set selection"
SIMULATION_STARTED = "Simulation started"
RELIABLE_ACCOUNTABILITY = "reliable_accountability"
CONSISTENT_ACCOUNTABILITY = "consistent_accountability"
LOG_PREFIXES = {
SENT_MESSAGE,
RECEIVED_MESSAGE,
TRANSACTION_INIT,
TRANSACTION_COMMIT,
WITNESS_SET_SELECTED,
WITNESS_SET_SELECTION,
SIMULATION_STARTED
}
class TransactionInitInfo:
def __init__(self, process_id, init_timestamp):
self.process_id = process_id
self.init_timestamp = init_timestamp
class TransactionCommitInfo:
def __init__(self, process_id, received_messages_cnt, commit_timestamp):
self.process_id = process_id
self.received_messages_cnt = received_messages_cnt
self.commit_timestamp = commit_timestamp
def drop_date(line):
start = 0
while start < len(line):
if line[start].isalpha():
break
start += 1
return line[start::]
def parse_data_from_logged_line(line):
return list(map(
lambda elem: elem.split(': ')[1],
line.split(', ')
))
def get_log_line_prefix(line):
prefix = ""
for log_prefix in LOG_PREFIXES:
if line.startswith(log_prefix):
prefix = log_prefix
break
return prefix
def parse_data_from_files(directory, n):
sent_messages = {}
received_messages = {}
transaction_inits = {}
transaction_commit_infos = {}
transaction_histories = {}
transaction_witness_sets = {
"own": {},
"pot": {}
}
simulation_start = None
simulation_end = None
for process_id in range(n):
f = open(f"{directory}/process{process_id}.txt", "r")
for line in f:
line = drop_date(line.strip(" \n"))
prefix = get_log_line_prefix(line)
if prefix == "":
continue
data = parse_data_from_logged_line(line)
timestamp = int(data[-1])
if simulation_end is None or timestamp > simulation_end:
simulation_end = timestamp
if prefix == SIMULATION_STARTED:
if simulation_start is None or timestamp < simulation_start:
simulation_start = timestamp
elif prefix == SENT_MESSAGE:
sent_messages[data[0]] = timestamp
elif prefix == RECEIVED_MESSAGE:
received_messages[data[0]] = timestamp
elif prefix == TRANSACTION_INIT:
transaction_inits[data[0]] = \
TransactionInitInfo(process_id=process_id, init_timestamp=timestamp)
elif prefix == TRANSACTION_COMMIT:
transaction = data[0]
received_messages_cnt = int(data[2])
if transaction_commit_infos.get(transaction) is None:
transaction_commit_infos[transaction] = []
transaction_commit_infos[transaction].append(
TransactionCommitInfo(
process_id=process_id,
received_messages_cnt=received_messages_cnt,
commit_timestamp=timestamp)
)
elif prefix == WITNESS_SET_SELECTION:
transaction = data[0]
assert data[2][0] == '[' and data[2][-1] == ']'
history_str = data[2][1:-1]
history = set()
if len(history_str) != 0:
history = set(history_str.split(' '))
if transaction_histories.get(transaction) is None:
transaction_histories[transaction] = []
transaction_histories[transaction].append(history)
elif prefix == WITNESS_SET_SELECTED:
ws_type = data[0]
transaction = data[1]
assert data[2][0] == '[' and data[2][-1] == ']'
pids_str = data[2][1:-1]
pids = set()
if len(pids_str) != 0:
pids = set(pids_str.split(' '))
if transaction_witness_sets[ws_type].get(transaction) is None:
transaction_witness_sets[ws_type][transaction] = []
transaction_witness_sets[ws_type][transaction].append(pids)
return {
"sent_messages": sent_messages,
"received_messages": received_messages,
"transaction_inits": transaction_inits,
"transaction_commit_infos": transaction_commit_infos,
"transaction_histories": transaction_histories,
"transaction_witness_sets": transaction_witness_sets,
"simulation_start": simulation_start,
"simulation_end": simulation_end
}
def calc_message_latencies(sent_messages, received_messages):
sum_latency = 0
message_cnt = 0
for message, send_timestamp in sent_messages.items():
receive_timestamp = received_messages.get(message)
if receive_timestamp is None:
continue
latency = receive_timestamp - send_timestamp
sum_latency += latency
message_cnt += 1
if message_cnt == 0:
return 0
return sum_latency / message_cnt
def calc_transaction_stat(n, transaction_inits, transaction_commit_infos, simulation_start, simulation_end):
sum_latency = 0
sum_messages_exchanged = 0
transaction_cnt = 0
latencies = []
throughput_distribution = {}
for transaction, init_info in transaction_inits.items():
commit_infos = transaction_commit_infos.get(transaction)
if commit_infos is None:
# print(f"Transaction {transaction} was not committed")
continue
# if len(commit_infos) != n:
# committed_pids = set(map(lambda commit_info: commit_info.process_id, commit_infos))
# not_committed_pids = set(range(n)).difference(committed_pids)
# print(f"Transaction {transaction} wasn't committed by processes {not_committed_pids}")
commit_timestamp = None
messages_exchanged = 0
for commit_info in commit_infos:
if commit_info.process_id == init_info.process_id:
commit_timestamp = commit_info.commit_timestamp
messages_exchanged += commit_info.received_messages_cnt
if commit_timestamp is None:
# print(f"Transaction {transaction} wasn't committed by source")
continue
commit_date_time = datetime.fromtimestamp(commit_timestamp // 1e9)
throughput_distribution[commit_date_time] = \
throughput_distribution.get(commit_date_time, 0) + 1
latency = commit_timestamp - init_info.init_timestamp
latencies.append(latency)
sum_latency += latency
sum_messages_exchanged += messages_exchanged
transaction_cnt += 1
first_commit = datetime.max
last_commit = datetime.min
for commit_date_time, _ in throughput_distribution.items():
if commit_date_time < first_commit:
first_commit = commit_date_time
if commit_date_time > last_commit:
last_commit = commit_date_time
while first_commit < last_commit:
throughput_distribution[first_commit] = throughput_distribution.get(first_commit, 0)
first_commit = first_commit + timedelta(seconds=1)
avg_latency = 0
avg_messages_exchanged = 0
median_latency = -1
if transaction_cnt > 0:
avg_latency = sum_latency / transaction_cnt
median_latency = median(latencies)
avg_messages_exchanged = int(sum_messages_exchanged / transaction_cnt)
throughput = transaction_cnt * 1e9 / (simulation_end - simulation_start)
return avg_latency, median_latency, avg_messages_exchanged, throughput, \
median(list(throughput_distribution.values())), transaction_cnt
def get_distance_metrics(sets):
max_diff = 0
for i in range(len(sets)):
for j in range(i + 1, len(sets)):
intersection_size = len(sets[i].intersection(sets[j]))
union_size = len(sets[i].union(sets[j]))
max_diff = max(max_diff, union_size - intersection_size)
return max_diff
def get_witness_sets_diff_metrics(transaction_witness_sets, n, ws_type):
metrics = []
for transaction, witness_sets in transaction_witness_sets[ws_type].items():
if len(witness_sets) != n:
continue
metrics.append(get_distance_metrics(witness_sets))
return metrics
def get_histories_diff_metrics(transaction_histories, n):
metrics = []
for transaction, histories in transaction_histories.items():
if len(histories) != n:
continue
metrics.append(get_distance_metrics(histories))
return metrics
def calculate_stat(directory, n):
data = parse_data_from_files(directory, n)
avg_message_latency = \
calc_message_latencies(
sent_messages=data["sent_messages"],
received_messages=data["received_messages"]
)
avg_transaction_latency, median_latency, avg_messages_exchanged, throughput, median_throughput, transaction_cnt = \
calc_transaction_stat(
n=n,
transaction_inits=data["transaction_inits"],
transaction_commit_infos=data["transaction_commit_infos"],
simulation_start=data["simulation_start"],
simulation_end=data["simulation_end"]
)
own_ws_diff_metrics = get_witness_sets_diff_metrics(
transaction_witness_sets=data["transaction_witness_sets"],
n=n,
ws_type="own"
)
pot_ws_diff_metrics = get_witness_sets_diff_metrics(
transaction_witness_sets=data["transaction_witness_sets"],
n=n,
ws_type="pot"
)
histories_diff_metrics = get_histories_diff_metrics(
transaction_histories=data["transaction_histories"],
n=n
)
return {
"avg_message_latency": avg_message_latency / 1e9,
"avg_transaction_latency": avg_transaction_latency / 1e9,
"median_transaction_latency": median_latency / 1e9,
"avg_messages_exchanged": avg_messages_exchanged,
"throughput": throughput,
"median_throughput": median_throughput,
"transaction_cnt": transaction_cnt,
"own_witness_sets_diff_metrics": own_ws_diff_metrics,
"pot_witness_sets_diff_metrics": pot_ws_diff_metrics,
"histories_diff_metrics": histories_diff_metrics
}
if __name__ == "__main__":
input_file = open("input.json")
input_json = json.load(input_file)
protocol = input_json["protocol"]
process_cnt = input_json["parameters"]["n"]
print(f"Protocol: {protocol}, {process_cnt} processes")
print()
stat = calculate_stat(directory="outputs", n=process_cnt)
avg_message_latency = stat["avg_message_latency"]
avg_transaction_latency, median_transaction_latency, avg_messages_exchanged = \
stat["avg_transaction_latency"], stat["median_transaction_latency"], stat["avg_messages_exchanged"]
throughput, median_throughput, transaction_cnt = \
stat["throughput"], stat["median_throughput"], stat["transaction_cnt"]
own_witness_sets_diff_metrics = stat["own_witness_sets_diff_metrics"]
pot_witness_sets_diff_metrics = stat["pot_witness_sets_diff_metrics"]
histories_diff_metrics = stat["histories_diff_metrics"]
print("Message latencies:")
print(f"\tAverage: {avg_message_latency}")
print()
print("Transaction latency statistics:")
print(f"\tAverage: {avg_transaction_latency}")
print(f"\tMedian: {median_transaction_latency}")
print()
print(f"Average number of exchanged messages per one transaction: {avg_messages_exchanged}")
print()
print("Throughput per second:")
print(f"\tAverage: {throughput}")
print(f"\tMedian: {median_throughput}")
print()
print(f"Transactions committed: {transaction_cnt}")
print()
if len(own_witness_sets_diff_metrics) != 0:
print(f"Difference metrics for own witness sets: {own_witness_sets_diff_metrics}")
print()
if len(pot_witness_sets_diff_metrics) != 0:
print(f"Difference metrics for pot witness sets: {pot_witness_sets_diff_metrics}")
print()
if len(histories_diff_metrics) != 0:
print(f"Difference metrics of histories: {histories_diff_metrics}")
print()
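# Illustrative parse of one log line (the exact on-disk layout is an assumption
# inferred from the helpers above, not taken from real process output):
#   line = drop_date("2023/01/01 12:00:00 Sent message: m42, timestamp: 1690000000000")
#   get_log_line_prefix(line)            # -> "Sent message"
#   parse_data_from_logged_line(line)    # -> ["m42", "1690000000000"]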
| repo_name: interestIngc/simulation-analyzer | sub_path: logs_analyzer.py | file_name: logs_analyzer.py | file_ext: py | file_size_in_byte: 12,715 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 34670400006 |
import json
from app_logic_extractor import app_logic_extractor
from lib.entity import App, Device, Net
from lib.util import convert_to_prolog_symbol, convert_to_prolog_var
from vul_analyzer import vul_analyzer
from vul_scanner import vul_scanner
def translate_vul_exists(prolog_dev_name, cve_id):
"""
Given a device full name and the list of CVE-IDs on that device, translate each CVE ID to
Prolog's `vulExistsV2` predicate
:param prolog_dev_name: a string of the device full name, in Prolog camel format
:param cve_id: a CVE-ID on that device
:return: a converted string of Prolog `vulExistsV2` predicate
"""
return 'vulExistsV2(' + prolog_dev_name + ', \'' + cve_id + '\').\n'
def translate_vul_property(exploit_model_tuple):
"""
Translate the app_logic_tuple returned by vul_analyzer() function to Prolog's `vulPropertyV2` predicate
:param exploit_model_tuple: a tuple of exploit model for a CVE ID: (cve_id, precondition, effect, probability, impact_score)
:return: a converted string of Prolog `vulPropertyV2` predicate
"""
(cve_id, precondition, effect, probability, impact_score) = exploit_model_tuple
return 'vulPropertyV2(\'' + cve_id + '\', ' + precondition + ', ' + effect + ', ' + str(probability) + ', ' + str(impact_score) + ').\n'
def parse_app_config(app_config_file):
"""
Parse `app_config.json` file and return a list of App objects
:param app_config_file: path to `app_config.json` file
:return: a list of App objects
"""
f_app_config = open(app_config_file)
app_json = json.load(f_app_config)
app_list = []
for app in app_json['apps']:
app_name = app['App name']
app_desc = app['description']
app_dev_map = app['device map']
app_list.append(App(app_name, app_desc, app_dev_map))
f_app_config.close()
return app_list
def parse_sys_config(sys_config_file):
"""
Parse `sys_config.json` file and return a list of Device objects
:param sys_config_file: path to `sys_config.json` file
:return: a tuple of (a list of Device objects, a tuple of Network objects)
"""
f_dev_config = open(sys_config_file)
dev_json = json.load(f_dev_config)
dev_list = []
for dev in dev_json['devices']:
dev_name = dev['name']
dev_type = dev['type']
dev_net_list = dev['network']
cur_dev_obj = Device(dev_name, dev_type, dev_net_list)
dev_list.append(cur_dev_obj)
if 'outdoor' in dev.keys():
outdoor = dev['outdoor']
cur_dev_obj.outdoor = outdoor
if 'plug into' in dev.keys():
plug_into = dev['plug into']
cur_dev_obj.plug_into = plug_into
net_list = []
for net in dev_json['networks']:
net_name = net['name']
net_type = net['type']
net_list.append(Net(net_name, net_type))
f_dev_config.close()
return dev_list, net_list
def translate_device_predicates(device_list):
"""
Given the device objects list, generate Prolog facts about device type, device inNetwork, plugInto, outdoor,
vulExistsV2, and vulPropertyV2
:param device_list: a list of Device objects
:return: a string of translated Prolog predicates
"""
res = ''
for device in device_list:
prolog_dev_type = convert_to_prolog_symbol(device.type)
prolog_dev_name = convert_to_prolog_symbol(device.name)
# translate facts about device type declaration
res += prolog_dev_type + '(' + prolog_dev_name + ').\n'
# translate facts about device outdoor declaration
if device.outdoor:
res += 'outdoor(' + prolog_dev_name + ').\n'
# translate facts about device plug into declaration
if device.plug_into:
prolog_outlet = convert_to_prolog_symbol(device.plug_into)
res += 'plugInto(' + prolog_dev_name + ', ' + prolog_outlet + ').\n'
# translate facts about device in network
for net in device.net_list:
prolog_net_name = convert_to_prolog_symbol(net)
res += 'inNetwork(' + prolog_dev_name + ', ' + prolog_net_name + ').\n'
# Translate facts about vulnerability existence and property
# run vul_scanner to get CVEIDs for the given device
cve_list = vul_scanner(device.name)
for cve_id in cve_list:
res += translate_vul_exists(prolog_dev_name, cve_id)
# run vul_analyzer to get the exploit model for that CVE-ID
exploit_model_tuple = vul_analyzer(cve_id, device.type)
res += translate_vul_property(exploit_model_tuple)
res += '\n'
return res
def translate_sys_config(sys_config_file):
"""
Translate IoT system configuration to Prolog facts
:param sys_config_file: path to `sys_config.json` file
:return: a converted string of Prolog rules for the app logic
"""
# Parse sys config JSON file
dev_list, net_list = parse_sys_config(sys_config_file)
# Translate facts about: device type, device inNetwork, plugInto, outdoor, vulExistsV2, and vulPropertyV2
res = translate_device_predicates(dev_list)
# Translate facts about: network type declaration, e.g., `wifi(wifi1).` `zigbee(zigbee1).`
for network in net_list:
res += convert_to_prolog_symbol(network.type) + '(' + convert_to_prolog_symbol(network.name) + ').\n'
return res
def translate_app_logic(app_config_file):
"""
Translate app logic to Prolog rules based on app configuration file and device configuration file
IMPORTANT: An IoT app in proper form always has one action in the [[main clause]],
and the [[conditional clause]] should have NONE or multiple conditions connected by AND.
:param app_config_file: path to `app_config.json` file
:return: a converted string of Prolog rules for the app logic
"""
# Parse app config JSON file
app_list = parse_app_config(app_config_file)
# Translate app logic to Prolog rules
res = ''
for app in app_list:
# convert app description to Python tuple
# app_logic_tuple = app_logic_extractor(app.desc)
app_logic_tuple = ('AND', ['motion sensor', 'door contact sensor'], ['motion', 'open'], 'NONE', ['bulb'], ['on'])
# app_logic_tuple = ('NONE', ['motion sensor'], ['motion'], 'NONE', ['bulb'], ['on'])
if app_logic_tuple is None:
print('error: the input app logic tuple is None\n')
return ''
cond_relation, cond_np_list, cond_vp_list, main_relation, main_np_list, main_vp_list = app_logic_tuple
# Convert the app logic
return translate_app_logic_AND_cond_clause(app.dev_map, cond_np_list, cond_vp_list, main_np_list, main_vp_list)
def translate_app_logic_AND_cond_clause(app_dev_map, cond_np_list, cond_vp_list, main_np_list, main_vp_list):
"""
The cond lists can have one or multiple elements. But if they multiple elements, they must be in logical AND relationship
E.g., app_logic_tuple = ('AND', ['motion sensor', 'door contact sensor'], ['motion', 'open'], 'NONE', ['bulb'], ['on'])
:return: a string of Prolog rules
"""
# Convert main clause
action = main_vp_list[0]
actuator_type = main_np_list[0]
res = action + '(' + convert_to_prolog_symbol(app_dev_map[actuator_type]) + ') :-\n'
# Convert conditional clause
for trigger_dev, trigger_act in zip(cond_np_list, cond_vp_list):
if trigger_dev == 'motion sensor':
if trigger_act == 'motion':
res += '\treportsMotion(' + convert_to_prolog_symbol(app_dev_map[trigger_dev]) + '),\n'
if trigger_dev == 'door contact sensor':
if trigger_act == 'open':
res += '\treportsOpen(' + convert_to_prolog_symbol(app_dev_map[trigger_dev]) + '),\n'
return res[:-2] + '.\n'
def test_translate_vul_exists():
return translate_vul_exists('Nest Cam IQ indoor', 'CVE-2019-5035')
# should return:
# vulExistsV2(nestCamIQIndoor, 'CVE-2019-5035').\n
def test_translate_vul_analyzer():
exploit_tuple = ('CVE-2019-5035', 'network', 'rootPrivilege', 0.55, 10.0)
return translate_vul_property(exploit_tuple)
# should return:
# vulPropertyV2('CVE-2019-5035', network, rootPrivilege, 0.55, 10.0).
def test_translate_app_logic():
# app_logic_tuple = ('AND', ['motion sensor', 'door contact sensor'], ['motion', 'open'], 'NONE', ['light'], ['on'])
app_config_file = 'YOUR_IOTA_ROOT/python/test/app_config.json'
dev_config_file = 'YOUR_IOTA_ROOT/python/test/dev_config.json'
return translate_app_logic(app_config_file)
| repo_name: pmlab-ucd/IOTA | sub_path: python/translator.py | file_name: translator.py | file_ext: py | file_size_in_byte: 8,669 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |
| seq_id: 70101306109 |
"""Control api connections and information gathering."""
import os
import requests
from constants.endpoints import endpoints
from typing import Tuple, Optional
class Binance:
"""Class to manage connection with Binance and data retrieving and inputting."""
def __init__(self, api_type: str = 'prod', endpoints = endpoints):
self.auth: Tuple[Optional[str], Optional[str]]
self.endpoints = endpoints
self.api_type = api_type
if self.api_type == 'test':
self.main_endpoint = 'https://testnet.binance.vision'
self.auth_dict = {
'key' : os.environ.get('TEST_KEY'),
'skey' : os.environ.get('TEST_SKEY'),
}
elif self.api_type == 'prod':
self.main_endpoint = 'https://api1.binance.com'
self.options_endpoint = 'https://vapi.binance.com'
self.auth_dict = {
'key' : os.environ.get('SPOT_KEY'),
'skey' : os.environ.get('SPOT_SKEY'),
}
# Complete endpoints strings.
for i in self.endpoints:
if i[0:7] == 'options':
self.endpoints[i] = self.options_endpoint + self.endpoints[i]
else:
self.endpoints[i] = self.main_endpoint + self.endpoints[i]
print(self.endpoints)
try:
r = requests.get(endpoints['test'])
print('ping: ' + str(r) + '\nConnection successful')
except:
print('Could not ping Binance API.')
def get_tickers(self, market: str = 'USDT') -> list[str]:
        r1 = requests.get(self.endpoints['exchange_info'], auth=(self.auth_dict['key'], self.auth_dict['skey']))
        symbols = r1.json()['symbols']  # parse the response once instead of re-parsing on every iteration
        tickers: list[str] = []
        for i in range(0, len(symbols)):
            if symbols[i]['status'] == 'TRADING':  # was symbols[1], which only ever checked the second symbol
                if symbols[i]['quoteAsset'] == market:
                    tickers.append(symbols[i]['symbol'])
                elif market is None:
                    tickers.append(symbols[i]['symbol'])
print(tickers)
return tickers
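# Minimal usage sketch (assumes the SPOT_KEY/SPOT_SKEY environment variables read in
# __init__ are set and that Binance is reachable from this machine):
#   client = Binance(api_type='prod')
#   usdt_tickers = client.get_tickers(market='USDT')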
| repo_name: cthadeufaria/redesigned-pas-trading | sub_path: src/utils/api.py | file_name: api.py | file_ext: py | file_size_in_byte: 2,165 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 26626842826 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from product.models import Product
from satchmo_store.shop import signals
from signals_ahoy.signals import application_search
def search_view(request, template="shop/search.html"):
"""Perform a search based on keywords and categories in the form submission"""
if request.method=="GET":
data = request.GET
else:
data = request.POST
keywords = data.get('keywords', '').split(' ')
category = data.get('category', None)
keywords = filter(None, keywords)
results = {}
# this signal will usually call listeners.default_product_search_listener
application_search.send(Product, request=request,
category=category, keywords=keywords, results=results)
context = RequestContext(request, {
'results': results,
'category' : category,
'keywords' : keywords})
return render_to_response(template, context_instance=context)
| repo_name: dokterbob/satchmo | sub_path: satchmo/apps/satchmo_store/shop/views/search.py | file_name: search.py | file_ext: py | file_size_in_byte: 1,013 | program_lang: python | lang: en | doc_type: code | stars: 30 | dataset: github-code | pt: 6 |
| seq_id: 31609185391 |
def notas(*resp, sit=False):
    """
    -> Function to analyse students' grades and status.
    :param resp: one or more grades
    :param sit: optional flag; when True, the student's pass/fail status is included
    :return: dictionary with summary information about the grades
    """
    resultado = {}  # renamed from "dict" to avoid shadowing the built-in
    resultado['Notas informadas'] = len(resp)
    resultado['Maior nota'] = max(resp)
    resultado['Menor nota'] = min(resp)
    resultado['Média'] = sum(resp) / len(resp)
    if sit:
        if resultado['Média'] >= 6:
            resultado['Situação'] = 'Aprovado'
        else:
            resultado['Situação'] = 'Reprovado'
    return resultado
resp = notas(3.5, 2, 6.5, 2, 7, 4, sit=True)
print(resp)
| repo_name: pedrobarauna8/CursoPython | sub_path: ex105.py | file_name: ex105.py | file_ext: py | file_size_in_byte: 720 | program_lang: python | lang: pt | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 10438441538 |
import cluster
import graph_builder
import pandas as pd
def main():
# The data files should be stored in these given locations.
country_codes_filename = "./data/COW-country-codes.csv"
dyadic_mid_filename = "./data/dyadic_mid_4.01.csv"
graphs_directory = "./graphs"
graph_builder.override_data_location(country_codes_filename, dyadic_mid_filename, graphs_directory)
this_cluster, that_cluster = cluster.find_mid_clusters(1900, 1950, "United States of America", 0)
print(this_cluster)
print(that_cluster)
if __name__ == "__main__":
main()
| repo_name: pmacg/mid-clustering | sub_path: main.py | file_name: main.py | file_ext: py | file_size_in_byte: 587 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 71362243707 |
import networkx as nx
import config.config as config
def graph_kernel_map_to_nodetypes(_graph):
"""
NOT SUPPORTED AFTER GRAPH ENCODING CHANGE.
A pre-processing step to collapse nodes to their model types.
:param _graph:
:return:
"""
graph_relabelled = nx.relabel_nodes(_graph, {node: _graph.nodes[node]['model'].node_acronym for node in _graph.nodes})
all_node_relabels = []
for node_subtypes in config.NODE_TYPES_ALL.values():
for node_subtype in node_subtypes.values():
all_node_relabels.append(node_subtype.node_acronym)
graph_relabelled.add_nodes_from(all_node_relabels)
return graph_relabelled
def similarity_full_ged(g1, g2):
"""
Measures the Graph Edit Distance similarity between two graphs exactly. Can be slow, it is suggested use the
approximate (reduced) method instead
:param _graph1: Graph object
:param _graph2: Graph object
:return: similarity (integer number of steps needed to transform Graph 1 to Graph 2
"""
sim = nx.algorithms.similarity.graph_edit_distance(g1, g2,
edge_subst_cost=edge_match,
node_subst_cost=node_match,
upper_bound=30.0,
timeout=10.0,
)
return sim
def similarity_reduced_ged(g1, g2):
"""
Approximated the Graph Edit Distance similarity between two graphs exactly.
:param _graph1: Graph object
:param _graph2: Graph object
:return: similarity (integer number of steps needed to transform Graph 1 to Graph 2
"""
ged_approx = nx.algorithms.similarity.optimize_graph_edit_distance(g1, g2,
edge_subst_cost=edge_match,
node_subst_cost=node_match,
upper_bound=30.0,
)
sim = next(ged_approx) # faster, but less accurate
return sim
def edge_match(e1, e2):
# provides the comparison for the cost of substituting two edges or two nodes in the GED calculation
if type(e1['model']) == type(e2['model']):
cost = 0.0
else:
cost = 1.0
return cost
def node_match(e1, e2):
# provides the comparison for the cost of substituting two edges or two nodes in the GED calculation
if type(e1['model']) == type(e2['model']):
cost = 0.0
else:
cost = 0.5
return cost
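# Usage sketch (hypothetical graphs: the only requirement visible here is that every
# node and edge carries a 'model' attribute, since node_match/edge_match compare
# type(...['model'])):
#   g1 = nx.Graph()
#   g1.add_node(0, model=SomeNodeModel()); g1.add_node(1, model=SomeNodeModel())
#   g1.add_edge(0, 1, model=SomeEdgeModel())
#   g2 = g1.copy()
#   similarity_reduced_ged(g1, g2)   # ~0.0 for structurally identical graphs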
| repo_name: benjimaclellan/aesop | sub_path: algorithms/assets/graph_edit_distance.py | file_name: graph_edit_distance.py | file_ext: py | file_size_in_byte: 2,807 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6 |
| seq_id: 42339560418 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def majuscule(mot):
    # Returns a message with the lowercase form of the first uppercase letter found.
    for lettre in mot:
        if ord(lettre) >= 65 and ord(lettre) <= 90:
            return "Cette lettre en minuscule est " + chr(ord(lettre) + 32)
    return "Aucune lettre majuscule dans " + mot

def minuscule(mot):
    # Returns a message with the uppercase form of the first lowercase letter found.
    for lettre in mot:
        if ord(lettre) >= 97 and ord(lettre) <= 122:
            return "Cette lettre en majuscule est " + chr(ord(lettre) - 32)
    return "Aucune lettre minuscule dans " + mot
if __name__ == '__main__':
mots = [
'riz',
'cours',
'voiture',
'oiseau',
'bonjour',
'églantier',
'arbre'
]
for i in range(len(mots)):
        mots[i] = minuscule(mots[i])  # the test words are lowercase, so ask for their uppercase form
print(mots)
| repo_name: INF1007-2022H/ch2-RTorrella | sub_path: exercice.py | file_name: exercice.py | file_ext: py | file_size_in_byte: 687 | program_lang: python | lang: fr | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 19330268889 |
class Solution(object):
def uniqueOccurrences(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
counter = dict()
for num in arr:
if num not in counter.keys():
counter[num] = 1
else:
counter[num]+=1
unique_list = []
for key in counter.keys():
if counter[key] in unique_list:
return False
else:
unique_list.append(counter[key])
return True
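# Usage sketch with hypothetical inputs (the class follows the LeetCode-style signature above):
if __name__ == "__main__":
    s = Solution()
    print(s.uniqueOccurrences([1, 2, 2, 1, 1, 3]))  # True: occurrence counts 3, 2, 1 are all distinct
    print(s.uniqueOccurrences([1, 2]))              # False: both values occur exactly once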
| repo_name: clarkeand/MoCodeMoProblems | sub_path: my-folder/problems/unique_number_of_occurrences/solution.py | file_name: solution.py | file_ext: py | file_size_in_byte: 565 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 19416857297 |
"""Visualises the range of potentials relative to demand in each municipality."""
from itertools import chain, repeat
import click
import pandas as pd
import geopandas as gpd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pycountry
from src.vis import RED, GREEN, BLUE
SORT_QUANTILE = 0.5
@click.command()
@click.argument("path_to_results")
@click.argument("path_to_plot")
def visualise_normed_potentials(path_to_results, path_to_plot):
"""Visualises the range of potentials relative to demand in each municipality."""
sns.set_context('paper')
units = pd.DataFrame(gpd.read_file(path_to_results))
units = units[["country_code", "population_sum", "normed_potential"]]
units["country"] = units["country_code"].map(lambda country_code: pycountry.countries.lookup(country_code).name)
units["country"].replace("Macedonia, Republic of", value="Macedonia", inplace=True) # too long
units["country"].replace("Bosnia and Herzegovina", value="Bosnia", inplace=True) # too long
people = pd.DataFrame(
data={
"country": list(chain(*[
(repeat(unit[1].country, round(unit[1].population_sum / 100)))
for unit in units.iterrows()
])),
"normed_potential": list(chain(*[
(repeat(unit[1].normed_potential, round(unit[1].population_sum / 100)))
for unit in units.iterrows()
]))
}
)
people_eu = people.copy()
people_eu["country"] = "Europe"
people = pd.concat([people, people_eu])
fig = plt.figure(figsize=(8, 10), constrained_layout=True)
ax = fig.add_subplot(111)
sns.boxplot(
data=people,
x="normed_potential",
y="country",
order=people.groupby("country").normed_potential.quantile(SORT_QUANTILE).sort_values().index,
ax=ax,
color=GREEN,
whis=[2.5, 97.5],
saturation=0.85,
linewidth=1.3,
width=0.7,
boxprops=dict(linewidth=1.3, edgecolor=GREEN),
whiskerprops=dict(linewidth=1, color=GREEN),
flierprops=dict(markerfacecolor="k", markeredgecolor="k", markersize=0, marker="o"),
capprops=dict(color=GREEN)
)
ax.axvline(1, color=RED, linewidth=1.5)
ax.set_xlabel("potential relative to demand")
ax.set_ylabel("country")
ax.set_xscale('log')
ax.set_xlim(0.08, 100)
ax.set_xticklabels(["{:.0f}%".format(tick * 100) for tick in ax.get_xticks()])
eu_position = list(
people.groupby("country").normed_potential.quantile(SORT_QUANTILE).sort_values().index
).index("Europe")
eu_patch = [child for child in ax.get_children() if isinstance(child, matplotlib.patches.PathPatch)][eu_position]
eu_patch.set_facecolor(BLUE)
eu_patch.set_edgecolor(BLUE)
eu_patch.set_zorder(2)
if path_to_plot[-3:] == "png":
fig.savefig(path_to_plot, dpi=600, transparent=False)
else:
fig.savefig(path_to_plot, dpi=600, transparent=False, pil_kwargs={"compression": "tiff_lzw"})
if __name__ == "__main__":
visualise_normed_potentials()
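# Example invocation (paths are placeholders; click expects a geopandas-readable results
# file and an output image path, handled by the save logic above):
#   python potentials_normed_boxplot.py build/units-with-potentials.geojson build/normed-potentials.png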
| repo_name: timtroendle/possibility-for-electricity-autarky | sub_path: src/vis/potentials_normed_boxplot.py | file_name: potentials_normed_boxplot.py | file_ext: py | file_size_in_byte: 3,114 | program_lang: python | lang: en | doc_type: code | stars: 10 | dataset: github-code | pt: 6 |
| seq_id: 74187907709 |
#Richard Janssen <[email protected]>
#28/07/2023
#CS50 Introduction to Programming with Python
#File Input/Output
#This program expect for two command-line arguments, the first one is a image file input and the second
#is the output file name or path. This program overlay a "shirt image" on the given input file
#input and output are expected to have same format (.jpeg, .jpg, or .png)
#you can use befor1.jpg, before2.jpg or before3.jpg to test this script
# ------------------------------------------------
import sys
from PIL import Image, ImageOps  # ImageOps must be imported explicitly; "import PIL" alone does not expose it
def main():
check_argv(sys.argv)
shirt = Image.open("shirt.png")
with Image.open(sys.argv[1]) as input:
        resized = ImageOps.fit(input, shirt.size)
resized.paste(shirt,shirt)
resized.save(sys.argv[2])
def check_argv(argv):
if len(argv) > 3:
sys.exit("Too many command-line arguments.")
elif len(argv) < 3:
sys.exit("Too few command-line arguments.")
elif get_extension(argv[1]) not in ["jpg", "jpeg", "png"]:
sys.exit("Not a valid format file.")
elif get_extension(argv[1]) != get_extension(argv[2]):
sys.exit("Input and output must have the same format")
try:
open(sys.argv[1],"r")
except FileNotFoundError:
sys.exit("Can't find the file.")
def get_extension(str):
return str.rsplit(".",1)[1]
if __name__ == "__main__":
main()
| repo_name: richardnj14/CS50_python | sub_path: file_input_output/shirt/shirt.py | file_name: shirt.py | file_ext: py | file_size_in_byte: 1,411 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |
| seq_id: 10417120713 |
from __future__ import annotations
import argparse
from argparse import ArgumentParser
def render_region_graph_logic(args):
import hashlib
import re
import graphviz
from randovania.game_description import default_database
from randovania.game_description.db.dock_node import DockNode
from randovania.game_description.db.pickup_node import PickupNode
from randovania.game_description.requirements.base import Requirement
from randovania.games.game import RandovaniaGame
gd = default_database.game_description_for(RandovaniaGame(args.game))
regions = list(gd.region_list.regions)
single_image: bool = args.single_image
added_edges = set()
vulnerabilities_colors = {
"Normal Door": None,
"Morph Ball Door": None,
"Other Door": None,
"Scan Portal": None,
"Missile": "#ff1919",
"Super Missile": "#38c914",
"Seeker Launcher": "#b233e8",
"Power Bomb": "#dfe833",
"Wave Door": "#a30af5",
"Ice Door": "#7cdede",
"Plasma Door": "#870f0f",
"Light Door": "#bfd9d9",
"Dark Door": "#3b3647",
"Annihilator Door": "#616969",
"Light Portal": "#bfd9d9",
"Dark Portal": "#3b3647",
}
def _weakness_name(s: str):
return re.sub(r"\([^)]*\)", "", s).replace(" Blast Shield", "").strip()
def _hash_to_color(s: str) -> str:
h = hashlib.blake2b(s.encode("utf-8"), digest_size=3).digest()
return "#{:06x}".format(int.from_bytes(h, "big"))
def _add_connection(dot: graphviz.Digraph, dock_node: DockNode):
the_region = gd.region_list.nodes_to_region(dock_node)
source_area = gd.region_list.nodes_to_area(dock_node)
target_node = gd.region_list.node_by_identifier(dock_node.default_connection)
target_area = gd.region_list.nodes_to_area(target_node)
if dock_node.default_dock_weakness.requirement == Requirement.impossible():
return
if dock_node.identifier in added_edges:
return
weak_name = _weakness_name(dock_node.default_dock_weakness.name)
direction = None
if isinstance(target_node, DockNode) and _weakness_name(target_node.default_dock_weakness.name) == weak_name:
direction = "both"
added_edges.add(target_node.identifier)
color = vulnerabilities_colors.get(weak_name, _hash_to_color(weak_name))
dot.edge(
f"{the_region.name}-{source_area.name}",
f"{the_region.name}-{target_area.name}",
weak_name,
dir=direction,
color=color,
fontcolor=color,
)
added_edges.add(dock_node.identifier)
def _add_teleporter(dot: graphviz.Digraph, teleporter_node: DockNode):
source_region = gd.region_list.nodes_to_region(teleporter_node)
source_area = gd.region_list.nodes_to_area(teleporter_node)
target_node = gd.region_list.node_by_identifier(teleporter_node.default_connection)
target_region = gd.region_list.nodes_to_region(target_node)
target_area = gd.region_list.nodes_to_area(target_node)
weak_name = _weakness_name(teleporter_node.default_dock_weakness.name)
color = vulnerabilities_colors.get(weak_name, _hash_to_color(weak_name))
dot.edge(
f"{source_region.name}-{source_area.name}",
f"{target_region.name}-{target_area.name}",
weak_name,
color=color,
fontcolor=color,
)
def _cross_region_dock(node: DockNode):
return node.default_connection.region_name != node.identifier.region_name
per_game_colors = {
RandovaniaGame.METROID_PRIME_ECHOES: {
"Agon Wastes": "#ffc61c",
"Torvus Bog": "#20ff1c",
"Sanctuary Fortress": "#3d62ff",
"Temple Grounds": "#c917ff",
"Great Temple": "#c917ff",
},
}
colors = per_game_colors.get(gd.game)
if colors is None:
colors = {region.name: _hash_to_color(region.name) for region in gd.region_list.regions}
dark_colors = {
"Agon Wastes": "#a88332",
"Torvus Bog": "#149612",
"Sanctuary Fortress": "#112991",
"Temple Grounds": "#7d2996",
"Great Temple": "#7d2996",
}
if single_image:
full_dot = graphviz.Digraph(name=gd.game.short_name, comment=gd.game.long_name)
else:
full_dot = None
per_region_dot = {}
for region in regions:
if single_image:
this_dot = full_dot
else:
this_dot = graphviz.Digraph(name=region.name)
per_region_dot[region.name] = this_dot
for area in region.areas:
shape = None
if any(isinstance(node, DockNode) and _cross_region_dock(node) for node in area.nodes):
shape = "polygon"
c = (dark_colors if area.in_dark_aether else colors)[region.name]
fillcolor = "".join(f"{max(0, int(c[i * 2 + 1:i * 2 + 3], 16) - 64):02x}" for i in range(3))
this_dot.node(
f"{region.name}-{area.name}",
area.name,
color=c,
fillcolor=f"#{fillcolor}",
style="filled",
fontcolor="#ffffff",
shape=shape,
penwidth="3.0",
)
for node in area.nodes:
if args.include_pickups and isinstance(node, PickupNode):
this_dot.node(
str(node.pickup_index), re.search(r"Pickup [^(]*\(([^)]+)\)", node.name).group(1), shape="house"
)
this_dot.edge(f"{region.name}-{area.name}", str(node.pickup_index))
for region in regions:
print(f"Adding docks for {region.name}")
for area in region.areas:
for node in area.nodes:
if isinstance(node, DockNode) and not _cross_region_dock(node):
_add_connection(per_region_dot[region.name], node)
elif isinstance(node, DockNode) and _cross_region_dock(node) and args.include_teleporters:
_add_teleporter(per_region_dot[region.name], node)
if single_image:
full_dot.render(format="png", view=True, cleanup=True)
else:
for name, this_dot in per_region_dot.items():
this_dot.render(format="png", view=True, cleanup=True)
def render_regions_graph(sub_parsers):
parser: ArgumentParser = sub_parsers.add_parser(
"render-region-graph",
help="Renders an image with all area connections",
formatter_class=argparse.MetavarTypeHelpFormatter,
)
parser.add_argument("--include-teleporters", action="store_true")
parser.add_argument("--include-pickups", action="store_true")
parser.add_argument("--single-image", action="store_true")
parser.set_defaults(func=render_region_graph_logic)
| repo_name: randovania/randovania | sub_path: randovania/cli/commands/render_regions.py | file_name: render_regions.py | file_ext: py | file_size_in_byte: 6,961 | program_lang: python | lang: en | doc_type: code | stars: 165 | dataset: github-code | pt: 6 |
| seq_id: 19528086110 |
# -*- coding: utf-8 -*-
__author__='zhaicao'
from PyQt5 import QtCore, QtGui, QtWidgets
from frameUI.CreateControls import TraceControlsUI
from frameUI.CreateTextUI import TraceCreateTextUI
from frameUI.MainData import TraceObjItems
from eventAction.DefinedActions import TraceActions
from eventAction.DefinedSolot import TraceSolot
from eventAction.Utils import ObjRepository
from frameUI import resoure_rc
class TraceMainWidget(TraceControlsUI, TraceCreateTextUI, TraceObjItems):
def setupUi(self, MainWindow):
# 主窗口设置
MainWindow.setWindowTitle("追溯分析部署配置工具 V2.0.6(Bate)")
MainWindow.setObjectName("MainWindow")
MainWindow.resize(500, 660)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icon/logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setStyleSheet(".QGroupBox {border-radius: 3px;border: 1px solid #BFBFBF;}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setObjectName("stackedWidget")
self.verticalLayout.addWidget(self.stackedWidget)
MainWindow.setCentralWidget(self.centralwidget)
# 设置菜单
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 23))
self.menubar.setObjectName("menubar")
# 部署菜单
self.menuDeploy = QtWidgets.QMenu(self.menubar)
self.menuDeploy.setObjectName("menuDeploy")
# 更新菜单
self.menuUpdate = QtWidgets.QMenu(self.menubar)
self.menuUpdate.setObjectName("menuUpdate")
MainWindow.setMenuBar(self.menubar)
# 部署菜单子菜单
self.firstDeploy = QtWidgets.QAction(MainWindow)
self.firstDeploy.setObjectName("firstDeploy")
self.menuDeploy.addAction(self.firstDeploy)
# 升级菜单子菜单
self.updateDB = QtWidgets.QAction(MainWindow)
self.updateDB.setObjectName("updateDB")
self.menuUpdate.addAction(self.updateDB)
self.updateNifi = QtWidgets.QAction(MainWindow)
self.updateNifi.setObjectName("updateNifi")
self.menuUpdate.addAction(self.updateNifi)
self.menubar.addAction(self.menuDeploy.menuAction())
self.menubar.addAction(self.menuUpdate.menuAction())
# 生成控件
super().initControls(self.stackedWidget)
self.stackedWidget.setCurrentIndex(0)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# 生成控件文字显示
super().initControlTexts()
# 生成控件名和对象关联dict
super().initObjItems()
# 初始化菜单切换信号槽
self.connSignalMenu()
# 初始化第一个页面控件信号槽
self.connSignalPage_1(MainWindow)
# 初始化第两个页面控件信号槽
self.connSignalPage_2(MainWindow)
# 初始化第三个页面控件信号槽
self.connSignalPage_3(MainWindow)
# 初始化控件库
self._objsDict = ObjRepository(MainWindow, self.deployConfItem, self.manifestConfItem, self.dbConfItem, self.nifiConfItem)
# 初始化事件
self._action = TraceActions()
# 初始化槽函数
self._solot = TraceSolot()
# 公共信号
def connSignalMenu(self):
self.firstDeploy.triggered.connect(lambda: self._solot.changeMenuPage(self.stackedWidget, 0))
self.updateDB.triggered.connect(lambda: self._solot.changeMenuPage(self.stackedWidget, 1))
self.updateNifi.triggered.connect(lambda: self._solot.changeMenuPage(self.stackedWidget, 2))
# 控件信号
def connSignalPage_1(self, mainWidget):
# 绑定切换页签的信号槽
self.tabWidget.currentChanged.connect(
lambda: self._solot.buttonChange(self.tabWidget, self.dep_confirmBtn, self.dep_copyDepBtn, self.dep_copyManBtn))
# 定义Next按钮信号槽
self.dep_confirmBtn.clicked.connect(
lambda: self._solot.nextClicked(mainWidget, self.tabWidget, self.dep_confirmBtn, self.dep_copyDepBtn,
self.dep_copyManBtn, self.deployConfItem, self.manifestConfItem,
self._objsDict))
# 定义复制部署按钮信号槽
self.dep_copyDepBtn.clicked.connect(
lambda: self._solot.copyConfClipboard(mainWidget, 'deploy', self.deployConfItem, self.manifestConfItem,
self._objsDict))
# 定义复制定制按钮信号槽
self.dep_copyManBtn.clicked.connect(
lambda: self._solot.copyConfClipboard(mainWidget, 'manifest', self.deployConfItem, self.manifestConfItem,
self._objsDict))
# Cancel按钮信号绑定退出槽函数
self.dep_cancelBtn.clicked.connect(mainWidget.close)
# 是否抽历史库checkbox绑定信号槽
self.dep_input_6.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'his'))
# 是否抽工艺参数checkbox绑定信号槽
self.dep_input_24.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'pp'))
# 工艺参数是否支持网络访问checkbox绑定信号槽
self.dep_input_25.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'ppNet'))
# 是否启用单点登录绑定信号槽
self.dep_input_45.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'login'))
# 是否启用Nifi登录绑定信号槽
self.dep_input_53.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifiLogin'))
# 获取业务库,联动
# 业务库测试按钮绑定信号槽
self.getDBBtn_1.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.dep_input_1.text(),
'port': self.dep_input_2.text(),
'user': self.dep_input_3.text(),
'pwd': self.dep_input_4.text(),
},
self.dep_input_5))
# 输入框修改初始化下拉框数据
self.dep_input_1.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
self.dep_input_2.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
self.dep_input_3.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
self.dep_input_4.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_5, '请选择业务库'))
# 获取历史库,联动
# 历史库测试绑定信号槽
self.getDBBtn_2.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.dep_input_7.text(),
'port': self.dep_input_8.text(),
'user': self.dep_input_9.text(),
'pwd': self.dep_input_10.text(),
},
self.dep_input_11))
# 输入框修改初始化下拉框数据
self.dep_input_7.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
self.dep_input_8.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
self.dep_input_9.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
self.dep_input_10.textChanged.connect(lambda: self._action.initComboBoxDB(self.dep_input_11, '请选择历史库'))
# page_2控件信号
def connSignalPage_2(self, mainWidget):
# 是否更新系统库
self.db_input_1.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'db_das'))
# 是否更新BI库
self.db_input_7.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'db_bi'))
# 是否更新工艺参数
self.db_input_13.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'db_pp'))
# 升级DB的信号
self.db_comfirmBtn.clicked.connect(lambda: self._solot.createFullDB(mainWidget))
# page_3控件信号
def connSignalPage_3(self, mainWidget):
# 是否抽取历史库
self.nifi_input_11.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifi_history'))
# 是否抽取工艺参数
self.nifi_input_17.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifi_pp'))
# 是否启用登录
self.nifi_input_23.stateChanged.connect(lambda: self._solot.cbSetEnabledSlot(self._objsDict, 'nifi_islogin'))
# 业务库试绑定信号槽
self.getDBBtn_3.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_1.text(),
'port': self.nifi_input_2.text(),
'user': self.nifi_input_3.text(),
'pwd': self.nifi_input_4.text(),
},
self.nifi_input_5))
# 业务库输入框修改初始化下拉框数据
self.nifi_input_1.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
self.nifi_input_2.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
self.nifi_input_3.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
self.nifi_input_4.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_5, '请选择业务库'))
# BI库试绑定信号槽
self.getDBBtn_4.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_6.text(),
'port': self.nifi_input_7.text(),
'user': self.nifi_input_8.text(),
'pwd': self.nifi_input_9.text(),
},
self.nifi_input_10))
# BI库输入框修改初始化下拉框数据
self.nifi_input_6.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
self.nifi_input_7.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
self.nifi_input_8.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
self.nifi_input_9.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_10, '请选择BI库'))
# 历史库试绑定信号槽
self.getDBBtn_5.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_12.text(),
'port': self.nifi_input_13.text(),
'user': self.nifi_input_14.text(),
'pwd': self.nifi_input_15.text(),
},
self.nifi_input_16))
# 历史库输入框修改初始化下拉框数据
self.nifi_input_12.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
self.nifi_input_13.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
self.nifi_input_14.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
self.nifi_input_15.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_16, '请选择历史库'))
        # Process-parameter database: bind the test-connection button to its slot
self.getDBBtn_6.clicked.connect(lambda: self._solot.setDBNameList(mainWidget,
{
'ip': self.nifi_input_18.text(),
'port': self.nifi_input_19.text(),
'user': self.nifi_input_20.text(),
'pwd': self.nifi_input_21.text(),
},
self.nifi_input_22))
        # Re-initialize the process-parameter combo box whenever its connection inputs change
self.nifi_input_18.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
self.nifi_input_19.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
self.nifi_input_20.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
self.nifi_input_21.textChanged.connect(lambda: self._action.initComboBoxDB(self.nifi_input_22, '请选择工艺参数库'))
        # Choose the Nifi template file path
self.getFile.clicked.connect(lambda: self._solot.getNifiTemplate(mainWidget, self._objsDict))
        # Signal for the Nifi template update button
self.nifi_confirmBtn.clicked.connect(lambda: self._solot.updateNifiTemplate(mainWidget, self.nifiConfItem, self._objsDict))
|
zhaicao/pythonWorkspace
|
DeployTool/frameUI/mainUI.py
|
mainUI.py
|
py
| 15,888 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1008816122
|
'''Euler problem 527
https://projecteuler.net/problem=527'''
from random import randint
def binSearch(n):
'''number of guesses needed to find a
number in range n'''
t = randint(1,n)
lower = 1
upper = n
guesses = 0
while True:
#print("t=",t)
#print("lower = ",lower)
#print("upper = ",upper)
guesses += 1
guess = int((lower + upper)/2)
#print("guess=",guess)
if guess == t:
return guesses
elif guess < t:
lower = guess+1
else:
upper = guess-1
def average(listA):
return sum(listA)/len(listA)
def randSearch(n):
'''number of guesses needed to find a
number in range n'''
t = randint(1,n)
lower = 1
upper = n
guesses = 0
while True:
#print("t=",t)
#print("lower = ",lower)
#print("upper = ",upper)
guesses += 1
guess = randint(lower,upper)
#print("guess=",guess)
if guess == t:
return guesses
elif guess < t:
lower = guess+1
else:
upper = guess-1
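# Compare the average number of guesses over 10000 trials: classic binary search vs. random-pivot search on a range of 7.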
Bsearches = [] #binary search
Rsearches = [] #random binary search
for i in range(10000):
Bsearches.append(binSearch(7))
Rsearches.append(randSearch(7))
print(average(Rsearches)-average(Bsearches))
|
hackingmath/Project-Euler
|
euler527.py
|
euler527.py
|
py
| 1,421 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24526237848
|
import pandas as pd
import sys
def parse_osteometric_data(df):
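    # Each bone is pulled in long format (one row per Element/Side); if those columns are missing,
    # fall back to the wide layout where right-side measurements carry an 'R' suffix.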
try:
clavicle_df = df.loc[df['Element'] == 'Clavicle'][['Id', 'Side', 'Element',
'Cla_01', 'Cla_04', 'Cla_05']]
clavicle_l = clavicle_df.loc[clavicle_df['Side'] == 'Left']
clavicle_r = clavicle_df.loc[clavicle_df['Side'] == 'Right']
except KeyError:
try:
clavicle_l = df[['Id', 'Cla_01', 'Cla_04', 'Cla_05']]
clavicle_r = df[['Id', 'Cla_01R', 'Cla_04R', 'Cla_05R']]
clavicle_r.rename(columns={'Cla_01R': 'Cla_01', 'Cla_04R': 'Cla_04', 'Cla_05R': 'Cla_05'}, inplace=True)
clavicle_l.loc[:, 'Side'] = 'Left'
clavicle_l.loc[:, 'Element'] = 'Clavicle'
clavicle_r.loc[:, 'Side'] = 'Right'
clavicle_r.loc[:, 'Element'] = 'Clavicle'
except KeyError:
clavicle_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Cla_01', 'Cla_04', 'Cla_05'})
clavicle_l = clavicle_df.loc[clavicle_df['Side'] == 'Left']
clavicle_r = clavicle_df.loc[clavicle_df['Side'] == 'Right']
clavicle_l = clavicle_l.dropna(subset=['Cla_01', 'Cla_04', 'Cla_05'], thresh=1)
clavicle_r = clavicle_r.dropna(subset=['Cla_01', 'Cla_04', 'Cla_05'], thresh=1)
try:
femur_df = df.loc[df['Element'] == 'Femur'][['Id', 'Side', 'Element',
'Fem_01', 'Fem_02', 'Fem_03', 'Fem_04',
'Fem_05', 'Fem_06', 'Fem_07']]
femur_l = femur_df.loc[femur_df['Side'] == 'Left']
femur_r = femur_df.loc[femur_df['Side'] == 'Right']
except KeyError:
try:
femur_l = df[['Id', 'Fem_01', 'Fem_02', 'Fem_03', 'Fem_04', 'Fem_05', 'Fem_06', 'Fem_07']]
femur_r = df[['Id', 'Fem_01R', 'Fem_02R', 'Fem_03R', 'Fem_04R', 'Fem_05R', 'Fem_06R', 'Fem_07R']]
femur_r.rename(columns={'Fem_01R': 'Fem_01', 'Fem_02R': 'Fem_02', 'Fem_03R': 'Fem_03',
'Fem_04R': 'Fem_04', 'Fem_05R': 'Fem_05', 'Fem_06R': 'Fem_06',
'Fem_07R': 'Fem_07'
}, inplace=True)
femur_l.loc[:, 'Side'] = 'Left'
femur_l.loc[:, 'Element'] = 'Femur'
femur_r.loc[:, 'Side'] = 'Right'
femur_r.loc[:, 'Element'] = 'Femur'
except KeyError:
femur_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Fem_01', 'Fem_02', 'Fem_03', 'Fem_04',
'Fem_05', 'Fem_06', 'Fem_07'})
femur_l = femur_df.loc[femur_df['Side'] == 'Left']
femur_r = femur_df.loc[femur_df['Side'] == 'Right']
femur_l = femur_l.dropna(subset=['Fem_01', 'Fem_02', 'Fem_03', 'Fem_04', 'Fem_05', 'Fem_06', 'Fem_07'],
thresh=1)
femur_r = femur_r.dropna(subset=['Fem_01', 'Fem_02', 'Fem_03', 'Fem_04', 'Fem_05', 'Fem_06', 'Fem_07'],
thresh=1)
try:
fibula_df = df.loc[df['Element'] == 'Fibula'][['Id', 'Side', 'Element', 'Fib_01', 'Fib_02']]
fibula_l = fibula_df.loc[fibula_df['Side'] == 'Left']
fibula_r = fibula_df.loc[fibula_df['Side'] == 'Right']
except KeyError:
try:
fibula_l = df[['Id', 'Fib_01', 'Fib_02']]
fibula_r = df[['Id', 'Fib_01R', 'Fib_02R']]
fibula_r.rename(columns={'Fib_01R': 'Fib_01', 'Fib_02R': 'Fib_02'}, inplace=True)
fibula_l.loc[:, 'Side'] = 'Left'
fibula_l.loc[:, 'Element'] = 'Fibula'
fibula_r.loc[:, 'Side'] = 'Right'
fibula_r.loc[:, 'Element'] = 'Fibula'
        except KeyError:
fibula_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Fib_01', 'Fib_02'})
fibula_l = fibula_df.loc[fibula_df['Side'] == 'Left']
fibula_r = fibula_df.loc[fibula_df['Side'] == 'Right']
fibula_l = fibula_l.dropna(subset=['Fib_01', 'Fib_02'])
fibula_r = fibula_r.dropna(subset=['Fib_01', 'Fib_02'])
try:
humerus_df = df.loc[df['Element'] == 'Humerus'][['Id', 'Side', 'Element',
'Hum_01', 'Hum_02', 'Hum_03', 'Hum_04',
'Hum_05']]
humerus_l = humerus_df.loc[humerus_df['Side'] == 'Left']
humerus_r = humerus_df.loc[humerus_df['Side'] == 'Right']
except KeyError:
try:
humerus_l = df[['Id', 'Hum_01', 'Hum_02', 'Hum_03', 'Hum_04',
'Hum_05']]
humerus_r = df[['Id', 'Hum_01R', 'Hum_02R', 'Hum_03R', 'Hum_04R',
'Hum_05R']]
humerus_r.rename(columns={'Hum_01R': 'Hum_01', 'Hum_02R': 'Hum_02',
'Hum_03R': 'Hum_03', 'Hum_04R': 'Hum_04',
'Hum_05R': 'Hum_05'}, inplace=True)
humerus_l.loc[:, 'Side'] = 'Left'
humerus_l.loc[:, 'Element'] = 'Humerus'
humerus_r.loc[:, 'Side'] = 'Right'
humerus_r.loc[:, 'Element'] = 'Humerus'
except KeyError:
humerus_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Hum_01', 'Hum_02', 'Hum_03', 'Hum_04',
'Hum_05'})
humerus_l = humerus_df.loc[humerus_df['Side'] == 'Left']
humerus_r = humerus_df.loc[humerus_df['Side'] == 'Right']
humerus_l = humerus_l.dropna(subset=['Hum_01', 'Hum_02', 'Hum_03', 'Hum_04','Hum_05'], thresh=1)
humerus_r = humerus_r.dropna(subset=['Hum_01', 'Hum_02', 'Hum_03', 'Hum_04', 'Hum_05'], thresh=1)
try:
os_coxa_df = df.loc[df['Element'] == 'Os coxa'][['Id', 'Side', 'Element',
'Osc_01', 'Osc_02']]
os_coxa_l = os_coxa_df.loc[os_coxa_df['Side'] == 'Left']
os_coxa_r = os_coxa_df.loc[os_coxa_df['Side'] == 'Right']
except KeyError:
try:
os_coxa_l = df[['Id', 'Osc_01', 'Osc_02']]
os_coxa_r = df[['Id', 'Osc_01R', 'Osc_02R']]
os_coxa_r.rename(columns={'Osc_01R': 'Osc_01', 'Osc_02R': 'Osc_02'}, inplace=True)
os_coxa_l.loc[:, 'Side'] = 'Left'
os_coxa_l.loc[:, 'Element'] = 'Os coxa'
os_coxa_r.loc[:, 'Side'] = 'Right'
os_coxa_r.loc[:, 'Element'] = 'Os coxa'
except KeyError:
os_coxa_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Osc_01', 'Osc_02'})
os_coxa_l = os_coxa_df.loc[os_coxa_df['Side'] == 'Left']
os_coxa_r = os_coxa_df.loc[os_coxa_df['Side'] == 'Right']
os_coxa_l = os_coxa_l.dropna(subset=['Osc_01', 'Osc_02'], thresh=1)
os_coxa_r = os_coxa_r.dropna(subset=['Osc_01', 'Osc_02'], thresh=1)
try:
radius_df = df.loc[df['Element'] == 'Radius'][['Id', 'Side', 'Element',
'Rad_01', 'Rad_05', 'Rad_06']]
radius_l = radius_df.loc[radius_df['Side'] == 'Left']
radius_r = radius_df.loc[radius_df['Side'] == 'Right']
except KeyError:
try:
radius_l = df[['Id', 'Rad_01', 'Rad_05', 'Rad_06']]
radius_r = df[['Id', 'Rad_01R', 'Rad_05R', 'Rad_06R']]
radius_r.rename(columns={'Rad_01R': 'Rad_01', 'Rad_05R': 'Rad_05', 'Rad_06R': 'Rad_06'}, inplace=True)
radius_l.loc[:, 'Side'] = 'Left'
radius_l.loc[:, 'Element'] = 'Radius'
radius_r.loc[:, 'Side'] = 'Right'
radius_r.loc[:, 'Element'] = 'Radius'
except KeyError:
radius_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Rad_01', 'Rad_05', 'Rad_06'})
radius_l = radius_df.loc[radius_df['Side'] == 'Left']
radius_r = radius_df.loc[radius_df['Side'] == 'Right']
radius_l = radius_l.dropna(subset=['Rad_01', 'Rad_05', 'Rad_06'], thresh=1)
radius_r = radius_r.dropna(subset=['Rad_01', 'Rad_05', 'Rad_06'], thresh=1)
try:
scapula_df = df.loc[df['Element'] == 'Scapula'][['Id', 'Side', 'Element',
'Sca_01', 'Sca_02']]
scapula_l = scapula_df.loc[scapula_df['Side'] == 'Left']
scapula_r = scapula_df.loc[scapula_df['Side'] == 'Right']
except KeyError:
try:
scapula_l = df[['Id', 'Sca_01', 'Sca_02']]
scapula_r = df[['Id', 'Sca_01R', 'Sca_02R']]
scapula_r.rename(columns={'Sca_01R': 'Sca_01', 'Sca_02R': 'Sca_02'}, inplace=True)
scapula_l.loc[:, 'Side'] = 'Left'
scapula_l.loc[:, 'Element'] = 'Scapula'
scapula_r.loc[:, 'Side'] = 'Right'
scapula_r.loc[:, 'Element'] = 'Scapula'
except KeyError:
scapula_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Sca_01', 'Sca_02'})
scapula_l = scapula_df.loc[scapula_df['Side'] == 'Left']
scapula_r = scapula_df.loc[scapula_df['Side'] == 'Right']
scapula_l = scapula_l.dropna(subset=['Sca_01', 'Sca_02'], thresh=1)
scapula_r = scapula_r.dropna(subset=['Sca_01', 'Sca_02'], thresh=1)
try:
tibia_df = df.loc[df['Element'] == 'Tibia'][['Id', 'Side', 'Element',
'Tib_01', 'Tib_02', 'Tib_03', 'Tib_04',
'Tib_05']]
tibia_l = tibia_df.loc[tibia_df['Side'] == 'Left']
tibia_r = tibia_df.loc[tibia_df['Side'] == 'Right']
except KeyError:
try:
tibia_l = df[['Id', 'Tib_01', 'Tib_02', 'Tib_03', 'Tib_04', 'Tib_05']]
tibia_r = df[['Id', 'Tib_01R', 'Tib_02R', 'Tib_03R', 'Tib_04R', 'Tib_05R']]
tibia_r.rename(columns={'Tib_01R': 'Tib_01', 'Tib_02R': 'Tib_02', 'Tib_03R': 'Tib_03',
'Tib_04R': 'Tib_04', 'Tib_05R': 'Tib_05'}, inplace=True)
tibia_l.loc[:, 'Side'] = 'Left'
tibia_l.loc[:, 'Element'] = 'Tibia'
tibia_r.loc[:, 'Side'] = 'Right'
tibia_r.loc[:, 'Element'] = 'Tibia'
except KeyError:
tibia_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Tib_01', 'Tib_02', 'Tib_03', 'Tib_04', 'Tib_05'})
tibia_l = tibia_df.loc[tibia_df['Side'] == 'Left']
tibia_r = tibia_df.loc[tibia_df['Side'] == 'Right']
tibia_l = tibia_l.dropna(subset=['Tib_01', 'Tib_02', 'Tib_03', 'Tib_04', 'Tib_05'], thresh=1)
tibia_r = tibia_r.dropna(subset=['Tib_01', 'Tib_02', 'Tib_03', 'Tib_04', 'Tib_05'], thresh=1)
try:
ulna_df = df.loc[df['Element'] == 'Ulna'][['Id', 'Side', 'Element',
'Uln_01', 'Uln_04', 'Uln_05', 'Uln_06']]
ulna_l = ulna_df.loc[ulna_df['Side'] == 'Left']
ulna_r = ulna_df.loc[ulna_df['Side'] == 'Right']
except KeyError:
try:
ulna_l = df[['Id', 'Uln_01', 'Uln_04', 'Uln_05', 'Uln_06']]
ulna_r = df[['Id', 'Uln_01R', 'Uln_04R', 'Uln_05R', 'Uln_06R']]
ulna_r.rename(columns={'Uln_01R': 'Uln_01', 'Uln_04R': 'Uln_04', 'Uln_05R': 'Uln_05',
'Uln_06R': 'Uln_06'}, inplace=True)
ulna_l.loc[:, 'Side'] = 'Left'
ulna_l.loc[:, 'Element'] = 'Ulna'
ulna_r.loc[:, 'Side'] = 'Right'
ulna_r.loc[:, 'Element'] = 'Ulna'
except KeyError:
ulna_df = pd.DataFrame(columns={'Id', 'Side', 'Element', 'Uln_01', 'Uln_04', 'Uln_05', 'Uln_06'})
ulna_l = ulna_df.loc[ulna_df['Side'] == 'Left']
ulna_r = ulna_df.loc[ulna_df['Side'] == 'Right']
ulna_l = ulna_l.dropna(subset=['Uln_01', 'Uln_04', 'Uln_05', 'Uln_06'], thresh=1)
ulna_r = ulna_r.dropna(subset=['Uln_01', 'Uln_04', 'Uln_05', 'Uln_06'], thresh=1)
return {'Tibia': (tibia_l, tibia_r), 'Fibula': (fibula_l, fibula_r),
'Femur': (femur_l, femur_r), 'Os_coxa': (os_coxa_l, os_coxa_r),
'Humerus': (humerus_l, humerus_r),'Radius': (radius_l, radius_r),
'Ulna': (ulna_l, ulna_r), 'Scapula': (scapula_l, scapula_r),
'Clavicle': (clavicle_l, clavicle_r)}
def get_measurement_keys(element):
measurement_keys = []
if 'Clavicle' == element:
measurement_keys.extend(['Cla_01', 'Cla_04', 'Cla_05'])
if 'Femur' == element:
        measurement_keys.extend(['Fem_01', 'Fem_02', 'Fem_03', 'Fem_04', 'Fem_05', 'Fem_06', 'Fem_07']) #Lower
if 'Fibula' == element:
measurement_keys.extend(['Fib_01', 'Fib_02'])
if 'Humerus' == element:
measurement_keys.extend(['Hum_01', 'Hum_02', 'Hum_03', 'Hum_04',
'Hum_05'])
if 'Os_coxa' == element:
measurement_keys.extend(['Osc_01', 'Osc_02'])
if 'Radius' == element:
measurement_keys.extend(['Rad_01', 'Rad_05', 'Rad_06'])
if 'Scapula' == element:
measurement_keys.extend(['Sca_01', 'Sca_02'])
if 'Tibia' == element:
measurement_keys.extend(['Tib_01', 'Tib_02', 'Tib_03', 'Tib_04', 'Tib_05'])
if 'Ulna' == element:
measurement_keys.extend(['Uln_01', 'Uln_04', 'Uln_05', 'Uln_06'])
return measurement_keys
|
jwarsom/osteometrics
|
osteometrics/utils/util.py
|
util.py
|
py
| 13,345 |
python
|
it
|
code
| 0 |
github-code
|
6
|
2780102996
|
################################################################################################
#Error search
################################################################################################
@n1_web_tools.route('/buscador-de-erros/', methods=['GET', 'POST'])
def erros():
form = SearchForm(request.form)
if request.method == 'POST' and form.validate():
try:
'''
            The logic that runs when a POST is made to this page goes here
'''
return render_template('index.html', form=form)
except Exception as error:
trace = traceback.format_exc()
print(trace)
print(str(error))
error = 'Ops, ocorreu um erro: '+str(error)
return render_template('index.html', form=form, error=error)
else:
return render_template('index.html', form=form)
return render_template('index.html')
################################################################################################
#Error search API
################################################################################################
@n1_web_tools.route('/api/', methods=['GET', 'POST'])
def errosAPI():
gerenciamento_conn = None
try:
'''
        The API logic goes here
'''
return jsonify({'data': 'true'})
except Exception as e:
trace = traceback.format_exc()
print(trace)
print(e)
return jsonify({'erro': str(e), 'stack':str(trace)})
|
quesmues/example-form-onsubmit-script
|
view.py
|
view.py
|
py
| 1,519 |
python
|
de
|
code
| 0 |
github-code
|
6
|
10754618870
|
from nltk.corpus import stopwords
import pandas as pd
from nltk.stem.snowball import SnowballStemmer
import re
import nltk
class ngrams:
def __init__(self, df,column,n=10):
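        # Build the n most frequent bigrams (and the top-10 trigrams) from the given dataframe column
        # after lower-casing, stop-word removal and punctuation stripping.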
texto = " ".join(str(x) for x in df[column].values)
tokens = texto.split()
tokens=[x.lower() for x in tokens]
        stopset = set(stopwords.words('english')) # set of English stop words
tokens = [w for w in tokens if not w in stopset]
stemmer=SnowballStemmer("english")
stemm_words=[]
tokens_clean=[]
for j in tokens:
sa=re.sub('[^A-Za-z]+', '', j)
tokens_clean.append(sa)
for s in tokens_clean:
try:
stem= stemmer.stem(s)
if s!='':
stemm_words.append(str(stem))
except:
pass
cuenta = len(tokens_clean)
bigrams = nltk.bigrams(tokens_clean)
trigrams=nltk.trigrams(tokens_clean)
fdist = nltk.FreqDist(bigrams)
fdist1 = nltk.FreqDist(trigrams)
#for i,j in fdist.items():
# print i,j
frecuentbigrams=fdist.most_common(n)
frecuenttrigrams=fdist1.most_common(10)
bigramslist=[]
trigramslist=[]
for x in frecuentbigrams:
a,b=x
l,m=a
if l !='' and m !='' and l!=m:
bigramslist.append(a)
bigramsduplicates=[]
for idx, x in enumerate(bigramslist):
for idy, y in enumerate(bigramslist):
if idx!=idy:
if x[0]==y[1]:
duplicate=(x[1],x[0])
#print bigramsduplicates
#print x
if x not in bigramsduplicates:
bigramslist.pop(idx)
bigramsduplicates.append(x)
bigramsduplicates.append(duplicate)
for x in frecuenttrigrams:
a,b=x
trigramslist.append(a)
self.bigrams=bigramslist
self.trigrams=trigramslist
|
omedranoc/ThesisPreprocessing
|
model/ngrams.py
|
ngrams.py
|
py
| 1,850 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39870180773
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import imageio
import scipy.misc
import numpy as np
import videostyletransfer as vst
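# Load the content frames, the style image and the precomputed flow data, run the style-transfer module,
# and write the styled frames out as an mp4.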
video_id = 'temple_2'
content_path = os.getcwd() + '/input/' + video_id + '/'
style_path = os.getcwd() + '/style-images/starry_night.jpg'
flow_path = os.getcwd() + '/flow/' + video_id + '/'
height = 384  # other sizes tried: 192, 96, 436
width = 512  # other sizes tried: 256, 128, 1024
num_frames = 5
fps = 30
content = []
for i in range(1, num_frames + 1):
content_image = imageio.imread(content_path + ('frame_%04d.png' % i))
content.append(content_image[:height,:width,:])
style = imageio.imread(style_path)
style = scipy.misc.imresize(style, [height, width])
style = np.array(style)
vst_module = vst.VideoStyleTransferModule(content, style, flow_path)
styled_frames = vst_module.optimize_images()
vid_id = os.getcwd() + '/output/' + video_id + '.mp4'
writer = imageio.get_writer(vid_id, fps=fps)
for f in styled_frames:
writer.append_data(f)
writer.close()
|
tomstrident/Video-Style-Transfer
|
video_style_transfer_demo.py
|
video_style_transfer_demo.py
|
py
| 986 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42673179666
|
import graphene
from graphene_django.types import DjangoObjectType
from graphql import GraphQLError
from .models import *
from django.contrib.auth.models import User
class user(DjangoObjectType):
class Meta:
model = User
class task(DjangoObjectType):
class Meta:
model = Task
class Query(object):
all_tasks = graphene.List(task)
profile = graphene.NonNull(user)
def resolve_all_tasks(self, info, **kwargs):
if(not info.context.user.is_authenticated):
raise GraphQLError('Please log in')
return Task.objects.filter(user=info.context.user)
def resolve_profile(self, info, **kwargs):
if(not info.context.user.is_authenticated):
raise GraphQLError('Please log in')
return info.context.user
# def resolve_all_users(self, info, **kwargs):
# return User.objects.all()
|
neelansh/Todo_app_graphql
|
todo/app/schema.py
|
schema.py
|
py
| 879 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73203288508
|
from konlpy.tag import Kkma
from konlpy.tag import Twitter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
import numpy as np
import pymongo
from pymongo import MongoClient
import pymysql
class SentenceTokenizer(object):
def __init__(self):
self.kkma = Kkma()
self.twitter = Twitter()
self.stopwords = []
def text_to_sentence(self, text):
sentences = self.kkma.sentences(text)
        # Merge sentences that are too short into the previous sentence
for i in range(0, len(sentences)):
if len(sentences[i]) <= 10:
sentences[i-1] += (' ' + sentences[i])
sentences[i] = ''
return sentences
def get_words(self, sentences):
words = []
for s in sentences:
            if s != '':
                # Extract keyword nouns with the twitter module's nouns() function,
                # then keep only those that are not stop words and are at least two characters long
words.append(' '.join([word for word in self.twitter.nouns(str(s)) if word not in self.stopwords and len(word) > 1]))
return words
class GraphMatrix(object):
def __init__(self):
self.tfidf = TfidfVectorizer()
self.cnt_vec = CountVectorizer()
self.graph_sentence = []
    # Build a sentence-term matrix using the TF-IDF model
def create_sentence_graph(self, sentence):
tfidf_matrix = self.tfidf.fit_transform(sentence).toarray()
        # Multiply the sentence-term matrix by its transpose to build the sentence correlation matrix (edge weights)
self.graph_sentence = np.dot(tfidf_matrix, tfidf_matrix.T)
return self.graph_sentence
    # Build a sentence-term matrix using raw term counts
def create_words_graph(self, sentence):
cnt_vec_mat = normalize(self.cnt_vec.fit_transform(sentence).toarray().astype(float), axis=0)
        # Vocabulary extracted from the sentence array
vocab = self.cnt_vec.vocabulary_
return np.dot(cnt_vec_mat.T, cnt_vec_mat), {vocab[word] : word for word in vocab}
# Compute the TextRank scores
class TextRank(object):
def get_rank(self, graph, d=0.85):
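        # Solve the PageRank-style linear system (I - d*M) R = (1 - d) directly with np.linalg.solve
        # instead of iterating to convergence; M is the column-normalized adjacency matrix.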
A = graph
matrix_size = A.shape[0]
for i in range(matrix_size):
            A[i, i] = 0 # zero out the diagonal
link_sum = np.sum(A[:,i]) # A[:, i] = A[:][i]
if link_sum != 0:
A[:,i] /= link_sum
A[:,i] *= -d
A[i,i] = 1
B = (1-d) * np.ones((matrix_size, 1))
ranks = np.linalg.solve(A, B) # solve Ax = B
return {idx: r[0] for idx, r in enumerate(ranks)}
class Ranking(object):
def __init__(self, doc):
self.sentence_tokenize = SentenceTokenizer()
self.sentences = []
for text in doc:
self.sentences += self.sentence_tokenize.text_to_sentence(text)
self.words = self.sentence_tokenize.get_words(self.sentences)
self.graph_matrix = GraphMatrix()
self.sentence_graph = self.graph_matrix.create_sentence_graph(self.words)
self.words_graph, self.idx2word = self.graph_matrix.create_words_graph(self.words)
self.textRank = TextRank()
self.sentence_rank_idx = self.textRank.get_rank(self.sentence_graph)
self.sorted_sent_rank_idx = sorted(self.sentence_rank_idx, key=lambda k: self.sentence_rank_idx[k], reverse=True)
self.word_rank_idx = self.textRank.get_rank(self.words_graph)
self.sorted_word_rank_idx = sorted(self.word_rank_idx, key=lambda k: self.word_rank_idx[k], reverse=True)
def keywords(self, word_num=20):
keywords = []
index = []
for idx in self.sorted_word_rank_idx[:word_num]:
index.append(idx)
for idx in index:
keywords.append(self.idx2word[idx])
return keywords
# MongoDB connection & querying data
username = 'hs'
password = '12345'
client = MongoClient('mongodb://%s:%s@localhost:27017/allreview'%(username, password))
db = client['allreview']
document = []
for review in db.review.find({'category':'beauty'}):
document.append(review['context'])
# Top20 keywords extraction
rank = Ranking(document)
print(rank.keywords())
# MySQL connection
conn = pymysql.connect(host='localhost', user='root', password='12345', db='allreview', charset='utf8')
curs = conn.cursor()
sql = 'insert into keyword(word, category) values(%s, %s)'
for keyword in rank.keywords():
curs.execute(sql,(keyword,8))
conn.commit()
|
hanseul1/Text-summarize-with-TextRank
|
main.py
|
main.py
|
py
| 4,729 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75224158908
|
from asyncio import run
import discord
from discord import app_commands
from discord import Interaction
from discord import Intents
from discord.ext import commands, tasks
import os
import asqlite
# Import the Guild ID
from misc.load import TEST_SERVER_ID
# Import the bot token
from Secure import BOT_TOKEN
# Bot intents: default, plus message content and members enabled
intents = Intents.default()
intents.message_content = True
intents.members = True
# Create the bot
bot = commands.Bot(command_prefix='!', intents=intents, activity=discord.Activity(type=discord.ActivityType.playing, name="with myself, cause these commands are amazing!"))
# Create the on_ready event
@bot.event
async def on_ready():
print('----------------------')
print(f'Logged in as {bot.user.name}#{bot.user.discriminator}')
print('----------------------')
async def first_start_db():
# Database setup
connection = await asqlite.connect('database.db')
cursor = await connection.cursor()
await cursor.execute("""CREATE TABLE IF NOT EXISTS user_data (
id INTEGER PRIMARY KEY,
user_id INTEGER,
name TEXT,
class TEXT,
zone INTEGER,
max_zone INTEGER,
level INTEGER,
xp INTEGER,
xp_cap INTEGER,
gold INTEGER,
hp INTEGER,
max_hp INTEGER,
attack INTEGER,
defense INTEGER,
agility INTEGER,
luck INTEGER,
intelligence INTEGER
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS monster_data (
id INTEGER PRIMARY KEY,
name TEXT,
zone INTEGER,
is_boss BOOLEAN,
level INTEGER,
hp INTEGER,
max_hp INTEGER,
attack INTEGER,
defense INTEGER,
dodge_chance INTEGER,
give_xp INTEGER,
give_gold INTEGER
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS shop_data (
item_id INTEGER PRIMARY KEY,
name TEXT,
price INTEGER,
description TEXT,
sellback_price INTEGER
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS inventory_data (
user_id INTEGER,
item_id INTEGER,
item_name TEXT,
item_amount INTEGER,
item_sell_price INTEGER,
item_sellable BOOLEAN
)""")
await cursor.execute("""CREATE TABLE IF NOT EXISTS zone_data (
zone_id INTEGER PRIMARY KEY,
name TEXT,
description TEXT
)""")
await connection.commit()
async def add_db_items():
connection = await asqlite.connect('database.db')
cursor = await connection.cursor()
# Add items to the shop
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (1, 'Small Health Potion', 10, 'Heals 25 HP', 5)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (2, 'Medium Health Potion', 20, 'Heals 50 HP' , 10)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (3, 'Large Health Potion', 30, 'Heals 75 HP', 15)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (4, 'Perfect Health Potion', 50, 'Heals 100 HP', 25)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (100, 'Wheat seed', 100, 'Used to farm wheat', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (101, 'Carrot seed', 300, 'Used to farm carrots', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (102, 'Potato seed', 500, 'Used to farm potatoes', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (103, 'Beetroot seed', 750, 'Used to farm beetroot', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (104, 'Melon seed', 1000, 'Used to farm melons', 0)")
await connection.commit()
await cursor.execute("INSERT INTO shop_data (item_id, name, price, description, sellback_price) VALUES (105, 'Pumpkin seed', 1500, 'Used to farm pumpkins', 0)")
await connection.commit()
# Add zones to the zone database
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (1, 'The young forest', 'A small bright forest full of life')")
await connection.commit()
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (2, 'The deep forest', 'A deep dark forest roamed only by ferocious animals')")
await connection.commit()
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (3, 'The Adventurer Road', 'The road that leads to the Town of Beginnings!')")
await connection.commit()
await cursor.execute("INSERT INTO zone_data (zone_id, name, description) VALUES (4, 'The Town of Beginnings', 'The town where everything starts! Fight good adventurers to grab the attention of the Adventurers Guild leader!')")
await connection.commit()
# Add monsters to the monster database
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Slimes', 1, 0, 1, 15, 15, 5, 5, 0, 5, 1)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Spider', 1, 0, 1, 20, 20, 7, 5, 0, 8, 3)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Rabbit', 1, 0, 1, 10, 10, 5, 5, 0, 3, 1)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Giant Spider', 1, 1, 1, 30, 30, 25, 10, 0, 15, 10)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Wolf', 2, 0, 1, 25, 25, 20, 15, 25, 15, 15)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Brown Bear', 2, 0, 1, 40, 40, 15, 30, 0, 15, 15)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Wolf Pack Leader', 2, 1, 1, 50, 50, 30, 15, 30, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Bandit Archer', 3, 0 , 1, 30, 30, 40, 0, 20, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Bandit Thug', 3, 0, 1, 50, 50, 25, 20, 0, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Armored Bandit', 3, 0, 1, 60, 60, 20, 30, 0, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Rogue Bandit', 3, 0, 1, 35, 35, 40, 0, 40, 25, 25)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Bandit Leader', 3, 1, 1, 70, 70, 30, 20, 20, 40, 50)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Fighter', 4, 0, 1, 100, 100, 15, 25, 0, 30, 75)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Mage', 4, 0, 1, 50, 50, 50, 10, 20, 30, 75)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Archer', 4, 0, 1, 75, 75, 30, 5, 40, 30, 75)")
await connection.commit()
await cursor.execute("INSERT INTO monster_data (name, zone, is_boss, level, hp, max_hp, attack, defense, dodge_chance, give_xp, give_gold) VALUES ('Adventurer Guild Leader', 4, 1, 1, 150, 150, 25, 30, 20, 50, 100)")
await connection.commit()
tree = bot.tree
@tree.command(name='test', description='testing command to make sure everything is working')
@app_commands.checks.cooldown(1, 20)
async def test(interaction: Interaction):
await interaction.response.send_message(f'The command is working properly, {interaction.user.mention}!')
# Sync the tree commands
def check_if_its_me(interaction: discord.Interaction) -> bool:
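    # Restrict maintenance commands (sync, database setup) to the bot owner's hard-coded user id.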
return interaction.user.id == 263628384674775040
@tree.command(name='sync', description='Sync all the commands')
@app_commands.checks.cooldown(1, 100)
@app_commands.check(check_if_its_me)
async def sync(interaction: Interaction):
await tree.sync()
await interaction.response.send_message('Synced all the commands', ephemeral=True)
@tree.command(name="launchdb", description="launches the database")
@app_commands.check(check_if_its_me)
async def launchdb(interaction: Interaction):
await first_start_db()
await interaction.response.send_message("Database launched successfully", ephemeral=True)
@tree.command(name='adddbitems', description='Adds items to the database')
@app_commands.check(check_if_its_me)
async def adddbitems(interaction: Interaction):
await add_db_items()
await interaction.response.send_message("Items added successfully", ephemeral=True)
# Error checks
@test.error
async def on_test_error(interaction: Interaction, error: app_commands.AppCommandError):
if isinstance(error, app_commands.errors.CommandOnCooldown):
embed = discord.Embed(title='Error', description=f'You are on cooldown, please wait **{error.retry_after:.2f} seconds**', color=0xff0000)
await interaction.response.send_message(embed=embed, ephemeral=True)
# Check errors for all app commands
@tree.error
async def on_app_command_error(interaction: Interaction, error: app_commands.AppCommandError):
if isinstance(error, app_commands.errors.CommandOnCooldown):
embed = discord.Embed(title='Error', description=f'You are on cooldown, please wait {error.retry_after:.2f} seconds')
await interaction.response.send_message(embed=embed)
elif isinstance(error, app_commands.errors.MissingPermissions):
embed = discord.Embed(title='Error', description=f'You are missing permissions to use this command, please contact the owner or the bot developer if you believe this is an issue')
await interaction.response.send_message(embed=embed, ephemeral=True)
elif isinstance(error, app_commands.errors.MissingRole):
embed = discord.Embed(title='Error', description=f'You are missing the role to use this command, please contact the owner or the bot developer if you believe this is an issue')
await interaction.response.send_message(embed=embed, ephemeral=True)
elif isinstance(error, app_commands.errors.BotMissingPermissions):
embed = discord.Embed(title='Error', description=f'The bot is missing the permission to do the command, please contact the owner or the bot developer if you believe this is an issue')
await interaction.response.send_message(embed=embed, ephemeral=True)
else:
raise error
# Load all cogs
async def load():
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
await bot.load_extension(f'cogs.{filename[:-3]}')
print("All cogs loaded successfully")
print('----------------------')
for filename in os.listdir('./rpgcogs'):
if filename.endswith('.py'):
await bot.load_extension(f'rpgcogs.{filename[:-3]}')
print("All rpgcogs loaded successfully")
print('----------------------')
async def main():
await load()
await bot.start(BOT_TOKEN)
run(main())
|
Alexici/discordpybot
|
main.py
|
main.py
|
py
| 12,874 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9087413248
|
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import math
import random, os
import shutil
# Extract and save MFCCs from audiofiles
def save_mfcc(dataset_path, json_path, n_mfcc=20, n_fft=2048, hop_length=1024, num_segments=1):
data = {
"mapping": [],
"mfcc": [],
"labels": []
}
samples_per_segment = int(SAMPLES_PER_FILE / num_segments)
expected_num_mfcc_vectors = math.ceil(samples_per_segment / hop_length)
for i, (dirpath, dirnames ,filenames) in enumerate(os.walk(dataset_path)):
if dirpath is not dataset_path:
dirpath_components = dirpath.split("\\")
semantic_label = dirpath_components[-1]
data["mapping"].append(semantic_label)
print("\nProcessing {}".format(semantic_label))
for file in filenames:
file_path = os.path.join(dirpath, file)
signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
for j in range(num_segments):
start_sample = samples_per_segment*j
finish_sample = start_sample + samples_per_segment
mfccs = librosa.feature.mfcc(signal[start_sample:finish_sample],
n_fft=n_fft,
hop_length=hop_length,
n_mfcc=n_mfcc, sr=SAMPLE_RATE)
mfccs = mfccs.T
#print(type(mfccs), np.shape(mfccs))
if len(mfccs) == expected_num_mfcc_vectors:
data["mfcc"].append(mfccs.tolist())
data["labels"].append(i-1)
print("{}, segment:{}".format(file_path, j+1))
with open(JSON_PATH, 'w') as fp:
json.dump(data, fp, indent=4)
return 0
# for mfcc
DATASET_PATH = "../Database_500"
JSON_PATH = "../Features/mfcc_500.json"
SAMPLE_RATE = 44100 # Sample rate of the audio signals (frequency)
DURATION = 10 # Duration of each audio data (seconds)
SAMPLES_PER_FILE = SAMPLE_RATE * DURATION
# -------------------------------------------------
save_mfcc(DATASET_PATH, JSON_PATH)
|
NOR2R/ML_miniproject
|
pythonScripts/MFCCExtraction_SaveJson.py
|
MFCCExtraction_SaveJson.py
|
py
| 2,349 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12010103629
|
import time
from appium import webdriver
# Desired capabilities for the device connection
descried_caps = dict()
descried_caps["platformName"] = "android"
descried_caps["platformVersion"] = "5.1.1"
descried_caps["deviceName"] = "emulator-5554"
descried_caps["appPackage"] = "com.bjcsxq.chat.carfriend"
descried_caps["appActivity"] = ".MainActivity"
# Instantiate the driver object  # make sure the URL is not mistyped
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', descried_caps)
time.sleep(3)
# Send the app to the background
driver.close_app()
time.sleep(3)
# Launch an app (package name, activity name)
driver.start_activity("com.ss.android.ugc.aweme", ".main.MainActivity")
print(driver.current_package)
time.sleep(3)
# Quit the driver object (closes the connection)
driver.quit()
|
1chott/appAutoStudy
|
code_D_04/code_03_常见api启动关闭app.py
|
code_03_常见api启动关闭app.py
|
py
| 747 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39912096462
|
import jwt
from django.contrib.auth import authenticate
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.generics import GenericAPIView
from rest_framework import status
from smtplib import SMTPException
from .serializers import SignupSerializer, VerifyAccountSerializer, LogininSerializer
from .emails import send_otp_via_email
from .models import YourPoolUser
class SignupView(GenericAPIView):
serializer_class = SignupSerializer
def post(self, request):
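        # Validate the signup payload, create the user, then e-mail a one-time password for verification.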
try:
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
send_otp_via_email(serializer.data["email"])
return Response(
{
"status": 201,
"message": "registration successfully check email",
"data": serializer.data,
}
)
return Response(
{
"status": 400,
"message": "something went wrong",
"data": serializer.errors,
}
)
except SMTPException as e:
return Response(
{
"status": e.smtp_code,
"message": e.smtp_error,
"data": serializer.errors,
}
)
class VerifyOTP(APIView):
def post(self, request):
try:
data = request.data
serializer = VerifyAccountSerializer(data=data)
if serializer.is_valid():
email = serializer.data["email"]
otp = serializer.data["otp"]
user = YourPoolUser.objects.filter(email=email)
if not user.exists():
return Response(
{
"status": 400,
"message": "something went wrong",
"data": "invalid email",
}
)
if user[0].otp != otp:
return Response(
{
"status": 400,
"message": "something went wrong",
"data": "wrong otp",
}
)
user = user.first()
user.is_email_verified = True
user.save()
return Response(
{
"status": 200,
"message": "account verified",
"data": {},
}
)
return Response(
{
"status": 400,
"message": "something went wrong",
"data": serializer.errors,
}
)
except Exception as e:
print(e)
class LoginView(GenericAPIView):
serializer_class = LogininSerializer
def post(self, request):
email = request.data.get("email", None)
password = request.data.get("password", None)
user = authenticate(username=email, password=password)
if user and user.is_email_verified:
serializer = self.serializer_class(user)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(
{"message": "Invalid credentials, try again"},
status=status.HTTP_401_UNAUTHORIZED,
)
|
TEAM-ILSAN/yourpool-backend
|
users/views.py
|
views.py
|
py
| 3,630 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16782914255
|
import torch
import random
import gc
import optuna
import pandas as pd
import numpy as np
from utils import map3, compute_metrics, set_seed
from config3 import CFG
from config import CFG1
from datasets import Dataset
from torch.optim import AdamW
from pathlib import Path
from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer, AutoModel, \
get_cosine_schedule_with_warmup
from data import preprocess, DataCollatorForMultipleChoice, tokenizer, EarlyStoppingCallback, RemoveOptimizerCallback
from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model, PeftModel, PeftConfig, TaskType, \
PeftModelForSequenceClassification
from colorama import Fore, Back, Style
from sklearn.model_selection import StratifiedKFold
from eda import augment_fn, augmentation_data, eda
def model_init(trial):
return AutoModelForMultipleChoice.from_pretrained("model/deberta-v3-large-hf-weights")
###############################################################
################## Train/Valid Dataset ###################
###############################################################
def get_datasets(df, ext_df, fold):
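    # The external data is used as the training split and the original dataframe as the validation split;
    # both are tokenized into Hugging Face Datasets and the raw validation answers are returned as labels.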
train_df = ext_df
# valid_ext_df = ext_df.query("fold==@fold")
# valid_df = pd.concat([df, valid_ext_df], axis=0).reset_index(drop=True)
valid_df = df
valid_labels = valid_df['answer']
train_dataset = Dataset.from_pandas(train_df)
train_dataset = train_dataset.map(preprocess, remove_columns=['prompt', 'A', 'B', 'C', 'D', 'E', 'answer'])
valid_dataset = Dataset.from_pandas(valid_df)
valid_dataset = valid_dataset.map(preprocess, remove_columns=['prompt', 'A', 'B', 'C', 'D', 'E', 'answer'])
return train_dataset, valid_dataset, valid_labels
###############################################################
################## Hyperparameter Search #################
###############################################################
def optuna_hp_space(trial):
    # Define the hyperparameter space to tune
hyperparameters = {
# Floating point parameter (log)
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 1e-2, log=True),
# Floating point parameter (log)
"weight_decay": trial.suggest_float("weight_decay", 0.001, 0.01, log=True),
# Floating point parameter (log)
"warm_up_radio": trial.suggest_float("warmup_ratio", 0.1, 0.8, log=True),
# Integer parameter(step)
"gradient_accumulation_steps": trial.suggest_int("gradient_accumulation_steps", 2, 10, step=2)
}
train_df = pd.read_csv("./data/train_context.csv")
ext_df = pd.read_csv("./data/ext_train_context.csv")[:1500]
ext_df["prompt"] = ext_df["context"][:100] + " #### " + ext_df["prompt"]
ext_df = ext_df.sample(frac=1, random_state=CFG.seed).reset_index(drop=True)
train_dataset, valid_dataset, valid_label = get_datasets(train_df, ext_df, fold=0)
model = AutoModelForMultipleChoice.from_pretrained(CFG.model_path)
training_args = TrainingArguments(
warmup_ratio=hyperparameters["warm_up_radio"],
learning_rate=hyperparameters["learning_rate"],
weight_decay=hyperparameters["weight_decay"],
per_device_train_batch_size=CFG.per_device_train_batch_size,
per_device_eval_batch_size=CFG.per_device_eval_batch_size,
num_train_epochs=CFG.epochs,
report_to='none',
gradient_accumulation_steps=hyperparameters["gradient_accumulation_steps"],
output_dir=CFG.output_dir,
evaluation_strategy="steps",
save_strategy="steps",
save_total_limit=1,
load_best_model_at_end=True,
seed=CFG.seed,
fp16=True,
lr_scheduler_type='cosine'
)
trainer = Trainer(
model=model,
args=training_args,
tokenizer=tokenizer,
data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
callbacks=[RemoveOptimizerCallback()],
)
trainer.train()
valid_pred = trainer.predict(valid_dataset).predictions
valid_pred_ids = np.argsort(-valid_pred, 1)
valid_pred_letters = np.array(list('ABCDE'))[valid_pred_ids][:, :3]
valid_map3 = map3(valid_label, valid_pred_letters)
return valid_map3
def optuna_hp_space_train1(trial):
    # Define the hyperparameter space to tune
hyperparameters = {
# Floating point parameter (log)
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 1e-2, log=True),
# Floating point parameter (log)
"weight_decay": trial.suggest_float("weight_decay", 0.001, 0.01, log=True),
# Floating point parameter (log)
"warm_up_radio": trial.suggest_float("warmup_ratio", 0.1, 0.8, log=True),
# Integer parameter(step)
"gradient_accumulation_steps": trial.suggest_int("gradient_accumulation_steps", 2, 10, step=2)
}
return hyperparameters
def optuna_hp_space_way2(trial):
return {
# Floating point parameter (log)
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 1e-2, log=True),
# Floating point parameter (log)
"weight_decay": trial.suggest_float("weight_decay", 0.001, 0.01, log=True),
# Floating point parameter (log)
"warm_up_radio": trial.suggest_float("warmup_ratio", 0.1, 0.8, log=True),
# Integer parameter(step)
"gradient_accumulation_steps": trial.suggest_int("gradient_accumulation_steps", 2, 10, step=2)
}
def main0():
study = optuna.create_study(direction="maximize")
study.optimize(optuna_hp_space, n_trials=10)
    # Print the best hyperparameter combination and its score
print('Best hyperparameters: {}'.format(study.best_params))
print('Best performance: {:.4f}'.format(study.best_value))
best_params = study.best_params
def main1():
set_seed(CFG.seed)
df_train = pd.read_csv("./data/train.csv")
df_train = df_train.drop(columns="id")
df_train.dropna(inplace=True)
df_train = df_train.reset_index(drop=True)
stem_df = pd.read_csv("./data/stem_1k_v1.csv")
stem_df = stem_df.drop(columns="id")
ext_df = pd.concat([
pd.read_csv("data/6000_train_examples.csv"), # 6000
pd.read_csv("data/extra_train_set.csv"),
pd.read_csv("data/llm-science-3k-data-test.csv"), # 3000
stem_df # 1000
])
ext_len = len(ext_df) // 3
ext_df = ext_df[:ext_len]
del stem_df
ext_df = ext_df.drop_duplicates()
    # Drop rows from ext_df whose prompt already appears in df_train
values_to_exclude = df_train['prompt'].values
mask = ext_df['prompt'].isin(values_to_exclude)
ext_df = ext_df[~mask]
del values_to_exclude, mask
if CFG1.use_shuffle_options:
shuffle_df_train = augment_fn(df_train)
df_train = pd.concat([df_train, shuffle_df_train], axis=0)
shuffle_ext_df = augment_fn(ext_df)
ext_df = pd.concat([ext_df, shuffle_ext_df], axis=0)
ext_df = ext_df.sample(frac=1, random_state=CFG1.seed).reset_index(drop=True)
df_train = df_train.sample(frac=1, random_state=CFG1.seed).reset_index(drop=True)
train_dataset, valid_dataset, valid_label = get_datasets(df_train, ext_df, fold=0)
# model = AutoModelForMultipleChoice.from_pretrained(CFG1.model_path)
#
# if CFG1.is_freezingEmbedding:
# # Freeze the embeddings
# for param in model.base_model.embeddings.parameters():
# param.requires_grad = False
#
# if CFG1.use_self_optimizer:
# # Create optimizer and learning rate scheduler
# # Define different learning rates and weight decay for different layers
# optimizer_grouped_parameters = [
# {
# "params": [p for n, p in model.named_parameters() if "base_model.embeddings" not in n],
# "lr": 1e-5, # Example learning rate for top layers
# "weight_decay": 0.01, # Example weight decay
# },
#
# {
# "params": [p for n, p in model.named_parameters() if "base_model.embeddings" in n],
# "lr": 1e-4, # Example learning rate for bottom layers
# "weight_decay": 0.001, # Example weight decay
# },
# ]
# optimizer = AdamW(optimizer_grouped_parameters, lr=CFG1.learning_rate,
# weight_decay=CFG1.weight_decay)
#
# # Create a cosine learning rate scheduler
# num_training_steps = CFG1.epochs * (ext_len // (CFG1.per_device_train_batch_size * 2))
# scheduler = get_cosine_schedule_with_warmup(optimizer,
# num_warmup_steps=CFG1.warmup_ratio * num_training_steps,
# num_training_steps=num_training_steps)
training_args = TrainingArguments(
learning_rate=CFG1.learning_rate,
weight_decay=CFG1.weight_decay,
warmup_ratio=CFG1.warmup_ratio,
per_device_train_batch_size=CFG1.per_device_train_batch_size,
per_device_eval_batch_size=CFG1.per_device_eval_batch_size,
num_train_epochs=CFG1.epochs,
report_to='none',
output_dir=CFG1.output_dir,
evaluation_strategy="steps",
save_strategy="steps",
save_total_limit=1,
load_best_model_at_end=True,
seed=CFG1.seed
)
trainer = Trainer(
model_init=model_init,
args=training_args,
tokenizer=tokenizer,
data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
callbacks=[RemoveOptimizerCallback()],
)
trainer.hyperparameter_search(
direction="maximize",
backend="optuna",
hp_space=optuna_hp_space_way2,
n_trials=10,
compute_objective=compute_metrics
)
def main2():
train_df = pd.read_csv("./data/train_context.csv")
ext_df = pd.read_csv("./data/ext_train_context.csv")[:1500]
ext_df["prompt"] = ext_df["context"] + " #### " + ext_df["prompt"]
ext_df = ext_df.sample(frac=1, random_state=CFG.seed).reset_index(drop=True)
train_dataset, valid_dataset, valid_label = get_datasets(train_df, ext_df, fold=0)
training_args = TrainingArguments(
learning_rate=CFG.learning_rate,
weight_decay=CFG.weight_decay,
warmup_ratio=CFG.warmup_ratio,
per_device_train_batch_size=CFG.per_device_train_batch_size,
per_device_eval_batch_size=CFG.per_device_eval_batch_size,
num_train_epochs=CFG.epochs,
report_to='none',
output_dir=CFG.output_dir,
evaluation_strategy="steps",
save_strategy="steps",
save_total_limit=1,
load_best_model_at_end=True,
metric_for_best_model='eval_loss',
seed=CFG.seed
)
trainer = Trainer(
model_init=model_init,
args=training_args,
tokenizer=tokenizer,
data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
callbacks=[RemoveOptimizerCallback()],
)
trainer.hyperparameter_search(
direction="maximize",
backend="optuna",
hp_space=optuna_hp_space_way2,
n_trials=10,
compute_objective=compute_metrics
)
if __name__ == "__main__":
main2()
|
zdhdream/LLM-Science-Exam
|
Hyperparameter-Search.py
|
Hyperparameter-Search.py
|
py
| 11,550 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22124073233
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#todo: stop words and symbols are mucking things up
import os
import os.path
import nltk
import operator
from nltk import word_tokenize
import collections
import math
import sklearn
import sklearn.cluster
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
def ppmitopwords():
d = os.getcwd()
stupid_symbols = ["(", ")", ".", ",", "'s", "``", "''", "'", "n't", ": ", ";", "?"]
common_words_to_ignore = ["walt", "whitman", "mr", "and", "own", "thy", "thee"]
#import docs into program, placing each in dictionary with content as the key
docs_dict = {}
the_big_corpus = ""
path = 'pages/scripts/ppmi/source_text/'
path = os.path.join(d, path)
for filename in os.listdir(path):
with open(os.path.join(path, filename)) as currentfile:
current_file = currentfile.read()
current_file = current_file.replace('xml', '')
for word in stupid_symbols:
current_file = current_file.replace(word, '')
for word in common_words_to_ignore:
current_file = current_file.replace(word, '')
#current_file = current_file.decode('utf-8')
the_big_corpus = the_big_corpus + current_file
docs_dict[current_file] = filename
#change to numbers so I can print in order of years, otherwise it comes out out of order and I'm picky.
for now in docs_dict.keys():
filename = docs_dict[now]
filename = filename.replace('.txt', '')
file_number = int(filename)
docs_dict[now] = file_number
#ppmi
print("------------------PMI RESULTS, TOP 10 WORDS PER YEAR-----------------------\n")
raw_matrix_words = []
raw_matrix_counts = []
token_to_index = {}
#raw counts of words into matrix, put tokens in raw_matrix_words to create matrix of words
for key in docs_dict.keys():
tokens_dict = {}
content = key
tokens = word_tokenize(content)
raw_matrix_words.append(tokens)
#get raw token count
for token_set in raw_matrix_words:
counter_dict = {}
for token in token_set:
counter_dict[token] = 0
for token in token_set:
counter_dict[token] = counter_dict[token] + 1
list_for_tokens_tups = []
for word in counter_dict.keys():
word_tup = (word, counter_dict[word])
list_for_tokens_tups.append(word_tup)
raw_matrix_counts.append(list_for_tokens_tups)
#now the raw_matrix_counts contains an entry for each list of tuples, for alignment
#idea, don't make a matrix, for each doc find entire sum, find sum of all matching words in lists, work from there...
total = 0 #sum of full 'matrix' starts here
for a_list in raw_matrix_counts:
for a_tup in a_list:
total = total + a_tup[1]
#now get each column (word)
word_dict = {} #represent sum of columns
the_big_tokens = word_tokenize(the_big_corpus)
for a_list in raw_matrix_counts:
for a_tup in a_list:
word = a_tup[0]
word_dict[word] = 0
for a_list in raw_matrix_counts:
for a_tup in a_list:
word = a_tup[0]
word_dict[word] = word_dict[word] + a_tup[1]
#col_dict stores the sum of the column divided by the total
col_dict = {}
for word in word_dict:
value = float(word_dict[word])
value = float(value/total)
col_dict[word] = value
#doc dict will contain sum of all words in a document
docu_dict = {}
list_of_years = list(docs_dict.values())
year_index = 0
for a_list in raw_matrix_counts:
total_in_doc = 0
for a_tup in a_list:
total_in_doc = total_in_doc + a_tup[1]
docu_dict[list_of_years[year_index]] = total_in_doc
year_index = year_index + 1
#so now we have the sum of the rows in docu_dict, with the key being the year the document is associated with
#we also have the sum of the columns, with the word being the key for the raw count, the col_dict contains the sum divided by the scalar value
row_dict = docu_dict
for key in row_dict.keys():
value = row_dict[key]
value = float(value)
value = float(value/total)
row_dict[key] = value
#row_dict = sum/value of docs // col_dict = sum/value of words
col_dict_len = len(col_dict)
row_dict_len = len(row_dict)
#going to do the scalar product now... difficult! (actually, coming back, not scalar, misnamed it, oh well)
scalar_dict = {}
for key_row, value_row in row_dict.items():
scalar_dict_value = {}
for key_col, value_col in col_dict.items():
value = float(col_dict[key_col]*row_dict[key_row])
scalar_dict_value[key_col] = value
scalar_dict[key_row] = scalar_dict_value #keeps in order of year and word for later extraction
#next, we get the "real" values, observed values, all above are "predictive values"... how much we EXPECT to see a word in each doc.
real_count_dict = {}
for key_doc, value_filename in docs_dict.items():
filename = value_filename
content = key_doc
tokens = word_tokenize(content)
tokens_dict = {}
        for token in tokens: #initialize all to 0 before raw count
tokens_dict[token] = 0
for token in tokens:
            tokens_dict[token] = tokens_dict[token] + 1 #raw counts for THIS DOC should be acquired
#now store doc
for token in tokens:
value = float(tokens_dict[token])
tokens_dict[token] = float(value/total)
real_count_dict[filename] = tokens_dict
#now get the ratio of the observed/predicted
for key in real_count_dict.keys():
for key2 in real_count_dict[key].keys():
real_count_dict[key][key2] = float(real_count_dict[key][key2] / scalar_dict[key][key2])
#now take the log of the new matrix (real_count_dict), according to online that implies taking the log of each value... lets hope this works.
for key in real_count_dict.keys():
for key2 in real_count_dict[key].keys():
if real_count_dict[key][key2] > 0.0:
real_count_dict[key][key2] = float(math.log(real_count_dict[key][key2]))
else:
real_count_dict[key][key2] = 0.0
for key in real_count_dict.keys():
dict_to_sort = real_count_dict[key]
sorted_dict = OrderedDict(sorted(dict_to_sort.items(), key=operator.itemgetter(1)))
real_count_dict[key] = sorted_dict
for key in real_count_dict.keys():
print(key) #key is year
print("-------------------------")
        for key2 in list(real_count_dict[key].keys())[:10]: #key2 is word
#print only top 10
word = key2
value = real_count_dict[key][key2]
print_string = " {} : {} "
print(word, value)
#myprintout.format(unicode(word).encode("iso-8859-2", "replace"), value)
print("\n")
return real_count_dict
#cooccurrence by year
#keyword search
|
mcwatera/WWTBHT
|
wwtbht/pages/scripts/ppmi/mcwatera_fp.py
|
mcwatera_fp.py
|
py
| 6,495 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74413560828
|
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if k == 0:
return
k = k % len(nums)
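        # Three-reversal trick: reverse the whole array, then the first k elements, then the remainder.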
def reverse(arr, i, j):
while i < j:
arr[i], arr[j] = arr[j], arr[i]
i += 1
j -= 1
reverse(nums, 0, len(nums)-1)
reverse(nums, 0, k-1)
reverse(nums, k, len(nums)-1)
|
garimaarora1/LeetCode-2023
|
rotate-array/rotate-array.py
|
rotate-array.py
|
py
| 494 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12567993602
|
'''Create big blob of data
'''
import os
import pandas as pd
from dataset_creators import bikerawdata
TPE_FILE_ROOT = '../bike_raw_data/taipei'
HEL_FILE_ROOT = '../bike_raw_data/helsinki'
LON_FILE_ROOT = '../bike_raw_data/london'
TOR_FILE_ROOT = '../bike_raw_data/toronto'
TPE_FILES = ['{}/{}'.format(TPE_FILE_ROOT, f) for f in os.listdir(TPE_FILE_ROOT)]
HEL_FILES = ['{}/{}'.format(HEL_FILE_ROOT, f) for f in os.listdir(HEL_FILE_ROOT)]
LON_FILES = ['{}/{}'.format(LON_FILE_ROOT, f) for f in os.listdir(LON_FILE_ROOT)]
TOR_FILES = ['{}/{}'.format(TOR_FILE_ROOT, f) for f in os.listdir(TOR_FILE_ROOT)]
TPE_BLOB = '../data_blob/taipei_bikeshare.csv'
HEL_BLOB = '../data_blob/helsinki_bikeshare.csv'
LON_BLOB = '../data_blob/london_bikeshare.csv'
TOR_BLOB = '../data_blob/toronto_bikeshare.csv'
def make_blob_by_city():
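    # Append each city's parsed ride data to its CSV blob, writing the header only if the file does not exist yet.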
for df in bikerawdata.parse('taipei', TPE_FILES):
df.to_csv(TPE_BLOB, mode='a', header=not os.path.exists(TPE_BLOB), index=False)
for df in bikerawdata.parse('helsinki', HEL_FILES):
df.to_csv(HEL_BLOB, mode='a', header=not os.path.exists(HEL_BLOB), index=False)
for df in bikerawdata.parse('london', LON_FILES):
df.to_csv(LON_BLOB, mode='a', header=not os.path.exists(LON_BLOB), index=False)
for df in bikerawdata.parse('toronto', TOR_FILES):
df.to_csv(TOR_BLOB, mode='a', header=not os.path.exists(TOR_BLOB), index=False)
if __name__ == '__main__':
make_blob_by_city()
|
anderzzz/viral-bikers
|
dataset_creators/make_blob.py
|
make_blob.py
|
py
| 1,444 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72532766909
|
"""
Functions and models to query scicrunch service REST API (https://scicrunch.org/api/)
- http client for API requests
- Error handling:
- translates network errors
- translates request error codes
Free functions with raw requests to the scicrunch.org API
- client request context
- raise_for_status=True -> Raise an aiohttp.ClientResponseError if the response status is 400 or higher
- validates response and prunes using pydantic models
SEE test_scicrunch_service_api.py
"""
import logging
from typing import Any
from aiohttp import ClientSession
from pydantic import BaseModel, Field
from yarl import URL
from .models import ResourceHit
from .settings import SciCrunchSettings
logger = logging.getLogger(__name__)
# MODELS --
#
# NOTE: These models are a truncated version of the data payload for a scicrunch response.
# NOTE: Examples of complete responses can be found in test_scicrunch.py::mock_scicrunch_service_api
#
class FieldItem(BaseModel):
field_name: str = Field(..., alias="field")
required: bool
value: str | None | list[Any] = None
class ResourceView(BaseModel):
resource_fields: list[FieldItem] = Field([], alias="fields")
version: int
curation_status: str
last_curated_version: int
scicrunch_id: str
@classmethod
def from_response_payload(cls, payload: dict):
assert payload["success"] == True # nosec
return cls(**payload["data"])
@property
def is_curated(self) -> bool:
return self.curation_status.lower() == "curated"
def _get_field(self, fieldname: str):
for field in self.resource_fields:
if field.field_name == fieldname:
return field.value
raise ValueError(f"Cannot file expected field {fieldname}")
def get_name(self):
return str(self._get_field("Resource Name"))
def get_description(self):
return str(self._get_field("Description"))
def get_resource_url(self):
return URL(str(self._get_field("Resource URL")))
class ListOfResourceHits(BaseModel):
__root__: list[ResourceHit]
# REQUESTS
async def get_all_versions(
unprefixed_rrid: str, client: ClientSession, settings: SciCrunchSettings
) -> list[dict[str, Any]]:
async with client.get(
f"{settings.SCICRUNCH_API_BASE_URL}/resource/versions/all/{unprefixed_rrid}",
params={"key": settings.SCICRUNCH_API_KEY.get_secret_value()},
raise_for_status=True,
) as resp:
body = await resp.json()
output: list[dict[str, Any]] = body.get("data") if body.get("success") else []
return output
async def get_resource_fields(
rrid: str, client: ClientSession, settings: SciCrunchSettings
) -> ResourceView:
async with client.get(
f"{settings.SCICRUNCH_API_BASE_URL}/resource/fields/view/{rrid}",
params={"key": settings.SCICRUNCH_API_KEY.get_secret_value()},
raise_for_status=True,
) as resp:
body = await resp.json()
assert body.get("success") # nosec
return ResourceView(**body.get("data", {}))
async def autocomplete_by_name(
guess_name: str, client: ClientSession, settings: SciCrunchSettings
) -> ListOfResourceHits:
async with client.get(
f"{settings.SCICRUNCH_API_BASE_URL}/resource/fields/autocomplete",
params={
"key": settings.SCICRUNCH_API_KEY.get_secret_value(),
"field": "Resource Name",
"value": guess_name.strip(),
},
raise_for_status=True,
) as resp:
body = await resp.json()
assert body.get("success") # nosec
return ListOfResourceHits.parse_obj(body.get("data", []))
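# A minimal usage sketch (illustrative only: the RRID below is a made-up placeholder and
# SciCrunchSettings is assumed to be configured elsewhere, e.g. from the environment):
#
#   async def demo(settings: SciCrunchSettings) -> None:
#       async with ClientSession() as client:
#           hits = await autocomplete_by_name("octopus", client, settings)
#           view = await get_resource_fields("SCR_012345", client, settings)
#           print(view.get_name(), view.is_curated)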
|
ITISFoundation/osparc-simcore
|
services/web/server/src/simcore_service_webserver/scicrunch/_rest.py
|
_rest.py
|
py
| 3,706 |
python
|
en
|
code
| 35 |
github-code
|
6
|
39081271022
|
import pandas as pd
def getAirline(row: pd.Series) -> str:
"""Run through the potential columns and return the one that matches
Parameters
----------
    row:pd.Series : row series of dataframe with Airline run through get_dummies
Returns
-------
string of the airline
"""
ret = ''
for c in ['AirTran', 'Alaska', 'Allegiant', 'AmericaWest', 'American', 'Continental', 'Delta', 'Frontier', 'Hawaiian', 'Northwest', 'Southwest', 'Spirit', 'US Airways', 'United', 'VirginAmerica', 'jetBlue']:
if row[c]:
ret = c
break
return ret
def get_airline_from_dummies(df: pd.DataFrame) -> pd.Series:
"""This is to reverse the effect of the get dummies call on the dataframe
Parameters
----------
df:pd.DataFrame : dataframe with the Airline column run
through Panda's get dummies
Returns
-------
pandas series of airline labels
"""
ret = df.apply(lambda row: getAirline(row), axis=1)
return ret
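# A small round-trip sketch (hypothetical frame; assumes every airline in the hard-coded
# list above appears as a dummy column, e.g. after pd.get_dummies with an empty prefix):
#
#   dummies = pd.get_dummies(raw_df, columns=['Airline'], prefix='', prefix_sep='')
#   raw_df['Airline_recovered'] = get_airline_from_dummies(dummies)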
|
SL477/Predicting_future_trends_in_airline_profitability
|
data/notebookhelp/getAirlineFromDummies.py
|
getAirlineFromDummies.py
|
py
| 1,013 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34381391905
|
"""
Compile QEMU Version 5.1.0 or newer. 5.1.0 is when AVR support was introduced.
.. code-block:: console
$ wget https://download.qemu.org/qemu-6.1.0.tar.xz
$ tar xvJf qemu-6.1.0.tar.xz
$ cd qemu-6.1.0
$ ./configure --target-list="avr-softmmu"
$ make -j $(($(nproc)*4))
Change directory to this file's parent directory and run using unittest
.. code-block:: console
$ cd python/cmd_msg_test/
$ python -u -m unittest discover -v
test_connect (test_cmd_msg.TestSerial) ... qemu-system-avr: -chardev socket,id=serial_port,path=/tmp/tmpuuq3oqvj/socket,server=on: info: QEMU waiting for connection on: disconnected:unix:/tmp/tmpuuq3oqvj/socket,server=on
reading message from arduino
b''
b''
qemu-system-avr: terminating on signal 2 from pid 90395 (python)
ok
----------------------------------------------------------------------
Ran 1 test in 4.601s
OK
"""
import os
import sys
import signal
import pathlib
import tempfile
import unittest
import subprocess
import contextlib
import dataclasses
import main
# top level directory in this git repo is three levels up
REPO_ROOT = pathlib.Path(__file__).parents[2].resolve()
@contextlib.contextmanager
def start_qemu(bios):
with tempfile.TemporaryDirectory() as tempdir:
socket_path = pathlib.Path(tempdir, "socket")
qemu_cmd = [
"qemu-system-avr",
"-mon",
"chardev=none",
"-chardev",
f"null,id=none",
"-serial",
"chardev:serial_port",
"-chardev",
f"socket,id=serial_port,path={socket_path},server=on",
"-nographic",
"-machine",
"arduino-uno",
"-cpu",
"avr6-avr-cpu",
"-bios",
str(bios),
]
qemu_proc = subprocess.Popen(qemu_cmd, start_new_session=True)
serial_port_path = pathlib.Path(tempdir, "ttyACM0")
socat_cmd = [
"socat",
f"PTY,link={serial_port_path},rawer,wait-slave",
f"UNIX:{socket_path}",
]
socat_proc = subprocess.Popen(socat_cmd, start_new_session=True)
try:
while not serial_port_path.exists():
pass
yield str(serial_port_path)
finally:
# Kill the whole process group (for problematic processes like qemu)
os.killpg(qemu_proc.pid, signal.SIGINT)
os.killpg(socat_proc.pid, signal.SIGINT)
qemu_proc.wait()
socat_proc.wait()
class RunQEMU(unittest.TestCase):
"""
Base class which will start QEMU to emulate an Arduino Uno machine using the
BIOS (the .elf output of arduino-cli compile) provided.
qemu-system-avr from QEMU Version 5.1.0 or newer is required.
Starts a new virtual machine for each test_ function.
"""
BIOS = REPO_ROOT.joinpath("build", "serial_cmd_test.ino.elf")
def setUp(self):
self.qemu = start_qemu(self.BIOS)
        # __enter__ is called at the beginning of a `with` block. __exit__ is
# called at the end of a `with` block. By calling these functions
# explicitly within setUp() and tearDown() we ensure a new VM is created
# and destroyed each time.
self.serial_port = self.qemu.__enter__()
def tearDown(self):
self.qemu.__exit__(None, None, None)
del self.qemu
class TestSerial(RunQEMU, unittest.TestCase):
def test_connect(self):
main.main(self.serial_port)
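# start_qemu() can also be used outside the unittest harness (sketch; assumes the same
# compiled BIOS exists at build/serial_cmd_test.ino.elf):
#
#   if __name__ == "__main__":
#       with start_qemu(REPO_ROOT.joinpath("build", "serial_cmd_test.ino.elf")) as port:
#           main.main(port)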
|
sedihglow/braccio_robot_arm
|
python/archive/QEMU_arduino_serial_testing/test_cmd_msg.py
|
test_cmd_msg.py
|
py
| 3,529 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12449590446
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class TipoVehiculo(models.Model):
cod_vehiculo = models.CharField(max_length=100, null=False,verbose_name="Código vehículo")
tipo_vehiculo = models.CharField(max_length=100, null=False,verbose_name="Tipo vehículo")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Tipo de vehículo'
verbose_name_plural = 'Tipos de vehículos'
ordering = ['-creado_el']
def __str__(self):
return self.tipo_vehiculo
class TipoPoliza(models.Model):
cod_poliza = models.CharField(max_length=100, null=False,verbose_name="Código póliza")
tipo_poliza = models.CharField(max_length=100, null=False,verbose_name="Tipo póliza")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Tipo de póliza'
verbose_name_plural = 'Tipos de póliza'
ordering = ['-creado_el']
def __str__(self):
return self.tipo_poliza
class TipoSiniestro(models.Model):
cod_siniestro= models.CharField(max_length=100, null=False,verbose_name="Código siniestro")
tipo_siniestro = models.CharField(max_length=100, null=False,verbose_name="Tipo siniestro")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Tipo Siniestro'
verbose_name_plural = 'Tipos de siniestro'
ordering = ['-creado_el']
def __str__(self):
return self.tipo_siniestro
class Marca(models.Model):
cod_marca= models.CharField(max_length=100, null=False,verbose_name="Código marca")
marca = models.CharField(max_length=100, null=False,verbose_name="Marca")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Marca'
verbose_name_plural = 'Marcas'
ordering = ['marca']
def __str__(self):
return self.marca
class ModeloVehiculo(models.Model):
marca_modelo = models.OneToOneField(Marca,null=False,on_delete= models.CASCADE)
#tipo_modelo = models.OneToOneField(TipoVehiculo,null=False,on_delete= models.CASCADE)
cod_modelo= models.CharField(max_length=100, null=False,verbose_name="Código modelo")
modelo = models.CharField(max_length=100, null=False,verbose_name="Modelo vehículo")
creado_el = models.DateTimeField(auto_now_add=True, verbose_name='Creado el')
class Meta:
verbose_name = 'Modelo vehículo'
verbose_name_plural = 'Modelos de vehículo'
ordering = ['modelo']
def __str__(self):
return self.modelo
class TablaSiniestros(models.Model):
tipo_de_siniestro = models.OneToOneField(TipoSiniestro,null=False,on_delete=models.CASCADE,default="")
tipo_de_poliza = models.OneToOneField(TipoPoliza,null=False,on_delete=models.CASCADE,default="")
nombre_marca = models.OneToOneField(Marca,null=False,on_delete=models.CASCADE,default="")
nombre_modelo = models.OneToOneField(ModeloVehiculo,null=False,on_delete=models.CASCADE,default="")
tipo_de_vehiculo = models.OneToOneField(TipoVehiculo,null=False,on_delete=models.CASCADE,default="")
nombre_conductor = models.CharField(max_length=100, null=False, verbose_name="Nombre conductor(a)")
apellido_conductor = models.CharField(max_length=100, null=False, verbose_name="Apellido conductor(a)")
edad_conductor = models.IntegerField(null=False, verbose_name="Edad conductor(a)",default="18")
rut_conductor = models.CharField(max_length=100, null=False, verbose_name="Rut conductor(a)")
fecha_siniestro = models.DateField(null=False, verbose_name="Fecha siniestro")
fecha_registro = models.DateTimeField(auto_now_add=True, verbose_name='Fecha registro')
descripcion_siniestro = models.TextField(max_length=300,null=False,verbose_name="Descripción siniestro")
class Meta:
verbose_name = 'Siniestro'
verbose_name_plural = 'BD Siniestros'
ordering = ['rut_conductor']
def __str__(self):
return self.rut_conductor
INGRESADO= 'Ingresado'
APROBADO = 'Aprobado'
EN_REPARACION = 'En reparación'
EN_ENTREGA = 'En entrega'
CERRADA = 'Cerrada'
INCIDENCIA = 'Incidencia'
ESTADO_CHOICES = (
(INGRESADO,INGRESADO),
(APROBADO,APROBADO),
(EN_REPARACION,EN_REPARACION),
(EN_ENTREGA,EN_ENTREGA),
(CERRADA,CERRADA),
(INCIDENCIA,INCIDENCIA),
)
class TablaDeSiniestros(models.Model):
usuario = models.ForeignKey(
User,
on_delete=models.CASCADE,
null=True,
blank=True)
tipo_de_siniestro = models.ForeignKey(TipoSiniestro,blank=False,on_delete=models.CASCADE,verbose_name="Tipo de siniestro",default="")
tipo_de_modelo = models.ForeignKey(ModeloVehiculo,blank=False,on_delete=models.CASCADE, verbose_name="Modelo vehículo",default="")
nombre_marca = models.ForeignKey(Marca,blank=False,on_delete=models.CASCADE,verbose_name="Marca vehículo",default="")
tipo_de_vehiculo = models.ForeignKey(TipoVehiculo,blank=False,on_delete=models.CASCADE,verbose_name="Tipo de vehículo",default="")
nombre_conductor = models.CharField(max_length=100, blank=False, verbose_name="Nombre conductor(a)",default="")
apellido_conductor = models.CharField(max_length=100, blank=False, verbose_name="Apellido conductor(a)",default="")
edad_conductor = models.IntegerField(blank=False, verbose_name="Edad conductor(a)",default=18)
rut_conductor = models.CharField(max_length=100, blank=False, verbose_name="Rut conductor(a)",default="")
tipo_de_poliza = models.ForeignKey(TipoPoliza,blank=False,on_delete=models.CASCADE,verbose_name="Póliza contratada",default="")
fecha_siniestro = models.DateField(auto_now_add=False,auto_now=False, null=False, blank=False, verbose_name="Fecha siniestro",default=timezone.now)
descripcion_siniestro = models.TextField(max_length=300,blank=False, null=False,verbose_name="Descripción siniestro",default="")
fecha_registro = models.DateTimeField(auto_now_add=True, verbose_name='Fecha registro')
updated_at = models.DateField(auto_now=True,verbose_name="Última actualización")
estado_siniestro = models.CharField(max_length=50,choices=ESTADO_CHOICES,default=INGRESADO)
class Meta:
verbose_name = 'Ingreso de siniestro'
verbose_name_plural = 'Ingreso de siniestros'
#ordering = ['tipo_de_siniestro']
def __str__(self):
datos = f'{self.nombre_conductor} {self.apellido_conductor} / Rut {self.rut_conductor}'
return datos
ADMIN = 'Administrador'
LIQUIDADOR = 'Liquidador'
CLIENTE = 'Cliente'
USUARIO_CHOICES = (
(ADMIN,ADMIN),
(LIQUIDADOR,LIQUIDADOR),
(CLIENTE,CLIENTE),
)
class DataBaseUsuarios(models.Model):
tipo_usuario = models.CharField(max_length=50,choices=USUARIO_CHOICES)
nombre_usuario = models.CharField(max_length=50,blank=False,default='')
apellido_usuario = models.CharField(max_length=50,blank=False,default='')
cargo_usuario = models.CharField(max_length=50,blank=False,default='')
mail_usuario = models.EmailField(max_length=100, default='@liquidaya.com')
class Meta:
ordering = ['tipo_usuario','apellido_usuario']
def __str__(self):
nombre_completo = f'{self.apellido_usuario}, {self.nombre_usuario} / {self.tipo_usuario} / {self.cargo_usuario}'
return nombre_completo
class FormularioContacto(models.Model):
nombre_contacto = models.CharField(max_length=100,verbose_name="Nombre")
apellido_contacto = models.CharField(max_length=100,verbose_name="Apellido")
email_contacto = models.EmailField(max_length=100,verbose_name="Email")
texto_contacto = models.TextField(max_length=400,verbose_name="Mensaje")
def __str__(self):
return self.email_contacto
|
xpilasi/segundo_liquidadora
|
web_base/models.py
|
models.py
|
py
| 8,166 |
python
|
es
|
code
| 0 |
github-code
|
6
|
69860227068
|
from igraph import *
import csv
from concurrent import futures
f_input_ncol='/tmp/socgraph/cnn_comment_yearweek.ncol'
f_output_graphml='/tmp/socgraph/output/week%d.graphml'
'''
This snippet:
 + reads in a "big graph" in which edges are created at different times (different weeks)
 + partitions the big graph into smaller subgraphs by week
 + for each subgraph, finds the components and the clusters within each component
The input is f_input_ncol (ncol format).
The output: each week's subgraph is stored in one file (graphml format).
'''
def main():
#read input(social graph, edge's weight is interpreted as the week in which the edge is created)
g= Graph.Read_Ncol(f_input_ncol,directed=False)
weeks={int(e['weight']) for e in g.es}
minweek=min(weeks)
maxweek=max(weeks)
args=((g,week) for week in range(minweek,maxweek+1))
with futures.ThreadPoolExecutor(max_workers=64) as executor:
for i in executor.map(worker_clustering, args):
pass
def worker_clustering(arg):
(g, week)=arg
es=[e for e in g.es if e['weight']==week]
if len(es)==0: return
g=g.subgraph_edges(es)
g.simplify()
doCompoClustering(g)
g.write_graphml(f_output_graphml%int(week))
def doCompoClustering(g,compo_prop='compo',clust_prop='clust'):
vertices={v['name']:v for v in g.vs}
g.simplify()
compos=g.components().subgraphs()
for i,compo in enumerate(compos):
for v in compo.vs:
v[compo_prop]=i
clusts=doClustering(compo)
for j,vs in enumerate(clusts):
for v in vs:
vertices[v['name']][compo_prop]=i
vertices[v['name']][clust_prop]=j
def doClustering(g):
g.to_undirected()
clusts=[]
for clust in g.community_fastgreedy().as_clustering().subgraphs():
clusts.append([v for v in clust.vs])
return clusts
if __name__=='__main__':
main()
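# The expected input is an ncol edge list, one edge per line, with the week number stored
# as the edge weight (illustrative lines only, not real data):
#
#   user_a user_b 23
#   user_a user_c 23
#   user_d user_b 24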
|
pdphuong/soclust
|
bigobject/socgraph_clustering.py
|
socgraph_clustering.py
|
py
| 1,766 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23982957449
|
"""
[Week 1 - Session 1]
Problem #1 Reverse a String
"""
"""
UNDERSTAND:
-Is the user input always going to be a string?
 -> Assume that it is for now
-An empty string should just print out an empty string
MATCH:
-Index the string from the back and concatenate
PLAN:
-Create a new empty string
-Use a simple for loop that indexes from the last index of the string
-Add each character as the for loop reads from the back
-Return the new string
"""
#Implement
#Solution 1: O(n^2) runtime
def reverse1(string):
reversed = ""
for i in range(len(string)-1,-1,-1):
reversed+=string[i]
return reversed
#Solution 2: O(n) runtime using an array and two pointers
def reverse2(string):
arr = list(string)
first, last = 0, len(string)-1
while(first<last):
temp = arr[first]
arr[first] = arr[last]
arr[last] = temp
first, last = first+1, last-1
return "".join(arr)
def main():
print(reverse2(input("Enter a string: ")))
main()
|
ryder0705/codepath
|
week1/StringReverse.py
|
StringReverse.py
|
py
| 990 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31841821973
|
from rest_framework.views import APIView
from app.super_admin.controller import SuperAdminController
from common.django_utility import send_response
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
from dateutil.parser import parse
superAdminController = SuperAdminController()
class SuperAdminView(APIView):
authentication_classes = [JWTAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request):
response_data = []
description = None
total_records = None
try:
exported_date = None
if request.query_params.get('exported_date'):
exported_date = parse(request.query_params.get('exported_date'))
response_data = superAdminController.export(exported_date)
exception_occured = False
except Exception as error_msg:
description = error_msg
exception_occured = True
finally:
return send_response(exception_occured=exception_occured, custom_description=description, request=request, total_records=total_records, response_data=response_data)
|
ayush431/m_56studios
|
m56studios_be/app/super_admin/views.py
|
views.py
|
py
| 1,194 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41661344170
|
def parse_args(args, opts):
    """For each option in opts (either a single flag string or a list of aliases),
    collect the argument values that immediately follow any of its flags in args."""
    values = [ [] for opt in opts ]
flat_opts = []
for opt in opts:
if isinstance(opt,list):
flat_opts.extend(opt)
else:
flat_opts.append(opt)
for arg_number, arg in enumerate(args):
for opt_number, opt in enumerate(opts):
if arg in opt and len(args) > arg_number+1 and args[arg_number+1] not in flat_opts:
values[opt_number].append(args[arg_number+1])
return values
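# A short illustration of the pairing behaviour (hypothetical flags, not from the project):
#
#   >>> parse_args(['-i', 'notes.tex', '-o', 'out', '--input', 'extra.tex'],
#   ...            [['-i', '--input'], '-o'])
#   [['notes.tex', 'extra.tex'], ['out']]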
|
nmaxwell/pyTexNotes
|
tools.py
|
tools.py
|
py
| 512 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7171062634
|
#Answer to Set .add()
n = int(input())
a = set()
for i in range(n):
a.add(input())
print(len(a))
"""
>>> s = set('HackerRank')
>>> s.add('H')
>>> print s
set(['a', 'c', 'e', 'H', 'k', 'n', 'r', 'R'])
>>> print s.add('HackerRank')
None
>>> print s
set(['a', 'c', 'e', 'HackerRank', 'H', 'k', 'n', 'r', 'R'])
"""
|
CompetitiveCode/hackerrank-python
|
Practice/Sets/Set .add().py
|
Set .add().py
|
py
| 316 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44344352975
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
from heapq import heappush, heappop
class Solution:
def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
        # Push every node's value onto a min-heap, then pop values in sorted
        # order to rebuild a single merged list (O(N log N) overall).
        heap = []
for i in range(len(lists)):
while lists[i]:
heappush(heap, lists[i].val)
lists[i] = lists[i].next
head = merged = ListNode()
while heap:
merged.next = ListNode(heappop(heap))
merged = merged.next
return head.next
|
nayeonkinn/algorithm
|
leetcode/[Hard] 23. Merge k Sorted Lists.py
|
[Hard] 23. Merge k Sorted Lists.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23055518013
|
"""
Creation:
Author: Martin Grunnill
Date: 2023-01-25
Description: Derivation of Basic Reproductive Number (R0) and beta given R0 for model described in manuscript.
For methods see:
Diekmann, O., Heesterbeek, J. A. P., & Roberts, M. G. (2010). The construction of next-generation matrices
for compartmental epidemic models. Journal of the Royal Society Interface, 7(47), 873–885.
https://doi.org/10.1098/rsif.2009.0386
"""
import sympy
all_params = ['epsilon_1', 'gamma_A_1', 'p_s', 'gamma_I_1', 'epsilon_2', 'gamma_I_2', 'alpha',
'p_h_s', 'epsilon_H', 'epsilon_3', 'N', 'theta', 'gamma_A_2', 'gamma_H', 'beta']
all_states = ['S', 'E', 'G_I', 'G_A', 'P_I', 'P_A', 'M_H', 'M_I', 'M_A', 'F_H', 'F_I', 'F_A', 'R']
for list_of_symbols in [all_params, all_states]:
for symbol in list_of_symbols:
exec(symbol + ' = sympy.symbols("'+symbol +'")')
odes = sympy.Matrix([[R*alpha - S*beta*(F_A*theta + F_I + M_A*theta + M_H + M_I + P_A*theta + P_I*theta)/N],
[-E*epsilon_1*p_s - E*epsilon_1*(1 - p_s) + S*beta*(F_A*theta + F_I + M_A*theta + M_H + M_I + P_A*theta + P_I*theta)/N],
[E * epsilon_1 * (1 - p_s) - G_A * epsilon_2],
[E*epsilon_1*p_s - G_I*epsilon_2],
[G_A*epsilon_2 - P_A*epsilon_3],
[G_I * epsilon_2 - P_I * epsilon_3 * p_h_s - P_I * epsilon_3 * (1 - p_h_s)],
[-M_A*gamma_A_1 + P_A*epsilon_3],
[-M_I*gamma_I_1 + P_I*epsilon_3*(1 - p_h_s)],
[-M_H*epsilon_H + P_I*epsilon_3*p_h_s],
[-F_A * gamma_A_2 + M_A * gamma_A_1],
[-F_I * gamma_I_2 + M_I * gamma_I_1],
[-F_H*gamma_H + M_H*epsilon_H],
[F_A*gamma_A_2 + F_H*gamma_H + F_I*gamma_I_2 - R*alpha]])
infecteds = sympy.Matrix(odes[1:-1])
infecteds = infecteds.subs(S, N)
infecteds_jacobian = infecteds.jacobian(X=[E,
G_A, G_I,
P_A, P_I,
M_A, M_I, M_H,
F_A, F_I, F_H
])
# e.g. removing people becoming infected from the jacobian above.
Sigma = infecteds_jacobian.subs(beta, 0)
Sigma
# Obtaining the matrix of transmissions into infectious stages (T)
# e.g. removing the transition terms from the jacobian above.
# Suggest not using T as a variable name, since it could be confused with the transpose of a matrix.
T_inf_births_subs = {eval(param):0
for param in all_params
if param not in ['beta', 'theta', 'kappa']}
T_inf_births = infecteds_jacobian.subs(T_inf_births_subs)
T_inf_births
# Obtaining the Next Generation Matrix
Sigma_inv = Sigma**-1 # note for powers in python it is ** not ^.
neg_Sigma_inv = -Sigma_inv
K_L = T_inf_births*neg_Sigma_inv
K_L
# Finally the Basic Reproductive Number
eigen_values = K_L.eigenvals()
eigen_values
none_zero_eigen_values = [item for item in eigen_values.keys() if item !=0]
eq_R0 = none_zero_eigen_values[0]
#%%
eq_R0 = sympy.simplify(eq_R0)
#%%
# Deriving beta
R0 = sympy.symbols('R0')
eq_R0 = sympy.Eq(eq_R0, R0)
beta_eq = sympy.solve(eq_R0, beta)
beta_eq = beta_eq[0]
#%%
beta_eq = sympy.simplify(beta_eq)
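# The computation above follows the standard next-generation-matrix recipe
# (Diekmann et al. 2010): with T the transmission matrix and Sigma the transition
# matrix, K_L = T * (-Sigma)**-1 and R0 is the dominant eigenvalue of K_L.
# A scalar sanity check (a hypothetical SIR-like reduction, not this model):
# if T = Matrix([[beta]]) and Sigma = Matrix([[-gamma]]), then
# K_L = Matrix([[beta/gamma]]) and R0 = beta/gamma.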
|
LIAM-COVID-19-Forecasting/Modelling-Disease-Mitigation-at-Mass-Gatherings-A-Case-Study-of-COVID-19-at-the-2022-FIFA-World-Cup
|
meta_population_models/reproductive_numbers/MGE_single_population_derivation.py
|
MGE_single_population_derivation.py
|
py
| 3,417 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5584993015
|
import asyncio
import httpx
from .._utils import chunk_file, format_locations
from .._exceptions import UploadException, LocationRetrieveException
class ConcatenateUploader:
default_concatenate_headers = {
"Tus-Resumable": "1.0.0",
"Upload-Concat": "partial",
}
default_chunk_size = 4 * 1024 * 1024
def __init__(self, client):
self.client = client
def get_creation_concatenate_headers(self, upload_length):
headers = {
"Upload-Length": str(upload_length),
**self.default_concatenate_headers,
}
return headers
def get_upload_concatenate_headers(self, content_length):
headers = {
"Upload-Offset": "0",
"Content-Length": str(content_length),
"Content-Type": "application/offset+octet-stream",
**self.default_concatenate_headers,
}
return headers
def get_concatenate_headers(self, *location_urls):
_location_urls = iter(location_urls)
return {
"Upload-Concat": format_locations(*location_urls),
}
async def get_location(self, upload_url, headers=None):
response: httpx.Response = await self.client.post(
upload_url, headers=headers or {}
)
if not response.is_success:
raise LocationRetrieveException(response.text)
return response.headers["location"]
async def upload_chunk(self, chunk, upload_url):
_chunk_len = len(chunk)
creation_headers = self.get_creation_concatenate_headers(_chunk_len)
location = await self.get_location(upload_url, headers=creation_headers)
concatenate_headers = self.get_upload_concatenate_headers(_chunk_len)
response = await self.client.patch(
location, data=chunk, headers=concatenate_headers
)
if not response.is_success:
raise UploadException(response.text)
return location, response
async def upload_chunks(self, fp, upload_url, chunk_size=None):
chunk_size = chunk_size or self.default_chunk_size
tasks = [
self.upload_chunk(
chunk,
upload_url,
)
for chunk in chunk_file(fp, chunk_size=chunk_size)
]
results = await asyncio.gather(*tasks)
summary = dict(results)
failures = [res for res in summary.values() if not res.is_success]
if failures:
raise UploadException()
return summary
async def perform_concatenate(self, upload_url, *locations):
headers = {
**self.default_concatenate_headers,
**self.get_concatenate_headers(*locations),
}
location = await self.get_location(upload_url, headers=headers)
return location
async def upload(self, fp, upload_url, chunk_size=None):
self.client.timeout.write = None
summary = await self.upload_chunks(fp, upload_url, chunk_size=chunk_size)
locations = summary.keys()
location = await self.perform_concatenate(upload_url, *locations)
return location
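# A rough usage sketch (assumed endpoint URL; `client` is expected to be an
# httpx.AsyncClient, or any object exposing the same async post/patch interface
# and a mutable `timeout`):
#
#   async def demo() -> str:
#       async with httpx.AsyncClient(timeout=httpx.Timeout(30.0)) as client:
#           uploader = ConcatenateUploader(client)
#           with open("video.bin", "rb") as fp:
#               return await uploader.upload(fp, "https://tus.example.com/files/")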
|
LesPrimus/aiotusx
|
aiotusx/_uploaders/concatenate.py
|
concatenate.py
|
py
| 3,146 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29834597841
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 16:16:07 2021
@author: chomi
"""
from detecto import core, utils, visualize
import os
model = core.Model.load('new_model.pth', ['0_','1_', '2_','3_','4_'])
directory = 'C:/Users/chomi/Desktop/test/'
for filename in os.listdir(directory):
if(filename.endswith(".png")): # or '.jpg'
img = utils.read_image(directory + filename)
pred = model.predict(img)
lbl, box, score = pred
visualize.show_labeled_image(img, box, lbl)
print("score: ", score)
|
Mcful123/auto_training
|
example.py
|
example.py
|
py
| 557 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37319818
|
from IPython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
import mamo
@magics_class
class MamoMagics(Magics):
@cell_magic
@magic_arguments()
@argument("name", type=str, default=None, help="Name of the cell.")
def mamo(self, line, cell_code):
"""mamo cell wrapper, only tracks global stores!"""
assert isinstance(line, str)
assert isinstance(cell_code, str)
args = parse_argstring(self.mamo, line)
mamo.run_cell(args.name, cell_code, self.shell.user_ns)
get_ipython().register_magics(MamoMagics)
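# In a notebook this registers a %%mamo cell magic; a hypothetical cell would look like:
#
#   %%mamo training_cell
#   model = train(dataset)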
|
BlackHC/mamo
|
mamo/support/ipython.py
|
ipython.py
|
py
| 678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10876951886
|
import yaml
import json
import os
import subprocess
class MLOps(object):
spool_dir = "/tmp/ta"
agent_dir = "/opt/mlops-agent"
mlops_dir_name = "datarobot_mlops_package-8.1.2"
total_dir_path = agent_dir + "/" + mlops_dir_name
def __init__(self, api_token, path):
self.token = api_token
if os.path.exists(path):
with open(path) as f:
mlops_config = json.load(f)
self.endpoint = mlops_config['datarobot_mlops_service_url']
self.model_id = mlops_config['model_id']
self.deployment_id = mlops_config['deployment_id']
self.mlops_name = mlops_config.get('mlops_dir_name', 'datarobot_mlops_package-8.1.2')
if "MLOPS_SERVICE_URL" in os.environ:
self.endpoint = os.environ['MLOPS_SERVICE_URL']
if "MODEL_ID" in os.environ:
self.model_id = os.environ['MODEL_ID']
if "DEPLOYMENT_ID" in os.environ:
self.deployment_id = os.environ['DEPLOYMENT_ID']
if not os.path.exists(self.agent_dir):
raise Exception("environment is not configured for mlops.\nPlease select a valid mlops enabled environment.")
if self.endpoint is None:
raise Exception("'no endpoint found, please add 'MLOPS_SERVICE_URL' environment variable, or create an "
"mlops.json file")
if self.model_id is None:
raise Exception("no model_id found, please add 'MODEL_ID' environment variable, or create an mlops.json "
"file")
if self.deployment_id is None:
raise Exception("no deployment_id found, please add 'DEPLOYMENT_ID' environment variable, or create an "
"mlops.json file")
def init(self):
os.environ['MLOPS_DEPLOYMENT_ID'] = self.deployment_id
os.environ['MLOPS_MODEL_ID'] = self.model_id
os.environ['MLOPS_SPOOLER_TYPE'] = "FILESYSTEM"
os.environ['MLOPS_FILESYSTEM_DIRECTORY'] = self.spool_dir
with open(self.total_dir_path + '/conf/mlops.agent.conf.yaml') as f:
documents = yaml.load(f, Loader=yaml.FullLoader)
documents['mlopsUrl'] = self.endpoint
documents['apiToken'] = self.token
with open(self.total_dir_path + '/conf/mlops.agent.conf.yaml', 'w') as f:
yaml.dump(documents, f)
subprocess.call(self.total_dir_path + '/bin/start-agent.sh')
check = subprocess.Popen([self.total_dir_path + '/bin/status-agent.sh'], stdout=subprocess.PIPE)
output = check.stdout.readlines()[0]
check.terminate()
if b"DataRobot MLOps-Agent is running as a service." in output:
return True
else:
raise Exception(output)
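# A minimal usage sketch (the token and config path below are placeholders, not real values):
#
#   mlops = MLOps(api_token="<DATAROBOT_API_TOKEN>", path="mlops.json")
#   mlops.init()   # writes the agent config, starts the agent and verifies it is running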
|
algorithmiaio/algorithmia-adk-python
|
adk/mlops.py
|
mlops.py
|
py
| 2,766 |
python
|
en
|
code
| 6 |
github-code
|
6
|
39441351801
|
import re
import spacy
from bpemb import BPEmb
from mlearn import base
from string import punctuation
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.classes.preprocessor import TextPreProcessor
class Preprocessors(object):
"""A class to contain preprocessors and wrap preprocessing functions and their requirements."""
def __init__(self, liwc_dir: str = None):
"""Initialise cleaner class."""
self.tagger = spacy.load('en_core_web_sm', disable = ['ner', 'parser'])
self.liwc_dict = None
self.slurs = None
self.slur_window = None
if liwc_dir is None:
self.liwc_path = None
else:
self.liwc_path = liwc_dir + 'liwc-2015.csv'
def select_experiment(self, exp: str, slur_window: int = None) -> base.Callable:
"""
Select experiment to run.
:exp (str): The experiment to run.
        :returns experiment: Return the experiment to run.
"""
if exp == 'word':
experiment = self.word_token
elif exp == 'liwc':
experiment = self.compute_unigram_liwc
elif exp in ['ptb', 'pos']:
experiment = self.ptb_tokenize
elif exp == 'length':
experiment = self.word_length
elif exp == 'syllable':
experiment = self.syllable_count
elif exp == 'slur':
self.slur_window = slur_window
experiment = self.slur_replacement
return experiment
def word_length(self, doc: base.DocType) -> base.List[int]:
"""
Represent sentence as the length of each token.
:doc (base.DocType): Document to be processed.
:returns: Processed document.
"""
return [len(tok) for tok in doc]
def syllable_count(self, doc: base.DocType) -> base.List[int]:
"""
Represent sentence as the syllable count for each word.
:doc (base.DocType): Document to be processed.
:returns: Processed document.
"""
return [self._syllable_counter(tok) for tok in doc]
def _syllable_counter(self, tok: str) -> int:
"""
Calculate syllables for each token.
:tok (str): The token to be analyzed.
:returns count (int): The number of syllables in the word.
"""
count = 0
vowels = 'aeiouy'
exceptions = ['le', 'es', 'e']
prev_char = '<s>'
for i, char in enumerate(tok):
            if i == len(tok) - 1 and (prev_char + char in exceptions or char in exceptions):
                continue  # silent ending (e.g. final 'e'), don't count it as a syllable
if (char in vowels) and (prev_char not in vowels and char != prev_char):
count += 1
prev_char = char
return count
def load_slurs(self):
"""Load slurs file."""
self.slurs = None
# TODO Update this with a slur list
def slur_replacement(self, doc: base.DocType):
"""
Produce documents where slurs are replaced.
:doc (base.List[str]): Document to be processed.
:returns doc: processed document
"""
if self.slurs is None:
self.slurs = self.load_slurs()
slur_loc = [i for i, tok in enumerate(doc) if tok in self.slurs]
pos = [tok for tok in self.tagger(" ".join(doc))]
for ix in slur_loc: # Only look at the indices where slurs exist
min_ix = 0 if ix - self.slur_window < 0 else ix - self.slur_window
max_ix = len(doc) - 1 if ix + self.slur_window > len(doc) - 1 else ix + self.slur_window
for i in range(min_ix, max_ix, 1): # Do replacements within the window
doc[i] = pos[i]
return doc
def word_token(self, doc: base.DocType) -> base.DocType:
"""
Produce word tokens.
:doc (base.List[str]): Document to be processed.
:returns: processed document
"""
return doc
def ptb_tokenize(self, document: base.DocType, processes: base.List[str] = None) -> base.DocType:
"""
Tokenize the document using SpaCy, get PTB tags and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
self.processes = processes if processes else self.processes
toks = [tok.tag_ for tok in self.tagger(" ".join(document))]
return toks
def read_liwc(self) -> dict:
"""Read LIWC dict."""
with open(self.liwc_path, 'r') as liwc_f:
liwc_dict = {}
for line in liwc_f:
k, v = line.strip('\n').split(',')
if k in liwc_dict:
liwc_dict[k] += [v]
else:
liwc_dict.update({k: [v]})
return liwc_dict
def _compute_liwc_token(self, tok: str, kleene_star: base.List[str]) -> str:
"""
Compute LIWC categories for a given token.
:tok (str): Token to identify list of.
:kleen_star: List of kleen_start tokens.
:returns (str): Token reprented in terms of LIWC categories.
"""
if tok in self.liwc_dict:
term = self.liwc_dict[tok]
else:
liwc_cands = [r for r in kleene_star if tok.startswith(r)]
num_cands = len(liwc_cands)
if num_cands == 0:
term = 'NUM' if re.findall(r'[0-9]+', tok) else 'UNK'
elif num_cands == 1:
term = liwc_cands[0] + '*'
elif num_cands > 1:
sorted_cands = sorted(liwc_cands, key=len, reverse = True) # Longest first
term = sorted_cands[0] + '*'
if term not in ['UNK', 'NUM']:
liwc_term = self.liwc_dict[term]
if isinstance(liwc_term, list):
term = "_".join(liwc_term)
else:
term = liwc_term
if isinstance(term, list):
term = "_".join(term)
return term
def compute_unigram_liwc(self, doc: base.DocType) -> base.DocType:
"""
Compute LIWC for each document document.
:doc (base.DocType): Document to operate on.
:returns liwc_doc (base.DocType): Document represented as LIWC categories.
"""
if not self.liwc_dict:
self.liwc_dict = self.read_liwc()
kleene_star = [k[:-1] for k in self.liwc_dict if k[-1] == '*']
parse_doc = []
doc = doc.split() if isinstance(doc, str) else doc
for w in doc:
if all(c in punctuation for c in w):
parse_doc.append(w)
elif any(c in punctuation for c in w):
parse_doc.append(w.strip(punctuation))
else:
parse_doc.append(w)
liwc_doc = [self._compute_liwc_token(tok, kleene_star) for tok in parse_doc]
assert(len(liwc_doc) == len(parse_doc))
return liwc_doc
# TODO Othering language:
# Parse the document to see if there are us/them, we/them/ i/you
# Consider looking at a window that are 2-5 words before/after a slur.
class Cleaner(object):
"""A class for methods for cleaning."""
def __init__(self, processes: base.List[str] = None, ekphrasis_base: bool = False):
"""
Initialise cleaner class.
:processes (base.List[str]): Cleaning operations to be taken.
        :ekphrasis_base (bool, default = False): Use ekphrasis to pre-process data in cleaner.
"""
self.processes = processes if processes is not None else []
self.tagger = spacy.load('en_core_web_sm', disable = ['ner', 'parser', 'textcats'])
self.bpe = BPEmb(lang = 'en', vs = 200000).encode
self.ekphrasis_base = ekphrasis_base
self.ekphrasis = None
self.liwc_dict = None
def clean_document(self, text: base.DocType, processes: base.List[str] = None, **kwargs):
"""
Clean document.
:text (types.DocType): The document to be cleaned.
:processes (List[str]): The cleaning processes to be undertaken.
:returns cleaned: Return the cleaned text.
"""
if processes is None:
processes = self.processes
cleaned = str(text)
if 'lower' in processes:
cleaned = cleaned.lower()
if 'url' in processes:
cleaned = re.sub(r'https?:/\/\S+', 'URL', cleaned)
if 'hashtag' in processes:
cleaned = re.sub(r'#[a-zA-Z0-9]*\b', 'HASHTAG', cleaned)
if 'username' in processes:
cleaned = re.sub(r'@\S+', 'USER', cleaned)
return cleaned
def tokenize(self, document: base.DocType, processes: base.List[str] = None, **kwargs):
"""
Tokenize the document using SpaCy and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
toks = [tok.text for tok in self.tagger(self.clean_document(document, processes = processes, **kwargs))]
return toks
def bpe_tokenize(self, document: base.DocType, processes: base.List[str] = None, **kwargs):
"""
Tokenize the document using BPE and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
toks = self.bpe(self.clean_document(document, processes = processes, **kwargs))
return toks
def _load_ekphrasis(self, annotate: set, filters: base.List[str] = None, normalize: base.List[str] = None,
segmenter: str = 'twitter', corrector: str = 'twitter', hashtags: bool = False,
contractions: bool = True, elong_spell: bool = False,
**kwargs) -> None:
"""
Set up ekphrasis tokenizer.
:annotate (set): Set of annotations to use (controls corrections).
:filters (base.List[str], default = None): List of tokens to remove from documents.
:normalize (base.List[str], default = None): List of normalisations.
:segmenter (str, default = 'twitter'): Choose which ekphrasis segmenter to use.
:corrector (str, default = 'twitter'): Choose which ekphrasis spell correction to use.
:hashtags (bool, default = False): Unpack hashtags into multiple tokens (e.g. #PhDLife -> PhD Life).
:contractions (bool, default = True): Unpack contractions into multiple tokens (e.g. can't -> can not)
        :elong_spell (bool, default = False): Spell correct elongations.
"""
self.ekphrasis = TextPreProcessor(normalize = normalize if normalize is not None else [],
annotate = annotate,
fix_html = True,
segmenter = segmenter,
corrector = corrector,
unpack_hashtags = hashtags,
unpack_contractions = contractions,
spell_correct_elong = elong_spell,
tokenizer = SocialTokenizer(lowercase = True).tokenize)
self.filters = filters
def _filter_ekphrasis(self, document: base.DocType, **kwargs) -> base.List[str]:
"""
Remove Ekphrasis specific tokens.
:document (base.DocType): The document to process.
:returns document: Document filtered for ekphrasis specific tokens.
"""
if isinstance(document, list):
document = " ".join(document)
if self.filters is not None:
for filtr in self.filters:
document = document.replace(filtr, '')
document = document.split(" ")
return document
def ekphrasis_tokenize(self, document: base.DocType, processes: base.List[str] = None, **kwargs
) -> base.DocType:
"""
Tokenize the document using BPE and clean it as it is processed.
:document: Document to be parsed.
:processes: The cleaning processes to engage in.
:returns toks: Document that has been passed through spacy's tagger.
"""
if isinstance(document, list):
document = " ".join(document)
document = self.clean_document(document, processes, **kwargs)
document = self.ekphrasis.pre_process_doc(document)
return self._filter_ekphrasis(document, **kwargs)
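# A short usage sketch (made-up sentence; assumes the spaCy model 'en_core_web_sm' is installed):
#
#   cleaner = Cleaner(processes=['lower', 'url', 'username'])
#   cleaner.tokenize("@user check https://example.com NOW")
#   # -> roughly ['USER', 'check', 'URL', 'now']; exact tokens depend on spaCy's tokenizer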
|
zeeraktalat/mlearn
|
mlearn/data/clean.py
|
clean.py
|
py
| 12,772 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19637216652
|
import pickle
import lasagne
import numpy as np
import theano as th
import theano.tensor as T
import lasagne.layers as ll
from data_reader import load
from settings import DATA_DIR
from inception_v3 import build_network, preprocess
def extract(data, layer, batch_size):
nr_batches_train = int(data.shape[0]/batch_size)
x_temp = T.tensor4()
features = ll.get_output(layer, x_temp , deterministic=True)
extract_features = th.function(inputs=[x_temp ], outputs=features)
output_features = []
for t in range(nr_batches_train):
train_temp = data[t*batch_size:(t+1)*batch_size]
tx_resized = []
for n in range(batch_size):
tx_resized.append(preprocess(np.transpose(train_temp[n],(1,2,0))))
tx_resized = np.concatenate(tx_resized, axis=0)
output_features.append(extract_features(tx_resized))
return np.concatenate(output_features, axis=0)
with open('inception_v3.pkl', 'rb') as f:
params = pickle.load(f)
net = build_network()
lasagne.layers.set_all_param_values(net['softmax'], params['param values'])
trainx, _ = load(DATA_DIR, subset='train')
testx, _ = load(DATA_DIR, subset='test')
minibatch_size = 10
feature_layer = net['pool3']
print("Extracting features from train data...")
train_features = extract(trainx, feature_layer, minibatch_size)
print("Extracting features from test data...")
test_features = extract(testx, feature_layer, minibatch_size)
print(train_features.shape)
print(test_features.shape)
np.savez_compressed('cifar_train_x', train_features)
np.savez_compressed('cifar_test_x', test_features)
|
maciejzieba/svmCIFAR10
|
extract_inception.py
|
extract_inception.py
|
py
| 1,602 |
python
|
en
|
code
| 2 |
github-code
|
6
|
23007800095
|
import numpy as np
from keras.utils import to_categorical
def create_labels(train_positives, train_negatives=None, flag=False):
''' This function creates labels for model training '''
if flag == False :
        # only positive data in training
labels = np.zeros(train_positives.shape[0])
labels[:] = 1
else:
# negatives & positives data in training
labels = np.zeros(train_positives.shape[0] + train_negatives.shape[0])
labels[:train_positives.shape[0]] = 1
return np.expand_dims(labels, axis=1)
def reshape_X(data, nrows, ncols):
data_t = np.zeros((data.shape[0], nrows, ncols))
data_cols = data[0].shape[0]-1
ctr = 0
for i, j in zip(range(0, data_cols//2, 2), range(data_cols//2, data_cols, 2)):
data_t[:, ctr, :] = np.hstack([data[:, i:i+2], data[:, j:j+2]])
ctr += 1
return data, data_t
def reshape_y(y, nrows):
y = to_categorical(y)
print("\ny shape : ", y.shape)
y_ = np.zeros((nrows, y.shape[0], y.shape[1]))
for i in range(nrows):
y_[i, :, :] = y
return y_
def split_train_validation(x, y, val_split=0.1):
m = x.shape[0]
    val_size = int(val_split * m)  # use the val_split argument instead of a hard-coded 0.1
return x[:-val_size], y[:, :-val_size, :], x[-val_size:], y[:, -val_size:, :]
|
nikitamalviya/user-authentication-using-siamese
|
features_and_labels.py
|
features_and_labels.py
|
py
| 1,281 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35239523373
|
print("hello")
string = "MALAYALAM"
temp = []
for i in string:
if i not in temp:
temp.append(i)
print(i, "->",string.count(i))
class Node:
def __init__(self, name, age):
self.name = name
self.age = age
def print1(self):
print("Name ->", self.name)
print("Age ->", self.age)
p = Node("goutham", 25)
p.print1()
class Node1:
def __init__(self, data):
self.data = data
self.next = None
class linkedList:
def __init__(self):
self.Head = None
self.last = None
self.count = 0
def insertnode(self, data):
status = self.search(data)
if status == -1:
new_node = Node1(data)
if self.Head == None:
self.Head = new_node
self.count += 1
else:
last = self.Head
while last.next:
last = last.next
last.next = new_node
self.last = last.next
self.count += 1
else:
print(f"Given element '{data}'is already present in linked list")
def search(self, ele):
index = 1
node = self.Head
while node:
if node.data == ele:
return index
index += 1
node = node.next
return -1
def insertpos(self, ele, pos):
status = self.search(ele)
if status != -1:
print(f"Given element '{ele}'is already present in linked list")
return 0
if pos > self.count+1:
print("Can't insert at given position...")
return 0
new_node = Node1(ele)
if pos == 1:
new_node.next = self.Head
self.Head = new_node
self.count += 1
else:
node = self.Head
for i in range(1, pos-1):
node = node.next
temp = node.next
node.next = new_node
new_node.next = temp
self.count += 1
def delete(self, ele):
index = self.search(ele)
if index == -1:
print("Given element is not found...")
else:
if index == 1:
node = self.Head.next
self.Head = node
self.count -= 1
else:
print("index -> ",index)
node = self.Head
for i in range(1,index-1):
node = node.next
node.next = node.next.next
self.count -= 1
def traverse(self):
next_node = self.Head
print("Linked list ", end="")
while next_node:
print("->", next_node.data, end="")
next_node = next_node.next
print("")
link = linkedList()
link.insertnode(10)
link.insertnode(30)
link.insertnode(40)
link.insertnode(20)
link.insertnode(60)
link.insertnode(50)
link.insertnode(50)
link.traverse()
# print(link.search(20))
link.delete(60)
link.traverse()
# link.insertpos(100, 6)
# link.insertpos(100, 6)
# link.traverse()
# option = int(input("Enter"))
|
cadetgoutham/master
|
Practice/Python/sample.py
|
sample.py
|
py
| 3,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18674675586
|
import os
import shutil
import random as rand
from constants.constants import EXTRACTED_IMAGES, VALIDATION_IMAGES
def split_data():
"""
Splits data into training and validation sections for training.
:return:
"""
# check if validation folder exists, create if not
if not os.path.exists(VALIDATION_IMAGES):
os.mkdir(VALIDATION_IMAGES)
for folder in os.listdir(EXTRACTED_IMAGES):
source_dir = os.path.join(EXTRACTED_IMAGES, folder)
destination_dir = os.path.join(VALIDATION_IMAGES, folder)
# check if folder for images exists, create if not
if not os.path.exists(destination_dir):
os.mkdir(destination_dir)
for file_name in os.listdir(source_dir):
# decide whether or not to add a file to our validation folder. Try and get a 50/50 split
if len(os.listdir(source_dir)) <= len(os.listdir(destination_dir)):
break
if bool(rand.getrandbits(1)):
shutil.move(os.path.join(source_dir, file_name), os.path.join(destination_dir, file_name))
|
prsn670/Handwritten-Equation-Solver
|
prep_data/split_data.py
|
split_data.py
|
py
| 1,091 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11890317934
|
n = int(input())
array = list(map(int,input().split()))
# sort the elements at even indices (0-based) while leaving the other positions unchanged
even_pos = [elem for elem in array[::2]]
even_pos.sort()
i = 0
for elem in even_pos:
array[i] = elem
i += 2
print(' '.join([str(n) for n in array]))
|
syedjaveed18/codekata-problems
|
Arrays/Q23.py
|
Q23.py
|
py
| 215 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35176426735
|
'''
Organisation Model.py file
'''
import uuid
from django.db import models
class Organisation(models.Model):
'''
Organisation Table
id - Organisations ID
name - Organisations Name (Max length of 255 characters)
'''
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False
)
Organisation_Name = models.CharField(
max_length=255,
name="Organisation_Name"
)
def __str__(self):
'''
Returns the Organisation's Name
'''
return self.Organisation_Name
|
Code-Institute-Submissions/Support-Software-Inc
|
organisations/models.py
|
models.py
|
py
| 590 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18515188174
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 16:49:53 2017
@author: mducoffe
appendix : expansion operator for convolutional KFAC
"""
from keras.models import Model, Sequential
from keras.layers import Dense
import keras.backend as K
from keras.layers.merge import Concatenate
from keras.engine import InputSpec, Input, Layer
import numpy as np
def keras_expansion_op(A, delta, input_shape):
if K.image_dim_ordering() == "th":
(_, J, X, Y) = input_shape
else:
(_, X, Y, J) = input_shape
A = A.transpose((0, 3, 2, 1))
d_x = delta[0]/2; d_y = delta[1]/2
var_x = []
for n_x in range(d_x, X-d_x):
var_y = []
for n_y in range(d_y, Y-d_y):
tmp = A[:,:, n_x -d_x:n_x+d_x+1, n_y-d_y:n_y+d_y+1]
tmp = tmp[:,:, ::-1, ::-1, None]
var_y.append(tmp)
var_y = K.concatenate(var_y, axis=4)
var_y = var_y[:,:,:,:,:,None]
var_x.append(var_y)
var_x = K.concatenate(var_x, axis=5)
E_A = var_x.transpose((0, 5, 4, 1, 2, 3))
batch_size = E_A.shape[0]
coeff = 1./((X-2*d_x)*(Y-2*d_y)) # 1/sqrt(tau)
E_A = E_A.reshape((batch_size, (X-2*d_x)*(Y-2*d_y), J*(2*d_x+1)*(2*d_y+1)))
return coeff*E_A
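# A shape sketch (hypothetical sizes): for a 32x32 input with J = 3 channels and a 3x3
# kernel (delta = (3, 3), so d_x = d_y = 1), the expanded activations E_A have shape
# (batch, 30 * 30, 3 * 3 * 3) = (batch, 900, 27): one row per valid spatial location and
# one column per (channel, kernel offset) pair, scaled by coeff = 1/900 in this example.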
|
mducoffe/Active_Learning_Variational_Inference_Deep_Networks
|
appendix.py
|
appendix.py
|
py
| 1,264 |
python
|
en
|
code
| 4 |
github-code
|
6
|
39583332735
|
# -*- coding:utf-8 -*-
from flask import json
from DataSet import db
from DataSet.fastdfs.view import fun
from DataSet.models import Image, Label
def storage(up_file, collection, file_name):
try:
image_status = fun.upload(up_file, file_ext_name='jpg')
image = Image()
image.name = file_name
image.site = image_status.get('file_id')
image.collection_id = collection.id
db.session.add(image)
db.session.commit()
except Exception as e:
fun.remove(image_status['filename'])
db.session.rollback()
return '{"err_no": "1", "err_desc": "数据保存失败", "data": %s}' % e
images = Image.query.filter_by(collection_id=collection.id, site=image_status['filename']).first()
return images
class ChangeJsonFile(object):
def __init__(self):
self.join_images = '{' + '"images": {'
self.join_images += '"data_uploaded": "%s",' + '"file_name": "%s",'
self.join_images += '"height": "%s",' + '"width": "%s",' + '"id": %s},'
self.json_label_dict = ["background", "person", "bicycle", "car", "motorcycle", "airplane", "bus",
"train", "truck", "boat", "traffic_light", "fire_hydrant", "stop_sign", "parking_meter",
"bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
"backpack",
"umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports_ball",
"kite",
"baseball_bat", "baseball_glove", "skateboard", "surfboard", "tennis_racket", "bottle",
"wine_glass",
"cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
"broccoli", "carrot",
"hot_dog", "pizza", "donut", "cake", "chair", "couch", "potted_plant", "bed",
"dining_table", "toilet",
"tv", "laptop", "mouse", "remote", "keyboard", "cell_phone", "microwave", "oven",
"toaster", "sink",
"refrigerator", "book", "clock", "vase", "scissors", "teddy_bear", "hair_drier",
"toothbrush"]
def segmentation(self, images, size, file_name):
str_data = file_name
json_dict_data = json.loads(str_data)
annotation = json_dict_data.get('annotation')
data_list = []
for i in annotation:
            category_id = i.get('category_id')  # label id of the annotation's category
try:
label_name = self.json_label_dict[category_id]
# print( label_name )
labels = Label.query.filter_by(name=label_name, collection_id=images.collection_id).first()
except:
continue
# labels = Label.query.filter_by(label_id=category_id, collection_id=images.collection_id).first()
if not labels:
continue
a = '{"bbox": ' + str(i['bbox']) + ','
a += '"category_id": ' + str(labels.label_id) + ',' + '"category_name": ' + '"{}"'.format(labels.name) + ','
a += '"segmentation": [' + str(i['segmentation']) + ']}' + ','
data_list.append(a)
next_join = '"classification": ' + '[],'
next_join += '"annotation": [' + ''.join(data_list)[:-1] + ']}'
str_list = [self.join_images, next_join]
data = ''.join(str_list)
up_file = data % (images.create_time, images.site[10:], size.get('height'), size.get('width'), images.id)
file_json = fun.upload(up_file, file_ext_name='json')
images.status = 3
images.label_path = file_json.get('file_id')
db.session.add(images)
db.session.commit()
cjf = ChangeJsonFile()
|
limingzhang513/lmzrepository
|
data_module/src/Data_Processing/DataSet/utils/change_json_file.py
|
change_json_file.py
|
py
| 4,014 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72013269309
|
import numpy as np
from collections import deque
from segment_tree import SumSegmentTree,MinSegmentTree
class ReplayBuff(object):
def __init__(self,max_size,observation_shape):
self.max_size=max_size
self.observations=np.zeros([max_size,observation_shape],dtype=np.float32)
        self.actions=np.zeros([max_size],dtype=np.int64)  # np.int was removed from recent NumPy releases
self.rewards=np.zeros([max_size],dtype=np.float32)
self.next_observations=np.zeros([max_size,observation_shape],dtype=np.float32)
self.terminals=np.zeros(max_size,dtype=np.float32)
self.size=0
self.ptr=0
def append(self,obs,action,reward,next_obs,terminal):
self.observations[self.ptr]=obs
self.actions[self.ptr]=action
self.rewards[self.ptr]=reward
self.next_observations[self.ptr]=next_obs
self.terminals[self.ptr]=terminal
self.ptr=(self.ptr+1)%self.max_size
self.size=min(self.size+1,self.max_size)
def sample(self,batch_size):
if batch_size > self.size:
batch_idxs=np.arange(self.size)
else:
batch_idxs=np.random.choice(self.size, size=batch_size,replace=False)
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs])
def __len__(self):
return self.size
class PrioritizedReplayBuff(ReplayBuff):
def __init__(self,max_size,observation_shape,alpha=0.6):
assert alpha>=0
super(PrioritizedReplayBuff,self).__init__(max_size,observation_shape)
self.max_priority=1.0
self.tree_ptr=0
self.alpha=alpha
tree_capacity=1
while tree_capacity < self.max_size:
tree_capacity*=2
self.sum_tree=SumSegmentTree(tree_capacity)
self.min_tree=MinSegmentTree(tree_capacity)
def append(self,obs,action,reward,next_obs,terminal):
super(PrioritizedReplayBuff,self).append(obs,action,reward,next_obs,terminal)
self.sum_tree[self.tree_ptr]=self.max_priority ** self.alpha
self.min_tree[self.tree_ptr]=self.max_priority ** self.alpha
self.tree_ptr=(self.tree_ptr+1)%self.max_size
def sample(self,batch_size,beta=0.4):
assert beta>0
if batch_size>self.size:
batch_size=self.size
batch_idxs=self._sample_proportional(batch_size)
weights=np.array([self._calculate_weight(i,beta) for i in batch_idxs],dtype=np.float32)
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs],
weights=weights,
indices=batch_idxs)
def update_priorities(self,idxs,priorities):
assert len(idxs) == len(priorities)
for idx,priority in zip(idxs,priorities):
assert priority>0
assert 0<=idx<len(self)
self.sum_tree[idx]=priority**self.alpha
self.min_tree[idx]=priority**self.alpha
self.max_priority=max(self.max_priority,priority)
def _sample_proportional(self,batch_size):
batch_idxs=[]
p_total=float(self.sum_tree.sum(0,len(self)-1))
segment=p_total/batch_size
for i in range(batch_size):
upperbound=np.random.uniform(segment*i,segment*(i+1))
batch_idxs.append(self.sum_tree.retrieve(upperbound))
return batch_idxs
def _calculate_weight(self,idx,beta):
p_min=float(self.min_tree.min())/self.sum_tree.sum()
max_weight=(p_min*len(self))**(-beta)
p_sample=self.sum_tree[idx]/float(self.sum_tree.sum())
weight=(p_sample*len(self))**(-beta)
weight=weight/max_weight
return weight
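    # The importance-sampling weight above implements w_i = (N * P(i))**(-beta),
    # normalised by the maximum weight so that updates are only ever scaled down.
    # A tiny numeric check (hypothetical numbers): with N = 4 stored transitions,
    # P(i) = 0.4 and beta = 0.4, w_i = (4 * 0.4)**(-0.4) ~= 0.83 before normalisation.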
class multistepReplayBuff(object):
def __init__(self,max_size,observation_shape,n_step,gamma):
self.max_size=max_size
self.observations=np.zeros([max_size,observation_shape],dtype=np.float32)
        self.actions=np.zeros([max_size],dtype=np.int64)  # np.int was removed from recent NumPy releases
self.rewards=np.zeros([max_size],dtype=np.float32)
self.next_observations=np.zeros([max_size,observation_shape],dtype=np.float32)
self.terminals=np.zeros(max_size,dtype=np.float32)
self.size=0
self.ptr=0
# for multi-step dqn
self.multi_step_buffer = deque(maxlen=n_step)
self.n_step=n_step
self.gamma=gamma
def append(self,obs,action,reward,next_obs,done):
transtion = (obs,action,reward,next_obs,done)
self.multi_step_buffer.append(transtion)
if len(self.multi_step_buffer) >= self.n_step:
reward,next_obs,done = self._get_n_step_info()
obs,action = self.multi_step_buffer[0][:2]
self.observations[self.ptr]=obs
self.actions[self.ptr]=action
self.rewards[self.ptr]=reward
self.next_observations[self.ptr]=next_obs
self.terminals[self.ptr]=done
self.ptr=(self.ptr+1)%self.max_size
self.size=min(self.size+1,self.max_size)
def sample(self,batch_size):
if batch_size > self.size:
batch_idxs=np.arange(self.size)
else:
batch_idxs=np.random.choice(self.size, size=batch_size,replace=False)
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs])
def sample_from_indexs(self,batch_idxs):
return dict(obs=self.observations[batch_idxs],
action=self.actions[batch_idxs],
reward=self.rewards[batch_idxs],
next_obs=self.next_observations[batch_idxs],
done=self.terminals[batch_idxs])
    def _get_n_step_info(self):
        # stop at the first terminal transition inside the n-step window,
        # then fold the rewards backwards from that point with discounting
        for index in range(self.n_step):
            if self.multi_step_buffer[index][-1]:
                break
        reward, next_obs, done = self.multi_step_buffer[index][-3:]
if index:
for transition in reversed(list(self.multi_step_buffer)[:index]):
r = transition[2]
reward = r + self.gamma * reward
return reward, next_obs, done
def __len__(self):
return self.size
if __name__=='__main__':
rb=ReplayBuff(512,6)
for i in range(50):
rb.append(np.random.randn(6),np.random.randn(),3.7,np.random.randn(6),3.3)
#print("sample test\n sample return type:"+str(type(rb.sample(1))))
#print(rb.sample(128))
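    # --- Hedged usage sketch (editor's addition, not part of the original file) ---
    # A minimal smoke test for the prioritized and n-step buffers defined above,
    # using only their own public methods; the hyperparameter values (alpha, beta,
    # n_step, gamma) and the random "TD-error" priorities are illustrative assumptions.
    per=PrioritizedReplayBuff(512,6,alpha=0.6)
    for i in range(50):
        per.append(np.random.randn(6),np.random.randint(4),1.0,np.random.randn(6),False)
    batch=per.sample(16,beta=0.4)
    # after computing TD errors elsewhere, priorities would be refreshed like this:
    per.update_priorities(batch['indices'],np.abs(np.random.randn(len(batch['indices'])))+1e-6)
    nstep=multistepReplayBuff(512,6,n_step=3,gamma=0.99)
    for i in range(50):
        nstep.append(np.random.randn(6),np.random.randint(4),1.0,np.random.randn(6),False)
    print(len(per),len(nstep))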
| linnaeushuang/RL-pytorch | value-based/rainbow/memory.py | memory.py | py | 6,851 | python | en | code | 8 | github-code | 6 |
1008840332 |
'''Problem 62 cubic permutations'''
import time
from itertools import permutations
t1 = time.time()
cubes = [x**3 for x in range(1001,10000)]
def make_list(cube):
    '''Return the digits of cube as a sorted list.'''
    cubestring = str(cube)
    cubelist = [int(x) for x in cubestring]
    cubelist.sort()
    return cubelist
cube_lists = {x:make_list(x) for x in cubes}
#print(cube_lists)
#print(make_list(1234**3))
for cube, digits in cube_lists.items():
    sames = [c for c, d in cube_lists.items() if d == digits]
    if len(sames) == 5:
        print(sames)
        break
def fact(n):
if n<=1: return 1
return n*fact(n-1)
def permut(n):
    '''Returns a set of up to five cubes whose digits are permutations of n.'''
    tot = 0
    plist = set()
nstr = str(n)
p = permutations(nstr)
for i in range(fact(len(nstr))):
tempstr = ''
t = next(p)
#print("next p:",t)
for digit in t:
tempstr += digit
#print(tempstr,"plist:",plist)
if int(tempstr) in cubes:
print("Found cube:",tempstr,"plist:",plist)
plist.add(int(tempstr))
tot += 1
if len(plist) == 5:
return plist
return
#plist.append(int(tempstr))
'''for n in plist:
if n in cubes:
print("Found cube:",n)
tot += 1
return tot'''
#permut(1234**3)
'''for c in cubes[:5]:
if permut(c) == 5:
print("solution:",c)
break'''
t2 = time.time()
print(t2-t1)
#
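# --- Hedged alternative sketch (editor's addition, not part of the original file) ---
# Grouping cubes by their sorted digit string avoids the quadratic comparison above;
# the upper bound of 10000 is an assumption chosen to cover cubes up to 12 digits.
from collections import defaultdict
groups = defaultdict(list)
for n in range(345, 10000):
    c = n ** 3
    groups[''.join(sorted(str(c)))].append(c)
candidates = [min(g) for g in groups.values() if len(g) == 5]
if candidates:
    print("smallest cube with exactly five cubic permutations:", min(candidates))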
| hackingmath/Project-Euler | euler62.py | euler62.py | py | 1,623 | python | en | code | 0 | github-code | 6 |
22075010385 |
from flask import Flask, request, jsonify
from flask_cors import CORS
import sqlite3
import base64
app = Flask(__name__)
CORS(app)
@app.route('/')
def index():
return 'Index Page'
@app.route('/deleteUser', methods=['DELETE'])
def delete_user():
print("Petición DELETE")
try:
data = request.get_json()
username = data['username']
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
        # Check whether the user exists before deleting it
cursor.execute("SELECT * FROM Users WHERE username=?", (username,))
user = cursor.fetchone()
if user:
cursor.execute("DELETE FROM Users WHERE username=?", (username,))
conn.commit() # Guarda los cambios en la base de datos
conn.close()
return jsonify({"status": "Elemento eliminado"})
else:
conn.close()
return jsonify({"error": "Usuario no encontrado"}), 404
except sqlite3.Error as e:
return jsonify({'error': str(e)}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
@app.route('/users')
def users():
print("peticion de users")
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
try:
cursor.execute("SELECT username, role_id, picture FROM Users")
users = cursor.fetchall()
roles = ["Administrador" if user[1] <=
2 else "Usuario" for user in users]
user_info = []
for user, role in zip(users, roles):
username, _, picture_path = user
if user[2] is None:
picture_path = "/home/eliezercode/Documents/VSC/Proyecto MABG/Back-end/pictures/user/user.png"
with open(picture_path, "rb") as image_file:
picture_base64 = base64.b64encode(
image_file.read()).decode("utf-8")
user_info.append({
"username": username,
"role": role,
"picture": picture_base64
})
return jsonify({"data": user_info})
except sqlite3.Error as e:
return jsonify({'error': str(e)}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
finally:
conn.close()
@app.route('/login', methods=['POST'])
def login():
print("peticion de login")
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
try:
print("peticion de login")
data = request.get_json()
username = data['usuario']
password = data['password']
cursor.execute("SELECT * FROM Users WHERE username=?", (username,))
user = cursor.fetchone()
if not user:
return jsonify({'mensaje': 'No existe ese usuario'}), 404
rol = "Administrador" if user[3] <= 2 else "Usuario"
print("imagen:",user[2])
if user[2] is None:
image_binary = "/home/eliezercode/Documents/VSC/Proyecto MABG/Back-end/pictures/user/user.png"
with open(image_binary, "rb") as image_file:
image_binary = base64.b64encode(
image_file.read()).decode("utf-8")
else:
with open(user[2], "rb") as image_file:
image_binary = base64.b64encode(
image_file.read()).decode("utf-8")
user_data = {'name': user[1], 'pictureUrl': image_binary,
'role': rol, 'username': user[5]}
        if user[4] == password:
            return jsonify({'user_data': user_data, 'mensaje': 'Inicio de sesion correctamente'}), 200
        return jsonify({'mensaje': 'Inicio de sesion fallido'}), 401
except sqlite3.Error as e:
print("sqlite: ", e)
return jsonify({'error': str(e)}), 500
except Exception as e:
print("exeption: ", e)
return jsonify({'error': str(e)}), 500
finally:
conn.close()
@app.route('/addUsers', methods=['POST'])
def addUsers():
print("peticion de addUsers")
conn = sqlite3.connect('../Back-end/Database/MABG.db')
cursor = conn.cursor()
try:
data = request.get_json()
name = data['usuario']
username = data['username']
password = data['password']
rol = data['rol']
print("data: ", name, username, password, rol)
cursor.execute("INSERT INTO Users(name, role_id, password, username) VALUES (?, ?, ?, ?)",
(name, rol, password, username))
        conn.commit()
        print("Finished correctly")
        return jsonify({'mensaje': 'Registro de usuario correcto'}), 200
except sqlite3.Error as e:
print("error", e)
return jsonify({'error': str(e)}), 500
except Exception as e:
return jsonify({'error': str(e)}), 500
finally:
conn.close()
if __name__ == '__main__':
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# cors = CORS(app, resources={r"/login": {"origins": "http://localhost:5173"}})
app.run(debug=True)
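# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Example client calls against a local run of this server, assuming the `requests`
# package and Flask's default development port 5000; the credential values below
# are placeholders, not accounts that exist in MABG.db.
#
#   import requests
#   r = requests.post("http://127.0.0.1:5000/login",
#                     json={"usuario": "someuser", "password": "somepassword"})
#   print(r.status_code, r.json())
#
#   r = requests.post("http://127.0.0.1:5000/addUsers",
#                     json={"usuario": "Jane Doe", "username": "jane",
#                           "password": "somepassword", "rol": 3})
#   print(r.status_code, r.json())
#
#   r = requests.delete("http://127.0.0.1:5000/deleteUser", json={"username": "jane"})
#   print(r.status_code, r.json())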
| DevEliezerMartinez/PosMABG | Back-end/server.py | server.py | py | 5,235 | python | en | code | 1 | github-code | 6 |