seq_id (stringlengths 7–11) | text (stringlengths 156–1.7M) | repo_name (stringlengths 7–125) | sub_path (stringlengths 4–132) | file_name (stringlengths 4–77) | file_ext (stringclasses, 6 values) | file_size_in_byte (int64 156–1.7M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 38 values) | doc_type (stringclasses, 1 value) | stars (int64 0–24.2k ⌀) | dataset (stringclasses, 1 value) | pt (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|---|
30489056010
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 16:13:47 2021
@author: Roman
"""
from spektral.transforms import AdjToSpTensor
from spektral.data import Dataset
from spektral.transforms.normalize_one import NormalizeOne
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
from astropy.coordinates import SkyCoord
import astropy.units as u
from src.graph.motgraph import MOTGraph
import time as t
import platform
def sliding_window(base_value, window_size = 4, overlap = 2, copy = False):
"""
Build an array containing multiple views of base_value in order to create a sliding window with overlap.
Parameters
----------
base_value : numpy array
values used to make the window.
window_size : int, optional
size of the window. The default is 4.
overlap : int, optional
number of values in the overlapping gap. The default is 2.
copy : bool, optional
if True, return a copy of the views instead of strided views into base_value. The default is False.
Returns
-------
numpy array
the views that compose the window.
"""
sh = (base_value.size - window_size + 1, window_size)
st = base_value.strides * 2
view = np.lib.stride_tricks.as_strided(base_value, strides = st, shape = sh)[0::overlap]
if copy:
return view.copy()
else:
return view
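# Example of what the function above produces (illustrative): with a window of 4
# and an overlap of 2, consecutive views share two values, e.g.
#   sliding_window(np.arange(8), window_size=4, overlap=2)
#   -> array([[0, 1, 2, 3],
#             [2, 3, 4, 5],
#             [4, 5, 6, 7]])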
def loadSSOdata(month, _class, point_limit):
"""
load solar system alerts from local file
Parameters
----------
month : string
a string number used to specify which file will be loaded.
_class : string
specify which object class will be loaded; values can only be 'Solar System MPC' or 'Solar System Candidate'.
point_limit : int
limit the number of alerts loaded by keeping only objects seen at least point_limit times; only used when _class is 'Solar System MPC'.
Returns
-------
dataframe
all alerts seen in the month that belong to _class and were seen at least point_limit times
"""
if platform.system() == 'Linux':
path = "../../data/month=" + month
elif platform.system() == 'Windows':
path = "..\..\data\month=" + month
else:
raise ValueError(f"unsupported platform: {platform.system()}")
df_sso = pd.read_pickle(path)
df_class = df_sso[df_sso['fink_class'] == _class]
if _class == 'Solar System MPC':
mpc_trajectory = df_class.groupby(['ssnamenr']).count()
mpc_index = mpc_trajectory[mpc_trajectory['ra'] >= point_limit].index
feature = ['ra', 'dec', 'jd', 'nid', 'dcmag', 'fid', 'ssnamenr', 'candid']
return df_class[df_class['ssnamenr'].isin(mpc_index)][feature]
else:
feature = ['ra', 'dec', 'jd', 'nid', 'dcmag', 'fid', 'candid']
return df_class[feature]
class EdgeNormalizeOne:
r"""
Normalizes the edge attributes by dividing each row by its sum, so that it
sums to 1:
$$
\X_i \leftarrow \frac{\X_i}{\sum_{j=1}^{N} \X_{ij}}
$$
"""
def __call__(self, graph):
e_sum = np.sum(graph.e, -1)
e_sum[e_sum == 0] = 1
graph.e = graph.e / e_sum[..., None]
return graph
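# Worked example for the transform above: for edge attributes
# e = [[1., 3.], [0., 0.]] the row sums are [4., 0.]; zero sums are replaced by 1,
# so the normalized attributes become [[0.25, 0.75], [0., 0.]].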
class MOTGraphDataset(Dataset):
def __init__(self, date, load_candidates, lightcurves_point_limit, window_params = None, **kwargs):
"""
Build graph dataset from local solar system alert dataset
Parameters
----------
date : string
a string number used to specify which file will be loaded.
load_candidates : string
specify which object class will be loaded; values can only be 'Solar System MPC' or 'Solar System Candidate'.
lightcurves_point_limit : int
limit the number of alerts loaded by keeping only objects seen at least lightcurves_point_limit times; only used when load_candidates is 'Solar System MPC'.
window_params : int tuple, optional
parameters of the window: the first element is the window size, the second is the overlap. The default is None.
Returns
-------
None.
"""
self.date = date
self.lcpl = lightcurves_point_limit
self.load_candidates = load_candidates
self.window_params = window_params
super().__init__(**kwargs)
def read(self):
"""
Method called internally by the class; performs file reading and graph building in order to create the graph dataset.
Returns
-------
output : graph list
all the graphs built from the overlapping windows.
"""
print("reading data...")
output = []
df_sso = loadSSOdata(self.date, self.load_candidates, self.lcpl)
print("number of sso_alert remaining after limitation by number of point in lightcurves: {}"\
.format(len(df_sso)))
nid = np.unique(df_sso['nid'])
window = 10
overlap = 5
if self.window_params is not None:
window, overlap = self.window_params
frames_window = sliding_window(nid, window, overlap)
print("construct graph by overlapping window on night id")
print("number of graph: {}".format(len(frames_window)))
nb_graph = 1
for frames in frames_window:
df_frames = df_sso[df_sso['nid'].isin(frames)]
df_frames = df_frames.assign(candid_idx=pd.Series(np.arange(len(df_frames))).values)
df_frames = df_frames.assign(label=pd.Series(np.zeros(len(df_frames))).values)
tmp_df = pd.merge(df_frames, df_frames, on='label')
graph_prune = tmp_df[(tmp_df['candid_x'] != tmp_df['candid_y'])\
& (tmp_df['nid_x'] != tmp_df['nid_y'])\
& (((tmp_df['dcmag_x'] - tmp_df['dcmag_y']) / (tmp_df['jd_x'] - tmp_df['jd_y'])) <= 1.0)
]
del tmp_df
ra_x, dec_x = np.array(graph_prune['ra_x']), np.array(graph_prune['dec_x'])
ra_y, dec_y = np.array(graph_prune['ra_y']), np.array(graph_prune['dec_y'])
c1 = SkyCoord(ra_x, dec_x, unit = u.degree)
c2 = SkyCoord(ra_y, dec_y, unit = u.degree)
alerts_sep = c1.separation(c2)
graph_prune['alert_sep'] = alerts_sep
graph_prune = graph_prune[graph_prune['alert_sep'] <= 0.8]
print("constructing graph nb {} with {} nodes and {} edges"\
.format(nb_graph, len(df_frames), len(graph_prune)))
# take edges where extremity nodes are the same mpc object
same_mpc = graph_prune[graph_prune['ssnamenr_x'] == graph_prune['ssnamenr_y']]
# take edges where the left node has been created before the right node
forward_same_mpc = same_mpc[same_mpc['nid_x'] < same_mpc['nid_y']]
# take only one edge if multiple exists
idx_label = forward_same_mpc.groupby(['ssnamenr_x', 'nid_x'])['nid_y'].idxmin()
# create the training label
graph_prune.loc[same_mpc.loc[idx_label].index, 'label'] = 1
edge_label = graph_prune['label'].to_numpy().astype(np.int32)
row = list(graph_prune['candid_idx_x'])
col = list(graph_prune['candid_idx_y'])
data = np.ones(len(col))
sparse_adj_mat = coo_matrix((data, (row, col)), shape=(len(df_frames), len(df_frames))).tocsr()
node_feature = df_frames[['ra', 'dec', 'jd', 'dcmag', 'nid', 'fid']].to_numpy()
edge_feature = np.c_[np.array(np.abs(graph_prune['dcmag_x'] - graph_prune['dcmag_y'])),
np.array(graph_prune['jd_x'] - graph_prune['jd_y']),
np.array(graph_prune['alert_sep']),
np.array(graph_prune['nid_x'] - graph_prune['nid_y'])]
past_index = np.where(edge_feature[:, -1] > 0)[0]
past_index = past_index.reshape((len(past_index), 1))
futur_index = np.where(edge_feature[:, -1] < 0)[0]
futur_index = futur_index.reshape((len(futur_index), 1))
if self.load_candidates == 'Solar System MPC':
graph_prune = graph_prune[['candid_x', 'nid_x', 'ssnamenr_x',
'candid_y', 'nid_y', 'ssnamenr_y', 'label']]
else:
graph_prune = graph_prune[['candid_x', 'nid_x', 'candid_y', 'nid_y']]
g = MOTGraph(node_feature, sparse_adj_mat, edge_feature, edge_label.reshape((len(edge_label), 1)),
graph_prune, past_index, futur_index)
output.append(g)
nb_graph += 1
print()
print("end reading")
return output
if __name__ == "__main__":
print("test")
t_before = t.time()
tr_dataset = MOTGraphDataset("03", 'Solar System MPC', 15, window_params=(5, 2),
transforms=[EdgeNormalizeOne(), NormalizeOne(), AdjToSpTensor()])
print("tr_dataset construct time: ", t.time() - t_before)
for g in tr_dataset:
print(g.y.sum())
|
FusRoman/Alert-Association-previous-work
|
src/graph/motgraphdataset.py
|
motgraphdataset.py
|
py
| 9,253 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29233364056
|
#!/usr/bin/env python3
import re
import sys
import os
from os.path import dirname, join as path_join, abspath, exists
extra_paths = [path_join(dirname(abspath(__file__)), "include")]
def find_file(included_name, current_file):
current_dir = dirname(abspath(current_file))
for idir in [current_dir] + extra_paths:
try_path = path_join(idir, included_name)
if exists(try_path):
return try_path
return None
def process_file(file_path, out_lines=[], front_matter_lines=[], back_matter_lines=[], processed_files=[]):
out_lines += "//BEGIN_FILE_INCLUDE: " + file_path + "\n"
with open(file_path, "r") as f:
for line in f:
m_inc = re.match(r'# *include\s*[<"](.+)[>"]\s*', line)
if m_inc:
inc_name = m_inc.group(1)
inc_path = find_file(inc_name, file_path)
if inc_path not in processed_files:
if inc_path is not None:
processed_files += [inc_path]
process_file(inc_path, out_lines, front_matter_lines, back_matter_lines, processed_files)
else:
# assume it's a system header; add it to the front matter just to be clean
front_matter_lines += [line]
continue
m_once = re.match(r"#pragma once\s*", line)
# ignore pragma once; we're handling it here
if m_once:
continue
# otherwise, just add the line to the output
if line[-1] != "\n": line = line + "\n"
out_lines += [line]
out_lines += "//END_FILE_INCLUDE: " + file_path + "\n"
return "".join(front_matter_lines) + "\n" + "".join(out_lines) + "".join(back_matter_lines)
if __name__ == "__main__":
print(process_file(abspath(sys.argv[1]), [],
# We use an include guard instead of `#pragma once` because Godbolt will
# complain about `#pragma once` when it is used in URL includes.
["#ifndef _MDSPAN_SINGLE_HEADER_INCLUDE_GUARD_\n",
"#define _MDSPAN_SINGLE_HEADER_INCLUDE_GUARD_\n"],
["#endif // _MDSPAN_SINGLE_HEADER_INCLUDE_GUARD_\n"],
[abspath(sys.argv[1])]))
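# Typical invocation (illustrative; the header path is an assumption, adjust to your tree):
#   python3 make_single_header.py path/to/top_level_header.hpp > single_header.hpp
# The script inlines every #include it can resolve from the including file's
# directory or the script's include/ directory, hoists unresolved (system)
# includes into the front matter, and wraps the result in the include guard above.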
|
rapidsai/raft
|
cpp/include/raft/thirdparty/mdspan/make_single_header.py
|
make_single_header.py
|
py
| 2,236 |
python
|
en
|
code
| 452 |
github-code
|
6
|
27066275793
|
#! /usr/bin/python
from Prettier import *
from Solution_Checker import *
from Words_List_Prep import *
from Scorer import *
from Word_Processor import *
from Box_Maker import *
def main():
"""
Main IBM Puzzle
find boxes that have between 2 and 5 solutions
"""
### Intro Box Text
introPrinter()
### Start a recording text file for boxes that are good
f = open("AnswerBoxes", "w")
### Creates a list of possible boxes
# matrixBoxes = all possible boxes
# abcdefghijklmnopqrstuvwxyz
alphabet = "abcdefghijklmnopqrstuvwxyz"
matrixBoxes = makeReadableBoxes(makeScrambledPartition(alphabet))
### Loop through Boxes and print if there is exactly one solution
for iSidedBox in matrixBoxes:
solutions = solve(iSidedBox)
if len(solutions) == 1:
message = str(iSidedBox) + "\n"
print(message)
f.write(message)
### Close text file when Done
f.close()
### Outro Box Line
outroPrinter()
return None
def solve(sidedBox):
"""
Main solution initiator
"""
### Initialize entry parameters:
combinedLetters = str()
for iSide in sidedBox:
combinedLetters += iSide
nSides = len(sidedBox)
### Make set of useful words specific to Box
entireWords = initializeDictionary()
usefulWords = wordsReducer(sidedBox, entireWords)
usefulWords = removeUnwantedCharacter(usefulWords)
### Make list of Playable Words from usefulWords
# EntireWords = all words in word list
# UsefulWords = words that have all letters from box
# PlayableWords = words that can actually be played in game
playableWords = list()
for iWord in usefulWords:
if playableCheck(iWord, sidedBox):
playableWords += [iWord]
### Scoring to optimize word choice
# Make a dictionary of scores:
dictWordScores = makeScoredWords(playableWords)
### TOGGLE FOR SOLUTION PRINTING
### Show top scoring words from scorer.py:
topScoringWords(combinedLetters, dictWordScores)
### Combine two words into new dictionary:
dictTwoSolves = wordCombiner(combinedLetters, playableWords)
### TOGGLE FOR SOLUTION PRINTING
#for iTwoSolution in dictTwoSolves:
# print(iTwoSolution)
### Prints the number of TwoSolutions
#print("Number of 2-Solves: ", len(dictTwoSolves))
#return topScoringWords
return dictTwoSolves
#return len(dictTwoSolves)
if __name__ == "__main__":
sidedBox = ["ion","abc","elt","khr"]
print(solve(sidedBox))
#main()
|
tmangan/PonderThis
|
2022_December/solver.py
|
solver.py
|
py
| 2,591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8704975946
|
import time
from stable_baselines3 import PPO, A2C
from batkill_gym import BatkillEnv
import os
models_dir = "ppo"
logdir = f"logs"
if not os.path.exists(models_dir):
os.makedirs(models_dir)
if not os.path.exists(logdir):
os.makedirs(logdir)
env = BatkillEnv()
env.reset()
TIMESTEPS = 100000
model = PPO('MlpPolicy', env, verbose=1, tensorboard_log=logdir)
model.learn(total_timesteps=TIMESTEPS)
model.save(f"{models_dir}/{TIMESTEPS}")
# img = model.env.render(mode='rgb_array')
# imageio.mimsave('lander_a2c.gif', [np.array(img) for i, img in enumerate(images) if i%2 == 0], fps=29)
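# A minimal sketch of reloading and running the saved policy (assumes BatkillEnv
# follows the classic gym reset()/step() API returning obs and (obs, reward, done, info)):
#   model = PPO.load(f"{models_dir}/{TIMESTEPS}", env=env)
#   obs = env.reset()
#   for _ in range(1000):
#       action, _states = model.predict(obs, deterministic=True)
#       obs, reward, done, info = env.step(action)
#       if done:
#           obs = env.reset()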
|
polako/batkill
|
batkill_ai_train.py
|
batkill_ai_train.py
|
py
| 595 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42448338704
|
from alpha_vantage.timeseries import TimeSeries
from bs4 import BeautifulSoup
import json
with open("config.json", "r") as config_file:
config = json.load(config_file)
api_key = config.get("api_key")
print("apik key: ", api_key)
ts1 = TimeSeries(key=api_key)
# Retrieve the weekly time series data for AAPL
# data, meta_data = ts1.get_monthly("AAPL")
data = ts1.get_weekly("AAPL")
# Print the data
print("Monthly Time Series Data for AAPL:")
print(data)
# Optionally, you can print the metadata as well
print("Meta Data:")
# print(meta_data)
|
tokyo-lab/alpha
|
data_using_alpha_vantage_package.py
|
data_using_alpha_vantage_package.py
|
py
| 554 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26804283291
|
num_cells, num_epochs = [int(data) for data in input().split()]
cells = [int(data) for data in input()]
new_cells = [0] * num_cells
binary = bin(num_epochs)[2:]
num_bits = len(binary)
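# The automaton advances by XOR of the two neighbours, so after 2**k epochs each
# cell equals the XOR of the cells 2**k positions to its left and right. The loop
# below walks the bits of num_epochs from the most significant down and applies
# the corresponding jump only for set bits, instead of simulating every epoch.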
for bin_idx, bin_r_idx in zip(range(num_bits), reversed(range(num_bits))):
    if binary[bin_idx] == '1':
        shift = 2 ** bin_r_idx
        for idx in range(num_cells):
            new_cells[idx] = (cells[(idx - shift) % num_cells] ^
                              cells[(idx + shift) % num_cells])
        cells = new_cells.copy()
print("".join(str(data) for data in new_cells))
|
Stevan-Zhuang/DMOJ
|
CCC/CCC '16 S5 - Circle of Life.py
|
CCC '16 S5 - Circle of Life.py
|
py
| 570 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72743495547
|
from csv import reader
from operator import add
import datetime
#fares dataset
fares_rdd = sc.textFile("/user/hc2660/hw2data/Fares.csv", 1)
fares_rdd = fares_rdd.mapPartitions(lambda x: reader(x))
#fares_rdd.take(10)
#trips dataset
trips_rdd = sc.textFile("/user/hc2660/hw2data/Trips.csv", 1)
trips_rdd = trips_rdd.mapPartitions(lambda x: reader(x))
#trips_rdd.take(10)
#license dataset
header1 = fares_rdd.first()
fares2 = fares_rdd.filter(lambda line: line != header1)
#fares2.take(10)
header2 = trips_rdd.first()
trips2 = trips_rdd.filter(lambda line: line != header2)
trips3 = trips2.map(lambda line : ((line[0],line[1], line[2], line[5]), (line[3], line[4], line[6], line[7], line[8], line[9], line[10], line[11], line[12], line[13])))
#trips3.take(10)
fares3 = fares2.map(lambda line : ((line[0], line[1], line[2], line[3]), (line[4], line[5], line[6], line[7], line[8], line[9], line[10])))
allfare = trips3.join(fares3)
allfare1 = allfare.sortByKey(True, 10, keyfunc=lambda k:(k[0], k[1], k[3]))
dup = allfare1.map(lambda x:(x[0][0],x[0][3]))
dup2 = dup.sortBy(lambda x:(x[0],x[1]))
dup3 = dup2.map(lambda x: ((x[0],x[1]),1)).reduceByKey(add)
dup4 = dup3.filter(lambda x: (x[1]>1))
dup5 = dup4.map(lambda x: (x[0][0],x[0][1]))
dup6 = dup5.sortBy(lambda x:(x[0],x[1]))
dup7 = dup6.map(lambda r: ','.join([str(KVPair) for KVPair in r]))
dup8 = dup7.map(lambda r: r.replace("'", ""))
dup9 = dup8.map(lambda r: r.replace('(', '').replace(')', ''))
dup9.saveAsTextFile('task3b.out')
#dtime= ['2013-08-06 15:41:00','2013-08-05 18:03:00','2013-08-06 20:55:00','2013-08-07 01:24:00','2013-08-05 09:02:00']
#d2 = sorted(dtime)
|
Zeus197/bigdata_assignment
|
assignment1/task3b.py
|
task3b.py
|
py
| 1,629 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22893421369
|
# -*- coding: utf-8 -*-
from collective.transmogrifier.interfaces import ISection
from collective.transmogrifier.interfaces import ISectionBlueprint
from collective.transmogrifier.utils import resolvePackageReferenceOrFile
from zope.interface import classProvides
from zope.interface import implements
import os
try:
import json
except ImportError:
import simplejson as json
DATAFIELD = '_datafield_'
class JSONSource(object):
"""
"""
classProvides(ISectionBlueprint)
implements(ISection)
def __init__(self, transmogrifier, name, options, previous):
self.transmogrifier = transmogrifier
self.name = name
self.options = options
self.previous = previous
self.context = transmogrifier.context
self.path = resolvePackageReferenceOrFile(options['path'])
if self.path is None or not os.path.isdir(self.path):
raise Exception('Path (' + str(self.path) + ') does not exists.')
self.datafield_prefix = options.get('datafield-prefix', DATAFIELD)
def __iter__(self):
for item in self.previous:
yield item
for item3 in sorted([
int(i) for i in os.listdir(self.path) if not i.startswith('.')
]):
for item2 in sorted([
int(j[:-5])
for j in os.listdir(os.path.join(self.path, str(item3)))
if j.endswith('.json')
]):
f = open(os.path.join(
self.path, str(item3), '%s.json' % item2
))
item = json.loads(f.read())
f.close()
yield item
|
eikichi18/collective.jsonmigrator
|
collective/jsonmigrator/blueprints/source_json.py
|
source_json.py
|
py
| 1,658 |
python
|
en
|
code
| null |
github-code
|
6
|
40333923387
|
import numpy as np
import wave
import pyaudio
from scipy.io import wavfile
from scipy import interpolate
import math
import matplotlib.pyplot as plt
#MaxVal = 2147483647
MaxVal = 2147483647
#found relevant blog post:
#http://yehar.com/blog/?p=197
def clippingFunction(inSample):
threshold = MaxVal #maximum 24 bit output
outSample = threshold - threshold/(abs(inSample) + math.sqrt(threshold) + 1) ** 2
#return 1
return outSample * np.sign(inSample) #preserves sign
def softClip(sampleArr):
numSamples = len(sampleArr)
sampleArrOut = [[0] * 2 for i in range(numSamples)]
for i in range(numSamples):
sampleArrOut[i][0] = clippingFunction(sampleArr[i][0])
sampleArrOut[i][1] = clippingFunction(sampleArr[i][1])
return sampleArrOut
def main():
#testAudioIn = 'sinC2'
testAudioIn = 'flume test'
fileName = 'TestAudioIn/' + testAudioIn + '.wav'
sampleRate, sampleArr = wavfile.read(fileName)
stepSize = 500
#sampleArrClipped = softClip(sampleArr)
#wavfile.write("test.wav", sampleRate, np.array(sampleArrClipped)) #need to convert to a numpy array for this function
for i in range(1, 500+1, 50):
(splineEval, skipNValues, linSpace) = applySpline(sampleArr, i)
wavfile.write("TestAudioOut/" "" + testAudioIn + "_" + str(int(i/50)) + ".mp3", sampleRate, np.array(splineEval)) #need to convert to a numpy array for this function
#graphSignal(sampleArr)
#graphSignal([[i*2 + 1, i*2 + 1] for i in range(10)])
#graphSignal([sampleArr, sFlat])
print("File Name:", fileName)
print("Frame Rate:", sampleRate)
#print("Sample Array In:", sampleArr[0:100])
#print("Sample Array Out :", sampleArrClipped[0:100])
graphSignal(sampleArr, stepSize)
def applySpline(sampleArrs, stepSize):
extractedChannel0 = list(map(lambda x: x[0]/MaxVal, sampleArrs))
skipNValues = extractedChannel0[::stepSize]
linSpace = list(range(0, len(extractedChannel0), stepSize))
interpolationSpace = list(range(0, len(extractedChannel0)))
splineRep = interpolate.splrep(linSpace, skipNValues, s=0)
splineEval = interpolate.splev(interpolationSpace, splineRep)
return (splineEval, skipNValues, linSpace)
def graphSignal(sampleArrs, stepSize):
(splineEval, skipNValues, linSpace) = applySpline(sampleArrs, stepSize)
plt.plot(splineEval)
plt.plot(linSpace, skipNValues, marker = "x", linestyle = 'None')
plt.axis([0, 10000, -1, 1])
plt.show()
main()
'''
Cades Clipper
yOut = threshold - frac(threshold)(yIn +1)^power
Sigmoid Clipper
yOut = (2*threshold)/1+e^(power*-yIn) - threshold
Bounce Clipper:
Recursively mirrors yIn over threshold until yOut is inbetween the threshold values.
'''
'''
The following is tests regarding using the wave library
with wave.open('TestAudioIn/silence.wav', 'rb') as inFile:
print ( "Number of channels",inFile.getnchannels())
print ( "Sample width",inFile.getsampwidth())
print ( "Frame rate.",inFile.getframerate())
print ( "Number of frames",inFile.getnframes())
print ( "parameters:",inFile.getparams())
samplerate, data = wavfile.read('TestAudioIn/silence.wav')
frame = inFile.setpos(100)
f1 = inFile.readframes(1)
f1Int = int.from_bytes(f1, "big")
frame = inFile.setpos(50)
f2 = inFile.readframes(1)
f2Int = int.from_bytes(f2, "big")
#print(frames)
#print( f1Int)
#print( f2Int)
'''
|
theshieber/Spline-Filter
|
splinefilterPOC.py
|
splinefilterPOC.py
|
py
| 3,288 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70799503229
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import requests
import random
from itertools import count
# Request fails unless we provide a user-agent
api_response = requests.get('https://api.thevirustracker.com/free-api?countryTimeline=US', headers={"User-Agent": "Chrome"})
covid_stats = api_response.json()['timelineitems']
# Break out individual stats
date= []
deaths = []
daily_deaths =[]
total_casesL = []
daily_cases = []
for i in covid_stats:
print (i)
del i['stat']
for c_date, info in i.items():
print("\nDate:", c_date)
date.append(c_date)
print ('Total Cases:',info['total_cases'])
total_casesL.append(info['total_cases'])
print ('New Cases:',info['new_daily_cases'])
daily_cases.append(info['new_daily_cases'])
daily_deaths.append(info['new_daily_deaths'])
deaths.append(info['total_deaths'])
print(total_casesL)
print(daily_cases)
print(daily_deaths)
print (date)
print (plt.style.available)
death_rate = deaths[-1]/total_casesL[-1]
c = str(death_rate)
print('Death rate: ' + c[1:6])
y = np.arange(len(date))
plt.plot(y,total_casesL, label = 'Cases', marker = '.', linewidth=3 )
plt.plot(y,daily_cases , 'y', label = 'New Cases', linestyle = '--', )
plt.plot(y, daily_deaths, 'k', label = 'New Deaths' )
plt.plot(y, deaths, color = 'r', label = 'Deaths' , )
plt.ylabel('People')
plt.xlabel('Days')
plt.title('Covid 19 Cases (USA)')
plt.tight_layout()
plt.grid(True)
plt.legend()
plt.show()
|
it2515/Covid-19
|
Covid19.py
|
Covid19.py
|
py
| 1,590 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24823838001
|
import ctypes
from lone.util.struct_tools import ComparableStruct
class RegsStructAccess(ComparableStruct):
def read_data(self, get_func, offset, size_bytes):
read_data = bytearray()
for read_byte in range(size_bytes):
read_data += get_func(offset).to_bytes(1, 'little')
offset += 1
return read_data
def __setattr__(self, name, value):
# If _access_.set_func is not set, just use ctypes
# This is how "direct access" works when the object
# is just in userspace memory
if not self._access_.set_func:
object.__setattr__(self, name, value)
# If we are not accessing it directly, but the user requested
# something that is not in the _fields_ attribute, just
# use the regular __setattr__
elif name not in [f[0] for f in object.__getattribute__(self, '_fields_')]:
object.__setattr__(self, name, value)
# User requested a field that is in _fields_, and the structure
# has the _access_ attribute. Use the _access_.get_func to
# read bytes and make up the requested return value
else:
# Get offset and size for the structure that contains
# "name". We read/modify/write below because the user may be
# setting an arbitrary number of bits/bytes/etc in the
# structure
size_bytes = ctypes.sizeof(self.__class__)
offset = self._base_offset_
# READ size_bytes from offset into a temporary bytearray, then make up an
# object of the type we are modifying
read_data = self.read_data(self._access_.get_func, offset, size_bytes)
read_obj = self.__class__.from_buffer(read_data)
# MODIFY our read data with the new value at name
object.__setattr__(read_obj, name, value)
# WRITE the full structure back to the registers
offset = self._base_offset_
for write_byte in (ctypes.c_uint8 * size_bytes).from_address(
ctypes.addressof(read_obj)):
self._access_.set_func(offset, write_byte)
offset += 1
def __getattribute__(self, name):
# If _access_.get_func is not set, just use ctypes
# This is how "direct access" works when the object
# is just in userspace memory
if not object.__getattribute__(self, '_access_').get_func:
return object.__getattribute__(self, name)
# If we are not accessing it directly, but the user requested
# something that is not in the _fields_ attribute, just
# use the regular __getattribute__
elif name not in [f[0] for f in object.__getattribute__(self, '_fields_')]:
return object.__getattribute__(self, name)
# User requested a field that is in _fields_, and the structure
# has the _access_ attribute. Use the _access_.get_func to
# read bytes and make up the requested return value
else:
# Get registers offset and size for this structure
size_bytes = ctypes.sizeof(self.__class__)
offset = self._base_offset_
# READ the latest value from registers
read_data = self.read_data(self._access_.get_func, offset, size_bytes)
# Create an object with the read value
data = self.__class__.from_buffer(read_data)
value = object.__getattribute__(data, name)
return value
|
edaelli/lone
|
python3/lone/nvme/spec/registers/__init__.py
|
__init__.py
|
py
| 3,603 |
python
|
en
|
code
| 3 |
github-code
|
6
|
13322067740
|
import sys
import getopt
import time
import random
import os
import math
import Checksum
import BasicSender
'''
This is a skeleton sender class. Create a fantastic transport protocol here.
'''
class Sender(BasicSender.BasicSender):
def __init__(self, dest, port, filename, debug=False):
super(Sender, self).__init__(dest, port, filename, debug)
def handle_response(self,response_packet):
if Checksum.validate_checksum(response_packet):
print("recv: %s" % response_packet)
else:
print("recv: %s <--- CHECKSUM FAILED" % response_packet)
# Main sending loop.
def start(self):
seqno = 0
msg = self.infile.read(500).decode()
msg_type = None
while not msg_type == 'end':
next_msg = self.infile.read(500).decode()
msg_type = 'data'
if seqno == 0:
msg_type = 'start'
elif next_msg == "":
msg_type = 'end'
packet = self.make_packet(msg_type,seqno,msg)
self.send(packet.encode())
print("sent: %s" % packet)
##### your code goes here ... #####
# your code should be able to handle packet
# 1. loss
# 2. corruption
# 3. duplication
# 4. delay
# add new functions as necessary
response = self.receive()
resp_str = response.decode()
self.handle_response(resp_str)
##### your code ends here ... #####
msg = next_msg
seqno += 1
self.infile.close()
'''
This will be run if you run this script from the command line. You should not
change any of this; the grader may rely on the behavior here to test your
submission.
'''
if __name__ == "__main__":
def usage():
print("BEARDOWN-TP Sender")
print("-f FILE | --file=FILE The file to transfer; if empty reads from STDIN")
print("-p PORT | --port=PORT The destination port, defaults to 33122")
print("-a ADDRESS | --address=ADDRESS The receiver address or hostname, defaults to localhost")
print("-d | --debug Print debug messages")
print("-h | --help Print this usage message")
try:
opts, args = getopt.getopt(sys.argv[1:],
"f:p:a:d", ["file=", "port=", "address=", "debug="])
except:
usage()
exit()
port = 33122
dest = "localhost"
filename = None
debug = False
for o,a in opts:
if o in ("-f", "--file="):
filename = a
elif o in ("-p", "--port="):
port = int(a)
elif o in ("-a", "--address="):
dest = a
elif o in ("-d", "--debug="):
debug = True
s = Sender(dest,port,filename,debug)
try:
s.start()
except (KeyboardInterrupt, SystemExit):
exit()
|
weichen-ua/MIS543O_Project2
|
Sender.py
|
Sender.py
|
py
| 2,910 |
python
|
en
|
code
| 3 |
github-code
|
6
|
20395581562
|
"""empty message
Revision ID: fbfbb357547c
Revises: 2152db7558b2
Create Date: 2021-05-07 17:56:36.699948
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fbfbb357547c'
down_revision = '2152db7558b2'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task', sa.Column('finished', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('task', 'finished')
    # ### end Alembic commands ###
|
metalsalmon/remote_monitoring
|
migrations/versions/fbfbb357547c_.py
|
fbfbb357547c_.py
|
py
| 653 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38649574551
|
import csv
import unicodedata
import re
def strip_accents(string):
"""
Remove acentos e caracteres especiais de uma string
:param string: String para remoção dos acentos e caracteres especiais
:return String sem acentos e caracteres especiais:
"""
return ''.join(ch for ch in unicodedata.normalize('NFKD', string) if not unicodedata.combining(ch))
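# Example: strip_accents('João Não') -> 'Joao Nao' (NFKD splits each accented
# letter into a base letter plus a combining mark, and the combining marks are dropped).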
def read_file(file_path=None):
cadastros_list = []
with open(file_path, newline='', encoding='utf-8') as csvfile:
cadastros = csv.reader(csvfile, delimiter=',')
for row in cadastros:
cadastros_list.append({
'nome': row[0],
'email': row[1],
'cpf': row[2],
'celular': row[3],
'idade': row[4],
'data_nascimento': row[5],
'data_cadastro': row[6]
})
return cadastros_list
def validate_cadastro(cadastro):
"""
Valida os cadastros (nome, cpf, celular, idade, data_nascimento e data_cadastro) e faz uma proposta de adequação:
Validações:
nome : tamanho máximo de 25 caracteres
email : formato “primeiroNome.ú[email protected]”
cpf : formato "xxx.xxx.xxx-xx" representando um cpf válido
celular : formato "(xx) xxxxx-xxxx"
idade : inteiro
data_nascimento : formato "dd/mm/YYYY"
data_cadastro : formato "dd/mm/YYYY"
return:
dicionario contendo:
nome, email, cpf, celular, idade, data_nascimento, data_cadastro, status, reason
"""
validated_erros = []
name = cadastro['nome']
email = cadastro['email']
cpf = cadastro['cpf']
celular = cadastro['celular']
idade=cadastro['idade']
data_nascimento = cadastro['data_nascimento']
data_cadastro = cadastro['data_cadastro']
cpf_pattern = re.compile(r"^\d{3}.\d{3}.\d{3}-\d{2}$")
celular_pattern = re.compile(r"^\([1-9]{2}\) [9]{1}[0-9]{3}\-[0-9]{5}$")
# the phone number format according to the reference file would be r"^\(\d{2}\) \d{5}-\d{4}$"
data_pattern = re.compile(r"^\d{2}/\d{2}/\d{4}$")
idade_pattern = re.compile(r"^\d{1,3}$")
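# Examples of strings accepted by the patterns above (illustrative):
#   cpf_pattern:     "123.456.789-09"
#   celular_pattern: "(11) 9123-45678"  (note the 4+5 digit split this regex enforces,
#                    unlike the "(xx) xxxxx-xxxx" format quoted in the docstring)
#   data_pattern:    "07/05/2021"
#   idade_pattern:   "42"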
# Name validation
if type(name) is not str:
validated_erros.append(f'nome invalido por nao ser uma str')
if type(name) is str and len(name) >= 25:
validated_erros.append(f'Nome inválido: {name} - Correção: Precisa ter 25 ou menos caracteres')
# Email validation
if type(email) is not str:
validated_erros.append('email invalido por não ser uma str')
name_parts = name.split(' ')
first_name = strip_accents(name_parts[0])
last_name = strip_accents(name_parts[len(name_parts) - 1])
email_pattern = f'{first_name}.{last_name}@gmail.com'
regex = rf"^{email_pattern}$"
pattern = re.compile(regex, re.IGNORECASE | re.UNICODE)
if not pattern.match(email):
validated_erros.append(f'E-mail inválido: {email} - Correção: Precisa estar no formato (primeiroNome.últimoNome@gmail.com) - ' \
f'Sugestão: {email_pattern.lower()}')
# CPF validation
if type(cpf) is not str:
validated_erros.append('cpf invalido por nao ser uma str')
if type(cpf) is str and not re.fullmatch(cpf_pattern, cpf):
validated_erros.append(f'CPF inválido: {cpf} - Correção: Precisa estar no formato (xxx.xxx.xxx-xx)')
# Phone number validation
if type(celular) is not str:
validated_erros.append('celular invalido por nao ser uma str')
if type(celular) is str and not re.fullmatch(celular_pattern, celular):
validated_erros.append(f'Celular inválido: {celular} - Correção: Precisa estar no formato ((xx) xxxxx-xxxx)')
# Birth date validation
if type(data_nascimento) is not str:
validated_erros.append('data de nascimento invalida por nao ser uma str')
if type(data_nascimento) is str and not re.fullmatch(data_pattern, data_nascimento):
validated_erros.append(f'Data de nascimento inválida: {data_nascimento} - Precisa estar no formato (dd/mm/YYYY)')
# Registration date validation
if type(data_cadastro) is not str:
validated_erros.append('data de cadastro invalida por nao ser uma str')
if type(data_cadastro) is str and not re.fullmatch(data_pattern, data_cadastro):
validated_erros.append(f'Data de cadastro inválida: {data_cadastro} - Precisa estar no formato (dd/mm/YYYY)')
# Age validation
if not re.fullmatch(idade_pattern, idade):
validated_erros.append(f'Idade inválida: {idade} - Precisa ser um número inteiro')
if validated_erros !=[] :
cadastro['status']= "inválido"
cadastro['reason'] = validated_erros
else:
cadastro['status']= "válido"
# print(cadastro)
return (cadastro)
def export_validate_registers(cadastros_list):
"""
Exports the validated registrations to a txt file
"""
file = open('resultados.txt', 'w')
for cadastro in cadastros_list[1:]:
vc=validate_cadastro(cadastro)
file.write(str(vc))
print(str(vc))
file.write('\n')
file.close()
if __name__ == '__main__':
cadastros_list = read_file('cadastros.csv')
export_validate_registers(cadastros_list)
|
RayBasilio123/desafio-estagio-desenvolvimento-2022Q1-Ray_da_Silva_Basilio
|
main.py
|
main.py
|
py
| 5,381 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
72824107709
|
# [변수와 데이터타입]
# 주석(comment) : # 입력 또는 ctrl + / 로 표현한다. 메모같은 것
# 변수(variable) : 변하는 수
id = "pencil"
print(id)
print("뭐로 쓸까요? - ",id,"으로 쓰죠.")
# 숫자 변수 : ""를 입력하지 않으며, 숫자. 연산가능. 데이터타입이 숫자인 변수.
num = 10
plus = 1
minus = 2
multiply = 3
divide = 5
print(num + plus)
print(num - minus)
print(num * multiply) # 30
print(num / divide) # 2
# int 정수 : 1, 2, 3, 4
# float 실수 : 1.0, 1.1 ...
num2 = 1.0
print(num2 + 2)
a = 5 # int
b = 1.1 # float
c = 'hello' # string
# print(c/b) # error
a = a + 2
a = a + 3
a = ((a-5)*3/5) + 2
print(a)
print(c,a+b)
# 문자열 변수 : ""안에 입력하며, 문자. 연산불가
a = 'hello' # character
b = "world!" # string
c = "안녕하세요, 저는 'D.Yang'입니다."
d = """
안녕하세요, 저는 "D.Yang"입니다. """ # long string
e = """
안녕하세요,
저는 "D.Yang"입니다.
긴 글을 작성할 때도 이런식으로 스트링 지정을
해줄 수 있습니다.
줄 바꿈이 적용이 되니깐요.
"""
print(a,b,c,d,e)
# 논리 변수 boolean
a = 0
b = 2
c = bool(a < b)
print(c) # true
# null값(비어있는 값) = false, 그 외에는 전부 true
a = bool(a) # false
b = bool("") # false
c = bool([]) # false
d = bool(None) # false # null
print(a, b, c, d)
# 배열 변수 list > dict > tuple
# list 리스트 a = []
# dict 딕셔너리 a ={}
# tuple 튜플 a = ()
a = [1, 2, 3]
b = {1, 2, 3}
c = (1, 2, 3)
print(a)
print(b)
print(c)
a = [{1, 2, 3}, {4, 5, 6}] # ok 2중배열!
# 총정리! : 변수의 데이터타입
# int 정수 1
# float 실수 0.1, 1.0
# string 문자열 "", ''
# boolean 참, 거짓 true, flase
# list 리스트 a = []
# dict 딕셔너리 a ={}
# tuple 튜플 a = ()
# 배열의 인덱스 : 0부터 시작
rainbow = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'purple']
first = rainbow[0]
last = rainbow[-1] # = rainbow[6] 끝에서 첫번째
# print(first)
# print(last)
print('무지개 첫번째 색깔은 {}, 마지막 색깔은 {}이네요.'.format(first, last))
# 배열은 arrayList 또는 array라고 불려요. 이제는 array라고 할께요.
# append : array 추가
list1 = [1, 2]
list1.append(3) # 새 배열 인덱스를 생성하면서 그 안에 값 3을 추가
list1.append(4)
list1.append(5)
print(list1)
list2 = []
a = 1
b = 2
list2.append(a)
list2.append(b)
print(list2)
list3 = []
list3.append(list1)
list3.append(list1)
print(list3) # list3 == [[1,2,3,4,5], [1,2,3,4,5]] # 이중배열
list4 = []
list4 = list4 + [1, 2]
list4 = list4 + [3, 4, 5]
print(list4)
# array 삭제
list1 = [1,2,3,4,5,6,7,8,9,10]
del list1[-1] # 인덱스를 찾아서 삭제
list1.remove(8) # 값을 찾아서 삭제
print(list1)
# array의 종류
list1 = [1, 2, 3, 4] # 가장 일반적인 배열
dict1 = { 'id': 'lala', 'password': 'lalala', 'name': 'lee' } # Dictionary, 사전같이 "키 : 값" 형태의 배열
tuple1 = (1, 4) # 짝끼리 묶는 느낌의 배열
# array와 if문의 조합 : "검색"
list1 = [1, 2, 3]
if 4 in list1:
print('있네')
else:
print('없네')
|
Azumait/grammar1
|
document1_수집/4_students_list1/교육완료자/전소연 파이썬/python jsy/1.py
|
1.py
|
py
| 3,141 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
22877254400
|
import matplotlib.pyplot as plt
import math
for number in range(0,15,5):
formatString = "%0.1f" % (number/10.0)
filename = "data/stats_2000n_"+formatString+"00000th_"+str(int(number/10) + 1)+"00000times_0.600000kmin_0.200000kstep_2.000000kmax_10statsize.dat"
f = open(filename, 'r')
headers = f.readline().replace('\n','').split('\t')
x = []
y = []
for line in f:
datum = line.replace('\n','').split('\t')
x.append(float(datum[0]))
y.append(float(datum[1]))
f.close()
if number == 0:
plt.scatter(x,y, c='r', marker='s', label="theta = {0}".format(formatString))
elif number == 5:
plt.scatter(x,y, c='g', marker='^', label="theta = {0}".format(formatString))
else:
plt.scatter(x,y, c='b', marker='o', label="theta = {0}".format(formatString))
plt.title("Size of Largest Component: theta = 0, 0.5, 1")
plt.xlabel("Average Degree")
plt.ylabel("Fraction of Vertices in Largest Component")
##plt.xlim(0, 2)
##plt.ylim(0,1)
plt.legend(loc=4)
plt.savefig("data/degree_model_lc_2000n.pdf")
|
vitchyr/Research-in-Math
|
degree_model/data_analysis.py
|
data_analysis.py
|
py
| 1,119 |
python
|
en
|
code
| 4 |
github-code
|
6
|
25144655510
|
# pylint: disable=W0611, E0401
"""
Main goal of this module is to scrape and parse data from "visityerevan.am" website
"""
import logging
import sys
from dataclasses import dataclass
from urllib.parse import urljoin
from httpx import Client
from selectolax.parser import HTMLParser, Node
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARNING)
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d:%(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
"AppleWebKit/537.36 (KHTML, like Gecko) " +
"Chrome/109.0.0.0 Safari/537.36"}
@dataclass
class Event:
""" Class contains all info about event """
title: str
description: str
url_to_original: str
time: str
price: str
img: str
@dataclass
class Response:
""" Class contains html of page and info about existing of the next page """
body_html: HTMLParser
status_code: int
def serialize_event(event):
""" Resulting format for each event """
return {
"id": "work in progress...",
"type": "parsed_v1",
"parserName": "visityerevan",
"title": event.title,
"description": event.description,
"date": event.time,
"durationInSeconds": 0,
"location": {
"country": "Armenia",
"city": "Erevan",
},
"image": event.img,
"price": {
"amount": event.price,
"currency": "AMD"
},
"timezone": {
"timezoneName": "AMT",
"timezoneOffset": "UTC +4",
},
"url": event.url_to_original,
}
def get_page(client: Client, url: str) -> Response:
""" Scrape html from page and check if next pages appears """
resp = client.get(url, headers=HEADERS)
html = HTMLParser(resp.text)
return Response(body_html=html, status_code=resp.status_code)
def get_pages_amount(client: Client, url: str) -> int:
""" func to get number of pages with events """
resp = client.get(url, headers=HEADERS)
html = HTMLParser(resp.text)
pages_amount = html.css("ul[class='pagination justify-content-center'] >" +
"li[class='page-item']")[-1:][0].text()
return int(pages_amount)
def is_valid(data):
""" Helps us to catch website's structure changes """
if data is None:
logger.warning(
"Seems that website changed structure. Please recheck code and website")
return False
else:
return True
def parse_detail(blocks: list) -> list:
""" Clean and prepare all data that we need """
result = []
# In this loop we will extract all
# Info that we can from each event's div
for block in blocks:
# Extract and prepare "time"
month_day = block.css_first(
"div[class='col-12 mt-n1'] > div")
# Need to validate data on each parsing attempt
if is_valid(month_day):
month_day = month_day.text().replace('\n', '').strip()
time = block.css_first(
"div[class='text-grey text-md mb-2']")
if is_valid(time):
time = time.text().replace('\n', '').strip().split(' ')
cleaned_time = f"{month_day} {time[-1:][0]}"
else:
cleaned_time = None
# Extract and prepare "description"
description = block.css_first("p")
if is_valid(description):
description = description.text().strip()
# Clean and prepare "url"
url = block.css_first("a").attrs["href"]
if is_valid(url):
url = "https://www.visityerevan.am" + url
# Extract price
price = ''
cards = block.css("p.card-text > span")
if len(cards) == 0:
logger.warning(
"Seems that website changed structure. Please recheck code and website")
else:
for card in cards:
card = card.text()
if "AMD" in card:
price = card.replace("AMD", "").strip()
else:
price = "no info"
# Extract img link
img = block.css_first("img").attrs["src"]
if is_valid(img):
img = "https://www.visityerevan.am" + img
# There is not need in cleaning "title"
# With data we have create a new event object
event = Event(
title=block.css_first("h5").text(),
description=description,
url_to_original=url,
time=cleaned_time,
price=price,
img=img
)
result.append(serialize_event(event))
return result
def scrape_blocks(html: HTMLParser) -> list:
""" Getting all divs with information from page """
blocks = html.css("div[class='row px-lg-7']" +
" > div")
return blocks
def pagination_loop(client: Client) -> list:
""" Loop through all pages """
url = "https://www.visityerevan.am/browse/things-to-do-events/ru/"
# How many pages we will scrape
pages_amount = get_pages_amount(client, url)
# Blocks contains all divs that we need
blocks = []
# Iterating through all pages
for page_number in range(1, pages_amount + 1):
# Mutating a url to get page with current page number
url = urljoin(url, f"?sel_filters=¤t_page={page_number}")
# Get object with scraped html markup from current page
page = get_page(client, url)
# Grab all divs with events data and append to list
blocks += scrape_blocks(page.body_html)
# Scraping is done, time to close session
client.close()
return blocks
async def scrape_website() -> list:
""" Main function which contains all logic """
# Start a new session
client = Client()
# Create list with all divs which contain info about events
all_blocks = pagination_loop(client)
# Parsing data from divs
parsed_data = parse_detail(all_blocks)
return parsed_data
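# scrape_website is a coroutine; a minimal standalone entry point (assumed, not part
# of the original module) would be:
#   import asyncio
#   events = asyncio.run(scrape_website())
#   print(len(events))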
|
EPguitars/events-parsing-archive
|
standalone/scraper_visityerevan.py
|
scraper_visityerevan.py
|
py
| 6,391 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43462667811
|
import pyshorteners
def shorten(url):
    link = pyshorteners.Shortener()
    return link.tinyurl.short(url)
if __name__ == "__main__":
    url = input("Enter link for shortening: ")
    print(f"\n {shorten(url)}")
# https://github.com/urmil89
|
urmil404/url-Sorter
|
main.py
|
main.py
|
py
| 245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71861349309
|
import os
import sys
# 修改工作目录为程序所在目录,这样通过注册表实现开机自动启动时也能获取到正确的工作目录
# PS: 放到这个地方,是确保在所有其他初始化代码之前先修改掉工作目录
dirpath = os.path.dirname(os.path.realpath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(dirpath)
import argparse
import datetime
import time
from multiprocessing import freeze_support
import psutil
import ga
from check_first_run import check_first_run_async
from config import config, load_config
from db_def import try_migrate_db
from first_run import is_weekly_first_run
from log import color, log_directory, logger
from main_def import (
auto_send_cards,
check_all_skey_and_pskey,
check_djc_role_binding,
check_proxy,
check_update,
get_user_buy_info,
print_update_message_on_first_run_new_version,
run,
sas,
show_ask_message_box,
show_buy_info,
show_extra_infos,
show_lottery_status,
show_multiprocessing_info,
show_notices,
show_pay_info,
show_recommend_reward_tips,
try_auto_update,
try_auto_update_ignore_permission_on_special_case,
try_join_xinyue_team,
try_load_old_version_configs_from_user_data_dir,
try_report_usage_info,
try_save_configs_to_user_data_dir,
try_take_dnf_helper_chronicle_task_awards_again_after_all_accounts_run_once,
try_take_xinyue_team_award,
)
from pool import close_pool, init_pool
from qq_login import QQLogin
from show_usage import show_usage
from update import notify_manual_check_update_on_release_too_long
from usage_count import increase_counter
from util import (
MiB,
async_call,
async_message_box,
change_console_window_mode_async,
change_title,
clean_dir_to_size,
disable_pause_after_run,
disable_quick_edit_mode,
is_run_in_github_action,
kill_other_instance_on_start,
pause,
remove_old_version_portable_chrome_files,
show_head_line,
show_unexpected_exception_message,
)
from version import author, now_version, ver_time
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--no_max_console", default=False, action="store_true", help="是否不将窗口调整为最大化")
parser.add_argument(
"--wait_for_pid_exit",
default=0,
type=int,
help="启动后是否等待对应pid的进程结束后再启动,主要用于使用配置工具启动小助手的情况,只有配置工具退出运行,自动更新才能正常进行",
)
parser.add_argument("--max_wait_time", default=5, type=int, help="最大等待时间")
args = parser.parse_args()
return args
def prepare_env():
args = parse_args()
# 最大化窗口
if not args.no_max_console:
logger.info("尝试调整窗口显示模式,打包exe可能会运行的比较慢")
change_console_window_mode_async()
if args.wait_for_pid_exit != 0:
# 通过配置工具打开
increase_counter(ga_category="open_by", name="config_tool", ga_misc_params={"dr": "config_tool"})
logger.info(f"等待pid为{args.wait_for_pid_exit}的配置工具退出运行,从而确保可能有的自动更新能够正常进行,最大将等待{args.max_wait_time}秒")
wait_time = 0.0
retry_time = 0.1
while wait_time <= args.max_wait_time:
if not psutil.pid_exists(args.wait_for_pid_exit):
logger.info("配置工具已成功退出,将开始运行小助手~")
break
time.sleep(retry_time)
wait_time += retry_time
else:
# 直接打开
increase_counter(ga_category="open_by", name="directly", ga_misc_params={"dr": "directly"})
def main():
try_migrate_db()
increase_counter(name="run/begin", ga_type=ga.GA_REPORT_TYPE_PAGE_VIEW)
prepare_env()
# 启动时检查是否需要同步本机数据目录备份的旧版本配置
try_load_old_version_configs_from_user_data_dir()
change_title()
print_update_message_on_first_run_new_version()
logger.warning(f"开始运行DNF蚊子腿小助手,ver={now_version} {ver_time},powered by {author}")
logger.warning(color("fg_bold_cyan") + "如果觉得我的小工具对你有所帮助,想要支持一下我的话,可以帮忙宣传一下或打开付费指引/支持一下.png,扫码打赏哦~")
# 读取配置信息
load_config("config.toml", "config.toml.local")
cfg = config()
if len(cfg.account_configs) == 0:
raise Exception("未找到有效的账号配置,请检查是否正确配置。ps:多账号版本配置与旧版本不匹配,请重新配置")
try_auto_update_ignore_permission_on_special_case(cfg)
notify_manual_check_update_on_release_too_long(cfg.common)
check_proxy(cfg)
try_report_usage_info(cfg)
if cfg.common.disable_cmd_quick_edit:
disable_quick_edit_mode()
show_notices()
if cfg.common.allow_only_one_instance:
logger.info("当前仅允许单个实例运行,将尝试干掉其他实例~")
async_call(kill_other_instance_on_start)
else:
logger.info("当前允许多个实例同时运行~")
pool_size = cfg.get_pool_size()
init_pool(pool_size)
change_title(multiprocessing_pool_size=pool_size, enable_super_fast_mode=cfg.common.enable_super_fast_mode)
show_multiprocessing_info(cfg)
account_names = []
for account_cfg in cfg.account_configs:
account_names.append(account_cfg.name)
logger.info(f"当前共配置{len(account_names)}个账号,具体如下:{account_names}")
clean_dir_to_size(log_directory, cfg.common.max_logs_size * MiB, cfg.common.keep_logs_size * MiB)
clean_dir_to_size(f"utils/{log_directory}", cfg.common.max_logs_size * MiB, cfg.common.keep_logs_size * MiB)
current_chrome_version = QQLogin(cfg.common).get_chrome_major_version()
remove_old_version_portable_chrome_files(current_chrome_version)
show_ask_message_box(cfg)
# 检查是否有更新,用于提示未购买自动更新的朋友去手动更新~
if cfg.common.check_update_on_start:
check_update(cfg)
check_all_skey_and_pskey(cfg)
check_djc_role_binding()
# 确保道聚城绑定OK后在活动运行同时进行异步的弹窗提示
check_first_run_async(cfg)
# 挪到所有账号都登陆后再尝试自动更新,从而能够判定是否已购买DLC
try_auto_update(cfg)
# 查询付费信息供后面使用
show_head_line("查询付费信息")
logger.warning("开始查询付费信息,请稍候~")
user_buy_info = get_user_buy_info(cfg.get_qq_accounts())
show_buy_info(user_buy_info, cfg, need_show_message_box=False)
sas(cfg, "启动时展示账号概览", user_buy_info)
# 预先尝试创建和加入固定队伍,从而每周第一次操作的心悦任务也能加到队伍积分中
try_join_xinyue_team(cfg, user_buy_info)
# 正式进行流程
run(cfg, user_buy_info)
try_take_dnf_helper_chronicle_task_awards_again_after_all_accounts_run_once(cfg, user_buy_info)
# 尝试领取心悦组队奖励
try_take_xinyue_team_award(cfg, user_buy_info)
# # 尝试派赛利亚出去打工
# try_xinyue_sailiyam_start_work(cfg)
# 活动开启关闭时调这个开关即可
enable_card_lottery = True
if enable_card_lottery:
auto_send_cards(cfg)
show_extra_infos(cfg)
sas(cfg, "运行完毕展示账号概览", user_buy_info)
if enable_card_lottery:
show_lottery_status("卡片赠送完毕后展示各账号抽卡卡片以及各礼包剩余可领取信息", cfg, need_show_tips=True)
show_pay_info(cfg)
show_recommend_reward_tips(user_buy_info)
# 显示小助手的使用概览
if cfg.common._show_usage:
show_usage()
# 运行结束展示下多进程信息
show_multiprocessing_info(cfg)
# 检查是否有更新,用于提示未购买自动更新的朋友去手动更新~
if cfg.common.check_update_on_end:
check_update(cfg)
# 运行完毕备份配置到本机数据目录
try_save_configs_to_user_data_dir()
increase_counter(name="run/end", ga_type=ga.GA_REPORT_TYPE_PAGE_VIEW)
show_head_line("运行完毕")
def main_wrapper():
freeze_support()
logger.info(color("bold_green") + f"已将工作目录设置为小助手所在目录:{dirpath},之前为:{old_path}")
try:
run_start_time = datetime.datetime.now()
main()
total_used_time = datetime.datetime.now() - run_start_time
logger.warning(color("fg_bold_yellow") + f"运行完成,共用时{total_used_time}")
# 如果总用时太高的情况时,尝试提示开启多进程和超快速模式
cfg = config()
if total_used_time > datetime.timedelta(minutes=10) and (
not cfg.common.enable_multiprocessing or not cfg.common.enable_super_fast_mode
):
msg = (
f"当前累计用时似乎很久({total_used_time}),是否要尝试多进程和超快速模式?\n"
"多进程模式下,将开启多个进程并行运行不同账号的领取流程\n"
"额外开启超快速模式,会进一步将不同账号的不同活动都异步领取,进一步加快领取速度\n"
"\n"
"如果需要开启,请打开配置工具,在【公共配置】tab中勾选【是否启用多进程功能】和【是否启用超快速模式(并行活动)】"
)
logger.warning(color("bold_cyan") + msg)
if is_weekly_first_run("用时过久提示"):
async_message_box(msg, "用时过久", print_log=False)
# 按照分钟级别来统计使用时长
total_minutes = int(total_used_time.total_seconds()) // 60
increase_counter(ga_category="run_used_time_minutes", name=total_minutes)
except Exception as e:
show_unexpected_exception_message(e)
# 如果在github action,则继续抛出异常
if is_run_in_github_action():
raise e
finally:
# 暂停一下,方便看结果
if not disable_pause_after_run() and not is_run_in_github_action():
async_call_close_pool_after_some_time()
pause()
close_pool()
def async_call_close_pool_after_some_time():
def _close():
wait_time = 10 * 60
logger.info(f"{wait_time} 秒后将自动关闭进程池,方便有足够时间查看进程池中触发的弹窗信息")
time.sleep(wait_time)
close_pool()
async_call(_close)
if __name__ == "__main__":
main_wrapper()
|
fzls/djc_helper
|
main.py
|
main.py
|
py
| 10,763 |
python
|
zh
|
code
| 319 |
github-code
|
6
|
38750349973
|
from imagenet_c import *
from torchvision.datasets import ImageNet
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import os
import torch
import gorilla
DATA_ROOT = './data'
CORRUPTION_PATH = './corruption'
corruption_tuple = (gaussian_noise, shot_noise, impulse_noise, defocus_blur,
glass_blur, motion_blur, zoom_blur, snow, frost, fog,
brightness, contrast, elastic_transform, pixelate, jpeg_compression)
corruption_dict = {corr_func.__name__: corr_func for corr_func in corruption_tuple}
class corrupt(object):
def __init__(self, corruption_name, severity=5):
self.corruption_name = corruption_name
self.severity = severity
return
def __call__(self, x):
# x: PIL.Image
x_corrupted = corruption_dict[self.corruption_name](x, self.severity)
return np.uint8(x_corrupted)
def __repr__(self):
return "Corruption(name=" + self.corruption_name + ", severity=" + str(self.severity) + ")"
if os.path.exists(os.path.join(DATA_ROOT, CORRUPTION_PATH)) is False:
os.mkdir(os.path.join(DATA_ROOT, CORRUPTION_PATH))
for corruption in corruption_dict.keys():
if os.path.exists(os.path.join(DATA_ROOT, CORRUPTION_PATH, corruption + '.pth')):
continue
print(corruption)
val_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
corrupt(corruption, 5)
])
target_dataset = ImageNet(DATA_ROOT, 'val', transform=val_transform)
target_dataloader = DataLoader(target_dataset, batch_size=256, shuffle=False, drop_last=False, num_workers=2)
datas = []
for batch in gorilla.track(target_dataloader):
datas.append(batch[0])
datas = torch.concat(datas)
torch.save(datas, os.path.join(DATA_ROOT, CORRUPTION_PATH, corruption + '.pth'))
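# The saved tensor for a given corruption can later be reloaded with, for example:
#   datas = torch.load(os.path.join(DATA_ROOT, CORRUPTION_PATH, 'gaussian_noise.pth'))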
|
Gorilla-Lab-SCUT/TTAC
|
imagenet/utils/create_corruption_dataset.py
|
create_corruption_dataset.py
|
py
| 1,892 |
python
|
en
|
code
| 37 |
github-code
|
6
|
71634339387
|
from flask import Flask
import requests
URL="https://en.wikipedia.org/w/api.php"
app = Flask(__name__)
#configuring the server name as required
app.config['SERVER_NAME'] = "wiki-search.com:5000"
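# With SERVER_NAME set, Flask only matches the subdomain route below when the
# request's Host header is "<query>.wiki-search.com:5000". For local testing this
# can be simulated (illustrative, assuming the dev server started by this script):
#   curl -H "Host: python.wiki-search.com:5000" http://127.0.0.1:5000/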
@app.route("/")
def home():
return 'Enter your query as the subdomain.'
@app.route('/', subdomain="<SEARCHPAGE>")
#function that searches for the URL
def url_search(SEARCHPAGE):
if SEARCHPAGE is None:
return 'Enter your search query as the subdomain'
title_data = requests.get(URL, params={
"action": "query",
"format": "json",
"list": "search",
"srsearch": SEARCHPAGE}).json()
#creating a list named titles and appending the titles of every search result into it
titles = []
for title in title_data['query']['search']:
titles.append(title['title'])
#creating a list named urls to which the url of every title is appended
urls = []
for title in titles:
url_data = requests.get(URL, params={
"action": "query",
"format": "json",
"titles": title,
"prop": "info",
"inprop": "url"}).json()
for key in url_data['query']['pages']:
urls.append(url_data['query']['pages'][key]['fullurl'])
#creating a dictionary that contains the links appended as a list to the key named links
results={"links":urls}
return results
if __name__ == '__main__':
app.run(debug=True,port=5000)
|
jubinjacob93/Opensearch-Server
|
wiksearch.py
|
wiksearch.py
|
py
| 1,513 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20801954442
|
import math
from os import TMP_MAX
MAX = 700
lookup = [[0 for i in range(100001)]
for j in range(20)]
def buildSparseTable(arr, n):
for i in range(0, n):
lookup[i][0] = arr[i]
j = 1
while (1 << j) <= n:
i = 0
while (i + (1 << j) - 1) < n:
if (lookup[i][j - 1] >
lookup[i + (1 << (j - 1))][j - 1]):
lookup[i][j] = lookup[i][j - 1]
else:
lookup[i][j] = \
lookup[i + (1 << (j - 1))][j - 1]
i += 1
j += 1
def query(L, R):
j = int(math.log2(R - L + 1))
if lookup[L][j] >= lookup[R - (1 << j) + 1][j]:
return lookup[L][j]
else:
return lookup[R - (1 << j) + 1][j]
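# query(L, R) answers a range-maximum query in O(1): j is the largest power of two
# with 2**j <= R - L + 1, and the two precomputed windows of length 2**j starting
# at L and ending at R together cover [L, R]; taking the max of both is safe
# because max is idempotent on the overlap.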
n = int(input())
nums = [int(i) for i in input().split()]
num_q = int(input())
queries = []
sparse_table = buildSparseTable(nums, len(nums))
for i in range(num_q):
l, r = [int(i) for i in input().split()]
if l > r:
tmp = l
l = r
r = tmp
print(query(l-1, r-1))
|
michbogos/olymp
|
eolymp/summation/RMQ.py
|
RMQ.py
|
py
| 1,109 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22477012107
|
import pandas as pd
import timeit
pd.set_option('mode.chained_assignment', None)
def main():
print('ok')
def sortim(df):
new_cols = ['PERFIL','BANDEIRA','LOCAL_NOVO','LOCAL','TAMANHO','REGIÃO','REGIAO 2','CONCATENADO','CONCATENADO 2','CONCATENADO 3','CONCATENADO 4','CONCATENADO 5']
for i in new_cols:
df[i] = ''
print('computado!')
for i in range(len(df)):
if 'RUA' in df["DESCTAMANHO"][i]:
df['LOCAL_NOVO'][i] = "CONVENCIONAL"
df['LOCAL'][i] = "RUA"
elif 'SHOPPING' in df["DESCTAMANHO"][i]:
df['LOCAL_NOVO'][i] = "CONVENCIONAL"
df['LOCAL'][i] = "SHOPPING"
elif 'QUIOSQUE' in df["DESCTAMANHO"][i]:
df['LOCAL_NOVO'][i] = "QUISOQUE"
df['LOCAL'][i] = "QUISOQUE"
else:
df['LOCAL_NOVO'][i] = "COMPACTA"
df['LOCAL'][i] = "COMPACTA"
if '-CB-' in df["DESCTAMANHO"][i]:
df['BANDEIRA'][i] = "CB"
else:
df['BANDEIRA'][i] = "PF"
if '-CB-' in df["DESCTAMANHO"][i]:
df['BANDEIRA'][i] = "CB"
else:
df['BANDEIRA'][i] = "PF"
if '-PERFIL 1-' in df["DESCTAMANHO"][i]:
df['PERFIL'][i] = "PERFIL 1"
elif '-PERFIL 2-' in df["DESCTAMANHO"][i]:
df['PERFIL'][i] = "PERFIL 2"
elif '-PERFIL 3-' in df["DESCTAMANHO"][i]:
df['PERFIL'][i] = "PERFIL 3"
elif '-PERFIL 4-' in df["DESCTAMANHO"][i]:
df['PERFIL'][i] = "PERFIL 4"
else:
df['PERFIL'][i] = "PERFIL 5"
if '-PP-' in df["DESCTAMANHO"][i]:
df['TAMANHO'][i] = "PP"
elif '-P-' in df["DESCTAMANHO"][i]:
df['TAMANHO'][i] = "P"
elif '-M-' in df["DESCTAMANHO"][i]:
df['TAMANHO'][i] = "M"
elif '-G-' in df["DESCTAMANHO"][i]:
df['TAMANHO'][i] = "G"
elif '-GG-' in df["DESCTAMANHO"][i]:
df['TAMANHO'][i] = "GG"
else:
df['TAMANHO'][i] = "MEGA"
if '-RJ/ES' in df["DESCTAMANHO"][i]:
df['REGIÃO'][i] = "RJ/ES"
df['REGIAO 2'][i] = "RJ/ES"
elif '-SPI' in df["DESCTAMANHO"][i]:
df['REGIÃO'][i] = "SPI"
df['REGIAO 2'][i] = "SPI"
elif '-MG' in df["DESCTAMANHO"][i]:
df['REGIÃO'][i] = "MG"
df['REGIAO 2'][i] = "MG"
elif '-SUL' in df["DESCTAMANHO"][i]:
df['REGIÃO'][i] = "SUL"
df['REGIAO 2'][i] = "SUL"
elif '-GDE SP' in df["DESCTAMANHO"][i]:
df['REGIÃO'][i] = "GDE SP"
df['REGIAO 2'][i] = "GDE SP"
elif '-CO/N' in df["DESCTAMANHO"][i]:
df['REGIÃO'][i] = "CO/N"
df['REGIAO 2'][i] = "CON/N"
else:
df['REGIÃO'][i] = "NE"
df['REGIAO 2'][i] = "NE"
df['CONCATENADO'][i] = f"{df['LOCAL_NOVO'][i]}-{df['PERFIL'][i]}-{df['TAMANHO'][i]}"
df['CONCATENADO 2'][i] = f"{df['LOCAL_NOVO'][i]}-{df['PERFIL'][i]}-{df['REGIÃO'][i]}-{df['TAMANHO'][i]}"
df['CONCATENADO 3'] = df['LOCAL_NOVO'].map(str) + '-' + df['PERFIL'].map(str) + '-' + df['REGIÃO'].map(
str)
df['CONCATENADO 4'] = df['LOCAL'].map(str) + '-' + df['TAMANHO'].map(str)
df['CONCATENADO 5'] = df['LOCAL_NOVO'].map(str) + '-' + df['PERFIL'].map(str) + '-' + df['BANDEIRA'].map(
str) + '-' + df['TAMANHO']
df['CONCATENADO 6'] = df['PERFIL'].map(str) + '-' + df['BANDEIRA'].map(str) + '-' + df['LOCAL'].map(
str) + '-' + df['REGIÃO']
colorder=['CODPUBLICO','DESCPUBLICO','CODSECAO','DESCSECAO','CODSORTIMENTO','CODTAMANHO','DESCTAMANHO','PERFIL','BANDEIRA','LOCAL_NOVO','LOCAL','TAMANHO','REGIÃO','REGIAO 2','CONCATENADO','CONCATENADO 2','CONCATENADO 3','CONCATENADO 4','CONCATENADO 5','CONCATENADO 6','EMPRESA','FILIAL','TIPOPUBLICO','SOLICVOLTDIF']
df = df[colorder]
return df
def sortemp(df):
new_cols = ['PERFIL', 'BANDEIRA', 'LOCAL_NOVO', 'LOCAL', 'TAMANHO', 'REGIÃO', 'REGIAO 2', 'CONCATENADO',
'CONCATENADO 2']
start = timeit.default_timer()
for i in new_cols:
df[i] = ' '
df = df.reindex()
print('novas colunas')
df['LOCAL_NOVO'] = df['DESCTAMANHO'].apply(lambda x: 'CONVENCIONAL' if 'RUA' in x else x)
df['LOCAL_NOVO'] = df['LOCAL_NOVO'].apply(lambda x: 'CONVENCIONAL' if 'SHOPPING' in x else x)
df['LOCAL_NOVO'] = df['LOCAL_NOVO'].apply(lambda x: 'QUIOSQUE' if 'QUIOSQUE' in x else x)
df['LOCAL_NOVO'] = df['LOCAL_NOVO'].apply(lambda x: 'COMPACTA' if 'COMPACTA' in x else x)
df['LOCAL_NOVO'] = df['LOCAL_NOVO'].apply(lambda x: 'DIGITAL' if 'DIGITAL' in x else x)
df['LOCAL'] = df['DESCTAMANHO'].apply(lambda x: 'RUA' if 'RUA' in x else x)
df['LOCAL'] = df['LOCAL'].apply(lambda x: 'SHOPPING' if 'SHOPPING' in x else x)
df['LOCAL'] = df['LOCAL'].apply(lambda x: 'QUIOSQUE' if 'QUIOSQUE' in x else x)
df['LOCAL'] = df['LOCAL'].apply(lambda x: 'COMPACTA' if 'COMPACTA' in x else x)
df['LOCAL'] = df['LOCAL'].apply(lambda x: 'DIGITAL' if 'DIGITAL' in x else x)
df['BANDEIRA'] = df['DESCTAMANHO'].apply(lambda x: 'CB' if '-CB-' in x else x)
df['BANDEIRA'] = df['BANDEIRA'].apply(lambda x: 'PF' if '-PF-' in x else x)
df['PERFIL'] = df['DESCTAMANHO'].apply(lambda x: 'PERFIL 1' if '-PERFIL 1-' in x else x)
df['PERFIL'] = df['PERFIL'].apply(lambda x: 'PERFIL 2' if '-PERFIL 2-' in x else x)
df['PERFIL'] = df['PERFIL'].apply(lambda x: 'PERFIL 3' if '-PERFIL 3-' in x else x)
df['PERFIL'] = df['PERFIL'].apply(lambda x: 'PERFIL 4' if '-PERFIL 4-' in x else x)
df['PERFIL'] = df['PERFIL'].apply(lambda x: 'PERFIL 5' if '-PERFIL 5-' in x else x)
df['TAMANHO'] = df['DESCTAMANHO'].apply(lambda x: 'PP' if '-PP-' in x else x)
df['TAMANHO'] = df['TAMANHO'].apply(lambda x: 'P' if '-P-' in x else x)
df['TAMANHO'] = df['TAMANHO'].apply(lambda x: 'M' if '-M-' in x else x)
df['TAMANHO'] = df['TAMANHO'].apply(lambda x: 'G' if '-G-' in x else x)
df['TAMANHO'] = df['TAMANHO'].apply(lambda x: 'GG' if '-GG-' in x else x)
df['TAMANHO'] = df['TAMANHO'].apply(lambda x: 'MEGA' if '-MEGA-' in x else x)
df['REGIÃO'] = df['DESCTAMANHO'].apply(lambda x: 'RJ/ES' if '-RJ/ES' in x else x)
df['REGIÃO'] = df['REGIÃO'].apply(lambda x: 'SPI' if '-SPI' in x else x)
df['REGIÃO'] = df['REGIÃO'].apply(lambda x: 'MG' if '-MG' in x else x)
df['REGIÃO'] = df['REGIÃO'].apply(lambda x: 'SUL' if '-SUL' in x else x)
df['REGIÃO'] = df['REGIÃO'].apply(lambda x: 'GDE SP' if '-GDE SP' in x else x)
df['REGIÃO'] = df['REGIÃO'].apply(lambda x: 'CO/N' if '-CO/N' in x else x)
df['REGIÃO'] = df['REGIÃO'].apply(lambda x: 'NE' if '-NE' in x else x)
df['REGIAO 2'] = df['DESCTAMANHO'].apply(lambda x: 'RJ/ES' if '-RJ/ES' in x else x)
df['REGIAO 2'] = df['REGIAO 2'].apply(lambda x: 'SPI' if '-SPI' in x else x)
df['REGIAO 2'] = df['REGIAO 2'].apply(lambda x: 'MG' if '-MG' in x else x)
df['REGIAO 2'] = df['REGIAO 2'].apply(lambda x: 'SUL' if '-SUL' in x else x)
df['REGIAO 2'] = df['REGIAO 2'].apply(lambda x: 'GDE SP' if '-GDE SP' in x else x)
df['REGIAO 2'] = df['REGIAO 2'].apply(lambda x: 'CO/N' if '-CO/N' in x else x)
df['REGIAO 2'] = df['REGIAO 2'].apply(lambda x: 'NE' if '-NE' in x else x)
df['CONCATENADO'] = df['LOCAL_NOVO'].map(str) + '-' + df['PERFIL'].map(str) + '-' + df['TAMANHO']
df['CONCATENADO 2'] = df['LOCAL_NOVO'].map(str) + '-' + df['PERFIL'].map(str) + '-' + df['REGIÃO'].map(
str) + '-' + df['TAMANHO']
colorder = ['CODSORTIMENTO', 'CODSECAO', 'DESCSECAO', 'CODTAMANHO', 'DESCTAMANHO', 'CODPUBLICO',
'DESCPUBLICO',
'CODMERCADORIA', 'DESCMERCADORIA', 'MERCCONJ', 'PRIORIDADE', 'QTDE', 'VOLTAGEM',
'VIGENCIAINICIO',
'VIGENCIAFIM', 'PERFIL', 'BANDEIRA', 'LOCAL', 'LOCAL_NOVO', 'TAMANHO', 'REGIÃO', 'STATUS',
'DESCMARCA', 'DESCESPECIE', 'CONCATENADO', 'CONCATENADO 2']
df = df[colorder]
return df
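def _local_novo_vectorized(df):
    # Hedged sketch, not used by the app above: a vectorized alternative to the row loop
    # in sortim() for the LOCAL_NOVO column. Assumes DESCTAMANHO is a plain string column.
    import numpy as np
    conds = [df['DESCTAMANHO'].str.contains('RUA', na=False),
             df['DESCTAMANHO'].str.contains('SHOPPING', na=False),
             df['DESCTAMANHO'].str.contains('QUIOSQUE', na=False)]
    return np.select(conds, ['CONVENCIONAL', 'CONVENCIONAL', 'QUIOSQUE'], default='COMPACTA')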
|
zameethi/Flask_Rel_New
|
apps/sortemp_sortim.py
|
sortemp_sortim.py
|
py
| 9,072 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
10364079292
|
from wordbank import wbank
import random
state = { 'word': '',
'history' : [],
'gc' : 0
}
def userguess(inp, state) -> dict:
guess = list(inp.upper())
wo = list(state['word'])
out = []
for lg in guess:
if lg in wo:
out.append(('yellow', "*" + lg))
else:
out.append(('white', lg))
for i in range(len(guess)):
if guess[i] == wo[i]:
out[i] = ('green', "#" + guess[i])
state['history'].append(out)
return state
def userinput() -> tuple:
inp = input("[][][][][]")
inp = inp.strip()
if len(inp) == 5:
return True, inp
else:
return False, inp
def wordle(state) -> None:
print("Hello, let's play!")
print("A * next to a letter means it's in the word, a # means it's in the right place!")
while True:
while True:
valid, inp = userinput()
if valid:
break
else:
print(f"{inp} wasn't a valid guess, your guess needs to be 5 letters long")
state = userguess(inp, state)
state['gc'] += 1
for h in state['history']:
print(f"[{']['.join([x[1] for x in h])}]")
if len([x[0] for x in state['history'][-1] if x[0] == 'green']) == 5:
print("You win!")
break
else:
if state['gc'] == 5:
print(f"The word was {state['word']}, you didn't win!")
break
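def _demo_userguess():
    # Hedged sketch, not called by the game loop: shows how userguess() marks a guess.
    s = {'word': 'CRANE', 'history': [], 'gc': 0}
    s = userguess('crate', s)
    # s['history'][-1] == [('green', '#C'), ('green', '#R'), ('green', '#A'),
    #                      ('white', 'T'), ('green', '#E')]
    return s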
if __name__ == "__main__":
while True:
state = { 'word': random.choice(wbank).upper(),
'history' : [],
'gc' : 0
}
wordle(state)
inp = input("Play again? y/n")
if inp.strip().lower() == "y":
continue
else:
print("Goodbye! Thanks for playing!")
break
|
aaronlael/cli_wordle
|
wordle.py
|
wordle.py
|
py
| 1,928 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30814400750
|
__author__ = "https://github.com/kdha0727"
import os
import functools
import contextlib
import torch
import torch.distributed as dist
from torch.cuda import is_available as _cuda_available
RANK = 0
WORLD_SIZE = 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Setup Tools #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def is_initialized():
# if pytorch isn't compiled with c10d, is_initialized is omitted from namespace.
    # this function wraps that lookup so it is safe to call either way.
    """
    Returns whether the c10d (distributed) runtime is initialized.
    """
return dist.is_available() and getattr(dist, "is_initialized", lambda: False)()
def setup_dist(temp_dir, rank, world_size):
"""
Set up a distributed process group.
"""
if is_initialized():
return True
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
dist.init_process_group(
backend='gloo', init_method=init_method, rank=rank, world_size=world_size)
else:
init_method = f'file://{init_file}'
dist.init_process_group(
backend='nccl', init_method=init_method, rank=rank, world_size=world_size)
global RANK, WORLD_SIZE
RANK = rank
WORLD_SIZE = world_size
torch.cuda.set_device(dev())
torch.cuda.empty_cache()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# General Tools #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@functools.lru_cache(maxsize=None)
def get_rank(group=None):
if group is not None and is_initialized():
return dist.get_rank(group=group)
return RANK
@functools.lru_cache(maxsize=None)
def get_world_size(group=None):
if group is not None and is_initialized():
return dist.get_world_size(group=group)
return WORLD_SIZE
def barrier(*args, **kwargs):
if is_initialized():
return dist.barrier(*args, **kwargs)
@contextlib.contextmanager
def synchronized_ops():
barrier()
yield
barrier()
return
@functools.lru_cache(maxsize=None)
def dev(group=None):
"""
Get the device to use for torch.distributed.
"""
if _cuda_available():
return torch.device(get_rank(group))
return torch.device("cpu")
def load_state_dict(local_or_remote_path, **kwargs):
"""
Load a PyTorch file.
"""
with open(local_or_remote_path, "rb") as f:
return torch.load(f, **kwargs)
def broadcast(tensor, src=0, group=None, async_op=False):
"""
Synchronize a Tensor across ranks from {src} rank. (default=0)
:param tensor: torch.Tensor.
:param src: source rank to sync params from. default is 0.
:param group:
:param async_op:
"""
if not is_initialized():
return
with torch.no_grad():
dist.broadcast(tensor, src, group=group, async_op=async_op)
def sync_params(params, src=0, group=None, async_op=False):
"""
Synchronize a sequence of Tensors across ranks from {src} rank. (default=0)
:param params: Sequence of torch.Tensor.
:param src: source rank to sync params from. default is 0.
:param group:
:param async_op:
"""
if not is_initialized():
return
for p in params:
broadcast(p, src, group=group, async_op=async_op)
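def _example_sync(model):
    # Hedged sketch, not part of the original module: after setup_dist(tmp_dir, rank, world_size)
    # has run in each process, broadcast the rank-0 weights so every replica starts identical.
    sync_params(model.parameters(), src=0)
    barrier()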
|
studio-YAIVERSE/studio-YAIVERSE
|
dist_util.py
|
dist_util.py
|
py
| 3,621 |
python
|
en
|
code
| 20 |
github-code
|
6
|
74987096188
|
from deep_rl_for_swarms.common import explained_variance, zipsame, dataset
from deep_rl_for_swarms.common import logger
import deep_rl_for_swarms.common.tf_util as U
import tensorflow as tf, numpy as np
import time
import os
from deep_rl_for_swarms.common import colorize
from mpi4py import MPI
from collections import deque
from deep_rl_for_swarms.common.mpi_adam import MpiAdam
from deep_rl_for_swarms.common.cg import cg
from contextlib import contextmanager
from deep_rl_for_swarms.common.act_wrapper import ActWrapper
import sys
from gym import spaces
import matplotlib.pyplot as plt
def traj_segment_generator(pi, env, horizon, stochastic):
# Initialize state variables
t = 0
n_agents = env.nr_agents
new = True
ob = env.reset()
cur_ep_ret = 0
cur_ep_len = 0
ep_rets = []
ep_lens = []
time_steps = []
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
rews = np.zeros([horizon, n_agents], 'float32')
vpreds = np.zeros([horizon, n_agents], 'float32')
news = np.zeros([horizon, n_agents], 'int32')
if isinstance(env.action_space, spaces.Box):
ac = np.vstack([env.action_space.sample() for _ in range(n_agents)]) # Used only to initialize vectors!!
acs = np.array([ac for _ in range(horizon)])
elif isinstance(env.action_space, spaces.Discrete):
ac = np.array([env.action_space.sample() for _ in range(n_agents)])
acs = np.zeros([horizon, n_agents], 'int32') # For discrete actions
else:
raise NotImplementedError
prevacs = acs.copy()
time = np.zeros(horizon, 'float32') # To store the time of acting
# Info to be saved in the logger
keys_to_save = ['attackers_caught', 'attackers_not_caught', 'mean_total_rwd', 'total_rwd']
if env.attack_mode == 'phy':
keys_to_save.extend(['phy_fc_error_rate'])
if env.attack_mode == 'mac':
keys_to_save.extend(['total_mac_tx', 'total_mac_col', 'total_bits_tx', 'prop_t_tx', 'mean_prop_bits_tx_at',
'mean_prop_bits_tx_no'])
info_indiv = []
while True:
prevac = ac
ac, vpred = pi.act(stochastic, np.vstack(ob))
if isinstance(env.action_space, spaces.Box):
ac = np.clip(ac, env.action_space.low, env.action_space.high) # To ensure actions are in the right limit!
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
info_total = {}
for key in keys_to_save:
aux = 0
for i in range(len(info_indiv)):
aux += info_indiv[i][key]
info_total[key] = aux / len(info_indiv)
if isinstance(env.action_space, spaces.Box):
yield [
dict(
ob=np.array(obs[:, na, :]),
rew=np.array(rews[:, na]),
vpred=np.array(vpreds[:, na]),
new=np.array(news[:, na]),
ac=np.array(acs[:, na, :]),
prevac=np.array(prevacs[:, na, :]),
nextvpred=vpred[na] * (1 - new),
ep_rets=[epr[na] for epr in ep_rets],
ep_lens=ep_lens,
time_steps=np.array(time_steps),
time=time,
) for na in range(n_agents)
], info_total
elif isinstance(env.action_space, spaces.Discrete):
yield [
dict(
ob=np.array(obs[:, na, :]),
rew=np.array(rews[:, na]),
vpred=np.array(vpreds[:, na]),
new=np.array(news[:, na]),
ac=np.array(acs[:, na]),
prevac=np.array(prevacs[:, na]),
nextvpred=vpred[na] * (1 - new),
ep_rets=[epr[na] for epr in ep_rets],
ep_lens=ep_lens,
time_steps=np.array(time_steps),
time=time
) for na in range(n_agents)
], info_total
else:
raise NotImplementedError
_, vpred = pi.act(stochastic, ob)
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_lens = []
time_steps = []
info_indiv = []
i = t % horizon
time_steps.append(t)
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
if env.attack_mode == 'mac':
time[i] = sum(env.t_counter)
elif env.attack_mode == 'phy':
time[i] = env.timestep
else:
raise RuntimeError('Environment not recognized')
ob, rew, new, info = env.step(ac)
rews[i] = rew
#mask_undetected[i] = np.logical_not(env.banned[0:env.nr_agents])
cur_ep_ret += rew
cur_ep_len += 1
if new:
info_indiv.append(info)
ep_rets.append(cur_ep_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_len = 0
ob = env.reset()
sys.stdout.write('\r Current horizon length = ' + str((t + 1) % horizon) + '/' + str(horizon))
sys.stdout.flush()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
new = [np.append(p["new"], 0) for p in seg] # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = [np.append(p["vpred"], p["nextvpred"]) for p in seg]
for i, p in enumerate(seg):
T = len(p["rew"])
p["adv"] = gaelam = np.empty(T, 'float32')
rew = p["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1 - new[i][t + 1]
delta = rew[t] + gamma * vpred[i][t + 1] * nonterminal - vpred[i][t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
p["tdlamret"] = p["adv"] + p["vpred"]
'''
def add_vtarg_and_adv(seg, gamma, lam): # Modified version to include time!
new = [np.append(p["new"], 0) for p in seg] # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = [np.append(p["vpred"], p["nextvpred"]) for p in seg]
for i, p in enumerate(seg):
T = len(p["rew"])
p["adv"] = gaelam = np.empty(T, 'float32')
rew = p["rew"]
lastgaelam = 0
time = np.append(p['time'], p['time'][-1] + 1) # Increase the final time by 1 to obtain the differential time
difft = time[1:] - time[0: -1]
for t in reversed(range(T)):
nonterminal = 1 - new[i][t + 1]
gt = gamma ** difft[t] # Note that when difft is negative, nonterminal is 0!!
lt = lam ** difft[t]
delta = rew[t] + gt * vpred[i][t + 1] * nonterminal - vpred[i][t]
gaelam[t] = lastgaelam = delta + gt * lt * nonterminal * lastgaelam
p["tdlamret"] = p["adv"] + p["vpred"]
'''
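# Hedged note, not part of the original file: add_vtarg_and_adv() above implements GAE(lambda),
#   delta_t = r_t + gamma * V(s_{t+1}) * (1 - new_{t+1}) - V(s_t)
#   A_t     = delta_t + gamma * lambda * (1 - new_{t+1}) * A_{t+1}
# with value target tdlamret = A_t + V(s_t); the commented-out variant generalizes gamma and
# lam to non-uniform time steps via gamma**difft and lam**difft.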
def learn(env, policy_fn, *,
timesteps_per_batch, # what to train on
max_kl, cg_iters,
gamma, lam, # advantage estimation
entcoeff=0.0,
cg_damping=1e-2,
vf_stepsize=3e-4,
vf_iters =3,
max_timesteps=0, max_episodes=0, max_iters=0, # time constraint
callback=None,
save_dir=None,
save_flag=False,
plot_flag=False
):
nworkers = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_fn("pi", ob_space, ac_space)
oldpi = policy_fn("oldpi", ob_space, ac_space)
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = entcoeff * meanent
vferr = tf.reduce_mean(tf.square(pi.vpred - ret))
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold (advantage--> Next line)
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = pi.get_trainable_variables()
var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")] # Policy variables
var_list.extend([v for v in all_var_list if v.name.split("/")[1].startswith("me")]) # Mean embedding variables
vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")] # Value function variables
vfadam = MpiAdam(vf_var_list)
get_flat = U.GetFlat(var_list)
set_from_flat = U.SetFromFlat(var_list)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
sz = U.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
start += sz
gvp = tf.add_n([tf.reduce_sum(g * tangent) for (g, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
fvp = U.flatgrad(gvp, var_list)
assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)
for (oldv, newv) in
zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg], losses)
compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
if rank == 0:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta'))
else:
yield
def allmean(x):
assert isinstance(x, np.ndarray)
out = np.empty_like(x)
MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
out /= nworkers
return out
act_params = {
'name': "pi",
'ob_space': ob_space,
'ac_space': ac_space,
}
pi = ActWrapper(pi, act_params)
U.initialize()
th_init = get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
set_from_flat(th_init)
vfadam.sync()
print("Init param sum", th_init.sum(), flush=True)
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
if max_timesteps:
print(colorize(str(100 * timesteps_so_far / max_timesteps) + ' % of timesteps', color='magenta'))
elif max_episodes:
print(colorize(str(100 * episodes_so_far / max_episodes) + ' % of episodes', color='magenta'))
elif max_iters:
print(colorize(str(100 * iters_so_far / max_iters) + ' % of iters', color='magenta'))
logger.log("********** Iteration %i ************" % iters_so_far)
with timed("sampling"):
seg, info = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
ob = np.concatenate([s['ob'] for s in seg], axis=0)
ac = np.concatenate([s['ac'] for s in seg], axis=0)
atarg = np.concatenate([s['adv'] for s in seg], axis=0)
tdlamret = np.concatenate([s['tdlamret'] for s in seg], axis=0)
        vpredbefore = np.concatenate([s["vpred"] for s in seg], axis=0) # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
# if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret)
# if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
args = ob, ac, atarg
fvpargs = [arr[::5] for arr in args]
def fisher_vector_product(p):
return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p
assign_old_eq_new() # set old parameter values to new parameter values
with timed("computegrad"):
*lossbefore, g = compute_lossandgrad(*args)
lossbefore = allmean(np.array(lossbefore))
g = allmean(g)
if np.allclose(g, 0):
logger.log("Got zero gradient. not updating")
else:
with timed("cg"):
stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank == 0)
assert np.isfinite(stepdir).all()
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
lm = np.sqrt(shs / max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lm
expectedimprove = g.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
set_from_flat(thnew)
meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(meanlosses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl > max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
set_from_flat(thbefore)
if nworkers > 1 and iters_so_far % 20 == 0:
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) # list of tuples
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
with timed("vf"):
for _ in range(vf_iters):
for (mbob, mbret) in dataset.iterbatches((ob, tdlamret),
include_final_partial_batch=False, batch_size=64):
g = allmean(compute_vflossandgrad(mbob, mbret))
vfadam.update(g, vf_stepsize)
lrlocal = (seg[0]["ep_lens"], seg[0]["ep_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
lens, rews = map(flatten_lists, zip(*listoflrpairs))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
# Add info values
logger.record_tabular("AttC", info['attackers_caught'])
logger.record_tabular("AttNC", info['attackers_not_caught'])
logger.record_tabular("MtR", info['mean_total_rwd']) # Mean total reward
logger.record_tabular("TtR", info['total_rwd']) # Total reward
if env.attack_mode == 'phy':
logger.record_tabular("Fce", info['phy_fc_error_rate'])
if env.attack_mode == 'mac':
logger.record_tabular("Tmt", info['total_mac_tx'])
logger.record_tabular("Tmc", info['total_mac_col'])
logger.record_tabular("Tbt", info['total_bits_tx'])
logger.record_tabular("Ptt", info['prop_t_tx'])
logger.record_tabular("MpbtA", info['mean_prop_bits_tx_at'])
logger.record_tabular("MpbtN", info['mean_prop_bits_tx_no'])
if rank == 0:
logger.dump_tabular()
if save_flag:
pi.save(os.path.normpath(save_dir + '/models_trpo/model_{}.pkl'.format(iters_so_far)))
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
|
jparras/dla
|
deep_rl_for_swarms/rl_algo/trpo_mpi/trpo_mpi_attack.py
|
trpo_mpi_attack.py
|
py
| 17,701 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18490735964
|
import boto3
import json
import uuid
print('Loading function')
def lambda_handler(event, context):
bucketName = event['Records'][0]['s3']['bucket']['name']
fileName = event['Records'][0]['s3']['object']['key']
return detect_labels_and_put_dynamoDB(fileName, bucketName)
def detect_labels_and_put_dynamoDB(photo, bucket):
rekognitionClient=boto3.client('rekognition', 'us-east-2')
dynamoClient = boto3.client('dynamodb')
response = rekognitionClient.detect_labels(Image={'S3Object':{'Bucket':bucket,'Name':photo}},
MaxLabels=10)
print('Detected labels for ' + photo)
for label in response['Labels']:
dynamoClient.put_item(
TableName='RekognitionDetails',
Item= {
'ID' : {
'S': str(uuid.uuid4())
},
'Filename': {
'S': photo
},
'Category': {
'S' : label['Name']
},
'Confidence': {
'N': str(label['Confidence'])
}
            })
    return len(response['Labels'])
def main():
photo=''
bucket=''
    label_count=detect_labels_and_put_dynamoDB(photo, bucket)
print("Labels detected: " + str(label_count))
|
Samir42/RekognitionService
|
RekognitionLambda.py
|
RekognitionLambda.py
|
py
| 1,249 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72279625789
|
import firebase_admin
import googleapiclient
from firebase_admin import credentials
from firebase_admin import db
import os
from os.path import join, dirname
from dotenv import load_dotenv
from XmlParser import XmlParser
class FirebaseService:
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
DATABASE_URL = os.environ.get("DATABASE_URL")
if not firebase_admin._apps:
        print('initializing')
cred = credentials.Certificate('hologram-test-firebase-adminsdk.json')
firebase_admin.initialize_app(cred, {
'databaseURL': DATABASE_URL,
})
ref_db = db.reference('/video')
def __init__(self, video_item):
self.video_item = video_item
    # Get the Firestore document IDs
def get_db_id(self):
print("FirebaseService", 'get_db_id')
id_list = []
key_val = self.ref_db.get()
        # Get the videoId of each item written to the DB
for key, val in key_val.items():
id_list.append(key)
return id_list
#
def write_video_item(self):
print("FirebaseService", "write_video_item")
self.ref_db.update(self.video_item)
def delete_video_item(self, update_db_items, xml_video_ids, error_channel_ids):
print("FirebaseService", 'update_db_items', len(update_db_items), update_db_items)
print("FirebaseService", 'xml_video_ids', len(xml_video_ids), xml_video_ids)
print("FirebaseService", 'error_channel_ids', len(error_channel_ids), error_channel_ids)
        # Extract only the items that are more than a week old:
        # (items in the DB after the update) - (items fetched from the XML feed)
last_week_ids = set(update_db_items).difference(set(xml_video_ids))
print('last_week_ids', last_week_ids)
for single_id in last_week_ids:
db_channelId = self.ref_db.child(f'{single_id}').child('channelId').get()
print('db_channel_id', db_channelId)
            # Delete the item only if its channel ID (from the DB) is not in the list of channel IDs that raised errors,
            # because otherwise video info for channels that xml_parse could not fetch would be deleted
if db_channelId not in set(error_channel_ids):
print('delete', f'{single_id}')
self.ref_db.child(f'{single_id}').delete()
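# Hedged usage sketch, not part of the original service (the video_item keys below are
# placeholders, not the real schema):
#   service = FirebaseService({'VIDEO_ID': {'channelId': 'UC...', 'title': '...'}})
#   service.write_video_item()          # pushes the dict under /video via update()
#   existing_ids = service.get_db_id()  # list of child keys currently under /video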
|
CIDRA4023/Hologram-backend
|
FirebaseService.py
|
FirebaseService.py
|
py
| 2,457 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
20157578435
|
import numpy as np
import pandas as pd
import time
from metric import SampleScore,EventScore, AdjustedMutualInfoScore
from joblib import Parallel, delayed
class Experiment:
def __init__(self,algorithms:list, configurations:list, thresholds = np.linspace(0,1,101),njobs=1,verbose = True) -> None:
"""Initialization
Args:
algorithms (list): list of algorithm classes
            configurations (list): list of lists of configurations, as dictionaries, one list per algorithm class
thresholds (np.ndarray, optional): numpy array of thresholds to consider for the event based metric. Defaults to numpy.linspace(0,1,101).
"""
self.algorithms = algorithms
self.configurations = configurations
self.thresholds = thresholds
self.njobs = njobs
self.verbose = verbose
def compute_scores(self,label,prediction):
single_pred = np.clip(np.sum(prediction,axis=0),0,1).reshape(1,-1)
single_label = np.clip(np.sum(label,axis=0),0,1).reshape(1,-1)
scores = []
#single sample score
p,r,f = SampleScore().score(single_label,single_pred)
scores.append(["sss-precision",p])
scores.append(["sss-recall",r])
scores.append(["sss-fscore",f])
#sample score
p,r,f = SampleScore().score(label,prediction)
scores.append(["ss-precision",p])
scores.append(["ss-recall",r])
scores.append(["ss-fscore",f])
# weigthed sample score
p,r,f = SampleScore(averaging="weighted").score(label,prediction)
scores.append(["w-ss-precision",p])
scores.append(["w-ss-recall",r])
scores.append(["w-ss-fscore",f])
#single event score
lp,lr,lf = EventScore().score(single_label,single_pred,self.thresholds)
for t,p,r,f in zip(self.thresholds,lp,lr,lf):
scores.append([f"ses-precision_{np.round(t,2)}",p])
scores.append([f"ses-recall_{np.round(t,2)}",r])
scores.append([f"ses-fscore_{np.round(t,2)}",f])
scores.append(["ses-auc-precision",np.mean(lp)])
scores.append(["ses-auc-recall",np.mean(lr)])
scores.append(["ses-auc-fscore",np.mean(lf)])
#event score
lp,lr,lf = EventScore().score(label,prediction,self.thresholds)
for t,p,r,f in zip(self.thresholds,lp,lr,lf):
scores.append([f"es-precision_{np.round(t,2)}",p])
scores.append([f"es-recall_{np.round(t,2)}",r])
scores.append([f"es-fscore_{np.round(t,2)}",f])
scores.append(["es-auc-precision",np.mean(lp)])
scores.append(["es-auc-recall",np.mean(lr)])
scores.append(["es-auc-fscore",np.mean(lf)])
# weighted event score
lp,lr,lf = EventScore(averaging="weighted").score(label,prediction,self.thresholds)
for t,p,r,f in zip(self.thresholds,lp,lr,lf):
scores.append([f"w-es-precision_{np.round(t,2)}",p])
scores.append([f"w-es-recall_{np.round(t,2)}",r])
scores.append([f"w-es-fscore_{np.round(t,2)}",f])
scores.append(["w-es-auc-precision",np.mean(lp)])
scores.append(["w-es-auc-recall",np.mean(lr)])
scores.append(["w-es-auc-fscore",np.mean(lf)])
        # adjusted mutual information
scores.append(["amis",AdjustedMutualInfoScore().score(label,prediction)])
return scores
def signal_algo_class_experiement(self,signal_idx,signal,label,algo_class,config,config_idx):
"Return a DF"
#keep only labels row that are activated by the signal
label = label[label.sum(axis=1)>0]
#update the number of patterns to predict if required
t_config = config.copy()
if ("n_patterns" in t_config.keys()):
if (isinstance(t_config["n_patterns"],int)):
t_config["n_patterns"] = label.shape[0]
else:
t_config["n_patterns"] = None
try:
#get predictions
algo = algo_class(**t_config)
start = time.time()
algo.fit(signal)
end = time.time()
#compute scores
scores = self.compute_scores(label,algo.prediction_mask_)
tdf = pd.DataFrame(scores,columns=["metric","score"])
tdf["algorithm"] = algo_class.__name__
tdf["config_idx"] = config_idx
tdf["execution_time"] = end - start
tdf["signal_idx"] = signal_idx
tdf["n_patterns"] = label.shape[0]
tdf["predicted_n_patterns"] = algo.prediction_mask_.shape[0]
if self.verbose:
s1 = np.round(tdf[tdf["metric"] == "es-auc-fscore"].score.values[0],2)
s2 = np.round(tdf[tdf["metric"] == "amis"].score.values[0],2)
print(f"signal_id: {signal_idx}, algo: {algo_class.__name__}, config_id: {config_idx}, f-auc: {s1}, ami: {s2}")
return tdf
except:
s= f"signal_id: {signal_idx}, algo: {algo_class.__name__}, config_id: {config_idx} failed to fit."
if self.verbose:
print(s)
if self.logs_path_ is not None:
with open(self.logs_path_,"a") as f:
f.write(s +"\n")
    def run_experiment(self,dataset:np.ndarray,labels:np.ndarray,backup_path = None,batch_size=10,logs_path = None,verbose = True)->pd.DataFrame:
        """Run every (signal, algorithm, configuration) combination and collect the scores.
        Args:
            dataset (np.ndarray): array of signals, signal shape (L,), variable length allowed
            labels (np.ndarray): array of labels, label shape (L,), variable length allowed
            backup_path (str, optional): Path to store the dataframe in case of a big experiment. If None, no saving. Defaults to None.
            batch_size (int, optional): number of signals per backup batch. Defaults to 10.
            logs_path (str, optional): path of a log file where failed fits are recorded. Defaults to None.
            verbose (bool, optional): verbose. Defaults to True.
        Returns:
            pd.DataFrame: scores_df
        """
self.logs_path_ = logs_path
n_signals = len(dataset)
n_configs = np.sum([len(conf) for conf in self.configurations])
total = n_signals*n_configs
if backup_path != None:
n_batches = n_signals//batch_size
if n_batches >0:
batches =[zip(dataset[i*batch_size:(i+1)*batch_size],labels[i*batch_size:(i+1)*batch_size]) for i in range(n_batches)]
else:
batches = []
if n_signals % batch_size !=0:
batches.append(zip(dataset[n_batches*batch_size:],labels[n_batches*batch_size:]))
else:
batches = [zip(dataset,labels)]
self.df_ = pd.DataFrame()
counts = 0
for batch in batches:
results = Parallel(n_jobs=self.njobs)(
delayed(self.signal_algo_class_experiement)(counts+id_s,signal,label,algo,config,id_c)
for id_s,(signal,label) in enumerate(batch)
for id_a,algo in enumerate(self.algorithms)
for id_c,config in enumerate(self.configurations[id_a])
)
counts = min(counts+batch_size,n_signals)
self.df_= pd.concat((self.df_,*results)).reset_index(drop = True)
self.df_ = self.df_.astype({'metric':str, "score":float, "algorithm":str,'config_idx':int,"signal_idx":int, "n_patterns":int, "predicted_n_patterns":int})
if backup_path != None:
self.df_.to_csv(backup_path)
if verbose:
print(f"Achieved [{counts*n_configs}/{total}]")
return self.df_
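def _example_run(MyDetector):
    # Hedged sketch: MyDetector is a placeholder detector class (not part of this module)
    # exposing fit(signal) and prediction_mask_; this wires it into run_experiment().
    import numpy as np
    rng = np.random.default_rng(0)
    dataset = [rng.standard_normal(500) for _ in range(4)]
    labels = []
    for _ in range(4):
        lab = np.zeros((1, 500))
        lab[0, 100:150] = 1  # one labelled event per signal
        labels.append(lab)
    exp = Experiment([MyDetector], [[{"n_patterns": 1}, {"n_patterns": 2}]], njobs=1)
    return exp.run_experiment(dataset, labels, verbose=True)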
|
thibaut-germain/lt-normalized
|
src/experiment.py
|
experiment.py
|
py
| 7,754 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45483801886
|
import pickle
import numpy as np
import random
import os
import pandas as pd
import yaml
import copy
from tqdm import tqdm
from . import utils
from . import visual
import xarray as xr
from .proxy import ProxyDatabase
from .gridded import Dataset
from .utils import (
pp,
p_header,
p_hint,
p_success,
p_fail,
p_warning,
cfg_abspath,
cwd_abspath,
geo_mean,
nino_indices,
calc_tpi,
global_hemispheric_means,
)
from .da import (
enkf_update_array,
cov_localization,
)
class ReconJob:
''' Reconstruction Job
    General rule for loading parameters: load from the YAML file first if available, then update with the parameters
    passed to the function call, so the latter take priority.
'''
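    # Hedged usage sketch (paths are placeholders, not part of the original docstring):
    #   job = ReconJob()
    #   job.load_configs('cfg.yml'); job.prepare(verbose=True)
    #   job.gen_Ye(); job.gen_Xb(); job.run_da(); job.save_recon('recon.nc')
    # gen_Ye() must run before gen_Xb(), since gen_Xb() needs job.prior_sample_years.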
def __init__(self, configs=None, proxydb=None, prior=None, obs=None):
self.configs = configs
self.proxydb = proxydb
self.prior = prior
self.obs = obs
def copy(self):
return copy.deepcopy(self)
def load_configs(self, cfg_path=None, job_dirpath=None, verbose=False):
''' Load the configuration YAML file
self.configs will be updated
Parameters
----------
cfg_path : str
the path of a configuration YAML file
'''
pwd = os.path.dirname(__file__)
if cfg_path is None:
cfg_path = os.path.abspath(os.path.join(pwd, './cfg/cfg_template.yml'))
self.cfg_path = cfg_path
if verbose: p_header(f'LMRt: job.load_configs() >>> loading reconstruction configurations from: {cfg_path}')
self.configs = yaml.safe_load(open(cfg_path, 'r'))
if verbose: p_success(f'LMRt: job.load_configs() >>> job.configs created')
if job_dirpath is None:
if os.path.isabs(self.configs['job_dirpath']):
job_dirpath = self.configs['job_dirpath']
else:
job_dirpath = cfg_abspath(self.cfg_path, self.configs['job_dirpath'])
else:
job_dirpath = cwd_abspath(job_dirpath)
self.configs['job_dirpath'] = job_dirpath
os.makedirs(job_dirpath, exist_ok=True)
if verbose:
p_header(f'LMRt: job.load_configs() >>> job.configs["job_dirpath"] = {job_dirpath}')
p_success(f'LMRt: job.load_configs() >>> {job_dirpath} created')
pp.pprint(self.configs)
def load_proxydb(self, path=None, verbose=False, load_df_kws=None):
''' Load the proxy database
self.proxydb will be updated
Parameters
----------
        path : str
            if given, should point to a pickle file holding a pandas DataFrame
'''
        # update self.configs with the non-None parameters passed to the function call
if path is None:
if os.path.isabs(self.configs['proxydb_path']):
path = self.configs['proxydb_path']
else:
path = cfg_abspath(self.cfg_path, self.configs['proxydb_path'])
else:
path = cwd_abspath(path)
self.configs['proxydb_path'] = path
if verbose: p_header(f'LMRt: job.load_proxydb() >>> job.configs["proxydb_path"] = {path}')
# load proxy database
proxydb = ProxyDatabase()
proxydb_df = pd.read_pickle(self.configs['proxydb_path'])
load_df_kws = {} if load_df_kws is None else load_df_kws.copy()
proxydb.load_df(proxydb_df, ptype_psm=self.configs['ptype_psm'],
ptype_season=self.configs['ptype_season'], verbose=verbose, **load_df_kws)
if verbose: p_success(f'LMRt: job.load_proxydb() >>> {proxydb.nrec} records loaded')
proxydb.source = self.configs['proxydb_path']
self.proxydb = proxydb
if verbose: p_success(f'LMRt: job.load_proxydb() >>> job.proxydb created')
def filter_proxydb(self, ptype_psm=None, dt=1, pids=None, verbose=False):
if ptype_psm is None:
ptype_psm = self.configs['ptype_psm']
else:
self.configs['ptype_psm'] = ptype_psm
if verbose: p_header(f'LMRt: job.filter_proxydb() >>> job.configs["ptype_psm"] = {ptype_psm}')
proxydb = self.proxydb.copy()
if self.configs['ptype_psm'] is not None:
ptype_list = list(self.configs['ptype_psm'].keys())
if verbose: p_header(f'LMRt: job.filter_proxydb() >>> filtering proxy records according to: {ptype_list}')
proxydb.filter_ptype(ptype_list, inplace=True)
proxydb.filter_dt(dt, inplace=True)
if pids is not None:
self.configs['assim_pids'] = pids
if verbose: p_header(f'LMRt: job.filter_proxydb() >>> job.configs["assim_pids"] = {pids}')
if 'assim_pids' in self.configs and self.configs['assim_pids'] is not None:
proxydb.filter_pids(self.configs['assim_pids'], inplace=True)
if verbose: p_success(f'LMRt: job.filter_proxydb() >>> {proxydb.nrec} records remaining')
self.proxydb = proxydb
def seasonalize_proxydb(self, ptype_season=None, verbose=False):
if ptype_season is None:
ptype_season = self.configs['ptype_season']
else:
self.configs['ptype_season'] = ptype_season
if verbose: p_header(f'LMRt: job.seasonalize_proxydb() >>> job.configs["ptype_season"] = {ptype_season}')
proxydb = self.proxydb.copy()
if self.configs['ptype_season'] is not None:
if verbose: p_header(f'LMRt: job.seasonalize_proxydb() >>> seasonalizing proxy records according to: {self.configs["ptype_season"]}')
proxydb.seasonalize(self.configs['ptype_season'], inplace=True)
if verbose: p_success(f'LMRt: job.seasonalize_proxydb() >>> {proxydb.nrec} records remaining')
self.proxydb = proxydb
if verbose: p_success(f'LMRt: job.seasonalize_proxydb() >>> job.proxydb updated')
def load_prior(self, path_dict=None, varname_dict=None, verbose=False, anom_period=None):
''' Load model prior fields
Parameters
----------
path_dict: dict
a dict of environmental variables
varname_dict: dict
a dict to map variable names, e.g. {'tas': 'sst'} means 'tas' is named 'sst' in the input NetCDF file
'''
        # update self.configs with the non-None parameters passed to the function call
if path_dict is None:
path_dict = cfg_abspath(self.cfg_path, self.configs['prior_path'])
self.configs['prior_path'] = path_dict
else:
self.configs['prior_path'] = cwd_abspath(path_dict)
if verbose: p_header(f'LMRt: job.load_prior() >>> job.configs["prior_path"] = {path_dict}')
if anom_period is None:
anom_period = self.configs['anom_period']
else:
self.configs['anom_period'] = anom_period
if verbose: p_header(f'LMRt: job.load_prior() >>> job.configs["anom_period"] = {anom_period}')
vn_dict = {
'time': 'time',
'lat': 'lat',
'lon': 'lon',
}
if 'prior_varname' in self.configs:
vn_dict.update(self.configs['prior_varname'])
if varname_dict is not None:
vn_dict.update(varname_dict)
self.configs['prior_varname'] = vn_dict
# load data
if verbose: p_header(f'LMRt: job.load_prior() >>> loading model prior fields from: {self.configs["prior_path"]}')
ds = Dataset()
ds.load_nc(self.configs['prior_path'], varname_dict=self.configs['prior_varname'], anom_period=anom_period, inplace=True)
if verbose:
p_hint('LMRt: job.load_prior() >>> raw prior')
print(ds)
self.prior = ds
if verbose: p_success(f'LMRt: job.load_prior() >>> job.prior created')
def seasonalize_ds_for_psm(self, ds_type=None, seasonalized_ds_path=None, save_path=None, ptype_season=None, verbose=False):
if seasonalized_ds_path is not None and os.path.exists(seasonalized_ds_path):
with open(seasonalized_ds_path, 'rb') as f:
if ds_type == 'prior':
self.seasonalized_prior = pickle.load(f)
elif ds_type == 'obs':
self.seasonalized_obs = pickle.load(f)
else:
raise ValueError('Wrong ds_type')
else:
if ptype_season is None:
ptype_season = self.configs['ptype_season']
else:
self.configs['ptype_season'] = ptype_season
if verbose: p_header(f'LMRt: job.seasonalize_ds_for_psm() >>> job.configs["ptype_season"] = {ptype_season}')
all_seasons = []
for ptype, season in ptype_season.items():
if isinstance(season[0], list):
# when ptype_season[pobj.ptype] contains multiple seasonality possibilities
for sn in season:
if sn not in all_seasons:
all_seasons.append(sn)
else:
# when ptype_season[pobj.ptype] contains only one seasonality possibility
if season not in all_seasons:
all_seasons.append(season)
# print(all_seasons)
if ds_type == 'prior':
ds = self.prior.copy()
elif ds_type == 'obs':
ds = self.obs.copy()
else:
raise ValueError('Wrong ds_type')
seasonalized_ds = {}
for season in all_seasons:
if verbose: p_header(f'LMRt: job.seasonalize_ds_for_psm() >>> Seasonalizing variables from {ds_type} with season: {season}')
season_tag = '_'.join(str(s) for s in season)
seasonalized_ds[season_tag] = ds.seasonalize(season, inplace=False)
if ds_type == 'prior':
self.seasonalized_prior = seasonalized_ds
elif ds_type == 'obs':
self.seasonalized_obs = seasonalized_ds
else:
raise ValueError('Wrong ds_type')
if save_path is not None:
with open(save_path, 'wb') as f:
pickle.dump(seasonalized_ds, f)
if verbose: p_success(f'LMRt: job.seasonalize_ds_for_psm() >>> job.seasonalized_{ds_type} created')
def seasonalize_prior(self, season=None, verbose=False):
if season is None:
season = self.configs['prior_season']
else:
self.configs['prior_season'] = season
if verbose: p_header(f'LMRt: job.seasonalize_prior() >>> job.configs["prior_season"] = {season}')
ds = self.prior.copy()
ds.seasonalize(self.configs['prior_season'], inplace=True)
if verbose:
p_hint(f'LMRt: job.seasonalize_prior() >>> seasonalized prior w/ season {season}')
print(ds)
self.prior = ds
if verbose: p_success(f'LMRt: job.seasonalize_prior() >>> job.prior updated')
def regrid_prior(self, ntrunc=None, verbose=False):
if ntrunc is None:
ntrunc = self.configs['prior_regrid_ntrunc']
self.configs['prior_regrid_ntrunc'] = ntrunc
ds = self.prior.copy()
ds.regrid(self.configs['prior_regrid_ntrunc'], inplace=True)
if verbose:
p_hint('LMRt: job.regrid_prior() >>> regridded prior')
print(ds)
self.prior = ds
if verbose: p_success(f'LMRt: job.regrid_prior() >>> job.prior updated')
def crop_prior(self, domain_range=None, verbose=False):
''' Take a smaller domain for reconstruction
Parameters
----------
domain_range : list
[lat_min, lat_max, lon_min, lon_max]
'''
if domain_range is None:
if 'prior_crop_domain_range' not in self.configs:
self.configs['prior_crop_domain_range'] = None
else:
domain_range = self.configs['prior_crop_domain_range']
else:
self.configs['prior_crop_domain_range'] = domain_range
if self.configs['prior_crop_domain_range'] is None:
if verbose: p_success(f'LMRt: job.crop_prior() >>> job.prior not updated as the domain_range is set to None')
else:
ds = self.prior.copy()
ds.crop(self.configs['prior_crop_domain_range'], inplace=True)
if verbose:
                p_hint('LMRt: job.crop_prior() >>> cropped prior')
print(ds)
self.prior = ds
if verbose: p_success(f'LMRt: job.crop_prior() >>> job.prior updated')
def load_obs(self, path_dict=None, varname_dict=None, verbose=False, anom_period=None):
''' Load instrumental observations fields
Parameters
----------
path_dict: dict
a dict of environmental variables
varname_dict: dict
a dict to map variable names, e.g. {'tas': 'sst'} means 'tas' is named 'sst' in the input NetCDF file
'''
if path_dict is None:
obs_path = cfg_abspath(self.cfg_path, self.configs['obs_path'])
else:
obs_path = cwd_abspath(path_dict)
self.configs['obs_path'] = obs_path
if anom_period is None:
anom_period = self.configs['anom_period']
else:
self.configs['obs_anom_period'] = anom_period
if verbose: p_header(f'LMRt: job.load_obs() >>> job.configs["anom_period"] = {anom_period}')
vn_dict = {
'time': 'time',
'lat': 'lat',
'lon': 'lon',
}
if 'obs_varname' in self.configs:
vn_dict.update(self.configs['obs_varname'])
if varname_dict is not None:
vn_dict.update(varname_dict)
self.configs['obs_varname'] = vn_dict
if verbose: p_header(f'LMRt: job.load_obs() >>> loading instrumental observation fields from: {self.configs["obs_path"]}')
ds = Dataset()
ds.load_nc(self.configs['obs_path'], varname_dict=vn_dict, anom_period=anom_period, inplace=True)
self.obs = ds
if verbose: p_success(f'LMRt: job.load_obs() >>> job.obs created')
def calibrate_psm(self, ptype_season=None,
seasonalized_prior_path=None, prior_loc_path=None,
seasonalized_obs_path=None, obs_loc_path=None,
calibed_psm_path=None, calib_period=None, verbose=False):
if ptype_season is None:
ptype_season = self.configs['ptype_season']
else:
self.configs['ptype_season'] = ptype_season
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["ptype_season"] = {ptype_season}')
ptype_season = {k:self.configs['ptype_season'][k] for k in self.configs['ptype_psm'].keys()}
# set paths for precalculated data
        if 'precalc' not in self.configs:
self.configs['precalc'] = {}
if seasonalized_prior_path is None:
seasonalized_prior_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'seasonalized_prior.pkl'))
self.configs['precalc']['seasonalized_prior_path'] = seasonalized_prior_path
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["seasonalized_prior_path"] = {seasonalized_prior_path}')
if seasonalized_obs_path is None:
seasonalized_obs_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'seasonalized_obs.pkl'))
self.configs['precalc']['seasonalized_obs_path'] = seasonalized_obs_path
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["seasonalized_obs_path"] = {seasonalized_obs_path}')
if prior_loc_path is None:
prior_loc_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'prior_loc.pkl'))
self.configs['precalc']['prior_loc_path'] = prior_loc_path
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["prior_loc_path"] = {prior_loc_path}')
if obs_loc_path is None:
obs_loc_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'obs_loc.pkl'))
self.configs['precalc']['obs_loc_path'] = obs_loc_path
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["obs_loc_path"] = {obs_loc_path}')
if calibed_psm_path is None:
calibed_psm_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'calibed_psm.pkl'))
self.configs['precalc']['calibed_psm_path'] = calibed_psm_path
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["calibed_psm_path"] = {calibed_psm_path}')
for ds_type, seasonalized_path, loc_path in zip(
['prior', 'obs'], [seasonalized_prior_path, seasonalized_obs_path], [prior_loc_path, obs_loc_path]):
# seasonalize ds for PSM calibration
self.seasonalize_ds_for_psm(ds_type=ds_type, ptype_season=ptype_season,
seasonalized_ds_path=seasonalized_path, save_path=seasonalized_path, verbose=verbose)
if ds_type == 'prior':
ds = self.prior
seasonalized_ds = self.seasonalized_prior
elif ds_type == 'obs':
ds = self.obs
seasonalized_ds = self.seasonalized_obs
# get modeled environmental variables at proxy locales from prior
psm_types = set([v for k, v in self.configs['ptype_psm'].items()])
if 'bilinear' in psm_types:
var_names = ['tas', 'pr']
else:
var_names = ['tas']
self.proxydb.find_nearest_loc(var_names, ds=ds, ds_type=ds_type, ds_loc_path=loc_path, save_path=loc_path, verbose=verbose)
self.proxydb.get_var_from_ds(seasonalized_ds, ptype_season, ds_type=ds_type, verbose=verbose)
# initialize PSM
self.proxydb.init_psm(verbose=verbose)
# calibrate PSM
if calib_period is None:
calib_period = self.configs['psm_calib_period']
else:
self.configs['psm_calib_period'] = calib_period
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["psm_calib_period"] = {calib_period}')
if verbose: p_header(f'LMRt: job.calibrate_psm() >>> PSM calibration period: {calib_period}')
if calibed_psm_path is not None and os.path.exists(calibed_psm_path):
self.proxydb.calib_psm(calib_period=calib_period, calibed_psm_path=calibed_psm_path, verbose=verbose)
else:
self.proxydb.calib_psm(calib_period=calib_period, save_path=calibed_psm_path, verbose=verbose)
def forward_psm(self, verbose=False):
self.proxydb.forward_psm(verbose=verbose)
def gen_Xb(self, recon_vars=None, verbose=False):
''' Generate Xb
'''
if not hasattr(self, 'prior_sample_years'):
raise ValueError('job.prior_sample_years not existing, please run job.gen_Ye() first!')
if recon_vars is None:
recon_vars = self.configs['recon_vars']
else:
self.configs['recon_vars'] = recon_vars
if verbose: p_header(f'LMRt: job.gen_Xb() >>> job.configs["recon_vars"] = {recon_vars}')
if type(recon_vars) is str:
# contains only one variable
recon_vars = [recon_vars]
vn_1st = recon_vars[0]
self.prior_sample_idx = [list(self.prior.fields[vn_1st].time).index(yr) for yr in self.prior_sample_years]
if verbose: p_success(f'LMRt: job.gen_Xb() >>> job.prior_sample_idx created')
nens = np.size(self.prior_sample_years)
Xb_var_irow = {} # index of rows in Xb to store the specific var
loc = 0
for vn in recon_vars:
nt, nlat, nlon = np.shape(self.prior.fields[vn].value)
lats, lons = self.prior.fields[vn].lat, self.prior.fields[vn].lon
lon2d, lat2d = np.meshgrid(lons, lats)
fd_coords = np.ndarray((nlat*nlon, 2))
fd_coords[:, 0] = lat2d.flatten()
fd_coords[:, 1] = lon2d.flatten()
fd = self.prior.fields[vn].value[self.prior_sample_idx]
fd = np.moveaxis(fd, 0, -1)
fd_flat = fd.reshape((nlat*nlon, nens))
if vn == vn_1st:
Xb = fd_flat
Xb_coords = fd_coords
else:
Xb = np.concatenate((Xb, fd_flat), axis=0)
Xb_coords = np.concatenate((Xb_coords, fd_coords), axis=0)
Xb_var_irow[vn] = [loc, loc+nlat*nlon-1]
loc += nlat*nlon
self.Xb = Xb
self.Xb_coords = Xb_coords
self.Xb_var_irow = Xb_var_irow
if verbose:
p_success(f'LMRt: job.gen_Xb() >>> job.Xb created')
p_success(f'LMRt: job.gen_Xb() >>> job.Xb_coords created')
p_success(f'LMRt: job.gen_Xb() >>> job.Xb_var_irow created')
def gen_Ye(self, proxy_frac=None, nens=None, verbose=False, seed=0):
''' Generate Ye
'''
if proxy_frac is None:
proxy_frac = self.configs['proxy_frac']
else:
self.configs['proxy_frac'] = proxy_frac
if verbose: p_header(f'LMRt: job.gen_Ye() >>> job.configs["proxy_frac"] = {proxy_frac}')
if nens is None:
nens = self.configs['recon_nens']
else:
self.configs['recon_nens'] = nens
if verbose: p_header(f'LMRt: job.gen_Xb() >>> job.configs["recon_nens"] = {nens}')
self.proxydb.split(proxy_frac, verbose=verbose, seed=seed)
vn_1st = list(self.prior.fields.keys())[0]
time = self.prior.fields[vn_1st].time
Ye_assim_df = pd.DataFrame(index=time)
Ye_eval_df = pd.DataFrame(index=time)
Ye_assim_lat = []
Ye_assim_lon = []
Ye_eval_lat = []
Ye_eval_lon = []
Ye_assim_coords = np.ndarray((self.proxydb.assim.nrec, 2))
Ye_eval_coords = np.ndarray((self.proxydb.eval.nrec, 2))
for pid, pobj in self.proxydb.assim.records.items():
series = pd.Series(index=pobj.ye_time, data=pobj.ye_value)
Ye_assim_df[pid] = series
Ye_assim_lat.append(pobj.lat)
Ye_assim_lon.append(pobj.lon)
Ye_assim_df.dropna(inplace=True)
Ye_assim_coords[:, 0] = Ye_assim_lat
Ye_assim_coords[:, 1] = Ye_assim_lon
for pid, pobj in self.proxydb.eval.records.items():
series = pd.Series(index=pobj.ye_time, data=pobj.ye_value)
Ye_eval_df[pid] = series
Ye_eval_lat.append(pobj.lat)
Ye_eval_lon.append(pobj.lon)
Ye_eval_df.dropna(inplace=True)
Ye_eval_coords[:, 0] = Ye_eval_lat
Ye_eval_coords[:, 1] = Ye_eval_lon
Ye_df = pd.concat([Ye_assim_df, Ye_eval_df], axis=1).dropna()
self.Ye_df = Ye_df
nt = len(Ye_df)
self.Ye_assim_df = Ye_assim_df
self.Ye_eval_df = Ye_eval_df
random.seed(seed)
sample_idx = random.sample(list(range(nt)), nens)
self.prior_sample_years = Ye_df.index[sample_idx].values
if verbose:
p_success(f'LMRt: job.gen_Ye() >>> job.prior_sample_years created')
# use self.prior_sample_idx for sampling
self.Ye_assim = np.array(Ye_assim_df)[sample_idx].T
self.Ye_eval = np.array(Ye_eval_df)[sample_idx].T
self.Ye_assim_coords = Ye_assim_coords
self.Ye_eval_coords = Ye_eval_coords
if verbose:
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_df created')
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_assim_df created')
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_eval_df created')
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_assim created')
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_eval created')
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_assim_coords created')
p_success(f'LMRt: job.gen_Ye() >>> job.Ye_eval_coords created')
def update_yr(self, target_yr, Xb_aug, Xb_aug_coords, recon_loc_rad, recon_timescale=1, verbose=False, debug=False):
start_yr = target_yr - recon_timescale/2
end_yr = target_yr + recon_timescale/2
Xb = np.copy(Xb_aug)
i = 0
for pid, pobj in self.proxydb.assim.records.items():
mask = (pobj.time >= start_yr) & (pobj.time <= end_yr)
nYobs = np.sum(mask)
if nYobs == 0:
i += 1
continue # skip to next proxy record
Yobs = pobj.value[mask].mean()
loc = cov_localization(recon_loc_rad, pobj, Xb_aug_coords)
Ye = Xb[i - (self.proxydb.assim.nrec+self.proxydb.eval.nrec)]
ob_err = pobj.R / nYobs
Xa = enkf_update_array(Xb, Yobs, Ye, ob_err, loc=loc, debug=debug)
if debug:
Xb_mean = Xb[:-(self.proxydb.assim.nrec+self.proxydb.eval.nrec)].mean()
Xa_mean = Xa[:-(self.proxydb.assim.nrec+self.proxydb.eval.nrec)].mean()
innov = Yobs - Ye.mean()
if np.abs(innov / Yobs) > 1:
print(pid, i - (self.proxydb.assim.nrec+self.proxydb.eval.nrec))
print(f'\tXb_mean: {Xb_mean:.2f}, Xa_mean: {Xa_mean:.2f}')
print(f'\tInnovation: {innov:.2f}, ob_err: {ob_err:.2f}, Yobs: {Yobs:.2f}, Ye_mean: {Ye.mean():.2f}')
Xbvar = Xb.var(axis=1, ddof=1)
Xavar = Xa.var(axis=1, ddof=1)
vardiff = Xavar - Xbvar
if (not np.isfinite(np.min(vardiff))) or (not np.isfinite(np.max(vardiff))):
raise ValueError('Reconstruction has blown-up. Exiting!')
if debug: print('min/max change in variance: ('+str(np.min(vardiff))+','+str(np.max(vardiff))+')')
i += 1
Xb = Xa
return Xb
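    # Hedged note, not part of the original file: enkf_update_array() (imported from .da)
    # applies the standard serial ensemble Kalman update for one scalar observation,
    #   Xa = Xb + K * (Yobs - Ye),   K ~ loc * cov(Xb, Ye) / (var(Ye) + ob_err),
    # which is why update_yr() assimilates the proxies one record at a time.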
def run_da(self, recon_period=None, recon_loc_rad=None, recon_timescale=None, verbose=False, debug=False):
if recon_period is None:
recon_period = self.configs['recon_period']
else:
self.configs['recon_period'] = recon_period
if verbose: p_header(f'LMRt: job.run_da() >>> job.configs["recon_period"] = {recon_period}')
if recon_timescale is None:
recon_timescale = self.configs['recon_timescale']
else:
self.configs['recon_timescale'] = recon_timescale
if verbose: p_header(f'LMRt: job.run_da() >>> job.configs["recon_timescale"] = {recon_timescale}')
if recon_loc_rad is None:
recon_loc_rad = self.configs['recon_loc_rad']
else:
self.configs['recon_loc_rad'] = recon_loc_rad
if verbose: p_header(f'LMRt: job.run_da() >>> job.configs["recon_loc_rad"] = {recon_loc_rad}')
recon_yrs = np.arange(recon_period[0], recon_period[-1]+1)
Xb_aug = np.append(self.Xb, self.Ye_assim, axis=0)
Xb_aug = np.append(Xb_aug, self.Ye_eval, axis=0)
Xb_aug_coords = np.append(self.Xb_coords, self.Ye_assim_coords, axis=0)
Xb_aug_coords = np.append(Xb_aug_coords, self.Ye_eval_coords, axis=0)
nt = np.size(recon_yrs)
nrow, nens = np.shape(Xb_aug)
Xa = np.ndarray((nt, nrow, nens))
for yr_idx, target_yr in enumerate(tqdm(recon_yrs, desc='KF updating')):
Xa[yr_idx] = self.update_yr(target_yr, Xb_aug, Xb_aug_coords, recon_loc_rad, recon_timescale, verbose=verbose, debug=debug)
recon_fields = {}
for vn, irow in self.Xb_var_irow.items():
_, nlat, nlon = np.shape(self.prior.fields[vn].value)
recon_fields[vn] = Xa[:, irow[0]:irow[-1]+1, :].reshape((nt, nlat, nlon, nens))
recon_fields[vn] = np.moveaxis(recon_fields[vn], -1, 1)
self.recon_fields = recon_fields
if verbose: p_success(f'LMRt: job.run_da() >>> job.recon_fields created')
def save_recon(self, save_path, compress_dict={'zlib': True, 'least_significant_digit': 1}, verbose=False,
output_geo_mean=False, target_lats=[], target_lons=[], output_full_ens=False, dtype=np.float32):
output_dict = {}
for vn, fd in self.recon_fields.items():
nyr, nens, nlat, nlon = np.shape(fd)
if output_full_ens:
output_var = np.array(fd, dtype=dtype)
output_dict[vn] = (('year', 'ens', 'lat', 'lon'), output_var)
else:
output_var = np.array(fd.mean(axis=1), dtype=dtype)
output_dict[vn] = (('year', 'lat', 'lon'), output_var)
lats, lons = self.prior.fields[vn].lat, self.prior.fields[vn].lon
try:
gm_ens = np.ndarray((nyr, nens), dtype=dtype)
nhm_ens = np.ndarray((nyr, nens), dtype=dtype)
shm_ens = np.ndarray((nyr, nens), dtype=dtype)
for k in range(nens):
gm_ens[:,k], nhm_ens[:,k], shm_ens[:,k] = global_hemispheric_means(fd[:,k,:,:], lats)
output_dict[f'{vn}_gm_ens'] = (('year', 'ens'), gm_ens)
output_dict[f'{vn}_nhm_ens'] = (('year', 'ens'), nhm_ens)
output_dict[f'{vn}_shm_ens'] = (('year', 'ens'), shm_ens)
except:
if verbose: p_warning(f'LMRt: job.save_recon() >>> Global hemispheric means cannot be calculated')
if vn == 'tas':
try:
nino_ind = nino_indices(fd, lats, lons)
nino12 = nino_ind['nino1+2']
nino3 = nino_ind['nino3']
nino34 = nino_ind['nino3.4']
nino4 = nino_ind['nino4']
wpi = nino_ind['wpi']
nino12 = np.array(nino12, dtype=dtype)
nino3 = np.array(nino3, dtype=dtype)
nino34 = np.array(nino34, dtype=dtype)
nino4 = np.array(nino4, dtype=dtype)
output_dict['nino1+2'] = (('year', 'ens'), nino12)
output_dict['nino3'] = (('year', 'ens'), nino3)
output_dict['nino3.4'] = (('year', 'ens'), nino34)
output_dict['nino4'] = (('year', 'ens'), nino4)
output_dict['wpi'] = (('year', 'ens'), wpi)
except:
if verbose: p_warning(f'LMRt: job.save_recon() >>> NINO or West Pacific Indices cannot be calculated')
# calculate tripole index (TPI)
try:
tpi = calc_tpi(fd, lats, lons)
tpi = np.array(tpi, dtype=dtype)
output_dict['tpi'] = (('year', 'ens'), tpi)
except:
if verbose: p_warning(f'LMRt: job.save_recon() >>> Tripole Index (TPI) cannot be calculated')
if output_geo_mean:
geo_mean_ts = geo_mean(fd, lats, lons, target_lats, target_lons)
output_dict['geo_mean'] = (('year', 'ens'), geo_mean_ts)
ds = xr.Dataset(
data_vars=output_dict,
coords={
'year': np.arange(self.configs['recon_period'][0], self.configs['recon_period'][1]+1),
'ens': np.arange(nens),
'lat': lats,
'lon': lons,
})
if compress_dict is not None:
encoding_dict = {}
for k in output_dict.keys():
encoding_dict[k] = compress_dict
ds.to_netcdf(save_path, encoding=encoding_dict)
else:
ds.to_netcdf(save_path)
if verbose: p_header(f'LMRt: job.save_recon() >>> Reconstructed fields saved to: {save_path}')
def prepare(self, job_dirpath=None, proxydb_path=None, ptype_psm=None, ptype_season=None, verbose=False,
prior_path=None, prior_varname_dict=None, prior_season=None, prior_regrid_ntrunc=None,
obs_path=None, obs_varname_dict=None, anom_period=None,
calib_period=None, seasonalized_prior_path=None, seasonalized_obs_path=None,
prior_loc_path=None, obs_loc_path=None, calibed_psm_path=None, prep_savepath=None):
if job_dirpath is None:
job_dirpath = self.configs['job_dirpath']
else:
self.configs['job_dirpath'] = job_dirpath
if verbose: p_header(f'LMRt: job.prepare() >>> job.configs["job_dirpath"] = {job_dirpath}')
os.makedirs(job_dirpath, exist_ok=True)
if prep_savepath is None:
prep_savepath = os.path.join(job_dirpath, f'job.pkl')
else:
if 'precalc' not in self.configs:
self.configs['precalc'] = {}
self.configs['precalc']['prep_savepath'] = prep_savepath
if verbose: p_header(f'LMRt: job.prepare() >>> job.configs["precalc"]["prep_savepath"] = {prep_savepath}')
if os.path.exists(prep_savepath):
job_prep = pd.read_pickle(prep_savepath)
            if verbose: p_header(f'LMRt: job.prepare() >>> Preparation data loaded from: {prep_savepath}')
self.proxydb = job_prep.proxydb
self.prior = job_prep.prior
self.obs = job_prep.obs
del(job_prep)
else:
# load & process proxy database
self.load_proxydb(path=proxydb_path, verbose=verbose)
self.filter_proxydb(ptype_psm=ptype_psm, verbose=verbose)
self.seasonalize_proxydb(ptype_season=ptype_season, verbose=verbose)
# load prior & obs
self.load_prior(path_dict=prior_path, varname_dict=prior_varname_dict, anom_period=anom_period, verbose=verbose)
self.load_obs(path_dict=obs_path, varname_dict=obs_varname_dict, anom_period=anom_period, verbose=verbose)
# calibrate & forward PSM
self.calibrate_psm(
seasonalized_prior_path=seasonalized_prior_path,
seasonalized_obs_path=seasonalized_obs_path,
prior_loc_path=prior_loc_path,
obs_loc_path=obs_loc_path,
calibed_psm_path=calibed_psm_path,
calib_period=calib_period,
verbose=verbose,
)
self.forward_psm(verbose=verbose)
# seasonalize & regrid prior
del(self.seasonalized_prior)
del(self.seasonalized_obs)
self.seasonalize_prior(season=prior_season, verbose=verbose)
self.regrid_prior(ntrunc=prior_regrid_ntrunc, verbose=verbose)
# save result
pd.to_pickle(self, prep_savepath)
self.configs['precalc']['prep_savepath'] = prep_savepath
if verbose:
                p_header(f'LMRt: job.prepare() >>> Preparation data saved to: {prep_savepath}')
p_header(f'LMRt: job.prepare() >>> job.configs["precalc"]["prep_savepath"] = {prep_savepath}')
def save(self, prep_savepath=None, verbose=False):
if hasattr(self, 'seasonalized_prior'):
del(self.seasonalized_prior)
if hasattr(self, 'seasonalized_obs'):
del(self.seasonalized_obs)
if prep_savepath is None:
prep_savepath = os.path.join(self.configs['job_dirpath'], f'job.pkl')
        if 'precalc' not in self.configs:
self.configs['precalc'] = {}
pd.to_pickle(self, prep_savepath)
self.configs['precalc']['prep_savepath'] = prep_savepath
if verbose:
            p_header(f'LMRt: job.save() >>> Preparation data saved to: {prep_savepath}')
            p_header(f'LMRt: job.save() >>> job.configs["precalc"]["prep_savepath"] = {prep_savepath}')
def run(self, recon_seeds=None, recon_vars=None, recon_period=None, recon_timescale=None, recon_loc_rad=None,
nens=None, proxy_frac=None, verbose=False, save_configs=True,
compress_dict={'zlib': True, 'least_significant_digit': 1},
output_geo_mean=False, target_lats=[], target_lons=[],
output_full_ens=False, dtype=np.float32):
job_dirpath = self.configs["job_dirpath"]
if recon_seeds is None:
recon_seeds = self.configs['recon_seeds']
else:
self.configs['recon_seeds'] = np.array(recon_seeds).tolist()
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_seeds"] = {recon_seeds}')
if recon_vars is None:
recon_vars = self.configs['recon_vars']
else:
self.configs['recon_vars'] = recon_vars
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_vars"] = {recon_vars}')
if type(recon_vars) is str:
# contains only one variable
recon_vars = [recon_vars]
if nens is None:
nens = self.configs['recon_nens']
else:
self.configs['recon_nens'] = nens
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_nens"] = {nens}')
if proxy_frac is None:
proxy_frac = self.configs['proxy_frac']
else:
self.configs['proxy_frac'] = proxy_frac
if verbose: p_header(f'LMRt: job.run() >>> job.configs["proxy_frac"] = {proxy_frac}')
if recon_period is None:
recon_period = self.configs['recon_period']
else:
self.configs['recon_period'] = recon_period
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_period"] = {recon_period}')
if recon_timescale is None:
recon_timescale = self.configs['recon_timescale']
else:
self.configs['recon_timescale'] = recon_timescale
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_timescale"] = {recon_timescale}')
if recon_loc_rad is None:
recon_loc_rad = self.configs['recon_loc_rad']
else:
self.configs['recon_loc_rad'] = recon_loc_rad
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_loc_rad"] = {recon_loc_rad}')
# add settings for data saving to configs
self.configs['save_settings'] = {}
self.configs['save_settings']['compress_dict'] = compress_dict
self.configs['save_settings']['output_geo_mean'] = output_geo_mean
self.configs['save_settings']['target_lats'] = target_lats
self.configs['save_settings']['target_lons'] = target_lons
self.configs['save_settings']['output_full_ens'] = output_full_ens
if dtype is np.float32:
self.configs['save_settings']['dtype'] = 32
elif dtype is np.float64:
self.configs['save_settings']['dtype'] = 64
else:
raise ValueError('Wrong dtype!')
if verbose: p_header(f'LMRt: job.run() >>> job.configs["save_settings"] = {self.configs["save_settings"]}')
os.makedirs(job_dirpath, exist_ok=True)
if save_configs:
cfg_savepath = os.path.join(job_dirpath, f'job_configs.yml')
with open(cfg_savepath, 'w') as f:
yaml.dump(self.configs, f)
if verbose: p_header(f'LMRt: job.run() >>> job.configs saved to: {cfg_savepath}')
for seed in recon_seeds:
p_header(f'LMRt: job.run() >>> seed: {seed} | max: {recon_seeds[-1]}')
recon_savepath = os.path.join(job_dirpath, f'job_r{seed:02d}_recon.nc')
if os.path.exists(recon_savepath):
                p_header(f'LMRt: job.run() >>> reconstruction already exists at: {recon_savepath}')
continue
else:
self.gen_Ye(proxy_frac=proxy_frac, nens=nens, seed=seed)
self.gen_Xb(recon_vars=recon_vars)
idx_savepath = os.path.join(job_dirpath, f'job_r{seed:02d}_idx.pkl')
pd.to_pickle([self.prior_sample_idx, self.proxydb.calibed_idx_assim, self.proxydb.calibed_idx_eval], idx_savepath)
if verbose: p_header(f'LMRt: job.run() >>> randomized indices for prior and proxies saved to: {idx_savepath}')
print(self.proxydb.assim)
self.run_da(recon_period=recon_period, recon_timescale=recon_timescale, recon_loc_rad=recon_loc_rad)
self.save_recon(recon_savepath, compress_dict=compress_dict, output_geo_mean=output_geo_mean, verbose=verbose,
target_lats=target_lats, target_lons=target_lons, output_full_ens=output_full_ens, dtype=dtype)
p_header(f'LMRt: job.run() >>> DONE!')
def run_cfg(self, cfg_path, job_dirpath=None, recon_seeds=None, verbose=False, save_configs=True):
self.load_configs(cfg_path, verbose=verbose)
if job_dirpath is None:
if os.path.isabs(self.configs['job_dirpath']):
job_dirpath = self.configs['job_dirpath']
else:
job_dirpath = cfg_abspath(self.cfg_path, self.configs['job_dirpath'])
else:
job_dirpath = cwd_abspath(job_dirpath)
self.configs['job_dirpath'] = job_dirpath
os.makedirs(job_dirpath, exist_ok=True)
if verbose:
p_header(f'LMRt: job.load_configs() >>> job.configs["job_dirpath"] = {job_dirpath}')
p_success(f'LMRt: job.load_configs() >>> {job_dirpath} created')
proxydb_path = cfg_abspath(cfg_path, self.configs['proxydb_path'])
ptype_psm = self.configs['ptype_psm']
ptype_season = self.configs['ptype_season']
prior_path = cfg_abspath(cfg_path, self.configs['prior_path'])
prior_varname_dict = self.configs['prior_varname']
prior_season = self.configs['prior_season']
prior_regrid_ntrunc = self.configs['prior_regrid_ntrunc']
prior_crop_domain_range = self.configs['prior_crop_domain_range'] if 'prior_crop_domain_range' in self.configs else None
obs_path = cfg_abspath(cfg_path, self.configs['obs_path'])
obs_varname_dict = self.configs['obs_varname']
anom_period = self.configs['anom_period']
psm_calib_period = self.configs['psm_calib_period']
try:
seasonalized_prior_path = self.configs['precalc']['seasonalized_prior_path']
seasonalized_obs_path = self.configs['precalc']['seasonalized_obs_path']
prior_loc_path = self.configs['precalc']['prior_loc_path']
obs_loc_path = self.configs['precalc']['obs_loc_path']
calibed_psm_path = self.configs['precalc']['calibed_psm_path']
prep_savepath = self.configs['precalc']['prep_savepath']
except:
seasonalized_prior_path = None
seasonalized_obs_path = None
prior_loc_path = None
obs_loc_path = None
calibed_psm_path = None
prep_savepath = None
if recon_seeds is None:
recon_seeds = self.configs['recon_seeds']
else:
self.configs['recon_seeds'] = np.array(recon_seeds).tolist()
if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_seeds"] = {recon_seeds}')
recon_vars = self.configs['recon_vars']
recon_period = self.configs['recon_period']
recon_timescale = self.configs['recon_timescale']
recon_loc_rad = self.configs['recon_loc_rad']
recon_nens = self.configs['recon_nens']
proxy_frac = self.configs['proxy_frac']
try:
compress_dict = self.configs['save_settings']['compress_dict']
output_geo_mean = self.configs['save_settings']['output_geo_mean']
target_lats = self.configs['save_settings']['target_lats']
target_lons = self.configs['save_settings']['target_lons']
output_full_ens = self.configs['save_settings']['output_full_ens']
dtype_int = self.configs['save_settings']['dtype']
if dtype_int == 32:
dtype = np.float32
elif dtype_int == 64:
dtype = np.float64
else:
raise ValueError(f'Wrong dtype in: {cfg_path}! Should be either 32 or 64.')
except:
compress_dict={'zlib': True, 'least_significant_digit': 1}
output_geo_mean=False
target_lats=[]
target_lons=[]
output_full_ens=False
dtype=np.float32
self.prepare(job_dirpath, prep_savepath=prep_savepath, proxydb_path=proxydb_path, ptype_psm=ptype_psm, ptype_season=ptype_season,
prior_path=prior_path, prior_varname_dict=prior_varname_dict, prior_season=prior_season, prior_regrid_ntrunc=prior_regrid_ntrunc,
obs_path=obs_path, obs_varname_dict=obs_varname_dict, anom_period=anom_period,
calib_period=psm_calib_period, seasonalized_prior_path=seasonalized_prior_path, seasonalized_obs_path=seasonalized_obs_path,
prior_loc_path=prior_loc_path, obs_loc_path=obs_loc_path, calibed_psm_path=calibed_psm_path, verbose=verbose)
# crop the domain if set to
if prior_crop_domain_range is not None:
self.crop_prior(prior_crop_domain_range, verbose=verbose)
self.save(prep_savepath=prep_savepath, verbose=verbose)
self.run(recon_seeds=recon_seeds, recon_vars=recon_vars, recon_period=recon_period, save_configs=save_configs,
recon_timescale=recon_timescale, recon_loc_rad=recon_loc_rad, nens=recon_nens, proxy_frac=proxy_frac, verbose=verbose,
compress_dict=compress_dict, output_geo_mean=output_geo_mean, target_lats=target_lats, target_lons=target_lons,
output_full_ens=output_full_ens, dtype=dtype)
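# A minimal driver sketch, assuming the surrounding class is exposed as
# LMRt.ReconJob (as the package's examples suggest); the config path and seed
# list below are placeholders, not values taken from this repository.
if __name__ == '__main__':
    import LMRt

    job = LMRt.ReconJob()
    job.run_cfg(
        cfg_path='./configs/job_configs.yml',  # placeholder config file
        recon_seeds=[0, 1, 2],                 # overrides configs['recon_seeds']
        verbose=True,
    )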
| fzhu2e/LMRt | LMRt/reconjob.py | reconjob.py | py | 45,613 | python | en | code | 9 | github-code | 6 |
| 35988325228 |
import joblib
model = None
def init_model(
db,
model_themes_path='./flaskr/model/log_reg_themes',
model_cats_path='./flaskr/model/log_reg_cats'
):
global model
cur = db.cursor()
query = """
select id from theme order by id;
"""
cur.execute(query)
theme_ids = [id[0] for id in cur.fetchall()]
cur.close()
cur = db.cursor()
query = """
select id from category order by id;
"""
cur.execute(query)
cats_ids = [id[0] for id in cur.fetchall()]
cur.close()
model = Model(theme_ids, cats_ids, model_themes_path, model_cats_path)
def get_model():
return model
class Model:
def __init__(self, theme_ids, cats_id, model_themes_path, model_cats_path):
self.model_themes = joblib.load(model_themes_path)
self.model_cats = joblib.load(model_cats_path)
self.theme_ids = theme_ids
self.cats_ids = cats_id
def analyse_theme(self, text: str, detailed_text:str = None, probs_count=3):
probs = self.model_themes.predict_proba([text])[0]
if detailed_text:
d_probs = self.model_themes.predict_proba([detailed_text])[0]
probs = probs * d_probs
most_likely_probs = None
if probs_count <= 0:
most_likely_probs = [self.theme_ids[id]
for id in probs.argsort().tolist()[:][::-1]]
else:
most_likely_probs = [self.theme_ids[id]
for id in probs.argsort().tolist()[-probs_count:][::-1]]
return most_likely_probs
def analyse_cat(self, text: str, detailed_text:str = None):
probs = self.model_cats.predict_proba([text])[0]
if detailed_text:
d_probs = self.model_cats.predict_proba([detailed_text])[0]
probs = probs * d_probs
most_likely_probs = [self.cats_ids[id]
for id in probs.argsort().tolist()[:][::-1]]
return most_likely_probs
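# A minimal usage sketch, assuming a DB-API connection to a database that
# already holds populated `theme` and `category` tables and that the two
# joblib pipelines exist at their default paths; the DSN and sample text
# below are placeholders.
if __name__ == '__main__':
    import psycopg2

    db = psycopg2.connect('dbname=app user=app')  # placeholder credentials
    init_model(db)                                # loads both joblib models once
    clf = get_model()
    print(clf.analyse_theme('Broken streetlight on Main Street', probs_count=3))
    print(clf.analyse_cat('Broken streetlight on Main Street')[:1])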
| dimayasha7123/kursach3 | flaskr/model/model.py | model.py | py | 1,979 | python | en | code | 0 | github-code | 6 |
| 74200612027 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : train.py
# @Author: stoneye
# @Date : 2023/09/01
# @Contact : [email protected]
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils
from models import ModelUtil
from models import NextvladModel
from models import TextExactor
from models import TrnModel
from tensorflow.python.client import device_lib
class VideoModel():
def __init__(self, args):
"""
:param args: config params
"""
# init the config
        self.init_config(args)
model_util_obj = ModelUtil()
if self.model_type == 'nextvlad':
model_obj = NextvladModel() # nextvlad model
else:
model_obj = TrnModel() # trn model
title_obj = TextExactor() # text model
        # load the pre-trained word embeddings
word_embeddings = model_util_obj._init_vocab_and_emb(word_vocab=args.word_vocab,
pre_train_emb_path=args.pre_Train_path)
self.word_embeddings = tf.Variable(word_embeddings,
name='word_embeddings',
dtype=tf.float32)
self.init_placeholder()
# build_graph,support sigle gpu or multi gpus
self.total_loss, self.tag_total_prob, self.cate_total_prob, self.train_op = \
self.multi_gpu_bulid_graph(num_gpu=self.num_gpu,
lr=self.lr,
ad_strength=self.ad_strength,
word_embeddings=self.word_embeddings,
tag_gt_label=self.tag_gt_label,
cate_gt_label=self.cate_gt_label,
tag_nums=self.tag_nums,
cate_nums=self.cate_nums,
rgb_fea_input=self.input_video_rgb_feature,
rgb_fea_true_frame=self.rgb_fea_true_frame,
audio_fea_input=self.input_video_audio_feature,
audio_fea_true_frame=self.audio_fea_true_frame,
max_frames_rgb=self.max_frames_rgb,
max_frames_audio=self.max_frames_audio,
title_fea_input=self.title_id_int_list,
word_sequence_length=self.word_sequence_length,
model_obj=model_obj,
title_obj=title_obj,
is_training=self.is_training,
dropout_keep_prob=self.dropout_keep_prob,
model_util_obj=model_util_obj,
task_type=self.task_type)
    def init_config(self, args):
# task name:["cate","tag","cate_and_tag"]
# 1)"cate": only cate task; 2)"tag":only tag task; 3)"cate_and_tag": multi-task, cate and tag
self.task_type = args.task_type
# nums of gpu
self.num_gpu = args.num_gpu
# learning rate
self.lr = args.lr
# ratio of adversarial perturbations
self.ad_strength = args.ad_strength
# the num of tag
self.tag_nums = args.tag_nums
# the num of cate
self.cate_nums = args.cate_nums
# the num of video frames
self.max_frames_rgb = args.rgb_frames
# the num of audio frames
self.max_frames_audio = args.audio_frames
# the max length word(word id) of title
self.title_max_len = args.title_max_len
# main aggregate model : light-weight: trn ; heavy-weight: nextvlad
self.model_type = args.model_type
# the feature size of img frames.
self.rgb_fea_size = args.rgb_fea_size
# the feature size of audio frames.
self.audio_fea_size = args.audio_fea_size
def init_placeholder(self):
"""
:return:
"""
# title:[batch,max_len]
self.title_id_int_list = tf.placeholder(tf.int32,
shape=[None, self.title_max_len])
word_sequence_length = tf.reduce_sum(tf.sign(
self.title_id_int_list), axis=1) # [batch,]
self.word_sequence_length = tf.cast(word_sequence_length, tf.int32) # [batch,]
# cate ground truth
self.cate_gt_label = tf.placeholder(tf.float32,
                                            shape=[None, self.cate_nums],
name="cate_gt_label")
# tag ground truth
self.tag_gt_label = tf.placeholder(tf.float32,
shape=[None, self.tag_nums],
name="tag_gt_label")
# rgb fea
self.input_video_rgb_feature = tf.placeholder(tf.float32,
shape=[None, self.max_frames_rgb,
self.rgb_fea_size])
# the num of rgb frames
self.rgb_fea_true_frame = tf.placeholder(tf.int32,
shape=[None, ])
# the num of audio frames
self.input_video_audio_feature = tf.placeholder(tf.float32,
shape=[None,
self.max_frames_audio,
self.audio_fea_size])
# audio frames
self.audio_fea_true_frame = tf.placeholder(tf.int32,
shape=[None, ])
# keep dropout
self.dropout_keep_prob = tf.placeholder(tf.float32,
name="dropout_keep_prob")
# is train stage or not
self.is_training = tf.placeholder(tf.bool,
name="is_training")
def cal_loss(self, rgb_fea, rgb_fea_true_frame,
audio_fea, audio_fea_true_frame,
title_emb_fea, word_sequence_length,
tag_gt_label, cate_gt_label,
tag_nums, cate_nums,
max_frames_rgb, max_frames_audio,
is_training, dropout_keep_prob, model_obj,
title_obj, model_util_obj, reuse,
task_type,
hidden_size=256, embedding_size=200,
num_filters=100, num_outputs=1024,
):
with tf.variable_scope("cl_loss_from_emb", reuse=reuse):
# rgb dense vector
rgb_cluster_fea = model_obj.forward(is_training=is_training,
fea_input=rgb_fea,
dropout_keep_prob=dropout_keep_prob,
fea_type='rgb',
max_frames=max_frames_rgb,
true_frames=rgb_fea_true_frame,
name_scope='rgb_cluster_fea')
# audio dense vector
audio_cluster_fea = model_obj.forward(is_training=is_training,
fea_input=audio_fea,
dropout_keep_prob=dropout_keep_prob,
fea_type='audio',
max_frames=max_frames_audio,
true_frames=audio_fea_true_frame,
name_scope='audio_cluster_fea')
# title dense vector baesd on bilstm model
bilstm_title_feature = title_obj._bilstm_feature(
embedding_descript=title_emb_fea,
hidden_size=hidden_size,
des_sequence_length=word_sequence_length,
dtype=tf.float32,
reuse=None)
# title dense vector based on textcnn model
textcnn_title_feature = title_obj._text_cnn_feature(
embedding_descript=title_emb_fea,
embedding_size=embedding_size,
filter_sizes=list(map(int, "2,3,4,5".split(","))),
num_filters=num_filters,
reuse=None
)
title_fea = tf.concat([bilstm_title_feature, textcnn_title_feature], axis=1)
title_fea_drop = slim.dropout(title_fea,
keep_prob=dropout_keep_prob,
is_training=is_training,
scope="title_fea_drop")
title_fea_dense = slim.fully_connected(inputs=title_fea_drop,
num_outputs=num_outputs,
activation_fn=None,
scope="title_fea_dense")
# batch normalization
title_fea_dense_bn = slim.batch_norm(
title_fea_dense,
center=True,
scale=True,
is_training=is_training,
scope="title_fea_dense_bn",
fused=False)
with slim.arg_scope([slim.fully_connected],
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training': is_training, 'center': True,
'scale': True}):
# multi-modal
total_fea = tf.concat([rgb_cluster_fea, audio_cluster_fea, title_fea_dense_bn], 1)
# se gate
concate_features_se = model_util_obj._se_module(is_training=is_training,
activation=total_fea,
name_scope="concat_se")
concate_features_se_drop = tf.nn.dropout(concate_features_se, dropout_keep_prob)
if task_type == 'cate':
cate_total_loss, cate_total_prob = ModelUtil.cate_hmc_layer(
fea_vector=concate_features_se_drop,
dropout_keep_prob=dropout_keep_prob,
cate_nums=cate_nums,
ml_tag=cate_gt_label,
name_scope='cate1_total_loss')
tag_total_prob = tf.zeros_like(tag_gt_label)
return cate_total_loss, tag_total_prob, cate_total_prob
elif task_type == 'tag':
tag_total_loss, \
tag_total_prob = ModelUtil.tag_hmc_layer(fea_vector=concate_features_se_drop,
dropout_keep_prob=dropout_keep_prob,
tag_nums=tag_nums,
ml_tag=tag_gt_label,
name_scope='tag_total_loss')
cate_total_prob = tf.zeros_like(cate_gt_label)
return tag_total_loss, tag_total_prob, cate_total_prob
elif task_type == 'cate_and_tag':
cate_total_loss, cate_total_prob = ModelUtil.cate_hmc_layer(
fea_vector=concate_features_se_drop,
dropout_keep_prob=dropout_keep_prob,
cate_nums=cate_nums,
ml_tag=cate_gt_label,
name_scope='cate1_total_loss')
tag_total_loss, \
tag_total_prob = ModelUtil.tag_hmc_layer(fea_vector=concate_features_se_drop,
dropout_keep_prob=dropout_keep_prob,
tag_nums=tag_nums,
ml_tag=tag_gt_label,
name_scope='tag_total_loss')
return cate_total_loss + tag_total_loss, tag_total_prob, cate_total_prob
else:
raise Exception('task_type:{} not in [cate,tag,cate_and_tag]')
def multi_gpu_bulid_graph(self, num_gpu, lr, ad_strength, word_embeddings,
tag_gt_label, cate_gt_label, tag_nums, cate_nums,
title_fea_input, word_sequence_length,
rgb_fea_input, rgb_fea_true_frame,
audio_fea_input, audio_fea_true_frame,
max_frames_rgb, max_frames_audio,
model_obj, title_obj, is_training,
dropout_keep_prob, model_util_obj, task_type):
"""
:param num_gpu: # the nums of gpu
:param lr: #learning rate
:param ad_strength: # adversarial perturbation
:param word_embeddings: #word embedding [batch,emb_size]
:param tag_gt_label: # tag gt label [batch,tag_nums]
:param cate_gt_label: #cate gt label [batch,cate_nums]
:param tag_nums: # the nums of tag
:param cate_nums: # the nums of cate
:param title_fea_input: # title fea [batch,seq_len]
:param word_sequence_length: # the truth length of title
:param rgb_fea_input: #rgb fea [batch,frame,fea_size]
:param rgb_fea_true_frame: #the truth frames of rgb fea
:param audio_fea_input: #audio fea [batch,frame,fea_size]
:param audio_fea_true_frame: #the truth frames of audio fea
:param max_frames_rgb: #the max frames of rgb
:param max_frames_audio: #the max frames of audio
:param model_obj: #aggregate model: nextvlad or trn
:param title_obj: #textcnn or Bi-LSTM
:param is_training: # True or False
:param dropout_keep_prob: # float
:param model_util_obj: #
:param task_type: #the type of task:cate, tag, cate_and_tag:multi-task,cate & tag
:return:
"""
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
gpus = gpus[:num_gpu]
num_gpus = len(gpus)
if num_gpus > 0:
print("Using the following GPUs to train: {}".format(gpus))
num_towers = num_gpus
device_string = '/gpu:%d'
else:
print("No GPUs found. Training on CPU.")
num_towers = 1
device_string = '/cpu:%d'
self.global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(lr)
tower_rgb_fea_input = tf.split(rgb_fea_input, num_towers)
tower_rgb_fea_true_frame = tf.split(rgb_fea_true_frame, num_towers)
tower_audio_fea_input = tf.split(audio_fea_input, num_towers)
tower_audio_fea_true_frame = tf.split(audio_fea_true_frame, num_towers)
tower_title_fea_input = tf.split(title_fea_input, num_towers)
tower_word_sequence_length = tf.split(word_sequence_length, num_towers)
tower_tag_gt_label = tf.split(tag_gt_label, num_towers)
tower_cate_gt_label = tf.split(cate_gt_label, num_towers)
tower_gradients = []
tower_predict_tag_probs = []
tower_predict_cate_probs = []
tower_total_losses = []
for i in range(num_towers):
with tf.device(device_string % i):
with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
with (slim.arg_scope([slim.model_variable, slim.variable],
device="/cpu:0" if num_gpu != 1 else "/gpu:0")):
result = self.build_graph(
word_embeddings=word_embeddings,
rgb_fea_input=tower_rgb_fea_input[i],
rgb_fea_true_frame=tower_rgb_fea_true_frame[i],
audio_fea_input=tower_audio_fea_input[i],
audio_fea_true_frame=tower_audio_fea_true_frame[i],
max_frames_rgb=max_frames_rgb,
max_frames_audio=max_frames_audio,
title_fea_input=tower_title_fea_input[i],
word_sequence_length=tower_word_sequence_length[i],
tag_gt_label=tower_tag_gt_label[i],
cate_gt_label=tower_cate_gt_label[i],
is_training=is_training,
ad_strength=ad_strength,
tag_nums=tag_nums,
cate_nums=cate_nums,
model_obj=model_obj,
title_obj=title_obj,
dropout_keep_prob=dropout_keep_prob,
model_util_obj=model_util_obj,
task_type=task_type
)
cl_tag_prob = result["tag_prob"]
tower_predict_tag_probs.append(cl_tag_prob)
cl_cate_prob = result["cate_prob"]
tower_predict_cate_probs.append(cl_cate_prob)
loss = result["loss"]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
tower_total_losses.append(loss)
gradients = \
optimizer.compute_gradients(loss, colocate_gradients_with_ops=False)
tower_gradients.append(gradients)
total_loss = tf.reduce_mean(tf.stack(tower_total_losses))
total_tag_prob = tf.concat(tower_predict_tag_probs, 0)
total_cate_prob = tf.concat(tower_predict_cate_probs, 0)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
merged_gradients = utils.combine_gradients(tower_gradients)
train_op = optimizer.apply_gradients(merged_gradients, global_step=self.global_step)
return total_loss, total_tag_prob, total_cate_prob, train_op
def build_graph(self, word_embeddings, tag_gt_label,
cate_gt_label, tag_nums, cate_nums, ad_strength,
rgb_fea_input, rgb_fea_true_frame,
audio_fea_input, audio_fea_true_frame,
title_fea_input, word_sequence_length,
max_frames_rgb, max_frames_audio,
is_training, model_obj, title_obj,
dropout_keep_prob, model_util_obj, task_type):
# [batch,25,emb_size]
embedded_title = tf.nn.embedding_lookup(word_embeddings,
title_fea_input)
# sigmoid cross entropy loss
cl_loss, cl_tag_prob, cl_cate_prob = self.cal_loss(rgb_fea=rgb_fea_input,
rgb_fea_true_frame=rgb_fea_true_frame,
max_frames_rgb=max_frames_rgb,
max_frames_audio=max_frames_audio,
audio_fea=audio_fea_input,
audio_fea_true_frame=audio_fea_true_frame,
title_emb_fea=embedded_title,
word_sequence_length=word_sequence_length,
is_training=is_training,
tag_gt_label=tag_gt_label,
cate_gt_label=cate_gt_label,
tag_nums=tag_nums,
cate_nums=cate_nums,
dropout_keep_prob=dropout_keep_prob,
model_obj=model_obj,
title_obj=title_obj,
model_util_obj=model_util_obj,
task_type=task_type,
reuse=None)
# add the perturbation on rgb fea
rgb_fea_perturbated = model_util_obj.add_perturbation(rgb_fea_input, cl_loss,
norm_length=ad_strength)
# add the perturbation on audio fea
audio_fea_perturbated = model_util_obj.add_perturbation(audio_fea_input, cl_loss,
norm_length=ad_strength)
# add the perturbation on text(title) fea
title_emb_fea_perturbated = model_util_obj.add_perturbation(embedded_title, cl_loss,
norm_length=ad_strength)
# sigmoid cross entropy loss of perturbation
ad_loss, _, _ = self.cal_loss(rgb_fea=rgb_fea_perturbated,
rgb_fea_true_frame=rgb_fea_true_frame,
max_frames_rgb=max_frames_rgb,
max_frames_audio=max_frames_audio,
audio_fea=audio_fea_perturbated,
audio_fea_true_frame=audio_fea_true_frame,
title_emb_fea=title_emb_fea_perturbated,
word_sequence_length=word_sequence_length,
is_training=is_training,
tag_gt_label=tag_gt_label,
cate_gt_label=cate_gt_label,
tag_nums=tag_nums,
cate_nums=cate_nums,
dropout_keep_prob=dropout_keep_prob,
model_obj=model_obj,
title_obj=title_obj,
model_util_obj=model_util_obj,
task_type=task_type,
reuse=True)
return {'loss': cl_loss + ad_loss, 'tag_prob': cl_tag_prob, 'cate_prob': cl_cate_prob}
| stoneyezhenxu/Multimodal_Video_Classification | src/video_model.py | video_model.py | py | 23,282 | python | en | code | 0 | github-code | 6 |
| 14025841679 |
"""
@author: Yuhao Cheng
@contact: yuhao.cheng[at]outlook.com
"""
#!!!!! ignore the warning messages
import warnings
warnings.filterwarnings('ignore')
import os
import pickle
import math
import torch
import time
import numpy as np
from PIL import Image
from collections import OrderedDict
import torchvision.transforms as T
import torchvision.transforms.functional as tf
from torch.utils.data import DataLoader
from pyanomaly.core.utils import AverageMeter, flow_batch_estimate, tensorboard_vis_images, make_info_message, ParamSet
from pyanomaly.datatools.evaluate.utils import psnr_error
from ..abstract.base_engine import BaseTrainer, BaseInference
from ..engine_registry import ENGINE_REGISTRY
__all__ = ['MEMAETrainer', 'MEMAEInference']
@ENGINE_REGISTRY.register()
class MEMAETrainer(BaseTrainer):
NAME = ["MEMAE.TRAIN"]
def custom_setup(self):
# basic meter
self.loss_meter_MemAE = AverageMeter(name='loss_memae')
def train(self,current_step):
# Pytorch [N, C, D, H, W]
# initialize
start = time.time()
self.set_requires_grad(self.MemAE, True)
self.MemAE.train()
writer = self.kwargs['writer_dict']['writer']
global_steps = self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])]
# get the data
data, anno, meta = next(self._train_loader_iter) # the core for dataloader
self.data_time.update(time.time() - start)
input_data = data.cuda()
# True Process =================Start===================
output_rec, att = self.MemAE(input_data)
loss_rec = self.rec_loss(output_rec, input_data)
loss_mem = self.mem_loss(att)
loss_memae_all = self.loss_lamada['rec_loss'] * loss_rec + self.loss_lamada['mem_loss'] * loss_mem
# loss_memae_all = self.loss_lamada['rec_loss'] * loss_rec
self.optim_MemAE.zero_grad()
# with torch.autograd.set_detect_anomaly(True):
loss_memae_all.backward()
self.optim_MemAE.step()
self.loss_meter_MemAE.update(loss_memae_all.detach())
if self.config.TRAIN.adversarial.scheduler.use:
self.lr_memae.step()
# ======================End==================
self.batch_time.update(time.time() - start)
if (current_step % self.steps.param['log'] == 0):
msg = make_info_message(current_step, self.steps.param['max'], self.kwargs['model_type'], self.batch_time,
self.config.TRAIN.batch_size, self.data_time, [self.loss_meter_MemAE])
self.logger.info(msg)
writer.add_scalar('Train_loss_MemAE', self.loss_meter_MemAE.val, global_steps)
if (current_step % self.steps.param['vis'] == 0):
vis_objects = OrderedDict({
'train_output_rec_memeae': output_rec.detach(),
'train_input': input_data.detach()
})
tensorboard_vis_images(vis_objects, writer, global_steps, self.normalize.param['train'])
global_steps += 1
# reset start
start = time.time()
# self.saved_model = {'MemAE':self.MemAE}
self.saved_model['MemAE'] = self.MemAE
# self.saved_optimizer = {'optim_MemAE': self.optim_MemAE}
self.saved_optimizer['optimizer_MemAE']= self.optim_MemAE
# self.saved_loss = {'loss_MemAE':self.loss_meter_MemAE.val}
self.saved_loss['loss_MemAE'] = self.loss_meter_MemAE.val
self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])] = global_steps
@ENGINE_REGISTRY.register()
class MEMAEInference(BaseInference):
NAME = ["MEMAE.INFERENCE"]
def inference(self):
for h in self._hooks:
h.inference()
| YuhaoCheng/PyAnomaly | pyanomaly/core/engine/functions/memae.py | memae.py | py | 3,913 | python | en | code | 107 | github-code | 6 |
| 3167027289 |
#!/usr/bin/env python3
import random
from typing import Tuple
from functions.aes import AESCipher, pkcs7_pad, get_blocks, gen_random_bytes
def _encryption_oracle(bytes_: bytes) -> Tuple[bytes, str]:
key = gen_random_bytes(16)
iv = gen_random_bytes(16)
prefix = gen_random_bytes(random.randint(5, 10))
suffix = gen_random_bytes(random.randint(5, 10))
pt = prefix + bytes_ + suffix
cbc_mode = random.choice([True, False])
if cbc_mode:
cbc = AESCipher(AESCipher.MODE_CBC, key, iv=iv)
ct = cbc.encrypt(pkcs7_pad(pt))
answer = "cbc"
else:
ecb = AESCipher(AESCipher.MODE_ECB, key)
ct = ecb.encrypt(pkcs7_pad(pt))
answer = "ecb"
return ct, answer
def challenge11() -> bool:
pt = bytes(gen_random_bytes(1) * random.randint(100, 200))
ct, answer = _encryption_oracle(pt)
blocks = get_blocks(ct)
unique_blocks = len(set(blocks))
guess = "cbc" if len(blocks) == unique_blocks else "ecb"
    return guess == answer
if __name__ == "__main__":
for _ in range(100):
assert challenge11(), "The result does not match the expected value"
print("Ok")
| svkirillov/cryptopals-python3 | cryptopals/set2/challenge11.py | challenge11.py | py | 1,185 | python | en | code | 0 | github-code | 6 |
| 27867118318 |
import tensorflow as tf
from utils.nn import linear
from .tdnn import TDNN
def embed_characters(input, vocab_size, embed_dim=40, scope=None, reuse=None,
use_batch_norm=True, use_highway=True, highway_layers=2):
""" Character-level embedding """
with tf.variable_scope(scope or 'Embedder') as scope:
if reuse: scope.reuse_variables()
input = tf.unpack(tf.transpose(input, [1, 0, 2])) # L * [N, W]
embedding = tf.get_variable('embedding', [vocab_size, embed_dim])
embedded = []
for word in input:
embed = tf.nn.embedding_lookup(embedding, word) # [N, W, d]
conved = TDNN(embed, embed_dim)
if use_batch_norm:
conved = batch_norm(conved)
if use_highway:
conved = highway(conved, conved.get_shape()[1], highway_layers, 0)
embedded.append(conved)
scope.reuse_variables()
return embedded
def batch_norm(x, epsilon=1e-5):
shape = x.get_shape().as_list()
with tf.variable_scope('BatchNorm'):
gamma = tf.get_variable("gamma", [shape[-1]],
initializer=tf.random_normal_initializer(1., 0.02))
beta = tf.get_variable("beta", [shape[-1]],
initializer=tf.constant_initializer(0.))
mean, variance = tf.nn.moments(x, [0, 1])
return tf.nn.batch_norm_with_global_normalization(
x, mean, variance, beta, gamma, epsilon,
scale_after_normalization=True)
def highway(input_, size, layer_size=1, bias=-2, f=tf.nn.relu):
"""Highway Network (cf. http://arxiv.org/abs/1505.00387).
t = sigmoid(Wy + b)
z = t * g(Wy + b) + (1 - t) * y
where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
"""
with tf.variable_scope('Highway'):
output = input_
for idx in range(layer_size):
output = f(linear(output, size, 0, scope='output_lin_%d' % idx, init='he'))
transform_gate = tf.sigmoid(linear(input_, size, 0, scope='transform_lin_%d' % idx) + bias)
carry_gate = 1. - transform_gate
output = transform_gate * output + carry_gate * input_
return output
| therne/logue | models/embedding.py | embedding.py | py | 2,203 | python | en | code | 0 | github-code | 6 |
| 21897871134 |
import got3
import pymongo
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# connect to mongo deamon
connection = pymongo.MongoClient("mongodb://localhost")
# connect to the collection called uberban_tweets in the kubrick db
db = connection.kubrick.uberban_tweets
count = 0
try:
while True:
tweetCriteria = got3.manager.TweetCriteria().setSince("2017-09-22").setQuerySearch("uberban")
#tweetCriteria = got3.manager.TweetCriteria().setQuerySearch("uberban")
tweet = got3.manager.TweetManager.getTweets(tweetCriteria)[count]
sent = SentimentIntensityAnalyzer().polarity_scores(tweet.text)['compound']
print(tweet.text)
print(sent)
print(tweet.date)
db.insert_many([{"tweet": tweet.text, "sentiment": sent}])
count += 1
except:
print("tweet scrape ended with {no_tweets} tweets".format(no_tweets = count))
| JackJoeKul/cities-in-need | Old UberBan Tweets Scrape + Sentiment Analysis/old_tweets.py | old_tweets.py | py | 906 | python | en | code | 0 | github-code | 6 |
| 26999768171 |
import random
def structDataSampling(**kwargs):
global tmp
result = list()
num = kwargs.get("num", -1)
if num == -1:
raise Exception("Wrong number input")
for index in range(0, num):
element = list()
for key, value in kwargs.items():
if key == "int":
it = iter(value['datarange'])
tmp = random.randint(next(it), next(it))
elif key == "float":
it = iter(value['datarange'])
tmp = random.uniform(next(it), next(it))
elif key == "str":
tmp = ''.join(random.SystemRandom().choice(value['datarange']) for _ in range(value['len']))
else:
continue
element.append(tmp)
result.append(element)
return result
def example():
result = structDataSampling(num=3,
int={"datarange": [1, 100]},
float={"datarange": [1.0, 100.0]},
str={"datarange": ['a', 'b', 'c', 'd', 'e'], "len": 3}
)
for i in result:
print(i)
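# example() above is defined but never invoked; a standard entry-point guard
# (not present in the original module) makes the script runnable directly.
if __name__ == '__main__':
    example()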
| wanghan79/2023_Python | 2021011593陈俊聪/PythonFinalTest_2021011593cjc/work_1_cjc/RandomDataSampling.py | RandomDataSampling.py | py | 1,194 | python | en | code | 8 | github-code | 6 |
| 30489464890 |
from .master import Master
import numpy as np
import poselib
import time
import math
import test_module.linecloud as lineCloudTest
import test_module.recontest as recontest
import utils.pose.pose_estimation as pe
import utils.pose.vector as vector
from utils.pose import dataset
from utils.pose import line
from utils.l2precon import calculate
from static import variable
np.random.seed(variable.RANDOM_SEED)
class OLC(Master):
def __init__(self, dataset_path, output_path):
self.pts_to_line = dict()
self.line_to_pts = dict()
self.line_3d = None
self.pts_2d_query = None # Images.txt
self.pts_3d_query = None # Points3D.txt
self.camera_dict_gt = None # cameras.txt
self.queryIds = None
self.queryNames = None
self.image_dict_gt = None
self.resultPose = list()
self.resultRecon = list()
self.map_type = "OLC"
self.points_3D_recon = list()
self.lines_3D_recon = list()
super().__init__(dataset_path, output_path)
self.pts_3d_ids = list(self.pts_3d_query.keys())
np.random.shuffle(self.pts_3d_ids)
def makeLineCloud(self):
print("OLC: Random distribution line cloud")
_pts_3d = np.array([v.xyz for v in self.pts_3d_query.values()])
_pts_ids = np.array([k for k in self.pts_3d_query.keys()])
self.points_3D, self.line_3d, self.ind_to_id, self.id_to_ind = line.drawlines_olc(_pts_3d,_pts_ids)
for i, k in enumerate(self.pts_3d_query.keys()):
self.pts_to_line[k] = self.line_3d[i]
self.line_to_pts[i] = k
def maskSparsity(self, sparisty_level):
new_shape = int(len(self.pts_3d_ids) * sparisty_level)
self.sparse_line_3d_ids = set(self.pts_3d_ids[:new_shape])
def matchCorrespondences(self, query_id):
connected_pts3d_idx = np.where(self.pts_2d_query[query_id].point3D_ids != -1)[0]
connected_pts3d_ids = self.pts_2d_query[query_id].point3D_ids[connected_pts3d_idx]
p2 = np.array([self.pts_3d_query[k].xyz for k in connected_pts3d_ids],dtype=np.float64)
x1 = np.array(self.pts_2d_query[query_id].xys[connected_pts3d_idx],dtype=np.float64)
pts_to_ind = {}
for _i, k in enumerate(connected_pts3d_ids):
pts_to_ind[k] = _i
if self.pts_3d_query[k].xyz[0] != p2[_i][0]:
raise Exception("Point to Index Match Error", k)
self.valid_pts_3d_ids = self.sparse_line_3d_ids.intersection(set(connected_pts3d_ids))
newIndex = []
_x2 = []
for _pid in self.valid_pts_3d_ids:
newIndex.append(pts_to_ind[_pid])
_x2.append(self.pts_to_line[_pid])
if newIndex:
newIndex = np.array(newIndex)
# p1: 2D Point
# x1: 2D Line
# p2: 3D Offset
# x2: 3D Line
self._x1 = x1[newIndex]
self._p2 = p2[newIndex]
self._x2 = np.array(_x2)
else:
self._x1 = np.array([])
self._p2 = np.array([])
self._x2 = np.array([])
print("Found correspondences: ", self._x1.shape[0])
def addNoise(self, noise_level):
super().addNoise(noise_level)
def estimatePose(self, query_id):
if self._x1.shape[0] >= 6:
gt_img = pe.get_GT_image(query_id, self.pts_2d_query, self.image_dict_gt)
cam_id = gt_img.camera_id
cam_p6l = [pe.convert_cam(self.camera_dict_gt[cam_id])]
res = poselib.estimate_p6l_relative_pose(self._x1, self._p2, self._x2, cam_p6l, cam_p6l, variable.RANSAC_OPTIONS, variable.BUNDLE_OPTIONS, variable.REFINE_OPTION)
super().savePoseAccuracy(res, gt_img, cam_p6l[0])
def savePose(self, sparisty_level, noise_level):
super().savePose(sparisty_level, noise_level)
def saveAllPoseCSV(self):
super().saveAllPoseCSV()
def recoverPts(self, estimator, sparsity_level, noise_level):
print("OLC recover image", "\n")
self.sparse_pts_3d_ids =[]
self.id_to_ind_recon = {}
self.ind_to_id_recon = {}
self.points_3D_recon = []
self.lines_3D_recon = []
for i in range(len(self.sparse_line_3d_ids)):
_pts_3d_id = self.line_to_pts[i]
self.sparse_pts_3d_ids.append(_pts_3d_id)
self.points_3D_recon.append(self.pts_3d_query[_pts_3d_id].xyz)
self.lines_3D_recon.append(self.pts_to_line[_pts_3d_id])
self.id_to_ind_recon[_pts_3d_id] = i
self.ind_to_id_recon[i] = _pts_3d_id
self.points_3D_recon = np.array(self.points_3D_recon)
self.lines_3D_recon = np.array(self.lines_3D_recon)
ref_iter = variable.REFINE_ITER
if estimator=='SPF':
# No swap
ests = calculate.coarse_est_spf(self.points_3D_recon, self.lines_3D_recon)
ests_pts = calculate.refine_est_spf(self.points_3D_recon, self.lines_3D_recon, ests, ref_iter)
info = [sparsity_level, noise_level, 0, estimator]
super().saveReconpoints(ests_pts, info)
if estimator=='TPF':
            print("OLC shouldn't be estimated with TPF")
pass
def reconTest(self,estimator):
#reconTest
recontest.recontest_pt_idx([self.points_3D_recon],[self.ind_to_id_recon],self.pts_3d_query)
def test(self,recover,esttype):
# recon test
print("Consistency test for",self.map_type)
if recover:
self.reconTest(esttype)
| Fusroda-h/ppl | domain/olc.py | olc.py | py | 5,682 | python | en | code | 3 | github-code | 6 |
| 31515309886 |
#!/usr/bin/env python3
""" Machine Translation model with RNN's """
import tensorflow as tf
SelfAttention = __import__('1-self_attention').SelfAttention
class RNNDecoder(tf.keras.layers.Layer):
""" RNN Decoder part of the translation model
"""
def __init__(self, vocab, embedding, units, batch):
""" initialized the variables
Arg:
- batch: int with the batch size
- vocab: int the size of the input vocabulary
- embedding: int dimensionality of the embedding vector
- units: int the number of hidden units in the RNN cell
- batch: int representing the batch size
Public instance attributes:
- embedding: a keras Embedding layer converts words from the
vocabulary into an embedding vector
- gru: a keras GRU layer with units units
- F: a Dense layer with vocab units
"""
super(RNNDecoder, self).__init__()
self.embedding = tf.keras.layers.Embedding(
vocab, embedding)
self.gru = tf.keras.layers.GRU(
units,
recurrent_initializer="glorot_uniform",
return_sequences=True,
return_state=True)
self.F = tf.keras.layers.Dense(vocab)
def call(self, x, s_prev, hidden_states):
""" Calling the GRU RNN layer to construct the dencoding part of the
tranlation model
Arg:
- x: tensor of shape (batch, 1) containing the previous word in the
target sequence as an index of the target vocabulary
- s_prev: is a tensor of shape (batch, units) containing the previous
decoder hidden state
- hidden_states: is a tensor of shape (batch, input_seq_len, units)
containing the outputs of the encoder
Return:
- y: tensor of shape (batch, vocab) with the output word as a one hot
vector in the target vocabulary
- s: tensor of shape (batch, units) with the new decoder hidden state
"""
# embedding the input x
embedding = self.embedding(x)
# self-attention of the inputs per state
attention = SelfAttention(s_prev.shape[1])
# get the attention of the inputs
context, weights = attention(s_prev, hidden_states)
# getting the context per each input, decoder part
context = tf.expand_dims(context, axis=1)
# concat the context plus the embedding to pass thought the model
inputs = tf.concat([embedding, context], -1)
decode_outs, state = self.gru(inputs,
initial_state=hidden_states[:, -1])
# get the outputs of the RNN attention model
y = tf.reshape((decode_outs), [-1, decode_outs.shape[2]])
# reduce the output to the vocab len
y = self.F(y)
return y, state
| macoyulloa/holbertonschool-machine_learning | supervised_learning/0x11-attention/2-rnn_decoder.py | 2-rnn_decoder.py | py | 2,882 | python | en | code | 0 | github-code | 6 |
| 36153730574 |
import boto3
import traceback
import datetime
import os
from botocore.exceptions import ClientError
from ..models.bucket import Bucket
from ..util.preprocessor import preprocess
"""
S3 functions
"""
def get_active_bucket_or_create_new(username):
"""
Returns the user's current active bucket. If there are no buckets,
or all the users buckets are full, a new one will be created first.
returns: Bucket object
"""
try:
# queries database for non-full buckets
range_cond = Bucket.sort.startswith("BUCKET_")
buckets = Bucket.query(hash_key=username, range_key_condition=range_cond)
# return existing if not full or create new
for buck in buckets:
if not buck.full:
return buck
else:
bucket = create_bucket(username)
return bucket
except Exception as e:
traceback.print_exc()
response_object = {
"status": "failed",
"message": "Could not query buckets in DB. {}".format(e),
}
return response_object, 500
def create_bucket(username, region="eu-central-1"):
"""
Creates an S3 bucket in S3.
Naming format: 'flasktextapi-{ENV}-{USERNAME}-BUCKET{BUCKET_ID}'
IMPORTANT: underscores in usernames are converted to dashes.
returns: bucket
"""
# create S3 bucket
try:
bucket_id = _generate_bucket_id(username)
username_conv = username.replace("_", "-")
bucket_name = "flasktextapi-{env}-{username}-bucket{id}".format(
env=os.getenv("BOILERPLATE_ENV"), username=username_conv, id=bucket_id
)
bucket = boto3.resource("s3").Bucket(bucket_name)
location = {"LocationConstraint": region}
response = bucket.create(CreateBucketConfiguration=location)
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
try:
db_bucket = _create_db_bucket(username, bucket_id, bucket_name)
return db_bucket
except Exception:
traceback.print_exc()
response_object = {
"status": "failed",
"message": "Bucket created successfully but bucket details could not be stored to database.",
}
return response_object, 500
else:
response_object = {"status": "failed", "message": "could not create bucket"}
return response_object, 500
except Exception as e:
traceback.print_exc()
response_object = {
"status": "failed",
"message": "Could not create bucket. {}".format(e),
}
return response_object, 500
def add_file(username, input, bucket_name, id, region="eu-central-1"):
"""
Adds a text to an S3 bucket.
    Naming format of file: 'unprocessed_{id}_{username}'
@return: Name of the file as String.
"""
# check input type
if not isinstance(input, str):
raise ValueError("Text needs to be a String.")
bucket = boto3.resource("s3").Bucket(bucket_name)
key = "unprocessed_{id}_{username}".format(id=id, username=username)
bucket.put_object(Body=bytes(input, "utf-8"), Key=key)
return key
def add_preprocessed_file(username, input, bucket_name, id, region="eu-central-1"):
"""
    Adds a preprocessed text to an S3 bucket.
    Naming format of file: 'preprocessed_{id}_{username}'
@return: Name of the file as String.
"""
# check input type
if not isinstance(input, str):
raise ValueError("Text needs to be a String.")
# preprocess input
prepr_input = preprocess(input)
bucket = boto3.resource("s3").Bucket(bucket_name)
key = "preprocessed_{id}_{username}".format(id=id, username=username)
bucket.put_object(Body=bytes(prepr_input, "utf-8"), Key=key)
return key
def get_object(bucket_name, key):
"""
Fetches an object from S3.
returns: String
"""
s3 = boto3.resource("s3")
object = s3.Object(bucket_name, key)
return object.get()["Body"].read().decode("utf-8")
def delete_objects(username, bucket_id, objects):
"""
Deletes an object from an s3 bucket.
Returns: List of deleted objects
"""
db_bucket = Bucket.get(hash_key=username, range_key="BUCKET_{}".format(bucket_id))
bucket = boto3.resource("s3").Bucket(db_bucket.bucket_name)
delete_dict = {"Objects": [{"Key": name} for name in objects]}
response = bucket.delete_objects(Delete=delete_dict)
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
deleted_items = []
for item in response["Deleted"]:
deleted_items.append(item["Key"])
return deleted_items
else:
deleted_items = []
return deleted_items
"""
DB Functions related to S3
"""
def _create_db_bucket(username, id, bucket_name):
new_bucket = Bucket(
username=username,
sort="BUCKET_{}".format(id),
bucket_name=bucket_name,
created_date=datetime.datetime.utcnow(),
public_id=id,
full=False,
)
new_bucket.save()
return new_bucket
"""
Helper functions
"""
def _generate_bucket_id(username):
full_buckets = Bucket.query(
hash_key=username, range_key_condition=Bucket.sort.startswith("BUCKET_")
)
new_id = 0
for buck in full_buckets:
if buck.public_id > new_id:
new_id = buck.public_id + 1
return new_id
def _bucket_full(bucket_name):
bucket = boto3.resource("s3").Bucket(bucket_name)
size = sum([object.size for object in bucket.objects.all()])
if size > 4990000000000:
return True
else:
return False
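# A rough end-to-end sketch of how these helpers compose, assuming AWS
# credentials, the BOILERPLATE_ENV variable, and the DynamoDB-backed Bucket
# model are already configured; the username, text, and id are placeholders.
if __name__ == '__main__':
    username = 'alice'
    bucket = get_active_bucket_or_create_new(username)
    key = add_file(username, 'some raw submission text', bucket.bucket_name, id=42)
    prep_key = add_preprocessed_file(username, 'some raw submission text', bucket.bucket_name, id=42)
    print(get_object(bucket.bucket_name, key))
    print(delete_objects(username, bucket.public_id, [key, prep_key]))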
| jkausti/flask-textsapi | app/textsapi/service/s3buckets.py | s3buckets.py | py | 5,725 | python | en | code | 1 | github-code | 6 |
| 1004762180 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 11 20:06:31 2019
@author: saksake
"""
import numpy as np
from sklearn.datasets import load_iris
def datasets() :
    # LOAD IRIS DATASET
    iris = load_iris()
    # MAKE FEATURE DICTIONARY
    all_features = {}
    for i in range(len(iris.feature_names)):
        key_ = str(iris.feature_names[i])
        key_split = key_.split()
        key = key_split[0]+'_'+key_split[1]
        all_features[key] = iris.data[:,i]
    # ADD TARGET TO DICTIONARY
    all_features['species'] = iris.target
return all_features
def splitdict(feature_dict, train_portion, label_key) :
train_feature, train_label = {}, {}
key = list(feature_dict.keys())
ndata = len(feature_dict[key[0]])
train_n = int(ndata*train_portion)
idxs = np.array(range(ndata))
np.random.shuffle(idxs)
train_idx = idxs[:train_n]
test_idx = idxs[train_n:]
for key in feature_dict :
if key == label_key :
train_label[key] = {}
train_label[key] = np.array(feature_dict[key])[train_idx]
else :
train_feature[key] = {}
train_feature[key] = np.array(feature_dict[key])[train_idx]
test_feature, test_label = {}, {}
for key in feature_dict :
if key == label_key :
test_label[key] = {}
test_label[key] = np.array(feature_dict[key])[test_idx]
else :
test_feature[key] = {}
test_feature[key] = np.array(feature_dict[key])[test_idx]
return train_feature, train_label, test_feature, test_label
use_feature_name = ['sepal_length',
'sepal_width',
'petal_length',
'petal_width',
'species']
name_columns_category = []
name_columns_bucket = []
name_columns_numeric = ['sepal_length',
'sepal_width',
'petal_length',
'petal_width']
label_key ='species'
train_portion = 0.6
all_features = datasets()
for key in all_features:
print("'{:}',".format(key))
# CHOOSE INTEREST FEATURES FROM ALL FEATURES
used_features = {}
for key in all_features:
if key in use_feature_name :
used_features[key] = all_features[key]
inp_train_feature, inp_train_label, inp_test_feature, inp_test_label = splitdict(feature_dict = used_features,
train_portion = train_portion,
label_key = label_key)
import tensorflow as tf
# MAKE INPUT FUNCTION
# TRAIN DATA
input_fn_train = tf.estimator.inputs.numpy_input_fn(
x = inp_train_feature,
y = inp_train_label[label_key],
shuffle=True,
batch_size=128,
num_epochs=None
)
# TEST DATA
input_fn_test = tf.estimator.inputs.numpy_input_fn(
x = inp_test_feature,
y = inp_test_label[label_key],
shuffle=False,
batch_size=128,
num_epochs=1
)
# Define feature columns.
feature_columns_numeric, feature_columns_category, feature_columns_bucket = [], [], []
for key in inp_train_feature :
# Define numeric feature columns.
if key in name_columns_numeric :
feature_columns_numeric.append(tf.feature_column.numeric_column(key))
# Define categorycal feature columns.
elif key in name_columns_category :
uniq = (np.unique(inp_train_feature[key])).tolist()
cat_column = tf.feature_column.categorical_column_with_vocabulary_list(key = key,
vocabulary_list = uniq)
embed_column = tf.feature_column.embedding_column(
categorical_column=cat_column,
dimension=len(uniq)
)
feature_columns_category.append(embed_column)
# Define bucket feature columns.
elif key in name_columns_bucket :
numeric_column = tf.feature_column.numeric_column(key)
# make bucket boundaries
arr = np.linspace(min(inp_train_feature[key]), max(inp_train_feature[key]), 1000)
n_bucket = 3
q = 1./(n_bucket+1.)
boundaries = []
for i in range(n_bucket):
boundaries.append(int(np.quantile(arr, q*(i+1))))
        # Then bucketize the numeric column on the computed quantile boundaries.
bucketized_feature_column = tf.feature_column.bucketized_column(
source_column = numeric_column,
boundaries = boundaries)
feature_columns_bucket.append(bucketized_feature_column)
feature_columns = feature_columns_numeric + feature_columns_category + feature_columns_bucket
# DEFINE ESTIMATOR
estimator= tf.estimator.DNNClassifier(
feature_columns = feature_columns,
# Two hidden layers
hidden_units=[512, 256],
optimizer='Adagrad', #'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'
activation_fn=tf.nn.relu, # relu. tanh, sigmoid
n_classes = len(np.unique(inp_train_label[label_key])),
# Model directory
model_dir = 'Iris')
# TRAIN MODEL
estimator.train(input_fn=input_fn_train, steps=5000)
# EVALUATE MODEL
print('-------------------------------------')
evaluate = estimator.evaluate(input_fn = input_fn_test)
print('-------------------------------------')
# PREDICT
pred = list(estimator.predict(input_fn = input_fn_test))
# VISUALIZE TESTING DAN PREDICTED
y_prob = [x['probabilities'] for x in pred]
y_pred = np.asarray([np.argmax(x) for x in y_prob])
y_real = inp_test_label[label_key]
ntrue = len(np.where(y_pred == y_real)[0])
acc = ntrue/float(len(y_real))
print('Accuracy = {:}'.format(acc))
| rofiqq/Machine-Learning | High_API/classifier/iris/iris.py | iris.py | py | 5,883 | python | en | code | 0 | github-code | 6 |
| 1197619153 |
"""
13. Write a Python program that accepts a comma separated sequence of words
as input and prints the unique words in sorted form (alphanumerically).
"""
def sort_comma_seperated_words(sentence):
    if "," not in sentence:
        raise Exception("Only comma-separated sentences are accepted!")
    # the exercise asks for the unique words, sorted alphanumerically
    unique_words = sorted(set(sentence.split(",")))
    return unique_words
if __name__ == '__main__':
try:
print(sort_comma_seperated_words("hello should throw error"))
except Exception as e:
print(e)
| asmitbhantana/Insight-Workshop | PythonProgrammingAssignmentsI/Data Types/q13.py | q13.py | py | 553 | python | en | code | 0 | github-code | 6 |
| 8625677438 |
import numpy as np
import copy
import random
import math
value_points = {
'J' : 11,
'Q' : 12,
'K' : 13,
'A' : 14}
hh_dict = {
'straight_flush' : 9,
'four_of_a_kind' : 8,
'full_house' : 7,
'flush' : 6,
'straight' : 5,
'three_of_a_kind' : 4,
'two_pair' : 3,
'pair' : 2,
'high_card' : 1}
#Takes list of 5 Card objects and returns
#a 6 element list, where the first element is the hand hierarchy
#(straight_flush = 9, high_card=1) and the subsequent five elements
#are the numerical card ranks (A=14, K=13, etc.), "double-sorted"
#by kind (set, pair, etc.) then by rank. For example, the hand
#[(2, C), (K, C), (3, S), (2, D), (K, H)] gets translated to:
#[3, 13, 13, 2, 2, 3].
def hand_strength(hand):
#Break down hand into sub-lists of values, suits, and frequencies
#Create list of ranks
values_list = [x.rank for x in hand]
#Create sorted values list
sl = list(sorted(values_list))
#Create list of suits
suits_list = [x.suit for x in hand]
#Create list of unique values
uvalues_list = list(set(values_list))
#Create dict of {value : frequency}
val_freq_dict = {}
for x in uvalues_list:
val_freq_dict[x] = values_list.count(x)
#Create list of (frequency, value) tuples.
freq_list = []
for x in values_list:
freq_list.append(val_freq_dict.get(x))
freq_val_list = zip(freq_list, values_list)
freq_val_sorted = sorted(freq_val_list, reverse=True)
val_sorted = [x[1] for x in freq_val_sorted]
    #Determine hand hierarchy
    #Note: evaluate flush and straight explicitly so a straight flush is not
    #misclassified as a plain flush (the original else branch was unreachable)
    is_flush = len(set(suits_list)) == 1
    is_straight = (sl[0] == sl[1] - 1 == sl[2] - 2 == sl[3] - 3 == sl[4] - 4
                   or sl == [2, 3, 4, 5, 14])
    if 2 in freq_list:
        if len(uvalues_list) == 4:
            hh = 'pair'
        elif len(uvalues_list) == 3:
            hh = 'two_pair'
        else:
            hh = 'full_house'
    elif 4 in freq_list:
        hh = 'four_of_a_kind'
    elif 3 in freq_list:
        hh = 'three_of_a_kind'
    elif is_flush and is_straight:
        hh = 'straight_flush'
    elif is_flush:
        hh = 'flush'
    elif is_straight:
        hh = 'straight'
    else:
        hh = 'high_card'
hh = hh_dict.get(hh)
#Generate list representing full strength of hand.
#hh will be at index 0, followed by the ordered values.
hand_strength = val_sorted[:]
hand_strength.insert(0, hh)
return hand_strength
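#Worked example of the encoding above (illustration only): the namedtuple
#stands in for the real Card class and assumes nothing beyond the .rank and
#.suit attributes that hand_strength reads.
def _demo_hand_strength():
    from collections import namedtuple
    Card = namedtuple('Card', ['rank', 'suit'])
    #2C, KC, 3S, 2D, KH -> two pair, kings over twos with a 3 kicker
    hand = [Card(2, 'C'), Card(13, 'C'), Card(3, 'S'), Card(2, 'D'), Card(13, 'H')]
    return hand_strength(hand)   #expected: [3, 13, 13, 2, 2, 3]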
#Takes list of 7 cards (hole cards + community cards)
#and returns the list of 21 unique 5-card combinations (C(7,5) = 21).
def permuts(hand):
permutsl = []
#I know this is not the Pythonic way... (though it should be fast)
permutsl.append([hand[0]] + [hand[1]] + [hand[2]] + [hand[3]] + [hand[4]]) #1
permutsl.append([hand[0]] + [hand[1]] + [hand[2]] + [hand[3]] + [hand[5]]) #2
permutsl.append([hand[0]] + [hand[1]] + [hand[2]] + [hand[3]] + [hand[6]]) #3
permutsl.append([hand[0]] + [hand[1]] + [hand[2]] + [hand[4]] + [hand[5]]) #4
permutsl.append([hand[0]] + [hand[1]] + [hand[2]] + [hand[4]] + [hand[6]]) #5
permutsl.append([hand[0]] + [hand[1]] + [hand[2]] + [hand[5]] + [hand[6]]) #6
permutsl.append([hand[0]] + [hand[1]] + [hand[3]] + [hand[4]] + [hand[5]]) #7
permutsl.append([hand[0]] + [hand[1]] + [hand[3]] + [hand[4]] + [hand[6]]) #8
permutsl.append([hand[0]] + [hand[1]] + [hand[3]] + [hand[5]] + [hand[6]]) #9
permutsl.append([hand[0]] + [hand[1]] + [hand[4]] + [hand[5]] + [hand[6]]) #10
permutsl.append([hand[0]] + [hand[2]] + [hand[3]] + [hand[4]] + [hand[5]]) #11
permutsl.append([hand[0]] + [hand[2]] + [hand[3]] + [hand[4]] + [hand[6]]) #12
permutsl.append([hand[0]] + [hand[2]] + [hand[3]] + [hand[5]] + [hand[6]]) #13
permutsl.append([hand[0]] + [hand[2]] + [hand[4]] + [hand[5]] + [hand[6]]) #14
permutsl.append([hand[0]] + [hand[3]] + [hand[4]] + [hand[5]] + [hand[6]]) #15
permutsl.append([hand[1]] + [hand[2]] + [hand[3]] + [hand[4]] + [hand[5]]) #16
permutsl.append([hand[1]] + [hand[2]] + [hand[3]] + [hand[4]] + [hand[6]]) #17
permutsl.append([hand[1]] + [hand[2]] + [hand[3]] + [hand[5]] + [hand[6]]) #18
permutsl.append([hand[1]] + [hand[2]] + [hand[4]] + [hand[5]] + [hand[6]]) #19
permutsl.append([hand[1]] + [hand[3]] + [hand[4]] + [hand[5]] + [hand[6]]) #20
permutsl.append([hand[2]] + [hand[3]] + [hand[4]] + [hand[5]] + [hand[6]]) #21
return permutsl
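# Equivalent sketch using the standard library, kept as an unused alternative so
# the explicit enumeration above remains the version actually called;
# itertools.combinations yields the same 21 five-card subsets in the same order.
import itertools
def permuts_itertools(hand):
    return [list(combo) for combo in itertools.combinations(hand, 5)]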
#Takes list of 6-element 'hand-strength' lists
#and returns the index of the strongest hand.
#Should only be used in combination with other functions
def adjudicate(hands):
index = 0
multiplier = 100
search = True
while search:
#Start with empty array; clear the array for subsequent loops
array = []
#Populate array with the 1st value of each 'hand-strength' list.
#The first value will be the int designating the hand hierarchy
#The multiplier is used to magnify the hierarchy code so that
#it outweighs any subsequent values when we take running sum.
for i in hands:
array.append(i[index])
i[index] = i[index]*multiplier
#Set the multiplier to 1 so that the subsequent elements in the list
#are not magnified. (Only want to magnify the hierarchy code.)
multiplier = 1
#Test whether there is a unique max. If so, stop and declare index
#of strongest hand
array_max = max(array)
if array.count(array_max) == 1:
position = array.index(array_max)
search = False
#Test whether we have exhausted the list, in which case multiple hands
#are tied. If so, stop and declare the index of the 1st tied best hand
elif index + 1 == len(hands[0]):
position = array.index(array_max)
search = False
#Note: This is valid for adjudicating among permutations for a given player,
#but NOT for determining a winning hand across players (in that case, should be a tie)
#Need to build in this functionality!
#If both tests fail, increment the index to compare the next element
#(card). Instead of comparing the next element per se, we will compare
#the cumulative running sum of elements evaluated so far. The purpose
#of this is to take into account the hierarchy & preceding cards,
#rather than comparing each card on its own
else:
index += 1
for i in hands:
i[index] += i[index-1]
return position
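# Illustration of the running-sum comparison: for [2, 9, 9, 7, 5, 3] versus
# [2, 9, 9, 7, 5, 2] (a pair of nines with different last kickers) the hierarchy
# code and the first four cards tie, the running sums only diverge at the final
# kicker, and adjudicate() returns 0, the index of the hand with the higher kicker.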
#Takes list of 7 cards (hole cards + community cards)
#and returns the hand that should be played (best hand)
def hand_to_play(seven_cards):
#Generate list of all 21 possible 5-card hands
permuts_list = permuts(seven_cards)
#Convert each 5-card hand into 'hand-strength' code
hand_strength_list = []
for hand in permuts_list:
hand_strength_list.append(hand_strength(hand))
#Generate index of the hand to play
index = adjudicate(hand_strength_list)
#Return best hand
return permuts_list[index]
#Takes multiple 5-card hands and returns the best hand.
#Note that the argument is a list of lists of 2-tuples
def declare_winner(all_hands):
#Convert each hand to 'hand-strength' code
hand_strength_list = []
for hand in all_hands:
hand_strength_list.append(hand_strength(hand))
#Generate index of the hand to play
index = adjudicate(hand_strength_list)
#Return best hand
return all_hands[index]
#Need to write this so that it permits ties...
def declare_winner_dict(all_hands):
#Convert each hand to 'hand-strength' code and pull out players into list
hand_strength_list = []
players_list = []
for hand in all_hands:
players_list.append(hand)
hand_strength_list.append(hand_strength(all_hands[hand]))
#Generate index of the best hand
index = adjudicate(hand_strength_list)
#Return winning player
return players_list[index]
|
bspringw/poker
|
poker_functions.py
|
poker_functions.py
|
py
| 7,314 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24506033331
|
from nose.tools import eq_
from mock import patch, Mock, sentinel
from noderunner.process import open_process
@patch("subprocess.Popen", return_value=sentinel.proc)
def test_open_process(p):
ret = open_process(sentinel.fd,
sentinel.secret,
nodepath=sentinel.node_path)
eq_(ret, sentinel.proc)
p.assert_called_once()
|
williamhogman/noderunner
|
tests/test_process.py
|
test_process.py
|
py
| 379 |
python
|
en
|
code
| 6 |
github-code
|
6
|
6827552389
|
""" https://adventofcode.com/2020/day/10 """
from typing import Dict, List
from collections import defaultdict
Adapters = List[int]
def part1(adapters: Adapters) -> int:
""" O(nLogn) solution """
jolts = 0
diffs: Dict[int, int] = defaultdict(int)
for adapter in sorted(adapters):
diffs[adapter - jolts] += 1
jolts = adapter
return diffs[1] * (diffs[3] + 1)
def part2(adapters: Adapters) -> int:
""" O(nLogn) solution """
adapters = sorted(adapters)
adapters = [0] + adapters + [max(adapters)+3]
paths = {adapters[0]: 1}
for x in adapters[1:]:
paths[x] = sum(paths[x - y] for y in range(1, 4) if x - y in paths)
return paths[adapters[-1]]
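# Small worked example of the counting recurrence in part2: adapters [1, 2, 3]
# become the chain [0, 1, 2, 3, 6], paths ends up as {0: 1, 1: 1, 2: 2, 3: 4, 6: 4},
# so part2([1, 2, 3]) == 4.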
if __name__ == "__main__":
TEST1 = [int(line.strip()) for line in open("tests/d10.txt", "r")]
TEST2 = [int(line.strip()) for line in open("tests/d10_2.txt", "r")]
PUZZLE = [int(line.strip()) for line in open("puzzles/d10.txt", "r")]
assert part1(TEST1) == 35
assert part1(TEST2) == 220
assert part2(TEST1) == 8
assert part2(TEST2) == 19208
print(f"Part 1: {part1(PUZZLE)}")
print(f"Part 2: {part2(PUZZLE)}")
|
pozhega/AoC
|
2020/d10.py
|
d10.py
|
py
| 1,164 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6854075721
|
from flask import Flask,request,render_template,redirect, url_for
from flask import jsonify
import requests
from cassandra.cluster import Cluster
from collections import OrderedDict
app = Flask(__name__)
KEYSPACE = "twitterkeyspace"
@app.route('/', methods=['GET'])
def home():
if request.method == 'GET':
return render_template('lab.html',flag=0)
# Lab Query 1
@app.route('/labquery1', methods=['GET','POST'])
def labquery1():
if request.method == 'GET':
return render_template('lab.html',flag=0)
elif request.method == 'POST':
date = request.form['date1']
# print (date)
# Connecting cassandra session
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.default_timeout = 60
session.set_keyspace(KEYSPACE)
query = "SELECT author_id FROM tweet_table1 WHERE date = '{}'".format(date)
rows = session.execute(query)
session.execute("DROP TABLE IF EXISTS tweet_table_author_frequency")
session.execute("""
CREATE TABLE tweet_table_author_frequency (
author_id text,
frequency counter,
PRIMARY KEY (author_id)
)
""")
for row in rows:
if row.author_id:
query = "UPDATE tweet_table_author_frequency SET frequency = frequency + 1 WHERE author_id = '{}'".format(row.author_id)
session.execute(query)
rows = session.execute("SELECT * FROM tweet_table_author_frequency")
pop = {}
for row in rows:
pop.update({row.author_id : row.frequency})
res = sorted(pop,key=pop.get,reverse=True)
result = []
k = 0
for r in res:
temp = []
temp.append(date)
temp.append(r)
temp.append(pop[r])
result.append(temp)
k += 1
print (temp)
return render_template('lab.html',result=result,count=k,flag=1)
# Lab Query 2
@app.route('/labquery2', methods=['GET','POST'])
def labquery2():
if request.method == 'GET':
return render_template('lab.html',flag=0)
elif request.method == 'POST':
date = request.form['date2']
# print (date)
# Connecting cassandra session
cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
session.default_timeout = 60
session.set_keyspace(KEYSPACE)
query = "SELECT hashtag, location FROM tweet_table2 WHERE date = '{}'".format(date)
rows = session.execute(query)
session.execute("DROP TABLE IF EXISTS tweet_table_hashtag_location")
session.execute("""
CREATE TABLE tweet_table_hashtag_location (
hashtag text,
location text,
frequency counter,
PRIMARY KEY ((hashtag,location))
)
""")
for row in rows:
if row.hashtag and row.location:
print (row.hashtag,row.location)
query = "UPDATE tweet_table_hashtag_location SET frequency = frequency + 1 WHERE hashtag = '{}'".format(row.hashtag) + "AND location = '{}'".format(row.location)
session.execute(query)
rows = session.execute("SELECT * FROM tweet_table_hashtag_location")
pop = {}
for row in rows:
pop.update({(row.hashtag,row.location) : row.frequency})
res = sorted(pop,key=pop.get,reverse=True)
result = []
k = 0
for r in res:
temp = []
temp.append(date)
temp.append(r[0])
temp.append(r[1])
temp.append(pop[r])
result.append(temp)
print (temp)
k += 1
return render_template('lab.html',result=result,count=k,flag=2)
else:
return render_template('lab.html',flag=0)
if __name__ == '__main__':
app.run(host='127.0.0.1',port=5000,debug=True)
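# Rough usage sketch (assumes a local Cassandra cluster with the keyspace and the
# tweet_table1/tweet_table2 tables already populated): start the app with
# "python app.py", then POST the form field "date1" to /labquery1 or "date2" to
# /labquery2 at http://127.0.0.1:5000, using whatever date format those tables store.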
|
piyush-jain1/Databases
|
Cassandra/Assignment2/app.py
|
app.py
|
py
| 3,427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22049716249
|
from os import name
import sys
import requests
import time
import threading
sys.path.append('../')
from DeskFoodModels.DeskFoodLib import Item, OrderStatus, Order
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QCheckBox, QComboBox, QDialog, QApplication, QListWidget, QMenu, QPushButton, QStackedWidget, QTextBrowser, QWidget
from urllib.request import urlopen
import json
from DeskFoodModels import firebaseAuth
#---Global Variables---#
#orderID = ""
#order = Order()
userID = ""
#--------------------Login Window--------------------
class loginScreen(QDialog):
def __init__(self):
#TODO if login info is wrong, maybe raise an error message
super(loginScreen, self).__init__()
loadUi("Login.ui", self)
self.loginButton.clicked.connect(self.login)
self.registerButton.clicked.connect(self.register)
self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)
def login(self):
self.username = self.emailEdit.text()
self.password = self.passwordEdit.text()
self.user = firebaseAuth.login(self.username, self.password)
global userID
userID =self.user["localId"]
if self.user:
#BUG: If a user types admin with a capital letter, it will take them to the customer or runner screen instead of the kitchen menu
if(self.username == "[email protected]"):
self.acceptadmin()
else:
self.accept()
else:
self.emailEdit.setText("")
self.passwordEdit.setText("")
self.emailEdit.setFocus()
#self.errorLabel.setText("Invalid username or password")
def register(self):
kScreen = registerScreen()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def accept(self):
kScreen = customerORRunner()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def acceptadmin(self):
kScreen = kitchenMenu()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Register Window--------------------
class registerScreen(QDialog):
def __init__(self):
super(registerScreen, self).__init__()
loadUi("SignUp.ui", self)
self.registerButton.clicked.connect(self.register)
self.registerButton.setEnabled(False)
self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.passwordConfirmEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.termsAndConditionsRadioButton.toggled.connect(self.enableRegisterButton)
def register(self):
self.username = self.userNameEdit.text()
self.password = self.passwordEdit.text()
self.passwordConfirm = self.passwordConfirmEdit.text()
self.email = self.emailEdit.text()
if self.username != "" and self.password != "" and self.passwordConfirm != "":
if self.password == self.passwordConfirm:
self.user = firebaseAuth.register(self.email, self.password, self.username)
if self.user:
global userID
userID = self.user["localId"]
self.accept()
self.passwordEdit.setText("")
self.passwordConfirmEdit.setText("")
self.userNameEdit.setFocus()
#self.errorLabel.setText("Invalid username or password")
def enableRegisterButton(self):
if self.termsAndConditionsRadioButton.isChecked():
self.registerButton.setEnabled(True)
else:
self.registerButton.setEnabled(False)
def accept(self):
kScreen = customerORRunner()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Customer or Runner Window---------------
class customerORRunner(QDialog):
def __init__(self):
super(customerORRunner, self).__init__()
loadUi("customerORrunner.ui", self)
self.customerBTN.clicked.connect(self.customer)
self.runnerBTN.clicked.connect(self.runner)
def customer(self):
kScreen = orderWindow()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def runner(self):
kScreen = RunnerPickOrder()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Runner Pick Orders Window--------------------
class RunnerPickOrder(QDialog):
def __init__(self):
super(RunnerPickOrder, self).__init__()
loadUi("RunnerPickOrder.ui", self)
self.loadOrders()
self.returnBTN.clicked.connect(self.goBack)
self.orderList.itemDoubleClicked.connect(self.orderDetails)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadOrders(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders/Status/Ready"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
# Clear orderList
self.orderList.clear()
# iterate over the data and append the id of the orders to a list
for i in range(len(data_json)):
self.orderList.addItem(data_json[i]['order_id'])
def orderDetails(self):
# Switch to the order details window
kScreen = RunnerOrderDetails(orderID=self.orderList.currentItem().text())
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Runner Order Details Window--------------------
class RunnerOrderDetails(QDialog):
def __init__(self, orderID):
super(RunnerOrderDetails, self).__init__()
loadUi("RunnerOrderDetails.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.setCustomer(orderID)
self.setOrder(orderID)
self.setOrderItems(orderID)
self.setDeliveryLocation(orderID)
self.setOrderStatus(orderID)
self.setOrderTotal(orderID)
self.setOrderInstructions(orderID)
self.statusButton.clicked.connect(self.changeStatusToEnRoute)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
# Set the customer label to the userID of the order
def setCustomer(self, orderID):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/UserID"
response = urlopen(url)
userID = json.loads(response.read())
self.customerIDLabel.setText(userID)
# Set the order label to the orderID of the order
def setOrder(self, orderID):
self.orderIDLabel.setText(orderID)
# Populate the items list with the items in the order
def setOrderItems(self, orderID):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Items"
response = urlopen(url)
data_json = json.loads(response.read())
self.itemsList.addItems(data_json)
# Set the delivery location label to the delivery location of the order
def setDeliveryLocation(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/DeliveryLocation"
response = urlopen(url)
data_json = json.loads(response.read())
self.deliveryLocationLabel.setText(data_json)
# Set the order status label to the order status of the order
def setOrderStatus(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Status"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderStatusLabel.setText(data_json)
# Set the order total label to the order total of the order
def setOrderTotal(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Total"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderTotalLabel.setText("$" + str(data_json))
# Set the order instructions label to the order instructions of the order
def setOrderInstructions(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Instructions"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderInstructionsLabel.setText(data_json)
def changeStatusToEnRoute(self):
orderID = self.orderIDLabel.text()
#Update the order status to en route
r = requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.ON_THE_WAY.value)
#Update the order RunnerID to the current runner
r = requests.put("http://localhost:8000/Orders/" + orderID + "/RunnerID" + "?runnerId=" + userID)
self.setOrderStatus(orderID)
self.statusButton.setText("Confirm Delivery")
self.statusButton.clicked.connect(self.changeStatusToDelivered)
def changeStatusToDelivered(self):
orderID = self.orderIDLabel.text()
#Update the order status to delivered
r = requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.DELIVERED.value)
self.setOrderStatus(orderID)
#Switch back to the RunnerPickOrder window
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.currentWidget().loadOrders()
widget.removeWidget(self)
#--------------------Order Window----------------------------
class orderWindow(QDialog):
def __init__(self):
super(orderWindow, self).__init__()
loadUi("Order.ui", self)
self.subtotalText.setText("0")
self.loadKitchens()
self.kitchensList.itemDoubleClicked.connect(self.loadMenu)
self.returnBTN.clicked.connect(self.goBack)
self.menuList.itemDoubleClicked.connect(self.addToOrder)
self.finishBTN.clicked.connect(self.finish)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadKitchens(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
self.kitchensList.addItems(data_json)
def loadMenu(self):
nameOfKitchen = self.kitchensList.currentItem().text()
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
self.menuList.clear()
myArray = data_json.keys()
for val in myArray:
if (data_json[val]["Available"]):
self.menuList.addItem(str(val) + ": $" + "%0.2f" % float(data_json[val]["Price"]))
def addToOrder(self):
itemToAdd = self.menuList.currentItem().text()
temp = itemToAdd.split(':')
itemToAdd2 = temp[0]
self.orderList.addItem(itemToAdd2)
subtotal = float(self.subtotalText.toPlainText())
temp2 = itemToAdd.split('$')
subtotal2 = float(temp2[1])
subtotal = round(subtotal + subtotal2, 2)
self.subtotalText.setText( "%0.2f" % subtotal )
tax = round(subtotal * .08, 2)
self.taxText.setText( "%0.2f" % tax)
subtotal = float(self.subtotalText.toPlainText())
self.totalText.setText( "%0.2f" % round(tax + subtotal, 2) )
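        # Pricing sketch for addToOrder(): an item listed as "Burger: $5.00" adds
        # 5.00 to the subtotal, tax is 8% of the subtotal, and the total is
        # subtotal + tax, all rounded to two decimal places.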
def finish(self):
kScreen = OrderConfirmaiton(self.orderList, self.totalText.toPlainText())
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Order Confirmation Window----------------------------
class OrderConfirmaiton(QDialog):
def __init__(self, orderList, total):
super(OrderConfirmaiton, self).__init__()
loadUi("OrderConfirmation.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.ConfirmBTN.clicked.connect(self.finish)
for i in range(orderList.count()):
self.orderItemList.addItem(orderList.item(i).text())
self.TotalField.setText(total)
self.DeliveryLocation.returnPressed.connect(self.enableConfirmButton)
# The Button should not be enabled until the user has entered their location
self.ConfirmBTN.setEnabled(False)
#Method to enable the confirm button
def enableConfirmButton(self):
# Check if the location is empty
if self.DeliveryLocation.text() != "":
self.ConfirmBTN.setEnabled(True)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def finish(self):
orderItems = []
for i in range(self.orderItemList.count()):
orderItems.append(self.orderItemList.item(i).text())
order = Order(
user_id = userID,
delivery_location = self.DeliveryLocation.text(),
items = orderItems,
total = self.TotalField.text(),
instructions = self.Instructions.text()
)
kScreen = paymentWindow(order)
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Payment Window--------------------
class paymentWindow(QDialog):
def __init__(self, order):
super(paymentWindow, self).__init__()
loadUi("Payment.ui", self)
self.setWindowTitle("Payment")
self.studentID.setHidden(True)
self.returnBTN.clicked.connect(self.goBack)
self.studentIDCheck.clicked.connect(self.clickSID)
self.debitcreditCheck.clicked.connect(self.clickDCC)
        # A lambda is used so the clicked signal can call finish() with the captured order object
self.finishBTN.clicked.connect(lambda: self.finish(order))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def clickSID(self):
self.studentIDCheck.setChecked(1)
self.debitcreditCheck.setChecked(0)
self.fullName.setHidden(True)
self.ccNumber.setHidden(True)
self.expDate.setHidden(True)
self.CVV.setHidden(True)
self.nameInput.setHidden(True)
self.dccInput.setHidden(True)
self.expInput.setHidden(True)
self.cvvInput.setHidden(True)
self.studentID.setHidden(False)
self.idInput.setHidden(False)
def clickDCC(self):
self.studentIDCheck.setChecked(0)
self.debitcreditCheck.setChecked(1)
self.studentID.setHidden(True)
self.idInput.setHidden(True)
self.fullName.setHidden(False)
self.ccNumber.setHidden(False)
self.expDate.setHidden(False)
self.CVV.setHidden(False)
self.nameInput.setHidden(False)
self.dccInput.setHidden(False)
self.expInput.setHidden(False)
self.cvvInput.setHidden(False)
def finish(self, order):
#Stores the orderID that's created in the database
r = requests.post("http://127.0.0.1:8000/CreateNewOrder", order.json())
print(r.text)
kScreen = statusWindow()
widget.addWidget(kScreen)
kScreen.textOrderID.setText(r.text)
kScreen.textOrderID.hide()
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Order Status Window--------------------
class statusWindow(QDialog):
def __init__(self):
self.x = 1
super(statusWindow, self).__init__()
loadUi("OrderStatus.ui", self)
self.setWindowTitle("Order Status")
threading.Thread(target=self.update, daemon=True).start()
#self.homeBTN.clicked.connect(self.home)
    def update(self):
        # Poll the order status once per second until self.x is cleared
        while self.x == 1:
            print("Updating Order Status")
            self.orderStatus()
            time.sleep(1)
def orderStatus(self):
# print("This is what we're getting" + orderID)
#NOTE: This is a bit of a hack, idk why the orderID keeps the " " around it
url = "http://127.0.0.1:8000/Orders/" + self.textOrderID.toPlainText().replace('"', "") + "/Status"
#url = "http://127.0.0.1:8000/Orders/" + "-Mpr0leituNsBbqY2CDq" + "/Status"
response = urlopen(url)
data_json = json.loads(response.read())
if (data_json == OrderStatus.PENDING.value):
self.statusLBL.setText("Order is pending!")
elif (data_json == OrderStatus.PREPARING.value):
self.statusLBL.setText("Preparing the order!")
elif (data_json == OrderStatus.COOKING.value):
self.statusLBL.setText("Cooking Order!")
elif (data_json == OrderStatus.READY.value):
self.statusLBL.setText("Order is ready!")
elif (data_json == OrderStatus.ON_THE_WAY.value):
self.statusLBL.setText("Order is on the way!")
elif (data_json == OrderStatus.DELIVERED.value):
self.statusLBL.setText("Order is delivered!")
else:
self.statusLBL.setText("Something went wrong!")
"""def home(self):
kScreen = orderWindow()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)"""
#--------------------Select Option Window--------------------
class kitchenMenu(QDialog):
def __init__(self):
super(kitchenMenu, self).__init__()
loadUi("KitchenMenu.ui", self)
self.AddItemBTN.clicked.connect(self.addItem)
self.updatePriceBTN.clicked.connect(self.updatePrice)
self.updateAvailabilityBTN.clicked.connect(self.updateAvailability)
self.viewKitchensBTN.clicked.connect(self.viewKitchens)
self.RemoveItemBTN.clicked.connect(self.removeItem)
self.orderDetailsBTN.clicked.connect(self.viewOrders)
def addItem(self):
kScreen = kitchenAddItem()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def updatePrice(self):
kScreen = kitchenUpdatePrice()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def updateAvailability(self):
kScreen = kitchenUpdateAvailability()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def viewKitchens(self):
kScreen = KitchensScreen()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def removeItem(self):
kScreen = KitchenRemoveItem()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
def viewOrders(self):
kScreen = KitchenSeeOrders()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Kitchens See orders window--------------------
class KitchenSeeOrders(QDialog):
def __init__(self):
super(KitchenSeeOrders, self).__init__()
loadUi("KitchenOrderDetails.ui", self)
self.loadOrders()
self.returnBTN.clicked.connect(self.goBack)
self.orderList.itemDoubleClicked.connect(self.orderDetails)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadOrders(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders/Status/Pending"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
# Clear orderList
self.orderList.clear()
# iterate over the data and append the id of the orders to a list
for i in range(len(data_json)):
self.orderList.addItem(data_json[i]['order_id'])
def orderDetails(self):
# Switch to the order details window
kScreen = KitchenSeeOrdersDetails(orderID=self.orderList.currentItem().text())
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Expanded Kitchens order details window--------------------
class KitchenSeeOrdersDetails(QDialog):
def __init__(self, orderID):
super(KitchenSeeOrdersDetails, self).__init__()
loadUi("OrderDetail.ui", self)
self.setCustomer(orderID)
self.setOrder(orderID)
self.setOrderItems(orderID)
self.setDeliveryLocation(orderID)
self.setOrderStatus(orderID)
self.setOrderTotal(orderID)
self.setOrderInstructions(orderID)
self.statusButton.clicked.connect(self.changeStatusToCooking)
# Set the customer label to the userID of the order
def setCustomer(self, orderID):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/UserID"
response = urlopen(url)
userID = json.loads(response.read())
self.customerIDLabel.setText(userID)
# Set the order label to the orderID of the order
def setOrder(self, orderID):
self.orderIDLabel.setText(orderID)
# Populate the items list with the items in the order
def setOrderItems(self, orderID):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Items"
response = urlopen(url)
data_json = json.loads(response.read())
self.itemsList.addItems(data_json)
# Set the delivery location label to the delivery location of the order
def setDeliveryLocation(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/DeliveryLocation"
response = urlopen(url)
data_json = json.loads(response.read())
self.deliveryLocationLabel.setText(data_json)
# Set the order status label to the order status of the order
def setOrderStatus(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Status"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderStatusLabel.setText(data_json)
# Set the order total label to the order total of the order
def setOrderTotal(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Total"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderTotalLabel.setText(str(data_json))
# Set the order instructions label to the order instructions of the order
def setOrderInstructions(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Instructions"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderInstructionsLabel.setText(data_json)
def changeStatusToCooking(self):
orderID = self.orderIDLabel.text()
#Update the order status to cooking
r = requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.COOKING.value)
self.setOrderStatus(orderID)
self.statusButton.setText("Complete Order")
self.statusButton.clicked.connect(self.completeOrder)
def completeOrder(self):
orderID = self.orderIDLabel.text()
#Update the order status to complete
r = requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.READY.value)
self.setOrderStatus(orderID)
#Switch back to the kitchenorders window
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.currentWidget().loadOrders()
widget.removeWidget(self)
#--------------------Kitchens Add Item Window--------------------
class kitchenAddItem(QDialog):
def __init__(self):
super(kitchenAddItem, self).__init__()
loadUi("KitchenAddItem.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.freshensCheck.clicked.connect(self.unclickF)
self.deliCheck.clicked.connect(self.unclickD)
self.pizzaCheck.clicked.connect(self.unclickP)
self.burgerCheck.clicked.connect(self.unclickB)
self.marketCheck.clicked.connect(self.unclickM)
self.finishBTN.clicked.connect(self.finish)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def unclickF(self):
self.freshensCheck.setChecked(1)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
def unclickD(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(1)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
def unclickP(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(1)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
def unclickB(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(1)
self.marketCheck.setChecked(0)
def unclickM(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(1)
def finish(self):
if(len(self.textName.toPlainText()) > 0):
if(len(self.textCost.toPlainText()) > 0):
if((self.freshensCheck.checkState()) or (self.deliCheck.checkState()) or (self.pizzaCheck.checkState()) or (self.burgerCheck.checkState()) or self.marketCheck.checkState()):
available = False
if(self.checkBox.checkState()): available = True
if(self.freshensCheck.checkState()): mykitchen = "Freshens"
if(self.deliCheck.checkState()): mykitchen = "Deli"
if(self.pizzaCheck.checkState()): mykitchen = "Pizza"
if(self.burgerCheck.checkState()): mykitchen = "Burgers"
if(self.marketCheck.checkState()): mykitchen = "Market"
item = Item(name = self.textName.toPlainText(), price = self.textCost.toPlainText(), available = available)
r = requests.put("http://localhost:8000/AddToMenu/" + mykitchen, item.json())
self.textName.setText("")
self.textCost.setText("")
self.textDecription.setText("")
self.checkBox.setChecked(0)
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
#--------------------Kitchens Remove Item Window--------------------
class KitchenRemoveItem(QDialog):
def __init__(self):
super(KitchenRemoveItem, self).__init__()
loadUi("KitchenRemoveItem.ui", self)
self.ReturnButton.clicked.connect(self.goBack)
self.ConfirmButton.clicked.connect(self.RemoveItem)
self.fillBTN.clicked.connect(self.fillItems)
url = "http://127.0.0.1:8000/Kitchens"
response = urlopen(url)
data_json = json.loads(response.read())
self.kitchenBox.addItems(data_json)
def fillItems(self):
nameOfKitchen = self.kitchenBox.currentText()
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
response = urlopen(url)
data_json = json.loads(response.read())
self.itemBox.clear()
self.itemBox.addItems(list(data_json.keys()))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def RemoveItem(self):
if(len(self.itemBox.currentText()) > 0):
r = requests.delete("http://localhost:8000/RemoveItemFromMenu/" + self.kitchenBox.currentText() + "/" +self.itemBox.currentText())
#--------------------Kitchens Update Price Window--------------------
class kitchenUpdatePrice(QDialog):
def __init__(self):
super(kitchenUpdatePrice, self).__init__()
loadUi("KitchenUpdatePrice.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.finishBTN.clicked.connect(self.finish)
self.fillBTN.clicked.connect(self.fillItems)
self.fillPriceBTN.clicked.connect(self.fillPrice)
#fill kitchen combo box
url = "http://127.0.0.1:8000/Kitchens"
response = urlopen(url)
data_json = json.loads(response.read())
self.kitchenBox.addItems(data_json)
def fillItems(self):
nameOfKitchen = self.kitchenBox.currentText()
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
response = urlopen(url)
data_json = json.loads(response.read())
self.itemBox.clear()
self.itemBox.addItems(list(data_json.keys()))
def fillPrice(self):
nameOfKitchen = self.kitchenBox.currentText()
nameOfItem = self.itemBox.currentText()
#NOTE: this is a bit of a hack, but it works. Essentially the URL does not like spaces in the item name, so I had to replace them with '%20'.
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen + "/" + nameOfItem.replace(' ', '%20') + "/Price"
response = urlopen(url)
data_json = json.loads(response.read())
self.textCost.setText(str(data_json))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def finish(self):
if(len(self.itemBox.currentText()) > 0):
r = requests.put("http://localhost:8000/UpdateItemPrice/" + self.kitchenBox.currentText() + "/" + self.itemBox.currentText() + "?price=" + self.textCost.toPlainText())
self.textCost.setText("")
#--------------------Kitchens Update Availability Window--------------------
class kitchenUpdateAvailability(QDialog):
def __init__(self):
super(kitchenUpdateAvailability, self).__init__()
loadUi("KitchenUpdateAvailability.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.finishBTN.clicked.connect(self.finish)
self.fillBTN.clicked.connect(self.fillItems)
url = "http://127.0.0.1:8000/Kitchens"
response = urlopen(url)
data_json = json.loads(response.read())
self.kitchenBox.addItems(data_json)
def fillItems(self):
nameOfKitchen = self.kitchenBox.currentText()
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
response = urlopen(url)
data_json = json.loads(response.read())
self.itemBox.clear()
self.itemBox.addItems(list(data_json.keys()))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def finish(self):
if(len(self.itemBox.currentText()) > 0):
if(self.checkBox.checkState()):
available = True
else: available = False
r = requests.put("http://localhost:8000/UpdateItemAvailability/" + self.kitchenBox.currentText() + "/" + self.itemBox.currentText() + "?availability=" + str(available))
#--------------------Kitchens and Menu Window--------------------
class KitchensScreen(QDialog):
def __init__(self):
super(KitchensScreen, self).__init__()
loadUi("ListOfKitchens.ui", self)
self.loadKitchens()
self.kitchensList.itemDoubleClicked.connect(self.loadMenu)
self.locationLabel.setText("Campus Center Market")
self.returnBTN.clicked.connect(self.goBack)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadKitchens(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
self.kitchensList.addItems(data_json)
def loadMenu(self):
nameOfKitchen = self.kitchensList.currentItem().text()
print(nameOfKitchen)
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
print(data_json)
self.menuList.clear()
self.menuList.addItems(list(data_json.keys()))
#--------------------MAIN--------------------
#Setting up App
app = QApplication(sys.argv)
loginScreen = loginScreen()
widget = QStackedWidget()
widget.addWidget(loginScreen)
widget.setFixedHeight(800)
widget.setFixedWidth(1200)
widget.show()
try:
sys.exit(app.exec_())
except:
print("Exiting")
|
YY0NII/DeskFood
|
Frontend/Main.py
|
Main.py
|
py
| 33,421 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41236533985
|
import rest_framework.authentication
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
from user.auth.auth import JwtQueryParamsAuthentication
schema_view = get_schema_view(
openapi.Info(
title="接口文档",
default_version="1.0",
terms_of_service='',
contact=openapi.Contact(name="Andy Z Wright", email="[email protected]"),
license=openapi.License(name="MIT LICENCE"),
),
public=True,
permission_classes=(permissions.AllowAny,),
# authentication_classes=(JwtQueryParamsAuthentication,)
authentication_classes=(),
)
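# Typical wiring sketch for urls.py (standard drf_yasg usage, not part of this
# file; the import path assumes this module lives at swagger_doc/views.py):
#
#   from django.urls import re_path
#   from swagger_doc.views import schema_view
#
#   urlpatterns = [
#       re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0)),
#   ]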
|
beishangongzi/porcelain-backend
|
swagger_doc/views.py
|
views.py
|
py
| 653 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21998584846
|
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
n = len(nums)
left = 0
right = n - 1
def get_num(i):
if i == -1 or i == n:
return float('-inf')
return nums[i]
ans = -1
while right >= left:
mid = left + (right - left) // 2
if get_num(mid - 1) < get_num(mid) > get_num(mid + 1):
return mid
elif get_num(mid) < get_num(mid + 1):
left = mid + 1
else:
right = mid - 1
return ans
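# Example: Solution().findPeakElement([1, 2, 3, 1]) returns 2, since nums[2] == 3
# is strictly greater than both of its neighbours.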
|
hangwudy/leetcode
|
100-199/162. 寻找峰值.py
|
162. 寻找峰值.py
|
py
| 620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16105808925
|
#Server
from socket import *
serverPort = 12002
listeningSocket = socket(AF_INET, SOCK_STREAM)
listeningSocket.bind(('', serverPort))
listeningSocket.listen(1)
print('Server ready, socket', listeningSocket.fileno(), 'listening on localhost :', serverPort)
connectionSocket, addr = listeningSocket.accept() #client address & socket
while 1:
msg = bytes.decode(connectionSocket.recv(1024))
if(msg == 'exit'):
connectionSocket.close()
break
print('Client says: ',msg)
connectionSocket.send(str.encode(str(addr[0])+':'+str(addr[1])+':'+ msg))
connectionSocket.close()
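# A matching client sketch (an assumed counterpart, not part of this file):
#
#   from socket import *
#   clientSocket = socket(AF_INET, SOCK_STREAM)
#   clientSocket.connect(('localhost', 12002))
#   clientSocket.send(str.encode('hello'))
#   print(bytes.decode(clientSocket.recv(1024)))  # e.g. "127.0.0.1:54321:hello"
#   clientSocket.send(str.encode('exit'))
#   clientSocket.close()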
|
kelly870114/TCPSocket
|
TCPServer.py
|
TCPServer.py
|
py
| 598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9007439548
|
"""
Week 2 - Data mining
By Christopher Diaz Montoya
"""
# Problem 1!!
store=[] # Empty array to store values
for a in range (1000, 2000): # Loop to check over all numbers in range
if (a % 11 == 0) and not (a % 3 == 0):
    # Keep only the numbers that are multiples of 11 but not multiples of 3
store.append(str(a)) # Stores numbers that met above requirements
print (*store, sep = ", ")
# Learnt the above print line from the website below to print output correctly
# https://www.kite.com/python/answers/how-to-print-a-list-without-brackets-in-python#:~:text=Use%20*%20to%20print%20a%20list,set%20sep%20to%20%22%2C%20%22%20.
# Problem 2!!
print("Please input a sentance: ") # Allows user to input sentance
sentance = input()
# Above line assigns input to the varibale called sentance
# Below 2 lines will be used as counters for upper and lower case letters
UpperCase = 0
LowerCase = 0
# For loop to check each character for the length of the string sentance
for char in range(len(sentance)):
# Below says if char is in the lower letter alphabet add and assigns to lower
# case counter else if in the upper case alphabet add to the upper counter
if(sentance[char]>='A' and sentance[char]<='Z'):
UpperCase += 1
elif(sentance[char]>='a' and sentance[char]<='z'):
# Learnt in my other module how to convert from lower case to upper case
# without libraries so played around with the code as it's like a range
# and that's how I got the above line
LowerCase += 1 # Add 1 to counter
print('Upper case = ', UpperCase)
print('Lower case = ', LowerCase)
# Above prints the count and I used the comma to print the string and counter
# int. As I only mentioned the alpahbets there is no issue with the space and
# is not counted by accident.
# Problem 3!!
# A function that turns an int into a string
def NumToWord(a):
    b = str(a)  # Cast the int into a string and store it in b
    print(b)  # Print b, which is now a string
    print(type(b))  # Double check what data type b is
# int() below makes sure the input value is an integer, learnt last
# academic year.
num = int(input("Please enter a number: "))
NumToWord(num)  # Call the function and pass the input "num" into it.
# Problem 4!!
import itertools # Import from library to help iterate through all outcomes
# Below stored for easy access
subject = ["I", "You"]
verb = ["Read", "Borrow"]
ob = ["Shakerpeare's plays", "Shakespeare's poems"]
# Below prints and iterates over each possible out come from the lists
# mentioned whille the varibles stay in the same order. List ensures prints
# in the right way
print(list(itertools.product(subject, verb, ob)))
# https://www.codegrepper.com/code-examples/python/how+to+find+all+combinations+of+a+list+python
# Problem 5!! Part 1
import matplotlib.pyplot as plt # imported and given a shorter name
x, y = [1, 2, 3], [2, 4, 1]  # Assigning values to variables x and y
plt.xlabel("X axis", fontsize = 15) # Prints x label and size
plt.ylabel("Y axis", fontsize = 15) # Prints y label and size
plt.title("My first graph", fontsize = 20) # Prints title
# Learnt how to change size and label names from
# https://stackoverflow.com/questions/12444716/how-do-i-set-the-figure-title-and-axes-labels-font-size-in-matplotlib
# Some of the above was learnt from lectures and extra study help from uni.
# This plots the points on the graph
plt.plot(x, y)
plt.show() # This shows the graph
# Part 2
X = [] # Created empty lists to store values read from document
Y = []
a = open("test.txt", "r") # a is a variable which are the contents
for row in a: # Loops all rows in the txt file
row = row.split(" ") # splits numbers in file when it reads a space
    X.append(row[0])  # The first number is added to X
Y.append(int(row[1])) # Second number is added to Y
plt.xlabel("X axis", fontsize = 15) # Prints x label
plt.ylabel("Y axis", fontsize = 15) # Prints y label
plt.title("My second graph", fontsize = 20) # Prints title
plt.plot(X, Y) # This plots the points on the graph
plt.show() # This shows the graph
#https://www.geeksforgeeks.org/python-create-graph-from-text-file/
# Problem 6!!
# below importing relevant libraries
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv ("train.csv")
# Above imports and reads the data set
df.info()  # Shows how many columns there are, which data types appear in the
# data set (there are 3), and which columns have missing data
df["Loan_Status"].value_counts(normalize=True).plot.bar()
# Used to see the column which shows how many people got approved in a barchart
catColumns = ["Gender", "Married", "Dependents", "Education", "Self_\
Employed", "Property_Area", "Credit_History"]
for x in catColumns: # Loops over all data in each column
# Crosstab checks against another group of data I want to analyse against,
# in this case Loan_Status https://pbpython.com/pandas-crosstab.html against
# all the columns in Columns
y = pd.crosstab(df["Loan_Status"], df[x], normalize = "columns")
# https://www.journaldev.com/45109/normalize-data-in-python taught me how
# to normalize data and https://machinelearningmastery.com/rescaling-data-for-machine-learning-in-python-with-scikit-learn/#:~:text=Normalization%20refers%20to%20rescaling%20real,preparation%20of%20coefficients%20in%20regression.
    # taught me what it does: it rescales all values to lie between 0 and 1.
print(y) # Prints output
y.plot(kind = "bar") # Plots bar chart for each column
df.boxplot(column = "ApplicantIncome", by = "Education") # Wanted to see the
# correlation between graduate income and non graduate income
numColumns = ["ApplicantIncome", "CoapplicantIncome", "LoanAmount", "Loan_Amount_Term"]
# I did above as I wanted to check if graduates earned more than non graduates
# Learnt this in the lecture slides
for z in numColumns: # For loop to make a graph for each column
# for each loop until every column in numColumns has a graph
# shows column in numColumns against Loan_status
result = df.boxplot(column = z, by = "Loan_Status") # Plots graph
plt.show(result) # Shows graphs
# The graphs used in the above loop were learnt from the lecture slides
|
diaz080800/Python-programming
|
Week 2/Week2.py
|
Week2.py
|
py
| 6,360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70327957627
|
#!/usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from math import sqrt, atan2, exp, atan, cos, sin, acos, pi, asin, atan2, floor
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from time import sleep
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
global x_n, y_n, theta_n
class Node():
def __init__(self, value, x, y):
self.value = value
self.gCost = 0
self.hCost = 0
self.x = x
self.y = y
self.parent = None
self.path = None
def fCost(self):
return self.gCost + self.hCost
class Astar():
def __init__(self, map, goal, start, cell_size):
self.map = map
self.goal = goal
self.start = start
self.cell_size = cell_size
self.goal2d = self.goal_to_node2d()
self.rows = len(self.map)
self.cols = len(self.map[0])
self.targetNode = self.map[self.goal2d[0]][self.goal2d[1]]
#self.map[self.goal2d[0]][self.goal2d[1]] = 2
#self.map[self.goal2d[1]][self.goal2d[0]].value = 2
#print(*self.map)
def goal_to_node2d(self):
#goal: x,y
goal2d = np.array([0,0])
goal2d[0] = self.goal[0]/self.cell_size
goal2d[1] = -self.goal[1]/self.cell_size
return goal2d
def node2d_to_goal(self, cell):
        x = cell.x*self.cell_size + self.cell_size/2
        y = -cell.y*self.cell_size - self.cell_size/2
return (x,y)
def isGoalValid(self):
if(self.map[self.goal2d[0]][self.goal2d[1]].value == 1):
return False
elif(self.map[self.goal2d[0]][self.goal2d[1]].value == 0):
return True
def getNeighbors(self, node):
neighbors = []
for x in range(-1,2):
for y in range(-1,2):
if(x == 0 and y == 0):
continue
checkX = node.x + x
checkY = node.y + y
#print('check:',x,y)
if(checkX >= 0 and checkX < self.rows and checkY >= 0 and checkY < self.cols):
neighbors.append(self.map[checkX][checkY])
return neighbors
def getDistance(self, nodeA, nodeB):
distX = abs(nodeA.x - nodeB.x)
distY = abs(nodeA.y - nodeB.y)
if(distX > distY):
return 14*distY + 10*(distX - distY)
else:
return 14*distX + 10*(distY - distX)
def tracePath(self, startNode, endNode):
path = []
currentNode = endNode
while(currentNode is not startNode):
path.append(currentNode)
currentNode = currentNode.parent
#print('node:', currentNode)
path.reverse()
#print('path:',path)
return path
def findPath(self):
openSet = []
closeSet = []
print(vars(self.map[self.start[0]][self.start[1]]))
startNode = self.map[self.start[0]][self.start[1]]
openSet.append(startNode)
while(len(openSet) > 0):
currentNode = openSet[0]
for i in range(1,len(openSet)):
#print(openSet[i].fCost())
if(openSet[i].fCost() < currentNode.fCost() or (openSet[i].fCost() == currentNode.fCost() and openSet[i].hCost < currentNode.hCost)):
currentNode = openSet[i]
#print('in while: ', currentNode.x, currentNode.y, currentNode.fCost())
#print('goal: ', self.goal2d[0] , self.goal2d[1])
openSet.remove(currentNode)
closeSet.append(currentNode)
if(currentNode.x == self.goal2d[0] and currentNode.y == self.goal2d[1]):
print('search done')
self.path = self.tracePath(startNode, self.targetNode)
return
neighbors = self.getNeighbors(currentNode)
for neighbor in neighbors:
#print(vars(neighbor))
if(neighbor.value == 1 or (neighbor in closeSet)):
print('continue')
continue
newMovementCostToNeighbor = currentNode.gCost + self.getDistance(currentNode, neighbor)
if(newMovementCostToNeighbor < neighbor.gCost or not (neighbor in openSet)):
neighbor.gCost = newMovementCostToNeighbor
neighbor.hCost = self.getDistance(neighbor, self.targetNode)
neighbor.parent = currentNode
#print(neighbor.gCost)
if(neighbor not in openSet):
openSet.append(neighbor)
print('next')
def plotGrid(self):
for i in range(len(self.map)):
line = []
linefCost = []
for j in range(len(self.map[0])):
line.append(self.map[i][j].value)
linefCost.append(self.map[i][j].gCost)
#print(line)
print(linefCost)
def refference_trajectory(x_goal, y_goal):
x_ref = x_goal
y_ref = y_goal
Vx_ref = 0
Vy_ref = 0
return (x_ref, y_ref, Vx_ref, Vy_ref)
# Routine to generate the control input
def trajectory_controller(x_ref, y_ref, Vx_ref, Vy_ref, Kp, Usat):
global x_n, y_n, theta_n
Ux = Vx_ref + Kp * (x_ref - x_n)
Uy = Vy_ref + Kp * (y_ref - y_n)
absU = sqrt(Ux ** 2 + Uy ** 2)
if (absU > Usat):
Ux = Usat * Ux / absU
Uy = Usat * Uy / absU
return (Ux, Uy)
# Feedback linearization routine
def feedback_linearization(Ux, Uy, d):
global x_n, y_n, theta_n
VX = cos(theta_n) * Ux + sin(theta_n) * Uy
WZ = (-sin(theta_n) / d) * Ux + (cos(theta_n) / d) * Uy
return (VX, WZ)
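# The two routines above implement a point-offset controller: the proportional
# law in trajectory_controller() gives a desired world-frame velocity (Ux, Uy)
# saturated at Usat, and feedback_linearization() converts it to unicycle
# commands for a point at distance d ahead of the robot:
#   v = cos(theta)*Ux + sin(theta)*Uy
#   w = (-sin(theta)*Ux + cos(theta)*Uy) / d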
# Callback routine to obtain the robot's pose
def callback_pose(data):
global x_n, y_n, theta_n
    x_n = data.pose.pose.position.x  # robot 'x' position in the world
    y_n = data.pose.pose.position.y  # robot 'y' position in the world
x_q = data.pose.pose.orientation.x
y_q = data.pose.pose.orientation.y
z_q = data.pose.pose.orientation.z
w_q = data.pose.pose.orientation.w
euler = euler_from_quaternion([x_q, y_q, z_q, w_q])
    theta_n = euler[2]  # robot 'theta' orientation in the world
return
def calcDistance(x_n, y_n, x_d, y_d):
return sqrt(((x_d - x_n)**2 + (y_d - y_n)**2))
def readImage(cell_size):
fig = plt.figure(figsize=(8,8), dpi=100)
img = 1 - mpimg.imread('../worlds/map_1.png')
    # Just to guarantee the occupancy map only contains these two values
threshold = 0.5
img[img > threshold] = 1
img[img<= threshold] = 0
map_dims = np.array([60, 60]) # Cave
    # Pixel-per-meter scale
print(img.shape)
sy, sx = img.shape[0:2] / map_dims
    # Size of each grid cell (in meters)
rows, cols = (map_dims / cell_size).astype(int)
#grid = np.zeros((rows, cols))
grid = [[Node(0,0,0) for x in range(cols)] for y in range(rows)]
    # Filling the grid
for r in range(rows):
for c in range(cols):
xi = int(c*cell_size*sx)
xf = int(xi + cell_size*sx)
yi = int(r*cell_size*sy)
yf = int(yi + cell_size*sy)
value = np.sum(img[yi:yf,xi:xf])
if(value > threshold):
value = 1
else:
value = 0
node = Node(value, r, c)
grid[r][c] = node
return grid
def control(poses):
    # Control parameters for the Stage simulation
global x_n, y_n
freq = 100
Usat = 5
d = 0.8
Kp = 1
    # Define the rate that controls the execution frequency of this node
rate = rospy.Rate(freq)
vel = Twist()
sleep(0.2)
    # The node's program consists of the code inside this loop
for pose in poses:
print(pose)
x_goal = pose[0]
y_goal = pose[1]
        # Distance from the robot to the current goal
dist = calcDistance(x_n,y_n,x_goal,y_goal)
while(dist > 0.5):
[x_ref, y_ref, Vx_ref, Vy_ref] = refference_trajectory(x_goal, y_goal)
[Ux, Uy] = trajectory_controller(x_ref, y_ref, Vx_ref, Vy_ref, Kp, Usat)
[V_forward, w_z] = feedback_linearization(Ux, Uy, d)
vel.linear.x = V_forward
vel.angular.z = w_z
pub_stage.publish(vel)
dist = calcDistance(x_n, y_n, x_goal, y_goal)
            # Sleep to keep the desired loop frequency
rate.sleep()
if __name__ == '__main__':
try:
rospy.init_node("Astar_node") #inicializa o no "este no"
pub_stage = rospy.Publisher("/cmd_vel", Twist, queue_size=1) #declaracao do topico para comando de velocidade
rospy.Subscriber("/base_pose_ground_truth", Odometry, callback_pose) #declaracao do topico onde sera lido o estado do robo
cell_size = 2
x_goal, y_goal = input('(x_goal, y_goal)').split()
x_goal, y_goal = [float(i) for i in [x_goal, y_goal]]
grid = readImage(cell_size)
start_x = floor(x_n/cell_size)
start_y = floor(-y_n/cell_size)
print('pose: ', start_x, start_y)
Astar = Astar(grid, np.array([x_goal, y_goal]), np.array([start_x,start_y]), cell_size)
if(not Astar.isGoalValid()):
            print('Invalid goal position')
exit()
Astar.findPath()
path = Astar.path
planConverted = []
for node in path:
pose = Astar.node2d_to_goal(node)
planConverted.append(pose)
print(pose)
Astar.plotGrid()
control(planConverted)
except rospy.ROSInterruptException:
pass
|
lucca-leao/path-planning
|
scripts/Astar.py
|
Astar.py
|
py
| 9,706 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9903389782
|
# UDP receiver
# Olle Bergkvist & August M Rosenqvist
from socket import *
serverPort = 12000
counter = 10000
# Create UDP socket and bind to specified port
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('', serverPort))
print ("The UDP receiver is ready to receive.\n")
while True:
# Read client's message and remember client's address (IP and port)
counter += 1
message, clientAddress = serverSocket.recvfrom(2048)
message = message.decode()
messageArray = message.split(";")
sequenceNumber = int(messageArray[0])
extractedMessage = messageArray[1]
if counter != sequenceNumber:
print("Wrong package order detected!")
print("Expected ID:", counter, "Received ID:", sequenceNumber)
else:
print("Correct package received:", sequenceNumber)
|
ollebergkvist/telekom-lab2
|
UDPreceiver.py
|
UDPreceiver.py
|
py
| 821 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37379213866
|
#Image naming: county (0 = Xixiu, 1 = Jianhe)_index (position in the points list, starting at 0)_index at the same location (one location may have several images, numbered from 0)_year (e.g. 2021)_img
#Construction-label naming: county (0 = Xixiu, 1 = Jianhe)_index (position in the points list, starting at 0)_year (e.g. 2021)_conslabel
#Classification-label naming: county (0 = Xixiu, 1 = Jianhe)_index (position in the points list, starting at 0)_2021_classlabel
from osgeo import gdal,osr
import pickle
import os
import numpy
def getSRSPair(dataset):
prosrs=osr.SpatialReference()
prosrs.ImportFromWkt(dataset.GetProjection())
geosrs=prosrs.CloneGeogCS()
return prosrs,geosrs
def lonlat2geo(dataset,lon,lat):
prosrs,geosrs=getSRSPair(dataset)
ct=osr.CoordinateTransformation(geosrs,prosrs)
coords=ct.TransformPoint(lon,lat)
return coords[:2]
def geo2imagexy(dataset,x,y):
trans=dataset.GetGeoTransform()
a=numpy.array([[trans[1],trans[2]],[trans[4],trans[5]]])
b=numpy.array([x-trans[0],y-trans[3]])
result=numpy.linalg.solve(a,b)
return int(result[0]),int(result[1])
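# Note: the GDAL geotransform maps pixel (col, row) to projected (x, y) via
#   x = trans[0] + col*trans[1] + row*trans[2]
#   y = trans[3] + col*trans[4] + row*trans[5]
# so geo2imagexy() recovers (col, row) by solving this 2x2 linear system.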
def getImageBound(dataset):
    # [minx, maxx, miny, maxy]
trans=dataset.GetGeoTransform()
img_width,img_height=dataset.RasterXSize,dataset.RasterYSize
result=[min(trans[0],trans[0]+trans[1]*img_width),max(trans[0],trans[0]+trans[1]*img_width),min(trans[3],trans[3]+trans[5]*img_height),max(trans[3],trans[3]+trans[5]*img_height)]
return result
def getAllImage_tif(path):
result=[]
d=os.listdir(path)
for i in d:
if i.split('.')[-1] == 'tif':
result.append(i)
return result
def contain(bound,point):
if bound[0]<point[0]<bound[1] and bound[2]<point[1]<bound[3]:
return True
else:
return False
def clip_label_cons(county,year,path_pkl,path_src,path_dst,size=2048):
dataset=gdal.Open(path_src)
bound=getImageBound(dataset)
with open(path_pkl,'rb') as f:
points_dict=pickle.load(f)
if county==0:
points=points_dict['xixiu']
else:
points=points_dict['jianhe']
for j,point in enumerate(points,0):
x,y=lonlat2geo(dataset,point[0],point[1])
if contain(bound,(x,y)):
p=geo2imagexy(dataset,x,y)
if p[0]+size>dataset.RasterXSize or p[1]+size>dataset.RasterYSize:
continue
clip_image=dataset.ReadAsArray(p[0],p[1],size,size)
clip_image_path=path_dst+'\\'+str(county)+'_'+str(j)+'_'+str(year)+'_conslabel.tif'
clip_image_driver=gdal.GetDriverByName('GTiff')
clip_image_dataset=clip_image_driver.Create(clip_image_path,size,size,1,gdal.GDT_Float32)
clip_image_dataset.SetGeoTransform((x,dataset.GetGeoTransform()[1],0,y,0,dataset.GetGeoTransform()[5]))
clip_image_dataset.SetProjection(dataset.GetProjection())
clip_image_dataset.GetRasterBand(1).WriteArray(clip_image)
clip_image_dataset.FlushCache()
clip_image_dataset=None
def clip_label_class(county,year,path_pkl,path_src,path_dst,size=2048):
dataset=gdal.Open(path_src)
bound=getImageBound(dataset)
with open(path_pkl,'rb') as f:
points_dict=pickle.load(f)
if county==0:
points=points_dict['xixiu']
else:
points=points_dict['jianhe']
for j,point in enumerate(points,0):
x,y=lonlat2geo(dataset,point[0],point[1])
if contain(bound,(x,y)):
p=geo2imagexy(dataset,x,y)
if p[0]+size>dataset.RasterXSize or p[1]+size>dataset.RasterYSize:
continue
clip_image=dataset.ReadAsArray(p[0],p[1],size,size)
clip_image_path=path_dst+'\\'+str(county)+'_'+str(j)+'_'+str(year)+'_classlabel.tif'
clip_image_driver=gdal.GetDriverByName('GTiff')
clip_image_dataset=clip_image_driver.Create(clip_image_path,size,size,1,gdal.GDT_Float32)
clip_image_dataset.SetGeoTransform((x,dataset.GetGeoTransform()[1],0,y,0,dataset.GetGeoTransform()[5]))
clip_image_dataset.SetProjection(dataset.GetProjection())
clip_image_dataset.GetRasterBand(1).WriteArray(clip_image)
clip_image_dataset.FlushCache()
clip_image_dataset=None
if __name__=='__main__':
from matplotlib import pyplot
id_county=1
year=2021
path_pkl=r'K:\points.pkl'
path_src=r'H:\剑河县\2021标签\label_2021.tif'
path_dst=r'H:\剑河县\2021标签裁剪\分类'
size=2048
clip_label_class(id_county,year,path_pkl,path_src,path_dst,size)
id_county=1
year=2021
path_pkl=r'K:\points.pkl'
path_src=r'H:\剑河县\2021标签\label_2021_cons.tif'
path_dst=r'H:\剑河县\2021标签裁剪\施工'
size=2048
clip_label_cons(id_county,year,path_pkl,path_src,path_dst,size)
id_county=1
year=2020
path_pkl=r'K:\points.pkl'
path_src=r'H:\剑河县\2020标签\label_2020.tif'
path_dst=r'H:\剑河县\2020标签裁剪\施工'
size=2048
clip_label_cons(id_county,year,path_pkl,path_src,path_dst,size)
|
faye0078/RS-ImgShp2Dataset
|
lee/clip_label.py
|
clip_label.py
|
py
| 5,260 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74128409788
|
from collections import deque
def find_correct(string):
stack = []
for c in string:
if c == "[" or c == "{" or c == "(":
stack.append(c)
        else:
            # A closing bracket must match the most recent unmatched opening bracket.
            if stack and ((c == "]" and stack[-1] == "[") or
                          (c == "}" and stack[-1] == "{") or
                          (c == ")" and stack[-1] == "(")):
                stack.pop()
            else:
                return 0
return 1 if not stack else 0
def solution(s):
answer = 0
deq = deque(list(s))
for i in range(len(s)):
deq.rotate(-1)
string = "".join(deq)
answer += find_correct(string)
return answer
|
Dayeon1351/TIL
|
programmers/level2/괄호회전하기/solution.py
|
solution.py
|
py
| 781 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45364250336
|
import pygame
import solveModuleNotFoundError
from Game import *
from Game.Scenes import *
from Game.Shared import *
class Breakout:
def __init__(self):
self.__lives = 5
self.__score = 0
self.__level = Level(self)
self.__level.load(0)
self.__pad = Pad((GameConstant.SCREEN_SIZE[0]/2,GameConstant.SCREEN_SIZE[1] - GameConstant.PAD_SIZE[1]),pygame.transform.scale(pygame.image.load(GameConstant.SPRITE_PAD) , GameConstant.PAD_SIZE))
self.__balls = [
Ball((400,400) , pygame.transform.scale(pygame.image.load(GameConstant.SPRITE_BALL) , GameConstant.BALL_SIZE) ,self)
]
pygame.init()
pygame.mixer.init()
pygame.display.set_caption("Brick Breaker")
pygame.mouse.set_visible(0)
self.__clock = pygame.time.Clock()
self.screen = pygame.display.set_mode(GameConstant.SCREEN_SIZE , pygame.DOUBLEBUF, 32)
self.bg = pygame.transform.scale(pygame.image.load(GameConstant.BG).convert_alpha() , GameConstant.SCREEN_SIZE)
self.__scenes = (
PlayingGameScene(self),
GameOverScene(self),
HighscoreScene(self),
MenuScene(self)
)
self.__currentScene = 3
self.__sounds = (
pygame.mixer.Sound(GameConstant.SOUND_FILE_GAMEOVER),
pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_BRICK),
pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_BRICK_LIFE),
pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_BRICK_SPEED),
pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_WALL),
pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_PAD)
)
def start(self):
while 1:
self.__clock.tick(60)
self.screen.fill((0,0,0))
self.screen.blit( self.bg, (0,0))
currentScene = self.__scenes[self.__currentScene]
currentScene.handleEvents(pygame.event.get())
currentScene.render()
pygame.display.update()
def changeScene(self , scene):
self.__currentScene = scene
def getLevel(self):
return self.__level
def getLives(self):
return self.__lives
def getScore(self):
return self.__score
def getBalls(self):
return self.__balls
def getPad(self):
return self.__pad
def playSound(self, soundClip):
sound = self.__sounds[soundClip]
sound.stop()
sound.play()
def increaseScore(self , score):
self.__score += score
def increaseLives(self):
self.__lives += 1
def reduceLives(self):
self.__lives -= 1
def reset(self):
self.__score = 0
self.__lives = 5
self.__level.reset()
self.__level.load(0)
Breakout().start()
|
grapeJUICE1/Grape-Bricks
|
Game/Breakout.py
|
Breakout.py
|
py
| 2,928 |
python
|
en
|
code
| 7 |
github-code
|
6
|
23915032189
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from apps.data.models import Entry
from apps.data.forms import DataForm
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound
@login_required
def add(request):
form = None
if request.method == 'POST':
        # NOTE: AddBookForm is not imported in this module; it is assumed to live in apps.data.forms.
        form = AddBookForm(request.POST, request.FILES)
        if form.is_valid():
            instance = form.save(commit=False)
            # HttpRequest has no .title/.text attributes; read the submitted data instead.
            instance.title = request.POST.get('title')
            instance.text = request.POST.get('text')
            instance.save()
else:
form = AddBookForm()
ctx = {
'form': form
}
return render_to_response('data/create.html', ctx, context_instance=RequestContext(request))
@login_required
def entry(request, id):
# try:
entrie = Entry.objects.get(id=id)
ctx = {'entrie': entrie}
return render_to_response('data/entry.html', ctx, context_instance=RequestContext(request))
# except:
# return HttpResponse('<h1>Page was found</h1>')
@login_required
def create(request):
a = Entry()
form = DataForm(request.POST or None)
if form.is_valid():
form.save()
ctx = {
'form': form
}
return render_to_response('data/create.html', ctx, context_instance=RequestContext(request))
# Create your views here.
|
msakhnik/just-read
|
apps/data/views.py
|
views.py
|
py
| 1,497 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15802748190
|
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
api_patterns = [
url(r'^docs/', include_docs_urls(title='Documentation')),
url(r'^', include(('my_website.apps.youtube_download.urls', 'youtube_download'), namespace='youtube_download')),
]
urlpatterns = [
url(r'^api/auth/', include('django.contrib.auth.urls')),
url(r'^api/rest-auth/', include(('rest_auth.urls', 'youtube_download'), namespace='rest_auth')),
url(r'^api/rest-auth/registration/', include(('rest_auth.registration.urls', 'youtube_download'), namespace='rest_auth_registration')),
# Allauth
url(r'^api/accounts/', include('allauth.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/', include((api_patterns, 'youtube_download'), namespace='api')),
url(r'^api/admin/', admin.site.urls),
]
|
zsoman/my-website
|
my_website/urls.py
|
urls.py
|
py
| 902 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16398002041
|
import os
import pygame
from Engine import MainMenu
from Entities.Maps.SimpleCheck import SimpleCheck, ConditionsType
class BlockChecks(SimpleCheck):
def __init__(self, ident, name, positions, linked_map):
SimpleCheck.__init__(self, ident, name, positions, linked_map, True)
self.position_logic_indicator = None
self.surface_logic_indicator = None
self.list_checks = []
self.show_checks = False
self.all_logic = False
self.logic_cpt = 0
self.focused = False
def add_check(self, check):
self.list_checks.append(check)
self.update()
def get_checks(self):
return self.list_checks
def update(self):
self.logic_cpt = 0
self.all_logic = True
self.checked = True
self.focused = False
for check in self.list_checks:
check.update()
if not check.hide and not check.checked:
if not check.checked:
self.checked = False
if not self.focused:
self.focused = check.focused
if check.state == ConditionsType.LOGIC:
self.logic_cpt += 1
else:
self.all_logic = False
font = self.map.tracker.core_service.get_font("mapFont")
map_font_path = os.path.join(self.map.tracker.core_service.get_tracker_temp_path(), font["Name"])
font_number = self.map.tracker.core_service.get_font("mapFontChecksNumber")
font_path = os.path.join(self.map.tracker.core_service.get_tracker_temp_path(), font_number["Name"])
groups_datas = self.map.tracker.tracker_json_data[4]["SizeGroupChecks"]
zoom = self.map.tracker.core_service.zoom
index_positions = self.map.index_positions
self.pin_rect = pygame.Rect(
(index_positions[0] * zoom) + (self.positions["x"] * zoom),
(index_positions[1] * zoom) + (self.positions["y"] * zoom),
groups_datas["w"] * zoom,
groups_datas["h"] * zoom
)
color = "Done" if self.checked else (
"Logic" if self.all_logic else ("HaveLogic" if self.logic_cpt > 0 else "NotLogic"))
self.pin_color = self.map.tracker.core_service.get_color_from_font(font, color)
temp_surface = pygame.Surface((0, 0), pygame.SRCALPHA, 32).convert_alpha()
self.surface_logic_indicator, self.position_logic_indicator = MainMenu.MainMenu.draw_text(
text=f"{self.logic_cpt}",
font_name=font_path,
color=self.map.tracker.core_service.get_color_from_font(font_number, "Normal"),
font_size=font_number["Size"] * zoom,
surface=temp_surface,
position=(self.pin_rect.x, self.pin_rect.y),
outline=0.5 * zoom
)
rect = self.surface_logic_indicator.get_rect()
x_number = self.pin_rect.x + (self.pin_rect.w / 2) - (rect.w / 2) + (0.5 * zoom)
y_number = self.pin_rect.y + (self.pin_rect.h / 2) - (rect.h / 2) + (1.5 * zoom)
self.position_logic_indicator = (x_number, y_number)
def draw(self, screen):
if not self.all_check_hidden():
font = self.map.tracker.core_service.get_font("mapFont")
border_color = self.map.tracker.core_service.get_color_from_font(font, "Focused") if self.focused else (0, 0, 0)
self.draw_rect(screen, self.pin_color, border_color, self.pin_rect, 2 * self.map.tracker.core_service.zoom)
if self.logic_cpt > 0:
screen.blit(self.surface_logic_indicator, self.position_logic_indicator)
def left_click(self, mouse_position):
if not self.map.current_block_checks:
pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)
self.update()
self.map.current_block_checks = self
self.map.update()
def right_click(self, mouse_position):
tracker = None
for check in self.list_checks:
check.checked = not self.checked
check.update()
tracker = check.tracker
self.update()
if tracker:
tracker.current_map.update()
def wheel_click(self, mouse_position):
tracker = None
for check in self.list_checks:
check.focused = not check.focused
check.update()
tracker = check.tracker
self.update()
if tracker:
tracker.current_map.update()
def get_rect(self):
return self.pin_rect
@staticmethod
def draw_rect(surface, fill_color, outline_color, rect, border=1):
surface.fill(outline_color, rect)
surface.fill(fill_color, rect.inflate(-border * 2, -border * 2))
def get_data(self):
checks_datas = []
for check in self.list_checks:
checks_datas.append(check.get_data())
data = {
"id": self.id,
"name": self.name,
"checks_datas": checks_datas
}
return data
def set_data(self, datas):
i = 0
for data in datas["checks_datas"]:
for check in self.list_checks:
if (check.id == data["id"]) and (check.name == data["name"]):
i = i + 1
check.set_data(data)
break
def all_check_hidden(self):
hidden = 0
for check in self.list_checks:
if check.hide:
hidden = hidden + 1
return hidden == len(self.list_checks)
|
linsorak/LinSoTracker
|
Entities/Maps/BlockChecks.py
|
BlockChecks.py
|
py
| 5,551 |
python
|
en
|
code
| 3 |
github-code
|
6
|
33272837414
|
import concurrent.futures
import timeit
import matplotlib.pyplot as plt
import numpy
from controller import Controller
def mainUtil():
result = []
for i in range(50):
c = Controller(300)
c.GradientDescendAlgorithm(0.000006, 1000)
result.append(c.testWhatYouHaveDone())
return result
if __name__ == '__main__':
start = timeit.default_timer()
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
print('This takes anywhere from 5 to 12 minutes to run (depending on how powerful your machine is).\n\tGo grab some popcorn')
Q1 = executor.submit(mainUtil)
Q2 = executor.submit(mainUtil)
Q3 = executor.submit(mainUtil)
Q4 = executor.submit(mainUtil)
Q5 = executor.submit(mainUtil)
errorList = Q1.result() + Q2.result() + Q3.result() + Q4.result() + Q5.result()
print('In 250 runs you have achieved:')
print('\tMaximum Error: ', max(errorList))
print('\tMinimum Error: ', min(errorList))
print('\tAverage Error: ', numpy.average(errorList))
plt.plot(errorList, 'ro')
plt.show()
end = timeit.default_timer()
print('Time: ', (end - start) / 60)
|
CMihai998/Artificial-Intelligence
|
Lab7 - GDA/main.py
|
main.py
|
py
| 1,133 |
python
|
en
|
code
| 3 |
github-code
|
6
|
73549678908
|
__doc__ = """
Script for collection of training data for deep learning image recognition.
Saving standardised pictures of detected faces from webcam stream to given folder.
Ver 1.1 -- collect_faces.py
Author: Aslak Einbu February 2020.
"""
import os
import cv2
import datetime
import imutils
import time
import numpy as np
# Loading neural net model for face detection
net = cv2.dnn.readNetFromCaffe("model/face_detect/deploy.prototxt.txt",
"model/face_detect/res10_300x300_ssd_iter_140000.caffemodel")
# Setting folder name for saving of detected images.
person = input("Hvem er personen?")
bildepath = f'/tmp/dnn/{person}'
if not os.path.exists(bildepath):
os.makedirs(bildepath)
def main():
"""
Analysing webcam video stream and displaying detected faces.
Applies deep neural net model for detection of faces in in image.
Saves images of detected faces to given folder (stops saving after 1000 pictures).
"""
antall = 0
sistetid = time.time()
stdtxt = "Ingen fjes observert!"
dcttxt = "Fjes observert!"
noen = False
camera = cv2.VideoCapture(0)
print("Analyserer webcam bildestrøm...")
print(f'Lagrer alle passfoto i {bildepath}.')
while True:
(grabbed, frame) = camera.read()
if not grabbed:
break
detekt_txt = stdtxt
frame = imutils.resize(frame, width=500)
lager = frame.copy()
# Detecting faces:
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence < 0.7:
continue
detekt_txt = dcttxt
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
text = "{:.1f}%".format(confidence * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(frame, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
try:
fjes = lager[startY:endY, startX:endX]
fjes = cv2.resize(fjes, (100, 120))
# Saving image of face
if (time.time() - sistetid) > 0.5:
sistetid = time.time()
if antall < 1000:
cv2.imwrite(f'{bildepath}/{str(time.time())}.jpg', fjes)
antall = antall + 1
print(f'\rAntall bilder lagra: {antall}', sep='', end='', flush=True)
except:
pass
noen = True
if (noen):
try:
frame[255:375, 0:100] = fjes
cv2.putText(frame, "Siste person", (10, 270), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1)
except:
pass
cv2.putText(frame, detekt_txt, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
cv2.putText(frame, datetime.datetime.now().strftime(" %d %B %Y %I:%M:%S%p"), (4, 40), cv2.FONT_HERSHEY_SIMPLEX,
0.4, (0, 0, 0), 1)
cv2.putText(frame, f'Bilder lagra:{antall}', (10, 250), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1)
cv2.imshow("Fjes", frame)
cv2.moveWindow("Fjes", 1450, 100)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
aslake/family_deep_learning
|
collect_faces.py
|
collect_faces.py
|
py
| 3,826 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20908637143
|
from python_app_configs import config
from python_generic_modules import se_os
from python_generic_modules import se_docker
import re
import os
import sys
import glob
import time
import jinja2
template1 = jinja2.Template("{% for i in range(0,last_num)%}zookeepernode{{ i }}.{{ domain }}:2181{% if not loop.last %},{% endif %}{% endfor %}")
zookeeper_nodes = template1.render(last_num=config.zookeeper_nodes,domain=config.domain_name)
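# For example, with config.zookeeper_nodes = 2 and config.domain_name = 'example.local'
# (hypothetical values), the template above renders to:
#   "zookeepernode0.example.local:2181,zookeepernode1.example.local:2181"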
def setup_kafka_dirs():
print('Setting up kafka bind_mount directories...\n')
os.chdir(config.dest_dir)
dir_glob = 'kafka' + '*'
dir_lst = glob.glob(dir_glob)
for i in dir_lst:
se_os.del_dir(str(i))
src_dir_path = os.path.join(config.data_dir,'kafka_conf')
for i in range(0,config.kafka_nodes):
dest_path_new = os.path.join(config.dest_dir,'kafkanode'+str(i))
se_os.copy_dir(src_dir_path,dest_path_new)
    print('bind_mount directories setup complete\n')
def config_kafka(i):
src_file_path = os.path.join(config.data_dir,'kafka_conf','server.properties')
dest_file_path = os.path.join(config.dest_dir,'kafkanode'+str(i),'server.properties')
param1 = re.compile(r'(.*)(broker.id)(.*)')
param2 = re.compile(r'(.*)(num.partitions)(.*)')
param3 = re.compile(r'(.*)(zookeeper.connect=)(.*)')
with open(src_file_path,mode='r') as file1:
with open(dest_file_path,mode='w') as file2:
for line in file1:
if param1.search(line):
line = param1.sub(r'\1\2{}'.format('='+str(i)), line)
file2.write(line)
continue
elif param2.search(line):
line = param2.sub(r'\1\2{}'.format('='+str(str(config.kafka_default_partitions))), line)
file2.write(line)
continue
elif param3.search(line):
line = param3.sub(r'\1\2{}'.format(zookeeper_nodes), line)
file2.write(line)
continue
else:
file2.write(line)
def launch_kafka():
print('\n====Running kafka_setup module====\n')
setup_kafka_dirs()
time.sleep(3)
print('\n====Running kafka_config module====\n')
for i in range(0,config.kafka_nodes):
print("Updating configs for node 'kafkanode{}'\n".format(i))
config_kafka(i)
time.sleep(3)
print('\n====Creating SE_Platform Network if not already created====\n')
hadoop_net = config.hadoop_network_range + '/24'
lst = config.hadoop_network_range.split('.')
lst[3]='1'
hadoop_gateway = '.'.join(lst)
se_docker.create_network('hadoopnet',hadoop_net,hadoop_gateway)
print('\n====Launching containers and attaching bind mounts====\n')
for i in range(0,config.kafka_nodes):
se_docker.launch_containers('kmahesh2611/kafka','/kafka_2.11-2.1.0/bin/kafka-server-start.sh /kafka_2.11-2.1.0/config/server.properties','kafkanode' + str(i) + '.' + config.domain_name,'kafkanode' +str(i) + '.' + config.domain_name,{os.path.join(config.dest_dir,'kafkanode'+str(i)):{'bind':'/kafka_2.11-2.1.0/config','mode':'rw'}},'hadoopnet',True,True)
print('Wait for 10 seconds....')
time.sleep(10)
print('\n====Verify if containers are running====\n')
num = 0
for i in se_docker.get_all_containers():
if 'kafkanode' in i.name:
num = num + 1
if 'running' in i.status:
print('{} : {}'.format(i.name,i.status))
else:
print('Error: Container "{}" is in status "{}"\n'.format(i.name,i.status))
print('Exiting script\n')
sys.exit(1)
if num == 0:
print('No container found starting with name "kafkanode"')
print('Exiting script\n')
sys.exit(1)
### Creating Kafka topics ###
print('\n====Creating Kafka Topics====\n')
for i in config.kafka_topics:
print(se_docker.exec_command('kafkanode0' + '.' + config.domain_name,"/kafka_2.11-2.1.0/bin/kafka-topics.sh --create --zookeeper {} --replication-factor {} --partitions {} --topic {}".format(zookeeper_nodes,str(config.kafka_nodes),str(config.kafka_default_partitions),i)))
print("Created topics: {}\n".format([topics for topics in config.kafka_topics]))
def del_kafka_containers():
print('\n====Stopping and deleting Containers for kafka====\n')
for i in se_docker.get_all_containers():
if 'kafkanode' in i.name:
print('Stopping and deleting Container: {}\n'.format(i.name))
i.remove(force=True)
|
karthikmahesh2611/docker_hadoop
|
python_hadoop_modules/kafka.py
|
kafka.py
|
py
| 4,120 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10775009029
|
# Advent of Code 2020, day 3: count the trees ('#') hit while sliding down the map on each slope.
def enumerate2(xs, start=0, step=1):
    # Like enumerate(), but the index advances by `step` (the slope's horizontal shift per row).
    for x in xs:
        yield (start, x)
        start += step
data = list(open("./03-input.txt").read().splitlines())
# Each slope is (right, down); the map repeats horizontally, hence the modulo on the column index.
for s in ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2)):
    data2 = [(d, i, i % len(d), d[i % len(d)]) for i, d in enumerate2(data[::s[1]], step=s[0])]
    print(sum(d[3] == '#' for d in data2))
|
knjmooney/Advent-Of-Code
|
2020/03-toboggan.py
|
03-toboggan.py
|
py
| 348 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30061736104
|
import os,sys,random
import veri
NewName = os.path.expanduser('~')
if os.path.exists('%s/vlsistuff' % NewName):
sys.path.append('%s/vlsistuff/verification_libs3'%NewName)
elif 'VLSISTUFF' in os.environ:
sys.path.append('%s/verification_libs3'%os.environ['VLSISTUFF'])
else:
print("please set VLSISTUFF to where You cloned vlsistuff repository. like: /home/cucu/softs/vlsistuff")
sys.exit()
import logs
Monitors=[]
cycles=0
GIVEUP_TIMEOUT = 1000 # how many cycles to run before retirement.
import sequenceClass
seq = sequenceClass.sequenceClass('tb',Monitors,'',[])
def pymonname(Name):
logs.pymonname(Name)
def sequence(TestName):
Seq = logs.bin2string(TestName)
seq.readfile(Seq)
logs.setVar('sequence',Seq)
Dir = os.path.dirname(Seq)
logs.setVar('testsdir',Dir)
logs.log_info('SEQUENCE %d'%len(seq.Sequence))
def cannot_find_sig(Sig):
logs.log_error('cannot find "%s" signal in the design'%Sig)
class driverMonitor(logs.driverClass):
def __init__(self,Path,Monitors):
logs.driverClass.__init__(self,Path,Monitors)
# Monitors.append(self)
# self.Path = Path
# self.state='idle'
# self.waiting = 0
#
# def force(self,Sig,Val):
# veri.force('%s.%s'%(self.Path,Sig),str(Val))
#
# def peek(self,Sig):
# return logs.peek('%s.%s'%(self.Path,Sig))
# def peeksigned(self,Sig):
# return logs.peeksigned('%s.%s'%(self.Path,Sig))
#
# def valid(self,Sig):
# return self.peek(Sig)==1
#
def run(self):
if self.waiting>0:
self.waiting -= 1
elif self.state=='idle':
self.state='work0'
elif self.state=='work0':
self.state='work1'
elif self.state=='work1':
self.state='idle'
if self.valid('validin')and self.valid('takenin'):
return
# example of driver class usage
# driverMonitor('tb',Monitors)
def negedge():
global cycles
cycles += 1
veri.force('tb.cycles',str(cycles))
if (cycles>GIVEUP_TIMEOUT):
logs.log_info('finishing on default guard of %d'%GIVEUP_TIMEOUT)
veri.finish()
rst_n = veri.peek('tb.rst_n')
if (rst_n!='1'):
return
if (cycles==30):
veri.listing('tb','100','deep.list')
if (cycles>30):
for Mon in Monitors: Mon.run()
def cucu(): # list of all interface signals, just to help You find the names
veri.force('tb.requests','0')
grants = logs.peek('tb.grants')
|
greenblat/vlsistuff
|
rtl_library/round_robin/verilog.py
|
verilog.py
|
py
| 2,502 |
python
|
en
|
code
| 41 |
github-code
|
6
|
19092857489
|
from collections import namedtuple
import csv
import gzip
import logging
import sys
import urllib.parse
csv.field_size_limit(sys.maxsize)
logging.basicConfig(level=logging.INFO)
Switch = namedtuple("Switch", ['srclang', 'targetlang', 'country', 'qid', 'title', 'datetime', 'usertype', 'title_country_src_count'])
Session = namedtuple('Session', ['usrhash', 'country', 'pageviews', 'usertype'])
Pageview = namedtuple('Pageview', ['dt', 'proj', 'title', 'wd', 'referer'])
EDIT_STR = "EDITATTEMPT"
usertypes = ['reader', 'editor']
def tsv_to_sessions(tsv, trim=False):
"""Convert TSV file of pageviews to reader sessions.
Each line corresponds to a pageview and the file is sorted by user and then time.
Fields order is: hashed user ID, wikipedia project, page title, page ID, datettime, IP country, referer, Wikidata ID
For example:
00000a5795ba512... enwiki Columbidae 63355 2019-02-16T11:31:53 Norway https://www.google.com/ Q10856
00000a5795ba512... enwiki Anarchism 12 2019-02-16T11:32:05 Norway https://en.wikipedia.org/ Q6199
This yields a Session object where:
session.usrhash = '00000a5795ba512...'
session.country = 'Norway'
session.pageviews = [(dt='2019-02-16T11:31:53', proj='enwiki', title='Columbidae', wd='Q10856', referer='google'),
(dt='2019-02-16T11:32:05', proj='enwiki', title='Anarchism', wd='Q6199', referer='enwiki')]
"""
expected_header = ['user', 'project', 'page_title', 'page_id', 'dt', 'country', 'referer', 'item_id']
usr_idx = expected_header.index('user')
proj_idx = expected_header.index('project')
title_idx = expected_header.index('page_title')
dt_idx = expected_header.index('dt')
country_idx = expected_header.index("country")
referer_idx = expected_header.index('referer')
wd_idx = expected_header.index("item_id")
malformed_lines = 0
i = 0
with gzip.open(tsv, 'rt') as fin:
assert next(fin).strip().split("\t") == expected_header
curr_usr = None
country = None
usertype = 'reader'
session = []
for i, line in enumerate(fin):
line = line.strip().split("\t")
try:
usr = line[usr_idx]
proj = line[proj_idx]
title = line[title_idx]
dt = line[dt_idx]
ref = ref_class(line[referer_idx])
except IndexError:
malformed_lines += 1
continue
try:
wd_item = line[wd_idx]
except IndexError:
wd_item = None
pv = Pageview(dt, proj, title, wd_item, ref)
if usr == curr_usr:
if title == EDIT_STR:
usertype = 'editor'
else:
session.append(pv)
else:
if curr_usr:
if trim:
trim_session(session)
yield(Session(curr_usr, country, session, usertype=usertype))
curr_usr = usr
country = line[country_idx]
if title == EDIT_STR:
usertype = 'editor'
session = []
else:
usertype = 'reader'
session = [pv]
if curr_usr:
if trim:
trim_session(session)
yield (Session(curr_usr, country, session, usertype=usertype))
print("{0} total lines. {1} malformed.".format(i, malformed_lines))
def ref_class(referer):
dom = urllib.parse.urlparse(referer).netloc
if 'wikipedia' in dom:
return dom.split('.')[0].replace('-', '_') + 'wiki'
elif 'google' in dom:
return 'google'
else:
if dom.startswith('www.'):
dom = dom[4:]
return dom
def trim_session(pvs):
"""Remove duplicate page views (matching title and project).
For a given session, this retains only the first view of a given page title on a given project.
Parameters:
pvs: list of page view objects for a given reader's session
Returns:
Nothing. The page views are modified in place.
"""
# only report based on first pageview of page
user_unique_pvs = set()
pvs_to_remove = []
for i in range(0, len(pvs)):
pv_id = '{0}-{1}'.format(pvs[i].proj, pvs[i].title)
if pv_id in user_unique_pvs:
pvs_to_remove.append(i)
user_unique_pvs.add(pv_id)
for i in range(len(pvs_to_remove)-1, -1, -1):
pvs.pop(pvs_to_remove[i])
def get_lang_switch(pvs, wikidbs=(), ref_match=False):
"""Get pairs of page views that are language switches.
Parameters:
pvs: list of page view objects for a given reader's session
wikidbs: if empty, all language switches return. Otherwise, only language switches that involve languages
included in wikidbs will be retained.
Returns:
switches: list of tuples, where each tuple corresponds to two page views of a single Wikidata item
across two different projects.
If a session is:
[(dt='2019-02-16T11:31:53', proj='enwiki', title='Columbidae', wd='Q10856'),
(dt='2019-02-16T11:32:05', proj='enwiki', title='Anarchism', wd='Q6199'),
(dt='2019-02-16T11:32:13', proj='eswiki', title='Columbidae', wd='Q10856')]
Then the switches would be of the form [(0, 2)]
"""
switches = []
# at least two different projects viewed in the session
if len(set([p.proj for p in pvs])) > 1:
# find all wikidata items viewed in multiple languages
# preserve which one was viewed first
for i in range(0, len(pvs) - 1):
for j in range(i+1, len(pvs)):
diff_proj = pvs[i].proj != pvs[j].proj
same_item = pvs[i].wd and pvs[i].wd == pvs[j].wd
if diff_proj and same_item:
if not wikidbs or pvs[i].proj in wikidbs or pvs[j].proj in wikidbs:
if ref_match:
if pvs[i].proj == pvs[j].referer:
switches.append((i, j))
else:
switches.append((i, j))
break
return switches
def get_nonlang_switch(pvs, wikidb, switches=(), direction="from"):
"""Get page views in a language that are not switches of the specified direction.
Finds pages in a language that the user did not switch to/from (depending on direction parameter).
User must have at least one language switch with specified wikidb and direction in their session though
to indicate that they might have switched.
Parameters:
pvs: list of page view objects for a given reader's session
wikidb: Only language non-switches that involve this language will be retained.
switches: if precalculated, this speeds up processing
direction: "from" indicates the language switch must have had wikidb as the origin project.
"to" indicates the language switch must have had wikidb as the destination project.
Returns:
no_switches: list of page view indices.
For this session and wikidb = "enwiki" and direction = "from":
[(dt=2019-02-16T11:31:53, proj=enwiki, title='Columbidae', wd='Q10856'),
(dt=2019-02-16T11:32:05, proj=enwiki, title='Anarchism', wd='Q6199'),
(dt=2019-02-16T11:32:13, proj=eswiki, title='Columbidae', wd='Q10856')]
Then the no_switches would be of the form: [1]
If direction was "to" or wikidb was "eswiki" then no page views would be returned.
"""
no_switches = []
# at least two different projects viewed in the session
if len(set([p.proj for p in pvs])) > 1:
if switches:
all_switches = switches
else:
all_switches = get_lang_switch(pvs, [wikidb])
# did user have any switches of form:
# direction == "from": wikidb -> other language
# direction == "to": other language -> wikidb
dir_switches_in_lang = set()
for f,t in all_switches:
# switched from wikidb -> other project
if direction == "from" and pvs[f].proj == wikidb:
dir_switches_in_lang.add(f)
# switched from other project -> wikidb
elif direction == "to" and pvs[t].proj == wikidb:
dir_switches_in_lang.add(t)
if dir_switches_in_lang:
# find all wikidata items not viewed in multiple languages
# preserve which one was viewed first
for i in range(0, len(pvs)):
if pvs[i].proj == wikidb and i not in dir_switches_in_lang:
no_switches.append(i)
return no_switches
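# A minimal usage sketch (assumptions: 'pageviews.tsv.gz' is a file in the format described in
# tsv_to_sessions, and the 'enwiki' filter is only an illustration).
if __name__ == '__main__':
    n_switches = 0
    for session in tsv_to_sessions('pageviews.tsv.gz', trim=True):
        n_switches += len(get_lang_switch(session.pageviews, wikidbs=('enwiki',)))
    print('Sessions contained {0} language switches involving enwiki.'.format(n_switches))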
|
geohci/language-switching
|
session_utils.py
|
session_utils.py
|
py
| 8,958 |
python
|
en
|
code
| 2 |
github-code
|
6
|
25508679765
|
#!/usr/bin/env python3
import shutil
import psutil
import socket
import report_email
import time
import os
def check_disk_usage(disk):
disk_usage = shutil.disk_usage(disk)
free = (disk_usage.free / disk_usage.total) * 100
return free > 20
def check_cpu_usage():
usage = psutil.cpu_percent(1)
return usage < 80
def check_memory():
memory = psutil.virtual_memory()[1] / 10**6
return memory > 500
def check_localhost():
#print(socket.gethostbyname('localhost'))
if socket.gethostbyname('localhost') == '127.0.0.1':
return True
else:
return False
def alert(error):
sender = "[email protected]"
receiver = "{}@example.com".format(os.environ.get('USER'))
subject = error
body = "Please check your system and resolve the issue as soon as possible."
message = report_email.generate(sender, receiver, subject, body, '')
report_email.send(message)
def main():
while True:
if not check_disk_usage('/'):
alert('Error - Available disk space is less than 20%')
if not check_cpu_usage():
alert('Error - CPU usage is over 80%')
if not check_memory():
alert('Error - Available memory is less than 500MB')
if not check_localhost():
alert('Error - localhost cannot be resolved to 127.0.0.1')
time.sleep(60)
if __name__ == "__main__":
main()
|
paesgus/AutomationTI_finalproject
|
health_check.py
|
health_check.py
|
py
| 1,330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72143877628
|
'''
Write a Python program that converts miles to kilometers. The program must print
the following messages (kept in Spanish, since they are the program's expected output):
Bienvenido (ingrese su nombre): <nombre>
Ingrese las millas a convertir: <millas>
Hola <nombre>, la conversión resulta:
<resultado> km
Save the program in a file named m2k.py
'''
name = input('Bienvenido (ingrese su nombre): ')
miles = float(input('Ingrese las millas a convertir:'))
mile_kilometer_ratio = 1.609344
kilometers = miles * mile_kilometer_ratio
# Note that here we use an f-string to build the complete output string.
# The {kilometers:.2f} notation tells Python to display only 2 decimal places of
# the kilometers variable.
print(f'Hola {name}, la conversión resulta:\n {kilometers:.2f} Km')
|
levensworth/udesa-pc-tutorial
|
2022-b/1-operaciones/clase_3_e2.py
|
clase_3_e2.py
|
py
| 767 |
python
|
es
|
code
| 2 |
github-code
|
6
|
74227616828
|
from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_news_sources,get_allArticles,get_headlines
from ..models import Sources, Articles
#views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
# getting sources
business_sources = get_news_sources('business')
sports_sources = get_news_sources('sports')
technology_sources = get_news_sources('technology')
entertainment_sources = get_news_sources('entertainment')
# news_sources = get_news_sources('sources')
title = "Breaking News"
return render_template('index.html', title = title, business_sources = business_sources, sports_sources=sports_sources, technology_sources=technology_sources,entertainment_sources=entertainment_sources)
# @main.route('/articles')
# def articles():
# '''
# view article page
# '''
# articles = get_allArticles(id)
# return render_template("articles.html", id = id, articles = articles)
@main.route('/articles/<int:id>')
def articles(id):
"""_
to display news and article details
"""
articles = get_allArticles(id)
return render_template("articles.html", id = id, articles =articles)
@main.route('/headlines')
def headlines():
'''
view headline page
'''
#getting headlines
headline_id = get_headlines('id')
headline_name = get_headlines('name')
title = 'Top Headlines'
return render_template('headlines.html', title = title, headline_id= headline_id, headline_name=headline_name)
|
chanaiagwata/News_API
|
app/main/views.py
|
views.py
|
py
| 1,612 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27614632298
|
# coding:utf-8
from appium import webdriver
class Werdriver:
def get_driver(self):
configure = {
"platformName": "Android",
"deviceName": "PBV0216922007470",
"app": "/Users/luyunpeng/Downloads/ci_v1.5.0_2019-07-18_16-35_qa.apk",
"noReset": "true"
}
driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", configure)
return driver
if __name__ == '__main__':
test = Werdriver()
test.get_driver()
|
lyp0129/study_appium
|
get_driver/test_driver.py
|
test_driver.py
|
py
| 497 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22371526331
|
from odoo import api, SUPERUSER_ID
def post_init_hook(cr, registry, vals=None):
"""For brand new installations"""
env = api.Environment(cr, SUPERUSER_ID, {})
# Change only those with no weight already set
products_init = env['product.product'].search([
('weight', '=', 0),
]).filtered('is_weight_uom')
for product in products_init:
product._onchange_uom_product_weight_through_uom()
|
detian08/bsp_addons
|
product-attribute-11.0/product_weight_through_uom/hooks.py
|
hooks.py
|
py
| 425 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21951291908
|
# Define a function AddRightDigit(D, K) that appends the digit D to the right of a positive
# integer K (D is an input integer parameter in the range 0 to 9; K is an integer parameter
# that is both input and output). Using the function, successively append the given digits
# D1 and D2 to the right of the number K, printing the result after each append.
def AddRightDigit(D, K):
    """Append the digit D (0..9) to the right of the positive integer K and return the result."""
    return K * 10 + D

D1 = int(input("Enter a digit from 0 to 9: "))
while not (0 <= D1 <= 9):
    print("Enter a digit in the specified range")
    D1 = int(input("Enter a digit from 0 to 9: "))
K = int(input("Enter the number K: "))

K = AddRightDigit(D1, K)
print("Intermediate result with D1:", K)
D2 = int(input("Enter another digit from 0 to 9: "))
while not (0 <= D2 <= 9):
    print("Enter a digit in the specified range")
    D2 = int(input("Enter another digit from 0 to 9: "))
K = AddRightDigit(D2, K)
print("Final result with D2:", K)
|
DaNil4594/EremenkoPythonProject
|
PZ_5/PZ_5_2.py
|
PZ_5_2.py
|
py
| 1,320 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
5449785498
|
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.utils import np_utils
(xtrain,ytrain),(xtest,ytest) = cifar10.load_data()
print('xtrain.shape',xtrain.shape)
print('ytrain.shape',ytrain.shape)
print('ytest.shape',ytest.shape)
print('xtest.shape',xtest.shape)
batchsize=200
cats = 10
nepoch = 100
xtrain = xtrain.reshape(50000,3072)
xtest = xtest.reshape(10000,3072)
xtrain = xtrain/255
xtest = xtest/255
ytrain = np_utils.to_categorical(ytrain,cats)
ytest = np_utils.to_categorical(ytest,cats)
model = Sequential()
model.add(Dense(units=10,input_shape=(3072,),activation='softmax',kernel_initializer='normal'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.05), metrics=['accuracy'])
model.summary()
history = model.fit(xtrain, ytrain, nb_epoch=nepoch, batch_size=batchsize, verbose=1)
# Evaluate
evaluation = model.evaluate(xtest, ytest, verbose=1)
print('Summary: Loss over the test dataset: %.2f, Accuracy: %.2f' % (evaluation[0], evaluation[1]))
|
daftengineer/kerasSagemaker
|
test.py
|
test.py
|
py
| 1,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23186461899
|
"""FECo3: Python bindings to a .fec file parser written in Rust."""
from __future__ import annotations
import os
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING, NamedTuple
from . import _feco3, _version
if TYPE_CHECKING:
import pyarrow as pa
__version__ = _version.get_version()
"""Version string for this package."""
class Header(NamedTuple):
"""The header of a [FecFile][feco3.FecFile].
Attributes:
fec_version: The version of the FEC file format.
software_name: The name of the software that generated the file.
software_version: The version of the software that generated the file.
This isn't present in some older FEC files.
report_id: If this .fec file is an amendment to a previous filing,
the filing number of the original.
report_number: If this .fec file is an amendment to a previous filing,
which number amendement this is (1, 2, 3 etc)
"""
fec_version: str
software_name: str
software_version: str | None
report_id: str | None
report_number: str | None
class Cover(NamedTuple):
"""The Cover Line of an [FecFile][feco3.FecFile].
Attributes:
form_type: The form type of the filing, eg. "F3"
filer_committee_id: The FEC-assigned ID of the committee that filed the report,
eg "C00618371"
"""
form_type: str
filer_committee_id: str
class FecFile:
"""An FEC file."""
def __init__(self, src: str | os.PathLike) -> None:
"""Create a new FecFile.
This doesn't do any reading or parsing until you access one of the members.
Args:
src: A path or a URL to an FEC file.
If a string that starts with "http://" or "https://", it will be
treated as a URL. Otherwise, it will be treated as a path.
"""
if isinstance(src, str) and (
src.startswith("http://") or src.startswith("https://")
):
self._src = src
self._wrapped = _feco3.FecFile.from_https(self._src)
else:
self._src = Path(src)
self._wrapped = _feco3.FecFile.from_path(self._src)
@cached_property
def header(self) -> Header:
"""The [Header][feco3.Header] of the FEC file.
The first time this is accessed, the FEC file will be read and parsed as
far as needed. Subsequent accesses will return the same object.
"""
h = self._wrapped.header
return Header(
fec_version=h.fec_version,
software_name=h.software_name,
software_version=h.software_version,
report_id=h.report_id,
report_number=h.report_number,
)
@cached_property
def cover(self) -> Cover:
"""The [Cover][feco3.Cover] of the FEC file.
The first time this is accessed, the FEC file will be read and parsed as
far as needed. Subsequent accesses will return the same object.
"""
c = self._wrapped.cover
return Cover(
form_type=c.form_type,
filer_committee_id=c.filer_committee_id,
)
def to_parquets(self, out_dir: str | os.PathLike) -> None:
"""Write all itemizations in this FEC file to parquet files.
There will be one parquet file for each record type, eg. ``sa11.parquet``.
"""
parser = _feco3.ParquetProcessor(out_dir)
parser.process(self._wrapped)
def to_csvs(self, out_dir: str | os.PathLike) -> None:
"""Write all itemizations in this FEC file to CSV files.
There will be one CSV file for each record type, eg. ``sa11.csv``.
"""
parser = _feco3.CsvProcessor(out_dir)
parser.process(self._wrapped)
def __repr__(self) -> str:
src_str = f"src={self._src!r}"
return f"{self.__class__.__name__}({src_str})"
# This is what rust parquet uses as a batch size
# https://docs.rs/parquet/40.0.0/src/parquet/file/properties.rs.html#83
DEFAULT_PYARROW_RECORD_BATCH_MAX_SIZE = 1024 * 1024
class ItemizationBatch(NamedTuple):
"""A batch of itemizations.
Attributes:
code: The code of the itemization type, eg. "SA11AI"
records: A [pyarrow.RecordBatch][pyarrow.RecordBatch] of itemizations.
"""
code: str
records: pa.RecordBatch
class PyarrowBatcher:
"""
    Iterates an [FecFile][feco3.FecFile] and yields [ItemizationBatch][feco3.ItemizationBatch]s of itemizations.
""" # noqa: E501
def __init__(self, fec_file: FecFile, max_batch_size: int | None = None) -> None:
"""Create a new PyarrowBatcher.
Args:
fec_file: The [FecFile][feco3.FecFile] to iterate.
max_batch_size: The max rows per [pyarrow.RecordBatch][pyarrow.RecordBatch].
Defaults to 1024 * 1024, which is what rust parquet uses.
"""
self._fec_file = fec_file
if max_batch_size is None:
max_batch_size = DEFAULT_PYARROW_RECORD_BATCH_MAX_SIZE
self._wrapped = _feco3.PyarrowBatcher(max_batch_size)
def __iter__(self) -> PyarrowBatcher:
return self
def __next__(self) -> ItemizationBatch:
"""Get the next batch of itemizations from the FEC file."""
pair = self._wrapped.next_batch(self._fec_file._wrapped)
if pair is None:
raise StopIteration
code, batch = pair
return ItemizationBatch(code, batch)
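# A minimal usage sketch (assumptions: 'filing.fec' is a local .fec file path and pyarrow is
# installed; only classes and attributes defined in this module are used).
if __name__ == "__main__":
    fec_file = FecFile("filing.fec")
    print(fec_file.header.fec_version, fec_file.cover.form_type)
    for code, records in PyarrowBatcher(fec_file):
        print(code, records.num_rows)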
|
NickCrews/feco3
|
python/src/feco3/__init__.py
|
__init__.py
|
py
| 5,512 |
python
|
en
|
code
| 2 |
github-code
|
6
|
1530038484
|
import requests
import json
from bs4 import BeautifulSoup
def songwhip_it(url):
html = requests.get('https://songwhip.com/'+url).content
soup = BeautifulSoup(html, 'html.parser')
links_text = list(soup.findAll('script'))[2].get_text()
links_json = json.loads(links_text[links_text.index('{'):-1])['links']
return links_json
songwhip_it("https://open.spotify.com/track/4Aep3WGBQlpbKXkW7kfqcU")
|
kartikye/q
|
linker.py
|
linker.py
|
py
| 415 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34142194304
|
import numpy as np
import scipy.constants
from pathlib import Path
class ElectricAcceleration:
bodies = []
def __init__(self, bodies):
"""This will allow the list of particles from the Accelerator module to be inserted letting the ElectricAcceleration class calculate their acceleration"""
self.bodies = bodies
def acceleration(self, mag1, mag2, mag3, sinelec1, sinelec2, sinelec3):
"""The values of the electic and magnetic field set in the Accelerator file are imported here"""
constantelectric=np.array([sinelec1, sinelec2, sinelec3])
magnetic=np.array([mag1, mag2, mag3])
magnitudeMagnetic=np.linalg.norm(magnetic)
for particle1 in self.bodies:
"""The electric field due to particle-particle interactions and the acceleration are set to be arrays"""
electricSection=np.array([0., 0., 0.])
acceleration = np.array([0., 0., 0.])
"""The charge mass and velocity of the particles are set to be their values as calculated in the particle class"""
c1 = particle1.charge
m1 = particle1.mass
v1 = particle1.velocity
#kineticE = 0
for particle2 in self.bodies:
if particle1 != particle2:
"""This allows the calculation of the acceleration due to the electric and magnetic fields for each body in the system"""
m2 = particle2.mass
c2 = particle2.charge
v2 = particle2.velocity
"""This calculates the distance between the accelerating body and the body causing the acceleration, this will only apply when 2 or more charged particles are present"""
r = np.array(particle1.position) - np.array(particle2.position)
magnitudeR = np.linalg.norm(r)
const=1/(4*scipy.constants.pi*scipy.constants.epsilon_0)
#electricSection=np.array([1,1,0])
"""This calculates the electric field acting on a charged particle due to each other charged particle in the system"""
electric=np.array([const*(c2/magnitudeR**2)])
electricSection += ((electric/magnitudeR)*r)
#kineticE=np.linalg.norm(0.5*m1*v1**2)
#update magnetic with input functions
"""This combines the effects of the constant sinusoidal electric field and the effect due to other charged particles"""
totalelectric=electricSection+constantelectric
"""The value for the total electric field is then used in loretz equation to calculate the acceleration due to both the electric and magnetic fields"""
qvb=np.cross((c1*v1), magnetic)
acceleration=(((c1*totalelectric)+(qvb))/m1)+acceleration
#particle1.kineticE=kineticE
"""This sets the acceleration of the particle to be the previously calculated value for the current time step"""
particle1.acceleration = acceleration
#print(acceleration)
#for particle1 in self.bodies:
# """This allows the calculation of the angular and linear momentum of the system"""
# angularMomentum = 0
# momentum = 0
#for particle2 in self.bodies:
# if particle1 != particle2:
# m1 = particle1.mass
# r = np.array(particle1.position) - np.array(particle2.position)
# momentum = m1*np.linalg.norm(particle1.velocity)
# angularMomentum = momentum * np.linalg.norm(r)
#particle1.momentum = momentum
#particle1.angularMomentum = angularMomentum
|
Lancaster-Physics-Phys389-2020/phys389-2020-project-twgrainger
|
LidlFieldV1.py
|
LidlFieldV1.py
|
py
| 3,815 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74179568189
|
'''
To run test: move into same directory as spotify_api.py file
'''
import unittest
import spotify_api
import spotipy
import pandas as pd
from spotipy.oauth2 import SpotifyClientCredentials
client_id = 'ea776b5b86c54bd188d71ec087b194d3'
client_secret = '1e0fcbac137c4d3eb2d4cc190693792a' # keep this hidden
redirect_uri = 'http://localhost:' # will be changed
class TestSpotify(unittest.TestCase):
client_credentials_manager = None
sp = None
@classmethod
def setUpClass(cls):
cls.client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
cls.sp = spotipy.Spotify(client_credentials_manager=cls.client_credentials_manager)
def testGetArtistInfoReturns(self):
an_dict = dict()
au_dict = dict()
artist = "Bad Suns"
info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
self.assertIsNotNone(info)
def testGetArtistInfo(self):
an_dict = dict()
au_dict = dict()
artist = "Bad Suns"
info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
self.assertEqual(5, len(info)) # make sure the number of albums recorded is correct
def testArtistToDF(self):
an_dict = dict()
au_dict = dict()
artist = "Bad Suns"
info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
df = spotify_api.artist_to_csv("Bad Suns", info)
self.assertEqual(58, len(df)) # make sure the number of albums recorded is correct
def testDFToDict(self):
an_dict = dict()
au_dict = dict()
artist = "Bad Suns"
info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
df = spotify_api.artist_to_csv("Bad Suns", info)
d = spotify_api.artist_df_to_dict(df, "Bad Suns")
self.assertEqual(13, len(d)) # make sure the number of albums recorded is correct
def testDFToSongs(self):
an_dict = dict()
au_dict = dict()
artist = "Bad Suns"
info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
df = spotify_api.artist_to_csv("Bad Suns", info)
songs = spotify_api.artist_df_to_songs(df, "Bad Suns")
self.assertEqual(13, len(songs)) # make sure the number of albums recorded is correct
unittest.main()
|
dylanmccoy/songtrackr
|
tests/spotify_unittest.py
|
spotify_unittest.py
|
py
| 2,431 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3967140891
|
import torch
def batch_horizontal_flip(tensor, device):
"""
:param tensor: N x C x H x W
:return:
"""
inv_idx = torch.arange(tensor.size(3) - 1, -1, -1).long().to(device)
img_flip = tensor.index_select(3, inv_idx)
return img_flip
def euclidean_dist(x: torch.Tensor, y: torch.Tensor):
"""
Args:
x: pytorch Variable, with shape [m, d]
y: pytorch Variable, with shape [n, d]
Returns:
dist: pytorch Variable, with shape [m, n]
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(beta=1, alpha=-2, mat1=x, mat2=y.t())
dist.clamp_(min=1e-12)
dist.sqrt_() # for numerical stability
return dist
if __name__ == '__main__':
a = torch.tensor([[0., 0.]])
b = torch.tensor([[1., 1.]])
dist = euclidean_dist(a, b)
print(dist)
    # euclidean_dist expects 2-D inputs [m, d], so flatten the feature map and match d in b
    a = torch.randn(4, 2048, 16, 4).flatten(start_dim=1)
    b = torch.randn(1, 2048 * 16 * 4)
    dist = euclidean_dist(a, b)
    print(dist)
|
clw5180/reid-baseline
|
utils/tensor_utils.py
|
tensor_utils.py
|
py
| 1,068 |
python
|
en
|
code
| null |
github-code
|
6
|
10137995508
|
import socket
sock = socket.socket()
server_address = ('localhost', 9080)
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
message = str.encode("CREATE TABLE VADICS (id int, name str);")
try:
print('sending {!r}'.format(message))
sock.sendall(message)
amount_received = 0
amount_expected = len(message)
while amount_received < amount_expected:
data = sock.recv(1024)
amount_received += len(data)
print('received {!r}'.format(data))
finally:
print('closing socket')
sock.close()
|
etozhezhenechka/VadikDB
|
client.py
|
client.py
|
py
| 574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43346750218
|
if __name__ == '__main__':
from ovh import *
import argparse
import logging
logger = logging.getLogger("ovh/download_db")
parser = argparse.ArgumentParser(description='Creates N workers on the OVH cloud.')
parser.add_argument('--db-name', default='Contrastive_DPG_v2', help='name for MySQL DB')
parser.add_argument('--db-path', default='../databases/', help='Path to database backup files')
args = parser.parse_args()
novac = get_nova_client()
master = get_master_instance(novac)
ssh_master = get_ssh_client(master)
download_db(ssh_master, args.db_name, args.db_path)
for instance in novac.servers.list():
logger.info(f"Downloading experiments from {instance.name}")
rsync_experiments(
instance.addresses["Ext-Net"][0]["addr"],
local_experiments_path=f'../experiments/remote/{instance.name}'
)
|
charleswilmot/Contrastive_DPG
|
src/ovh_download_db.py
|
ovh_download_db.py
|
py
| 899 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36407185173
|
import pytest
from logging import getLogger
from barbucket.domain_model.types import *
_logger = getLogger(__name__)
_logger.debug(f"--------- ---------- Testing Types")
def test_api_correct() -> None:
_logger.debug(f"---------- Test: test_api_correct")
try:
test_api = Api.IB
except AttributeError as e:
assert False, e
def test_api_incorrect() -> None:
_logger.debug(f"---------- Test: test_api_incorrect")
with pytest.raises(AttributeError):
test_api = Api.NON_EXIST # type: ignore
def test_exchange_correct() -> None:
_logger.debug(f"---------- Test: test_exchange_correct")
try:
test_exchange = Exchange.XETRA
except AttributeError as e:
assert False, e
def test_exchange_incorrect() -> None:
_logger.debug(f"---------- Test: test_exchange_incorrect")
with pytest.raises(AttributeError):
test_exchange = Exchange.NON_EXIST # type: ignore
def test_stock_type_correct() -> None:
_logger.debug(f"---------- Test: test_stock_type_correct")
try:
test_contract_type = StockType.ETF
except AttributeError as e:
assert False, e
def test_stock_type_incorrect() -> None:
_logger.debug(f"---------- Test: test_stock_type_incorrect")
with pytest.raises(AttributeError):
test_contract_type = StockType.NON_EXIST # type: ignore
def test_get_api_notation_for_exchange() -> None:
_logger.debug(f"---------- Test: test_get_api_notation_for_exchange")
trans = ApiNotationTranslator()
expected = "IBIS"
actual = trans.get_api_notation_for_exchange(
exchange=Exchange.XETRA,
api=Api.IB)
assert actual == expected
def test_get_exchange_from_api_notation() -> None:
_logger.debug(f"---------- Test: test_get_exchange_from_api_notation")
trans = ApiNotationTranslator()
expected = Exchange.XETRA
actual = trans.get_exchange_from_api_notation(
name="IBIS",
api=Api.IB)
assert actual == expected
def test_get_api_notation_for_contract_type() -> None:
_logger.debug(f"---------- Test: test_get_api_notation_for_contract_type")
trans = ApiNotationTranslator()
expected = "COMMON"
actual = trans.get_api_notation_for_stock_type(
stock_type=StockType.COMMON_STOCK,
api=Api.IB)
assert actual == expected
def test_get_contract_type_from_api_notation() -> None:
_logger.debug(f"---------- Test: test_get_contract_type_from_api_notation")
trans = ApiNotationTranslator()
expected = StockType.COMMON_STOCK
actual = trans.get_stock_type_from_api_notation(
name="COMMON",
api=Api.IB)
assert actual == expected
def test_get_api_notation_for_ticker_symbol() -> None:
_logger.debug(f"---------- Test: test_get_api_notation_for_ticker_symbol")
trans = ApiNotationTranslator()
expected = "AB CD"
actual = trans.get_api_notation_for_ticker_symbol(
ticker_symbol=TickerSymbol(name="AB_CD"),
api=Api.IB)
assert actual == expected
def test_get_ticker_symbol_from_api_notation() -> None:
_logger.debug(f"---------- Test: test_get_ticker_symbol_from_api_notation")
trans = ApiNotationTranslator()
ticker_symbol = trans.get_ticker_symbol_from_api_notation(
name="AB CD",
api=Api.IB)
assert type(ticker_symbol) == TickerSymbol
assert ticker_symbol.name == "AB_CD"
|
mcreutz/barbucket
|
tests/domain_model/test_types.py
|
test_types.py
|
py
| 3,398 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73022500347
|
import threading
def even_list_sum(numbers):
even_sum = sum(x for x in numbers if x % 2 == 0)
print(f"Sum of even elements: {even_sum}")
def odd_list_sum(numbers):
odd_sum = sum(x for x in numbers if x % 2 != 0)
print(f"Sum of odd elements: {odd_sum}")
def main():
input_str = input("Enter a list of integers separated by spaces: ")
numbers = list(map(int, input_str.split()))
evenlist_thread = threading.Thread(target=even_list_sum, args=(numbers,))
oddlist_thread = threading.Thread(target=odd_list_sum, args=(numbers,))
evenlist_thread.start()
oddlist_thread.start()
evenlist_thread.join()
oddlist_thread.join()
print("Exit from main")
if __name__ == "__main__":
main()
|
vedangthete30/Python-Assignments
|
Assignment 7/Assignment7_3.py
|
Assignment7_3.py
|
py
| 738 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30168009886
|
import requests
import pandas as pd
import arrow
import warnings
import io
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import logging
warnings.filterwarnings('ignore', 'Unverified HTTPS request')
url = "https://protect.cylance.com/Reports/ThreatDataReportV1/memoryprotection/"
token = "Token"
fullurl = (url + token)
path = 'Filepath'
logfile = 'FilePath'
nv = arrow.now()
date = nv.shift(days=-1).format('M/D/YYYY')
date2 = nv.shift(days=-1).format('YYYYMD')
def email_send(email_data):
from_addr = "EmailFrom"
to_addr = "EmailTo"
to_list = ["To_List"]
msg = MIMEMultipart()
msg['From'] = from_addr
msg['To'] = to_addr
msg['Subject'] = "Cylance Exploit Attempts for %s" %(date)
part2 = MIMEText(email_data, 'html')
msg.attach(part2)
server = smtplib.SMTP("smtpRelay", 25)
server.sendmail(from_addr,to_list,msg.as_string())
server.quit()
if __name__ == '__main__':
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
logging.info('Requesting MEM TDR')
urlData = requests.get(fullurl).content
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
logging.info('Creating dataframe')
df2 = pd.DataFrame(rawData)
logging.info('Dropping Serial Column')
df3 = df2.drop(["Serial Number",], axis = 1)
logging.info('Filtering Data by date')
test3 = (df3[df3['ADDED'].str.contains(date)])
logging.info('Selecting Column Headers')
output = (test3[["Device Name","ADDED",'PROCESS NAME','ACTION','TYPE','USER NAME']])
print(output)
if output.empty:
logging.info('No Memory Exploit for %s' % (date))
else:
logging.info('Creating CSV')
output.to_csv(path + date2 + "mem.csv", index=False)
logging.info('CSV Created')
logging.info('Converting Data to HTML')
email_data = output.to_html(index = False)
logging.info('Preparing Email')
email_send(email_data)
logging.info('Email Sent')
|
cmoxley1/Cylance
|
MemTDREmail.py
|
MemTDREmail.py
|
py
| 2,128 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43347425408
|
import numpy as np
import os
from collections import defaultdict, namedtuple
import re
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class Collection(object):
def __init__(self, path):
self.path = path
self.name = path.strip("/").split("/")[-1]
self.data = defaultdict(list)
self.RunDescription = namedtuple('RunDescription', ['n_sources', 'dim_sources', 'dim_shared', 'dim_correlate', 'dim_latent'])
self.RunData = namedtuple('RunData', ['sources', 'shared'])
for filename in os.listdir(self.path):
self.add_data(filename)
def add_data(self, filename):
match = re.match("[0-9]+_[0-9]+_[0-9]+_[0-9]+_[0-9]+_[0-9]+_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+).npz", filename)
if match:
new_data = np.load(self.path + '/' + filename)
run_description = self.RunDescription(*(int(x) for x in match.groups()))
run_data = self.RunData(new_data["sources"], new_data["shared"])
self.data[run_description].append(run_data)
def get_final_reconstruction_errors_means_stds(self):
sources_data = {
run_description: np.array([run_data.sources for run_data in run_data_list])
for run_description, run_data_list in self.data.items()
}
shared_data = {
run_description: np.array([run_data.shared for run_data in run_data_list])
for run_description, run_data_list in self.data.items()
}
sources_means = {
run_description: np.mean(data)
for run_description, data in sources_data.items()
}
sources_stds = {
run_description: np.std(np.mean(data, axis=-1))
for run_description, data in sources_data.items()
}
shared_means = {
run_description: np.mean(data)
for run_description, data in shared_data.items()
}
shared_stds = {
run_description: np.std(np.mean(data, axis=-1))
for run_description, data in shared_data.items()
}
return sources_means, sources_stds, shared_means, shared_stds
def plot_wrt_latent_dim(self, ax, legend=True, lasts=3, inset=False, ylabel=False, title='exclusive'):
sources_means, sources_stds, shared_means, shared_stds = self.get_final_reconstruction_errors_means_stds()
keys = list(sources_means.keys())
keys.sort(key=lambda x: x.dim_latent)
x = np.array([key.dim_latent for key in keys])
sources_means = np.array([sources_means[key] for key in keys])
sources_stds = np.array([sources_stds[key] for key in keys])
shared_means = np.array([shared_means[key] for key in keys])
shared_stds = np.array([shared_stds[key] for key in keys])
# ax.plot([0], [1], color='grey', marker='o')
# ax.plot([0, x[0]], [1, sources_means[0]], color='grey', linestyle='--')
# ax.plot([0, x[0]], [1, shared_means[0]], color='grey', linestyle='--')
x = np.concatenate([[0], x], axis=0)
sources_means = np.concatenate([[1], sources_means], axis=0)
sources_stds = np.concatenate([[0], sources_stds], axis=0)
shared_means = np.concatenate([[1], shared_means], axis=0)
shared_stds = np.concatenate([[0], shared_stds], axis=0)
ax.plot(x, sources_means, color='b', linestyle='--', marker='o', label="exclusive")
ax.plot(x, shared_means, color='r', linestyle='--', marker='o', label="shared")
ax.fill_between(x, sources_means - sources_stds, sources_means + sources_stds, color='b', alpha=0.5)
ax.fill_between(x, shared_means - shared_stds, shared_means + shared_stds, color='r', alpha=0.5)
ax.axvline(keys[0].dim_shared + (keys[0].n_sources * keys[0].dim_sources), color='k', linestyle='--')
ax.set_xlabel("latent dimension")
if ylabel:
ax.set_ylabel(r"mean reconstruction errors $\tilde{r}_{m}$ and $\tilde{r}_{e}$")
else:
ax.set_yticks([])
if title == 'exclusive':
title = r"$d_{e} = " + "{}$".format(keys[0].dim_sources)
elif title == 'n_sources':
title = r"$n = {}$".format(keys[0].n_sources)
ax.set_title(title)
if legend:
ax.legend(loc='center right')
if inset:
inset = inset_axes(ax, width="15%", height="30%", loc=1)
inset.plot(x[-lasts:], sources_means[-lasts:], color='b', linestyle='--', marker='o', label="exclusive")
inset.plot(x[-lasts:], shared_means[-lasts:], color='r', linestyle='--', marker='o', label="shared")
inset.fill_between(x[-lasts:], sources_means[-lasts:] - sources_stds[-lasts:], sources_means[-lasts:] + sources_stds[-lasts:], color='b', alpha=0.5)
inset.fill_between(x[-lasts:], shared_means[-lasts:] - shared_stds[-lasts:], shared_means[-lasts:] + shared_stds[-lasts:], color='r', alpha=0.5)
inset.set_ylim([0, None])
if __name__ == '__main__':
    c = Collection('../data/trash/')
    # Collection defines no compute_fits()/compute_fits_fixed_u0(); summarize the
    # reconstruction errors with the method that does exist in this class.
    means_and_stds = c.get_final_reconstruction_errors_means_stds()
    print(means_and_stds)
|
charleswilmot/lossy_compression
|
src/collection.py
|
collection.py
|
py
| 5,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38200290752
|
import speech_recognition as sr
import pyttsx3
import screen_brightness_control as sbc
import geocoder
from geopy.geocoders import Nominatim
r = sr.Recognizer()
def SpeakText(command):
engine = pyttsx3.init()
engine.say(command)
engine.runAndWait()
while(1):
try:
with sr.Microphone() as source2:
r.adjust_for_ambient_noise(source2, duration=0.2)
audio2 = r.listen(source2)
MyText = r.recognize_google(audio2)
MyText = MyText.lower()
print(MyText)
SpeakText(MyText)
except sr.RequestError as e:
print("Could not request results; {0}".format(e))
except sr.UnknownValueError:
print("unknown error occured")
|
Priyanshu360-cpu/Machine-Learning
|
repeat_audio.py
|
repeat_audio.py
|
py
| 746 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71053381947
|
import logging
import os
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.propagate = False # do not propagate logs to previously defined root logger (if any).
formatter = logging.Formatter('%(asctime)s - %(levelname)s(%(name)s): %(message)s')
# console
consH = logging.StreamHandler()
consH.setFormatter(formatter)
consH.setLevel(logging.INFO)
logger.addHandler(consH)
# file handler
request_file_handler = True
log = logger
resume_result_json = True
class Bunch(dict):
def __init__(self, *args, **kwds):
super(Bunch, self).__init__(*args, **kwds)
self.__dict__ = self
AA_abb_dict = {"<unk>": 0, "<pad>": 1, "<start>": 2, "<eos>": 3, "A": 4, "C": 5, "D": 6, "E": 7,
"F": 8, "G": 9, "H": 10, "I": 11, "K": 12, "L": 13, "M": 14, "N": 15, "P": 16,
"Q": 17, "R": 18, "S": 19, "T": 20, "V": 21, "W": 22, "Y": 23}
AA_abb_dict_T = {v:k for k, v in AA_abb_dict.items()}
AA_dict = {"<unk>": 0, "<pad>": 1, "<start>": 2, "<eos>": 3, "ALA": 4, "CYS": 5, "ASP": 6, "GLU": 7,
"PHE": 8, "GLY": 9, "HIS": 10, "ILE": 11, "LYS": 12, "LEU": 13, "MET": 14, "ASN": 15,"PRO": 16,
"GLN": 17, "ARG": 18, "SER": 19, "THR": 20, "VAL": 21, "TRP": 22, "TYR": 23}
pep_max_length_uniprot = 40
pep_max_length_pepbdb = 40
EGCM_max_length = 400
process_pepbdb = True
process_uniprot = False
pepbdb_source = "/home/chens/data/pepbdb/pepbdb"
pepbdb_processed = 'pepbdb_sorted.json'
uniprot_yes_source = '/home/chens/data/uniprot/uniprot-reviewed_yes.fasta'
uniprot_no_source = '/home/chens/data/uniprot/uniprot-reviewed_no.fasta'
uniprot_processed = 'uniprot.json'
test_result = 'test_result.json'
EGCM_max_value = 100
EGCM_embeded_length = 50
train_mode = 'finetune' #or pretrain
pretrained_model = 'output/07-14/pretrain_model_57.pt'
fintuned_model = 'output/07-14-21/finetune_model_59.pt'
final_model = 'output/07-15-12/z_gen_model_59.pt'
savepath='output/{}'
tbpath = 'tb/default'
generated_savepath = 'generated.json'
batch_size = 16
total_epoch = 60
sample_num = 20
def _cfg_import_export(cfg_interactor, cfg_, prefix='', mode='fill_parser'):
""" Iterate through cfg_ module/object. For known variables import/export
from cfg_interactor (dict, argparser, or argparse namespace) """
for k in dir(cfg_):
if k[0] == '_': continue # hidden
v = getattr(cfg_, k)
if type(v) in [float, str, int, bool]:
if mode == 'fill_parser':
cfg_interactor.add_argument('--{}{}'.format(prefix, k), type=type(v), help='default: {}'.format(v))
elif mode == 'fill_dict':
cfg_interactor['{}{}'.format(prefix, k)] = v
elif mode == 'override':
prek = '{}{}'.format(prefix, k)
if prek in cfg_interactor:
setattr(cfg_, k, getattr(cfg_interactor, prek))
elif type(v) == Bunch: # recurse; descend into Bunch
_cfg_import_export(cfg_interactor, v, prefix=prefix + k + '.', mode=mode)
def _override_config(args, cfg):
""" call _cfg_import_export in override mode, update cfg from:
(1) contents of config_json (taken from (a) loadpath if not auto, or (2) savepath)
(2) from command line args
"""
config_json = vars(args).get('config_json', '')
_cfg_import_export(args, cfg, mode='override')
vae = Bunch(
batch_size=1,
lr=1e-3,
# TODO lrate decay with scheduler
s_iter=0,
n_iter=200000,
beta=Bunch(
start=Bunch(val=1.0, iter=0),
end=Bunch(val=2.0, iter=10000)
),
lambda_logvar_L1=0.0, # default from https://openreview.net/pdf?id=r157GIJvz
lambda_logvar_KL=1e-3, # default from https://openreview.net/pdf?id=r157GIJvz
z_regu_loss='mmdrf', # kl (vae) | mmd (wae) | mmdrf (wae)
cheaplog_every=500, # cheap tensorboard logging eg training metrics
expsvlog_every=20000, # expensive logging: model checkpoint, heldout set evals, word emb logging
chkpt_path='./output/{}/{}_model_{}.pt',
clip_grad=5.0,
)
vae.beta.start.iter = vae.s_iter
vae.beta.end.iter = vae.s_iter + vae.n_iter // 5
model = Bunch(
z_dim=100,
c_dim=2,
emb_dim=150,
pretrained_emb=None, # set True to load from dataset_unl.get_vocab_vectors()
freeze_embeddings=False,
flow=0,
flow_type='',
E_args=Bunch(
h_dim=80, # 20 for amp, 64 for yelp
biGRU=True,
layers=1,
p_dropout=0.0
),
G_args=Bunch(
G_class='gru',
GRU_args=Bunch(
# h_dim = (z_dim + c_dim) for now. TODO parametrize this?
p_word_dropout=0.3,
p_out_dropout=0.3,
skip_connetions=False,
),
deconv_args=Bunch(
max_seq_len=pep_max_length_pepbdb if train_mode=='finetune' else pep_max_length_uniprot,
num_filters=100,
kernel_size=4,
num_deconv_layers=3,
useRNN=False,
temperature=1.0,
use_batch_norm=True,
num_conv_layers=2,
add_final_conv_layer=True,
),
),
C_args=Bunch(
min_filter_width=3,
max_filter_width=5,
num_filters=100,
dropout=0.5
)
)
# config for the losses, constant during training & phases
losses = Bunch(
wae_mmd=Bunch(
sigma=7.0, # ~ O( sqrt(z_dim) )
kernel='gaussian',
# for method = rf
rf_dim=500,
rf_resample=False
),
)
|
ChenShengsGitHub/structure-based-peptide-generator
|
cfg.py
|
cfg.py
|
py
| 5,610 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36766343172
|
from sys import stdin
n, m, v = map(int, stdin.readline().split())
graph = [[0]*(n+1) for _ in range(n+1)]
visited = [False]*(n+1)
# read in the edges
for _ in range(m):
x, y = map(int, stdin.readline().split())
graph[x][y] = 1
graph[y][x] = 1
def dfs(v):
visited[v] = True
print(v, end=" ")
for i in range(1, n+1):
if not visited[i] and graph[v][i] == 1:
dfs(i)
def bfs(v):
visited[v] = False
queue = [v]
while queue:
v = queue.pop(0)
print(v, end=" ")
for i in range(1, n+1):
if visited[i] and graph[v][i] == 1:
queue.append(i)
visited[i] = False
dfs(v)
print()
bfs(v)
# the earlier runtime error was caused by calling dfs()/bfs() without the start vertex v
|
jiyoung-dev/Algorithm
|
2021study/dfs bfs/b1260_dfs와bfs.py
|
b1260_dfs와bfs.py
|
py
| 727 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42862238176
|
import pgzrun
import random
import time
import pygame.time
# import pygame
TITLE = "Brickbreaker"
# initial score is 0
# time is used to get the initial time and store it in the variable 'start_time'
score = 0
# as the ball hits a brick, the score changes by 10
score_point = 10
start_time = time.time()
elapsed_time = 0
# setting the size of the game window and number of bricks per row
WIDTH = 640
HEIGHT = 480
PADDLE_HEIGHT = 1
BRICKS_PER_ROW = 10
# setting the paddle initial position
paddle = Actor("paddlered.png")
paddle.x = 320
paddle.y = 440
# choosing the ball type and setting the initial ball position
ball = Actor("ballgrey.png")
ball.x = 320
ball.y = 340
# setting the initial speed
ball_x_speed = 2
ball_y_speed = 2
bricks = []
# placing the bricks in the screen
current_brick_pos_x = 64 / 2
current_brick_pos_y = 32 / 2
# Brick sprites are 64 by 32
# defining the code for different types of bricks
brick_sprites = ["element_green_rectangle.png", "element_yellow_rectangle.png", "element_red_rectangle.png"]
middle_brick = ["element_grey_rectangle.png"]
# this will be used to check if the game is over or not, so the game can be restarted and everything set back to its original position
game_over_box = False
# anything we want to display on the screen, like the ball, paddle or score, must be drawn in this function
def draw():
global start_time
screen.fill((100, 149, 237))
paddle.draw()
ball.draw()
for brick in bricks:
brick.draw()
# to draw the score and elapsed time
screen.draw.text("Score: " + str(score), bottomleft=(10, 480), color="red")
update_elapsed_time()
screen.draw.text("Time: " + str(elapsed_time), bottomright = (630, 480), color = "red")
#if game is over it will call game over function to draw the message
game_over()
def update_elapsed_time():
global elapsed_time
global start_time
    # this is the main code for tracking the time:
    # it reads the current device time and subtracts the start time recorded when the game began,
    # so the elapsed time keeps updating as the game goes on
elapsed_time = int(time.time() - start_time)
def update_paddle():
    # it checks the horizontal movement of the mouse
    # if the mouse has moved, the paddle follows the mouse position
global paddle
if pygame.mouse.get_rel()[0] != 0:
paddle.x = pygame.mouse.get_pos()[0]
# if the mouse is not moving it will follow the keys
else:
if keyboard.a:
if (paddle.x - 4 > + 52):
paddle.x = paddle.x - 4
if keyboard.d:
if (paddle.x + 4 < 640 - 48):
paddle.x = paddle.x + 4
# updates the position of given parameters
def update_ball():
global ball_x_speed
global ball_y_speed
global score
global game_over_box
ball.x = ball.x + ball_x_speed
ball.y = ball.y + ball_y_speed
    # checks whether the ball has hit the side walls
if (ball.x > WIDTH - 16) or (ball.x < 0):
ball_x_speed = ball_x_speed * -1
    # checks whether the ball has hit the top or bottom wall; multiplying the speed by -1 reverses the direction
if (ball.y > HEIGHT - 16) or (ball.y < 0):
ball_y_speed = ball_y_speed * -1
    # checks whether the ball has collided with the paddle; if so, reverse the direction
if ball.colliderect(paddle):
ball_y_speed = ball_y_speed * -1
    # checks the ball position; if the ball reaches the bottom, game over becomes true,
    # which will draw the game over sign on the screen
if (ball.y > HEIGHT - 16):
game_over_box = True
for brick in bricks:
        # checks if the ball collides with a brick: the brick gets removed,
        # the vertical speed is multiplied by -1 so the ball bounces back,
        # and the score increases by 10
if ball.colliderect(brick):
bricks.remove(brick)
ball_y_speed = ball_y_speed * -1
score = score + score_point
def update():
update_paddle()
update_ball()
update_elapsed_time()
# this function is used to create the row of bricks with the given sprite and position
def place_brick_row(sprite, pos_x, pos_y):
any_brick = BRICKS_PER_ROW // 2
for i in range(BRICKS_PER_ROW):
brick = Actor(sprite)
brick.x = pos_x + i * 64
brick.y = pos_y
if i == any_brick:
any_brick = random.choice(middle_brick)
brick.image = any_brick
bricks.append(brick)
for brick_sprite in brick_sprites:
place_brick_row(brick_sprite, current_brick_pos_x, current_brick_pos_y)
current_brick_pos_y += 32
def game_over():
if game_over_box:
message = "Game Over"
restart_game = "Press Enter to Restart"
message_width = len(message) * 30
message_height = 50
# draws the message in the screen game over and want to restart
screen.draw.filled_rect(
Rect(WIDTH / 2 - message_width / 2, HEIGHT / 2 - message_height / 2, message_width, message_height),
(255, 0, 0))
screen.draw.text(message, center=(WIDTH / 2, HEIGHT / 2), fontsize=40, color="white")
screen.draw.text(restart_game, center=(WIDTH / 2, HEIGHT / 1.5), fontsize=40, color="white")
# if user press enter it will call restart function
if keyboard.RETURN:
restart()
# reset everything back as usual
def restart():
global score, ball_x_speed, ball_y_speed, game_over_box, current_brick_pos_x, current_brick_pos_y, bricks, start_time, elapsed_time
score = 0
start_time = time.time()
elapsed_time = 0
ball.x = 320
ball.y = 340
ball_x_speed = 2
ball_y_speed = 2
paddle.x = 320
paddle.y = 440
bricks = []
current_brick_pos_x = 64 / 2
    current_brick_pos_y = 32 / 2
for brick_sprite in brick_sprites:
place_brick_row(brick_sprite, current_brick_pos_x, current_brick_pos_y)
current_brick_pos_y += 32
game_over_box = False
pgzrun.go()
|
Nirrdsh/py-game
|
Assignment.py
|
Assignment.py
|
py
| 6,033 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34050613320
|
import tensorflow as tf
import numpy as np
def model(X):
X = X / 255
conv1 = tf.layers.batch_normalization(tf.layers.conv2d(X, 64, 6, activation=tf.nn.leaky_relu, padding="SAME"))
pool1 = tf.layers.max_pooling2d(conv1, 2, 2)
conv2 = tf.layers.batch_normalization(tf.layers.conv2d(pool1, 128, 3, activation=tf.nn.leaky_relu, padding="SAME"))
conv3 = tf.layers.batch_normalization(tf.layers.conv2d(conv2, 128, 1, activation=tf.nn.leaky_relu, padding="SAME"))
pool2 = tf.layers.max_pooling2d(conv3, 2, 2)
conv4 = tf.layers.batch_normalization(tf.layers.conv2d(pool2, 256, 3, activation=tf.nn.leaky_relu, padding="SAME"))
conv5 = tf.layers.batch_normalization(tf.layers.conv2d(conv4, 256, 1, activation=tf.nn.leaky_relu, padding="SAME"))
conv6 = tf.layers.batch_normalization(tf.layers.conv2d(conv5, 256, 3, activation=tf.nn.leaky_relu, padding="SAME"))
pool3 = tf.layers.max_pooling2d(conv6, 2, 2)
conv7 = tf.layers.batch_normalization(tf.layers.conv2d(pool3, 512, 1, activation=tf.nn.leaky_relu, padding="SAME"))
conv8 = tf.layers.batch_normalization(tf.layers.conv2d(conv7, 512, 3, activation=tf.nn.leaky_relu, padding="SAME"))
conv9 = tf.layers.batch_normalization(tf.layers.conv2d(conv8, 512, 1, activation=tf.nn.leaky_relu, padding="SAME"))
pool4 = tf.layers.max_pooling2d(conv9, 2, 2)
conv10 = tf.layers.batch_normalization(tf.layers.conv2d(pool4, 1024, 3, activation=tf.nn.leaky_relu, padding="SAME"))
out = tf.layers.conv2d(conv10, 25, 1)
return out
def calc_iou(boxes1,boxes2):
#convert [x,y,w,h] to [x1,y1,x2,y2]
boxes1_t=tf.stack([
boxes1[..., 0] - boxes1[...,2] / 2.0,
boxes1[..., 1] - boxes1[...,3] / 2.0,
boxes1[..., 0] + boxes1[...,2] / 2.0,
boxes1[..., 1] + boxes1[...,3] / 2.0],
axis=-1)
boxes2_t = tf.stack([
boxes2[..., 0] - boxes2[..., 2] / 2.0,
boxes2[..., 1] - boxes2[..., 3] / 2.0,
boxes2[..., 0] + boxes2[..., 2] / 2.0,
boxes2[..., 1] + boxes2[..., 3] / 2.0],
axis=-1)
#calc left up and right down
lu = tf.maximum(boxes1_t[...,:2],boxes2_t[...,:2])
rd = tf.minimum(boxes1_t[...,2:],boxes2_t[...,2:])
# intersection
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[..., 0] * intersection[..., 1]
#calculate boxes1 and boxes2 square
square1 = boxes1[...,2] * boxes1[...,3]
square2 = boxes2[..., 2] * boxes2[..., 3]
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss(pred, out):
out_score = out[..., 4]
pred_score = pred[..., 4]
pred_cat = pred[..., 5:]
out_cat = out[..., 5:]
pred_boxes = pred[...,0:4]
out_boxes = out[...,0:4]
obj=out_score
no_obj = tf.ones_like(out_score) - out_score
pred_boxes=tf.stack([
pred_boxes[...,0],
pred_boxes[...,1],
tf.square(pred_boxes[...,2]),
tf.square(pred_boxes[...,3])],
axis=-1)
iou=calc_iou(pred_boxes,out_boxes)
#object loss
loss_obj= obj * tf.square(iou - pred_score)
loss_obj = tf.reduce_mean(tf.reduce_sum( loss_obj,axis=[1,2] ))
#no object loss
loss_noobj = no_obj * tf.square(pred_score)
loss_noobj = tf.reduce_mean(tf.reduce_sum(loss_noobj, axis=[1, 2]))
# x,y,w,h loss
pred_boxes_l=tf.stack([
pred_boxes[...,0],
pred_boxes[...,1],
pred_boxes[...,2],
pred_boxes[...,3]],
axis=-1)
loss_box = tf.reduce_sum( tf.square(pred_boxes_l - out_boxes), axis=-1)
loss_box = obj * loss_box
loss_box = tf.reduce_mean( tf.reduce_sum (loss_box,axis=[1,2] ) )
#class loss
loss_class = tf.reduce_sum( tf.square(out_cat - pred_cat),axis=-1 )
loss_class = obj * loss_class
loss_class = tf.reduce_mean( tf.reduce_sum(loss_class ,axis=[1,2]))
return loss_class + loss_obj + .5 * loss_noobj + 5 * loss_box
|
AbdelrahmanEldakrony/yolo-object-detection
|
yolo.py
|
yolo.py
|
py
| 4,066 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71276866109
|
import setuptools
with open("README.md", "r") as file:
long_description = file.read()
with open("requirements.txt", "r") as file:
required_packages = file.read().splitlines()
setuptools.setup(
name="labscribe",
version="0.4.7",
author="Jay Morgan",
author_email="[email protected]",
description="A small package for managing python experiment scripts",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jaypmorgan/labscribe",
packages=setuptools.find_packages(),
python_requires=">=3",
install_requires=required_packages,
include_package_data=True,
package_data={"labscribe": ["labscribe/data/*.sql"]})
|
jaypmorgan/labscribe
|
setup.py
|
setup.py
|
py
| 729 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19124524287
|
from google.appengine.ext import ndb
from components import utils
import gae_ts_mon
import config
import model
FIELD_BUCKET = 'bucket'
# Override default target fields for app-global metrics.
GLOBAL_TARGET_FIELDS = {
'job_name': '', # module name
'hostname': '', # version
'task_num': 0, # instance ID
}
CREATE_COUNT = gae_ts_mon.CounterMetric(
'buildbucket/builds/created',
description='Build creation',
)
START_COUNT = gae_ts_mon.CounterMetric(
'buildbucket/builds/started',
description='Build start',
)
COMPLETE_COUNT = gae_ts_mon.CounterMetric(
'buildbucket/builds/completed',
description='Build completion, including success, failure and cancellation',
)
HEARTBEAT_COUNT = gae_ts_mon.CounterMetric(
'buildbucket/builds/heartbeats',
description='Failures to extend a build lease'
)
LEASE_COUNT = gae_ts_mon.CounterMetric(
'buildbucket/builds/leases',
description='Successful build lease extension',
)
LEASE_EXPIRATION_COUNT = gae_ts_mon.CounterMetric(
'buildbucket/builds/lease_expired',
description='Build lease expirations'
)
CURRENTLY_PENDING = gae_ts_mon.GaugeMetric(
'buildbucket/builds/pending',
description='Number of pending builds',
)
CURRENTLY_RUNNING = gae_ts_mon.GaugeMetric(
'buildbucket/builds/running',
description='Number of running builds'
)
LEASE_LATENCY = gae_ts_mon.NonCumulativeDistributionMetric(
'buildbucket/builds/never_leased_duration',
description=(
'Duration between a build is created and it is leased for the first time'),
)
SCHEDULING_LATENCY = gae_ts_mon.NonCumulativeDistributionMetric(
'buildbucket/builds/scheduling_duration',
description='Duration of a build remaining in SCHEDULED state',
)
def fields_for(build, **extra):
if build:
tags = dict(t.split(':', 1) for t in build.tags)
fields = {
'builder': tags.get('builder', ''),
'user_agent': tags.get('user_agent', ''),
FIELD_BUCKET: build.bucket,
}
else:
fields = {
'builder': '',
'user_agent': '',
FIELD_BUCKET: '<no bucket>',
}
fields.update(extra)
return fields
def increment(metric, build, **fields): # pragma: no cover
"""Increments a counter metric."""
metric.increment(fields_for(build, **fields))
def increment_complete_count(build): # pragma: no cover
assert build
assert build.status == model.BuildStatus.COMPLETED
increment(
COMPLETE_COUNT,
build,
result=str(build.result),
failure_reason=str(build.failure_reason or ''),
cancelation_reason=str(build.cancelation_reason or ''),
)
@ndb.tasklet
def set_build_status_metric(metric, bucket, status):
q = model.Build.query(
model.Build.bucket == bucket,
model.Build.status == status)
value = yield q.count_async()
metric.set(value, {FIELD_BUCKET: bucket}, target_fields=GLOBAL_TARGET_FIELDS)
@ndb.tasklet
def set_build_latency(metric, bucket, must_be_never_leased):
q = model.Build.query(
model.Build.bucket == bucket,
model.Build.status == model.BuildStatus.SCHEDULED,
)
if must_be_never_leased:
q = q.filter(model.Build.never_leased == True)
else:
# Reuse the index that has never_leased
q = q.filter(model.Build.never_leased.IN((True, False)))
now = utils.utcnow()
dist = gae_ts_mon.Distribution(gae_ts_mon.GeometricBucketer())
for e in q.iter(projection=[model.Build.create_time]):
latency = (now - e.create_time).total_seconds()
dist.add(latency)
if dist.count == 0:
dist.add(0)
metric.set(dist, {FIELD_BUCKET: bucket}, target_fields=GLOBAL_TARGET_FIELDS)
# Metrics that are per-app rather than per-instance.
GLOBAL_METRICS = [
CURRENTLY_PENDING,
CURRENTLY_RUNNING,
LEASE_LATENCY,
SCHEDULING_LATENCY,
]
def update_global_metrics():
"""Updates the metrics in GLOBAL_METRICS."""
futures = []
for b in config.get_buckets_async().get_result():
futures.extend([
set_build_status_metric(
CURRENTLY_PENDING, b.name, model.BuildStatus.SCHEDULED),
set_build_status_metric(
CURRENTLY_RUNNING, b.name, model.BuildStatus.STARTED),
set_build_latency(LEASE_LATENCY, b.name, True),
set_build_latency(SCHEDULING_LATENCY, b.name, False),
])
for f in futures:
f.check_success()
|
mithro/chromium-infra
|
appengine/cr-buildbucket/metrics.py
|
metrics.py
|
py
| 4,246 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40238915211
|
from setuptools import setup, find_packages
requires = [
'buildbot',
'python-debian',
'xunitparser',
]
setup(
name='buildbot-junit',
version='0.1',
description='Junit for buildbot',
author='Andrey Stolbuhin',
author_email='[email protected]',
url='https://github.com/ZeeeL/buildbot-junit',
keywords='buildbot xunit junit steps shellcommand',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=requires,
)
|
ZeeeL/buildbot-junit
|
setup.py
|
setup.py
|
py
| 535 |
python
|
en
|
code
| 3 |
github-code
|
6
|
26273852906
|
"""Module for test case fixturing functions.
"""
from birgitta import timing
__all__ = ['dataframes', 'write_fixtures']
def dataframes(fixtures, variant_name, spark_session):
"""Makes dataframes from fixtures.
Args:
fixtures (dict): Dict of fixtures
variant_name (str): Name of fixture variant
    spark_session (SparkSession): Spark session used to create fixtures
Returns:
A dict of `dataset name => dataframe` pairs
"""
# FUTURE: Try writing directly to parquet using pyarrow instead. Could be faster. # noqa 501
ret = {}
for fixture in fixtures:
ret[fixture.dataset.name] = fixture.df(variant_name,
spark_session)
return ret
def write_fixtures(fixtures, variant_name, spark_session, dataframe_source):
"""Write fixtures to storage.
Args:
fixtures (dict): Dict of fixtures
variant_name (str): Name of fixture variant
spark_session (SparkSession): Spark session used to create fixtures
dataframe_source (DataframeSource): The source to write to, e.g. FS
"""
timing.time("write_fixtures start")
dfs = dataframes(fixtures, variant_name, spark_session)
for ds_name in dfs.keys():
dataframe_source.write(dfs[ds_name], ds_name)
timing.time("write_fixtures end")
|
telia-oss/birgitta
|
birgitta/recipetest/localtest/fixturing.py
|
fixturing.py
|
py
| 1,427 |
python
|
en
|
code
| 13 |
github-code
|
6
|
30078414055
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical
print(tf.__version__)
train = pd.read_csv(r"sign_mnist_train.csv")
test = pd.read_csv(r"sign_mnist_test.csv")
print(train.head())
train_labels = train['label']
test_labels = test['label']
del train['label']
del test['label']
train_images = train.values
test_images = test.values
print(train_images.shape, test_images.shape)
# Normalize the data
x_train = train_images / 255.0
x_test = test_images / 255.0
# Reshaping the data from 1-D to 3-D as required through input by CNN's
x_train = x_train.reshape(-1,28,28,1)
x_test = x_test.reshape(-1,28,28,1)
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)
print("image shape-", x_train.shape, x_test.shape)
print("label shape-", y_train.shape, y_test.shape)
model = Sequential()
model.add(layers.Conv2D(32, (3,3), input_shape=(28,28,1), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(32, (3,3), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(25, activation='softmax'))
print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, validation_data = (x_test, y_test), batch_size=128)
model.evaluate(x_test, y_test)
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
# graph
plot_graphs(history, 'accuracy')
plot_graphs(history, 'loss')
|
daxjain789/Sign-Language-MNIST-with-CNN
|
sign_language.py
|
sign_language.py
|
py
| 2,118 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4783520916
|
from railrl.misc.exp_util import *
from railrl.launchers.exp_launcher import tdm_experiment
import railrl.misc.hyperparameter as hyp
from railrl.config.base_exp_config import variant as base_exp_variant
from multiworld.envs.mujoco.cameras import *
from multiworld.core.image_env import get_image_presampled_goals as image_env_presampled_goals_func
variant = deep_update(base_exp_variant, dict(
rl_variant=dict(
do_state_exp=False,
algo_kwargs=dict(
tdm_kwargs=dict(
max_tau=25,
),
),
presample_goals=True,
generate_goal_dataset_fctn=image_env_presampled_goals_func,
goal_generation_kwargs=dict(
num_goals_presampled=10000,
),
replay_buffer_kwargs=dict(
store_distributions=True,
),
use_state_reward=True,
vae_wrapped_env_kwargs=dict(),
reward_params=dict(
type='vectorized_state_distance',
),
train_algo='mf-tdm',
snapshot_gap=25,
),
observation_modality='latent',
tag='',
))
env_params = {
'pm': {
'env_id': ['Image48PointmassUWallTrainEnvBig-v1'],
# 'rl_variant.vae_base_path': [
# 'your-base-path-here',
# ],
'rl_variant.vae_path': [
'your-path-here',
],
'rl_variant.algo_kwargs.base_kwargs.max_path_length': [100],
'rl_variant.algo_kwargs.tdm_kwargs.max_tau': [25],
'rl_variant.algo_kwargs.base_kwargs.num_epochs': [200],
'rl_variant.exploration_type': ['epsilon'],
'rl_variant.exploration_noise': [0.1],
'rl_variant.algo_kwargs.base_kwargs.reward_scale': [1e0],
'rl_variant.snapshot_gap': [10],
'rl_variant.vis_kwargs.save_period': [20],
'rl_variant.vis_kwargs.vis_list': [[
'v',
]],
'rl_variant.vis_kwargs.vis_blacklist': [[
'reconstr_image_reproj_observation',
]],
'rl_variant.vae_wrapped_env_kwargs.v_func_heatmap_bounds': [(-1.5, 0.0)],
},
'pnr': {
'env_id': ['Image84SawyerPushAndReachArenaTrainEnvBig-v0'],
# 'rl_variant.vae_base_path': [
# 'your-base-path-here',
# ],
'rl_variant.vae_path': [
'your-path-here',
],
'rl_variant.algo_kwargs.base_kwargs.max_path_length': [100],
'rl_variant.algo_kwargs.tdm_kwargs.max_tau': [25],
'rl_variant.algo_kwargs.base_kwargs.batch_size': [2048],
'rl_variant.algo_kwargs.base_kwargs.num_epochs': [500],
'rl_variant.exploration_type': ['ou'],
'rl_variant.exploration_noise': [0.3],
'rl_variant.algo_kwargs.base_kwargs.reward_scale': [1e1],
'rl_variant.snapshot_gap': [25],
'rl_variant.vis_kwargs.save_period': [50],
'rl_variant.vis_kwargs.vis_list': [[
'plt',
]],
},
}
def process_variant(variant):
rl_variant = variant['rl_variant']
if args.debug:
rl_variant['algo_kwargs']['base_kwargs']['num_epochs'] = 4
rl_variant['algo_kwargs']['base_kwargs']['batch_size'] = 128
rl_variant['vis_kwargs']['num_samples_for_video'] = 2
rl_variant['vae_wrapped_env_kwargs']['num_samples_for_latent_histogram'] = 100
if 'env_kwargs' in variant and 'num_goals_presampled' in variant['env_kwargs']:
variant['env_kwargs']['num_goals_presampled'] = 10
if 'goal_generation_kwargs' in rl_variant and \
'num_goals_presampled' in rl_variant['goal_generation_kwargs']:
rl_variant['goal_generation_kwargs']['num_goals_presampled'] = 10
if rl_variant['use_state_reward']:
assert 'latent' not in rl_variant['reward_params']['type']
rl_variant['training_mode'] = 'train_env_goals'
rl_variant['vis_kwargs']['save_video_env_only'] = True
rl_variant['qf_kwargs']['structure'] = 'none'
rl_variant['vf_kwargs']['structure'] = 'none'
rl_variant['replay_buffer_kwargs']['ob_keys_to_save'] = [
'state_observation', 'state_desired_goal', 'state_achieved_goal',
'latent_observation', 'latent_desired_goal', 'latent_achieved_goal',
]
rl_variant['replay_buffer_kwargs']['goal_keys'] = ['state_desired_goal', 'latent_desired_goal']
rl_variant['replay_buffer_kwargs']['desired_goal_keys'] = ['state_desired_goal', 'latent_desired_goal']
variant['tag'] = 'max-tau-' + str(rl_variant['algo_kwargs']['tdm_kwargs']['max_tau'])
rl_variant['train_algo'] = rl_variant['train_algo'] + "-" + variant['tag']
variant['train_algo'] = rl_variant['train_algo']
if __name__ == "__main__":
args = parse_args()
preprocess_args(args)
search_space = env_params[args.env]
load_vae_base_path_meta_data(search_space)
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters(print_info=False)):
process_variant(variant)
run_experiment(
exp_function=tdm_experiment,
variant=variant,
args=args,
exp_id=exp_id,
exp_type='train-tdm',
)
|
snasiriany/leap
|
experiments/image/train_tdm.py
|
train_tdm.py
|
py
| 5,244 |
python
|
en
|
code
| 45 |
github-code
|
6
|
15909966165
|
'''
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
'''
def power_sum(exp):
    total = 0
    power = 2 ** int(exp)
    for digit in str(power):
        total = total + int(digit)
    return total
print(power_sum(1000))
|
beingnishas/projecteuler
|
016_Power_digit_sum.py
|
016_Power_digit_sum.py
|
py
| 276 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23378159417
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.utils as utils
LRELU_SLOPE = 0.1
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
def init_weights(m, mean=0.0, std=0.01):
if isinstance(m, nn.Conv1d):
m.weight.data.normal_(mean, std)
class res_block1(nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
self.h = h
self.convs1 = nn.ModuleList([
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x): # lrelu -> cnn1 -> lrelu -> cnn2 -> residual x
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
utils.remove_weight_norm(l)
for l in self.convs2:
utils.remove_weight_norm(l)
class res_block2(nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super().__init__()
self.h = h
self.convs = nn.ModuleList([
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
utils.remove_weight_norm(l)
class generator(nn.Module):
def __init__(self, h):
super().__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = utils.weight_norm(nn.Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
resblock = res_block1 if h.resblock == '1' else res_block2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(utils.weight_norm(
nn.ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = utils.weight_norm(nn.Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x) # This is the first layer that upsamples the number of channels from 80 to 8192
for i in range(self.num_upsamples): # Stacks the transpose-conv + resblocks 'num_upsamples' times.
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x) # Decreases the num of channels
xs = None
for j in range(self.num_kernels): # Each iteration inputs into the resblocks
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels # In the end, all the individual outputs from the resblocks is meaned.
# After all the resblocks, the final output is the dim of 32 in the current configuration.
x = F.leaky_relu(x)
x = self.conv_post(x) # Takes the 32 input channels and gives 1 channel of output
x = torch.tanh(x)
return x # Final output is (bs, 1, 2097152) for default config.
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
utils.remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
utils.remove_weight_norm(self.conv_pre)
utils.remove_weight_norm(self.conv_post)
class discriminator_p(nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super().__init__()
self.period = period
norm_f = utils.weight_norm if use_spectral_norm == False else utils.spectral_norm
self.convs = nn.ModuleList([
norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = list()
b, c, t = x.shape
if t % self.period != 0:
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), 'reflect')
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class multi_period_discriminator(nn.Module):
def __init__(self):
super().__init__()
self.discriminators = nn.ModuleList([
discriminator_p(i) for i in [2, 3, 5, 7, 11]
])
def forward(self, y, y_hat): # Takes actual out (y) and fake out (y_hat)
y_d_rs, y_d_gs, fmap_rs, fmap_gs = list(), list(), list(), list()
for i, d in enumerate(self.discriminators): # each discriminator has a different kernel size (but 1 depth) to compute only 1 period of audio.
            y_d_r, fmap_r = d(y) # calculates discrimination score for real (hence, 'r'). Look, I didn't pick the variable names okay.
y_d_g, fmap_g = d(y_hat) # 'g' stands for generated
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class discriminator_s(nn.Module):
def __init__(self, use_spectral_norm=False):
super().__init__()
norm_f = utils.weight_norm if use_spectral_norm == False else utils.spectral_norm
self.convs = nn.ModuleList([
norm_f(nn.Conv1d(1, 128, 15, 1, padding=7)),
norm_f(nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = list()
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class multi_scale_discriminator(nn.Module):
def __init__(self):
super().__init__()
self.discriminators = nn.ModuleList([
discriminator_s(use_spectral_norm=True),
discriminator_s(),
discriminator_s(),
])
self.meanpools = nn.ModuleList([
nn.AvgPool1d(4, 2, padding=2),
nn.AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat): # in MSD, you do not reshape the input data to differentiate between different period of the input audio.
y_d_rs, y_d_gs, fmap_rs, fmap_gs = list(), list(), list(), list()
for i, d in enumerate(self.discriminators):
if i != 0: # you do not average-pool the raw audio. Also, you use spectral_norm on the raw audio.
y = self.meanpools[i-1](y) # average-pooling the inputs
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y) # discrimination scores for the inputs
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r) # fmap are the feature maps. It's audio
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g): # it is the mean absolute error a.k.a L1 Loss
loss = 0 # all the losses calculated is added to the total loss.
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2 # 2 is just a factor added to increase the influence of this loss to the overall loss
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss, r_losses, g_losses = 0, list(), list()
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2) # real loss
g_loss = torch.mean(dg**2) # gen loss
loss += (r_loss + g_loss) # GAN Loss
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss, gen_losses = 0, list()
for dg in disc_outputs:
l = torch.mean((1-dg)**2) # GAN Loss for generators
gen_losses.append(l)
loss += l
return loss, gen_losses
if __name__ == '__main__':
model = multi_period_discriminator()
[print(model.discriminators[i].period) for i in range(5)]
|
uuzall/hifi_gan
|
model.py
|
model.py
|
py
| 10,620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19018842256
|
# https://atcoder.jp/contests/arc147/submissions/34636074
import sys
N = int(sys.stdin.readline().rstrip())
P = [ int(x) for x in sys.stdin.readline().rstrip().split() ]
ans = []
for i in range(N-2):
for j in range(N-3, i-1, -1):
if (j+1)%2 == P[j]%2 and (j+3)%2 != P[j+2]%2:
P[j], P[j+2] = P[j+2], P[j]
ans.append(['B', j+1])
for i in range(N-1):
if (i+1)%2 != P[i]%2 and (i+2)%2 != P[i+1]%2:
P[i], P[i+1] = P[i+1], P[i]
ans.append(['A', i+1])
for i in range(N-2):
for j in range(N-3, i-1, -1):
if P[j] > P[j+2]:
P[j], P[j+2] = P[j+2], P[j]
ans.append(['B', j+1])
print(len(ans))
for el in ans:
print("{} {}".format(el[0], el[1]))
|
minheibis/atcoder
|
questions/ARC147/B/ref_00.py
|
ref_00.py
|
py
| 743 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2333095008
|
"""
Random agent on Farm0
=====================
"""
from rlberry.agents import AgentWithSimplePolicy
from rlberry.manager import AgentManager, evaluate_agents, plot_writer_data
from rlberry_farms.game0_env import Farm0
from rlberry.agents.torch.utils.training import model_factory_from_env
import numpy as np
env_ctor, env_kwargs = Farm0, {}
class InstallationTestAgent(AgentWithSimplePolicy):
name = "InstallationTestAgent"
def __init__(self, env, **kwargs):
AgentWithSimplePolicy.__init__(self, env, **kwargs)
def fit(self, budget=10, **kwargs):
observation = self.env.reset()
episode_reward = 0
for ep in range(int(budget)):
action = self.policy(observation)
observation, reward, done, _ = self.env.step(action)
episode_reward += reward
if done:
self.writer.add_scalar("episode_rewards", episode_reward, ep)
episode_reward = 0
self.env.reset()
def policy(self, observation):
return 1
if __name__ == "__main__":
manager = AgentManager(
InstallationTestAgent,
(env_ctor, env_kwargs),
agent_name="InstallationTestAgent",
fit_budget=10,
eval_kwargs=dict(eval_horizon=150),
n_fit=4,
parallelization="process",
mp_context="spawn",
)
manager.fit()
evaluation = evaluate_agents([manager], n_simulations=2, show=False).values
print("Installation test : Done!")
# This template file gives mean evaluation reward 96.
|
farm-gym/rlberry-farms
|
examples/installation_test.py
|
installation_test.py
|
py
| 1,556 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12424083867
|
__author__ = "Vanessa Sochat, Alec Scott"
__copyright__ = "Copyright 2021-2022, Vanessa Sochat and Alec Scott"
__license__ = "Apache-2.0"
from .command import Command
import json
# Every command must:
# 1. subclass Command
# 2. defined what container techs supported for (class attribute) defaults to all
# 3. define run function with kwargs
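# A minimal hypothetical command following the three rules above (illustrative
# only, not part of the original module); it assumes the helpers used by the
# commands below (check, run_command, return_success) behave the same here.
class TechVersion(Command):
    supported_for = ["docker", "podman"]
    pre_message = "Getting Container Tech Version..."
    def run(self, **kwargs):
        """
        Print the version string of the active container technology.
        """
        # Always run this first to make sure container tech is valid
        self.check(**kwargs)
        result = self.run_command([self.tech, "--version"])
        if result:
            return result
        return self.return_success()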
class Size(Command):
supported_for = ["docker", "podman"]
pre_message = "Sizing Container..."
def run(self, **kwargs):
"""
Get a container size.
"""
# Always run this first to make sure container tech is valid
self.check(**kwargs)
# These are both required for docker/podman
container_name = self.kwargs["container_name"]
out, err = self.execute_host(
[
self.tech,
"container",
"ls",
"-s",
"--filter",
"name=%s" % container_name,
"--format",
'"{{ json .}}"',
]
)
if not err:
out = json.loads(out.strip().strip('"'))
return self.return_success(out["Size"])
return self.return_failure(err)
class InspectContainer(Command):
supported_for = ["docker", "podman"]
pre_message = "Inspecting Container..."
def run(self, **kwargs):
"""
Inspect a container fully, or specific sections
"""
# Always run this first to make sure container tech is valid
self.check(**kwargs)
# These are both required for docker/podman
container_name = self.kwargs["container_name"]
# inspect defaults to labels and environment
if self.args:
for section in self.args:
result = self.run_command(
[
self.tech,
"inspect",
"--format",
"{{json .%s }}" % section.capitalize(),
container_name,
]
)
else:
result = self.run_command([self.tech, "inspect", container_name])
if result:
return result
return self.return_success()
|
syspack/paks
|
paks/commands/inspect.py
|
inspect.py
|
py
| 2,263 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42666755991
|
# https://www.youtube.com/watch?v=y5DkiL6gIzY&ab_channel=Makekit
# https://makecode.microbit.org/#editor
pitch = 0
arm = 0 #Arm means on or off for the drone
roll = 0
throttle = 0
yaw = 0
radio_group = 7
radio.set_group(radio_group)
# Show the radio group number on the display
basic.show_number(radio_group)
# Function for when the a button is pressed on the transmitter
# Want to decrease the speed
# The input part is the interrupt that detects the button being pressed
# The section above the input is the function part where we'll add our code
def on_button_pressed_a():
global throttle
throttle -= 5
input.on_button_pressed(Button.A, on_button_pressed_a)
# Function for when the b button is pressed on the transmitter
# Want to increase the speed
# Inside the functions, the variables are local
# Need to specify that it is the global variable we are changing
def on_button_pressed_b():
global throttle
throttle += 5
input.on_button_pressed(Button.B, on_button_pressed_b)
# To turn on and off the engines
def on_button_pressed_ab():
global arm
global throttle
throttle = 0
if arm == 0:
arm = 1
else:
arm = 0
input.on_button_pressed(Button.AB, on_button_pressed_ab)
# Shake to stop feature
def on_gesture_shake():
global arm
arm = 0
input.on_gesture(Gesture.SHAKE, on_gesture_shake)
def on_forever():
#basic.show_number(throttle) # To display value on the board
pitch = input.rotation(Rotation.PITCH)
roll = input.rotation(Rotation.ROLL)
    # To light a pixel when the motors are turned on
basic.clear_screen()
if arm == 1:
led.plot(0,0) # Top Left Pixel
# Throttle
    led.plot(0, Math.map(throttle,0,100,4,0)) # How to represent the throttle when only LEDs 0 - 4 are available
    # map(value you want to change, the lowest number it can be, the highest number it can be,
    # the lowest output number you want it to be, the highest output number you want it to be)
# roll and pitch
led.plot(Math.map(roll,-45, 45,0,4), Math.map(pitch,-45, 45,0,4)) # x-axis -> moves side to side, y-axis -> moves up and down
    # These calls each send one named value from the transmitter to the drone, telling it whether the value is roll, pitch, etc. and how big it is
radio.send_value("P", pitch)
radio.send_value("A", arm)
radio.send_value("R", roll)
radio.send_value("T", throttle)
radio.send_value("Y", yaw)
basic.forever(on_forever)
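# --- Hypothetical receiver-side sketch (for a separate drone-side script, not
# part of this transmitter program) --- it assumes MakeCode's named-value radio
# handler; only the value names and radio group come from the code above.
# def on_received_value(name, value):
#     if name == "T":
#         pass  # feed the throttle value into the flight controller here
#     elif name == "A":
#         pass  # arm (1) or disarm (0) the motors here
# radio.on_received_value(on_received_value)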
|
ogradyra/Cyber-Physical-Systems
|
weekly-code/week01/online_resources/youtube_video_code.py
|
youtube_video_code.py
|
py
| 2,541 |
python
|
en
|
code
| 1 |
github-code
|
6
|