max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
---|---|---|---|---|---|---|
tests/test_db.py | heptaliane/RankingAnnotator | 1 | 12796051 | # -*- coding: utf-8 -*-
import os
from unittest import TestCase
import tempfile
from server import db
def use_temp_db(filename):
def _use_temp_db(fn):
def wrapper(obj):
with tempfile.TemporaryDirectory() as dirname:
fn(obj, os.path.join(dirname, filename))
return wrapper
return _use_temp_db
class TestMatchResultDBController(TestCase):
def test_get_from_blank(self):
with tempfile.NamedTemporaryFile() as f:
logger = db.MatchResultDBController(f.name)
results = logger.get()
self.assertEqual(results, [])
@use_temp_db('test.db')
def test_add_one(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add(0, 1, 2, 0)
lg.add(1, 2, 3, 0)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
})
self.assertEqual(logger.current_id, 1)
@use_temp_db('test.db')
def test_add_list(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), (0, 0))
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
})
@use_temp_db('test.db')
def test_add_list2(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), 0)
results = logger.get(ordered=True)
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
})
@use_temp_db('test.db')
def test_delete(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 3), (2, 4), 0)
lg.add((2, 3), (5, 7), (6, 8), 2)
with logger as lg:
deleted = lg.delete(0)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 3,
'winner': 7,
'loser': 8,
'trigger_id': 2,
})
self.assertEqual(len(deleted), 2)
self.assertEqual(deleted[1], {
'id': 1,
'winner': 3,
'loser': 4,
'trigger_id': 0,
})
class TestRatedMatchResultDBController(TestCase):
def test_get_from_blank(self):
with tempfile.NamedTemporaryFile() as f:
logger = db.RatedMatchResultDBController(f.name)
results = logger.get()
self.assertEqual(results, [])
@use_temp_db('test.db')
def test_add_one(self, filename):
logger = db.RatedMatchResultDBController(filename)
with logger as lg:
lg.add(0, 1, 2, 0, 1400.0, 1600.0)
lg.add(1, 2, 3, 0, 1550, 1450)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
'winner_rate': 1550.0,
'loser_rate': 1450.0,
})
@use_temp_db('test.db')
def test_add_list(self, filename):
logger = db.RatedMatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), 0, (1400, 1550), (1600, 1450))
results = logger.get(ordered=True)
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
'winner_rate': 1550.0,
'loser_rate': 1450.0,
})
@use_temp_db('test.db')
def test_add_delete(self, filename):
logger = db.RatedMatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), 0, (1400, 1550), (1600, 1450))
lg.add((2, 3), (5, 6), (7, 8), 2, (1300, 1700), (1510, 1490))
with logger as lg:
deleted = lg.delete(0)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 3,
'winner': 6,
'loser': 8,
'trigger_id': 2,
'winner_rate': 1700.0,
'loser_rate': 1490.0,
})
self.assertEqual(len(deleted), 2)
self.assertEqual(deleted[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
'winner_rate': 1550.0,
'loser_rate': 1450.0,
})
class TestItemLabelDBController(TestCase):
def test_get_from_blank(self):
with tempfile.NamedTemporaryFile() as f:
logger = db.ItemLabelDBController(f.name)
results = logger.get()
self.assertEqual(results, [])
@use_temp_db('test.db')
def test_add_one(self, filename):
logger = db.ItemLabelDBController(filename)
with logger as lg:
lg.add(0, 'foo')
lg.add(1, 'bar')
results = logger.get()
self.assertEqual(results, [
{'id': 0, 'label': 'foo'},
{'id': 1, 'label': 'bar'},
])
@use_temp_db('test.db')
def test_add_list(self, filename):
logger = db.ItemLabelDBController(filename)
with logger as lg:
lg.add((0, 1), ('foo', 'bar'))
results = logger.get(ordered=True)
self.assertEqual(results, [
{'id': 0, 'label': 'foo'},
{'id': 1, 'label': 'bar'},
])
@use_temp_db('test.db')
def test_delete(self, filename):
logger = db.ItemLabelDBController(filename)
with logger as lg:
lg.add((0, 1), ('foo', 'bar'))
with logger as lg:
deleted = lg.delete('foo')
results = logger.get()
self.assertEqual(results, [
{'id': 1, 'label': 'bar'},
])
self.assertEqual(deleted, [
{'id': 0, 'label': 'foo'}
])
| 2.953125 | 3 |
acousticsim/analysis/formants/lpc.py | JoFrhwld/python-acoustic-similarity | 5 | 12796052 |
import librosa
import numpy as np
import scipy as sp
from scipy.signal import lfilter
from scipy.fftpack import fft,ifft
from scipy.signal import gaussian
from ..helper import fix_time_points, nextpow2
def lpc_ref(signal, order):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
----
This is just for reference, as it is using the direct inversion of the
toeplitz matrix, which is really slow"""
if signal.ndim > 1:
raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
raise ValueError("Input signal must have a lenght >= lpc order")
if order > 0:
p = order + 1
r = np.zeros(p, 'float32')
# Number of non zero values in autocorrelation one needs for p LPC
# coefficients
nx = np.min([p, signal.size])
x = np.correlate(signal, signal, 'full')
r[:nx] = x[signal.size-1:signal.size+order]
phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
return np.concatenate(([1.], phi))
else:
return np.ones(1, dtype = 'float32')
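# A minimal usage sketch (illustrative only -- the toy signal and the order below
# are assumptions, not taken from this module):
#
#   sig = np.sin(2 * np.pi * 0.05 * np.arange(400)).astype('float32')
#   coeffs = lpc_ref(sig, 2)   # order + 1 coefficients, with coeffs[0] == 1.0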
#@jit
def levinson_1d(r, order):
"""Levinson-Durbin recursion, to efficiently solve symmetric linear systems
with toeplitz structure.
Parameters
---------
r : array-like
input array to invert (since the matrix is symmetric Toeplitz, the
corresponding pxp matrix is defined by p items only). Generally the
autocorrelation of the signal for linear prediction coefficients
estimation. The first item must be a non zero real.
Notes
----
This implementation is in python, hence unsuitable for any serious
computation. Use it for educational and reference purposes only.
Levinson is a well-known algorithm to solve the Hermitian toeplitz
equation:
[ R[0]     R*[1]    ...  R*[p-1] ]   [ a[1] ]     [ -R[1] ]
[ R[1]     R[0]     ...  R*[p-2] ]   [ a[2] ]  =  [ -R[2] ]
[  :         :       .      :    ]   [  :   ]     [   :   ]
[ R[p-1]   R[p-2]   ...  R[0]    ]   [ a[p] ]     [ -R[p] ]
with respect to a, where R*[k] is the complex conjugate of R[k]. Using the
special symmetry in the matrix, the inversion can be done in O(p^2) instead
of O(p^3).
"""
r = np.atleast_1d(r)
if r.ndim > 1:
raise ValueError("Only rank 1 are supported for now.")
n = r.size
if n < 1:
raise ValueError("Cannot operate on empty array !")
elif order > n - 1:
raise ValueError("Order should be <= size-1")
if not np.isreal(r[0]):
raise ValueError("First item of input must be real.")
elif not np.isfinite(1/r[0]):
raise ValueError("First item should be != 0")
# Estimated coefficients
a = np.empty(order+1, 'float32')
# temporary array
t = np.empty(order+1, 'float32')
# Reflection coefficients
k = np.empty(order, 'float32')
a[0] = 1.
e = r[0]
for i in range(1, order+1):
acc = r[i]
for j in range(1, i):
acc += a[j] * r[i-j]
k[i-1] = -acc / e
a[i] = k[i-1]
for j in range(order):
t[j] = a[j]
for j in range(1, i):
a[j] += k[i-1] * np.conj(t[i-j])
e *= 1 - k[i-1] * np.conj(k[i-1])
return a, e, k
#@jit
def _acorr_last_axis(x, nfft, maxlag):
a = np.real(ifft(np.abs(fft(x, n = nfft) ** 2)))
return a[..., :maxlag+1] / x.shape[-1]
#@jit
def acorr_lpc(x, axis=-1):
"""Compute autocorrelation of x along the given axis.
This compute the biased autocorrelation estimator (divided by the size of
input signal)
Notes
-----
The reason why we do not use acorr directly is for speed reasons."""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
maxlag = x.shape[axis]
nfft = int(2 ** nextpow2(2 * maxlag - 1))
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a
#@jit
def lpc(signal, order, axis=-1):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k order linear filter:
xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Returns
-------
a : array-like
the solution of the inversion.
e : array-like
the prediction error.
k : array-like
reflection coefficients.
Notes
-----
This uses Levinson-Durbin recursion for the autocorrelation matrix
inversion, and fft for the autocorrelation computation.
For small order, particularly if order << signal size, direct computation
of the autocorrelation is faster: use levinson and correlate in this case."""
n = signal.shape[axis]
if order > n:
raise ValueError("Input signal must have length >= order")
r = acorr_lpc(signal, axis)
return levinson_1d(r, order)
def process_frame(X, window, num_formants, new_sr):
X = X * window
A, e, k = lpc(X, num_formants*2)
rts = np.roots(A)
rts = rts[np.where(np.imag(rts) >= 0)]
angz = np.arctan2(np.imag(rts), np.real(rts))
frqs = angz * (new_sr / (2 * np.pi))
frq_inds = np.argsort(frqs)
frqs = frqs[frq_inds]
bw = -1 / 2 * (new_sr / (2 * np.pi)) * np.log(np.abs(rts[frq_inds]))
return frqs, bw
def lpc_formants(signal, sr, num_formants, max_freq, time_step,
win_len, window_shape = 'gaussian'):
output = {}
new_sr = 2 * max_freq
alpha = np.exp(-2 * np.pi * 50 * (1 / new_sr))
proc = lfilter([1., -alpha], 1, signal)
if sr > new_sr:
proc = librosa.resample(proc, sr, new_sr)
nperseg = int(win_len * new_sr)
nperstep = int(time_step * new_sr)
if window_shape == 'gaussian':
window = gaussian(nperseg + 2, 0.45 * (nperseg - 1) / 2)[1:nperseg + 1]
else:
window = np.hanning(nperseg + 2)[1:nperseg + 1]
indices = np.arange(int(nperseg / 2), proc.shape[0] - int(nperseg / 2) + 1, nperstep)
num_frames = len(indices)
for i in range(num_frames):
if nperseg % 2 != 0:
X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2) + 1]
else:
X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2)]
frqs, bw = process_frame(X, window, num_formants, new_sr)
formants = []
for j, f in enumerate(frqs):
if f < 50:
continue
if f > max_freq - 50:
continue
formants.append((np.asscalar(f), np.asscalar(bw[j])))
missing = num_formants - len(formants)
if missing:
formants += [(None, None)] * missing
output[indices[i] / new_sr] = formants
return output
def signal_to_formants(signal, sr, num_formants=5, max_freq=5000,
time_step=0.01, win_len=0.025,
begin=None, padding=None):
output = lpc_formants(signal, sr, num_formants, max_freq, time_step,
win_len, window_shape='gaussian')
duration = signal.shape[0] / sr
return fix_time_points(output, begin, padding, duration)
def file_to_formants(file_path, num_formants, max_freq, win_len, time_step):
sig, sr = librosa.load(file_path, sr=None, mono=False)
output = signal_to_formants(sig, sr, num_formants, max_freq, time_step=time_step, win_len=win_len)
return output
| 2.359375 | 2 |
src/pyqp/__main__.py | MMSB-MOBI/pyqp | 0 | 12796053 | """Quantitative Proteomic Service
Usage:
pyqp api
pyqp cli <proteomicTSV> <proteomeXML> [--field=<quantity_column>] [--adress=<apiAdress>] [--port=<apiPort>] [--verbose] [--topScore=<pathway_number>]
Options:
-h --help Show this screen.
--field=<quantity column> csv column header featuring signal
--purb=purb aa
--intg=intg bbb
--alpha=alpha ccc
--ncore=ncore ddd
--sizelim=sizelim eee
--prot=<proteomeXML> ggg
--adress=<apiAdress> aaa
--port=<apiPort> aaa
--verbose iiii
--topScore=<pathway_number> aaaa
"""
# TEST W/ mycoplasma proteome
# To test this:
#python -m pyqp cli previous/wt2_subset.tsv unigo/src/unigo/data/uniprot-proteome_UP000000625.xml.gz
from docopt import docopt
#from pyT2GA import analysis
from unigo import Unigo as createUniGOTree
from unigo import uloads as createGOTreeFromAPI
from .utils import proteomicWrapper
from pyproteinsExt.uniprot import EntrySet as createUniprotCollection
from requests import get
from .api import app
import time
arguments = docopt(__doc__)
#print(arguments)
abnd_field = arguments['--field'] if arguments['--field'] else "Corrected Abundance ratio (1,526968203)"
nTop = int(arguments['--topScore']) if arguments['--topScore'] else 20
if arguments['cli']:
quantProteomic = proteomicWrapper( csv_file = arguments['<proteomicTSV>'], abnd_label = abnd_field)
uColl = createUniprotCollection(collectionXML = arguments['<proteomeXML>'] )
missingProt = []
for x in quantProteomic.uniprot:
if not uColl.has(x):
print(f"{x} not found in proteome")
missingProt.append(x)
for x in missingProt:
quantProteomic.remove(x)
taxid = uColl.taxids[0]
apiAdress = arguments['--adress'] if arguments['--adress'] else "127.0.0.1"
apiPort = arguments['--port'] if arguments['--port'] else "5000"
url = f"http://{apiAdress}:{apiPort}/unigo/{taxid}"
print(f"Fetching universal annotation tree from {url}")
expUniprotID = [ _ for _ in quantProteomic.uniprot ]
resp = get(url)
if resp.status_code == 404:
print(f"{url} returned 404, provided proteome XML {taxid} may not be registred")
else:
unigoTree = createGOTreeFromAPI(resp.text, expUniprotID)
x,y = unigoTree.dimensions
print("Unigo Object successfully buildt w/ following dimensions:")
print(f"\txpTree => nodes:{x[0]} children_links:{x[1]}, total_protein_occurences:{x[2]}, protein_set:{x[3]}")
print(f"\t universeTree => nodes:{y[0]} children_links:{y[1]}, total_protein_occurences:{y[2]}, protein_set:{y[3]}")
nDelta=int(0.1 * len(quantProteomic))
print(f"{len(quantProteomic)} proteins available in quantitative records, taking first {nDelta} as of quantity modified")
print("Computing ORA")
deltaUniprotID = expUniprotID[:nDelta]
rankingsORA = unigoTree.computeORA(deltaUniprotID, verbose = arguments['--verbose'])
print(f"Test Top - {nTop}\n{rankingsORA[:nTop]}")
if arguments['api']:
app.run(port=1234)
"""
unigoTree = createUniGOTree( backgroundUniColl = uColl,
proteinList = [ x for x in quantProteomic.uniprot ],
fetchLatest = False)
start = time.perf_counter()
# Taking 10% w/ highest qtty value
rankingsORA = unigoTree.computeORA(
[ _ for _ in quantProteomic[nTop].uniprot ]
, verbose = False)
stop = time.perf_counter()
print(f"Test Top - {5}\n{rankingsORA[5]}")
print(f"Execution time is {stop-start} sc")
"""
# Unnecessary
def typeGuardTaxID(proteomicData, uColl):
taxids = {}
for uID in proteomicData.uniprot:
uObj = uColl.get(uID)
if not uObj.taxid in taxids:
taxids[uObj.taxid] = 0
taxids[uObj.taxid] += 1
return sorted( [ (k,v) for k,v in taxids.items() ], key=lambda x:x[1] )
#r = pyt2ga.analysis(proteoRes, GOpwRes, STRINGRes, mapperRes, intg=False,
# abnd_label = "Corrected Abundance ratio (1,526968203)", ncore=3) | 2.1875 | 2 |
phase3/consumer-new-customer.py | amanda-wink/Kafka3-Data | 0 | 12796054 | from kafka import KafkaConsumer, TopicPartition
from json import loads
from sqlalchemy import create_engine, Table, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
import os
user = os.getenv('MYSQL_user')
pw = os.getenv('MYSQL')
str_sql = 'mysql+mysqlconnector://' + user + ':' + pw + '@localhost/ZipBank'
engine = create_engine(str_sql)
Base = declarative_base(bind=engine)
class XactionConsumer:
def __init__(self):
self.consumer = KafkaConsumer('bank-customer-new',
bootstrap_servers=['localhost:9092'],
# auto_offset_reset='earliest',
value_deserializer=lambda m: loads(m.decode('ascii')))
# In-memory caches: `customer` maps each custid to the latest message received,
# and `customer_list` holds the custids already present in the database.
self.customer = {}
self.customer_list = []
#Go back to the readme.
def handleMessages(self):
self.CustDb()
for message in self.consumer:
message = message.value
print('{} received'.format(message))
self.customer[message['custid']] = message
# add message to the transaction table in your SQL using SQLAlchemy
if message['custid'] in self.customer_list:
print("Already a customer")
else:
with engine.connect() as connection:
connection.execute("insert into person (custid, createdate, fname, lname) values(%s, %s, %s, %s)", (message['custid'], message['createdate'], message['fname'], message['lname']))
print(self.customer)
def CustDb(self):
with engine.connect() as connection:
cust = connection.execute("select custid from person")
cust_list = cust.fetchall()
for row in cust_list:
self.customer_list.append(row[0])
class Transaction(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
custid = Column(Integer, primary_key=True)
createdate = Column(Integer)
fname = Column(String(50))
lname = Column(String(50))
if __name__ == "__main__":
Base.metadata.create_all(engine)
c = XactionConsumer()
c.handleMessages() | 2.78125 | 3 |
Server/diarization_service.py | mmlabox/TeamAudio | 0 | 12796055 | import os
from google.cloud import speech_v1p1beta1 as speech
import io
#Set env variable, because it resets every shell session
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/robin_jf_andersson/mbox_speaker_diarization/mbox1-28508a73fde1.json"
def speaker_diarization(audio_file, channels, sample_rate, nbr_of_persons):
client = speech.SpeechClient()
speech_file = audio_file
with open(speech_file, "rb") as audio_file:
content = audio_file.read()
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=sample_rate,
language_code="en-US",
enable_speaker_diarization=True,
diarization_speaker_count=nbr_of_persons,
audio_channel_count=channels,
enable_separate_recognition_per_channel=True, #change this if respeaker is configured correctly
model="video",
)
print("Waiting for operation to complete...")
response = client.recognize(config=config, audio=audio)
# The transcript within each result is separate and sequential per result.
# However, the words list within an alternative includes all the words
# from all the results thus far. Thus, to get all the words with speaker
# tags, you only have to take the words list from the last result:
result = response.results[-1]
words_info = result.alternatives[0].words
output_result = {}
#saving each word with corresponding speaker tag into a dictionary of word lists
for i in range(nbr_of_persons):
word_counter = 0
speaker_data = {}
words = []
for word_info in words_info:
if(word_info.speaker_tag == (i+1)):
words.append(word_info.word)
word_counter += 1
speaker_data["number_of_words"] = word_counter
speaker_data["words"] = words
output_result[(i+1)] = speaker_data
#print(output_result)
return output_result
#test
#speaker_diarization("audiofiles/Test7.wav", ...)
| 2.984375 | 3 |
Parabola/prop1_probs.py | pdcxs/ManimProjects | 29 | 12796056 | from manimlib.imports import *
from ManimProjects.utils.Parabola import Parabola
from ManimProjects.utils.geometry import CText
class Prob1(Parabola):
CONFIG = {
'x_min' : -5
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.7)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
y_val = ValueTracker(8)
p1 = Dot()
p1.set_color(DARK_BLUE)
p1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()),
y_val.get_value()
)))
p1.plot_depth = 1
p1Label = TexMobject('P_1').scale(0.7)
p1Label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
p2 = Dot()
p2.set_color(DARK_BLUE)
p2.add_updater(lambda m:\
m.move_to(self.get_opposite(p1)))
p2.plot_depth = 1
p2Label = TexMobject('P_2').scale(0.7)
p2Label.add_updater(lambda m:\
m.next_to(p2, RIGHT, buff=SMALL_BUFF))
focus_chord = Line()
focus_chord.add_updater(lambda m:\
m.put_start_and_end_on(
p1.get_center(),
self.get_opposite(p1)
))
self.play(ShowCreation(p1), ShowCreation(p1Label))
self.play(ShowCreation(focus_chord))
self.play(ShowCreation(p2), ShowCreation(p2Label))
fc_def = CText('焦点弦')
fc_def.move_to(focus_chord.get_center())
fc_def.shift(0.2 * RIGHT + 0.1 * DOWN)
self.play(Write(fc_def))
self.wait(2)
self.play(FadeOut(fc_def))
q_y = ValueTracker(2)
q = Dot()
q.set_fill(DARK_BLUE)
q.plot_depth = 1
q.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(q_y.get_value()),
q_y.get_value()
)))
qLabel = TexMobject('Q').scale(0.7)
qLabel.add_updater(lambda m:\
m.next_to(q, LEFT, buff=SMALL_BUFF))
k1 = Dot()
k1.set_fill(BLUE_E)
k1.plot_depth = 1
k1.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p1, q)))
k1Label = TexMobject('K_1').scale(0.7)
k1Label.add_updater(lambda m:\
m.next_to(k1, LEFT, buff=SMALL_BUFF))
k2 = Dot()
k2.set_fill(BLUE_E)
k2.plot_depth = 1
k2.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p2, q)))
k2Label = TexMobject('K_2').scale(0.7)
k2Label.add_updater(lambda m:\
m.next_to(k2, LEFT, buff=SMALL_BUFF))
l1 = Line()
l1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q),
self.chord_to_directrix(p1, q)
))
l2 = Line()
l2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q),
self.chord_to_directrix(p2, q)
))
self.play(ShowCreation(q), ShowCreation(qLabel))
self.play(ShowCreation(l1), ShowCreation(l2))
self.play(*[ShowCreation(e) for e in [k1, k2, k1Label, k2Label]])
k1f = Line()
k1f.add_updater(lambda m:\
m.put_start_and_end_on(
k1.get_center(), focus.get_center()
))
k2f = Line()
k2f.add_updater(lambda m:\
m.put_start_and_end_on(
k2.get_center(), focus.get_center()
))
self.play(ShowCreation(k1f), ShowCreation(k2f))
self.wait(1)
self.play(ApplyMethod(y_val.set_value,
5))
summary = TexMobject('K_1F \\perp K_2F').scale(2)
summary.to_edge(RIGHT)
self.wait(1)
self.play(Write(summary))
self.wait(5)
qf = Line()
qf.add_updater(lambda m:\
m.put_start_and_end_on(q.get_center(),
focus.get_center()))
self.play(ShowCreation(qf))
self.wait(1)
self.play(ApplyMethod(q_y.set_value,
-1))
self.wait(1)
self.play(ApplyMethod(y_val.set_value,
0.5))
self.wait(1)
self.play(ApplyMethod(y_val.set_value,
3),
ApplyMethod(q_y.set_value, 0.5))
self.wait(10)
class Prob2(Parabola):
CONFIG = {
'focus': 2,
'x_min': -4
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.7)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
q1_y = ValueTracker(9)
q1 = Dot()
q1.set_fill(DARK_BLUE)
q1.plot_depth = 1
q1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(q1_y.get_value()),
q1_y.get_value()
)))
q1_label = TexMobject('Q_1').scale(0.5)
q1_label.add_updater(lambda m:\
m.next_to(q1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(q1), ShowCreation(q1_label))
q2 = Dot()
q2.set_fill(DARK_BLUE)
q2.plot_depth = 1
q2.add_updater(lambda m:\
m.move_to(self.get_opposite(q1)))
q2_label = TexMobject('Q_2').scale(0.5)
q2_label.add_updater(lambda m:\
m.next_to(q2, RIGHT, buff=SMALL_BUFF))
q1q2 = Line()
q1q2.add_updater(lambda m:\
m.put_start_and_end_on(
q1.get_center(),
self.get_opposite(q1)
))
self.play(*[ShowCreation(e) for e in\
[q2, q2_label, q1q2]])
p1_y = ValueTracker(2)
p1 = Dot()
p1.set_fill(DARK_BLUE)
p1.plot_depth = 1
p1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(p1_y.get_value()),
p1_y.get_value()
)))
p1_label = TexMobject('P_1').scale(0.5)
p1_label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(p1), ShowCreation(p1_label))
p2 = Dot()
p2.set_fill(DARK_BLUE)
p2.plot_depth = 1
p2.add_updater(lambda m:\
m.move_to(self.get_opposite(p1)))
p2_label = TexMobject('P_2').scale(0.5)
p2_label.add_updater(lambda m:\
m.next_to(p2, RIGHT, buff=SMALL_BUFF))
p1p2 = Line()
p1p2.add_updater(lambda m:\
m.put_start_and_end_on(
p1.get_center(),
self.get_opposite(p1)
))
self.play(*[ShowCreation(e) for e in\
[p2, p2_label, p1p2]])
k1 = Dot()
k1.set_fill(DARK_BROWN)
k1.plot_depth = 1
k1.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p1, q1)))
k1_label = TexMobject('K_1').scale(0.5)
k1_label.add_updater(lambda m:\
m.next_to(k1, LEFT, buff=SMALL_BUFF))
p1q1 = Line()
p1q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q1),
self.chord_to_directrix(p1, q1)
))
p2q2 = Line()
p2q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q2),
self.chord_to_directrix(p2, q2)
))
self.play(*[ShowCreation(e) for e in \
[k1, k1_label, p1q1, p2q2]])
k2 = Dot()
k2.set_fill(DARK_BROWN)
k2.plot_depth = 1
k2.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p2, q1)))
k2_label = TexMobject('K_2').scale(0.5)
k2_label.add_updater(lambda m:\
m.next_to(k2, LEFT, buff=SMALL_BUFF))
p2q1 = Line()
p2q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q1),
self.chord_to_directrix(p2, q1)
))
p1q2 = Line()
p1q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q2),
self.chord_to_directrix(p1, q2)
))
self.play(*[ShowCreation(e) for e in \
[k2, k2_label, p2q1, p1q2]])
explain = CText('这些交点在准线上').scale(0.3)
explain.to_edge(RIGHT)
self.wait(2)
self.play(Write(explain))
self.wait(5)
self.play(ApplyMethod(q1_y.set_value, 0.5),
ApplyMethod(p1_y.set_value, -3))
self.wait(3)
self.play(ApplyMethod(q1_y.set_value, 3),
ApplyMethod(p1_y.set_value, -9))
self.wait(10)
class Prob3(Parabola):
CONFIG = {
'focus': 2,
'x_min': -4
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.7)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
q1_y = ValueTracker(9)
q1 = Dot()
q1.set_fill(DARK_BLUE)
q1.plot_depth = 1
q1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(q1_y.get_value()),
q1_y.get_value()
)))
q1_label = TexMobject('Q_1').scale(0.5)
q1_label.add_updater(lambda m:\
m.next_to(q1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(q1), ShowCreation(q1_label))
q2 = Dot()
q2.set_fill(DARK_BLUE)
q2.plot_depth = 1
q2.add_updater(lambda m:\
m.move_to(self.get_opposite(q1)))
q2_label = TexMobject('Q_2').scale(0.5)
q2_label.add_updater(lambda m:\
m.next_to(q2, RIGHT, buff=SMALL_BUFF))
q1q2 = Line()
q1q2.add_updater(lambda m:\
m.put_start_and_end_on(
q1.get_center(),
self.get_opposite(q1)
))
self.play(*[ShowCreation(e) for e in\
[q2, q2_label, q1q2]])
p1_y = ValueTracker(2)
p1 = Dot()
p1.set_fill(DARK_BLUE)
p1.plot_depth = 1
p1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(p1_y.get_value()),
p1_y.get_value()
)))
p1_label = TexMobject('P_1').scale(0.5)
p1_label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(p1), ShowCreation(p1_label))
p2 = Dot()
p2.set_fill(DARK_BLUE)
p2.plot_depth = 1
p2.add_updater(lambda m:\
m.move_to(self.get_opposite(p1)))
p2_label = TexMobject('P_2').scale(0.5)
p2_label.add_updater(lambda m:\
m.next_to(p2, RIGHT, buff=SMALL_BUFF))
p1p2 = Line()
p1p2.add_updater(lambda m:\
m.put_start_and_end_on(
p1.get_center(),
self.get_opposite(p1)
))
self.play(*[ShowCreation(e) for e in\
[p2, p2_label, p1p2]])
k1 = Dot()
k1.set_fill(DARK_BROWN)
k1.plot_depth = 1
k1.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p1, q1)))
k1_label = TexMobject('K_1').scale(0.5)
k1_label.add_updater(lambda m:\
m.next_to(k1, LEFT, buff=SMALL_BUFF))
p1q1 = Line()
p1q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q1),
self.chord_to_directrix(p1, q1)
))
p2q2 = Line()
p2q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q2),
self.chord_to_directrix(p2, q2)
))
self.play(*[ShowCreation(e) for e in \
[k1, k1_label, p1q1, p2q2]])
k2 = Dot()
k2.set_fill(DARK_BROWN)
k2.plot_depth = 1
k2.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p2, q1)))
k2_label = TexMobject('K_2').scale(0.5)
k2_label.add_updater(lambda m:\
m.next_to(k2, LEFT, buff=SMALL_BUFF))
p2q1 = Line()
p2q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q1),
self.chord_to_directrix(p2, q1)
))
p1q2 = Line()
p1q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q2),
self.chord_to_directrix(p1, q2)
))
self.play(*[ShowCreation(e) for e in \
[k2, k2_label, p2q1, p1q2]])
k1f = Line()
k1f.add_updater(lambda m:\
m.put_start_and_end_on(
k1.get_center(),
focus.get_center()
))
k2f = Line()
k2f.add_updater(lambda m:\
m.put_start_and_end_on(
k2.get_center(),
focus.get_center()
))
explain = TexMobject('K_1F \\perp K_2F')
explain.to_edge(RIGHT)
self.wait(2)
self.play(ShowCreation(k1f), ShowCreation(k2f))
self.wait(3)
self.play(Write(explain))
self.wait(5)
self.play(ApplyMethod(q1_y.set_value, 0.5),
ApplyMethod(p1_y.set_value, -3))
self.wait(3)
self.play(ApplyMethod(q1_y.set_value, 3),
ApplyMethod(p1_y.set_value, -9))
self.wait(10)
class Prob4(Parabola):
CONFIG = {
'focus': 3,
'x_min': -10
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.5)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
a = Dot()
a.set_fill(DARK_BROWN)
a.move_to(self.coords_to_point(0, 0))
a.plot_depth = 1
a_label = TexMobject('A').scale(0.5)
a_label.next_to(a, RIGHT)
self.play(*[ShowCreation(e) for e in [a, a_label]])
y_val = ValueTracker(8)
m = Dot()
m.set_fill(DARK_BLUE)
m.plot_depth = 1
m.add_updater(lambda m:\
m.move_to(self.coords_to_point(
-self.focus, y_val.get_value()
)))
m_label = TexMobject('M').scale(0.5)
m_label.add_updater(lambda l:\
l.next_to(m, LEFT))
p = Dot()
p.set_fill(DARK_BLUE)
p.plot_depth = 1
p.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()),
y_val.get_value()
)))
p_label = TexMobject('P').scale(0.5)
p_label.add_updater(lambda m:\
m.next_to(p, RIGHT))
self.play(*[ShowCreation(e) for e in\
[m, m_label, p, p_label]])
k = Dot()
k.set_fill(DARK_BLUE)
k.plot_depth = 1
k.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(
p, a
)))
k_label = TexMobject('K').scale(0.5)
k_label.add_updater(lambda m:\
m.next_to(k, LEFT))
pk = Line()
pk.add_updater(lambda l:\
l.put_start_and_end_on(
p.get_center(),
self.chord_to_directrix(p, a)
))
mp = Line()
mp.add_updater(lambda l:\
l.put_start_and_end_on(
m.get_center(),
p.get_center()
))
self.play(*[ShowCreation(e) for e in\
[k, k_label, pk, mp]])
kf = Line()
kf.add_updater(lambda l:\
l.put_start_and_end_on(
k.get_center(),
focus.get_center()
))
mf = Line()
mf.add_updater(lambda l:\
l.put_start_and_end_on(
m.get_center(),
focus.get_center()
))
self.play(ShowCreation(kf), ShowCreation(mf))
form = TexMobject('KF \\perp MF')
form.scale(0.7)
form.to_edge(RIGHT)
self.play(Write(form))
af = DashedLine(a.get_center(), focus.get_center())
pf = DashedLine()
def get_pf_extent():
vec = focus.get_center() - p.get_center()
vec = normalize(vec)
return focus.get_center() + 2 * vec
pf.add_updater(lambda m:\
m.put_start_and_end_on(
p.get_center(),
get_pf_extent()
))
self.play(ShowCreation(af), ShowCreation(pf))
self.wait(3)
self.play(ApplyMethod(y_val.set_value, 2))
self.wait(3)
self.play(ApplyMethod(y_val.set_value, -2))
self.wait(3)
self.play(ApplyMethod(y_val.set_value, -8))
self.wait(10)
class Prob5(Parabola):
CONFIG = {
'focus': 3,
'x_min': -10
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.5)
focusLabel.next_to(focus, RIGHT + UP)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
h_line = self.get_horizontal()
x = Dot()
x.set_fill(DARK_BROWN)
x.plot_depth = 1
x.move_to(self.coords_to_point(-self.focus, 0))
x_label = TexMobject('X').scale(0.5)
x_label.next_to(x, LEFT + UP)
self.play(ShowCreation(h_line))
self.play(ShowCreation(x), ShowCreation(x_label))
y_val = ValueTracker(8)
p = Dot()
p.set_fill(DARK_BLUE)
p.plot_depth = 1
p.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()),
y_val.get_value()
)))
q = Dot()
q.set_fill(DARK_BLUE)
q.plot_depth = 1
q.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(-y_val.get_value()),
-y_val.get_value()
)))
t = Dot()
t.set_fill(DARK_BLUE)
t.plot_depth = 1
t.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()), 0
)))
p_label = TexMobject('P').scale(0.5)
p_label.add_updater(lambda m:\
m.next_to(p, RIGHT))
q_label = TexMobject('Q').scale(0.5)
q_label.add_updater(lambda m:\
m.next_to(q, RIGHT))
t_label = TexMobject('T').scale(0.5)
t_label.add_updater(lambda m:\
m.next_to(t, RIGHT + UP))
pq = Line()
pq.add_updater(lambda m:\
m.put_start_and_end_on(
p.get_center(),
self.coords_to_point(
self.func(-y_val.get_value()),
-y_val.get_value()
)))
pt = Line()
pt.add_updater(lambda m:\
m.put_start_and_end_on(
p.get_center(),
self.coords_to_point(
self.func(y_val.get_value()), 0
)))
self.play(ShowCreation(p), ShowCreation(p_label))
self.play(ShowCreation(pt))
self.play(ShowCreation(t), ShowCreation(t_label))
label1 = CText('纵标线').scale(0.3)\
.next_to(pt, RIGHT)
self.play(ShowCreation(label1))
self.wait()
self.play(FadeOut(label1))
self.play(ShowCreation(pq))
self.remove(pt)
self.play(ShowCreation(q), ShowCreation(q_label))
label2 = CText('双纵标线').scale(0.3)\
.next_to(t, RIGHT+DOWN)
self.play(ShowCreation(label2))
self.wait()
self.play(FadeOut(label2))
self.wait()
inter = Dot()
inter.set_fill(DARK_BLUE)
inter.plot_depth = 1
inter.add_updater(lambda m:\
m.move_to(
self.coords_to_point(
4*(self.focus**3)/(y_val.get_value()**2),
4*self.focus**2/y_val.get_value()
) if y_val.get_value() != 0 else
self.coords_to_point(0, 0)
))
inter_label = TexMobject("P'").scale(0.5)
inter_label.add_updater(lambda m:\
m.next_to(inter, LEFT + UP, buff=SMALL_BUFF))
px = Line()
px.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p, inter),
x.get_center()
))
self.play(ShowCreation(px))
self.play(ShowCreation(inter),
ShowCreation(inter_label))
self.wait()
form = CText("P'Q经过焦点").shift(UP)
form.scale(0.5)
form.to_edge(RIGHT)
self.play(Write(form))
interq = Line()
interq.add_updater(lambda m:\
m.put_start_and_end_on(
inter.get_center(),
q.get_center()
))
self.play(ShowCreation(interq))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, 4))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, -4))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, -9))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, 9))
self.wait(10) | 2.515625 | 3 |
pyfuseki/rdf/rdf_prefix.py | yubinCloud/pyfuseki | 21 | 12796057 |
"""
@Time: 2021/9/18 13:04
@Author:
@File: rdf_prefix.py
"""
import rdflib
from pyfuseki import config
import uuid
name_to_uri = dict()
class NameSpace(rdflib.Namespace):
"""
Extends rdflib's Namespace and adds other related functionality
"""
def __getitem__(self, key) -> rdflib.URIRef:
return super(NameSpace, self).__getitem__(key)
def __getattr__(self, name) -> rdflib.URIRef:
return super(NameSpace, self).__getattr__(name)
def uid(self, name) -> rdflib.URIRef:
"""
Generate a unique id with uuid and wrap it as the value of a URIRef
:return:
"""
if name not in name_to_uri:
name_to_uri[name] = str(uuid.uuid1())
uri = name_to_uri[name]
return rdflib.URIRef(self[uri])
def to_uri(self) -> rdflib.URIRef:
"""
Convert this namespace itself into a URIRef
:return:
"""
uri = str(self)
if uri.endswith('/'):
uri = uri[:uri.rfind('/')]
return rdflib.URIRef(uri)
def rdf_prefix(cls: type, local_prefix: str = None):
if local_prefix is None:
local_prefix = config.COMMON_PREFIX
attrs = cls.__annotations__.keys()
for k in attrs:
setattr(cls, k, NameSpace(local_prefix + k + '/'))
return cls
if __name__ == '__main__':
a = NameSpace('http://www.google.com/person/')
b = a.to_uri()
@rdf_prefix
class Node:
name: str
email: str
n = Node()
print(n.name['yubin']) | 2.390625 | 2 |
Python 3 - Curso completo/exercicio067.py | PedroMunizdeMatos/Estudos-e-Projetos | 0 | 12796058 | '''Write a program that shows the multiplication table for several numbers, one at a time, for each value
entered by the user. The program stops when the requested value is negative.'''
from time import sleep
n = 0
cont = 0
while n >= 0:
print('--' * 15)
print('\033[33mPara cancelar, digite um número negativo.\033[m')
n = int(input('Qual número deseja saber a tabuada ? '))
print('--' * 15)
if n < 0:
print('\033[31mFinalizando o programa...\033[m')
sleep(1)
break
else:
for c in range (0,11):
print(f'{n} x {c} = {n*c}')
| 3.9375 | 4 |
src/core.py | foutoucour/nepo | 1 | 12796059 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
from contextlib import contextmanager
import click
import crayons
def open_url(url):
click.echo("Opening {}.".format(crayons.white(url, bold=True)))
click.launch(url)
def get_config_file_path():
home = os.path.expanduser("~")
return os.path.realpath('{}/.commands.json'.format(home))
@contextmanager
def get_config_file(mode='r'):
""" Return the file storing the commands.
:param str mode: the mode the file with be opened with. Default: r
:return: the file object.
:rtype: file
"""
path = get_config_file_path()
if not os.path.exists(path):
generate_empty_config_file()
with open(path, mode) as datafile:
yield datafile
def generate_empty_config_file():
""" Reset the config file."""
with open(get_config_file_path(), 'w') as datafile:
json.dump({}, datafile)
def build_command(name, url):
""" Build a click command according the arguments.
:param str name: label that the user will use to trigger the command.
:param str url: the url that will be opened.
:rtype: click.Command
"""
return click.Command(
name,
callback=lambda: open_url(url),
help='Open {}'.format(url)
)
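# Usage sketch (hypothetical wiring, not part of this module): the returned
# click.Command would typically be registered on a click group, e.g.
#
#   import click
#   cli = click.Group()
#   cli.add_command(build_command('docs', 'https://example.com'))
#   if __name__ == '__main__':
#       cli()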
| 3.125 | 3 |
src/tanuki/database/adapter/query/query_compiler.py | M-J-Murray/tanuki | 0 | 12796060 |
from __future__ import annotations
from typing import Any, Generic, TYPE_CHECKING, TypeVar, Union
T = TypeVar("T")
from tanuki.data_store.query import Query
if TYPE_CHECKING:
from tanuki.data_store.query import (
AndGroupQuery,
AndQuery,
EqualsQuery,
GreaterEqualQuery,
GreaterThanQuery,
LessEqualQuery,
LessThanQuery,
NotEqualsQuery,
OrGroupQuery,
OrQuery,
RowCountQuery,
SumQuery,
)
class QueryCompiler(Generic[T]):
def EQUALS(self: "QueryCompiler", query: EqualsQuery) -> T:
raise NotImplementedError()
def NOT_EQUALS(self: "QueryCompiler", query: NotEqualsQuery) -> T:
raise NotImplementedError()
def GREATER_THAN(self: "QueryCompiler", query: GreaterThanQuery) -> T:
raise NotImplementedError()
def GREATER_EQUAL(self: "QueryCompiler", query: GreaterEqualQuery) -> T:
raise NotImplementedError()
def LESS_THAN(self: "QueryCompiler", query: LessThanQuery) -> T:
raise NotImplementedError()
def LESS_EQUAL(self: "QueryCompiler", query: LessEqualQuery) -> T:
raise NotImplementedError()
def ROW_COUNT(self: "QueryCompiler", query: RowCountQuery) -> T:
raise NotImplementedError()
def SUM(self: "QueryCompiler", query: SumQuery) -> T:
raise NotImplementedError()
def AND(self: "QueryCompiler", query: AndQuery) -> T:
raise NotImplementedError()
def AND_GROUP(self: "QueryCompiler", query: AndGroupQuery) -> T:
raise NotImplementedError()
def OR(self: "QueryCompiler", query: OrQuery) -> T:
raise NotImplementedError()
def OR_GROUP(self: "QueryCompiler", query: OrGroupQuery) -> T:
raise NotImplementedError()
def compile(self: "QueryCompiler", query: Union[Any, Query]) -> T:
if not isinstance(query, Query):
return query
return query.compile(self)
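# A rough sketch of how a concrete compiler could look (hypothetical: the
# `left`/`right` attributes assumed on the query objects below are NOT defined
# in this module and are only for illustration of the visitor-style dispatch):
#
#   class SqlQueryCompiler(QueryCompiler[str]):
#       def EQUALS(self, query):
#           return f"{self.compile(query.left)} = {self.compile(query.right)}"
#       def AND(self, query):
#           return f"({self.compile(query.left)}) AND ({self.compile(query.right)})"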
| 2.359375 | 2 |
trade_fetcher.py | jianwang0212/napoli_gang | 1 | 12796061 | import json
import requests
import ccxt
import time
import os
import pandas as pd
from datetime import datetime, timedelta
import operator
import csv
import cfg
liquid = ccxt.liquid(cfg.liquid_misc_credential)
exchange = liquid
since = exchange.milliseconds() - 86400000 # -1 day from now
def save_and_get_str():
# SAVE
all_orders = []
since = exchange.milliseconds() - 86400000 * 5 # -5 days from now
while since < exchange.milliseconds():
symbol = 'ETH/JPY' # change for your symbol
limit = 100 # change for your limit
orders = exchange.fetch_my_trades(symbol, since, limit)
if len(orders) > 1:
since = orders[len(orders) - 1]['timestamp']
all_orders += orders
else:
break
df = pd.DataFrame(
columns=['utc', 'time', 'type', 'amount', 'price', 'fee', 'takerOrMaker'])
for element in all_orders:
trade = element['info']
trade_utc = datetime.utcfromtimestamp(
float(trade['created_at'])).strftime('%Y-%m-%d %H:%M:%S.%f')
trades_to_append = str(int(float(trade['created_at']) * 1000)) + ',' + str(trade_utc) + ',' + str(trade['my_side']) + ',' + str(abs(
float(trade['quantity']))) + ',' + str(float(trade['price'])) + ',' + str(element['fee']) + ',' + str(element['takerOrMaker'])
df.loc[len(df.index)] = trades_to_append.split(",")
# df.to_csv('transaction_liquid.csv')
if not os.path.isfile("transaction_liquid.csv"):
csv_content = df.to_csv(index=False)
else:
csv_content = df.to_csv(
index=False, header=None)
with open('transaction_liquid.csv', 'a') as csvfile:
csvfile.write(csv_content)
def sort_csv():
x = pd.read_csv("transaction_liquid.csv")
print(x.iloc[0])
x = x.drop_duplicates().sort_values('time', ascending=False)
x.to_csv('transaction_liquid.csv', index=False)
print('sorted')
while True:
save_and_get_str()
sort_csv()
time.sleep(23 * 60)
| 2.484375 | 2 |
mytb/datetime.py | quentinql/mytb | 0 | 12796062 | import re
from datetime import datetime
from datetime import timedelta
import dateutil.parser
import pytz
import tzlocal
# datetime objct for beginning of epoch
T_EPOCH = datetime(1970, 1, 1, tzinfo=pytz.utc)
DEFAULT = object() # singleton, for args with default values
class DateTimeError(Exception):
""" custom exception """
class DateTime(object):
single_delta = r'(?:\s*([+-]\d+(?:\.\d*)?)(?:\s*([shMdw])?)\s*)'
single_delta = r'(?:\s*([+-]\d+(?:\.\d*)?)\s*([shMdw]?)\s*)'
# attempt to handle comma separated list of deltas
# multi_delta = r'^%s(?:,%s)*$' % (single_delta, single_delta)
delta_rex = re.compile('^' + single_delta + '$')
delta_units = {
's': (0, 1),
'M': (0, 60),
'h': (0, 3600),
'd': (1, 0),
'w': (7, 0),
'': (1, 0), # default unit = days
}
@classmethod
def strptimedelta(cls, deltastr, info=None, raise_on_error=True):
""" parses a date time string and returns a datetime timedelta object
Supported Formats:
'+-<num><unit>'
where unit =
s for seconds
h for hours
M for minutes
d for days
w for weeks
default = days
"""
# not implemented so far
# and rounding (use by strptime) =
# d for days
# default no rounding
# """
# TODO: think about using dateutil.parser.relativedelta
rslt = datetime.now(pytz.utc)
fields = (val.strip() for val in deltastr.split(','))
delta_rex = cls.delta_rex
for field in fields:
match = delta_rex.match(field)
if not match:
raise DateTimeError("can't parse %r as delta" % field)
value, unit = match.groups()
value = float(value)
days, seconds = cls.delta_units[unit]
rslt += timedelta(days * value, seconds * value)
return rslt
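# Illustrative examples of the accepted delta syntax (the concrete values are
# assumptions, chosen only to show the format):
#   DateTime.strptimedelta('-1d')         -> now minus one day
#   DateTime.strptimedelta('+2h')         -> now plus two hours
#   DateTime.strptimedelta('-1d, +30M')   -> comma separated deltas are summed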
@classmethod
def strptime(cls, datestr=None, fmt=None, tzinfo=DEFAULT):
""" parses a date time string and returns a date time object
Supported Formats:
- formats as supported by dateutil.parser
- None, '', 0, '0' and 'now' -> datetime.now()
- if fmt is passed same as datetime.strptime
:param datestr: date string to be passed
:param fmt: if passedm then use datetime's normal strptime
BUT add a time zone info
:param tzinfo: if no tz info is specified in the string, then
this param decides which time zone shall be used.
DEFAULT: use local time zone
None: return naive time zone object
other: use other time zone
"""
# NOT IMPLEMENTED SO FAR
# - delta format with +-num units[rounding],
# where unit =
# s for seconds
# M for minutes
# h for hours
# d for days
# w for weeks
# and rounding =
# d for days
# default no rounding
tzinfo = tzinfo if tzinfo is not DEFAULT else tzlocal.get_localzone()
if fmt:
rslt = datetime.strptime(datestr, fmt)
else:
if isinstance(datestr, (int, float)):
datestr = str(datestr)
datestr = datestr.strip() if datestr else datestr
if datestr in (None, '', '0', 'now'):
return datetime.now(tzinfo)
if datestr[:1] in "+-" or ',' in datestr:
return cls.strptimedelta(datestr, tzinfo)
rslt = dateutil.parser.parse(datestr)
if rslt.tzinfo is None and tzinfo:
rslt = tzinfo.localize(rslt)
return rslt
@classmethod
def parse_range(cls, rangestr=None, default_from='-1d', default_to='now'):
""" parses a time range string
a time range string is a comma separated list of a start time
and an end time
"""
if rangestr is None:
from_str = default_from
to_str = default_to
else:
from_str, to_str = [v.strip() for v in rangestr.split(',', 1)]
from_str = from_str if from_str else default_from
to_str = to_str if to_str else default_to
t_from = cls.strptime(from_str)
t_to = cls.strptime(to_str)
return t_from, t_to
class Time(DateTime):
@classmethod
def strptime(cls, datestr):
pass
class Date(DateTime):
@classmethod
def strptime(cls, datestr):
pass
def fname_to_time(fname, use_ctime=False, use_mtime=False, tz=None):
""" extracts date time from an fname
examples of supported formats:
"fnameYYYYMMDD" just a date
"fnameYYYY-MM-DD" date with separators
"fnameYYYYMMDD_HHmmss" date and time
"fnameYYYYMMDD-HHmmss" date and time
"fnameYYYYMMDD-HH-mm-ss" date and time
"fnameYYYYMMDD-ssssssssss" date and time(in seconds since epoche)
:param fname: file name to parse
:param use_ctime: if file name contains no time string use file's ctime
:param use_mtime: if file name contains no time string use file's mtime
"""
def to_timestamp(t):
""" convert a datetime object to seconds since epoch """
return (t - T_EPOCH).total_seconds()
| 2.96875 | 3 |
izzy.py | imre-kerr/better-ea | 0 | 12796063 | from __future__ import division
from ea import float_gtype
from ea import adult_selection
from ea import parent_selection
from ea import reproduction
from ea import main
from ea.ea_globals import *
import pylab
import sys
import copy
import multiprocessing as mp
def spiketrain(a, b, c, d, k,):
'''Compute a spike train according to the Izhikevich model'''
tau = 10
thresh = 35
steps = 1000
ext_input = [10 for i in xrange(steps)]
v = -60
u = 0
train = []
for i in xrange(steps):
train += [v]
if v >= thresh:
v = c
u = u + d
dv = 1/tau * (k * v**2 + 5*v + 140 - u + ext_input[i])
du = a/tau * (b*v - u)
v += dv
u += du
return train
def spiketrain_list(params):
'''Take a, b, c, d and k as a list and compute the corresponding spike train'''
return spiketrain(params[0], params[1], params[2], params[3], params[4])
def detect_spikes(spike_train):
'''Detect spikes in a spike train using a sliding window of size k'''
thresh = 0
k = 5
spikes = []
for i in xrange(len(spike_train) - k + 1):
window = spike_train[i:i+k]
if window[k//2] == max(window) and window[k//2] > thresh:
spikes += [i + k//2]
return spikes
def dist_spike_time(train1, train2):
'''Compute distance between two spike trains using the spike time distance metric'''
spikes1 = detect_spikes(train1)
spikes2 = detect_spikes(train2)
m = min(len(spikes1), len(spikes2))
n = max(len(spikes1), len(spikes2))
p = 2
dist = 0
for i in xrange(m):
dist += abs(spikes1[i] - spikes2[i])**p
dist = dist ** (1/p)
penalty = (n-m)*len(train1)
penalty = penalty / max(2*m, 1)
dist = (1/n) * (dist + penalty)
return dist
def dist_spike_interval(train1, train2):
'''Compute distance between two spike trains using the spike interval distance metric'''
spikes1 = detect_spikes(train1)
spikes2 = detect_spikes(train2)
n = max(len(spikes1), len(spikes2))
m = min(len(spikes1), len(spikes2))
p = 2
dist = sum(abs((spikes1[i] - spikes1[i-1])-(spikes2[i] - spikes2[i-1]))**p for i in xrange(1,m)) ** (1/p)
penalty = (n - m) * len(train1) / max(2*m, 1)
dist = 1/max(m-1, 1) * (dist + penalty)
return dist
def dist_waveform(train1, train2):
'''Compute distance between two spike trains using the waveform distance metric'''
m = len(train1)
p = 2
dist = 1/m * sum(abs(train1[i] - train2[i]) ** p for i in xrange(m)) ** (1/p)
return dist
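# Worked example (made-up trains, for illustration only): with train1 = [0, 1, 2]
# and train2 = [0, 3, 4], dist_waveform gives (1/3) * (0 + 4 + 4) ** 0.5 ~= 0.94.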
def fitness_test(population, target, dist):
'''Compute fitnesses based on distance to the target spike train'''
tested = population
for ind in tested:
if ind.fitness != None:
continue
distance = dist(ind.ptype, target)
if distance != 0:
ind.fitness = 1 / distance
else:
ind.fitness = float('Inf')
return tested
def fitness_test_mp(population, target, dist):
'''Compute fitnesses based on distance to the target spike train'''
pool = mp.Pool(mp.cpu_count())
tested = population
indices = []
workers = []
for i, ind in enumerate(population):
if ind.fitness == None:
indices += [i]
workers += [pool.apply_async(dist, [ind.ptype, target])]
for i, worker in enumerate(workers):
distance = worker.get()
population[indices[i]].fitness = 1 / (1 + distance)
pool.close()
pool.join()
return tested
def gen_fitness(target):
'''Generate a fitness function interactively'''
while True:
method = raw_input("Input distance metric (time/interval/waveform):\n")
if method == 'time':
return (lambda population: fitness_test_mp(population, target, dist_spike_time))
elif method == 'interval':
return (lambda population: fitness_test_mp(population, target, dist_spike_interval))
elif method == 'waveform':
return (lambda population: fitness_test_mp(population, target, dist_waveform))
else:
print "Unrecognized method: " + method
def develop(population):
'''Development function, generates spike train for each individual'''
developed = population
for ind in developed:
if ind.ptype != None:
continue
ind.ptype = spiketrain_list(ind.gtype)
return developed
def develop_mp(population):
'''Development function that makes use of multiprocessing'''
developed = population
workers = []
indices = []
pool = mp.Pool(mp.cpu_count())
for i, ind in enumerate(developed):
if ind.ptype != None:
continue
indices += [i]
workers += [pool.apply_async(spiketrain_list, [ind.gtype])]
for i, worker in enumerate(workers):
population[indices[i]].ptype = worker.get()
pool.close()
pool.join()
return developed
def visualize(generation_list, target):
'''Generate pretty pictures using pylab'''
best = []
average = []
stddev = []
average_plus_stddev = []
average_minus_stddev = []
for pop in generation_list:
best += [most_fit(pop).fitness]
average += [avg_fitness(pop)]
stddev += [fitness_stddev(pop)]
average_plus_stddev += [average[-1] + stddev[-1]]
average_minus_stddev += [average[-1] - stddev[-1]]
pylab.figure(1)
pylab.fill_between(range(len(generation_list)), average_plus_stddev, average_minus_stddev, alpha=0.2, color='b', label="Standard deviation")
pylab.plot(range(len(generation_list)), best, color='r', label='Best')
pylab.plot(range(len(generation_list)), average, color='b', label='Average with std.dev.')
pylab.title("Fitness plot - Izzy")
pylab.xlabel("Generation")
pylab.ylabel("Fitness")
pylab.legend(loc="upper left")
pylab.savefig("izzy_fitness.png")
best_index = best.index(max(best))
best_individual = most_fit(generation_list[best_index])
best_spiketrain = best_individual.ptype
print best_individual.gtype
print best_individual.fitness
pylab.figure(2)
pylab.plot(range(len(best_spiketrain)), best_spiketrain, color='r', label='Best solution')
pylab.plot(range(len(target)), target, color='blue', label='Target')
pylab.title("Spiketrain plot")
pylab.xlabel("Time - t")
pylab.ylabel("Activation level - v")
pylab.legend(loc="upper right")
pylab.savefig("izzy_spiketrains.png")
if __name__ == '__main__':
if len(sys.argv) == 1:
print "Error: No filename given"
sys.exit()
target_file = open(sys.argv[1])
target_spiketrain = [float(num) for num in target_file.read().split()]
ranges = [(0.001, 0.2), (0.01, 0.3), (-80.0, -30.0), (0.1, 10.0), (0.01, 1.0)]
popsize = int(raw_input("Input population size:\n"))
fitness_tester = gen_fitness(target_spiketrain)
adult_selector, litter_size = adult_selection.gen_adult_selection(popsize)
parent_selector = parent_selection.gen_parent_selection(litter_size)
mutate = float_gtype.gen_mutate(ranges)
crossover = float_gtype.gen_crossover()
reproducer = reproduction.gen_reproduction(mutate, crossover)
generations = int(raw_input("Input max number of generations:\n"))
fitness_goal = float(raw_input("Input fitness goal, 0 for none:\n"))
initial = [individual(gtype=float_gtype.generate(ranges), age=0) for i in xrange(popsize)]
generation_list = main.evolutionary_algorithm(initial, develop_mp, fitness_tester, adult_selector, parent_selector, reproducer, generations, fitness_goal)
visualize(generation_list, target_spiketrain)
| 2.703125 | 3 |
contrib/python/CUBRIDdb/connections.py | eido5/cubrid | 253 | 12796064 | """
This module implements connections for CUBRIDdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from CUBRIDdb.cursors import *
import types, _cubrid
class Connection(object):
"""CUBRID Database Connection Object"""
def __init__(self, *args, **kwargs):
'Create a connection to the database.'
self.charset = ''
kwargs2 = kwargs.copy()
self.charset = kwargs2.pop('charset', 'utf8')
self.connection = _cubrid.connect(*args, **kwargs2)
def __del__(self):
pass
def cursor(self, dictCursor = None):
if dictCursor:
cursorClass = DictCursor
else:
cursorClass = Cursor
return cursorClass(self)
def set_autocommit(self, value):
if not isinstance(value, bool):
raise ValueError("Parameter should be a boolean value")
if value:
switch = 'TRUE'
else:
switch = 'FALSE'
self.connection.set_autocommit(switch)
def get_autocommit(self):
if self.connection.autocommit == 'TRUE':
return True
else:
return False
autocommit = property(get_autocommit, set_autocommit, doc = "autocommit value for current Cubrid session")
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def close(self):
self.connection.close()
def escape_string(self, buf):
return self.connection.escape_string(buf)
| 3.234375 | 3 |
client.py | pereztjacob/http-server | 0 | 12796065 |
import sys
import socket
def client(message):
s = socket.socket()
host = socket.gethostname()
res = socket.gethostbyaddr("127.0.0.1")
host = res[0]
try:
s.connect((host, 12345))
except:
print("connection failed")
else:
s.send(str.encode(message))
gett = s.recv(len(message))
result = gett.decode()
if(result == message):
print('go ahead', message)
s.close()
if __name__ == "__main__":
if len(sys.argv) <= 1:
print('not good')
else:
client(sys.argv[1]) | 2.90625 | 3 |
Python/math/sum_of_digits.py | TechSpiritSS/NeoAlgo | 897 | 12796066 | # Python program to Find the Sum of Digits of a Number
def sum_of_digits(num):
# Extract each digit
# and compute their sum in 's'
s = 0
while num != 0:
s = s + (num % 10)
num = num // 10
return s
if __name__ == '__main__':
# Input the number And
# Call the function
print("Enter the number: ", end="")
n = int(input())
S = sum_of_digits(abs(n))
print("The sum of digits of the given number is {}.".format(S))
'''
Time Complexity: O(log(num)), where "num" is the given number (i.e., linear in its number of digits)
Space Complexity: O(1)
SAMPLE INPUT AND OUTPUT
SAMPLE 1
Enter the number: -12
The sum of digits of the given number is 3.
SAMPLE 2
Enter the number: 43258
The sum of digits of the given number is 22.
'''
| 4.28125 | 4 |
as2t.py | minitech/as2t | 1 | 12796067 |
import json
import tarfile
from typing import NamedTuple
_UINT32_SIZE = b'\x04\x00\x00\x00'
class FileRecord(NamedTuple):
path: str
offset: int
size: int
executable: bool
def _read_exact(f, count):
result = f.read(count)
if len(result) != count:
raise ValueError('Unexpected end of Asar file')
return result
def _expect(expectation):
if not expectation:
raise ValueError('Unexpected data in Asar file')
def _read_uint4_le(f):
return int.from_bytes(_read_exact(f, 4), 'little')
def _read_padding(f, size):
_expect(0 <= size < 4)
if size != 0:
_expect(f.read(size) == b'\0' * size)
def _flatten(path, index):
for key, value in index.items():
_expect(key and '/' not in key and key not in {'.', '..'})
if 'files' in value:
yield from _flatten(path + (key,), value['files'])
else:
raw_offset = value['offset']
raw_size = value['size']
raw_executable = value.get('executable', False)
_expect(isinstance(raw_offset, str))
_expect(isinstance(raw_size, int))
_expect(isinstance(raw_executable, bool))
offset = int(raw_offset)
_expect(offset >= 0)
_expect(raw_size >= 0)
yield FileRecord(path + (key,), offset, raw_size, raw_executable)
def transform(f, out):
# header_size header
_expect(f.read(4) == _UINT32_SIZE)
# header_size
header_pickled_size = _read_uint4_le(f) - 4
# header header
_expect(_read_uint4_le(f) == header_pickled_size)
# header header 2: return of the length prefixes
header_unpadded_size = _read_uint4_le(f)
padding_size = header_pickled_size - 4 - header_unpadded_size
header_bytes = _read_exact(f, header_unpadded_size)
header = json.loads(header_bytes)
_read_padding(f, padding_size)
offset = 0
for r in sorted(_flatten((), header['files']), key=lambda r: r.offset):
_expect(offset == r.offset)
offset = r.offset + r.size
info = tarfile.TarInfo(name='/'.join(r.path))
info.size = r.size
info.mode = 0o755 if r.executable else 0o644
out.addfile(info, f)
if __name__ == '__main__':
import sys
with tarfile.open(fileobj=sys.stdout.buffer, mode='w|') as out:
transform(sys.stdin.buffer, out)
| 2.4375 | 2 |
coole/settings.py | ahDDD/jwt-template | 1 | 12796068 | """
Django settings for coole project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.gaonengyujing.com']
HOST = '127.0.0.1:3000'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'account',
'care'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'coole.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['frontend/dist'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'coole.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'cool',  # database name
        'USER': 'root',  # database user name
        'PASSWORD': '<PASSWORD>',  # database user password
        'HOST': '127.0.0.1',
        'PORT': '5432',  # port for remote database connections
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'account.User'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication', # django-rest-framework-jwt
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
)
}
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=30),
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_ALLOW_REFRESH': True,
'JWT_RESPONSE_PAYLOAD_HANDLER': 'account.jwt.custom_jwt_response',
    'JWT_AUTH_HEADER_PREFIX': 'COOL',  # request header prefix
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# build static out
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'frontend', 'dist').replace('//', '/'),
]
MEDIA_URL = '/frontend/static/profile/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'frontend', 'static', 'profile').replace('//', '/')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp-mail.outlook.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = 'cool'
| 1.820313 | 2 |
sparse/repos/pyomeca/tutorials/setup.py | yuvipanda/mybinder.org-analytics | 1 | 12796069 | from setuptools import setup
setup(name='tutorials',
version='POC',
url='https://github.com/pyomeca/tutorials.git',
author='pyomeca',
packages=['src'],
zip_safe=False)
| 1.007813 | 1 |
troposphere/cassandra.py | compose-x/troposphere | 0 | 12796070 | <filename>troposphere/cassandra.py
# Copyright (c) 2012-2022, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, integer
from .validators.cassandra import (
validate_billingmode_mode,
validate_clusteringkeycolumn_orderby,
)
class Keyspace(AWSObject):
"""
`Keyspace <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cassandra-keyspace.html>`__
"""
resource_type = "AWS::Cassandra::Keyspace"
props: PropsDictType = {
"KeyspaceName": (str, False),
"Tags": (Tags, False),
}
class ProvisionedThroughput(AWSProperty):
"""
`ProvisionedThroughput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cassandra-table-provisionedthroughput.html>`__
"""
props: PropsDictType = {
"ReadCapacityUnits": (integer, True),
"WriteCapacityUnits": (integer, True),
}
class BillingMode(AWSProperty):
"""
`BillingMode <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cassandra-table-billingmode.html>`__
"""
props: PropsDictType = {
"Mode": (validate_billingmode_mode, True),
"ProvisionedThroughput": (ProvisionedThroughput, False),
}
class Column(AWSProperty):
"""
`Column <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cassandra-table-column.html>`__
"""
props: PropsDictType = {
"ColumnName": (str, True),
"ColumnType": (str, True),
}
class ClusteringKeyColumn(AWSProperty):
"""
`ClusteringKeyColumn <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cassandra-table-clusteringkeycolumn.html>`__
"""
props: PropsDictType = {
"Column": (Column, True),
"OrderBy": (validate_clusteringkeycolumn_orderby, False),
}
class EncryptionSpecification(AWSProperty):
"""
`EncryptionSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cassandra-table-encryptionspecification.html>`__
"""
props: PropsDictType = {
"EncryptionType": (str, True),
"KmsKeyIdentifier": (str, False),
}
class Table(AWSObject):
"""
`Table <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cassandra-table.html>`__
"""
resource_type = "AWS::Cassandra::Table"
props: PropsDictType = {
"BillingMode": (BillingMode, False),
"ClusteringKeyColumns": ([ClusteringKeyColumn], False),
"DefaultTimeToLive": (integer, False),
"EncryptionSpecification": (EncryptionSpecification, False),
"KeyspaceName": (str, True),
"PartitionKeyColumns": ([Column], True),
"PointInTimeRecoveryEnabled": (boolean, False),
"RegularColumns": ([Column], False),
"TableName": (str, False),
"Tags": (Tags, False),
}
| 1.929688 | 2 |
app/utils/game_logic.py | lik33v3n/Tower-of-God | 3 | 12796071 | import random
import json
import math
async def battle_attack(x, y, u, e, call):
if x == y:
await call.answer("❗ Противник увернулся от удара", show_alert=True)
return e.health, e.defence
else:
if e.defence <= 0:
e.health -= u.damage
return e.health, e.defence
else:
if u.damage > e.defence:
miss_dmg = u.damage - e.defence
e.health -= miss_dmg
e.defence = 0
return e.health, e.defence
else:
e.defence -= u.damage
return e.health, e.defence
async def battle_defence(x, y, u, e, call):
if x == y:
await call.answer("❗ Ты увернулся от удара", show_alert=True)
return u.health, u.defence
else:
if u.defence <= 0:
u.health -= e.damage
return u.health, u.defence
else:
if e.damage > u.defence:
miss_dmg = e.damage - u.defence
u.health -= miss_dmg
u.defence = 0
return u.health, u.defence
else:
u.defence -= e.damage
return u.health, u.defence
def power(obj, maximal=False):
if maximal is True:
hp = obj.max_health + obj.max_defence
else:
hp = obj.health + obj.defence
return hp * obj.damage
def exam_choose(user):
from app.models.examinators import exams
for i in range(len(exams)):
if user.rank == '-':
return exams[0]
elif exams[i].rank == user.rank:
try:
return exams[i + 1]
except IndexError:
return 'Максимальный ранг!'
def set_difficulty(m, u):
if m * 3 <= u:
difficulty = 'Оч. легко'
elif m * 2.5 <= u:
difficulty = 'Легко'
elif m * 2 < u:
difficulty = 'Нормально'
elif m * 1.5 < u:
difficulty = 'Сложно'
elif m < u:
difficulty = 'Очень сложно'
elif m > u * 3:
difficulty = 'Верная смерть'
elif m >= u:
difficulty = 'Невозможно'
else:
return
return difficulty
def get_xp(lvl):
"""
    Returns the total XP corresponding to the given level
"""
total_xp = int((lvl * 10) ** 1.1)
return total_xp * lvl
# def json_inv(u):
# """
# Converts string from database to list
# Example: '[3, 2]' => [3, 2]
# :param u: User
# :return: User's inventory as list
# """
# inventory = json.loads(u['inventory']) if u['inventory'] != '[]' else []
# return inventory
def item_drop(chance):
"""
:param chance: Mob's chance of drop
:return: True/False
"""
c = random.randint(1, 100)
if c <= chance:
return True
return False
def round_down(n, decimals=0):
"""
Rounds a number down to a specified number of digits.
:param decimals: Specified number of digits
:param n: Float
"""
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
def enemy_calc(u_attack, u_health, u_defence, lvl):
enemy, result = [], []
if lvl != 1:
multiplier = round_down(random.uniform(0.4, 1.1), 1)
else:
multiplier = 0.4
for stat in (u_attack, u_health, u_defence):
enemy.append(round(stat*multiplier) if stat != 0 else 0)
e_power = enemy[0]*(enemy[1]+enemy[2])
formulae = int((e_power/(lvl**1.45))*2)
result = [enemy, formulae if formulae > 1 else 2]
return result
| 2.859375 | 3 |
app/components/menu.py | TechPriestJon/ChessPython | 0 | 12796072 | <gh_stars>0
import pyglet
from .button import Button
from .border import Border
class Menu:
def __init__(self, x, y, background):
self.__x = x
self.__y = y
#self.__image = pyglet.image.load(background)
#self.__sprite = pyglet.sprite.Sprite(self.__image, x, y - self.__image.height)
self.__components = []
self.__border = Border(x -10, y + 5, background)
def render(self):
#self.__sprite.draw()
self.component_height_sum = 0
for component in self.__components:
component.render()
self.component_height_sum += component.height() + 40
self.components_width = 0
for component in self.__components:
if component.width() + 10 + 20 > self.components_width:
self.components_width = component.width() + 10 + 20
self.__border.render(self.components_width, self.component_height_sum)
#def add_button(self, text):
#if len(self.__components) > 0:
# x = self.__component[-1].x() + 10
# y = self.__components[-1].y() - (self.__components[-1].content_height + 10)
#else:
# x = self.__x + 10
# y = self.__y - 10
#button = Button(text, x, y)
#self.__components.append(button)
def add_button(self, text, background):
if len(self.__components) > 0:
x = self.__components[-1].x()
y = self.__components[-1].y() - (self.__components[-1].height() + 40)
else:
x = self.__x + 10
y = self.__y - 10
button = Button(text, x, y, background)
self.__components.append(button)
def withinBoundry(self, x, y):
x1left = self.__x
x1right = self.__x + self.__label.content_width
y1top = self.__y - self.__label.content_height
y1bottom = self.__y
return (x1right>x>x1left) and (y1bottom>y>y1top)
def on_hover(self, x, y):
for component in self.__components:
component.on_hover(x,y)
def on_click(self, x, y):
for component in self.__components:
component.on_click(x,y)
def on_release(self, x, y):
for component in self.__components:
component.on_release(x,y)
| 2.890625 | 3 |
packages/news_classifier/news_classifier/utils/load_data.py | marco-cardoso/ML-News-article-classification-architecture | 0 | 12796073 | import pandas as pd
from news_classifier.database import db
def load_data(projection: dict) -> pd.DataFrame:
"""
Load the data from the Mongo collection and transform
into a pandas dataframe
:projection: A dictionary with the fields to load from database
:return: A pandas dataframe with the data
"""
articles = db.read_articles(
projection=projection
)
return pd.DataFrame(articles)
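# Usage sketch (illustrative; the projection keys below are assumptions about the
# article schema rather than fields documented in this repository):
#
#   df = load_data({'title': 1, 'body': 1, 'category': 1})
#   print(df.head())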
| 3.234375 | 3 |
pvlib/clearsky.py | dpete2008/Sandia | 0 | 12796074 | <gh_stars>0
"""
The ``clearsky`` module contains several methods
to calculate clear sky GHI, DNI, and DHI.
"""
from __future__ import division
import os
from collections import OrderedDict
import numpy as np
import pandas as pd
from pvlib import tools
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity,
altitude=0, dni_extra=1364.):
'''
Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.
Implements the Ineichen and Perez clear sky model for global
horizontal irradiance (GHI), direct normal irradiance (DNI), and
calculates the clear-sky diffuse horizontal (DHI) component as the
difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A
report on clear sky models found the Ineichen/Perez model to have
excellent performance with a minimal input data set [3].
Default values for monthly Linke turbidity provided by SoDa [4, 5].
Parameters
-----------
apparent_zenith: numeric
Refraction corrected solar zenith angle in degrees.
airmass_absolute: numeric
Pressure corrected airmass.
linke_turbidity: numeric
Linke Turbidity.
altitude: numeric
Altitude above sea level in meters.
dni_extra: numeric
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
See also
--------
lookup_linke_turbidity
pvlib.location.Location.get_clearsky
References
----------
[1] <NAME> and <NAME>, "A New airmass independent formulation for
the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,
2002.
[2] <NAME> et. al., "A New Operational Model for Satellite-Derived
Irradiances: Description and Validation", Solar Energy, vol 73, pp.
307-317, 2002.
[3] <NAME>, <NAME>, and <NAME>, "Global Horizontal Irradiance Clear
Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained
July 17, 2012).
[5] <NAME>, et. al., "Worldwide Linke Turbidity Information", Proc.
ISES Solar World Congress, June 2003. Goteborg, Sweden.
'''
# Dan's note on the TL correction: By my reading of the publication
# on pages 151-157, Ineichen and Perez introduce (among other
# things) three things. 1) Beam model in eqn. 8, 2) new turbidity
# factor in eqn 9 and appendix A, and 3) Global horizontal model in
# eqn. 11. They do NOT appear to use the new turbidity factor (item
# 2 above) in either the beam or GHI models. The phrasing of
# appendix A seems as if there are two separate corrections, the
# first correction is used to correct the beam/GHI models, and the
# second correction is used to correct the revised turibidity
# factor. In my estimation, there is no need to correct the
# turbidity factor used in the beam/GHI models.
# Create the corrected TL for TL < 2
# TLcorr = TL;
# TLcorr(TL < 2) = TLcorr(TL < 2) - 0.25 .* (2-TLcorr(TL < 2)) .^ (0.5);
# This equation is found in Solar Energy 73, pg 311. Full ref: Perez
# et. al., Vol. 73, pp. 307-317 (2002). It is slightly different
# than the equation given in Solar Energy 73, pg 156. We used the
# equation from pg 311 because of the existence of known typos in
# the pg 156 publication (notably the fh2-(TL-1) should be fh2 *
# (TL-1)).
# The NaN handling is a little subtle. The AM input is likely to
# have NaNs that we'll want to map to 0s in the output. However, we
# want NaNs in other inputs to propagate through to the output. This
# is accomplished by judicious use and placement of np.maximum,
# np.minimum, and np.fmax
# use max so that nighttime values will result in 0s instead of
# negatives. propagates nans.
cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0)
tl = linke_turbidity
fh1 = np.exp(-altitude/8000.)
fh2 = np.exp(-altitude/1250.)
cg1 = 5.09e-05 * altitude + 0.868
cg2 = 3.92e-05 * altitude + 0.0387
ghi = (np.exp(-cg2*airmass_absolute*(fh1 + fh2*(tl - 1))) *
np.exp(0.01*airmass_absolute**1.8))
# use fmax to map airmass nans to 0s. multiply and divide by tl to
# reinsert tl nans
ghi = cg1 * dni_extra * cos_zenith * tl / tl * np.fmax(ghi, 0)
# BncI = "normal beam clear sky radiation"
b = 0.664 + 0.163/fh1
bnci = b * np.exp(-0.09 * airmass_absolute * (tl - 1))
bnci = dni_extra * np.fmax(bnci, 0)
# "empirical correction" SE 73, 157 & SE 73, 312.
bnci_2 = ((1 - (0.1 - 0.2*np.exp(-tl))/(0.1 + 0.882/fh1)) /
cos_zenith)
bnci_2 = ghi * np.fmin(np.fmax(bnci_2, 0), 1e20)
dni = np.minimum(bnci, bnci_2)
dhi = ghi - dni*cos_zenith
irrads = OrderedDict()
irrads['ghi'] = ghi
irrads['dni'] = dni
irrads['dhi'] = dhi
if isinstance(dni, pd.Series):
irrads = pd.DataFrame.from_dict(irrads)
return irrads
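# Usage sketch (not part of pvlib itself): given precomputed apparent zenith and
# pressure-corrected airmass series, the model above can be exercised roughly as
# below; the turbidity and altitude values are illustrative assumptions.
#
#   cs = ineichen(apparent_zenith, airmass_absolute, linke_turbidity=3.0,
#                 altitude=100., dni_extra=1364.)
#   ghi, dni, dhi = cs['ghi'], cs['dni'], cs['dhi']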
def lookup_linke_turbidity(time, latitude, longitude, filepath=None,
interp_turbidity=True):
"""
Look up the Linke Turibidity from the ``LinkeTurbidities.mat``
data file supplied with pvlib.
Parameters
----------
time : pandas.DatetimeIndex
latitude : float
longitude : float
filepath : string
The path to the ``.mat`` file.
interp_turbidity : bool
If ``True``, interpolates the monthly Linke turbidity values
found in ``LinkeTurbidities.mat`` to daily values.
Returns
-------
turbidity : Series
"""
# The .mat file 'LinkeTurbidities.mat' contains a single 2160 x 4320 x 12
# matrix of type uint8 called 'LinkeTurbidity'. The rows represent global
# latitudes from 90 to -90 degrees; the columns represent global longitudes
# from -180 to 180; and the depth (third dimension) represents months of
# the year from January (1) to December (12). To determine the Linke
# turbidity for a position on the Earth's surface for a given month do the
# following: LT = LinkeTurbidity(LatitudeIndex, LongitudeIndex, month).
# Note that the numbers within the matrix are 20 * Linke Turbidity,
# so divide the number from the file by 20 to get the
# turbidity.
try:
import scipy.io
except ImportError:
raise ImportError('The Linke turbidity lookup table requires scipy. ' +
'You can still use clearsky.ineichen if you ' +
'supply your own turbidities.')
if filepath is None:
pvlib_path = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.mat')
mat = scipy.io.loadmat(filepath)
linke_turbidity_table = mat['LinkeTurbidity']
latitude_index = (
np.around(_linearly_scale(latitude, 90, -90, 1, 2160))
.astype(np.int64))
longitude_index = (
np.around(_linearly_scale(longitude, -180, 180, 1, 4320))
.astype(np.int64))
g = linke_turbidity_table[latitude_index][longitude_index]
if interp_turbidity:
# Data covers 1 year.
# Assume that data corresponds to the value at
# the middle of each month.
# This means that we need to add previous Dec and next Jan
# to the array so that the interpolation will work for
# Jan 1 - Jan 15 and Dec 16 - Dec 31.
# Then we map the month value to the day of year value.
# This is approximate and could be made more accurate.
g2 = np.concatenate([[g[-1]], g, [g[0]]])
days = np.linspace(-15, 380, num=14)
linke_turbidity = pd.Series(np.interp(time.dayofyear, days, g2),
index=time)
else:
linke_turbidity = pd.DataFrame(time.month, index=time)
# apply monthly data
linke_turbidity = linke_turbidity.apply(lambda x: g[x[0]-1], axis=1)
linke_turbidity /= 20.
return linke_turbidity
def haurwitz(apparent_zenith):
'''
Determine clear sky GHI from Haurwitz model.
Implements the Haurwitz clear sky model for global horizontal
irradiance (GHI) as presented in [1, 2]. A report on clear
sky models found the Haurwitz model to have the best performance of
models which require only zenith angle [3]. Extreme care should
be taken in the interpretation of this result!
Parameters
----------
apparent_zenith : Series
The apparent (refraction corrected) sun zenith angle
in degrees.
Returns
-------
pd.Series
The modeled global horizonal irradiance in W/m^2 provided
by the Haurwitz clear-sky model.
Initial implementation of this algorithm by <NAME>.
References
----------
[1] <NAME>, "Insolation in Relation to Cloudiness and Cloud
Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945.
[2] <NAME>, "Insolation in Relation to Cloud Type," Journal of
Meteorology, vol. 3, pp. 123-124, 1946.
[3] <NAME>, <NAME>, and <NAME>, "Global Horizontal Irradiance Clear
Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
'''
cos_zenith = tools.cosd(apparent_zenith)
clearsky_ghi = 1098.0 * cos_zenith * np.exp(-0.059/cos_zenith)
clearsky_ghi[clearsky_ghi < 0] = 0
df_out = pd.DataFrame({'ghi': clearsky_ghi})
return df_out
def _linearly_scale(inputmatrix, inputmin, inputmax, outputmin, outputmax):
""" used by linke turbidity lookup function """
inputrange = inputmax - inputmin
outputrange = outputmax - outputmin
outputmatrix = (inputmatrix-inputmin) * outputrange/inputrange + outputmin
return outputmatrix
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.,
pressure=101325., dni_extra=1364.):
"""
Calculate the clear sky GHI, DNI, and DHI according to the
simplified Solis model [1]_.
Reference [1]_ describes the accuracy of the model as being 15, 20,
and 18 W/m^2 for the beam, global, and diffuse components. Reference
[2]_ provides comparisons with other clear sky models.
Parameters
----------
apparent_elevation: numeric
The apparent elevation of the sun above the horizon (deg).
aod700: numeric
The aerosol optical depth at 700 nm (unitless).
Algorithm derived for values between 0 and 0.45.
precipitable_water: numeric
The precipitable water of the atmosphere (cm).
Algorithm derived for values between 0.2 and 10 cm.
Values less than 0.2 will be assumed to be equal to 0.2.
pressure: numeric
The atmospheric pressure (Pascals).
Algorithm derived for altitudes between sea level and 7000 m,
or 101325 and 41000 Pascals.
dni_extra: numeric
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
References
----------
.. [1] <NAME>, "A broadband simplified version of the
Solis clear sky model," Solar Energy, 82, 758-762 (2008).
.. [2] <NAME>, "Validation of models that estimate the clear
sky global and beam solar irradiance," Solar Energy, 132,
332-344 (2016).
"""
p = pressure
w = precipitable_water
# algorithm fails for pw < 0.2
if np.isscalar(w):
w = 0.2 if w < 0.2 else w
else:
w = w.copy()
w[w < 0.2] = 0.2
# this algorithm is reasonably fast already, but it could be made
# faster by precalculating the powers of aod700, the log(p/p0), and
# the log(w) instead of repeating the calculations as needed in each
# function
i0p = _calc_i0p(dni_extra, w, aod700, p)
taub = _calc_taub(w, aod700, p)
b = _calc_b(w, aod700)
taug = _calc_taug(w, aod700, p)
g = _calc_g(w, aod700)
taud = _calc_taud(w, aod700, p)
d = _calc_d(w, aod700, p)
# this prevents the creation of nans at night instead of 0s
# it's also friendly to scalar and series inputs
sin_elev = np.maximum(1.e-30, np.sin(np.radians(apparent_elevation)))
dni = i0p * np.exp(-taub/sin_elev**b)
ghi = i0p * np.exp(-taug/sin_elev**g) * sin_elev
dhi = i0p * np.exp(-taud/sin_elev**d)
irrads = OrderedDict()
irrads['ghi'] = ghi
irrads['dni'] = dni
irrads['dhi'] = dhi
if isinstance(dni, pd.Series):
irrads = pd.DataFrame.from_dict(irrads)
return irrads
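# Usage sketch (illustrative only; the aerosol optical depth, precipitable water
# and pressure below are assumed example values):
#
#   cs = simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.,
#                         pressure=101325., dni_extra=1364.)
#   ghi, dni, dhi = cs['ghi'], cs['dni'], cs['dhi']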
def _calc_i0p(i0, w, aod700, p):
"""Calculate the "enhanced extraterrestrial irradiance"."""
p0 = 101325.
io0 = 1.08 * w**0.0051
i01 = 0.97 * w**0.032
i02 = 0.12 * w**0.56
i0p = i0 * (i02*aod700**2 + i01*aod700 + io0 + 0.071*np.log(p/p0))
return i0p
def _calc_taub(w, aod700, p):
"""Calculate the taub coefficient"""
p0 = 101325.
tb1 = 1.82 + 0.056*np.log(w) + 0.0071*np.log(w)**2
tb0 = 0.33 + 0.045*np.log(w) + 0.0096*np.log(w)**2
tbp = 0.0089*w + 0.13
taub = tb1*aod700 + tb0 + tbp*np.log(p/p0)
return taub
def _calc_b(w, aod700):
"""Calculate the b coefficient."""
b1 = 0.00925*aod700**2 + 0.0148*aod700 - 0.0172
b0 = -0.7565*aod700**2 + 0.5057*aod700 + 0.4557
b = b1 * np.log(w) + b0
return b
def _calc_taug(w, aod700, p):
"""Calculate the taug coefficient"""
p0 = 101325.
tg1 = 1.24 + 0.047*np.log(w) + 0.0061*np.log(w)**2
tg0 = 0.27 + 0.043*np.log(w) + 0.0090*np.log(w)**2
tgp = 0.0079*w + 0.1
taug = tg1*aod700 + tg0 + tgp*np.log(p/p0)
return taug
def _calc_g(w, aod700):
"""Calculate the g coefficient."""
g = -0.0147*np.log(w) - 0.3079*aod700**2 + 0.2846*aod700 + 0.3798
return g
def _calc_taud(w, aod700, p):
"""Calculate the taud coefficient."""
# isscalar tests needed to ensure that the arrays will have the
# right shape in the tds calculation.
# there's probably a better way to do this.
if np.isscalar(w) and np.isscalar(aod700):
w = np.array([w])
aod700 = np.array([aod700])
elif np.isscalar(w):
w = np.full_like(aod700, w)
elif np.isscalar(aod700):
aod700 = np.full_like(w, aod700)
aod700_mask = aod700 < 0.05
    aod700_mask = np.array([aod700_mask, ~aod700_mask], dtype=int)
# create tuples of coefficients for
# aod700 < 0.05, aod700 >= 0.05
td4 = 86*w - 13800, -0.21*w + 11.6
td3 = -3.11*w + 79.4, 0.27*w - 20.7
td2 = -0.23*w + 74.8, -0.134*w + 15.5
td1 = 0.092*w - 8.86, 0.0554*w - 5.71
td0 = 0.0042*w + 3.12, 0.0057*w + 2.94
tdp = -0.83*(1+aod700)**(-17.2), -0.71*(1+aod700)**(-15.0)
tds = (np.array([td0, td1, td2, td3, td4, tdp]) * aod700_mask).sum(axis=1)
p0 = 101325.
taud = (tds[4]*aod700**4 + tds[3]*aod700**3 + tds[2]*aod700**2 +
tds[1]*aod700 + tds[0] + tds[5]*np.log(p/p0))
# be polite about matching the output type to the input type(s)
if len(taud) == 1:
taud = taud[0]
return taud
def _calc_d(w, aod700, p):
"""Calculate the d coefficient."""
p0 = 101325.
dp = 1/(18 + 152*aod700)
d = -0.337*aod700**2 + 0.63*aod700 + 0.116 + dp*np.log(p/p0)
return d
| 2.34375 | 2 |
utils/utils.py | toandaominh1997/ProductDetectionShopee | 0 | 12796075 | import torch
def get_state_dict(model):
    if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
return state_dict
| 2.5 | 2 |
pylaprof/scripts/merge.py | glumia/pylaprof | 14 | 12796076 | #!/usr/bin/env python
import argparse
import sys
from collections import defaultdict
DEFAULT_OUT = "stackcollapse-merged.txt"
def merge(files, dst):
data = defaultdict(lambda: 0)
for file in files:
with open(file, "r") as fp:
for line in fp.readlines():
stack, hits = line.rsplit(" ", 1)
hits = int(hits)
data[stack] += hits
with open(dst, "w") as fp:
for stack, hits in data.items():
print(stack, hits, file=fp)
def main():
    parser = argparse.ArgumentParser(
        prog=sys.argv[0],
        description="merge multiple stackcollapses into a single one",
    )
parser.add_argument(
"files", metavar="FILE", type=str, nargs="+", help="a stackcollapse file"
)
parser.add_argument(
"-o",
"--out",
default=DEFAULT_OUT,
help=f"write resulting stackcollapse to this file (default: {DEFAULT_OUT})",
)
opts = parser.parse_args(sys.argv[1:])
merge(opts.files, opts.out)
if __name__ == "__main__":
main()
| 3 | 3 |
src/imable_backend/app.py | AlinMH/imable-backend | 0 | 12796077 | import os
from fastapi import Depends, FastAPI, Response, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi_users import FastAPIUsers
from fastapi_users.authentication import JWTAuthentication
from sqlalchemy.orm import Session
from .database.session import database, user_db
from .deps import db_session
from .models.disability import Disability as DisabilityModel
from .models.education import Education as EducationModel
from .models.experience import Experience as ExperienceModel
from .models.language import Language as LanguageModel
from .schemas.disability import Disability as DisabilitySchema
from .schemas.disability import DisabilityDB
from .schemas.education import Education as EducationSchema
from .schemas.education import EducationDB
from .schemas.experience import Experience as ExperienceSchema
from .schemas.experience import ExperienceDB
from .schemas.language import Language as LanguageSchema
from .schemas.language import LanguageDB
from .schemas.user import User, UserCreate, UserDB, UserUpdate
APP_SECRET = os.getenv("APP_SECRET")
jwt_authentication = JWTAuthentication(secret=APP_SECRET, lifetime_seconds=3600, tokenUrl="/auth/jwt/login")
app = FastAPI()
fastapi_users = FastAPIUsers(
user_db,
[jwt_authentication],
User,
UserCreate,
UserUpdate,
UserDB,
)
app.include_router(fastapi_users.get_auth_router(jwt_authentication), prefix="/auth/jwt", tags=["auth"])
app.include_router(fastapi_users.get_register_router(), prefix="/auth", tags=["auth"])
app.include_router(fastapi_users.get_reset_password_router(APP_SECRET), prefix="/auth", tags=["auth"])
app.include_router(fastapi_users.get_verify_router(APP_SECRET), prefix="/auth", tags=["auth"])
app.include_router(fastapi_users.get_users_router(), prefix="/users", tags=["users"])
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]
)
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
@app.get("/user/experience", tags=["experience"], response_model=list[ExperienceDB])
def get_user_experience(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
experiences = session.query(ExperienceModel).filter(ExperienceModel.user_id == user.id).all()
return [
ExperienceDB(
id=exp.id,
position=exp.position,
employer=exp.employer,
city=exp.city,
start_date=exp.start_date,
end_date=exp.end_date,
description=exp.description,
)
for exp in experiences
]
@app.post("/user/experience", tags=["experience"], status_code=status.HTTP_201_CREATED)
def add_user_experience(
request: ExperienceSchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
experience = ExperienceModel(**request.dict(), user_id=user.id)
session.add(experience)
session.commit()
session.refresh(experience)
@app.put("/user/experience", tags=["experience"])
def edit_user_experience(
id: int,
request: ExperienceSchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
experience = (
session.query(ExperienceModel)
.filter(ExperienceModel.user_id == user.id)
.filter(ExperienceModel.id == id)
.one_or_none()
)
if experience:
experience.position = request.position
experience.employer = request.employer
experience.city = request.city
experience.start_date = request.start_date
experience.end_date = request.end_date
experience.description = request.description
session.commit()
session.refresh(experience)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/experience", tags=["experience"])
def remove_user_experience(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(ExperienceModel)
.filter(ExperienceModel.user_id == user.id)
.filter(ExperienceModel.id == id)
.delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
@app.get("/user/education", tags=["education"], response_model=list[EducationDB])
def get_user_education(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
educations = session.query(EducationModel).filter(EducationModel.user_id == user.id).all()
return [
EducationDB(
id=edu.id,
edu_type=edu.edu_type.value,
name=edu.name,
city=edu.city,
start_date=edu.start_date,
end_date=edu.end_date,
)
for edu in educations
]
@app.post("/user/education", tags=["education"], status_code=status.HTTP_201_CREATED)
def add_user_education(
request: EducationSchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
edu = EducationModel(**request.dict(), user_id=user.id)
session.add(edu)
session.commit()
session.refresh(edu)
@app.put("/user/education", tags=["education"])
def edit_user_education(
id: int,
request: EducationSchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
education = (
session.query(EducationModel)
.filter(EducationModel.user_id == user.id)
.filter(EducationModel.id == id)
.one_or_none()
)
if education:
education.edu_type = request.edu_type
education.name = request.name
education.city = request.city
education.start_date = request.start_date
education.end_date = request.end_date
session.commit()
session.refresh(education)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/education", tags=["education"])
def remove_user_education(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(EducationModel).filter(EducationModel.user_id == user.id).filter(EducationModel.id == id).delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
@app.get("/user/language", tags=["language"], response_model=list[LanguageDB])
def get_user_language(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
languages = session.query(LanguageModel).filter(LanguageModel.user_id == user.id).all()
return [LanguageDB(id=lang.id, language=lang.language, level=lang.level.value) for lang in languages]
@app.post("/user/language", tags=["language"], status_code=status.HTTP_201_CREATED)
def add_user_language(
request: LanguageSchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
edu = LanguageModel(**request.dict(), user_id=user.id)
session.add(edu)
session.commit()
session.refresh(edu)
@app.put("/user/language", tags=["language"])
def edit_user_language(
id: int,
request: LanguageSchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
lang = (
session.query(LanguageModel)
.filter(LanguageModel.user_id == user.id)
.filter(LanguageModel.id == id)
.one_or_none()
)
if lang:
lang.level = request.level
lang.language = request.language
session.commit()
session.refresh(lang)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/language", tags=["language"])
def remove_user_language(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(LanguageModel).filter(LanguageModel.user_id == user.id).filter(LanguageModel.id == id).delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
@app.get("/user/disability", tags=["disability"], response_model=list[DisabilityDB])
def get_user_disability(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
disabilities = session.query(DisabilityModel).filter(DisabilityModel.user_id == user.id).all()
return [DisabilityDB(id=dis.id, type=dis.type.value, level=dis.level.value) for dis in disabilities]
@app.post("/user/disability", tags=["disability"], status_code=status.HTTP_201_CREATED)
def add_user_disability(
request: DisabilitySchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
edu = DisabilityModel(**request.dict(), user_id=user.id)
session.add(edu)
session.commit()
session.refresh(edu)
@app.put("/user/disability", tags=["disability"])
def edit_user_disability(
id: int,
request: DisabilitySchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
dis = (
session.query(DisabilityModel)
.filter(DisabilityModel.user_id == user.id)
.filter(DisabilityModel.id == id)
.one_or_none()
)
if dis:
dis.level = request.level
dis.type = request.type
session.commit()
session.refresh(dis)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/disability", tags=["disability"])
def remove_user_disability(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(DisabilityModel)
.filter(DisabilityModel.user_id == user.id)
.filter(DisabilityModel.id == id)
.delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
| 2.078125 | 2 |
api/tf_auth/migrations/0003_populate_email_preferences.py | prattl/teamfinder-web | 9 | 12796078 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-04-15 17:10
from __future__ import unicode_literals
from django.db import migrations
class EmailTag:
ALL = 0
UPDATES = 1
PLAYER_NOTIFICATIONS = 2
TEAM_NOTIFICATIONS = 3
CHOICES = (
(ALL, 'All'),
(UPDATES, 'Updates and New Features'),
(PLAYER_NOTIFICATIONS, 'Player Notifications'),
(TEAM_NOTIFICATIONS, 'Team Notifications'),
)
def forwards(apps, schema_editor):
TFUser = apps.get_model('tf_auth.TFUser')
UserEmailPreferences = apps.get_model('tf_auth.UserEmailPreferences')
EmailPreference = apps.get_model('tf_auth.EmailPreference')
def create_default_preferences(self):
for (option, _) in EmailTag.CHOICES:
EmailPreference.objects.create(tag=option, user_email_preferences=self)
UserEmailPreferences.create_default_preferences = create_default_preferences
for user in TFUser.objects.all():
try:
user.user_email_preferences
except UserEmailPreferences.DoesNotExist:
preferences = UserEmailPreferences.objects.create(user=user)
preferences.create_default_preferences()
class Migration(migrations.Migration):
dependencies = [
('tf_auth', '0002_auto_20170415_1821'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
| 1.953125 | 2 |
edgify/functional/linear.py | scale-lab/BitTrain | 3 | 12796079 | <gh_stars>1-10
import torch
class Linear(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias=None):
        ctx.save_for_backward(input.to_sparse(), weight.to_sparse(), bias.to_sparse() if bias is not None else None)
output = input.mm(weight.t())
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
return output
@staticmethod
def backward(ctx, grad_output):
input, weight, bias = ctx.saved_tensors
        input, weight, bias = input.to_dense(), weight.to_dense(), bias.to_dense() if bias is not None else None
grad_input = grad_weight = grad_bias = None
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(weight)
if ctx.needs_input_grad[1]:
grad_weight = grad_output.t().mm(input)
if bias is not None and ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0)
return grad_input, grad_weight, grad_bias
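# Usage sketch (not part of the original file): custom autograd Functions are
# invoked through .apply(); the tensor shapes below are illustrative assumptions.
#
#   x = torch.randn(8, 16, requires_grad=True)
#   w = torch.randn(32, 16, requires_grad=True)
#   y = Linear.apply(x, w)      # bias omitted
#   y.sum().backward()          # exercises the sparse save/restore in backward()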
| 2.390625 | 2 |
tests/test_rate.py | kalaspuff/stockholm | 15 | 12796080 | <gh_stars>10-100
import json
import pytest
import stockholm
from stockholm import ConversionError, ExchangeRate, Money, Rate
def test_rate():
assert Rate(100) == 100
assert Rate("100.50551") == ExchangeRate("100.50551")
assert str(Rate("4711.1338")) == "4711.1338"
assert Rate(100).currency is None
assert Rate(100).currency_code is None
assert Rate(100).amount == 100
assert Rate(100).value == "100.00"
assert Rate(50) < 51
assert Rate(50) > 49
assert Rate(50) > Rate(49)
assert Rate(50) + Rate(50) == 100
assert (Rate(50) + Rate(50)).__class__ is Rate
assert str(Rate(50) + Rate(50)) == "100.00"
assert repr(Rate(50) + Rate(50)) == '<stockholm.Rate: "100.00">'
assert (Rate(50) + Rate(50) + Money(50)).__class__ is Money
assert str(Rate(50) + Rate(50) + Money(50)) == "150.00"
assert repr(Rate(50) + Rate(50) + Money(50)) == '<stockholm.Money: "150.00">'
assert Rate(Money(100)) == Rate(100)
assert Rate(Money(100)).__class__ is Rate
def test_bad_rates():
with pytest.raises(ConversionError):
Rate(1, currency="EUR")
with pytest.raises(ConversionError):
Rate(Money(1, currency="SEK"))
with pytest.raises(ConversionError):
Rate(100, from_sub_units=True)
with pytest.raises(ConversionError):
Rate.from_sub_units(100)
with pytest.raises(ConversionError):
Rate(1).to_currency("SEK")
with pytest.raises(ConversionError):
Rate(1).to_sub_units()
with pytest.raises(ConversionError):
Rate(1).sub_units
def test_rate_hashable() -> None:
m = stockholm.Rate(0)
assert hash(m)
def test_rate_asdict():
assert Rate(1338).asdict() == {
"value": "1338.00",
"units": 1338,
"nanos": 0,
}
assert Rate("1338.4711").as_dict() == {
"value": "1338.4711",
"units": 1338,
"nanos": 471100000,
}
assert dict(Rate("0.123456")) == {
"value": "0.123456",
"units": 0,
"nanos": 123456000,
}
assert dict(Rate("0.1")) == {
"value": "0.10",
"units": 0,
"nanos": 100000000,
}
assert Rate(1338).keys() == ["value", "units", "nanos"]
assert Rate(1338)["units"] == 1338
assert Rate(1338)["value"] == "1338.00"
with pytest.raises(KeyError):
Rate(1338)["does_not_exist"]
def test_rate_from_dict():
d = {"value": "13384711", "units": 13384711, "nanos": 0}
assert str(Rate.from_dict(d)) == "13384711.00"
assert str(Rate(d)) == "13384711.00"
def test_rate_json():
rate = Rate("-999999999999999999.999999999")
json_string = json.dumps({"rate": rate.asdict()})
str(Rate(json.loads(json_string).get("rate"))) == "-999999999999999999.999999999"
| 2.84375 | 3 |
GLM/source/plugins/minimal_plugin.py | tomsimonart/LMPM | 1 | 12796081 | <reponame>tomsimonart/LMPM
from ..libs.pluginbase import PluginBase
class Plugin(PluginBase):
def __init__(self, start, *args):
super().__init__(start, *args)
def _plugin_info(self):
"""Required informations about the plugin
"""
self.version = "0.11.0"
self.data_dir = "minimal"
def _make_layout(self):
"""Here is where the ingredients to bake a
great plugin and webview template go
"""
pass
def _event_loop(self, event):
"""Event getter
before every _start cycle
"""
pass
def _start(self):
"""Main loop of the plugin
this includes a refresh of self.screen
"""
pass
| 2.34375 | 2 |
singlecell/util.py | johannesnicolaus/singlecell | 0 | 12796082 | """Utility functions."""
import subprocess
import logging
import os
import shutil
import stat
import itertools
from collections import OrderedDict
from pkg_resources import resource_string
import pandas as pd
from genometools.expression import ExpGeneTable
from genometools import gtf
import singlecell
_LOGGER = logging.getLogger(__name__)
def get_readable_gene_identifiers(gene_table: ExpGeneTable):
"""Return unique gene identifiers that primarily use the genes' names."""
# count occurrences for each of gene name
counts = gene_table['name'].value_counts()
gene_counts = counts.loc[gene_table['name']]
gene_ids = gene_table.index.tolist()
gene_ids = [name if c == 1 else '%s_%s' % (name, gene_ids[i])
for i, (name, c) in enumerate(gene_counts.items())]
return gene_ids
def get_edit_sequences(seq, num_edits, bases=None):
"""Return all nucleotide sequences with a given hamming distance."""
if num_edits > len(seq):
raise ValueError('Asked to make make more edits (%d) than the length '
'of the sequence (%d nt).' % (num_edits, len(seq)))
if bases is None:
bases = set('ACGT')
length = len(seq)
all_bases = [bases for i in range(num_edits)]
seq_list = [nt for nt in seq]
mismatch = []
for comb in itertools.combinations(range(length), num_edits):
for subs in itertools.product(*all_bases):
mut = seq_list[:]
valid = True
for pos, nt in zip(comb, subs):
if mut[pos] == nt:
valid = False
break
mut[pos] = nt
if valid:
mismatch.append(''.join(mut))
return sorted(mismatch)
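# Example (illustrative): all sequences at Hamming distance 1 from "AC":
#   get_edit_sequences("AC", 1) -> ['AA', 'AG', 'AT', 'CC', 'GC', 'TC']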
def concatenate_files(input_files, output_file, append=False):
write_mode = 'wb'
if append:
write_mode = 'ab'
with open(output_file, write_mode) as ofh:
for f in input_files:
with open(f, 'rb') as ifh:
shutil.copyfileobj(ifh, ofh, 16*1024*1024)
def make_file_executable(path):
"""Sets the user executable flag for a file."""
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def zcat_subproc(path):
"""Creates a subprocess for decompressing a gzip file.
TODO: docstring"""
subproc = subprocess.Popen('gunzip -c "%s"' % path, shell=True,
stdout=subprocess.PIPE)
return subproc
def get_all_kmers(k, kmer='', kmer_list=None):
"""Returns all possible k-mer sequences (for A/C/G/T alphabet).
TODO: docstring"""
if kmer_list is None:
kmer_list = []
if len(kmer) == k:
kmer_list.append(kmer)
else:
for nuc in ['A', 'C', 'G', 'T']:
var = kmer + nuc
get_all_kmers(k, var, kmer_list)
if not kmer:
return kmer_list
def get_mismatch_sequences(seq):
"""Generates all nucleotide sequences with hamming distance 1 to `seq`.
TODO: docstring"""
for pos in range(len(seq)):
for nuc in ['A', 'C', 'G', 'T']:
if nuc != seq[pos]:
mm = seq[:pos] + nuc + seq[(pos+1):]
yield mm
def get_reverse_complement(seq):
"""Returns the reverse complement of a nucleotide sequence.
TODO: docstring"""
rc = {
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
}
compseq = ''.join([rc[nuc] for nuc in seq[::-1]])
return compseq
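# Example (illustrative): get_reverse_complement("ATGC") -> "GCAT"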
def get_gene_exons(gene_table, genome_annotation_file, chunksize=10000):
"""Parse GTF file and get a dictionary of gene=>list of exon intervals.
(Only for protein-coding genes.)
TODO: docstring"""
# get gene names that are guaranteed to be unique
#gene_names = get_readable_gene_identifiers(gene_table)
# series with index = Ensembl ID, value = unique gene name
#genes = pd.Series(index=gene_table.index, data=gene_names)
# sort genes by chromosome, strand, and then position
sorted_gene_ids = sorted(
[id_ for id_ in gene_table.index],
key=lambda id_: [gene_table.loc[id_, 'chromosome'],
gene_table.loc[id_, 'position'] < 0,
abs(gene_table.loc[id_, 'position'])])
#genes = genes.loc[sorted_gene_ids]
gene_table = gene_table.loc[sorted_gene_ids]
# dictionary for holding list of intervals for each gene
gene_exons = OrderedDict([id_, []] for id_ in gene_table.index)
valid = 0
total = 0
_LOGGER.info('Parsing GTF file "%s" in chunks...', genome_annotation_file)
for i, df in enumerate(pd.read_csv(
genome_annotation_file, dtype={0: str},
sep='\t', comment='#', header=None, chunksize=chunksize)):
# select only exon entries
df_sel = df.loc[df.iloc[:, 2] == 'exon']
# extract gene IDs
gene_ids = df_sel.iloc[:, 8].apply(
lambda x: gtf.parse_attributes(x)['gene_id'])
for id_, chrom, start, end in zip(
gene_ids,
df_sel.iloc[:, 0], df_sel.iloc[:, 3], df_sel.iloc[:, 4]):
total += 1
try:
gene = gene_table.loc[id_]
except KeyError:
# this gene is not contained in the gene table
continue
gene_chrom = gene_table.loc[id_, 'chromosome']
if chrom != gene_chrom:
_LOGGER.warning('%s exon ignored (wrong chromosome: '
'%s instead of %s).',
id_, chrom, gene_chrom)
else:
valid += 1
gene_exons[id_].append([start-1, end])
_LOGGER.info('%d / %d exons from valid genes (%.1f %%).',
valid, total, 100*(valid/float(total)))
return gene_exons
def merge_intervals(intervals):
"""Merge overlapping intervals.
TODO: docstring"""
if not intervals:
return []
# sort intervals by start position
intervals = sorted(intervals, key=lambda x:x[0])
merged = []
cur = list(intervals[0])
for iv in intervals[1:]:
# interval starts inside/right after current interval
if iv[0] <= cur[1]:
if iv[1] > cur[1]: # interval ends after current interval
cur[1] = iv[1]
else:
merged.append(cur)
cur = list(iv)
merged.append(cur)
return merged
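# Example (illustrative): overlapping (or touching) intervals are merged:
#   merge_intervals([[0, 5], [3, 8], [10, 12]]) -> [[0, 8], [10, 12]]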
def get_mitochondrial_genes(species='human'):
"""Get a list of all mitochondrial genes for a given species.
"Mitochondrial genes" are defined here as all genes on the mitochondrial
chromosome.
TODO: docstring
"""
path = os.path.join(singlecell._root,
'data', 'gene_lists', 'mitochondrial_%s.tsv' % species)
with open(path) as fh:
return fh.read().split('\n')
def get_ribosomal_genes(species='human'):
"""Get a list of all ribosomal genes for a given species.
"Ribosomal genes" are defined here as all protein-coding genes whose
protein products are a structural component of the small or large ribosomal
subunit (including fusion genes).
TODO: docstring
"""
path = os.path.join(singlecell._root,
'data', 'gene_lists', 'ribosomal_%s.tsv' % species)
with open(path) as fh:
return fh.read().split('\n')
def get_plotly_js():
"""Return the plotly javascript code.
TODO: docstring
"""
# resource_string?
path = 'package_data/plotly.min.js'
return resource_string('plotly', path).decode('utf-8')
def is_empty_dir(dir_):
"""Tests whether a directory is empty.
Note: Also returns True if the directory doesn't exist.
TODO: docstring
"""
is_empty = True
try:
_, dirnames, filenames = next(os.walk(dir_))
if dirnames or filenames:
is_empty = False
except StopIteration:
pass
return is_empty
| 2.609375 | 3 |
app.py | maguowei/aweme-sign | 2 | 12796083 | import os
import frida
from flask import Flask, jsonify, request
from hook import start_hook
REMOTE_DEVICE = os.getenv('REMOTE_DEVICE', '')
app = Flask(__name__)
api = start_hook(REMOTE_DEVICE)
@app.route('/sign')
def sign():
global api
url = request.args.get('url', '')
headers = dict(request.headers)
try:
data = api.exports.sign(url, headers)
except frida.InvalidOperationError as e:
print(f'app crash: {e}')
api = start_hook(REMOTE_DEVICE)
data = api.exports.sign(url, headers)
return jsonify({
'url': url,
'headers': headers,
'sign': data,
})
if __name__ == '__main__':
app.run()
| 2.328125 | 2 |
aserializer/fields/time_fields.py | orderbird/aserializer | 0 | 12796084 | <reponame>orderbird/aserializer<filename>aserializer/fields/time_fields.py<gh_stars>0
# -*- coding: utf-8 -*-
from datetime import datetime, date, time
from aserializer.utils import py2to3
from aserializer.fields.base import BaseSerializerField, SerializerFieldValueError
from aserializer.fields import validators as v
class BaseDatetimeField(BaseSerializerField):
date_formats = ['%Y-%m-%dT%H:%M:%S.%f', ]
error_messages = {
'required': 'This field is required.',
'invalid': 'Invalid date value.',
}
def __init__(self, formats=None, serialize_to=None, *args, **kwargs):
super(BaseDatetimeField, self).__init__(*args, **kwargs)
self._date_formats = formats or self.date_formats
self._serialize_format = serialize_to
self._current_format = None
self.invalid = False
def validate(self):
if self.ignore:
return
if self.invalid:
raise SerializerFieldValueError(self._error_messages['invalid'], field_names=self.names)
if self.value in v.VALIDATORS_EMPTY_VALUES and (self.required or self.identity):
raise SerializerFieldValueError(self._error_messages['required'], field_names=self.names)
if self._is_instance(self.value):
return
_value = self.strptime(self.value, self._date_formats)
if _value is None and self.invalid:
raise SerializerFieldValueError(self._error_messages['invalid'], field_names=self.names)
def set_value(self, value):
if self._is_instance(value):
self.value = value
elif isinstance(value, py2to3.string):
self.value = self.strptime(value, self._date_formats)
self.invalid = self.value is None
def _is_instance(self, value):
return False
def strptime(self, value, formats):
for f in formats:
try:
result = datetime.strptime(value, f)
self._current_format = f
except (ValueError, TypeError):
continue
else:
return result
return None
def strftime(self, value):
if self._serialize_format:
return value.strftime(self._serialize_format)
elif self._current_format:
return value.strftime(self._current_format)
else:
return py2to3._unicode(value.isoformat())
class DatetimeField(BaseDatetimeField):
date_formats = ['%Y-%m-%dT%H:%M:%S.%f%z', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S']
error_messages = {
'required': 'This field is required.',
'invalid': 'Invalid date time value.',
}
def _is_instance(self, value):
return isinstance(value, datetime)
def _to_native(self):
if self.value in v.VALIDATORS_EMPTY_VALUES:
return None
if isinstance(self.value, datetime):
return self.strftime(self.value)
return py2to3._unicode(self.value)
def _to_python(self):
if self.value in v.VALIDATORS_EMPTY_VALUES:
return None
if isinstance(self.value, datetime):
return self.value
self.value = self.strptime(self.value, self._date_formats)
return self.value
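# Usage sketch (illustrative; relies only on the set_value/validate methods
# defined above):
#
#   field = DatetimeField()
#   field.set_value('2014-10-07T20:15:23.123456')
#   field.validate()   # raises SerializerFieldValueError for unparseable input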
class DateField(BaseDatetimeField):
date_formats = ['%Y-%m-%d', ]
error_messages = {
'required': 'This field is required.',
'invalid': 'Invalid date value.',
}
def _is_instance(self, value):
return isinstance(value, date)
def set_value(self, value):
        if isinstance(value, datetime):
            # check datetime first: a datetime is also a date instance
            self.value = value.date()
        elif self._is_instance(value):
            self.value = value
elif isinstance(value, py2to3.string):
_value = self.strptime(value, self._date_formats)
if _value is not None:
self.value = _value.date()
self.invalid = _value is None
def _to_native(self):
if self.value in v.VALIDATORS_EMPTY_VALUES:
return None
if isinstance(self.value, date):
return self.strftime(self.value)
return py2to3._unicode(self.value)
def _to_python(self):
if self.value in v.VALIDATORS_EMPTY_VALUES:
return None
if isinstance(self.value, date):
return self.value
_value = self.strptime(self.value, self._date_formats)
if _value:
self.value = _value.date()
return self.value
class TimeField(BaseDatetimeField):
date_formats = ['%H:%M:%S', ]
error_messages = {
'required': 'This field is required.',
'invalid': 'Invalid time value.',
}
def _is_instance(self, value):
return isinstance(value, time)
def set_value(self, value):
if self._is_instance(value):
self.value = value
elif isinstance(value, datetime):
self.value = value.time()
elif isinstance(value, py2to3.string):
_value = self.strptime(value, self._date_formats)
if _value is not None:
self.value = _value.time()
self.invalid = _value is None
def _to_native(self):
if self.value in v.VALIDATORS_EMPTY_VALUES:
return None
if isinstance(self.value, time):
return self.strftime(self.value)
return py2to3._unicode(self.value)
def _to_python(self):
if self.value in v.VALIDATORS_EMPTY_VALUES:
return None
if isinstance(self.value, time):
return self.value
_value = self.strptime(self.value, self._date_formats)
if _value:
self.value = _value.time()
return self.value
| 2.40625 | 2 |
_scripts/objecteditor/controller/newproduction.py | Son-Guhun/Titan-Land-Lands-of-Plenty | 12 | 12796085 | import PySimpleGUI as sg
from ..model.objectdata import ObjectData
from ..model.search import SearchableList
from ..view import newproduction
from . import get_string_unit, RACES, filter_listbox
from myconfigparser import Section
def open_window(data):
options = SearchableList()
for u in data:
unit = Section(data[u])
if 'peon' in unit['type'].lower() and u != 'e000' and u != 'udr' and 'A00J' not in unit['abilList']:
options.append('{name} [{code}]'.format(code=u, name=unit['Name'][1:-1]))
window = sg.Window('New Production', newproduction.get_layout(), default_element_size=(40, 1), grab_anywhere=False).Finalize()
window.find_element('Options').Update(sorted(options))
while True:
event, values = window.read()
if event is None:
break
elif event == 'Submit':
try:
ObjectData(data).create_production(values['Name'], get_string_unit(values['Options'][0]), RACES[values['ProdRace']])
sg.popup('Success')
except Exception as e:
sg.popup(str(e),title='Error')
filter_listbox(data, window, values, '', options)
| 2.453125 | 2 |
DallasPlayers/soft_grudger_player.py | fras2560/Competition | 0 | 12796086 | '''
@author: <NAME>
@id: 20652186
@class: CS686
@date: 2016-02-13
@note: contains a player using a soft grudge strategy
'''
from DallasPlayers.player import COOPERATE, Player, DEFECT
import unittest
class SoftGrudgerPlayer(Player):
"""
Soft Grudger Player - Co-operates until the opponent defects,
in such case opponent is punished with d,d,d,d,c,c.
"""
def __init__(self):
self.grudge = False
self.moves = []
def studentID(self):
return "20652186"
def agentName(self):
return "Soft Grudge Player"
def play(self, myHistory, oppHistory1, oppHistory2):
# are we cooperating
if self.first_move(oppHistory1, oppHistory2):
self.grudge = False
self.moves = []
move = COOPERATE
else:
if not self.grudge:
# lets work together
move = COOPERATE
if oppHistory1[-1] == DEFECT or oppHistory2[-1] == DEFECT:
# someone has betrayed us, now have a grudge
move = DEFECT
self.grudge = True
self.moves = 2 * [COOPERATE] + 3 * [DEFECT]
else:
# still have a grudge
move = self.moves.pop()
if len(self.moves) == 0:
# can move on now, no more grudge
self.grudge = False
return move
class TestPlayer(unittest.TestCase):
def setUp(self):
self.player = SoftGrudgerPlayer()
def testPlay(self):
for x in range(0, 5):
# no grudge everyone gets along
move = self.player.play(x * [COOPERATE],
x * [COOPERATE],
x * [COOPERATE])
self.assertEqual(move, COOPERATE)
self.assertEqual(self.player.grudge, False)
# now test the grudge
moves = 2 * [COOPERATE] + 3 * [DEFECT]
# HERE COMES THE GRUDGE
move = self.player.play([COOPERATE], [DEFECT], [COOPERATE])
self.assertEqual(move, DEFECT)
self.assertEqual(self.player.grudge, True)
# grudge it out
while len(moves) > 0:
move = self.player.play([COOPERATE], [DEFECT], [COOPERATE])
self.assertEqual(move, moves.pop())
# grudge should be gone
self.assertEqual(self.player.grudge, False)
# now back to life being good
for x in range(0, 5):
# no grudge everyone gets along
move = self.player.play(x * [COOPERATE],
x * [COOPERATE],
x * [COOPERATE])
self.assertEqual(move, COOPERATE)
self.assertEqual(self.player.grudge, False)
| 3.390625 | 3 |
beats/migrations/0003_auto_20210809_1938.py | cybrvybe/FactorBeats-Platform | 0 | 12796087 | # Generated by Django 3.2.3 on 2021-08-10 02:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('artists', '0002_artist_bio'),
('beats', '0002_instrumental_img_file'),
]
operations = [
migrations.AlterField(
model_name='instrumental',
name='producer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='artists.artist'),
),
migrations.AlterField(
model_name='instrumentalcollection',
name='instrumentals',
field=models.ManyToManyField(blank=True, related_name='_beats_instrumentalcollection_instrumentals_+', to='beats.Instrumental'),
),
]
| 1.523438 | 2 |
autodiscover/util/helper.py | macbryc/IX-DiscoveryTools | 3 | 12796088 | <reponame>macbryc/IX-DiscoveryTools
#Copyright 2021, Battelle Energy Alliance, LLC
from uuid import uuid4
from stix2.datastore import Filter, FilterSet
from stix2 import Software, Process, IPv4Address, Infrastructure, Relationship, CustomExtension, properties
import logging
import re
def gen_uuid(string):
return f'{string}--{uuid4()}'
def get_rels(stix_loader, obj, direction='in', filters=None):
fs = FilterSet()
if not filters is None:
if type(filters) == list:
for f in filters:
fs.add(f)
else:
fs.add(filters)
if direction == 'in':
f = Filter('target_ref', '=', obj.id)
        fs.add(f)
elif direction == 'out':
f = Filter('source_ref', '=', obj.id)
        fs.add(f)
else:
logging.error(f'Unexpected direction passed to get_rels: {direction}')
return stix_loader.ms_source.query(fs)
def get_connected_objs(stix_loader, obj, direction='in', obj_type=None):
f = None
l = []
if not obj_type is None:
if direction == 'in':
f = Filter('source_ref', 'contains', obj_type)
elif direction == 'out':
f = Filter('target_ref', 'contains', obj_type)
rels = get_rels(stix_loader, obj, direction=direction, filters=f)
for rel in rels:
if direction == 'in':
l.append(stix_loader.ms_source.get(rel.source_ref))
elif direction == 'out':
l.append(stix_loader.ms_source.get(rel.target_ref))
return l
def get_connected_obj(stix_loader, obj, direction='in', obj_type=None):
objs = get_connected_objs(stix_loader, obj, direction=direction, obj_type=obj_type)
if len(objs) < 1:
return None
else:
return objs[0]
def get_infrastructure_by_ip(stix_loader, ip):
ip_obj = stix_loader.ms_source.query(query=Filter('value', '=', ip))[0]
if type(ip_obj) == list and len(ip_obj) != 1:
return None
elif ip_obj is None:
return None
else:
return get_connected_obj(stix_loader, ip_obj, direction='in', obj_type='infrastructure')
def multi_filt(op='=', **kwargs):
fs = FilterSet()
for key in kwargs:
if key == 'op':
continue
fs.add(Filter(key, op , kwargs[key]))
return fs
#TODO HELPER FUNCTION (param = class (software, process,etc), dictionary)
#TODO: returns created object (extensions:())
#TODO: dict.keys(startswith(x_)) key.add +'_inl'
def fix_stix(SDOType, stixdict, sdostring):
'''
Allows us to fix our dictionary every time we create STIX Objects to have all custom properties in an extensions list
'''
newList = stixdict.copy()
extensions = {}
for key, value in newList.items():
if key.startswith('x_'):
addString = key + '_inl'
logging.debug(f'type of str: {type(addString)}')
extensions[addString] = value
stixdict.pop(key)
stixdict['extensions'] = extensions
# print(stixdict)
# id = ''
# print('our type: ', type(SDOType))
# if sdostring == 'Software':
# id = gen_uuid('software')
# elif sdostring == 'Infrastructure':
# id = gen_uuid('infrastructure')
# elif SDOType == 'Process':
# id = gen_uuid('process')
# else:
# print('SDO TYPE NOT INF/Software/PROCESS')
# print(id)
if 'id' not in stixdict.keys():
stixdict['id'] = gen_uuid(sdostring)
if 'allow_custom' not in stixdict.keys():
stixdict['allow_custom'] = True
if 'spec_version' not in stixdict.keys():
stixdict['spec_version'] = '2.1'
s = SDOType(**stixdict)
return s
#Get infra connected to ip:
# - get ip by value
# - get connected by type (infra)
# - get connected by type, port, protocol, service
def get_objects(filt, stix_loader):
objs = stix_loader.ms_source.query(filt)
if len(objs) > 0 :
return objs
else:
return None
def get_object(filt, stix_loader):
objs = get_objects(filt, stix_loader)
if objs is None:
return None
elif len(objs) == 1:
return objs[0]
elif len(objs) > 1:
logging.error(f'{filt} object matched multiple objects! This could cause unexpected behavior!')
return objs[0]
def get_related_multi(obj, filt, stix_loader):
objs = stix_loader.ms.related_to(obj, filters=filt)
if len(objs) > 0:
return objs
else:
return None
def get_related_single(obj, filt, stix_loader):
objs = get_related_multi(obj, filt, stix_loader)
if objs is None:
return None
elif len(objs) == 1:
return objs[0]
elif len(objs) > 1:
logging.error(f'{filt} object matched multiple objects! This could cause unexpected behavior!')
return objs[0]
def get_object_or_create(ip_addr, port, protocol, service, stix_loader):
#need to go from ip -> infra -> software (if any link is missing we need to create that link)
# self.ms_source = self.ms.source
# self.ms_sink = self.ms.sink
# stix_loader.ms_source
ret_objs = []
ip = get_object(multi_filt(type='ipv4-addr', value=ip_addr), stix_loader)
if ip is None:
ip = IPv4Address(value=ip_addr)
ret_objs.append(ip)
infra = get_related_single(ip, multi_filt(type='infrastructure'), stix_loader)
print(f'get_related_single_infra: {infra}')
if infra is None:
infra = Infrastructure(name=ip.value)
rel = Relationship(source_ref=infra, relationship_type='has', target_ref=ip)
ret_objs.extend([infra, rel])
software = get_related_single(ip, multi_filt(type='software', x_port=port, x_protocol=protocol), stix_loader)
if software is None:
software = Software(name=f'{service if service else f"{port}/{protocol}"} Server',
x_port=port, x_protocol=protocol, x_service=service, allow_custom=True, id=gen_uuid('software'))
rel = Relationship(source_ref=infra, relationship_type='has', target_ref=software)
# stix_loader.merge([software, rel])
ret_objs.extend([software, rel])
# ret_objs.extend([software, rel])
return (software, ret_objs)
def get_stix_attr(obj, attr_string):
if hasattr(obj, attr_string):
return getattr(obj, attr_string)
elif hasattr(obj, 'extensions'):
if attr_string in obj.extensions:
return obj.extensions[attr_string]
return None
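

# Minimal usage sketch (illustrative values, not part of the original module):
# gen_uuid builds a STIX 2.1 style identifier and multi_filt builds the FilterSet
# that helpers such as get_object()/get_objects() pass on to a stix2 data source.
if __name__ == '__main__':
    print(gen_uuid('software'))
    example_filters = multi_filt(type='software', x_port=80, x_protocol='tcp')
    print(example_filters)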
| 2.015625 | 2 |
examples/dataPipe.py | kirillovmr/python-pipeline | 2 | 12796089 | import os
import sys
sys.path.append(os.path.join(sys.path[0], '../'))
from smart_pipeline import Pipeline
data = [1,2,3,4,5]
# Define a data function
def onlyOdd(item):
return False if item%2==0 else True
pl = Pipeline()
# Adds function into pipeline
pl.addDataPipe(onlyOdd)
res = pl(data)
for item in res:
print(item) | 2.734375 | 3 |
y2_test_feed_tensor.py | ZhengDeQuan/AAA | 0 | 12796090 | import tensorflow as tf
batch_size = 4
feature_num = 3
csv1 = [
"harden|james|curry",
"wrestbrook|harden|durant",
"paul|towns",
]
csv2 = [
"curry",
"wrestbrook|harden|durant",
"paul|towns",
]
csv3 = [
"harden|james|curry",
"durant",
"paul|towns",
]
csv4 = [
"wrestbrook|harden|durant",
"wrestbrook|harden|durant",
"wrestbrook|harden|durant"
]
csv_s= [csv1,csv2,csv3,csv4]
X = tf.placeholder(shape=[None,feature_num],dtype=tf.string)
one_feature = tf.contrib.layers.sparse_column_with_hash_bucket(
column_name="zhengquan_test",
hash_bucket_size=10,
combiner="sum",
dtype=tf.string
# dtype=tf.dtypes.int32
)
res = tf.contrib.layers.embedding_column(one_feature,
# initializer=my_initializer,
combiner="mean",
dimension=3)
# Besides the approach below, tf.unstack can also be used
# for i in range(batch_size):
# for j in range(feature_num):
# one_feature = X[i][j]
# one_feature = tf.reshape(one_feature,shape=[1])
# split_tag = tf.string_split(one_feature, "|")
# one_sparse = tf.SparseTensor(
# indices=split_tag.indices,
# values= split_tag.values,
# dense_shape=split_tag.dense_shape
# )
#
# current_mapping = {'zhengquan_test': one_sparse}
# one_feature_embedding_res = tf.feature_column.input_layer(current_mapping, res)
# #[[ 0.08187684, 0.22063671, -0.16549297]]
# Using unstack also proved workable, but the placeholder's first dimension cannot be None; it must be a concrete value, otherwise unstack cannot resolve it
# exp_X = tf.expand_dims(X,axis=-1)
# example_list = tf.unstack(exp_X,axis = 0)
# for one_example in example_list:
# features = tf.unstack(one_example,axis = 0)
# feature = features[0]
# for one_feature in features:
# # one_feature = tf.reshape(one_feature,shape=[1])
# split_tag = tf.string_split(one_feature, "|")
# one_sparse = tf.SparseTensor(
# indices=split_tag.indices,
# values= split_tag.values,
# dense_shape=split_tag.dense_shape
# )
#
# current_mapping = {'zhengquan_test': one_sparse}
# one_feature_embedding_res = tf.feature_column.input_layer(current_mapping, res)
#[[-0.10367388, 0.25915673, -0.00741819]]
def my_function(one_example):
features = tf.unstack(one_example,axis = 0)
for one_feature in features:
split_tag = tf.string_split(one_feature, "|")
one_sparse = tf.SparseTensor(
indices=split_tag.indices,
values= split_tag.values,
dense_shape=split_tag.dense_shape
)
current_mapping = {'zhengquan_test': one_sparse}
one_feature_embedding_res = tf.feature_column.input_layer(current_mapping, res)
return one_feature_embedding_res
exp_X = tf.expand_dims(X,axis=-1)
res = tf.map_fn(fn=my_function,elems=exp_X,dtype=tf.float32)
print(tf.shape(res))
import pdb
pdb.set_trace()
# res_seq = tf.squeeze(res,squeeze_dims=[-1])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess_res = sess.run([res],feed_dict={X:csv_s})
print(type(sess_res))
print(sess_res)
| 2.421875 | 2 |
products/migrations/0008_auto_20201019_1155.py | akshaynot/farmedorganic | 0 | 12796091 | <reponame>akshaynot/farmedorganic
# Generated by Django 3.1.2 on 2020-10-19 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0007_product_detail'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='images',
),
migrations.AddField(
model_name='product',
name='image',
field=models.FileField(blank=True, upload_to='Products'),
),
]
| 1.554688 | 2 |
python/experiments/feedback/bcipy_psd_explore.py | oken-cognitive-neuroscience-lab/scripts | 0 | 12796092 | <filename>python/experiments/feedback/bcipy_psd_explore.py
import csv
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from bcipy.helpers.load import read_data_csv
from bcipy.signal.process.filter import bandpass, notch, downsample
from bcipy.helpers.task import trial_reshaper
from bcipy.helpers.load import load_experimental_data
from bcipy.helpers.triggers import trigger_decoder
from bcipy.helpers.acquisition import (
analysis_channels, analysis_channel_names_by_pos)
from bcipy.signal.process.decomposition.psd import (
power_spectral_density, PSD_TYPE)
# BciPy Constants
# [TODO] We can load some of these from the session parameter files
MODE = 'calibration'
TRIGGERS_FN = 'triggers.txt'
RAW_DATA_FN = 'raw_data.csv'
CSV_EXPORT_NAME = 'feedback_exports.csv'
# Parameters
TRIAL_LENGTH = 2.5
NUMBER_OF_STIMULI_PER_SEQUENCE = 10
DOWNSAMPLE_RATE = 2
NOTCH_FREQ = 60
FILTER_HP = 2
FILTER_LP = 40
# Quantile Exports
QUANTILES = [15, 30, 45, 70]
# PSD Parameters
"""Define bands here and add to PSD_TO_DETERMINE list."""
ALPHA = ('alpha', [8, 11.99])
ALPHA_SUB_1 = ('alpha_sub_1', [7.00, 9.00])
ALPHA_SUB_2 = ('alpha_sub_2', [11.5, 12.5])
BETA = ('beta', [12, 25])
THETA = ('theta', [4, 7.99])
THETA_SUB_1 = ('theta_sub_1', [3.00, 5.00])
DELTA = ('delta', [1, 3.99])
DELTA_SUB_1 = ('delta_sub_1', [3.20, 4.00])
# append desired psd defined above to the list to use
PSD_TO_DETERMINE = [ALPHA, ALPHA_SUB_1, ALPHA_SUB_2, BETA, THETA, THETA_SUB_1, DELTA]
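
# Illustrative sketch (defined but not appended, so the configured bands above are unchanged):
# a band is simply a (name, [low_hz, high_hz]) tuple, e.g. a hypothetical gamma band would be
# declared like this and added to PSD_TO_DETERMINE to be included in the exports below.
GAMMA_EXAMPLE = ('gamma_example', [25.0, 40.0])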
# Initialize exports
exports = {}
for name, band in PSD_TO_DETERMINE:
exports[name] = {}
exports[name]['data'] = []
def psd_explore(
data_folder,
channel_index,
plot=True,
relative=False,
reverse=False,
export_to_csv=False):
"""PSD Explore.
This assumes use with VR300 for the AD Feedback experiment.
data_folder: path to a BciPy data folder with raw data and triggers
channel_index: channel to use for PSD calculation
plot: whether or not to plot the filtered data and psd spectrum
relative: whether or not to export relative PSD output
reverse: whether the level estimations should be descending (default; ie band increases with attention) or ascending
export_to_csv: whether or not to write output to csv
returns: average, standard deviation
"""
# construct the relevant data paths
trigger_path = f'{data_folder}/{TRIGGERS_FN}'
raw_data_path = f'{data_folder}/{RAW_DATA_FN}'
# print helpful information to console
print('CONFIGURATION:\n'
f'Trial length: {TRIAL_LENGTH} \n'
f'Downsample rate: {DOWNSAMPLE_RATE} \n'
f'Notch Frequency: {NOTCH_FREQ} \n'
f'Bandpass Range: [{FILTER_HP}-{FILTER_LP}] \n'
f'Trigger Path: [{trigger_path}] \n'
f'Raw Data Path: [{raw_data_path}] \n')
# process and get the data from csv
raw_data, _, channels, type_amp, fs = read_data_csv(raw_data_path)
# print helpful information to console
print(
'DEVICE INFO:'
f'\nChannels loaded: {channels}. \n'
f'Using channel: {channels[channel_index]} \n'
f'Using Device: {type_amp} - {fs} samples/sec \n')
# filter the data
filtered_data, sampling_rate_post_filter = filter_data(
raw_data, fs, DOWNSAMPLE_RATE, NOTCH_FREQ)
# decode triggers and get a channel map
_, trigger_targetness, trigger_timing, offset = trigger_decoder(
mode=MODE,
trigger_path=trigger_path)
# add a static offset of 100 ms [TODO load from parameters]
offset = offset + .1
# reshape the data
x, y, num_seq, _ = trial_reshaper(
trigger_targetness,
trigger_timing,
filtered_data,
mode=MODE,
fs=fs,
k=DOWNSAMPLE_RATE,
offset=offset,
channel_map=analysis_channels(channels, type_amp),
trial_length=TRIAL_LENGTH)
data = create_sequence_exports(
x,
num_seq * 10,
channel_index,
TRIAL_LENGTH,
sampling_rate_post_filter,
plot,
relative,
reverse)
# plot raw data for the trial index given
if plot:
time = np.arange(
data.size) / sampling_rate_post_filter
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
plt.plot(time, data, lw=1.5, color='k')
plt.xlabel('Time (seconds)')
plt.ylabel('Voltage')
plt.xlim([time.min(), time.max()])
plt.title('Raw Data Plot')
sns.set(font_scale=1.2)
sns.despine()
plt.show()
if export_to_csv:
export_data_to_csv(exports)
return exports
def create_sequence_exports(
data,
num_trials,
channel_index,
trial_length,
sampling_rate,
plot,
relative,
reverse,
step=NUMBER_OF_STIMULI_PER_SEQUENCE):
"""Create Sequence exports.
Loops through segmented data and calculates the PSD sequence data.
data: reshaped trial data ['first', 'second']
num_trials: total number of sequences in task (ie 50, 100)
channel_index: channel we're interested in extracting
trial_length: length of reshaping
sampling_rate: data sampling rate of EEG
plot: whether or not to plot the data for exploration
relative: whether this is a relative or absolute calculation of PSD
reverse: whether the level estimations should be descending (default; ie band increases with attention) or ascending
step: how many stimuli between each trial [TODO: this could be taken from parameters from the session]
* we want the PSD from the first stimuli in trial to the trial_length
"""
index = 0
frames = int(num_trials / step)
tmp = []
# Calculate PSD for every sequence (called frame here)
for _ in range(frames):
process_data = data[channel_index][index]
tmp.append(process_data)
index += step
for name, band in PSD_TO_DETERMINE:
exports[name]['data'].append(
power_spectral_density(
process_data,
band,
sampling_rate=sampling_rate,
window_length=TRIAL_LENGTH,
method=PSD_TYPE.WELCH,
plot=False,
relative=relative))
# calculate the fields of interest for export
for name, band in PSD_TO_DETERMINE:
stats_data = np.array(exports[name]['data'])
exports[name]['average'] = np.mean(stats_data, axis=0)
exports[name]['stdev'] = np.std(stats_data, axis=0)
exports[name]['range'] = [
np.min(stats_data, axis=0), np.max(stats_data, axis=0)
]
if reverse:
QUANTILES.reverse()
exports[name]['quantiles'] = np.percentile(stats_data, QUANTILES)
del exports[name]['data']
# calculate a raw data average for plotting purposes only
average = np.mean(np.array(tmp), axis=0)
if plot:
power_spectral_density(
average,
[1, 2],
sampling_rate=sampling_rate,
window_length=TRIAL_LENGTH,
method=PSD_TYPE.WELCH,
plot=plot,
relative=relative)
return average
def filter_data(raw_data, fs, downsample_rate, notch_filter_freqency):
"""Filter Data.
Using the same procedure as AD supplement, filter and downsample the data
for futher processing.
Return: Filtered data & sampling rate
"""
notch_filterted_data = notch.notch_filter(
raw_data, fs, notch_filter_freqency)
bandpass_filtered_data = bandpass.butter_bandpass_filter(
notch_filterted_data, FILTER_HP, FILTER_LP, fs, order=2)
filtered_data = downsample.downsample(
bandpass_filtered_data, factor=downsample_rate)
sampling_rate_post_filter = fs / downsample_rate
return filtered_data, sampling_rate_post_filter
def export_data_to_csv(exports):
with open(CSV_EXPORT_NAME, 'w') as feedback_file:
writer = csv.writer(
feedback_file,
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
# write headers
writer.writerow(
['',
'Average',
'Standard Deviation',
'Range [min max]',
f'Quantiles {QUANTILES}'])
# write PSD data
for name, _ in PSD_TO_DETERMINE:
writer.writerow(
[name,
exports[name]['average'],
exports[name]['stdev'],
exports[name]['range'],
exports[name]['quantiles']]
)
if __name__ == '__main__':
import argparse
# Define necessary command line arguments
parser = argparse.ArgumentParser(description='Explore PSD.')
parser.add_argument('-channel', '--channel',
default=6,
type=int,
help='channel Index to compute PSD')
parser.add_argument('-plot', '--plot',
default=False,
type=lambda x: (str(x).lower() == 'true'),
help='Whether or not to plot raw data and PSD')
parser.add_argument('-relative', '--relative',
default=False,
type=lambda x: (str(x).lower() == 'true'),
help='Whether or not to use relative band calculation for PSD')
parser.add_argument('-path', '--path',
default=False,
type=str,
help='Path to BciPy data directory of interest.')
parser.add_argument('-feedback_desc', '--feedback_desc',
default=False,
type=lambda x: (str(x).lower() == 'true'),
help='By default, PSD are assumed desceding in ' \
'nature; ie PSD increases with attention. ' \
'Use this flag to reverse that direction. ' \
'Used to calculate appropriate cutoffs for feedback levels ')
parser.add_argument('-export', '--export',
required=False,
default=False,
type=str,
                        help='Whether or not to export the computed statistics to a csv file.')
# parse and define the command line arguments.
args = parser.parse_args()
data_folder = args.path
# Note: this doesn't work on Mac for some reason... supply the path in the console
if not data_folder:
data_folder = load_experimental_data()
channel_index = args.channel
plot = args.plot
relative_calculation = args.relative
reverse = args.feedback_desc
export_to_csv = args.export
# ignore some pandas warnings, run the psd explore function and print results
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# explore!
psd = psd_explore(
data_folder,
channel_index,
plot=plot,
relative=relative_calculation,
reverse=reverse,
export_to_csv=export_to_csv)
print(
'RESULTS:\n'
f'{psd}')
| 2.390625 | 2 |
src/lightmlrestapi/testing/__init__.py | sdpython/lightmlrestapi | 0 | 12796093 | """
@file
@brief Shortcuts to *testing*.
"""
from .dummy_applications import dummy_application, dummy_application_image
from .dummy_applications import dummy_application_fct, dummy_application_neighbors, dummy_application_neighbors_image
from .dummy_applications import dummy_application_auth, dummy_mlstorage
| 1.070313 | 1 |
mnotes/cmd_index.py | mattj23/m-notes | 0 | 12796094 | """
Commands for index operations
"""
import os
import re
import sys
import time
from typing import List
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime as DateTime
import click
from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data
from mnotes.notes.index import NoteIndex
from mnotes.notes.markdown_notes import NoteInfo
valid_chars_pattern = re.compile(r"[^a-z0-9\-]")
@click.group(name="index", invoke_without_command=True)
@click.pass_context
@pass_env
def main(env: MnoteEnvironment, ctx: click.core.Context):
""" Manage M-Notes' global directory of indices. Indices represent folders containing indexed notes."""
style = env.config.styles
env.global_index.load_all()
echo_line(" * index mode")
if len(env.global_index.indices) == 0 and ctx.invoked_subcommand != "create":
echo_line(" * there are ", style.warning("no indices"), " in the global directory")
echo_line(" -> to create an index navigate to the folder containing notes you want to add")
echo_line(" -> then use the 'mnote index create <name>' command")
sys.exit()
else:
echo_line(" * there are ", style.visible(f"{len(env.global_index.indices)}"),
" indices in the global directory")
if ctx.invoked_subcommand is None:
# Update the global index
start_time = time.time()
env.global_index.load_all()
end_time = time.time()
click.echo(style.success(f" * updated all indices, took {end_time - start_time:0.2f} seconds"))
click.echo()
echo_line(click.style("Current Indices in Global Directory:", bold=True))
for index in env.global_index.indices.values():
echo_line(" * ", style.visible(index.name), f" ({len(index.notes)} notes): {index.path}")
echo_line()
echo_line(style.visible(" (use 'mnote index reload' to rebuild with checksums)"))
@main.command(name="zip")
@click.argument("names", type=str, nargs=-1)
@pass_env
def zip_cmd(env: MnoteEnvironment, names: List[str]):
"""
Archive an index or multiple/all indices in zip files
Creates archives of the markdown notes (text files only, no resources) of the indices by compressing them into zip
files. The files will be named with the index name and the current date and time and saved in the current
directory. This command can be run from anywhere on the machine, it does not need to be run from inside any of the
index folders.
You can specify a single index by name, several indices, or leave the 'name' argument blank in order to back up
all of them at once.
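
    Examples (index names are illustrative):

        mnote index zip my-notes          # archive a single index
        mnote index zip my-notes work     # archive several indices
        mnote index zip                   # archive every index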
"""
style = env.config.styles
click.echo()
failed = False
for index_name in names:
if index_name not in env.global_index.indices:
echo_line(style.fail(f"There is no index named '{index_name}' to archive!"))
failed = True
if failed:
return
if not names:
echo_line(style.visible("No index(s) specified, so zipping all of them..."))
names = [i.name for i in env.global_index.indices.values()]
start = time.time()
for name in names:
echo_line()
echo_line(click.style("Zipping index ", bold=True), style.visible(f"'{name}'", bold=True))
index: NoteIndex = env.global_index.indices[name]
now = DateTime.now().strftime("%Y-%m-%d-%H-%M-%S")
output_name = os.path.join(env.cwd, f"{name}-{now}.zip")
with ZipFile(output_name, "w") as zip_handle:
with click.progressbar(index.notes.values()) as notes:
for note in notes:
note: NoteInfo
zip_handle.write(note.file_path,
arcname=os.path.relpath(note.file_path, start=index.path),
compress_type=ZIP_DEFLATED)
end = time.time()
echo_line()
echo_line(style.success(f"Operation completed in {end - start:0.1f} seconds"))
@main.command(name="reload")
@pass_env
def reload(env: MnoteEnvironment):
"""
Rebuild all indices using checksums.
M-Notes by default will verify the integrity of its cached data by looking at the file size and last modified
timestamp to guess at whether the file has changed since it was last read (this is similar to the method which
rsync uses) However, it's up to the file system to report these values accurately, so this option uses the SHA1
checksum to rebuild the indicies. It's faster than re-reading all of the files, but slower than simply looking at
the file size and timestamps.
"""
style = env.config.styles
start_time = time.time()
env.global_index.load_all(True)
end_time = time.time()
click.echo(style.success(f"Updated all indices with checksums, took {end_time - start_time:0.2f} seconds"))
@main.command(name="delete")
@click.argument("name", type=str)
@pass_env
def delete(env: MnoteEnvironment, name: str):
""" Delete an index from the global directory. """
style = env.config.styles
click.echo()
if name not in env.global_index.indices:
echo_line(style.fail(f"There is no index named '{name}' to remove!"))
return
# If we got to this point we can create the index!
click.echo()
echo_line(style.warning(f"You are about to remove the index named '{name}'", bold=True))
echo_line(style.warning(f"which maps to the folder '{env.cwd}'", bold=True))
click.echo()
if click.confirm(click.style(f"Apply this change?", bold=True)):
click.echo(style.success("User deleted index"))
del env.global_index.index_directory[name]
save_global_index_data(env.global_index)
else:
click.echo(style.fail("User rejected index creation"))
@main.command(name="create")
@click.argument("name", type=str)
@pass_env
def create(env: MnoteEnvironment, name: str):
""" Create a new index in the global directory with the specified name. """
style = env.config.styles
click.echo()
# Check if this folder is already part of another index
if env.index_of_cwd is not None:
echo_line(style.fail(f"The current working directory is already part of an index named "
f"'{env.index_of_cwd.name}'. Indexes cannot be contained by other indexes"))
return
# Check if this index would contain another index
contained = env.indices_in_cwd
if contained:
echo_line(style.fail("The following already-existing indices are subdirectories of the current working "
"directory. You can't create an index here because indexes cannot be contained by other "
"indexes."))
for index in contained:
echo_line(f" * {index.name}: {index.path}")
return
# Check if the name given is valid
if valid_chars_pattern.findall(name):
echo_line("The name ", style.fail(f"'{name}'"), " contains invalid characters for an index name")
click.echo()
echo_line("Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter "
"names are faster to type. Think of the index name as a nickname or an alias for the folder you"
"are adding to the global directory.")
return
if name in env.global_index.indices:
echo_line("The name ", style.fail(f"'{name}'"), " is already used by another index.")
click.echo()
echo_line("Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter "
"names are faster to type. Think of the index name as a nickname or an alias for the folder you"
"are adding to the global directory.")
# Check for conflicts before allowing M-Notes to add this as an index
conflicts = env.global_index.find_conflicts(env.cwd)
if conflicts:
echo_line(style.fail("There are ID conflicts which would be created if this folder is merged into the global"
"directory as it is."))
for id_, conflict in conflicts.items():
click.echo()
echo_line(style.warning(f"Conflict for ID {id_}:", bold=True))
for e in conflict.existing:
echo_line(style.visible(f" * Already in global: {e.file_path}"))
for c in conflict.conflicting:
echo_line(style.warning(f" * In this directory: {c.file_path}"))
return
# If we got to this point we can create the index!
click.echo()
echo_line(style.warning(f"You are about to create an index named '{name}'", bold=True))
echo_line(style.warning(f"which will be located in the folder '{env.cwd}'", bold=True))
click.echo()
if click.confirm(click.style(f"Apply this change?", bold=True)):
click.echo(style.success("User created index"))
env.global_index.index_directory[name] = {"path": env.cwd}
save_global_index_data(env.global_index)
else:
click.echo(style.fail("User rejected index creation"))
| 2.96875 | 3 |
src/evaluate_attn_maps.py | MkuuWaUjinga/leopart | 0 | 12796095 | import numpy as np
import os
import torch
import torch.nn as nn
import pytorch_lightning as pl
from data.VOCdevkit.vocdata import VOCDataset
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur
from torchvision.transforms.functional import InterpolationMode
from skimage.measure import label
class EvaluateAttnMaps(pl.callbacks.Callback):
def __init__(self,
voc_root: str,
train_input_height: int,
attn_batch_size: int,
num_workers: int,
threshold: float = 0.6):
# Setup transforms and dataloader pvoc
image_transforms = Compose([Resize((train_input_height, train_input_height)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
target_transforms = Compose([Resize((train_input_height, train_input_height),
interpolation=InterpolationMode.NEAREST),
ToTensor()])
self.dataset = VOCDataset(root=os.path.join(voc_root, "VOCSegmentation"), image_set="val",
transform=image_transforms, target_transform=target_transforms)
self.loader = DataLoader(self.dataset, batch_size=attn_batch_size, shuffle=False, num_workers=num_workers,
drop_last=True, pin_memory=True)
self.threshold = threshold
def on_validation_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Evaluate attention maps.
if pl_module.global_rank == 0 and pl_module.local_rank == 0:
print("\n" + "#" * 20 + "Evaluating attention maps on VOC2012 with threshold: " +
str(self.threshold) + "#" * 20)
jacs_merged_attn = 0
jacs_all_heads = 0
# If teacher is present use teacher attention as it is also used during training
if hasattr(pl_module, 'teacher'):
patch_size = pl_module.teacher.patch_size
model = pl_module.teacher
else:
patch_size = pl_module.model.patch_size
model = pl_module.model
model.eval()
for i, (imgs, maps) in enumerate(self.loader):
w_featmap = imgs.shape[-2] // patch_size
h_featmap = imgs.shape[-1] // patch_size
with torch.no_grad():
attentions = model.get_last_selfattention(imgs.to(pl_module.device))
bs = attentions.shape[0]
attentions = attentions[..., 0, 1:]
# Evaluate two different protocols: merged attention and best head
jacs_merged_attn += self.evaluate_merged_attentions(attentions, bs, w_featmap, h_featmap, patch_size,
maps)
jacs_all_heads += self.evaluate_best_head(attentions, bs, w_featmap, h_featmap, patch_size, maps)
jacs_merged_attn /= len(self.dataset)
jacs_all_heads /= len(self.dataset)
print(f"Merged Jaccard on VOC12: {jacs_merged_attn.item()}")
print(f"All heads Jaccard on VOC12: {jacs_all_heads.item()}")
pl_module.logger.experiment.log_metric('attn_jacs_voc', jacs_merged_attn.item())
pl_module.logger.experiment.log_metric('all_heads_jacs_voc', jacs_all_heads.item())
def evaluate_best_head(self, attentions: torch.Tensor, bs: int, w_featmap: int, h_featmap: int, patch_size: int,
maps: torch.Tensor) -> torch.Tensor:
jacs = 0
nh = attentions.shape[1] # number of heads
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=-1, keepdim=True)
cumval = torch.cumsum(val, dim=-1)
th_attn = cumval > (1 - self.threshold)
idx2 = torch.argsort(idx)
for head in range(nh):
th_attn[:, head] = torch.gather(th_attn[:, head], dim=1, index=idx2[:, head])
th_attn = th_attn.reshape(bs, nh, w_featmap, h_featmap).float()
# interpolate
th_attn = nn.functional.interpolate(th_attn, scale_factor=patch_size, mode="nearest").cpu().numpy()
# Calculate IoU for each image
for k, map in enumerate(maps):
jac = 0
objects = np.unique(map)
objects = np.delete(objects, [0, -1])
for o in objects:
masko = map == o
intersection = masko * th_attn[k]
intersection = torch.sum(torch.sum(intersection, dim=-1), dim=-1)
union = (masko + th_attn[k]) > 0
union = torch.sum(torch.sum(union, dim=-1), dim=-1)
jaco = intersection / union
jac += max(jaco)
if len(objects) != 0:
jac /= len(objects)
jacs += jac
return jacs
def evaluate_merged_attentions(self, attentions: torch.Tensor, bs: int, w_featmap: int, h_featmap: int,
patch_size: int, maps: torch.Tensor) -> torch.Tensor:
jacs = 0
# Average attentions
attentions = sum(attentions[:, i] * 1 / attentions.size(1) for i in range(attentions.size(1)))
nh = 1 # number of heads is one as we merged all heads
# Gaussian blurring
attentions = GaussianBlur(7, sigma=(.6))(attentions.reshape(bs * nh, 1, w_featmap, h_featmap))\
.reshape(bs, nh, -1)
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=-1, keepdim=True)
cumval = torch.cumsum(val, dim=-1)
th_attn = cumval > (1 - self.threshold)
idx2 = torch.argsort(idx)
th_attn[:, 0] = torch.gather(th_attn[:, 0], dim=1, index=idx2[:, 0])
th_attn = th_attn.reshape(bs, nh, w_featmap, h_featmap).float()
# remove components that are less then 3 pixels
for j, th_att in enumerate(th_attn):
labelled = label(th_att.cpu().numpy())
for k in range(1, np.max(labelled) + 1):
mask = labelled == k
if np.sum(mask) <= 2:
th_attn[j, 0][mask] = 0
# interpolate
th_attn = nn.functional.interpolate(th_attn, scale_factor=patch_size, mode="nearest").cpu().numpy()
# Calculate IoU for each image
for k, map in enumerate(maps):
gt_fg_mask = (map != 0.).float()
intersection = gt_fg_mask * th_attn[k]
intersection = torch.sum(torch.sum(intersection, dim=-1), dim=-1)
union = (gt_fg_mask + th_attn[k]) > 0
union = torch.sum(torch.sum(union, dim=-1), dim=-1)
jacs += intersection / union
return jacs
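

# Minimal wiring sketch (hypothetical paths and sizes; needs a local PASCAL VOC copy and a
# LightningModule whose `model`/`teacher` is a ViT exposing get_last_selfattention):
#   callback = EvaluateAttnMaps(voc_root="/data/VOC", train_input_height=224,
#                               attn_batch_size=16, num_workers=4)
#   trainer = pl.Trainer(callbacks=[callback])
#   trainer.validate(lightning_module)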
| 2.078125 | 2 |
students/K33402/Karatetskaya_Maria/lab1/4/server.py | marysadness/ITMO_ICT_WebDevelopment_2021-2022 | 0 | 12796096 | import socket, threading
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "127.0.0.1"
port = 9090
server.bind((host, port))
server.listen(5)
clients = list()
end = list()
def get():
while True:
client, addr = server.accept()
clients.append(client)
        print(f'Server connected via {addr}: number of clients: {len(clients)}', end='\n')
def recv_data(client):
while True:
try:
indata = client.recv(1024)
except Exception:
clients.remove(client)
end.remove(client)
            print(f'Client disconnected: number of clients: {len(clients)}', end='\n')
break
print(indata.decode('utf-8'))
for i in clients:
if i != client:
i.send(indata)
def send_mes():
while True:
print('')
outdata = input('')
print()
for client in clients:
client.send(f"Сервер: {outdata}".encode('utf-8)'))
def get_mes():
while True:
for i in clients:
if i in end:
continue
index = threading.Thread(target=recv_data, args=(i,))
index.start()
end.append(i)
t1 = threading.Thread(target=send_mes, name='input')
t1.start()
t2 = threading.Thread(target=get_mes, name='out')
t2.start()
t3 = threading.Thread(target=get, name='get')
t3.start()
t2.join()
for i in clients:
i.close() | 3.125 | 3 |
src/deep_autoencoder.py | Cc618/PyTorch-Collections | 0 | 12796097 | # Autoencoder using convolutional layers
# Dataset : MNIST
# Requires : PIL, matplotlib
# Inspired by https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# To compress data : net.encode(data)
# To decompress data : net.decode(data)
# To mutate data : net(data)
import os
import numpy as np
import matplotlib.pyplot as plt
import torch as T
from torch import nn
from torch import cuda
import torch.nn.functional as F
from torchvision import transforms
import torchvision
from torchvision.datasets import MNIST
from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d
import PIL.Image as im
from utils import dataset_dir, models_dir
# Displays an image (1 dim tensor)
# t has values in [0, 1]
def imshow(t):
transforms.ToPILImage()(t).show()
# Show in matplotlib
def gridshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class Net(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.latent_size = latent_size
self.encodeConv1 = Conv2d(1, 16, 4)
self.encodeConv2 = Conv2d(16, 32, 2)
self.encodeFC1 = Linear(800, hidden_size)
self.encodeFC2 = Linear(hidden_size, self.latent_size)
self.decodeFC1 = Linear(self.latent_size, 13 * 13)
self.decodeConv1 = ConvTranspose2d(1, 1, 2)
self.decodeFC2 = Linear(14 * 14, 28 * 28)
def encode(self, x):
x = MaxPool2d(2)(F.relu(self.encodeConv1(x)))
x = MaxPool2d(2)(F.relu(self.encodeConv2(x)))
x = x.view(-1, 800)
x = F.relu(self.encodeFC1(x))
x = T.sigmoid(self.encodeFC2(x))
return x
def decode(self, x):
x = F.relu(self.decodeFC1(x))
x = x.view(-1, 1, 13, 13)
x = F.relu(self.decodeConv1(x))
x = x.view(-1, 14 * 14)
x = T.sigmoid(self.decodeFC2(x))
x = x.view(-1, 1, 28, 28)
return x
def forward(self, x):
return self.decode(self.encode(x))
# Hyper params
latent_size = 10
hidden_size = 256
epochs = 3
batch_size = 10
learning_rate = .0002
train_or_test = 'test'
path = models_dir + '/deep_autoencoder'
# Training device
device = T.device('cuda:0' if cuda.is_available() else 'cpu')
# Dataset
trans = transforms.ToTensor()
dataset = MNIST(root=dataset_dir, train=True, download=True, transform=trans)
loader = T.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)
# Model
net = Net(hidden_size, latent_size)
net.to(device)
if train_or_test == 'train':
# Load
if os.path.exists(path):
net.load_state_dict(T.load(path))
print('Model loaded')
# Train
optim = T.optim.Adam(net.parameters(), lr=learning_rate, betas=(.9, .999))
criterion = nn.MSELoss()
for e in range(epochs):
avg_loss = 0
for i, data in enumerate(loader, 0):
# Only inputs (no labels)
inputs, _ = data
# Zero the parameter gradients
optim.zero_grad()
# Predictions
x = inputs.to(device)
y = net(x)
# Back prop
loss = criterion(y, x)
loss.backward()
optim.step()
avg_loss += loss.item()
# Stats
print_freq = 100
if i % print_freq == print_freq - 1:
print(f'Epoch {e + 1:2d}, Batch {i + 1:5d}, Loss {avg_loss / print_freq:.3f}')
avg_loss = 0.0
# Save
T.save(net.state_dict(), path)
print('Model trained and saved')
else:
# Load
net.load_state_dict(T.load(path))
# Test
dataiter = iter(loader)
images, _ = dataiter.next()
# Show ground truth
gridshow(torchvision.utils.make_grid(images))
# Show predictions
with T.no_grad():
preds = T.cat([net(images[i].view(1, 1, 28, 28).to(device)).view(1, 1, 28, 28).cpu() for i in range(batch_size)])
preds = T.tensor(preds)
gridshow(torchvision.utils.make_grid(preds))
| 3.203125 | 3 |
src/main/python/monocyte/handler/ec2.py | claytonbrown/aws-monocyte | 20 | 12796098 | # Monocyte - Search and Destroy unwanted AWS Resources relentlessly.
# Copyright 2015 Immobilien Scout GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from boto import ec2
from boto.exception import EC2ResponseError
from monocyte.handler import Resource, Handler
class Instance(Handler):
VALID_TARGET_STATES = ["terminated", "shutting-down"]
def fetch_region_names(self):
return [region.name for region in ec2.regions()]
def fetch_unwanted_resources(self):
for region_name in self.region_names:
connection = ec2.connect_to_region(region_name)
resources = connection.get_only_instances() or []
for resource in resources:
resource_wrapper = Resource(resource=resource,
resource_type=self.resource_type,
resource_id=resource.id,
creation_date=resource.launch_time,
region=region_name)
if resource.id in self.ignored_resources:
self.logger.info('IGNORE ' + self.to_string(resource_wrapper))
continue
yield resource_wrapper
def to_string(self, resource):
return "ec2 instance found in {region.name}, " \
"with identifier {id}, instance type is {instance_type}, created {launch_time}, " \
"dnsname is {public_dns_name}, key {key_name}, with state {_state}".format(**vars(resource.wrapped))
def delete(self, resource):
if resource.wrapped.state in Instance.VALID_TARGET_STATES:
raise Warning("state '{0}' is a valid target state, skipping".format(
resource.wrapped.state))
connection = ec2.connect_to_region(resource.region)
if self.dry_run:
try:
connection.terminate_instances([resource.wrapped.id], dry_run=True)
except EC2ResponseError as exc:
if exc.status == 412: # Precondition Failed
raise Warning("Termination {message}".format(**vars(exc)))
raise
else:
instances = connection.terminate_instances([resource.wrapped.id], dry_run=False)
self.logger.info("Initiating shutdown sequence for {0}".format(instances))
return instances
class Volume(Handler):
def fetch_region_names(self):
return [region.name for region in ec2.regions()]
def fetch_unwanted_resources(self):
for region_name in self.region_names:
connection = ec2.connect_to_region(region_name)
resources = connection.get_all_volumes() or []
for resource in resources:
resource_wrapper = Resource(resource=resource,
resource_type=self.resource_type,
resource_id=resource.id,
creation_date=resource.create_time,
region=region_name)
if resource.id in self.ignored_resources:
self.logger.info('IGNORE ' + self.to_string(resource_wrapper))
continue
yield resource_wrapper
def to_string(self, resource):
return "ebs volume found in {region.name}, " \
"with identifier {id}, created {create_time}, " \
"with state {status}".format(**vars(resource.wrapped))
def delete(self, resource):
connection = ec2.connect_to_region(resource.region)
if self.dry_run:
try:
connection.delete_volume(resource.wrapped.id, dry_run=True)
except EC2ResponseError as exc:
if exc.status == 412: # Precondition Failed
warnings.warn(Warning("Termination {message}".format(**vars(exc))))
raise
else:
self.logger.info("Initiating deletion of EBS volume {0}".format(resource.wrapped.id))
connection.delete_volume(resource.wrapped.id, dry_run=False)
| 2.015625 | 2 |
vendas/forms.py | Moisestuli/karrata | 0 | 12796099 | <gh_stars>0
from django import forms
from vendas.models import Venda
class VendaAdminForm(forms.ModelForm):
class Meta:
model = Venda
fields = ('nome_client','telefone','cidade','email','produto','deu',) | 1.84375 | 2 |
deck_code.py | BenWiederhake/solitaire_aide | 0 | 12796100 | #!/usr/bin/env python3
import common
import os
import sys
def run_encode(base):
x = 0
    # read up to `base` bytes from stdin (the accumulated value is never emitted here)
    for i in range(base):
b = sys.stdin.buffer.read(1)
if b == b'':
break
def run_decode(base):
raise NotImplementedError()
def usage(progname):
print('USAGE: {} {{encode | decode}} [<BASENUM>]'.format(progname), file=sys.stderr)
exit(1)
def run(argv):
if not 2 <= len(argv) <= 3:
print('error: bad argument count', file=sys.stderr)
usage(argv[0])
base = None
try:
base = int(argv[2])
if not 1 < base < 1000:
print('error: "{}" is not a valid base (must be integer between 1 and 1000)'.format(argv[2]), file=sys.stderr)
usage(argv[0])
except IndexError:
pass
except ValueError:
print('error: "{}" is not a valid base (must be integer)'.format(argv[2]), file=sys.stderr)
usage(argv[0])
    if len(argv) == 2:
        base = os.environ.get('DECKCODE_BASE')
        if base is None:
            base = 54
        else:
            base = int(base)
            print('note: base set to {}'.format(base), file=sys.stderr)
if argv[1] == 'encode':
run_encode(base)
elif argv[1] == 'decode':
run_decode(base)
else:
print('error: unknown command "{}"'.format(argv[1]), file=sys.stderr)
usage(argv[0])
if __name__ == '__main__':
run(sys.argv)
| 3.359375 | 3 |
twitch-chat-reader/acapyla.py | Evshaddock/scripts | 11 | 12796101 | from pathlib import Path
import requests
import string
import urllib
import random
import json
import sys
def acapyla(quote, voicename="willfromafar"):
try:
voiceid = "enu_" + voicename + "_22k_ns.bvcu"
except IndexError:
voiceid = "enu_willfromafar_22k_ns.bvcu"
letters = string.ascii_lowercase
premail = ''.join(random.choice(letters) for i in range(64))
email = premail + "@gmail.com"
noncerl = "https://acapelavoices.acapela-group.com/index/getnonce/"
noncedata = {'googleid':email}
noncer = requests.post(url = noncerl, data = noncedata)
nonce = noncer.text[10:50]
synthrl = "http://www.acapela-group.com:8080/webservices/1-34-01-Mobility/Synthesizer"
synthdata = "req_voice=" + voiceid + "&cl_pwd=&cl_vers=1-30&req_echo=ON&cl_login=AcapelaGroup&req_comment=%7B%22nonce%22%3A%22" + nonce + "%22%2C%22user%22%3A%22" + email + "%22%7D&req_text=" + quote + "&cl_env=ACAPELA_VOICES&prot_vers=2&cl_app=AcapelaGroup_WebDemo_Android"
headers = {'content-type': 'application/x-www-form-urlencoded'}
synthr = requests.post(url = synthrl, data = synthdata, headers = headers)
minuspre = synthr.text[synthr.text.find('http://'):]
minussuf = minuspre.split(".mp3", 1)[0]
synthresult = minussuf + ".mp3"
urllib.request.urlretrieve(synthresult, str(Path.home()) + "/.dominae/out/tts/" + email[:8] + ".mp3")
return email[:8] | 2.71875 | 3 |
getFeed.py | kjohn01/fbexample | 0 | 12796102 | <gh_stars>0
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import json
import datetime
import pandas as pd
from dateutil.parser import parse
def handleDate(x):
if isinstance(x, datetime.date):
return "{}-{}-{}".format(x.year, x.month, x.day)
token = '<KEY>'
group = {'689157281218904':'台北技能交換'}
feeds = []
for ele in group:
res = requests.get('https://graph.facebook.com/v2.9/{}/feed?limit=100&access_token={}'.format(ele, token))
while 'paging' in res.json():
for information in res.json()['data']:
if 'message' in information:
feeds.append([group[ele], information['message'], parse(information['updated_time']).date(), information['id']])
res = requests.get(res.json()['paging']['next'])
# print(json.dumps(feeds, indent=4, separators=(',', ': '), ensure_ascii=False, default = handleDate))
with open('feeds.json', 'w') as outfile:
json.dump(feeds, outfile, indent=4, separators=(',', ': '), ensure_ascii=False, default = handleDate)
# Finally, convert the list into a DataFrame and export it as a CSV file
#
# information_df = pd.DataFrame(feeds, columns=['粉絲專頁', '發文內容', '發文時間'])
# information_df.to_csv('Data Visualization Information.csv', index=False)
| 2.75 | 3 |
agnes/algos/configs/a2c_config.py | rotinov/CITUS | 24 | 12796103 | <gh_stars>10-100
from typing import Dict, Tuple
def atari_config() -> Dict:
return dict(
timesteps=10e6,
nsteps=32,
nminibatches=1,
gamma=0.99,
lam=0.95,
noptepochs=1,
max_grad_norm=0.5,
learning_rate=lambda x: 7e-4*x,
vf_coef=0.5,
ent_coef=0.01,
bptt=16
)
def mujoco_config() -> Dict:
return dict(
timesteps=1e6,
nsteps=64,
nminibatches=1,
gamma=0.99,
lam=0.95,
noptepochs=1,
max_grad_norm=0.5,
learning_rate=lambda x: 7e-4*x,
vf_coef=0.5,
ent_coef=0.0,
bptt=8
)
def get_config(env_type: str) -> Tuple[Dict, str]:
if env_type == 'mujoco':
cnfg = mujoco_config()
elif env_type == 'atari':
cnfg = atari_config()
else:
cnfg = atari_config()
return cnfg, env_type
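

# Minimal usage sketch (hypothetical caller, not part of the original module):
# fetch the hyperparameters for an environment family and evaluate the learning-rate
# schedule at the start of training (fraction of training remaining = 1.0).
if __name__ == "__main__":
    cnfg, env_type = get_config("atari")
    print(env_type, cnfg["nsteps"], cnfg["learning_rate"](1.0))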
| 2.484375 | 2 |
cottonformation/res/batch.py | MacHu-GWU/cottonformation-project | 5 | 12796104 | # -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropJobDefinitionAuthorizationConfig(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.AuthorizationConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html
Property Document:
- ``p_AccessPointId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-accesspointid
- ``p_Iam``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-iam
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.AuthorizationConfig"
p_AccessPointId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AccessPointId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-accesspointid"""
p_Iam: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Iam"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-iam"""
@attr.s
class PropJobDefinitionResourceRequirement(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.ResourceRequirement"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html
Property Document:
- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-type
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-value
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.ResourceRequirement"
p_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-type"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-value"""
@attr.s
class PropJobDefinitionEnvironment(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Environment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html
Property Document:
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-name
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-value
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Environment"
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-name"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-value"""
@attr.s
class PropJobDefinitionVolumesHost(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.VolumesHost"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html
Property Document:
- ``p_SourcePath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html#cfn-batch-jobdefinition-volumeshost-sourcepath
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.VolumesHost"
p_SourcePath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SourcePath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html#cfn-batch-jobdefinition-volumeshost-sourcepath"""
@attr.s
class PropJobQueueComputeEnvironmentOrder(Property):
"""
AWS Object Type = "AWS::Batch::JobQueue.ComputeEnvironmentOrder"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html
Property Document:
- ``rp_ComputeEnvironment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-computeenvironment
- ``rp_Order``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-order
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobQueue.ComputeEnvironmentOrder"
rp_ComputeEnvironment: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironment"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-computeenvironment"""
rp_Order: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "Order"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-order"""
@attr.s
class PropJobDefinitionSecret(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Secret"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-name
- ``rp_ValueFrom``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-valuefrom
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Secret"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-name"""
rp_ValueFrom: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ValueFrom"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-valuefrom"""
@attr.s
class PropJobDefinitionNetworkConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.NetworkConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html
Property Document:
- ``p_AssignPublicIp``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration-assignpublicip
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NetworkConfiguration"
p_AssignPublicIp: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AssignPublicIp"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration-assignpublicip"""
@attr.s
class PropJobDefinitionLogConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.LogConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html
Property Document:
- ``rp_LogDriver``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-logdriver
- ``p_Options``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-options
- ``p_SecretOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-secretoptions
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.LogConfiguration"
rp_LogDriver: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "LogDriver"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-logdriver"""
p_Options: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Options"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-options"""
p_SecretOptions: typing.List[typing.Union['PropJobDefinitionSecret', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionSecret.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionSecret), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "SecretOptions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-secretoptions"""
@attr.s
class PropComputeEnvironmentLaunchTemplateSpecification(Property):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment.LaunchTemplateSpecification"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html
Property Document:
- ``p_LaunchTemplateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplateid
- ``p_LaunchTemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplatename
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-version
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.LaunchTemplateSpecification"
p_LaunchTemplateId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplateid"""
p_LaunchTemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplatename"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-version"""
@attr.s
class PropJobDefinitionMountPoints(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.MountPoints"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html
Property Document:
- ``p_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-containerpath
- ``p_ReadOnly``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-readonly
- ``p_SourceVolume``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-sourcevolume
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.MountPoints"
p_ContainerPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-containerpath"""
p_ReadOnly: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ReadOnly"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-readonly"""
p_SourceVolume: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SourceVolume"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-sourcevolume"""
@attr.s
class PropSchedulingPolicyShareAttributes(Property):
"""
AWS Object Type = "AWS::Batch::SchedulingPolicy.ShareAttributes"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html
Property Document:
- ``p_ShareIdentifier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-shareidentifier
- ``p_WeightFactor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-weightfactor
"""
AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy.ShareAttributes"
p_ShareIdentifier: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ShareIdentifier"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-shareidentifier"""
p_WeightFactor: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "WeightFactor"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-weightfactor"""
@attr.s
class PropJobDefinitionEvaluateOnExit(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.EvaluateOnExit"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html
Property Document:
- ``rp_Action``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-action
- ``p_OnExitCode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onexitcode
- ``p_OnReason``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onreason
- ``p_OnStatusReason``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onstatusreason
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.EvaluateOnExit"
rp_Action: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Action"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-action"""
p_OnExitCode: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OnExitCode"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onexitcode"""
p_OnReason: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OnReason"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onreason"""
p_OnStatusReason: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OnStatusReason"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onstatusreason"""
@attr.s
class PropJobDefinitionUlimit(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Ulimit"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html
Property Document:
- ``rp_HardLimit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-hardlimit
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-name
- ``rp_SoftLimit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-softlimit
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Ulimit"
rp_HardLimit: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "HardLimit"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-hardlimit"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-name"""
rp_SoftLimit: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "SoftLimit"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-softlimit"""
@attr.s
class PropJobDefinitionFargatePlatformConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.FargatePlatformConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html
Property Document:
- ``p_PlatformVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration-platformversion
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.FargatePlatformConfiguration"
p_PlatformVersion: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformVersion"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration-platformversion"""
@attr.s
class PropJobDefinitionTimeout(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Timeout"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html
Property Document:
- ``p_AttemptDurationSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html#cfn-batch-jobdefinition-timeout-attemptdurationseconds
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Timeout"
p_AttemptDurationSeconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "AttemptDurationSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html#cfn-batch-jobdefinition-timeout-attemptdurationseconds"""
@attr.s
class PropJobDefinitionTmpfs(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Tmpfs"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html
Property Document:
- ``rp_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-containerpath
- ``rp_Size``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-size
- ``p_MountOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-mountoptions
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Tmpfs"
rp_ContainerPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-containerpath"""
rp_Size: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "Size"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-size"""
p_MountOptions: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "MountOptions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-mountoptions"""
@attr.s
class PropJobDefinitionEfsVolumeConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.EfsVolumeConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html
Property Document:
- ``rp_FileSystemId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-filesystemid
- ``p_AuthorizationConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-authorizationconfig
- ``p_RootDirectory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-rootdirectory
- ``p_TransitEncryption``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryption
- ``p_TransitEncryptionPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryptionport
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.EfsVolumeConfiguration"
rp_FileSystemId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "FileSystemId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-filesystemid"""
p_AuthorizationConfig: typing.Union['PropJobDefinitionAuthorizationConfig', dict] = attr.ib(
default=None,
converter=PropJobDefinitionAuthorizationConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionAuthorizationConfig)),
metadata={AttrMeta.PROPERTY_NAME: "AuthorizationConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-authorizationconfig"""
p_RootDirectory: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "RootDirectory"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-rootdirectory"""
p_TransitEncryption: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TransitEncryption"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryption"""
p_TransitEncryptionPort: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "TransitEncryptionPort"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryptionport"""
@attr.s
class PropJobDefinitionDevice(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Device"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html
Property Document:
- ``p_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-containerpath
- ``p_HostPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-hostpath
- ``p_Permissions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-permissions
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Device"
p_ContainerPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-containerpath"""
p_HostPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "HostPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-hostpath"""
p_Permissions: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Permissions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-permissions"""
@attr.s
class PropComputeEnvironmentEc2ConfigurationObject(Property):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html
Property Document:
- ``rp_ImageType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imagetype
- ``p_ImageIdOverride``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imageidoverride
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject"
rp_ImageType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ImageType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imagetype"""
p_ImageIdOverride: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageIdOverride"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imageidoverride"""
@attr.s
class PropJobDefinitionVolumes(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Volumes"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html
Property Document:
- ``p_EfsVolumeConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-efsvolumeconfiguration
- ``p_Host``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-host
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-name
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Volumes"
p_EfsVolumeConfiguration: typing.Union['PropJobDefinitionEfsVolumeConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionEfsVolumeConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionEfsVolumeConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "EfsVolumeConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-efsvolumeconfiguration"""
p_Host: typing.Union['PropJobDefinitionVolumesHost', dict] = attr.ib(
default=None,
converter=PropJobDefinitionVolumesHost.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionVolumesHost)),
metadata={AttrMeta.PROPERTY_NAME: "Host"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-host"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-name"""
@attr.s
class PropSchedulingPolicyFairsharePolicy(Property):
"""
AWS Object Type = "AWS::Batch::SchedulingPolicy.FairsharePolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html
Property Document:
- ``p_ComputeReservation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-computereservation
- ``p_ShareDecaySeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedecayseconds
- ``p_ShareDistribution``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedistribution
"""
AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy.FairsharePolicy"
p_ComputeReservation: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeReservation"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-computereservation"""
p_ShareDecaySeconds: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "ShareDecaySeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedecayseconds"""
p_ShareDistribution: typing.List[typing.Union['PropSchedulingPolicyShareAttributes', dict]] = attr.ib(
default=None,
converter=PropSchedulingPolicyShareAttributes.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropSchedulingPolicyShareAttributes), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ShareDistribution"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedistribution"""
@attr.s
class PropComputeEnvironmentComputeResources(Property):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment.ComputeResources"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html
Property Document:
- ``rp_MaxvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus
- ``rp_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type
- ``p_AllocationStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy
- ``p_BidPercentage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage
- ``p_DesiredvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-desiredvcpus
- ``p_Ec2Configuration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration
- ``p_Ec2KeyPair``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair
- ``p_ImageId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid
- ``p_InstanceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole
- ``p_InstanceTypes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes
- ``p_LaunchTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate
- ``p_MinvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus
- ``p_PlacementGroup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup
- ``p_SecurityGroupIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids
- ``p_SpotIamFleetRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-spotiamfleetrole
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.ComputeResources"
rp_MaxvCpus: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "MaxvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus"""
rp_Subnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Subnets"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets"""
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type"""
p_AllocationStrategy: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AllocationStrategy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy"""
p_BidPercentage: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "BidPercentage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage"""
p_DesiredvCpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "DesiredvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-desiredvcpus"""
p_Ec2Configuration: typing.List[typing.Union['PropComputeEnvironmentEc2ConfigurationObject', dict]] = attr.ib(
default=None,
converter=PropComputeEnvironmentEc2ConfigurationObject.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropComputeEnvironmentEc2ConfigurationObject), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Ec2Configuration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration"""
p_Ec2KeyPair: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Ec2KeyPair"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair"""
p_ImageId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid"""
p_InstanceRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole"""
p_InstanceTypes: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "InstanceTypes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes"""
p_LaunchTemplate: typing.Union['PropComputeEnvironmentLaunchTemplateSpecification', dict] = attr.ib(
default=None,
converter=PropComputeEnvironmentLaunchTemplateSpecification.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropComputeEnvironmentLaunchTemplateSpecification)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplate"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate"""
p_MinvCpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MinvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus"""
p_PlacementGroup: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlacementGroup"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup"""
p_SecurityGroupIds: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "SecurityGroupIds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids"""
p_SpotIamFleetRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SpotIamFleetRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-spotiamfleetrole"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags"""
@attr.s
class PropJobDefinitionRetryStrategy(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.RetryStrategy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html
Property Document:
- ``p_Attempts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-attempts
- ``p_EvaluateOnExit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-evaluateonexit
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.RetryStrategy"
p_Attempts: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Attempts"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-attempts"""
p_EvaluateOnExit: typing.List[typing.Union['PropJobDefinitionEvaluateOnExit', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionEvaluateOnExit.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionEvaluateOnExit), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "EvaluateOnExit"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-evaluateonexit"""
@attr.s
class PropJobDefinitionLinuxParameters(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.LinuxParameters"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html
Property Document:
- ``p_Devices``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-devices
- ``p_InitProcessEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-initprocessenabled
- ``p_MaxSwap``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-maxswap
- ``p_SharedMemorySize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-sharedmemorysize
- ``p_Swappiness``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-swappiness
- ``p_Tmpfs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-tmpfs
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.LinuxParameters"
p_Devices: typing.List[typing.Union['PropJobDefinitionDevice', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionDevice.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionDevice), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Devices"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-devices"""
p_InitProcessEnabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "InitProcessEnabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-initprocessenabled"""
p_MaxSwap: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxSwap"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-maxswap"""
p_SharedMemorySize: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "SharedMemorySize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-sharedmemorysize"""
p_Swappiness: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Swappiness"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-swappiness"""
p_Tmpfs: typing.List[typing.Union['PropJobDefinitionTmpfs', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionTmpfs.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionTmpfs), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tmpfs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-tmpfs"""
@attr.s
class PropJobDefinitionContainerProperties(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.ContainerProperties"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html
Property Document:
- ``rp_Image``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-image
- ``p_Command``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-command
- ``p_Environment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-environment
- ``p_ExecutionRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-executionrolearn
- ``p_FargatePlatformConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration
- ``p_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-instancetype
- ``p_JobRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-jobrolearn
- ``p_LinuxParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-linuxparameters
- ``p_LogConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-logconfiguration
- ``p_Memory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-memory
- ``p_MountPoints``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-mountpoints
- ``p_NetworkConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration
- ``p_Privileged``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged
- ``p_ReadonlyRootFilesystem``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-readonlyrootfilesystem
- ``p_ResourceRequirements``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-resourcerequirements
- ``p_Secrets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-secrets
- ``p_Ulimits``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-ulimits
- ``p_User``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-user
- ``p_Vcpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-vcpus
- ``p_Volumes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-volumes
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.ContainerProperties"
rp_Image: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Image"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-image"""
p_Command: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Command"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-command"""
p_Environment: typing.List[typing.Union['PropJobDefinitionEnvironment', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionEnvironment.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionEnvironment), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Environment"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-environment"""
p_ExecutionRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ExecutionRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-executionrolearn"""
p_FargatePlatformConfiguration: typing.Union['PropJobDefinitionFargatePlatformConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionFargatePlatformConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionFargatePlatformConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "FargatePlatformConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration"""
p_InstanceType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-instancetype"""
p_JobRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "JobRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-jobrolearn"""
p_LinuxParameters: typing.Union['PropJobDefinitionLinuxParameters', dict] = attr.ib(
default=None,
converter=PropJobDefinitionLinuxParameters.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionLinuxParameters)),
metadata={AttrMeta.PROPERTY_NAME: "LinuxParameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-linuxparameters"""
p_LogConfiguration: typing.Union['PropJobDefinitionLogConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionLogConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionLogConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "LogConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-logconfiguration"""
p_Memory: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Memory"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-memory"""
p_MountPoints: typing.List[typing.Union['PropJobDefinitionMountPoints', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionMountPoints.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionMountPoints), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "MountPoints"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-mountpoints"""
p_NetworkConfiguration: typing.Union['PropJobDefinitionNetworkConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionNetworkConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionNetworkConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "NetworkConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration"""
p_Privileged: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Privileged"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged"""
p_ReadonlyRootFilesystem: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ReadonlyRootFilesystem"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-readonlyrootfilesystem"""
p_ResourceRequirements: typing.List[typing.Union['PropJobDefinitionResourceRequirement', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionResourceRequirement.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionResourceRequirement), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ResourceRequirements"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-resourcerequirements"""
p_Secrets: typing.List[typing.Union['PropJobDefinitionSecret', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionSecret.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionSecret), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Secrets"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-secrets"""
p_Ulimits: typing.List[typing.Union['PropJobDefinitionUlimit', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionUlimit.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionUlimit), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Ulimits"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-ulimits"""
p_User: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "User"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-user"""
p_Vcpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Vcpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-vcpus"""
p_Volumes: typing.List[typing.Union['PropJobDefinitionVolumes', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionVolumes.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionVolumes), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Volumes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-volumes"""
@attr.s
class PropJobDefinitionNodeRangeProperty(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.NodeRangeProperty"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html
Property Document:
- ``rp_TargetNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-targetnodes
- ``p_Container``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-container
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NodeRangeProperty"
rp_TargetNodes: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TargetNodes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-targetnodes"""
p_Container: typing.Union['PropJobDefinitionContainerProperties', dict] = attr.ib(
default=None,
converter=PropJobDefinitionContainerProperties.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionContainerProperties)),
metadata={AttrMeta.PROPERTY_NAME: "Container"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-container"""
@attr.s
class PropJobDefinitionNodeProperties(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.NodeProperties"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html
Property Document:
- ``rp_MainNode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-mainnode
- ``rp_NodeRangeProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-noderangeproperties
- ``rp_NumNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-numnodes
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NodeProperties"
rp_MainNode: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "MainNode"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-mainnode"""
rp_NodeRangeProperties: typing.List[typing.Union['PropJobDefinitionNodeRangeProperty', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionNodeRangeProperty.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionNodeRangeProperty), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "NodeRangeProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-noderangeproperties"""
rp_NumNodes: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "NumNodes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-numnodes"""
#--- Resource declaration ---
@attr.s
class JobQueue(Resource):
"""
AWS Object Type = "AWS::Batch::JobQueue"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html
Property Document:
- ``rp_ComputeEnvironmentOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-computeenvironmentorder
- ``rp_Priority``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-priority
- ``p_JobQueueName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-jobqueuename
- ``p_SchedulingPolicyArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-schedulingpolicyarn
- ``p_State``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-state
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobQueue"
rp_ComputeEnvironmentOrder: typing.List[typing.Union['PropJobQueueComputeEnvironmentOrder', dict]] = attr.ib(
default=None,
converter=PropJobQueueComputeEnvironmentOrder.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobQueueComputeEnvironmentOrder), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironmentOrder"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-computeenvironmentorder"""
rp_Priority: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "Priority"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-priority"""
p_JobQueueName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "JobQueueName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-jobqueuename"""
p_SchedulingPolicyArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SchedulingPolicyArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-schedulingpolicyarn"""
p_State: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "State"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-state"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-tags"""
@attr.s
class JobDefinition(Resource):
"""
AWS Object Type = "AWS::Batch::JobDefinition"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html
Property Document:
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-type
- ``p_ContainerProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-containerproperties
- ``p_JobDefinitionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-jobdefinitionname
- ``p_NodeProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-nodeproperties
- ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-parameters
- ``p_PlatformCapabilities``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-platformcapabilities
- ``p_PropagateTags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-propagatetags
- ``p_RetryStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-retrystrategy
- ``p_SchedulingPriority``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-schedulingpriority
- ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-timeout
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition"
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-type"""
p_ContainerProperties: typing.Union['PropJobDefinitionContainerProperties', dict] = attr.ib(
default=None,
converter=PropJobDefinitionContainerProperties.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionContainerProperties)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-containerproperties"""
p_JobDefinitionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "JobDefinitionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-jobdefinitionname"""
p_NodeProperties: typing.Union['PropJobDefinitionNodeProperties', dict] = attr.ib(
default=None,
converter=PropJobDefinitionNodeProperties.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionNodeProperties)),
metadata={AttrMeta.PROPERTY_NAME: "NodeProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-nodeproperties"""
p_Parameters: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-parameters"""
p_PlatformCapabilities: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "PlatformCapabilities"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-platformcapabilities"""
p_PropagateTags: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "PropagateTags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-propagatetags"""
p_RetryStrategy: typing.Union['PropJobDefinitionRetryStrategy', dict] = attr.ib(
default=None,
converter=PropJobDefinitionRetryStrategy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionRetryStrategy)),
metadata={AttrMeta.PROPERTY_NAME: "RetryStrategy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-retrystrategy"""
p_SchedulingPriority: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "SchedulingPriority"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-schedulingpriority"""
p_Timeout: typing.Union['PropJobDefinitionTimeout', dict] = attr.ib(
default=None,
converter=PropJobDefinitionTimeout.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionTimeout)),
metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-timeout"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-tags"""
@attr.s
class SchedulingPolicy(Resource):
"""
AWS Object Type = "AWS::Batch::SchedulingPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html
Property Document:
- ``p_FairsharePolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-name
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy"
p_FairsharePolicy: typing.Union['PropSchedulingPolicyFairsharePolicy', dict] = attr.ib(
default=None,
converter=PropSchedulingPolicyFairsharePolicy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropSchedulingPolicyFairsharePolicy)),
metadata={AttrMeta.PROPERTY_NAME: "FairsharePolicy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-name"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#aws-resource-batch-schedulingpolicy-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class ComputeEnvironment(Resource):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html
Property Document:
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-type
- ``p_ComputeEnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeenvironmentname
- ``p_ComputeResources``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeresources
- ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-servicerole
- ``p_State``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-state
- ``p_UnmanagedvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-unmanagedvcpus
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment"
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-type"""
p_ComputeEnvironmentName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironmentName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeenvironmentname"""
p_ComputeResources: typing.Union['PropComputeEnvironmentComputeResources', dict] = attr.ib(
default=None,
converter=PropComputeEnvironmentComputeResources.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropComputeEnvironmentComputeResources)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeResources"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeresources"""
p_ServiceRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ServiceRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-servicerole"""
p_State: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "State"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-state"""
p_UnmanagedvCpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "UnmanagedvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-unmanagedvcpus"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-tags"""
| 1.953125 | 2 |
monitor/utils/mail.py | laozhudetui/wam | 227 | 12796105 | #!/usr/bin/env python
# coding: utf-8
# built-in modules
import smtplib
from email.mime.text import MIMEText
from monitor.utils.settings import EMAIL_SERVER
from monitor.utils.settings import EMAIL_PORT
from monitor.utils.settings import EMAIL_USER
from monitor.utils.settings import EMAIL_PASS
from monitor.utils.settings import EMAIL_FROM_ADDR
from monitor.utils.email_list import EMAIL_LIST
def _sendmail(to_list, subject, content):
    """Send a plain-text mail through the configured SMTP server.
    params:
        to_list[list]: recipient addresses
        subject[str]: mail subject
        content[str]: plain-text body
    """
    msg = MIMEText(content, 'plain', 'utf-8')
    msg['Subject'] = subject
    msg['From'] = EMAIL_FROM_ADDR
    msg['To'] = ', '.join(to_list)
    smtp = smtplib.SMTP_SSL()
    smtp.set_debuglevel(0)
    smtp.connect(EMAIL_SERVER, EMAIL_PORT)
    smtp.login(EMAIL_USER, EMAIL_PASS)
    smtp.sendmail(EMAIL_FROM_ADDR, to_list, msg.as_string())
    smtp.quit()
def sendmail(subject, content):
"""
params:
subject[str]:
content[str]: plain content
"""
if EMAIL_LIST:
_sendmail(EMAIL_LIST, subject, content)
else:
raise ValueError('email list is empty')
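# Usage sketch (editorial addition): a minimal, hedged example of how this
# module is expected to be called. EMAIL_LIST and the EMAIL_* settings come
# from the project's config modules imported above; the subject and body
# strings here are purely illustrative.
#
#   from monitor.utils.mail import sendmail
#   sendmail(u'[wam] new task found', u'task detail: ...')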
| 2.25 | 2 |
py_kiwoom/kiwoom_evol.py | ijhan21/hackathon_kiwoom | 0 | 12796106 | <gh_stars>0
import os
import pickle
import sys
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
from config.errorCode import *
from PyQt5.QtTest import *
from config.kiwoomType import *
# from config.slack import *
import logging
from PyQt5.QtWidgets import *
STOP_LOSS_RATE = 0.03
STOP_PROFIT_RATE = 0.03
# class Ui_class():
# def __init__(self):
# self.app = QApplication(sys.argv)
# self.kiwoom = Kiwoom()
# ret = self.kiwoom.multi_test()
# # self.app.exec_()
logging.basicConfig(filename="kiwoom.log", level=logging.INFO)
class Kiwoom(QAxWidget):
def __init__(self):
super().__init__()
self.realType = RealType()
        # self.slack = Slack()  # Slack notification hook (disabled)
        print("Kiwoom() class start.")
        ####### event loops used while waiting for TR responses
        self.login_event_loop = QEventLoop()  # login request
        self.detail_account_info_event_loop = QEventLoop()  # deposit / account detail requests
        self.calculator_event_loop = QEventLoop()
        self.get_not_concluded_account_event_loop = QEventLoop()
        #########################################
        ####### account-related state
        self.account_stock_dict = {}
        self.not_concluded_account = {}
        self.deposit = 0  # deposit balance
        self.use_money = 0  # amount actually used for trading
        self.use_money_percent = 0.5  # fraction of the deposit to use
        self.output_deposit = 0  # withdrawable amount
        self.total_profit_loss_money = 0  # total valuation profit/loss
        self.total_profit_loss_rate = 0.0  # total return (%)
        ########################################
        ######## stock information containers
        self.portfolio_stock_dict = {}
        self.jango_dict = {}
########################
##########################################
self.data = None
        ####### screen numbers used for requests
        self.screen_my_info = "2000"  # account-related requests
        self.screen_calculation_stock = "4000"  # calculation requests
        self.screen_real_stock = "5000"  # real-time data, assigned per stock
        self.screen_meme_stock = "6000"  # order requests, assigned per stock
        self.screen_start_stop_real = "1000"  # market open/close real-time events
        ########################################
        ######### initial setup, executed immediately
        self.get_ocx_instance()  # expose the OCX control to Python
        self.event_slots()  # signal/slot wiring for Kiwoom TR events
        self.real_event_slot()  # signal/slot wiring for real-time events
        self.signal_login_commConnect()  # send the login request
        self.get_account_info()  # fetch the account number
        self.detail_account_info()  # request deposit details
        self.detail_account_mystock()  # request account balance/holdings
        QTimer.singleShot(5000, self.get_not_concluded_account)  # fetch unfilled orders after 5 s
        #########################################
        # QTest.qWait(10000)
        self.read_code()
        self.screen_number_setting()
        QTest.qWait(5000)
        # real-time registration
        # register for market open/close events
        self.dynamicCall("SetRealReg(QString, QString, QString, QString)", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], "0")
def setRealReg(self, companys):
for code in companys:
            screen_num = self.not_concluded_account[code].jango['스크린번호']  # Jango stores its fields in .jango
fids = self.realType.REALTYPE['주식체결']['체결시간']
self.dynamicCall("SetRealReg(QString, QString, QString, QString)", screen_num, code, fids, "1")
def get_ocx_instance(self):
self.setControl("KHOPENAPI.KHOpenAPICtrl.1") # 레지스트리에 저장된 api 모듈 불러오기
def event_slots(self):
self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트
self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트
self.OnReceiveMsg.connect(self.msg_slot)
def real_event_slot(self):
self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결
self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트
def signal_login_commConnect(self):
self.dynamicCall("CommConnect()") # 로그인 요청 시그널
self.login_event_loop.exec_() # 이벤트루프 실행
def login_slot(self, err_code):
logging.debug(errors(err_code)[1])
#로그인 처리가 완료됐으면 이벤트 루프를 종료한다.
self.login_event_loop.exit()
def get_account_info(self):
QTest.qWait(3600) #3.6초마다 딜레이를 준다.
account_list = self.dynamicCall("GetLoginInfo(QString)", "ACCNO") # 계좌번호 반환
account_num = account_list.split(';')[1]
self.account_num = account_num
logging.debug("계좌번호 : %s" % account_num)
def detail_account_info(self, sPrevNext="0"):
QTest.qWait(3600) #3.6초마다 딜레이를 준다.
self.dynamicCall("SetInputValue(QString, QString)", "계좌번호", self.account_num)
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호", "0000")
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호입력매체구분", "00")
self.dynamicCall("SetInputValue(QString, QString)", "조회구분", "1")
self.dynamicCall("CommRqData(QString, QString, int, QString)", "예수금상세현황요청", "opw00001", sPrevNext, self.screen_my_info)
self.detail_account_info_event_loop.exec_()
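    # Note (editorial): this and the similar account requests below follow the
    # same OpenAPI+ pattern — SetInputValue() fills the TR inputs, CommRqData()
    # sends the request, and a local QEventLoop blocks until trdata_slot()
    # receives the answer and calls exit() on that loop. The screen numbers only
    # group related requests; they carry no other meaning.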
def detail_account_mystock(self, sPrevNext="0"):
QTest.qWait(3600) #3.6초마다 딜레이를 준다.
self.account_stock_dict = dict()
self.dynamicCall("SetInputValue(QString, QString)", "계좌번호", self.account_num)
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호", "0000")
self.dynamicCall("SetInputValue(QString, QString)", "비밀번호입력매체구분", "00")
self.dynamicCall("SetInputValue(QString, QString)", "조회구분", "1")
self.dynamicCall("CommRqData(QString, QString, int, QString)", "계좌평가잔고내역요청", "opw00018", sPrevNext, self.screen_my_info)
self.detail_account_info_event_loop.exec_()
def get_not_concluded_account(self, sPrevNext="0"):
QTest.qWait(3600) #3.6초마다 딜레이를 준다.
self.dynamicCall("SetInputValue(QString, QString)", "계좌번호", self.account_num)
self.dynamicCall("SetInputValue(QString, QString)", "체결구분", "1")
self.dynamicCall("SetInputValue(QString, QString)", "매매구분", "0")
self.dynamicCall("CommRqData(QString, QString, int, QString)", "실시간미체결요청", "opt10075", sPrevNext, self.screen_my_info)
self.get_not_concluded_account_event_loop.exec_()
def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext):
# print("sRQName", sRQName)
if sRQName == "예수금상세현황요청":
deposit = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "예수금")
self.deposit = int(deposit)
use_money = float(self.deposit) * self.use_money_percent
self.use_money = int(use_money)
self.use_money = self.use_money / 4
output_deposit = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "출금가능금액")
self.output_deposit = int(output_deposit)
logging.debug("예수금 : %s" % self.output_deposit)
print("예수금 : %s" % self.output_deposit)
self.stop_screen_cancel(self.screen_my_info)
self.detail_account_info_event_loop.exit()
elif sRQName == "계좌평가잔고내역요청":
total_buy_money = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "총매입금액")
self.total_buy_money = int(total_buy_money)
total_profit_loss_money = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "총평가손익금액")
self.total_profit_loss_money = int(total_profit_loss_money)
total_profit_loss_rate = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "총수익률(%)")
self.total_profit_loss_rate = float(total_profit_loss_rate)
logging.debug("계좌평가잔고내역요청 싱글데이터 : %s - %s - %s" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate))
rows = self.dynamicCall("GetRepeatCnt(QString, QString)", sTrCode, sRQName)
for i in range(rows):
code = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목번호") # 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목
code = code.strip()[1:]
code_nm = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목명") # 출럭 : 한국기업평가
stock_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "보유수량") # 보유수량 : 000000000000010
buy_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "매입가") # 매입가 : 000000000054100
learn_rate = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "수익률(%)") # 수익률 : -000000001.94
current_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "현재가") # 현재가 : 000000003450
total_chegual_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "매입금액")
possible_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "매매가능수량")
logging.debug("종목코드: %s - 종목명: %s - 보유수량: %s - 매입가:%s - 수익률: %s - 현재가: %s" % (
code, code_nm, stock_quantity, buy_price, learn_rate, current_price))
if code in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인
pass
else:
self.account_stock_dict[code] = Jango(code)
code_nm = code_nm.strip()
stock_quantity = int(stock_quantity.strip())
buy_price = int(buy_price.strip())
learn_rate = float(learn_rate.strip())
current_price = int(current_price.strip())
total_chegual_price = int(total_chegual_price.strip())
possible_quantity = int(possible_quantity.strip())
tmp = self.account_stock_dict[code]
tmp.jango.update({"종목명": code_nm})
# tmp.jango.update({"보유수량": stock_quantity})
tmp.jango.update({"체결량": stock_quantity})
# tmp.jango.update({"매입가": buy_price})
tmp.jango.update({"체결가": buy_price})
# tmp.jango.update({"수익률(%)": learn_rate})
tmp.jango.update({"현재가": current_price})
# tmp.jango.update({"매입금액": total_chegual_price})
# tmp.jango.update({'매매가능수량' : possible_quantity})
tmp.update()
logging.debug("sPreNext : %s" % sPrevNext)
print("\n계좌에 가지고 있는 종목은 %s " % rows)
# for item in self.account_stock_dict.keys():
# print(self.account_stock_dict[item].jango)
if sPrevNext == "2":
self.detail_account_mystock(sPrevNext="2")
else:
self.detail_account_info_event_loop.exit()
elif sRQName == "실시간미체결요청":
rows = self.dynamicCall("GetRepeatCnt(QString, QString)", sTrCode, sRQName)
for i in range(rows):
code = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목코드")
code_nm = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "종목명")
order_no = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "주문번호")
order_status = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문상태") # 접수,확인,체결
order_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문수량")
order_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문가격")
order_gubun = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"주문구분") # -매도, +매수, -매도정정, +매수정정
not_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"미체결수량")
ok_quantity = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i,
"체결량")
code = code.strip()
code_nm = code_nm.strip()
order_no = int(order_no.strip())
order_status = order_status.strip()
order_quantity = int(order_quantity.strip())
order_price = int(order_price.strip())
order_gubun = order_gubun.strip().lstrip('+').lstrip('-')
not_quantity = int(not_quantity.strip())
ok_quantity = int(ok_quantity.strip())
if code in self.not_concluded_account:
pass
else:
self.not_concluded_account[code] = Jango(code)
tmp = self.not_concluded_account[code]
tmp.jango.update({'종목코드': code})
tmp.jango.update({'종목명': code_nm})
tmp.jango.update({'주문번호': order_no})
tmp.jango.update({'주문상태': order_status})
tmp.jango.update({'주문수량': order_quantity})
tmp.jango.update({'주문가격': order_price})
tmp.jango.update({'주문구분': order_gubun})
tmp.jango.update({'미체결수량': not_quantity})
tmp.jango.update({'체결량': ok_quantity})
tmp.jango.update({'스크린번호': 1000})
tmp.update()
logging.debug("미체결 종목 : %s " % self.not_concluded_account[code])
print("미체결 종목 : %s " % self.not_concluded_account[code].jango)
self.get_not_concluded_account_event_loop.exit()
#######################################
elif sRQName == "3분봉조회":
cnt = self.dynamicCall("GetRepeatCnt(QString, QString)", sTrCode, sRQName)
# print(sTrCode)
# data = self.dynamicCall("GetCommDataEx(QString, QString)", sTrCode, sRQName)
            # GetCommDataEx would return rows of ['', current price, volume, trading value, date, open, high, low, '']
logging.debug("3분봉조회 %s" % cnt)
ret_data=list()
for i in range(cnt):
data = []
code = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "종목코드")
code = code.strip()
code_name = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, 0, "종목명")
code_name = code_name.strip()
current_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "현재가").strip() # 출력 : 000070
volume = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "거래량").strip() # 출력 : 000070
trading_value = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "거래대금") # 출력 : 000070
date = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "일자") # 출력 : 000070
start_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "시가").strip() # 출력 : 000070
high_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "고가").strip() # 출력 : 000070
low_price = self.dynamicCall("GetCommData(QString, QString, int, QString)", sTrCode, sRQName, i, "저가").strip() # 출력 : 000070
data=[int(current_price),int(volume), int(start_price), int(high_price), int(low_price)]
ret_data.append(data)
self.data = ret_data
self.calculator_event_loop.exit()
def multi_rq3(self, sCode, tick):
QTest.qWait(3600) #3.6초마다 딜레이를 준다.
trCode = "opt10080"
sRQName = "3분봉조회"
수정주가구분 = 1
self.dynamicCall("SetInputValue(QString, QString)", "종목코드", sCode)
self.dynamicCall("SetInputValue(QString, QString)", "틱범위", tick)
self.dynamicCall("SetInputValue(QString, QString)", "수정주가구분", 수정주가구분)
ret = self.dynamicCall("CommRqData(QString, QString, int, QString, QString, QString)",sRQName,trCode, "0", self.screen_meme_stock)
# ret = self.dynamicCall("GetCommDataEx(QString, QString)", trCode, "주식분봉차트")
self.calculator_event_loop.exec_()
return self.data
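    # Usage sketch (editorial addition): fetch 3-minute candles for one stock.
    # "005930" is only an illustrative code; any listed code works, and `kiwoom`
    # stands for an instance of this class. Each returned row is
    # [close price, volume, open, high, low], exactly as built in trdata_slot().
    #
    #   candles = kiwoom.multi_rq3("005930", 3)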
def stop_screen_cancel(self, sScrNo=None):
self.dynamicCall("DisconnectRealData(QString)", sScrNo) # 스크린번호 연결 끊기
def get_code_list_by_market(self, market_code):
'''
        Get the list of stock codes for a market.
        market_code: 0 = KOSPI (main board), 10 = KOSDAQ
        :param market_code: market code to query
:return:
'''
code_list = self.dynamicCall("GetCodeListByMarket(QString)", market_code)
code_list = code_list.split(';')[:-1]
return code_list
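    # Usage sketch (editorial addition): market codes follow the docstring above
    # (0 = KOSPI main board, 10 = KOSDAQ); `kiwoom` is an instance of this class.
    #
    #   kospi_codes = kiwoom.get_code_list_by_market("0")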
def read_code(self):
# if os.path.exists("files/condition_stock.txt"): # 해당 경로에 파일이 있는지 체크한다.
# f = open("files/condition_stock.txt", "r", encoding="utf8") # "r"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다.
# lines = f.readlines() #파일에 있는 내용들이 모두 읽어와 진다.
# for line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다.
# if line != "":
# ls = line.split("\t")
# stock_code = ls[0]
# stock_name = ls[1]
# stock_price = int(ls[2].split("\n")[0])
# stock_price = abs(stock_price)
# self.portfolio_stock_dict.update({stock_code:{"종목명":stock_name, "현재가":stock_price}})
# f.close()
files = os.listdir("./models/")
codes=list()
for f in files:
codes.append(f.replace(".pt",""))
for code in codes:
self.portfolio_stock_dict[code] = Jango(code)
return codes
def screen_number_setting(self):
screen_overwrite = []
#계좌평가잔고내역에 있는 종목들
for code in self.account_stock_dict.keys():
if code not in screen_overwrite:
screen_overwrite.append(code)
#미체결에 있는 종목들
for code in self.not_concluded_account.keys():
            code = self.not_concluded_account[code].jango['종목코드']  # Jango stores its fields in .jango
if code not in screen_overwrite:
screen_overwrite.append(code)
#포트폴리로에 담겨있는 종목들
for code in self.portfolio_stock_dict.keys():
if code not in screen_overwrite:
screen_overwrite.append(code)
# 스크린번호 할당
cnt = 0
for code in screen_overwrite:
temp_screen = int(self.screen_real_stock)
meme_screen = int(self.screen_meme_stock)
if (cnt % 50) == 0:
temp_screen += 1
self.screen_real_stock = str(temp_screen)
if (cnt % 50) == 0:
meme_screen += 1
self.screen_meme_stock = str(meme_screen)
if code in self.portfolio_stock_dict.keys():
self.portfolio_stock_dict[code].jango.update({"스크린번호": str(self.screen_real_stock)})
self.portfolio_stock_dict[code].jango.update({"주문용스크린번호": str(self.screen_meme_stock)})
elif code not in self.portfolio_stock_dict.keys():
self.portfolio_stock_dict[code] = Jango(code)
self.portfolio_stock_dict[code].jango.update({"스크린번호": str(self.screen_real_stock)})
self.portfolio_stock_dict[code].jango.update({"주문용스크린번호": str(self.screen_meme_stock)})
cnt += 1
# 실시간 데이터 얻어오기
def realdata_slot(self, sCode, sRealType, sRealData):
if sRealType == "장시작시간":
fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감)
value = self.dynamicCall("GetCommRealData(QString, int)", sCode, fid)
if value == '0':
logging.debug("장 시작 전")
elif value == '3':
logging.debug("장 시작")
elif value == "2":
logging.debug("장 종료, 동시호가로 넘어감")
elif value == "4":
logging.debug("3시30분 장 종료")
for code in self.not_concluded_account.keys():
self.dynamicCall("SetRealRemove(QString, QString)", self.not_concluded_account[code]['스크린번호'], code)
QTest.qWait(5000)
sys.exit()
elif sRealType == "주식체결":
a = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS
b = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520
b = abs(int(b))
c = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520
c = abs(int(c))
d = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98
d = float(d)
e = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520
e = abs(int(e))
f = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515
f = abs(int(f))
g = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때
g = abs(int(g))
h = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124
h = abs(int(h))
i = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530
i = abs(int(i))
j = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530
j = abs(int(j))
k = self.dynamicCall("GetCommRealData(QString, int)", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530
k = abs(int(k))
if sCode not in self.not_concluded_account:
self.not_concluded_account[sCode]=Jango(sCode)
tmp_not_c = self.not_concluded_account[sCode]
tmp_not_c.jango.update({"현재가": b})
tmp_not_c.jango.update({"거래량": g})
# 현재 가지고 있는 대상인지 파악
if sCode in self.account_stock_dict.keys():
try:
                    # stop-loss: sell if the low drops more than STOP_LOSS_RATE below the fill price
print(self.account_stock_dict[sCode].jango["종목명"],(self.account_stock_dict[sCode].jango['체결가']-k)/self.account_stock_dict[sCode].jango['체결가'])
if self.account_stock_dict[sCode].jango["체결량"]>0 and self.account_stock_dict[sCode].jango['체결가']*(1-STOP_LOSS_RATE)>k:
count = self.account_stock_dict[sCode].jango["체결량"]
while count >0:
print("스탑로스 가동",self.account_stock_dict[sCode].jango['체결가'], k)
print('스탑로스 기준가',self.account_stock_dict[sCode].jango['체결가']*(1-STOP_LOSS_RATE))
ret = self.send_order("신규매도",sCode=sCode,order_quantity=1,order_price=b,hoga_type="시장가")
count -= 1
self.account_stock_dict[sCode].jango["체결량"]=count
elif self.account_stock_dict[sCode].jango["체결량"]>0 and self.account_stock_dict[sCode].jango['체결가']*(1+STOP_PROFIT_RATE)<b: # 익절
count = self.account_stock_dict[sCode].jango["체결량"]
while count >0:
print("스탑프로핏 가동",self.account_stock_dict[sCode].jango['체결가'], k)
                            print('스탑프로핏 기준가',self.account_stock_dict[sCode].jango['체결가']*(1+STOP_PROFIT_RATE))  # use the take-profit rate, matching the condition above
ret = self.send_order("신규매도",sCode=sCode,order_quantity=1,order_price=b,hoga_type="지정가")
count -= 1
self.account_stock_dict[sCode].jango["체결량"]=count
except Exception as e:
print(e)
print("EXception 현재 가지고 있는 잔고 비교 정보",self.account_stock_dict[sCode].jango)
try:
#print("실시간 주식체결 정보 : ", self.not_concluded_account[sCode]["종목명"],a, b)
pass
except Exception as e:
print("실시간 주식체결 정보 : ", sCode,a, b)
def send_order(self,order_type, sCode, order_quantity, order_price, hoga_type, order_num=""):
if order_type == "신규매수":
type_dict = 1
elif order_type =="신규매도":
type_dict = 2
elif order_type =="매수취소":
type_dict = 3
elif order_type =="매도취소":
type_dict = 4
elif order_type =="매수정정":
type_dict = 5
elif order_type =="매도정정":
type_dict = 6
if hoga_type =="지정가":
hoga_dict = "00"
elif hoga_type =="시장가":
hoga_dict = "03"
order_success = self.dynamicCall(
"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
[order_type, self.screen_meme_stock, self.account_num, type_dict, sCode, order_quantity, order_price, hoga_dict, order_num]
)
if order_success == 0:
logging.debug("%s 전달 성공"%order_type)
print("%s 전달 성공"%order_type)
else:
logging.debug("%s 전달 실패"%order_type)
return order_success
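    # Usage sketch (editorial addition): a market-price buy of one share.
    # "005930" is only an illustrative code and `kiwoom` is an instance of this
    # class. The Korean literals are the exact values the Kiwoom OpenAPI+ expects
    # and must not be translated.
    #
    #   kiwoom.send_order("신규매수", sCode="005930", order_quantity=1,
    #                     order_price=0, hoga_type="시장가")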
# 실시간 체결 정보
def chejan_slot(self, sGubun, nItemCnt, sFidList):
if int(sGubun) == 0: #주문체결
account_num = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['계좌번호'])
sCode = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['종목코드'])[1:]
stock_name = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['종목명'])
stock_name = stock_name.strip()
origin_order_number = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : "000000"
order_number = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호
order_status = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결
order_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3
order_quan = int(order_quan)
order_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000
order_price = int(order_price)
not_chegual_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0
not_chegual_quan = int(not_chegual_quan)
order_gubun = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수
order_gubun = order_gubun.strip().lstrip('+').lstrip('-')
chegual_time_str = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028'
chegual_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : ''
if chegual_price == '':
chegual_price = 0
else:
chegual_price = int(chegual_price)
chegual_quantity = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : ''
if chegual_quantity == '':
chegual_quantity = 0
else:
chegual_quantity = int(chegual_quantity)
current_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000
current_price = abs(int(current_price))
first_sell_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010
first_sell_price = abs(int(first_sell_price))
first_buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000
first_buy_price = abs(int(first_buy_price))
######## 새로 들어온 주문이면 주문번호 할당
if sCode not in self.not_concluded_account.keys():
self.not_concluded_account[sCode]=Jango(sCode)
tmp = self.not_concluded_account[sCode]
tmp.jango.update({"종목코드": sCode})
tmp.jango.update({"주문번호": order_number})
tmp.jango.update({"종목명": stock_name})
tmp.jango.update({"주문상태": order_status})
tmp.jango.update({"주문수량": order_quan})
tmp.jango.update({"주문가격": order_price})
tmp.jango.update({"미체결수량": not_chegual_quan})
tmp.jango.update({"원주문번호": origin_order_number})
tmp.jango.update({"주문구분": order_gubun})
tmp.jango.update({"체결가": chegual_price})
tmp.jango.update({"체결량": chegual_quantity})
tmp.jango.update({"현재가": current_price})
tmp.update()
print("주문체결")
print(self.not_concluded_account[sCode].jango)
elif int(sGubun) == 1: #잔고
account_num = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['계좌번호'])
sCode = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['종목코드'])[1:]
stock_name = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['종목명'])
stock_name = stock_name.strip()
current_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['현재가'])
current_price = abs(int(current_price))
stock_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['보유수량'])
stock_quan = int(stock_quan)
like_quan = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['주문가능수량'])
like_quan = int(like_quan)
buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['매입단가'])
buy_price = abs(int(buy_price))
total_buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가
total_buy_price = int(total_buy_price)
meme_gubun = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['매도매수구분'])
meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun]
first_sell_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['(최우선)매도호가'])
first_sell_price = abs(int(first_sell_price))
first_buy_price = self.dynamicCall("GetChejanData(int)", self.realType.REALTYPE['잔고']['(최우선)매수호가'])
first_buy_price = abs(int(first_buy_price))
if sCode not in self.jango_dict.keys():
self.jango_dict.update({sCode:{}})
self.jango_dict[sCode].update({"현재가": current_price})
self.jango_dict[sCode].update({"종목코드": sCode})
self.jango_dict[sCode].update({"종목명": stock_name})
self.jango_dict[sCode].update({"보유수량": stock_quan})
self.jango_dict[sCode].update({"주문가능수량": like_quan})
self.jango_dict[sCode].update({"매입단가": buy_price})
self.jango_dict[sCode].update({"총매입가": total_buy_price})
self.jango_dict[sCode].update({"매도매수구분": meme_gubun})
self.jango_dict[sCode].update({"(최우선)매도호가": first_sell_price})
self.jango_dict[sCode].update({"(최우선)매수호가": first_buy_price})
# print("잔고")
# print(self.jango_dict)
if stock_quan == 0:
del self.jango_dict[sCode]
#송수신 메세지 get
def msg_slot(self, sScrNo, sRQName, sTrCode, msg):
logging.debug("스크린: %s, 요청이름: %s, tr코드: %s --- %s" %(sScrNo, sRQName, sTrCode, msg))
# ui = Ui_class()
class Jango():
def __init__(self, code):
self.jango=dict()
self.jango["종목코드"]=code
self.jango["종목명"] = ""
self.jango["체결가"]=0
self.jango["현재가"]=0
self.jango["체결량"]=0 #보유수량
self.jango["주문번호"]=""
self.jango["원주문번호"]=""
self.jango["주문상태"]=""
self.jango["주문수량"]=0
self.jango["주문가격"]=0
self.jango["주문구분"]=""
self.jango["미체결수량"]=""
self.jango["스크린번호"]=""
self.jango["주문용스크린번호"]=""
self.jango["손익률"]=0.
# self.jango["평균단가"]=0
self.jango["보유금액"]=0
def update(self):
#손익률
if self.jango["체결가"] != 0:
self.jango["손익률"] = (self.jango["현재가"]-self.jango["체결가"])/self.jango["체결가"]
#보유금액
self.jango["보유금액"]=self.jango["체결가"]*self.jango["체결량"] #내용 확인해 보자. 기존 주식과 합산 계산 되는지 | 2.0625 | 2 |
ibsng/handler/session/__init__.py | ParspooyeshFanavar/pyibsng | 6 | 12796107 | """Session library."""
| 1.007813 | 1 |
src/runner.py | jayantik/AiCorExample | 0 | 12796108 | <filename>src/runner.py
import collections
import torch
# Run one pass over `dataset`, either training (with backprop) or evaluation,
# and return the average of every metric passed as a keyword argument.
def run(model, criterion, optimizer, dataset, is_training: bool, **metrics):
model.train(is_training)
dictionary = collections.defaultdict(int)
counter = 0
with torch.set_grad_enabled(is_training):
for X, y in dataset:
counter += 1
y_pred = model(X)
loss = criterion(y_pred, y)
for name, metric in metrics.items():
dictionary[name] += metric(y_pred, y)
if is_training:
loss.backward()
optimizer.step()
optimizer.zero_grad()
return {name: value / counter for name, value in dictionary.items()}
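# Usage sketch (editorial addition): the model, optimizer and loader below are
# placeholders — any torch.nn.Module, criterion, optimizer and iterable of
# (X, y) batches fits the signature above.
#
#   import torch.nn as nn
#   model = nn.Linear(10, 2)
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   loader = [(torch.randn(4, 10), torch.randint(0, 2, (4,))) for _ in range(8)]
#   stats = run(model, nn.CrossEntropyLoss(), optimizer, loader, True,
#               loss=nn.CrossEntropyLoss())
#   # -> {"loss": average loss over the 8 batches}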
| 2.890625 | 3 |
pl_bolts/callbacks/ssl_online.py | Aayush-Jain01/lightning-bolts | 504 | 12796109 | <filename>pl_bolts/callbacks/ssl_online.py
from contextlib import contextmanager
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import torch
from pytorch_lightning import Callback, LightningModule, Trainer
from pytorch_lightning.utilities import rank_zero_warn
from torch import Tensor, nn
from torch.nn import functional as F
from torch.optim import Optimizer
from torchmetrics.functional import accuracy
from pl_bolts.models.self_supervised.evaluator import SSLEvaluator
class SSLOnlineEvaluator(Callback): # pragma: no cover
"""Attaches a MLP for fine-tuning using the standard self-supervised protocol.
Example::
# your datamodule must have 2 attributes
dm = DataModule()
dm.num_classes = ... # the num of classes in the datamodule
dm.name = ... # name of the datamodule (e.g. ImageNet, STL10, CIFAR10)
# your model must have 1 attribute
model = Model()
model.z_dim = ... # the representation dim
online_eval = SSLOnlineEvaluator(
z_dim=model.z_dim
)
"""
def __init__(
self,
z_dim: int,
drop_p: float = 0.2,
hidden_dim: Optional[int] = None,
num_classes: Optional[int] = None,
dataset: Optional[str] = None,
):
"""
Args:
z_dim: Representation dimension
drop_p: Dropout probability
hidden_dim: Hidden dimension for the fine-tune MLP
"""
super().__init__()
self.z_dim = z_dim
self.hidden_dim = hidden_dim
self.drop_p = drop_p
self.optimizer: Optional[Optimizer] = None
self.online_evaluator: Optional[SSLEvaluator] = None
        self.num_classes: Optional[int] = num_classes
        self.dataset: Optional[str] = dataset
self._recovered_callback_state: Optional[Dict[str, Any]] = None
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: Optional[str] = None) -> None:
if self.num_classes is None:
self.num_classes = trainer.datamodule.num_classes
if self.dataset is None:
self.dataset = trainer.datamodule.name
def on_pretrain_routine_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
# must move to device after setup, as during setup, pl_module is still on cpu
self.online_evaluator = SSLEvaluator(
n_input=self.z_dim,
n_classes=self.num_classes,
p=self.drop_p,
n_hidden=self.hidden_dim,
).to(pl_module.device)
        # switch for PL compatibility reasons
accel = (
trainer.accelerator_connector
if hasattr(trainer, "accelerator_connector")
else trainer._accelerator_connector
)
if accel.is_distributed:
if accel.use_ddp:
from torch.nn.parallel import DistributedDataParallel as DDP
self.online_evaluator = DDP(self.online_evaluator, device_ids=[pl_module.device])
elif accel.use_dp:
from torch.nn.parallel import DataParallel as DP
self.online_evaluator = DP(self.online_evaluator, device_ids=[pl_module.device])
else:
rank_zero_warn(
"Does not support this type of distributed accelerator. The online evaluator will not sync."
)
self.optimizer = torch.optim.Adam(self.online_evaluator.parameters(), lr=1e-4)
if self._recovered_callback_state is not None:
self.online_evaluator.load_state_dict(self._recovered_callback_state["state_dict"])
self.optimizer.load_state_dict(self._recovered_callback_state["optimizer_state"])
def to_device(self, batch: Sequence, device: Union[str, torch.device]) -> Tuple[Tensor, Tensor]:
# get the labeled batch
if self.dataset == "stl10":
labeled_batch = batch[1]
batch = labeled_batch
inputs, y = batch
# last input is for online eval
x = inputs[-1]
x = x.to(device)
y = y.to(device)
return x, y
def shared_step(
self,
pl_module: LightningModule,
batch: Sequence,
):
with torch.no_grad():
with set_training(pl_module, False):
x, y = self.to_device(batch, pl_module.device)
representations = pl_module(x).flatten(start_dim=1)
# forward pass
mlp_logits = self.online_evaluator(representations) # type: ignore[operator]
mlp_loss = F.cross_entropy(mlp_logits, y)
acc = accuracy(mlp_logits.softmax(-1), y)
return acc, mlp_loss
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Sequence,
batch: Sequence,
batch_idx: int,
dataloader_idx: int,
) -> None:
train_acc, mlp_loss = self.shared_step(pl_module, batch)
# update finetune weights
mlp_loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
pl_module.log("online_train_acc", train_acc, on_step=True, on_epoch=False)
pl_module.log("online_train_loss", mlp_loss, on_step=True, on_epoch=False)
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Sequence,
batch: Sequence,
batch_idx: int,
dataloader_idx: int,
) -> None:
val_acc, mlp_loss = self.shared_step(pl_module, batch)
pl_module.log("online_val_acc", val_acc, on_step=False, on_epoch=True, sync_dist=True)
pl_module.log("online_val_loss", mlp_loss, on_step=False, on_epoch=True, sync_dist=True)
def on_save_checkpoint(self, trainer: Trainer, pl_module: LightningModule, checkpoint: Dict[str, Any]) -> dict:
return {"state_dict": self.online_evaluator.state_dict(), "optimizer_state": self.optimizer.state_dict()}
def on_load_checkpoint(self, trainer: Trainer, pl_module: LightningModule, callback_state: Dict[str, Any]) -> None:
self._recovered_callback_state = callback_state
@contextmanager
def set_training(module: nn.Module, mode: bool):
"""Context manager to set training mode.
When exit, recover the original training mode.
Args:
module: module to set training mode
mode: whether to set training mode (True) or evaluation mode (False).
"""
original_mode = module.training
try:
module.train(mode)
yield module
finally:
module.train(original_mode)
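# Hedged usage sketch (not part of the original module): set_training is useful when a
# frozen forward pass must run in eval mode, e.g. inside a callback. The `encoder` and
# `batch` names below are placeholders.
def _example_set_training(encoder: nn.Module, batch: Tensor) -> Tensor:
    with torch.no_grad():
        with set_training(encoder, False):  # eval mode; original mode restored on exit
            return encoder(batch).flatten(start_dim=1)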
| 2.265625 | 2 |
math/stochastic_modeling/program.py | spideynolove/Other-repo | 0 | 12796110 | <reponame>spideynolove/Other-repo
import numpy as np
print("Hello stochastic_modeling!") | 1.273438 | 1 |
tests/regressions/python/530_augmented_assignment_indexed_lhs.py | frzfrsfra4/phylanx | 83 | 12796111 | # Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx
import numpy as np
@Phylanx
def foo():
local_a = np.array((2, 1))
local_a[0] += 55
return local_a
assert (np.array((57, 1)) == foo()).any()
| 1.9375 | 2 |
test/test-cases/conftest.py | vkuma82/DASH | 0 | 12796112 | import importlib
import json
import os
import sys
from pprint import pprint as pp
import pytest
import utils as util
from ixload import IxLoadTestSettings as TestSettings
from ixload import IxLoadUtils as IxLoadUtils
from ixload import IxRestUtils as IxRestUtils
from ixnetwork_restpy import SessionAssistant
from ixnetwork_restpy.testplatform.testplatform import TestPlatform
targets_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "targets"))
sys.path.insert(0, targets_dir)
@pytest.fixture(scope="session")
def tbinfo(request):
"""Create and return testbed information"""
from credentials import CREDENTIALS as CR
from testbed import TESTBED as TB
TB["CR"] = CR
return TB
@pytest.fixture(name="smartnics", scope="session")
def fixture_smartnics(tbinfo):
test_type = tbinfo['stateless'][0]['dpu'][0]['type']
if test_type:
modname = test_type.lower() + "." + test_type.lower()
else:
        raise Exception('Fail to load test module: DPU type is not set in testbed info')
try:
imod = importlib.import_module(modname)
cls = getattr(imod, test_type.title() + "Test")
return cls(**tbinfo)
except:
raise Exception('Fail to load module %s' % modname)
@pytest.fixture(scope="session")
def utils():
return util
@pytest.fixture
def create_ixload_session_url(tbinfo):
ixload_settings = {}
tb = tbinfo['stateful'][0]
tg = {
'chassis_list': tb['server'],
'tgen': tb['tgen'],
'vxlan': tb['vxlan'],
'dpu': tb
}
# Helper Functions
def create_test_settings():
# TEST CONFIG
test_settings = TestSettings.IxLoadTestSettings()
test_settings.gatewayServer = tbinfo['stateful'][0]['server'][0]['addr']
test_settings.apiVersion = "v0"
test_settings.ixLoadVersion = "9.20.0.279"
slot1 = tg['tgen'][0][1]
port1 = tg['tgen'][0][2]
slot2 = tg['tgen'][1][1]
port2 = tg['tgen'][1][2]
test_settings.portListPerCommunity = {
# format: { community name : [ port list ] }
"Traffic1@Network1": [(1, slot1, port1)],
"Traffic2@Network2": [(1, slot2, port2)]
}
chassisList = tg['tgen'][0][0]
test_settings.chassisList = [chassisList]
#test_settings.chassisList = ["10.36.79.165"]
return test_settings
def create_session(test_settings):
connection = IxRestUtils.getConnection(
test_settings.gatewayServer,
test_settings.gatewayPort,
httpRedirect=test_settings.httpRedirect,
version=test_settings.apiVersion
)
return connection
test_settings = create_test_settings()
connection = create_session(test_settings)
connection.setApiKey(test_settings.apiKey)
ixload_settings['connection'] = connection
#ixload_settings['session_url'] = session_url
ixload_settings['test_settings'] = test_settings
yield ixload_settings
def getTestClass(*args, **kwargs):
if test_type:
modname = test_type.lower() + "." + test_type.lower()
else:
        raise Exception('Fail to load test module: test type is not set')
try:
imod = importlib.import_module(modname)
cls = getattr(imod, test_type.title() + "Test")
return cls(*args, **kwargs)
except:
raise Exception('Fail to load module %s' % modname)
| 1.859375 | 2 |
Array/MaxProductOfTwoElements.py | haaris272k/Problem-Solving-Collection | 1 | 12796113 | <filename>Array/MaxProductOfTwoElements.py
"""Given the array of integers nums, you will choose two different indices i and j of that array. Return the maximum value of (nums[i]-1)*(nums[j]-1).
Example 1:
Input: nums = [3,4,5,2]
Output: 12
Explanation: If you choose the indices i=1 and j=2 (indexed from 0), you will get the maximum value, that is, (nums[1]-1)*(nums[2]-1) = (4-1)*(5-1) = 3*4 = 12. """
nums = [3, 4, 5, 2]
hashmap = {}
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
formula = (nums[i] - 1) * (nums[j] - 1)
hashmap.update({formula: [i, j]})
max_val = max(list(hashmap.keys()))
print(max_val)
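# Alternative sketch (not part of the original solution): the same answer falls out of a
# single pass that tracks the two largest values, avoiding the O(n^2) pair loop above.
first, second = 0, 0
for n in nums:
    if n > first:
        first, second = n, first
    elif n > second:
        second = n
print((first - 1) * (second - 1))  # 12 for nums = [3, 4, 5, 2]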
| 4.25 | 4 |
tests/test_attributes.py | uilianries/bintray-python | 4 | 12796114 | <filename>tests/test_attributes.py
import pytest
from bintray.bintray import Bintray
@pytest.fixture()
def create_attributes():
bintray = Bintray()
attributes = [{"name": "att1", "values": ["val1"], "type": "string"}]
return bintray.set_attributes("uilianries", "generic", "statistics", "test", attributes)
@pytest.fixture()
def create_file_attributes():
bintray = Bintray()
attributes = [{"name": "att1", "values": ["val1"], "type": "string"}]
response = bintray.set_file_attributes("uilianries", "generic", "packages.json", attributes)
return response
def test_get_attributes(create_attributes):
bintray = Bintray()
response = bintray.get_attributes("uilianries", "generic", "statistics", "test")
assert [{'name': 'att1', 'type': 'string', 'values': ['val1']},
{'error': False, 'statusCode': 200}] == response
response = bintray.get_attributes("uilianries", "generic", "statistics", "test", ["att1"])
assert [{'name': 'att1', 'type': 'string', 'values': ['val1']},
{'error': False, 'statusCode': 200}] == response
def test_set_attributes(create_attributes):
assert [{'name': 'att1', 'type': 'string', 'values': ['val1']},
{'error': False, 'statusCode': 200}] == create_attributes
def test_update_attributes(create_attributes):
bintray = Bintray()
attributes = [{"name": "att1", "values": ["val2"], "type": "string"}]
response = bintray.update_attributes("uilianries", "generic", "statistics", "test", attributes)
assert [{'name': 'att1', 'type': 'string', 'values': ['val2']},
{'error': False, 'statusCode': 200}] == response
def test_delete_attributes(create_attributes):
bintray = Bintray()
attributes = ["att1"]
response = bintray.delete_attributes("uilianries", "generic", "statistics", "test", attributes)
assert {'error': False, 'message': 'success', 'statusCode': 200} == response
def test_search_attributes(create_attributes):
bintray = Bintray()
attributes = [{'att1': ["val1", "val2"]}]
response = bintray.search_attributes("uilianries", "generic", "statistics", attributes)
assert {'error': False, 'statusCode': 200} in response
def test_get_files_attributes(create_file_attributes):
assert [{'name': 'att1', 'type': 'STRING', 'values': ['val1']},
{'error': False, 'statusCode': 200}] == create_file_attributes
def test_set_files_attributes():
bintray = Bintray()
attributes = [{'name': 'att1', 'values': ['val2'], 'type': "string"}]
response = bintray.set_file_attributes("uilianries", "generic", "packages.json", attributes)
assert [{'name': 'att1', 'type': 'STRING', 'values': ['val2']},
{'error': False, 'statusCode': 200}] == response
def test_update_files_attributes():
bintray = Bintray()
attributes = [{"name": "att1", "values": ["val3"], "type": "string"}]
response = bintray.update_file_attributes("uilianries", "generic", "packages.json", attributes)
assert [{'name': 'att1', 'type': 'STRING', 'values': ['val3']},
{'error': False, 'statusCode': 200}] == response
def test_delete_file_attributes(create_file_attributes):
bintray = Bintray()
attributes = ["att1"]
response = bintray.delete_file_attributes("uilianries", "generic", "packages.json", attributes)
assert {'error': False,
'message': 'Attributes were deleted successfully from the following file path: '
'packages.json',
'statusCode': 200} == response
def test_search_file_attributes(create_file_attributes):
bintray = Bintray()
attributes = [{'att1': ["val1"]}]
response = bintray.search_file_attributes("uilianries", "generic", attributes)
assert "packages.json" == response[0]["name"]
| 2.125 | 2 |
floodsystem/plot.py | vuquach99/1a-flood-warning-system | 0 | 12796115 | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import datetime
from dateutil.tz import tzutc
def plot_water_levels(station, dates, levels):
"""Task 2E: Plots water level against time"""
#Assign variables
range_high = [station.typical_range[1]]*len(dates)
range_low = [station.typical_range[0]]*len(dates)
# Plot
plt.plot(dates, levels, label="Water Level")
plt.plot(dates, range_high, label="Typical High")
plt.plot(dates, range_low, label="Typical Low")
# Add axis labels, add legend, rotate date labels and add plot title
plt.xlabel('Date')
plt.ylabel('Water Level (m)')
plt.legend()
plt.xticks(rotation=45)
plt.title(station.name)
# Display plot
plt.tight_layout() # This makes sure plot does not cut off date labels
return plt.show()
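# Hedged usage sketch (names assumed, not from this module): any station object with
# .name and .typical_range, plus parallel lists of datetimes and levels, will do:
#
#     plot_water_levels(station, dates, levels)                # raw level history
#     plot_water_level_with_fit(station, dates, levels, p=4)   # with a degree-4 fit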
def plot_water_level_with_fit(station, dates, levels, p):
"""Task 2F: Plots the water level data and the best-fit polynomial"""
# Convert dates to floats
dates_float = matplotlib.dates.date2num(dates)
# Create a shifted time list
dates_shifted = []
for i in range(len(dates_float)):
dates_shifted.append(dates_float[i] - dates_float[0])
# Find coefficients of best-fit polynomial f(x) of degree p
p_coeff = np.polyfit(dates_shifted, levels, p)
# Convert coefficient into a polynomial that can be evaluated,
# e.g. poly(0.3)
poly = np.poly1d(p_coeff)
# Plot original data points
plt.plot(dates_shifted, levels, '.', label='Data Points')
# Plot polynomial fit and typical range low/high at 30 points along interval
# (note that polynomial is evaluated using the date shift)
x = np.linspace(dates_shifted[0], dates_shifted[-1], 30)
range_high = [station.typical_range[1]]*len(x)
range_low = [station.typical_range[0]]*len(x)
plt.plot(x, poly(x - x[0]), label="Polynomial Fit")
plt.plot(x, range_high, label="Typical High")
plt.plot(x, range_low, label="Typical Low")
# Add axis labels, add legend, rotate date labels and add plot title
plt.xlabel('Dates from {}'.format(dates[-1]))
plt.ylabel('Water Level (m)')
plt.legend()
plt.xticks(rotation=45)
plt.title(station.name)
# Display plot
plt.tight_layout() # This makes sure plot does not cut off date labels
return plt.show() | 3.8125 | 4 |
sp_pipe.py | icanswim/seneca | 0 | 12796116 | <gh_stars>0
# Author: <NAME>
from __future__ import unicode_literals
import spacy
import numpy as np
class SpPipe():
def __init__(self):
self.nlp = spacy.load('en_core_web_md', disable=['ner','parser','tagger','textcat'])
def __call__(self, texts, labels, steps=10):
print 'tokenizing with spacy...'
docs = [self.nlp(unicode(text)) for text in texts]
X, y = self._get_features(docs, labels, steps)
return X, y
def _get_features(self, docs, labels, steps):
X = np.zeros((len(labels), steps), dtype='int32')
for n, doc in enumerate(docs):
m = 0
for token in doc:
vector_id = token.vocab.vectors.find(key=token.orth)
if vector_id >= 0:
X[n, m] = vector_id
else:
X[n, m] = 0
m += 1
if m >= steps:
break
return X, labels
def get_embedding(self):
return self.nlp.vocab.vectors.data
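# Hedged usage sketch (not part of the original module); `texts` and `labels` are
# placeholders for parallel sequences of raw strings and class labels:
#
#     pipe = SpPipe()
#     X, y = pipe(texts, labels, steps=10)  # X holds spaCy vector-table row ids, shape (len(labels), 10)
#     vectors = pipe.get_embedding()        # weight matrix for an embedding layer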
| 2.828125 | 3 |
app/api_tools.py | AndreyAD1/forum | 0 | 12796117 | from app import database
def get_single_json_entity(entity_query):
query_result_proxy = database.session.execute(entity_query)
database.session.commit()
row_proxies = [r for r in query_result_proxy]
if len(row_proxies) == 1:
json_entity = {k: v for k, v in row_proxies[0].items()}
else:
json_entity = {}
return json_entity
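# Hedged usage sketch (table and column names are assumptions, not from this app):
#
#     from sqlalchemy import select
#     user_query = select([users_table]).where(users_table.c.id == user_id)
#     user_json = get_single_json_entity(user_query)  # {} unless exactly one row matches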
| 2.40625 | 2 |
test.py | rykumar13/news-scrapper-backend | 0 | 12796118 | <filename>test.py
import requests
def main():
# url = "http://127.0.0.1:5000/"
url = "http://localhost:8080/"
raw_response = requests.get(url=url, auth=('api_username', 'api_password'))
if raw_response.status_code == 200:
result = raw_response.json()
print(result)
else:
print(str(raw_response.status_code) + " - " + raw_response.text)
if __name__ == "__main__":
main()
| 2.984375 | 3 |
test/formats/test_mrea.py | randovania/retro-data-structures | 0 | 12796119 | <filename>test/formats/test_mrea.py
from pathlib import Path
import pytest
from retro_data_structures.base_resource import AssetId
from retro_data_structures.formats import Mlvl
from retro_data_structures.formats.mrea import MREA, Mrea
from retro_data_structures.game_check import Game
from test import test_lib
_mrea_path_p1 = "Resources/Worlds/EndCinema/!EndCinema_Master/01_endcinema.MREA"
_mrea_path_p2 = "Resources/Worlds/SandWorld/!SandWorld_Master/00_pickup_sand_d_dark.MREA"
@pytest.fixture(name="p1_mrea_path")
def _p1_mrea_path(prime1_pwe_project) -> Path:
return prime1_pwe_project.joinpath(_mrea_path_p1)
@pytest.fixture(name="p2_mrea_path")
def _p2_mrea_path(prime2_pwe_project) -> Path:
return prime2_pwe_project.joinpath(_mrea_path_p2)
def test_compare_p1(p1_mrea_path):
# Known difference: some Prime 1 script layers have sizes that
# are not multiples of 32; building always pads to 32
test_lib.parse_and_build_compare(MREA, Game.PRIME, p1_mrea_path)
def test_compare_p1_parsed(p1_mrea_path):
test_lib.parse_and_build_compare_parsed(MREA, Game.PRIME, p1_mrea_path)
def test_compare_p2(p2_mrea_path):
test_lib.parse_and_build_compare_parsed(MREA, Game.ECHOES, p2_mrea_path)
def test_compare_all_p2(prime2_asset_manager, mrea_asset_id: AssetId):
resource, decoded, encoded = test_lib.parse_and_build_compare_from_manager(
prime2_asset_manager,
mrea_asset_id,
Mrea,
)
assert isinstance(decoded, Mrea)
def test_add_instance(prime2_asset_manager):
from retro_data_structures.properties.echoes.objects.SpecialFunction import SpecialFunction
from retro_data_structures.enums import echoes
mlvl = prime2_asset_manager.get_parsed_asset(0x42b935e4, type_hint=Mlvl)
area = mlvl.get_area(0x5DFA984F)
area.get_layer("Default").add_instance_with(SpecialFunction(
function=echoes.Function.Darkworld,
))
assert area.mrea.build() is not None
| 2.234375 | 2 |
build/tools/build_resource_to_bytecode.py | openharmony-gitee-mirror/ace_ace_engine | 0 | 12796120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
def resource_file_to_bytecode(input_dir, input_file, output_path):
with open(os.path.join(input_dir, input_file), 'rb')\
as resource_file_object:
with open(output_path, 'a') as cpp_file_object:
            length = 0
            all_the_content = resource_file_object.read()
            template0 = "#include <stdint.h>\n"
            template1 = "const uint8_t _binary_$1_start[$2] = {$3};\n"
            template2 = \
                "const uint8_t* _binary_$1_end = _binary_$1_start + $2;"
            formats = ","
            seq = []
            for content in all_the_content:
                seq.append(str(hex(content)))
                length = length + 1
            byte_code = formats.join(seq)
            input_file = input_file.replace(".", "_")
            template1 = template1.replace("$1", str(input_file)) \
                .replace("$2", str(length)) \
                .replace("$3", str(byte_code))
            template2 = template2.replace("$1", str(input_file)) \
                .replace("$2", str(length))
            cpp_file_object.seek(0)
            cpp_file_object.truncate()
            cpp_file_object.write(template0 + template1 + template2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--objcopy', type=str, required=False)
parser.add_argument('--input', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--arch', type=str, required=False)
args = parser.parse_args()
input_dir, input_file = os.path.split(args.input)
output_path = os.path.abspath(args.output)
resource_file_to_bytecode(input_dir, input_file, output_path)
if __name__ == '__main__':
sys.exit(main())
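# Hedged usage sketch (paths are placeholders): a build rule would invoke this roughly as
#
#     python build_resource_to_bytecode.py --input out/res/icon.png --output gen/icon.cpp
#
# producing a C array of the form
#
#     #include <stdint.h>
#     const uint8_t _binary_icon_png_start[3] = {0x89,0x50,0x4e};
#     const uint8_t* _binary_icon_png_end = _binary_icon_png_start + 3;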
| 2.484375 | 2 |
config.py | nowindxdw/flask_base | 0 | 12796121 | <filename>config.py
#!/usr/bin/env python
# encoding: utf-8
import os
basedir = os.path.abspath(os.path.dirname(__file__))
BASEDIR = basedir
DEBUG = False
SECRET_KEY = 'This is a secret key forexample'
# The URI must not end with a trailing comma, otherwise SQLAlchemy raises
# AttributeError: 'tuple' object has no attribute 'drivername'
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:[email protected]/test?charset=utf8"  # base management DB
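# For example (illustrative only): a stray trailing comma turns the value into a
# one-element tuple, which SQLAlchemy cannot interpret as a connection URI:
#
#     SQLALCHEMY_DATABASE_URI = "mysql+pymysql://...",  # tuple -> AttributeError
#     SQLALCHEMY_DATABASE_URI = "mysql+pymysql://..."   # plain string -> OK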
SQLALCHEMY_BINDS = {
    'base': "mysql+pymysql://root:[email protected]/test?charset=utf8",  # base database
    'website': "mysql+pymysql://root:[email protected]/website?charset=utf8",  # web database
    'otherdb': "mysql+pymysql://root:[email protected]/otherdb?charset=utf8",  # other database
}
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_AUTOFLUSH = False
SQLALCHEMY_ECHO = True
REDIS_URL = 'redis://:@127.0.0.1:6379'
| 2.0625 | 2 |
examples/miniapps/engines_cars/example/cars.py | kinow/python-dependency-injector | 0 | 12796122 | """Dependency injection example, cars module."""
class Car:
"""Example car."""
def __init__(self, engine):
"""Initialize instance."""
self._engine = engine # Engine is injected
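# Hedged usage sketch (the engine classes live in the companion engines module of this
# example; the class name below is an assumption):
#
#     from .engines import GasolineEngine
#     car = Car(engine=GasolineEngine())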
| 2.8125 | 3 |
GCC-paddle/gcc/models/emb/from_numpy.py | S-HuaBomb/Contrib | 243 | 12796123 | <reponame>S-HuaBomb/Contrib
import random
import networkx as nx
import numpy as np
class Zero(object):
def __init__(self, hidden_size, **kwargs):
self.hidden_size = hidden_size
def train(self, G):
return np.zeros((G.number_of_nodes(), self.hidden_size))
class FromNumpy(object):
def __init__(self, hidden_size, emb_path, **kwargs):
super(FromNumpy, self).__init__()
self.hidden_size = hidden_size
self.emb = np.load(emb_path)
def train(self, G):
id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
embeddings = np.asarray([self.emb[id2node[i]] for i in range(len(id2node))])
assert G.number_of_nodes() == embeddings.shape[0]
return embeddings
class FromNumpyGraph(FromNumpy):
def train(self, G):
assert G is None
return self.emb
class FromNumpyAlign(object):
def __init__(self, hidden_size, emb_path_1, emb_path_2, **kwargs):
self.hidden_size = hidden_size
self.emb_1 = np.load(emb_path_1)
self.emb_2 = np.load(emb_path_2)
self.t1, self.t2 = False, False
def train(self, G):
if G.number_of_nodes() == self.emb_1.shape[0] and not self.t1:
emb = self.emb_1
self.t1 = True
elif G.number_of_nodes() == self.emb_2.shape[0] and not self.t2:
emb = self.emb_2
self.t2 = True
else:
raise NotImplementedError
id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
embeddings = np.asarray([emb[id2node[i]] for i in range(len(id2node))])
return embeddings
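# Hedged usage sketch (graph and path are placeholders, not from the original repo):
#
#     g = nx.karate_club_graph()
#     model = FromNumpy(hidden_size=64, emb_path="emb.npy")
#     emb = model.train(g)  # rows aligned with g.nodes(); width set by emb.npy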
| 2.3125 | 2 |
tests/__init__.py | robinsax/canvas | 4 | 12796124 | # coding: utf-8
'''
Unit tests on the canvas framework.
'''
from . import exceptions, utils, json, views, controller_service, assets, \
model, service
| 1.132813 | 1 |
files/042 - coded triangle numbers.py | farukara/Project-Euler-problems | 0 | 12796125 | <filename>files/042 - coded triangle numbers.py<gh_stars>0
#!python3
# coding: utf-8
# The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are:
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
# By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.
# Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
#https://projecteuler.net/problem=42
from time import perf_counter
def timeit(func):
def wrapper(*args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
finish = perf_counter()
print(f"{func.__name__} function took {finish - start:.3f} seconds")
return result
return wrapper
@timeit
def main():
triangle_num_values = []
for i in range(1, 27):
triangle_num_values.append(int(i*(i+1)*0.5))
counter = 0
with open("p042_words.txt", "r") as f:
words = f.readlines()[0][1:-1].split('","')
for word in words:
sum_of_letters = 0
for letter in word:
sum_of_letters += ord(letter)-64
if sum_of_letters in triangle_num_values:
counter += 1
print("\n", counter)
if __name__ == "__main__":
main()
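# Hedged note (not part of the original solution): the 26 precomputed triangle numbers
# cover word values only up to t26 = 351; a variant that adapts to the data could be
#
#     values = [sum(ord(c) - 64 for c in w) for w in words]
#     triangles = {n * (n + 1) // 2 for n in range(1, max(values) + 1)}
#     count = sum(v in triangles for v in values)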
| 3.828125 | 4 |
nucleus/__init__.py | ciex/souma | 5 | 12796126 | <filename>nucleus/__init__.py
import logging
import blinker
from web_ui import db, app
from sqlalchemy.orm import sessionmaker
ERROR = {
    "MISSING_MESSAGE_TYPE": (1, "No message type found."),
    "MISSING_PAYLOAD": (2, "No data payload found."),
    "OBJECT_NOT_FOUND": lambda name: (3, "Object does not exist: {}".format(name)),
"MISSING_KEY": lambda name: (4, "Missing data for this request: {}".format(name)),
"INVALID_SIGNATURE": (5, "Invalid signature."),
"INVALID_SESSION": (6, "Session invalid. Please re-authenticate."),
"DUPLICATE_ID": lambda id: (7, "Duplicate ID: {}".format(id)),
"SOUMA_NOT_FOUND": lambda id: (8, "Souma not found: {}".format(id)),
"MISSING_PARAMETER": lambda name: (9, "Missing HTTP parameter: {}".format(name)),
}
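# Usage sketch (illustrative only): entries are either (code, message) tuples or
# callables that build them, e.g.
#
#     ERROR["MISSING_PAYLOAD"]           # -> (2, "No data payload found.")
#     ERROR["MISSING_KEY"]("signature")  # -> (4, "Missing data for this request: signature")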
# Setup Blinker namespace
notification_signals = blinker.Namespace()
# Setup logger namespace
logger = logging.getLogger('nucleus')
# Source formatting helper
source_format = lambda address: None if address is None else \
"{host}:{port}".format(host=address[0], port=address[1])
# Possible states of stars
STAR_STATES = {
-2: (-2, "deleted"),
-1: (-1, "unavailable"),
0: (0, "published"),
1: (1, "draft"),
2: (2, "private"),
3: (3, "updating")
}
# Possible states of planets
PLANET_STATES = {
-1: (-1, "unavailable"),
0: (0, "published"),
1: (1, "private"),
2: (2, "updating")
}
# Possible states of 1ups
ONEUP_STATES = {
-1: "disabled",
0: "active",
1: "unknown author"
}
CHANGE_TYPES = ("insert", "update", "delete")
class InvalidSignatureError(Exception):
"""Throw this error when a signature fails authenticity checks"""
pass
class PersonaNotFoundError(Exception):
"""Throw this error when the Persona profile specified for an action is not available"""
pass
class UnauthorizedError(Exception):
"""Throw this error when the active Persona is not authorized for an action"""
pass
class VesicleStateError(Exception):
"""Throw this error when a Vesicle's state does not allow for an action"""
pass
# Import at bottom to avoid circular imports
# Import all models to allow querying db binds
from nucleus.models import *
from vesicle import Vesicle
# _Session is a custom sessionmaker that returns a session prefconfigured with the
# model bindings from Nucleus
_Session = sessionmaker(bind=db.get_engine(app))
def create_session():
"""Return a session to be used for database connections
Returns:
Session: SQLAlchemy session object
"""
# Produces integrity errors!
# return _Session()
# db.session is managed by Flask-SQLAlchemy and bound to a request
return db.session
| 2.28125 | 2 |
2018/day2/part2.py | MartinPetkov/AdventOfCode | 0 | 12796127 | <reponame>MartinPetkov/AdventOfCode
#!/usr/bin/env python
import argparse
def part2(input):
# Track all strings with each letter missing.
# If seen, exit early.
seen = {}
for box_id in input:
        for i in range(len(box_id)):
            if i not in seen:
                seen[i] = set()
            s = box_id[:i] + box_id[i+1:]
            if s in seen[i]:
                return s
            seen[i].add(s)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-f',
default='input.txt',
dest='input_file',
)
args = parser.parse_known_args()[0]
input_file = args.input_file
with open(input_file) as f:
input = [l.strip() for l in f.readlines()]
print(part2(input))
if __name__ == '__main__':
main()
| 3.5 | 4 |
analysis.py | cangokceaslanx/2D-Scattering | 1 | 12796128 | <reponame>cangokceaslanx/2D-Scattering
from scipy.stats import linregress
import matplotlib.pyplot as mt
import math
import numpy as np
import statistics as st
datax = [20,40,60,80,100,120,140,160,180,200,220,240,260,280,300,320,340]
data = [43,23,25,32,46,55,55,72,73,92,73,56,37,54,33,26,16]
datax_symmetric = [20,40,60,80,100,120,140,160,180]
sin_symmetric = []
errorbars = []
symmetric = []
errorbars_symmetric = []
for i in range(len(data)):
errorbars.append(math.sqrt(data[i]))
for k in range(8):
symmetric.append(data[k]+data[16-k])
errorbars_symmetric.append(math.sqrt((data[k])+(data[16-k])))
symmetric.append(data[8]*2)
errorbars_symmetric.append(math.sqrt((data[8])+(data[8])))
for l in range(len(symmetric)):
sin_symmetric.append(np.sin(math.radians(datax_symmetric[l] / 2)))
sine = np.array(sin_symmetric)
error = np.array(errorbars_symmetric)
dNarr = np.array(symmetric)
slope,intercept,rvalue,pvalue,stderr=linregress(sine,dNarr)
fit=np.polyfit(sine,dNarr,1)
bfl=np.poly1d(fit)
mt.errorbar(sine, dNarr, yerr=errorbars_symmetric, linestyle="None", color="red", label="Error bars")  # error bars on the symmetric-summation points
mt.scatter(sine, dNarr, color="red", label="Data points")  # points of the symmetric summation
mt.plot(sine, bfl(sine), color="green", label="Best-fit line")  # line fit of the symmetric summation
mt.title("Theta vs dN")
mt.xlabel("Theta")
mt.ylabel("dN")
mt.legend()
mt.show()
chi_squared = np.sum((np.polyval(fit, sine) - dNarr) ** 2)  # chi-squared of the line fit
dNi = sum(symmetric)  # total of symmetric summation
stDni = st.stdev(symmetric)  # standard deviation of symmetric summation
#mt.bar(datax,data,width=19,align='center',yerr=errorbars,color="gray") #this is the plot of real data we took from the paper
#mt.bar(datax_symmetric,symmetric,width=19,align='center',yerr=errorbars_symmetric,color="green") #this is the data for symmetric summation
| 2.5 | 2 |
Python-LoopControlBreak.py | H2oPtic/Codecademy-Education | 0 | 12796129 | <gh_stars>0
dog_breeds_available_for_adoption = ["french_bulldog", "dalmatian", "shihtzu", "poodle", "collie"]
dog_breed_I_want = "collie"
for dog_breed in dog_breeds_available_for_adoption:
print(dog_breed)
if dog_breed == dog_breed_I_want:
print("They have the dog I want!")
break
| 3.5625 | 4 |
calibration/python/bolopropertiesutils.py | tskisner/spt3g_software | 6 | 12796130 | <reponame>tskisner/spt3g_software<gh_stars>1-10
from spt3g.calibration import BolometerProperties
from spt3g import core
import math
__all__ = ['SplitByProperty', 'SplitByBand', 'SplitTimestreamsByBand',
'SplitByWafer', 'SplitByPixelType']
@core.indexmod
class SplitByProperty(object):
'''
Take an input G3FrameObject-derivative Map keyed by bolometer name and
split it into several based on the property of the detectors as given by
the BolometerProperties key.
Return the same type of maps as the one it was handed, e.g.
G3TimestreamMap, G3MapInt, etc.
'''
def __init__(self, input='CalTimestreams', property=None, property_list=None,
output_root=None, bpm='BolometerProperties'):
'''
Split the input map given by input into several output
maps named output_root + key (e.g. CalTimestreams + str(property)) with
the default options).
Arguments
---------
input : str
Key name of the input map to split.
property : str
Attribute name to extract from the BolometerProperties object.
Required.
property_list : list of properties
Properties to include in the output keys. Entries that are not strings
will be converted to strings using the `SplitByProperty.converter` method.
If property_list is not None, use only the names in the
list (possibly writing empty timestream maps to the frame). Otherwise,
creates maps for every that exists in the input.
output_root : str
Prefix for the output keys.
If None (default), use `input` as the output root.
bpm : str
The key name of the BolometerPropertiesMap from which to extract
the requested `property` for splitting the input map.
'''
if property is None:
core.log_fatal("Property is a required argument")
self.bpmattr = property
self.input = input
self.output_root = output_root if output_root is not None else input
if property_list is not None:
self.props = [self.converter(x) if not isinstance(x, str) else x
for x in property_list]
else:
self.props = None
self.bpmkey = bpm
self.bpm = None
@staticmethod
def converter(prop):
"""
Function for converting the property to its corresponding string name.
Returns a string representation of the input argument, or None if the
argument is invalid.
Overload this function in subclasses of SplitByProperty to change
how attributes are parsed into their string representations.
"""
if prop is None:
return None
return str(prop)
def __call__(self, frame):
if self.bpmkey in frame:
self.bpm = frame[self.bpmkey]
if self.input not in frame:
return
inmap = frame[self.input]
out = {}
if self.props is not None:
for prop in self.props:
out[prop] = type(inmap)()
for b in inmap.keys():
try:
prop = self.converter(getattr(self.bpm[b], self.bpmattr))
except KeyError:
continue
if prop not in out:
if self.props is None and prop is not None:
out[prop] = type(inmap)()
else:
continue
out[prop][b] = inmap[b]
for prop in out.keys():
frame['%s%s' % (self.output_root, prop)] = out[prop]
@core.indexmod
class SplitByBand(SplitByProperty):
'''
Take an input G3FrameObject-derivative Map keyed by bolometer name and
split it into several based on the bands of the detectors as given by
the BolometerProperties key.
Return the same type of maps as the one it was handed, e.g.
G3TimestreamMap, G3MapInt, etc.
'''
def __init__(self, input='CalTimestreams', output_root=None,
bands=None, bpm='BolometerProperties'):
'''
Split the input map given by input into several output
maps named output_root + band + GHz (e.g. CalTimestreams150GHz with
the default options). If bands is not None, use only the bands in the
list (possibly writing empty timestream maps to the frame). Otherwise,
creates maps for every band that exists in the input. Setting bpm
to a non-default value causes this to get its band mapping from an
alternative data source.
'''
super(SplitByBand, self).__init__(
input=input, output_root=output_root, property_list=bands,
bpm=bpm, property='band')
@staticmethod
def converter(band):
if isinstance(band, str):
return band
if math.isnan(band) or math.isinf(band):
return None
if band < 0:
return None
return '%dGHz' % int(band/core.G3Units.GHz)
@core.indexmod
class SplitTimestreamsByBand(SplitByBand):
def __init__(self, input='CalTimestreams', output_root=None,
bands=None, bpm='BolometerProperties'):
core.log_warn("SplitTimestreamsByBand is deprecated, use SplitByBand instead")
super(SplitTimestreamsByBand, self).__init__(
input=input, output_root=output_root, bands=bands, bpm=bpm)
@core.indexmod
class SplitByWafer(SplitByProperty):
'''
Take an input G3FrameObject-derivative Map keyed by bolometer name and
split it into several based on the wafers of the detectors as given by
the BolometerProperties key.
Return the same type of maps as the one it was handed, e.g.
G3TimestreamMap, G3MapInt, etc.
'''
def __init__(self, input='CalTimestreams', output_root=None,
wafers=None, bpm='BolometerProperties'):
'''
Split the input map given by input into several output
maps named output_root + wafer (e.g. CalTimestreamsW172 with
the default options). If wafers is not None, use only the wafers in the
list (possibly writing empty timestream maps to the frame). Otherwise,
creates maps for every wafer that exists in the input. Setting bpm
to a non-default value causes this to get its wafer mapping from an
alternative data source.
'''
super(SplitByWafer, self).__init__(
input=input, output_root=output_root, property_list=wafers,
bpm=bpm, property='wafer_id')
@staticmethod
def converter(wafer):
if wafer is None:
return None
return str(wafer).capitalize()
@core.indexmod
class SplitByPixelType(SplitByProperty):
'''
Take an input G3FrameObject-derivative Map keyed by bolometer name and
split it into several based on the pixel types of the detectors as given by
the BolometerProperties key.
Return the same type of maps as the one it was handed, e.g.
G3TimestreamMap, G3MapInt, etc.
'''
def __init__(self, input='CalTimestreams', output_root=None,
types=None, bpm='BolometerProperties'):
'''
        Split the input map given by input into several output
        maps named output_root + pixel type. If types is not None, use only the
        pixel types in the list (possibly writing empty timestream maps to the
        frame). Otherwise, creates maps for every pixel type that exists in the
        input. Setting bpm to a non-default value causes this to get its pixel
        type mapping from an alternative data source.
'''
super(SplitByPixelType, self).__init__(
input=input, output_root=output_root, property_list=types,
bpm=bpm, property='pixel_type')
@staticmethod
def converter(pixel_type):
if pixel_type is None:
return None
if not pixel_type:
return None
pixel_type = str(pixel_type)
if pixel_type.lower() == 'n/a':
return None
if pixel_type.islower():
return pixel_type.capitalize()
return pixel_type
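# Hedged usage sketch (not part of the original module): the splitters above are meant to
# be added as pipeline modules; the reader/writer stages and file names below are
# assumptions for illustration.
def _example_split_by_band_pipeline(input_file, output_file):
    pipe = core.G3Pipeline()
    pipe.Add(core.G3Reader, filename=input_file)
    # splits 'CalTimestreams' into e.g. CalTimestreams90GHz, CalTimestreams150GHz, ...
    pipe.Add(SplitByBand, input='CalTimestreams', bands=['90GHz', '150GHz', '220GHz'])
    pipe.Add(core.G3Writer, filename=output_file)
    pipe.Run()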
| 2.609375 | 3 |
controle_financeiro/lancamentos/migrations/0001_initial.py | douglaspands/controle-financeiro | 0 | 12796131 | <reponame>douglaspands/controle-financeiro
# Generated by Django 3.2 on 2021-04-27 02:00
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('carteiras', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criado_em', models.DateTimeField(auto_now_add=True)),
('atualizado_em', models.DateTimeField(auto_now=True)),
('titulo', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100, unique=True)),
('descricao', models.TextField()),
],
options={
'ordering': ['slug'],
},
),
migrations.CreateModel(
name='Despesa',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criado_em', models.DateTimeField(auto_now_add=True)),
('atualizado_em', models.DateTimeField(auto_now=True)),
('nome', models.CharField(max_length=100)),
('valor_total', models.DecimalField(decimal_places=2, max_digits=11)),
('datahora', models.DateTimeField()),
('quantidade_parcelas', models.IntegerField(default=1)),
('situacao', models.IntegerField(choices=[(1, 'Em Aberto'), (2, 'Pago'), (3, 'Cancelado'), (4, 'Estornado')], default=1)),
],
options={
'ordering': ['-datahora'],
},
),
migrations.CreateModel(
name='Lancamento',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criado_em', models.DateTimeField(auto_now_add=True)),
('atualizado_em', models.DateTimeField(auto_now=True)),
('tipo', models.IntegerField(choices=[(1, 'Receita'), (2, 'Despesa')])),
('datahora', models.DateTimeField()),
('categorias', models.ManyToManyField(blank=True, to='lancamentos.Categoria')),
('centro_custo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lancamentos', to='carteiras.centrocusto')),
],
options={
'ordering': ['-datahora'],
},
),
migrations.CreateModel(
name='Receita',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criado_em', models.DateTimeField(auto_now_add=True)),
('atualizado_em', models.DateTimeField(auto_now=True)),
('nome', models.CharField(max_length=100)),
('valor_total', models.DecimalField(decimal_places=2, default=0, max_digits=11)),
('datahora', models.DateTimeField()),
('lancamento', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='receita', to='lancamentos.lancamento')),
],
options={
'ordering': ['-datahora'],
},
),
migrations.CreateModel(
name='Parcela',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criado_em', models.DateTimeField(auto_now_add=True)),
('atualizado_em', models.DateTimeField(auto_now=True)),
('ordem', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('data', models.DateField()),
('valor', models.DecimalField(decimal_places=2, max_digits=11)),
('situacao', models.IntegerField(choices=[(1, 'Em Aberto'), (2, 'Pago'), (3, 'Cancelado'), (4, 'Estornado')], default=1)),
('despesa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parcelas', to='lancamentos.despesa')),
],
options={
'ordering': ['data'],
},
),
migrations.AddField(
model_name='despesa',
name='lancamento',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='despesa', to='lancamentos.lancamento'),
),
migrations.AddIndex(
model_name='categoria',
index=models.Index(fields=['slug'], name='lancamentos_slug_1e2e80_idx'),
),
]
| 1.882813 | 2 |
test_time_log_window.py | sphericalpm/jira-quick-reporter | 1 | 12796132 | <filename>test_time_log_window.py
from jiraclient import JiraClient
import unittest
from types import SimpleNamespace as sn
class Test(unittest.TestCase):
def test_get_remaining_estimate_empty(self):
issue = sn(
fields=sn(
timetracking=sn(
raw={}
)
)
)
self.assertEqual(JiraClient.get_remaining_estimate(issue), '0m')
def test_get_remaining_estimate(self):
issue = sn(
fields=sn(
timetracking=sn(
raw={'remainingEstimate': '1h'}
)
)
)
self.assertEqual(JiraClient.get_remaining_estimate(issue), '1h')
if __name__ == '__main__':
unittest.main()
| 2.421875 | 2 |
test_old.py | knightman/skybitz-gatewaydemo | 0 | 12796133 | import gateway
import soapreqs
import time
from datetime import datetime
#Imports currently used for testing only
# import pprint
# import json
# --------------------------------------------------------- #
''' EARLY TEST SCENARIOS '''
# --------------------------------------------------------- #
# invalrmlist = d['soap:Body']['GetInventoryCalcAlarmResponse']['GetInventoryCalcAlarmResult']['CalcAlarmInventory']
# inventorytime = ''
# for item in invalrmlist:
# if item['sUTCInventoryTime']:
# #datetime_object = datetime.strptime(str(item['sUTCInventoryTime']), '%m %d %Y %I:%M:%S %p')
# print(str(item['sUTCInventoryTime']))
# # if item['iCalcAlarmBits'] != str(0):
# # print('Tank ' + item['iTankID'] + ' has alarm status ' + item['iCalcAlarmBits'])
# f = open('temp.json', 'w')
# f.write(json.dumps(resp, sort_keys=True, indent=4))
# for k in d['soap:Body']:
# print(k)
# break
# d = {'ONE':{'TWO':{'THREE':'some txt value'}}}
# pprint.pprint(d)
# print(d['ONE'])
# print(d['ONE']['TWO'])
# print(d['soap:Body']['GetTankResponse']['@xmlns'])
# print(d['soap:Body']['GetTankResponse']['iErrorCode'])
# tanklist = d['soap:Body']['GetTankResponse']['GetTankResult']['Tank']
# for item in tanklist:
# print(item) #need to fix
# #Org example reading the list in Organization value
# print(d['soap:Body']['GetOrganizationResponse']['@xmlns'])
# print(d['soap:Body']['GetOrganizationResponse']['iErrorCode'])
# list = d['soap:Body']['GetOrganizationResponse']['GetOrganizationResult']['Organization'] #returns list
# for k in list:
# #print(type(k))
# #print(k)
# for k, v in k.items():
# if k == 'iOrganizationID':
# print(k, v)
# #print(v)
# #Loc example reading the list in Location value
# print('Return code: ' + str(d['soap:Body']['GetLocationResponse']['iErrorCode']))
# print('Location List: ')
# list = d['soap:Body']['GetLocationResponse']['GetLocationResult']['Location'] #returns list
# for k in list:
# try:
# if k['iLocationID']:
# print('ID: ' + str(k['iLocationID']) + ' Name: ' + str(k['sLocationName'])
# + ' Address: ' + str(k['sAddress1']))
# except KeyError:
# pass
# #Tank example reading the list in Tank value
# print('Return code: ' + str(d['soap:Body']['GetTankResponse']['iErrorCode']))
# print('Tank List: ')
# list = d['soap:Body']['GetTankResponse']['GetTankResult']['Tank'] #returns list
# for k in list:
# try:
# if k['iTankID']:
# print('ID: ' + str(k['iTankID']))
# except KeyError:
# pass
# --------------------------------------------------------- #
''' REAL GATEWAY TEST SECTION '''
# --------------------------------------------------------- #
# GATEWAY SOAP GEN AND REQUEST TESTS
# g = gateway.Gateway()
# Make the Request to Gateway
# soapResponse = g.gateway_request(soapreqs.get_org_soap())
# soapResponse = g.gateway_request(soapreqs.get_loc_soap())
# soapResponse = g.gateway_request(soapreqs.get_tank_soap())
# soapResponse = g.gateway_request(soapreqs.get_inv_soap())
# soapResponse = g.gateway_request(soapreqs.get_invalrm_soap())
# tankgenlatlonstr = '10203647'
# soapResponse = g.gateway_request(soapreqs.get_tankgenlatlon_soap(tankgenlatlonstr))
# # Parse response
# dresp = g.parse_response(soapResponse)
# print(dresp)
# INV ALARM CALC TRANSACTIONID TESTS
# # Step1 - make request using simple inventory soap (ie. zero as ACK code), parse response and save json file
# g = gateway.Gateway()
# dictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_soap())) #soapreqs.get_invalrm_transactid_soap('0') works the same
# # Step2 - Process the json file to get the TransactionID and Inv Calc Alarm count
# p = gateway.Process()
# transactidstr = p.get_inventorycalcalrm_transactID()
# invalrmcount = p.count_inventorycalcalrm()
# print('TransactionID: ' + transactidstr + ' Inv Count: ' + str(invalrmcount))
# time.sleep(2) #wait 2 secs
# #Step2.5 - make a second gateway req using the TransactionID to create unique json - first test
# testinvtransactid = '47174434'
# #g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(testinvtransactid)))
# newinvalrmcount = p.count_inventorycalcalrm_unique(testinvtransactid)
# print('new count: ' + str(newinvalrmcount))
# #Step3 - make a second gateway request using the TransactionID to create unique json file
# uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(transactidstr)))
# g.save_resp_unique_json(uniquedictresponse, transactidstr)
# #Step4 - Now parse the unique json file to get the new transaction id and count
# newtransactidstr = p.get_inventorycalcalrm_unique_transactID(transactidstr)
# newinvalrmcount = p.count_inventorycalcalrm_unique(transactidstr)
# print('NEW TransactionID: ' + newtransactidstr + ' NEW Inv Count: ' + str(newinvalrmcount))
# #Step 5- Repeat as neccessary until count < 100 to get the latest inventory
# nexttransactidstr = transactidstr
# newinvalrmcount = invalrmcount
# while newinvalrmcount == 100:
# time.sleep(3)
# #replaces step3
# uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(nexttransactidstr)))
# g.save_resp_unique_json(uniquedictresponse, nexttransactidstr)
# print('Created unique json for TransactionID ' + nexttransactidstr)
# #replaces step4
# newinvalrmcount = p.count_inventorycalcalrm_unique(nexttransactidstr) #updates newinvalrmcount
# newtransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr) #temp var
# print('NEW TransactionID: ' + nexttransactidstr + ' NEW Inv Count: ' + str(newinvalrmcount))
# nexttransactidstr = newtransactidstr #updates nexttransactidstr
# # NEW TEST TO GET LATEST INV RECORDS - THIS PROCESS GIVES YOU LATEST UNIQUE INVCALCALARM
# #NOTE: THIS METHOD OF GETTING LATEST INVENTORY ONLY WORKS IF YOU HAVE LESS THAN 100 TANKS!
# #TODO: Place this all into a function whose job is to basically create the latest inventory json file.
# g = gateway.Gateway()
# firstresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_soap()))
# g.save_resp_json(firstresponse)
# # Everything depends on count of this first item
# p = gateway.Process()
# thecount = p.count_inventorycalcalrm()
# transactidstr = p.get_inventorycalcalrm_transactID()
# print('TransactID: ' + transactidstr)
# print('Inventory count: ' + str(thecount))
# #IF COUNT <= 0 --> NO NEW INV RECORDS
# #MUST USE LATEST UNIQUE JSON FILE FOR INV RECORDS
# #ELSE IF COUNT >= 100 --> NEED TO ITERATE THRU TO GET LATEST
# #MUST MAKE SURE YOU SAVE EACH UNIQUE JSON! ONCE YOU CALL THE WEB SERVICE WITH TRANSACTID, YOU CANNOT GET IT AGAIN!
# #ELSE YOU HAVE THE LATEST INV IN GetInventoryCalcAlarmResponse.json, SAVE TO LATEST
# if thecount <= 0:
# #No new inv, Use latest unique - BASICALLY THIS MEANS NEED TO COMPARE EMPTY GetInventoryCalcAlarmResponse.json
# #FILE TO THE LATEST GetInventoryCalcAlarmResponse_latest.json INVENTORY THAT SOULD ALREADY EXIST
# print('Zero new inventory records, use the existing latest')
# elif thecount >= 100:
# #ITERATE TO GET THE LATEST INVENTORY GetInventoryCalcAlarmResponse_latest.json; ALSO DEL EMPTY LATEST IF PRESENT AT END!
# print('more than 100, need to iterate to latest')
# #transactidstr = p.get_inventorycalcalrm_transactID()
# #invalrmcount = p.count_inventorycalcalrm()
# print('TransactionID: ' + transactidstr)
# #get and save unique json reponse
# uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(transactidstr)))
# g.save_resp_unique_json(uniquedictresponse, transactidstr)
# #get the new inv alarm count from the uniquedictresponse
# invalrmcount = p.count_inventorycalcalrm_unique(transactidstr)
# print(' NEW Inv Count: ' + str(invalrmcount))
# #set transactid and count to first one above
# #nextinvalrmcount = invalrmcount
# nextinvalrmcount = thecount
# nexttransactidstr = transactidstr
# # while more to get, set new transactid to that from latest unique json
# while True:
# #save next to last id string in case last item has zero records
# nexttolastidstr = nexttransactidstr
# #break while loop if count less than 100
# if nextinvalrmcount < 100:
# break
# print('fetching next...')
# newtransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr)
# print('NEW TransactionID: ' + newtransactidstr)
# #get the next unique json from gateway request
# newuniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(newtransactidstr)))
# g.save_resp_unique_json(newuniquedictresponse, newtransactidstr)
# #get the new inv alrm count from the newtransactidstr
# newinvalrmcount = p.count_inventorycalcalrm_unique(newtransactidstr)
# print(' NEW Inv Count: ' + str(newinvalrmcount))
# #update nexttransactid and nextinvalrmcount
# nexttransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr)
# nextinvalrmcount = p.count_inventorycalcalrm_unique(nexttransactidstr)
# time.sleep(2)
# #now, check if latest unique json has no records, if so delete it
# if len(nexttolastidstr) > 0 and newinvalrmcount < 1:
# deletresponsestr = 'data/GetInventoryCalcAlarmResponse{0}.json'
# g.delete_resp_unique_json(deletresponsestr.format(nexttransactidstr))
# #finally, save the latest non-empty unique inv json file to the latest
# g.save_resp_unique_json(newuniquedictresponse, '_latest')
# else:
# print('Less than 100')
# #save as latest inv json file
# g.save_resp_unique_json(firstresponse, '_latest')
# # #also get and save unique json reponse for the next transactid - IMPORTANT: THIS WILL GIVE AN EMPTY NEXT REPONSE
# # uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(transactidstr)))
# # g.save_resp_unique_json(firstresponse, transactidstr)
# transactidstr = p.get_inventorycalcalrm_transactID()
# # invalrmcount = p.count_inventorycalcalrm()
# print('TransactionID: ' + transactidstr)
# #get and save unique json reponse
# uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(transactidstr)))
# g.save_resp_unique_json(uniquedictresponse, transactidstr)
# #get the new inv alarm count from the uniquedictresponse
# invalrmcount = p.count_inventorycalcalrm_unique(transactidstr)
# print(' NEW Inv Count: ' + str(invalrmcount))
# #determine inv count - if less than 100, nothing more to do
# nexttolastidstr = ''
# newuniquedictresponse = []
# if invalrmcount == 100:
# print('more than 100, need to iterate to latest')
# #set transactid and count to first one above
# nexttransactidstr = transactidstr
# nextinvalrmcount = invalrmcount
# # while more to get, set new transactid to that from latest unique json
# while True:
# #save next to last id string in case last item has zero records
# nexttolastidstr = nexttransactidstr
# #break while loop if count less than 100
# if nextinvalrmcount < 100:
# break
# print('fetching next...')
# newtransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr)
# print('NEW TransactionID: ' + newtransactidstr)
# #get the next unique json from gateway request
# newuniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(newtransactidstr)))
# g.save_resp_unique_json(newuniquedictresponse, newtransactidstr)
# #get the new inv alrm count from the newtransactidstr
# newinvalrmcount = p.count_inventorycalcalrm_unique(newtransactidstr)
# print(' NEW Inv Count: ' + str(newinvalrmcount))
# #update nexttransactid and nextinvalrmcount
# nexttransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr)
# nextinvalrmcount = p.count_inventorycalcalrm_unique(nexttransactidstr)
# time.sleep(3)
# #now, check if latest unique json has no records, if so delete it
# if len(nexttolastidstr) > 0 and newinvalrmcount < 1:
# deletresponsestr = 'data/GetInventoryCalcAlarmResponse{0}.json'
# g.delete_resp_unique_json(deletresponsestr.format(nexttransactidstr))
# #finally, rename the unique inv json file to be the generic starting point GetInventoryCalcAlarmResponselatest json file!
# if len(str(newuniquedictresponse)) > 0:
# g.save_resp_unique_json(newuniquedictresponse, 'latest')
# else:
# print('less than 100, have latest')
# g.save_resp_unique_json(uniquedictresponse, 'latest')
# PROCESSING TEST SECTION ONLY
# p = gateway.Process()
#test1
# tanklist = p.get_tank_list()
# for item in tanklist:
# print(item)
#test2
# invlist = p.get_inventory_list()
# for item in invlist:
# print(item)
#test3
# bothlist = p.get_tankinv_list()
# for item in bothlist:
# print(item)
#test4
# print(p.get_grossvol_byinvid('194699940'))
#test5
# latestinvstr = p.get_latestinvid_bytank('10203647') #works!
# print(latestinvstr)
#test6 - nice working test!
# tanklist = p.get_tank_list() #gives list of tank ids
# print(tanklist)
# for item in tanklist: #display latest inventory for each tank in list
# latestinvidstr = p.get_latestinvid_bytank(str(item)) #get the latest inventory id for the tank
# print('TankID: ' + str(item) + ' currently has gross vol ' + p.get_grossvol_byinvid(latestinvidstr) + ' gals')
#test7
#print(str(p.get_tankname_bytankid('10203647')))
# # TEST 8 - full test working thru step 4 - fully working
# g = gateway.Gateway()
# p = gateway.Process()
# #step1 - req all tanks and write to master tanks file
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_tank_soap())))
# time.sleep(2)
# #step2 - build tank list from file created in step 1
# tanklist = p.get_tank_list() #gives list of tank ids - THIS IS AN IMPORTANT STEP FOR SEVERAL ITEMS BELOW!!!!!!!
# #print(tanklist)
# for item in tanklist: #for each unique tank, create a unique file for each tank
# g.save_resp_unique_json(g.parse_response(g.gateway_request(soapreqs.get_tankgenlatlon_soap(item))), item)
# time.sleep(1)
# #step3 - get latest inv and save file
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_inv_soap())))
# #step4 - for each tank in tanklist get latest inventory and display
# #note: for this to work, you must have already done steps 1 and 3 above - need tank and inv
# for item in tanklist:
# latestinvidstr = p.get_latestinvid_bytank(str(item)) #get the latest inventory id for the tank
# print('Tank ' + p.get_tankname_bytankid_file(str(item)) + ' currently has gross vol of '
# + str(int(float(p.get_grossvol_byinvid(latestinvidstr)))) + ' gals')
# #step5 - works now, similar to step 4
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_invalrm_soap())))
# #step7 - parse and display the data
# for item in tanklist:
# latestinvidstr = p.get_latestinvid_bytank(str(item)) #get the latest inventory id for the tank
# alarmstatus = p.get_tankalrm_byinvid(latestinvidstr)
# if alarmstatus != '0':
# print('Tank ' + p.get_tankname_bytankid_file(str(item)) + ' currently has alarm status of '
# + alarmstatus + ' calc alarm bits')
# #TODO: Add function in Process to perform an alarm bits lookup to decode the actual alarm state
# #RUN.PY TEST
# # SETUP RUN TEST TO CHECK FOR CHANGES VIA GATEWAY
# # TODO: Switch print stmts to log statements
# print('\nWELCOME TO THE GATEWAY DEMO APP\n--------------------------------')
# g = gateway.Gateway()
# p = gateway.Process()
# while True:
# print(str(datetime.datetime.now()) + ' - wake up...')
# #step1 - request all tanks and write to master tanks file
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_tank_soap())))
# time.sleep(1)
# print('retrieved tanks...')
# #step2 - build tank list from file created in step 1
# tanklist = p.get_tank_list() #gives list of tank ids
# print('TankIDs: ' + str(tanklist))
# for item in tanklist: #for each unique tank, create a unique file for each tank
# g.save_resp_unique_json(g.parse_response(g.gateway_request(soapreqs.get_tankgenlatlon_soap(item))), item)
# time.sleep(1)
# #step3 - get latest inv and save file
# print('writing parsed inventory data to file...')
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_inv_soap())))
# print('writing parsed alarm data to file...')
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_invalrm_soap())))
# #delay
# print('zzzzz')
# time.sleep(180) #sleep for 3mins, increase this later
# def build_latest_inv_file():
# '''NEW TEST TO GET LATEST INV RECORDS - THIS PROCESS GIVES YOU LATEST UNIQUE INVCALCALARM
# NOTE: THIS METHOD OF GETTING LATEST INVENTORY ONLY WORKS IF YOU HAVE LESS THAN 100 TANKS!'''
# try:
# logtxt = ''
# g = gateway.Gateway()
# firstresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_soap()))
# g.save_resp_json(firstresponse)
# # Everything depends on count of this first item
# p = gateway.Process()
# thecount = p.count_inventorycalcalrm()
# transactidstr = p.get_inventorycalcalrm_transactID()
# print('TransactID: ' + transactidstr)
# print('Inventory count: ' + str(thecount))
# #IF COUNT <= 0 --> NO NEW INV RECORDS
# #MUST USE LATEST UNIQUE JSON FILE FOR INV RECORDS
# #ELSE IF COUNT >= 100 --> NEED TO ITERATE THRU TO GET LATEST
# #MUST MAKE SURE YOU SAVE EACH UNIQUE JSON! ONCE YOU CALL THE WEB SERVICE WITH TRANSACTID, YOU CANNOT GET IT AGAIN!
# #ELSE YOU HAVE THE LATEST INV IN GetInventoryCalcAlarmResponse.json, SAVE TO LATEST
# if thecount <= 0:
# #No new inv, Use latest unique - BASICALLY THIS MEANS NEED TO COMPARE EMPTY GetInventoryCalcAlarmResponse.json
# #FILE TO THE LATEST GetInventoryCalcAlarmResponse_latest.json INVENTORY THAT SOULD ALREADY EXIST
# print('Zero new inventory records, use the existing latest')
# elif thecount >= 100:
# #ITERATE TO GET THE LATEST INVENTORY GetInventoryCalcAlarmResponse_latest.json; ALSO DEL EMPTY LATEST IF PRESENT AT END!
# print('more than 100, need to iterate to latest')
# #transactidstr = p.get_inventorycalcalrm_transactID()
# #invalrmcount = p.count_inventorycalcalrm()
# print('TransactionID: ' + transactidstr)
# #get and save unique json reponse
# uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(transactidstr)))
# g.save_resp_unique_json(uniquedictresponse, transactidstr)
# #get the new inv alarm count from the uniquedictresponse
# invalrmcount = p.count_inventorycalcalrm_unique(transactidstr)
# print(' NEW Inv Count: ' + str(invalrmcount))
# #set transactid and count to first one above
# #nextinvalrmcount = invalrmcount
# nextinvalrmcount = thecount
# nexttransactidstr = transactidstr
# # while more to get, set new transactid to that from latest unique json
# while True:
# #save next to last id string in case last item has zero records
# nexttolastidstr = nexttransactidstr
# #break while loop if count less than 100
# if nextinvalrmcount < 100:
# break
# print('fetching next...')
# newtransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr)
# print('NEW TransactionID: ' + newtransactidstr)
# #get the next unique json from gateway request
# newuniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(newtransactidstr)))
# g.save_resp_unique_json(newuniquedictresponse, newtransactidstr)
# #get the new inv alrm count from the newtransactidstr
# newinvalrmcount = p.count_inventorycalcalrm_unique(newtransactidstr)
# print(' NEW Inv Count: ' + str(newinvalrmcount))
# #update nexttransactid and nextinvalrmcount
# nexttransactidstr = p.get_inventorycalcalrm_unique_transactID(nexttransactidstr)
# nextinvalrmcount = p.count_inventorycalcalrm_unique(nexttransactidstr)
# time.sleep(2)
# #now, check if latest unique json has no records, if so delete it
# if len(nexttolastidstr) > 0 and newinvalrmcount < 1:
# deletresponsestr = 'data/GetInventoryCalcAlarmResponse{0}.json'
# g.delete_resp_unique_json(deletresponsestr.format(nexttransactidstr))
# #finally, save the latest non-empty unique inv json file to the latest
# g.save_resp_unique_json(newuniquedictresponse, '_latest')
# else:
# print('Less than 100')
# #save as latest inv json file
# g.save_resp_unique_json(firstresponse, '_latest')
#         # #also get and save unique json response for the next transactid - IMPORTANT: THIS WILL GIVE AN EMPTY NEXT RESPONSE
# # uniquedictresponse = g.parse_response(g.gateway_request(soapreqs.get_invalrm_transactid_soap(transactidstr)))
# # g.save_resp_unique_json(firstresponse, transactidstr)
# except:
# logtxt = 'error'
# return logtxt
# # TEST 9 - modified test #8 for using latest inv above based on count
# g = gateway.Gateway()
# p = gateway.Process()
# #step1 - req all tanks and write to master tanks file
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_tank_soap())))
# time.sleep(2)
# #step2 - build tank list from file created in step 1
# tanklist = p.get_tank_list() #gives list of tank ids - THIS IS AN IMPORTANT STEP FOR SEVERAL ITEMS BELOW!!!!!!!
# #print(tanklist)
# for item in tanklist: #for each unique tank, create a unique json file for each tank
# g.save_resp_unique_json(g.parse_response(g.gateway_request(soapreqs.get_tankgenlatlon_soap(item))), item)
# time.sleep(1)
# #step3 - get latest inv and save file
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_inv_soap())))
# #step4 - for each tank in tanklist get latest inventory and display
# #note: for this to work, you must have already done steps 1 and 3 above - need tank and inv
# for item in tanklist:
# latestinvidstr = p.get_latestinvid_bytank(str(item)) #get the latest inventory id for the tank
# print('Tank ' + p.get_tankname_bytankid_file(str(item)) + ' currently has gross vol of '
# + str(int(float(p.get_grossvol_byinvid(latestinvidstr)))) + ' gals')
# #step5 - works now, similar to step 4
# g.save_resp_json(g.parse_response(g.gateway_request(soapreqs.get_invalrm_soap())))
# #step7 - parse and display the data
# for item in tanklist:
# latestinvidstr = p.get_latestinvid_bytank(str(item)) #get the latest inventory id for the tank
# alarmstatus = p.get_tankalrm_byinvid(latestinvidstr)
# if alarmstatus != '0':
# print('Tank ' + p.get_tankname_bytankid_file(str(item)) + ' currently has alarm status of '
# + alarmstatus + ' calc alarm bits')
| 2.28125 | 2 |
setup.py | vspaz/pclient | 1 | 12796134 | <reponame>vspaz/pclient
import os
import setuptools
def _build_path(file_path, base=os.path.abspath(os.path.dirname(__file__))):
return os.path.join(base, file_path)
def _get_dependencies():
with open(_build_path(file_path='requirements/prod.txt')) as fh:
return [line.strip() for line in fh.readlines()]
def _get_readme():
with open(_build_path(file_path='README.md')) as fh:
return fh.read()
def _get_package_info():
with open(_build_path(file_path='pyclient/__version__.py')) as fh:
package_info = {}
exec(fh.read(), package_info)
return package_info
_PACKAGE_INFO = _get_package_info()
setuptools.setup(
name=_PACKAGE_INFO['__title__'],
version=_PACKAGE_INFO['__version__'],
description=_PACKAGE_INFO['__description__'],
long_description=_get_readme(),
packages=setuptools.find_packages(exclude=['tests', 'requirements']),
install_requires=_get_dependencies(),
url=_PACKAGE_INFO['__url__'],
license='MIT License',
author=_PACKAGE_INFO['__author__'],
author_email=_PACKAGE_INFO['__email__'],
maintainer=_PACKAGE_INFO['__maintainer__'],
classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
)
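
# setup.py reads its metadata by exec-ing pyclient/__version__.py into a dict, so that
# file must define the keys consumed above. Its real contents are not shown in this
# snippet; the values below are placeholders, a hypothetical example only.
# __title__ = 'pclient'
# __description__ = 'placeholder description'
# __version__ = '0.0.1'
# __url__ = 'https://github.com/vspaz/pclient'
# __author__ = 'vspaz'
# __email__ = 'placeholder@example.com'
# __maintainer__ = 'vspaz'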
| 1.921875 | 2 |
performance_curves/__init__.py | erp12/performance-curves | 0 | 12796135 | <gh_stars>0
"""Hello world!
A brief overview of the package should go here.
"""
| 1.179688 | 1 |
tcs.py | DeViL3998/hackerrankChallenges | 0 | 12796136 | n, m = list(map(int, input().split(" ")))
# Count pairs of primes in [n, m] that differ by 6 ("sexy prime" pairs).
arr = []  # primes found in [n, m]
count = count2 = 0
for i in range(n, m + 1):
    # count the divisors of i up to i//2; exactly one (namely 1) means i is prime
    for j in range(1, i//2 + 1):
        if i % j == 0:
            count += 1
    if count == 1:
        arr.append(i)
    count = 0
for i in arr:
    if i + 6 in arr:  # i and i + 6 are both prime
        count2 += 1
print(count2) | 3.046875 | 3 |
basicts/data/transforms.py | zezhishao/GuanCang_BasicTS | 3 | 12796137 | from basicts.utils.registry import SCALER_REGISTRY
"""
data normalization and re-normalization
"""
# ====================================== re-normalizations ====================================== #
@SCALER_REGISTRY.register()
def re_max_min_normalization(x, **kwargs):
_min, _max = kwargs['min'], kwargs['max']
x = (x + 1.) / 2.
x = 1. * x * (_max - _min) + _min
return x
@SCALER_REGISTRY.register()
def standard_re_transform(x, **kwargs):
mean, std = kwargs['mean'], kwargs['std']
x = x * std
x = x + mean
return x
# ====================================== normalizations ====================================== #
# omitted to avoid redundancy, as they should only be used in data preprocessing in `scripts/data_preparation`
| 2.84375 | 3 |
ex009.py | vinisantos7/PythonExercicios | 2 | 12796138 | print("Bem-Vindo a Tabuada v1.0!")
num = (int(input("Digite um número para a tabuada: ")))
print('-'*12)
print(f"{num} x {1:2} = {num * 1}")
print(f"{num} x {2:2} = {num * 2}")
print(f"{num} x {3:2} = {num * 3}")
print(f"{num} x {4:2} = {num * 4}")
print(f"{num} x {5:2} = {num * 5}")
print(f"{num} x {6:2} = {num * 6}")
print(f"{num} x {7:2} = {num * 7}")
print(f"{num} x {8:2} = {num * 8}")
print(f"{num} x {9:2} = {num * 9}")
print(f"{num} x {10:2} = {num * 10}")
print("-"*12)
| 4.25 | 4 |
python-pattern-matching/stdlib_ast.py | oilshell/blog-code | 54 | 12796139 | <reponame>oilshell/blog-code
#!/usr/bin/env python3
"""
demo.py
Run with Python 3.10
"""
from __future__ import print_function
import sys
import ast
from ast import BinOp, UnaryOp, Constant, Add, Sub, USub
# https://gvanrossum.github.io/docs/PyPatternMatching.pdf
def fact(arg):
match arg:
case 0 | 1:
f = 1
case n:
f = n * fact(n - 1)
return f
def mysum(seq):
match seq:
case []:
s = 0
case [head, *tail]:
s = head + mysum(tail)
return s
# This one is superficially different from the version in the paper!
#
# The positional patterns below work because Python 3.10's ast node classes define
# __match_args__ (derived from their _fields), e.g. BinOp matches (left, op, right).
def simplify(node):
match node:
case BinOp(Constant(left), Add(), Constant(right)):
return Constant(left + right)
case BinOp(left, Add() | Sub(), Constant(0)):
return simplify(left)
case UnaryOp(USub(), UnaryOp(USub(), item)):
return simplify(item)
case _:
return node
def main(argv):
print('Hello from demo.py')
print(fact(6))
print(mysum([1, 2, 3]))
# Test out all the optimizations
for code_str in ['3 + 4', '3 - 0', '- - 5']:
print(' %s' % code_str)
module = ast.parse(code_str)
expr = module.body[0].value
print(ast.dump(expr))
opt = simplify(expr)
print(' => optimized')
print(opt)
print(ast.dump(opt))
print('-----')
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
| 2.546875 | 3 |
swaty/classes/subbasin.py | changliao1025/pyswat | 2 | 12796140 | <filename>swaty/classes/subbasin.py
import os,stat
import sys
import glob
import shutil
import numpy as np
from pathlib import Path
import tarfile
import subprocess
from shutil import copyfile
from abc import ABCMeta, abstractmethod
import datetime
from shutil import copy2
import json
from json import JSONEncoder
from swaty.classes.swatpara import swatpara
class SubbasinClassEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.float32):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, swatpara):
return json.loads(obj.tojson())
if isinstance(obj, list):
pass
return JSONEncoder.default(self, obj)
class pysubbasin(object):
__metaclass__ = ABCMeta
lIndex_subbasin=-1
iFlag_subbasin=0
nSoil_layer = 1
nParameter_subbasin=0
aParameter_subbasin=None
aParameter_subbasin_name = None
def __init__(self, aConfig_in =None):
if aConfig_in is not None:
pass
else:
pass
return
def setup_parameter(self, aPara_in= None):
if aPara_in is not None:
self.nParameter_subbasin = len(aPara_in)
self.aParameter_subbasin=list()
self.aParameter_subbasin_name =list()
for i in range(self.nParameter_subbasin):
subbasin_dummy = aPara_in[i]
pParameter_subbasin = swatpara(subbasin_dummy)
self.aParameter_subbasin.append(pParameter_subbasin)
sName = pParameter_subbasin.sName
if sName not in self.aParameter_subbasin_name:
self.aParameter_subbasin_name.append(sName)
else:
pass
return
def tojson(self):
aSkip = []
obj = self.__dict__.copy()
for sKey in aSkip:
obj.pop(sKey, None)
sJson = json.dumps(obj,\
sort_keys=True, \
indent = 4, \
ensure_ascii=True, \
cls=SubbasinClassEncoder)
return sJson | 2.234375 | 2 |
Ch1-Arrays-and-Strings/04_palindrome_permutation.py | fatima-rizvi/CtCI-Solutions-6th-Edition | 0 | 12796141 | <filename>Ch1-Arrays-and-Strings/04_palindrome_permutation.py<gh_stars>0
# Given a string, write a function to check if it is a permutation of a palindrome
def palindrome_permutation(str1):
str1 = str1.replace(" ", "")
count = {}
for char in str1:
if count.get(char):
count[char] += 1
else:
count[char] = 1
odds = 0
for char, num in count.items():
if num % 2 != 0:
odds += 1
if odds > 1:
return False
return True
print(palindrome_permutation("tact coa")) # True, taco cat
print(palindrome_permutation("cacr ear")) # True, race car
print(palindrome_permutation("livo veile")) # True, evil olive
print(palindrome_permutation("not one")) # False
| 3.96875 | 4 |
instructors/need-rework/24_curses/cu0.py | mgadagin/PythonClass | 46 | 12796142 | import curses
def main(stdscr):
""" Curses is controlled from here.
This might be called 'the loop' in a game.
game loop: http://gameprogrammingpatterns.com/game-loop.html
"""
curses.textpad.rectangle(stdscr,0,0,10,10)
keypress = int()
# 113 is the lowercase 'q' key.
while keypress != 113:
keypress = stdscr.getch()
print keypress
if __name__=='__main__':
"""
This is our most basic model.
This presupposes you know how to read a try... finally... block.
For now, put it in your 'to learn' notes.
You can come back to this to see the components initialized by curses.wrapper.
This code is pretty identical to example 1.
Moving on!!
"""
try:
stdscr=curses.initscr()
curses.noecho() ; curses.cbreak()
stdscr.keypad(1)
main(stdscr) # Enter the main loop
finally:
stdscr.erase()
stdscr.refresh()
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin() # Terminate curses
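
# The docstring above points at curses.wrapper, which performs this same setup and
# teardown (initscr, noecho, cbreak, keypad, and the reverse on exit or exception).
# A minimal equivalent of this example using the wrapper -- a sketch, not part of
# the original lesson file:
import curses
import curses.textpad

def wrapped_main(stdscr):
    curses.textpad.rectangle(stdscr, 0, 0, 10, 10)
    while stdscr.getch() != 113:  # 113 == ord('q')
        pass

if __name__ == '__main__':
    curses.wrapper(wrapped_main)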
| 3.9375 | 4 |
treecompare/__init__.py | mx-pycoder/treecompare | 0 | 12796143 | # API
from ._treecompare import namecomp
from ._treecompare import treedups
from ._treecompare import treepurge
from ._treecompare import duplicate
| 1.046875 | 1 |
kite-python/metrics/kite_metrics/loader.py | kiteco/kiteco-public | 17 | 12796144 | from jinja2 import Environment, PackageLoader, select_autoescape
import yaml
import json
import pkg_resources
import os
env = Environment(
loader=PackageLoader('kite_metrics', 'schemas'),
)
cache = {}
def _schema_exists(filename):
return pkg_resources.resource_exists('kite_metrics', 'schemas/{}'.format(filename))
def _schema_open(filename):
return pkg_resources.resource_stream('kite_metrics', 'schemas/{}'.format(filename))
def load_context(key):
filename = '{}.ctx.yaml'.format(key)
if filename not in cache:
ctx = {}
if _schema_exists(filename):
ctx = yaml.load(_schema_open(filename), yaml.FullLoader)
cache[filename] = ctx
return cache[filename]
def load_schema(key):
filename = '{}.yaml.tmpl'.format(key)
if filename not in cache:
ctx = load_context(key)
cache[filename] = yaml.load(env.get_template(filename).render(ctx), Loader=yaml.FullLoader)
return cache[filename]
def load_json_schema(key, extra_ctx=None):
filename = '{}.schema.json'.format(key)
if filename not in cache:
if _schema_exists(filename):
cache[filename] = json.load(_schema_open(filename))
else:
tmpl_filename = '{}.schema.json.tmpl'.format(key)
ctx = {'schema': load_schema(key)}
if extra_ctx:
ctx.update(extra_ctx)
rendered = env.get_template(tmpl_filename).render(ctx)
try:
cache[filename] = json.loads(rendered)
except json.decoder.JSONDecodeError:
print("Error decoding schema JSON:\n{}".format(rendered))
return cache[filename]
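
# Hypothetical usage -- 'client_events' is a placeholder key; it has to match a
# schemas/<key>.yaml.tmpl (plus optional <key>.ctx.yaml / <key>.schema.json.tmpl)
# shipped inside the kite_metrics package.
schema = load_schema('client_events')
json_schema = load_json_schema('client_events', extra_ctx={'title': 'Client events'})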
| 2.265625 | 2 |
scan/link_finders/find_links_for_disks.py | korenlev/calipso-cvim | 0 | 12796145 | ###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from base.utils.configuration import Configuration
from base.utils.origins import Origin
from scan.link_finders.find_links import FindLinks
class FindLinksForDisks(FindLinks):
# per future ceph releases this might need revisions
DB_PARTITION_PATH_ATT = 'bluefs_db_partition_path'
BLK_PARTITION_PATH_ATT = 'bluestore_bdev_partition_path'
def __init__(self):
super().__init__()
self.environment_type = None
self.hosts = []
self.osds = []
self.disks = []
self.partitions = []
def setup(self, env, origin: Origin = None):
super().setup(env, origin)
self.configuration = Configuration()
self.environment_type = self.configuration.get_env_type()
def add_links(self):
self.log.info("adding links of types: host-osd, osd-partition, partition-disk")
self.hosts = self.inv.find_items({
"environment": self.configuration.env_name,
"type": "host"
})
self.osds = self.inv.find_items({
"environment": self.get_env(),
"type": "osd"
})
self.partitions = self.inv.find_items({
"environment": self.get_env(),
"type": "partition"
})
self.disks = self.inv.find_items({
"environment": self.get_env(),
"type": "disk"
})
for osd in self.osds:
self.add_link_for_hosts(osd)
for partition in self.partitions:
self.add_link_for_osds(partition)
for disk in self.disks:
self.add_link_for_partitions(disk)
def add_link_for_hosts(self, osd):
# link_type: "host-osd"
metadata = osd.get('metadata', '')
for host in self.hosts:
if host.get('id', 'None') == osd.get('host', ''):
self.add_links_with_specifics(host, osd,
extra_att={"osd_data": metadata.get('osd_data', '')})
def add_link_for_osds(self, partition):
# link_type: "osd-partition"
for osd in self.osds:
metadata = osd.get('metadata', '')
            same_host = osd.get('host', 'None') == partition.get('host', '')
            db_on_partition = metadata.get(self.DB_PARTITION_PATH_ATT, 'None') == partition.get('device', '')
            blk_on_partition = metadata.get(self.BLK_PARTITION_PATH_ATT, 'None') == partition.get('device', '')
            data_on_partition = metadata.get('osd_data', 'None') == partition.get('mount_point', '')
            if (same_host and (db_on_partition or blk_on_partition)) or data_on_partition:
self.add_links_with_specifics(osd, partition,
extra_att={"osd_objectstore": metadata.get('osd_objectstore', '')})
def add_link_for_partitions(self, disk):
# link_type: "partition-disk"
for partition in self.partitions:
if (partition.get('master_disk', 'None') == disk.get('name', '')) and (
partition.get('host', 'None') == disk.get('host', 'None')):
self.add_links_with_specifics(partition, disk,
extra_att={"partition_type": partition.get('label', '')})
def add_links_with_specifics(self, source, target, extra_att=None):
link_name = '{}-{}'.format(source.get('name', 'None'), target.get('name', ''))
source_label = '{}-{}-{}'.format(source.get('cvim_region', ''), source.get('cvim_metro', ''),
source.get('id', ''))
target_label = target.get('id', '')
extra = {"source_label": source_label, "target_label": target_label}
if extra_att:
extra.update(extra_att)
self.link_items(source, target, link_name=link_name, extra_attributes=extra)
| 1.835938 | 2 |
gws_volume_scanner/scanner/util.py | cedadev/gws-scanner | 0 | 12796146 | <filename>gws_volume_scanner/scanner/util.py<gh_stars>0
import multiprocessing as mp
import queue as queue_
from . import config, elastic, models, scanner
class ElasticQueueWorker:
"""Create and manage a worker for sending files to es."""
def __init__(self, config_: config.ScannerSchema):
# Used to signal to the worker to exit.
self._shutdown = mp.Event()
# Setup queue of items for elasticsearch.
self.queue: mp.JoinableQueue[models.File] = mp.JoinableQueue(
config_["queue_length_scale_factor"]
)
# Start process to to elastic tasks.
self._pr = mp.Process(
target=elastic.worker,
args=((self.queue, config_, self._shutdown)),
)
self._pr.start()
def shutdown(self) -> None:
"""Shutdown queue and worker and make sure everything gets tidied up."""
# Ensure queue is done.
self.queue.close()
self.queue.join()
# Signal to worker it should finish.
self._shutdown.set()
# Shut the process down.
self._pr.join()
self._pr.close()
# Shutdown queue completely.
self.queue.join_thread()
class ScanQueueWorker:
"""Create and mannage queue and worker pool for scanning files."""
def __init__(
self, config_: config.ScannerSchema, elastic_q: queue_.Queue[models.File]
):
# Used to signal to the worker to exit.
self._shutdown = mp.Event()
# Setup queue of items for the scanner.
self.queue: mp.JoinableQueue[scanner.ToScan] = mp.JoinableQueue(
config_["scan_processes"] * config_["queue_length_scale_factor"]
)
# Pool of workers to deal with the queue.
self._pl = mp.Pool( # pylint: disable=consider-using-with
processes=config_["scan_processes"],
initializer=scanner.worker,
initargs=((self.queue, elastic_q, config_, self._shutdown)),
)
def shutdown(self) -> None:
"""Shutdown queue and worker pool and make sure everything gets tidied up."""
# Ensure queue is done.
self.queue.close()
self.queue.join()
# Signal to worker it should finish.
self._shutdown.set()
# Ensure pool is done.
        # Note: a Pool is close()d and then join()ed, the reverse of the Process worker above.
self._pl.close()
self._pl.join()
# Shutdown queue completely.
self.queue.join_thread()
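
# Assumed orchestration sketch: the elasticsearch worker is created first so the scan
# pool can feed its queue, and shutdown happens in the order of data flow (scanners
# first, then the elasticsearch worker). config_ is a ScannerSchema built elsewhere.
def run_scan(config_: config.ScannerSchema) -> None:
    elastic_worker = ElasticQueueWorker(config_)
    scan_workers = ScanQueueWorker(config_, elastic_worker.queue)
    # ... producers put scanner.ToScan items on scan_workers.queue here ...
    scan_workers.shutdown()    # finish scanning so no new File items are produced
    elastic_worker.shutdown()  # then flush and stop the elasticsearch worker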
| 2.5 | 2 |
src/Plot.py | ComicSphinx/TimeTracker | 1 | 12796147 | <gh_stars>1-10
# @Author: <NAME> (@ComicSphinx)
from database.DatabaseUtilities import DatabaseUtilities as dbu
from datetime import datetime as dt
class Plot():
str_sleeping = "Sleeping"
str_unknown = "?"
int_maxMinutes = 1440
int_sleep = 480
def addData(self, minutes):
select = dbu.buildSelect(dbu, dt.now().year, dt.now().month, dt.now().day, self.str_sleeping)
if (dbu.dataIsNotExist(dbu, select)):
self.addSleep(self)
select = dbu.buildSelect(dbu, dt.now().year, dt.now().month, dt.now().day, self.str_unknown)
if (dbu.dataIsNotExist(dbu, select)):
self.addUnknown(self, minutes)
def addSleep(self):
insert = dbu.buildInsert(dbu, self.str_sleeping, self.int_sleep)
dbu.executeCommand(dbu, insert)
def addUnknown(self, minutes):
tmp = 0
for i in range(len(minutes)):
tmp += minutes[i]
tmp = self.int_maxMinutes - tmp
insert = dbu.buildInsert(dbu, self.str_unknown, tmp)
dbu.executeCommand(dbu, insert)
| 2.828125 | 3 |
bluebottle/time_based/migrations/0020_auto_20201102_1230.py | terrameijar/bluebottle | 10 | 12796148 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-11-02 11:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('activities', '0027_contributionvalue'),
('time_based', '0019_auto_20201030_1317'),
]
operations = [
migrations.CreateModel(
name='OnADateApplication',
fields=[
('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
],
options={
'verbose_name': 'On a date application',
'verbose_name_plural': 'On a date application',
'permissions': (('api_read_onadateapplication', 'Can view application through the API'), ('api_add_onadateapplication', 'Can add application through the API'), ('api_change_onadateapplication', 'Can change application through the API'), ('api_delete_onadateapplication', 'Can delete application through the API'), ('api_read_own_onadateapplication', 'Can view own application through the API'), ('api_add_own_onadateapplication', 'Can add own application through the API'), ('api_change_own_onadateapplication', 'Can change own application through the API'), ('api_delete_own_onadateapplication', 'Can delete own application through the API')),
},
bases=('activities.contribution',),
),
migrations.CreateModel(
name='PeriodApplication',
fields=[
('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
('current_period', models.DateField(blank=True, null=True)),
],
options={
'verbose_name': 'Period application',
'verbose_name_plural': 'Period application',
'permissions': (('api_read_periodapplication', 'Can view application through the API'), ('api_add_periodapplication', 'Can add application through the API'), ('api_change_periodapplication', 'Can change application through the API'), ('api_delete_periodapplication', 'Can delete application through the API'), ('api_read_own_periodapplication', 'Can view own application through the API'), ('api_add_own_periodapplication', 'Can add own application through the API'), ('api_change_own_periodapplication', 'Can change own application through the API'), ('api_delete_own_periodapplication', 'Can delete own application through the API')),
},
bases=('activities.contribution',),
),
migrations.RemoveField(
model_name='application',
name='current_period',
),
]
| 1.679688 | 2 |
amulet/world_interface/chunk/interfaces/leveldb/leveldb_15/interface.py | architectdrone/Amulet-Core | 0 | 12796149 | from __future__ import annotations
from amulet.world_interface.chunk.interfaces.leveldb.leveldb_14.interface import (
LevelDB14Interface,
)
class LevelDB15Interface(LevelDB14Interface):
def __init__(self):
LevelDB14Interface.__init__(self)
self.features["chunk_version"] = 15
INTERFACE_CLASS = LevelDB15Interface
| 1.5625 | 2 |
main.py | knishioka/arxiv-bot | 0 | 12796150 | <reponame>knishioka/arxiv-bot
import datetime
from arxiv_bot.arxiv_scraper import ArxivScraper
from arxiv_bot.translator import translate
def main():
"""List updated articles."""
today = datetime.date.today()
start_date = today - datetime.timedelta(days=2)
end_date = today - datetime.timedelta(days=1)
articles = ArxivScraper().search(start_date=start_date, end_date=end_date, category_id="cs.AI")
for article in articles:
print(article["itemTitle"])
print(", ".join(article["itemAuthors"]))
print(translate(article["itemSummary"]))
print(f'https://arxiv.org/abs/{article["id"]}')
if __name__ == "__main__":
main()
| 2.640625 | 3 |