blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3-281) | content_id (string, length 40) | detected_licenses (list, length 0-57) | license_type (string, 2 classes) | repo_name (string, length 6-116) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (string, 17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 107 classes) | src_encoding (string, 20 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.02M) | extension (string, 78 classes) | content (string, length 2-6.02M) | authors (list, length 1) | author (string, length 0-175)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
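Each row below pairs one source file's repository metadata with its full text (`content`). A minimal sketch for iterating over the rows with the Hugging Face `datasets` library; the dataset id is a placeholder, since this dump does not name the actual repository:

```python
from datasets import load_dataset

# "<namespace>/<dataset-name>" is a placeholder, not the real id of this dataset.
ds = load_dataset("<namespace>/<dataset-name>", split="train", streaming=True)
for row in ds:
    # Each row exposes the columns listed in the schema above.
    print(row["repo_name"], row["path"], row["language"], row["length_bytes"])
    break  # inspect a single row
```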
12e031215e23497b73989ea4cf0808a1ec95f07e | 575ad5e7d90ae1c8121bcb8250cff94290e6ada8 | /Day3/fft1.py | e871f4320311f97675cb183a391145c391847372 | []
| no_license | sammita94/Image-Lab | 472aaa8db7b1fd71269450c99cb704bfd0a9f797 | a3c548ba23bf4da89220c503e7eacdea0a1e653c | refs/heads/master | 2020-07-23T05:49:57.441291 | 2016-11-16T04:27:12 | 2016-11-16T04:27:12 | 73,814,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | """Code for Discrete Fourier Transform using numpy functions
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('fft.jpg',0)
#Fourier Transform
f = np.fft.fft2(img)
#Shifting the DC component from top left to center
fshift = np.fft.fftshift(f)
#Finding the Magnitude Spectrum
magnitude_spectrum = 20*np.log(np.abs(fshift))
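# The log scaling above compresses the huge dynamic range of the spectrum so
# that it remains visible when rendered as a grayscale image.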
#Shifting the DC component back to the top left corner
f_ishift = np.fft.ifftshift(fshift)
#Inverse Fourier Transform
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(131),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back, cmap = 'gray')
plt.title('Image inverted'), plt.xticks([]), plt.yticks([])
plt.show()
| [
"[email protected]"
]
| |
2d220c223d8de7d4b6a23be7c9e63a51b4fe4af8 | 05f759e98eefcb7962f3df768fc6e77192311188 | /prepro.py | e276e2d990eb9a35f4fef940fdc81b9d31ad80dc | [
"Apache-2.0"
]
| permissive | 1048693172/transformer | 17c76605beb350a7a1d6fe50a46b3fbbefb16269 | fd26fab9a4e36816223d80e8287c0b08a6d645d0 | refs/heads/master | 2020-04-15T17:44:58.943322 | 2019-01-09T15:14:40 | 2019-01-09T15:14:40 | 164,885,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
June 2017 by kyubyong park.
[email protected].
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
from hyperparams import Hyperparams as hp
import tensorflow as tf
import numpy as np
import codecs
import os
import regex
from collections import Counter
def make_vocab(fpath, fname):
    '''Constructs vocabulary.
    Args:
      fpath: A string. Input file path.
      fname: A string. Output file name.

    Writes vocabulary line by line to `preprocessed/fname`
    '''
    text = codecs.open(fpath, 'r', 'utf-8').read()
    #text = regex.sub("[^\s\p{Latin}']", "", text)
    words = text.split()
    word2cnt = Counter(words)
    if not os.path.exists('preprocessed'):
        os.mkdir('preprocessed')
    with codecs.open('preprocessed/{}'.format(fname), 'w', 'utf-8') as fout:
        fout.write("{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format("<PAD>", "<UNK>", "<S>", "</S>"))
        for word, cnt in word2cnt.most_common(len(word2cnt)):
            fout.write(u"{}\t{}\n".format(word, cnt))

if __name__ == '__main__':
    make_vocab(hp.source_train, "de.vocab.tsv")
    make_vocab(hp.target_train, "en.vocab.tsv")
    print("Done") | [
"[email protected]"
]
| |
7f370a2f39867e89d89ab28e23fdbd1bf78c5c33 | affb8d9028f52201dc56dff947502134dcac3066 | /class-06/demo/big_O.py | a4cb31e5067e800c86925b9dfb3be4fe661ec627 | []
| no_license | maisjamil1/amman-python-401d1 | 10aa4d81c9082fbdf18badc3de060ce1d5309e1a | 25c37a5a7c023b5a24ba7a6cc303338b62548f83 | refs/heads/master | 2022-12-28T19:23:11.143932 | 2020-10-13T11:58:30 | 2020-10-13T11:58:30 | 287,927,879 | 0 | 0 | null | 2020-08-16T11:11:27 | 2020-08-16T11:11:27 | null | UTF-8 | Python | false | false | 2,410 | py | # Measure # of operations
n = 7 #1 operation
for i in range(n):
    print(i) # n operations
# n+1 operations
# n = 5 > 6
# n = 100 > 101
# n = 1000000 > 1000001
# O(n+1)
# O(n)

def testing_bigoh(n):
    for i in range(n):
        for j in range(n):
            print(i,j) # n*n (n^2)
# testing_bigoh(8)
# O(n^2)

nums1 = [2, 5, 8, 9, 43, 7]
nums2 = [-4, 43, 7, 8, 13, 45]

# One Loop
# Return a list of all items bigger than number in unsorted list
def find_nums_above(nums_list, number):
    result = [] # 1 operation
    for num in nums_list: # n times
        if num > number:
            result.append(num) # 1 operation -- 1 extra space
        elif num < number:
            print("Less")
        else:
            print("Else")
        print("Done with current iteration") # 1 operation
    return result # 1 operation

print(find_nums_above(nums1, 10))
# O(2*n+1+1) => O(2n+2)
# O(n)
# O(n) spaces

def find_nums_above_loop_inside(nums_list, number):
    result = [] # 1 operation
    for num in nums_list: # n times
        if num > number:
            result.append(num) # 1 operation
        elif num < number:
            print("Less") # 1 op
            for j in range(len(nums_list)): # n times
                print("Just for fun") # 1 op
        else:
            print("Else") # 1 op
        print("Done with current iteration") # 1 operation
    return result # 1 operation
# O(1 + n (1+ (1 or 1+n or 1) ) + 1)
# O(1 + n (1+ 1+n) + 1)
# O(1 + n(2+n) +1)
# O(2 + 2n^2)
# O(2n^2)
# O(n^2)

print(find_nums_above_loop_inside(nums1, 10))

def tricky_example(a):
    print("Hi") # 1 op
    print(3*4*6/2) # 1 op
    a.sort() # Hidden loop: O(n log n) -- Timsort, a hybrid merge/insertion sort
    print(a) # 1 op
    print("The end") # 1 op
# O(4 + sort-big-oh)
# O(sort-big-oh)

a = [4,7,2,9,5,0,3]

# Binary Search
# O(log n)
# We divide the array into two halves and we eliminate one of them
sorted_list = [-1, 4, 6, 9, 23, 30, 45, 65, 76, 77, 90]

def binary_search(sorted_nums, target):
    min = 0 # 1 space
    max = len(sorted_nums)-1 # 1 space
    while max >= min: # >= so a match at the last remaining index is also found
        pivot = (max+min)//2 # 1 space
        print(max, min, pivot)
        if target == sorted_nums[pivot]:
            return pivot
        elif target < sorted_nums[pivot]:
            max = pivot-1
        else:
            min = pivot+1
    return -1

print(binary_search(sorted_list, -1))
# O(3) spaces
# O(1)
# O(3*log n ) spaces
# O(log n)

def fib(i):
    # base cases: fib(0) = 0 and fib(1) = 1
    if i < 2:
        return i
    return fib(i-1) + fib(i-2)
# fib(4) = fib(3) + fib(2)
# We recreate i variable in every recursive call
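# Illustrative follow-up (not part of the original lecture code): the naive
# recursion above is O(2^n); caching computed values brings it down to O(n).
def fib_memo(i, cache={0: 0, 1: 1}):
    if i not in cache:
        cache[i] = fib_memo(i - 1) + fib_memo(i - 2)
    return cache[i]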
| [
"[email protected]"
]
| |
729aafbd622a90e8bebf023ef2424d3fcf61b70c | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/services/migrations/0014_auto_20201209_1623.py | aa5563d97e9d3dbc154b4da10bedc96ae1265e5e | []
| no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.1.2 on 2020-12-09 19:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('stock', '0001_initial'),
        ('services', '0013_remove_kititem_price'),
    ]

    operations = [
        migrations.AlterField(
            model_name='kititem',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stockitem', to='stock.stock'),
        ),
    ]
| [
"[email protected]"
]
| |
041cf40053b8f029ba5b1f64754d2048cbb70f5e | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/idlelib/grepdialog.py | 05f4b74a7d37f75455c785428aa681b07d431a4b | []
| no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,154 | py | # 2015.11.10 21:36:11 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/idlelib/GrepDialog.py
import os
import fnmatch
import sys
from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
def grep(text, io = None, flist = None):
    root = text._root()
    engine = SearchEngine.get(root)
    if not hasattr(engine, '_grepdialog'):
        engine._grepdialog = GrepDialog(root, engine, flist)
    dialog = engine._grepdialog
    searchphrase = text.get('sel.first', 'sel.last')
    dialog.open(text, searchphrase, io)


class GrepDialog(SearchDialogBase):
    title = 'Find in Files Dialog'
    icon = 'Grep'
    needwrapbutton = 0

    def __init__(self, root, engine, flist):
        SearchDialogBase.__init__(self, root, engine)
        self.flist = flist
        self.globvar = StringVar(root)
        self.recvar = BooleanVar(root)

    def open(self, text, searchphrase, io = None):
        SearchDialogBase.open(self, text, searchphrase)
        if io:
            path = io.filename or ''
        else:
            path = ''
        dir, base = os.path.split(path)
        head, tail = os.path.splitext(base)
        if not tail:
            tail = '.py'
        self.globvar.set(os.path.join(dir, '*' + tail))

    def create_entries(self):
        SearchDialogBase.create_entries(self)
        self.globent = self.make_entry('In files:', self.globvar)

    def create_other_buttons(self):
        f = self.make_frame()
        btn = Checkbutton(f, anchor='w', variable=self.recvar, text='Recurse down subdirectories')
        btn.pack(side='top', fill='both')
        btn.select()

    def create_command_buttons(self):
        SearchDialogBase.create_command_buttons(self)
        self.make_button('Search Files', self.default_command, 1)

    def default_command(self, event = None):
        prog = self.engine.getprog()
        if not prog:
            return
        path = self.globvar.get()
        if not path:
            self.top.bell()
            return
        from idlelib.OutputWindow import OutputWindow
        save = sys.stdout
        try:
            sys.stdout = OutputWindow(self.flist)
            self.grep_it(prog, path)
        finally:
            sys.stdout = save

    def grep_it(self, prog, path):
        dir, base = os.path.split(path)
        list = self.findfiles(dir, base, self.recvar.get())
        list.sort()
        self.close()
        pat = self.engine.getpat()
        print 'Searching %r in %s ...' % (pat, path)
        hits = 0
        for fn in list:
            try:
                with open(fn) as f:
                    for lineno, line in enumerate(f, 1):
                        if line[-1:] == '\n':
                            line = line[:-1]
                        if prog.search(line):
                            sys.stdout.write('%s: %s: %s\n' % (fn, lineno, line))
                            hits += 1
            except IOError as msg:
                print msg
        print 'Hits found: %s\n(Hint: right-click to open locations.)' % hits if hits else 'No hits.'

    def findfiles(self, dir, base, rec):
        try:
            names = os.listdir(dir or os.curdir)
        except os.error as msg:
            print msg
            return []
        list = []
        subdirs = []
        for name in names:
            fn = os.path.join(dir, name)
            if os.path.isdir(fn):
                subdirs.append(fn)
            elif fnmatch.fnmatch(name, base):
                list.append(fn)
        if rec:
            for subdir in subdirs:
                list.extend(self.findfiles(subdir, base, rec))
        return list

    def close(self, event = None):
        if self.top:
            self.top.grab_release()
            self.top.withdraw()


if __name__ == '__main__':
    import unittest
    unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\idlelib\grepdialog.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:36:11 Střední Evropa (běžný čas)
| [
"[email protected]"
]
| |
5aa4ab44e8db688f1fcc7c5792a3d330f805cc4b | e214193fdbc342ce1b84ad4f35bd6d64de7a8767 | /bsn/common/tcp_server.py | af016b7a5eaa3c469a037f78f5023f18cf39703e | []
| no_license | bsn069/py | 78f791dab87c3246a1a173263a703c63c543c8ad | 3b6c2070d38f61eb8511495d38b1cec522ad6de7 | refs/heads/master | 2020-03-10T04:30:00.282303 | 2018-10-07T15:29:45 | 2018-10-07T15:29:45 | 129,193,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from bsn.common import file_import_tree
file_import_tree.file_begin(__name__)
import asyncio
from bsn.common.ip_port import CIPPort
from bsn.common.ip import CIP
from bsn.common.port import CPort
from bsn.common import err
import logging
import enum
from bsn.common import tcp_accept
class EState(enum.Enum):
    Null = 0
    ParseIPPort = 1
    Listened = 2


class CTCPServer(tcp_accept.CTCPAccept):
    def __init__(self, loop):
        logging.info("{}".format(self))
        super().__init__(loop)
        self._EStateCTCPServer = EState.Null

    async def _parse_ip_port(self):
        logging.info("{}".format(self))
        self._CIP = CIP('0.0.0.0')
        self._CPort = CPort(10001)
        await asyncio.sleep(1)

    async def _run(self):
        logging.info("{}".format(self))
        await asyncio.sleep(10)

    async def run(self):
        logging.info("{}".format(self))
        if self._EStateCTCPServer != EState.Null:
            raise err.ErrState(self._EStateCTCPServer)
        try:
            await self._parse_ip_port()
            self._EStateCTCPServer = EState.ParseIPPort
            await self.start_listen()
            self._EStateCTCPServer = EState.Listened
            await self._run()
            logging.info("{} run end".format(self))
        except Exception as e:
            logging.error(e)
        if self._EStateCTCPServer.value > EState.Listened.value:
            await self.stop_listen()
        if self._EStateCTCPServer.value > EState.ParseIPPort.value:
            self._CIP = None
            self._CPort = None
        self._EStateCTCPServer = EState.Null

    @property
    def estate_tcp_server(self):
        return self._EStateCTCPServer
file_import_tree.file_end(__name__)
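# Usage sketch (not part of the original module): drive the server from an
# asyncio event loop, overriding _parse_ip_port/_run in a subclass as needed:
#
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(CTCPServer(loop).run())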
| [
"[email protected]"
]
| |
489be89dfb47f43097ad446f460e1cbd05328464 | 2cfe527e8a5d9c44aa0f83574b1016ec35755446 | /PyFunnels/PyF_theharvester.py | 4b3c10eeaa1b0b57eb4a4a85d46a07744ac7e1e2 | [
"MIT"
]
| permissive | polling-repo-continua/PyFunnels | e3d7a6a89d0369914f5b7ca160c16ea9ebe025c6 | f8089c3c39248eb1ef97f2681c43f76f55a07900 | refs/heads/master | 2022-02-14T12:07:09.866528 | 2019-08-13T17:52:07 | 2019-08-13T17:52:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | import xml.etree.ElementTree as ET
class PyFtheHarvester:
    CAPABILITIES = ['domains', 'ips', 'emails']

    def __init__(self,
                 file,
                 list_domains = [],
                 list_ips = [],
                 list_emails = []
                 ):
        self.file = file
        self.list_domains = list_domains
        self.list_ips = list_ips
        self.list_emails = list_emails
        self.tree = ET.parse(self.file)
        self.root = self.tree.getroot()

    def domains(self):
        for d in self.root.findall('host'):
            domain = d.find('hostname').text
            if domain not in self.list_domains:
                self.list_domains.append(domain)

    def ips(self):
        for i in self.root.findall('host'):
            ip = i.find('ip').text
            if ip not in self.list_ips:
                self.list_ips.append(ip)

    def emails(self):
        for e in self.root.findall('email'):
            email = e.text
            if email not in self.list_emails:
                self.list_emails.append(email) | [
"[email protected]"
]
| |
097abd80763e1b42f4d5a68c9500b5438fdc4a1e | 748cbfda91c8088c8feac93f0dac884a0d334e1c | /jaspar.py | 836b84189a87fefd25d408829b74dd56eea8d398 | []
| no_license | jlwetzel/zfcode | e2aca0b8661079734953cb3d1a9970e2939e1584 | 52e6fba51dbe74d5da9871cbaf28dbc24b7ccad7 | refs/heads/master | 2020-06-30T02:54:17.754259 | 2013-07-30T23:57:27 | 2013-07-30T23:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | # Code for retrieving and manipulating the JASPAR sql_table files
# and the JASPAR PWM file.
import os
JASPAR_BUILD = '2009-Oct12-NonRedundant'
prefix = '../data/JASPAR/' + JASPAR_BUILD
protTab = prefix + '/sql_tables/MATRIX_PROTEIN.txt'
annotTab = prefix + '/sql_tables/MATRIX_ANNOTATION.txt'
speciesTab = prefix + '/sql_tables/MATRIX_SPECIES.txt'
matrixTab = prefix + '/sql_tables/MATRIX.txt'
PWMfile = prefix + '/matrix_only.txt'
def getNewBuild():
    # Get the latest build of the complete JASPAR CORE set.
    # First set up directory structure in ../data/JASPAR/
    JASPAR_HTML_PREFIX = "http://jaspar.genereg.net//" + \
        "html/DOWNLOAD/jaspar_CORE/non_redundant/all_species/"
    sqlTables = ["MATRIX.txt", "MATRIX_ANNOTATION.txt", "MATRIX_DATA.txt",
                 "MATRIX_PROTEIN.txt", "MATRIX_SPECIES.txt"]
    os.mkdir("../data/JASPAR/" + JASPAR_BUILD)
    os.mkdir("../data/JASPAR/" + JASPAR_BUILD + "/sql_tables")
    for tab in sqlTables:
        os.system("wget -P " + prefix + "/sql_tables/ " +
                  JASPAR_HTML_PREFIX + "/sql_tables/" + tab)
    os.system("wget -P " + prefix + " " + JASPAR_HTML_PREFIX
              + "matrix_only/matrix_only.txt")

def getIDsByAnnot(annot, currentList = None):
    # Returns a list of JASPAR unique IDs that are
    # labelled by the annotation. annot is a tuple (key, value)
    if currentList == None:
        ids = set()
    else:
        ids = set(currentList)
    annotFile = open(annotTab, 'r')
    for line in annotFile:
        sp_line = line.strip().split('\t')
        if len(sp_line) < 3:
            continue
        key = sp_line[1]
        val = sp_line[2]
        if key == annot[0] and val == annot[1]:
            ids.add(sp_line[0])
    annotFile.close()
    ids = list(ids)
    ids = [int(i) for i in ids]
    return sorted(list(ids))

def JASPARIDs2proteinIDs(JASPARids):
    # Takes a sorted list of JASPAR IDs and
    # returns a list of the corresponding protein IDs
    protFile = open(protTab, 'r')
    i = 0
    proteinIDs = []
    for line in protFile:
        sp_line = line.strip().split()
        if int(sp_line[0]) == JASPARids[i]:
            proteinIDs.append(sp_line[1])
            i += 1
            if i == len(JASPARids):
                break
    protFile.close()
    return proteinIDs

def getAnnotsByJASPARid(JASPARids, label):
    # Finds the annotation associated with the JasparID
    # and label for each ID in the ***SORTED***
    # list of sorted JASPARids
    annotFile = open(annotTab, 'r')
    i = 0
    vals = []
    for line in annotFile:
        if len(line) != 0:
            sp_line = line.strip().split('\t')
            if int(sp_line[0]) > JASPARids[i]:
                print "No label: %s for JASPAR id %d" %(label, JASPARids[i])
                i += 1
                if i == len(JASPARids):
                    break
            if int(sp_line[0]) == JASPARids[i] and sp_line[1] == label:
                vals.append(sp_line[2])
                i += 1
                if i == len(JASPARids):
                    break
    annotFile.close()
    return vals

def main():
    #getNewBuild()
    JASPARids = getIDsByAnnot(('family', 'BetaBetaAlpha-zinc finger'))
    print JASPARids
    x = getAnnotsByJASPARid(JASPARids, "family")
    #protIDs = JASPARIDs2proteinIDs(JASPARids)
    #print(len(protIDs))
    for t in x:
        print t

if __name__ == '__main__':
    main() | [
"[email protected]"
]
| |
c91563eee6c60960746a34671256bdc380a91e08 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/storage/stores/nosql/mongo/store/test_sets.py | b4a1ce00829727f91194650b0127c7d2bb059299 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programytest.storage.asserts.store.assert_sets import SetStoreAsserts
from programy.storage.stores.nosql.mongo.store.sets import MongoSetsStore
from programy.storage.stores.nosql.mongo.engine import MongoStorageEngine
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
import programytest.storage.engines as Engines
class MongoSetsStoreTests(SetStoreAsserts):
    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_initialise(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assertEqual(store.storage_engine, engine)

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_set_storage(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assert_set_storage(store)

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_upload_from_text(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assert_upload_from_text(store)

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_upload_from_text_file(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assert_upload_from_text_file(store)

    @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
    def test_upload_text_files_from_directory_no_subdir(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assert_upload_text_files_from_directory_no_subdir(store)

    @unittest.skip("CSV not supported yet")
    def test_upload_from_csv_file(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assert_upload_from_csv_file(store)

    @unittest.skip("CSV not supported yet")
    def test_upload_csv_files_from_directory_with_subdir(self):
        config = MongoStorageConfiguration()
        engine = MongoStorageEngine(config)
        engine.initialise()
        store = MongoSetsStore(engine)
        self.assert_upload_csv_files_from_directory_with_subdir(store)
| [
"[email protected]"
]
| |
e7b6ed30d1d3b6ae95bd07204d6d545021943528 | a3ffecad8d176142f0f9b7504503365b8e64bd69 | /turtle2/n2.py | 2bd41ffc2fb2ecbcdad4ab57df34e1a505316357 | []
| no_license | dumb-anchovy/mipt_python_1sem | 517a497d879be1f32530c023af2a9481430c024f | 76d4f378ff74345ac3107d42ce16a68cc5d2e46f | refs/heads/main | 2023-08-27T16:48:18.210559 | 2021-11-02T11:25:17 | 2021-11-02T11:25:17 | 410,534,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | import turtle as t
a0 = [0, 0, 40, 0, 0, -80, -40, 0, 0, 80, 0, 0]
a1 = [0, -40, 40, 40, 0, -80, -40, 80]
a2 = [0, 0, 40, 0, 0, -40, -40, -40, 40, 0, -40, 80]
a3 = [0, 0, 40, 0, -40, -40, 40, 0, -40, -40, 0, 80]
a4 = [0, 0, 0, -40, 40, 0, 0, -40, 0, 80, -40, 0]
a5 = [40, 0, -40, 0, 0, -40, 40, 0, 0, -40, -40, 0, 0, 80]
a6 = [40, 0, -40, -40, 0, -40, 40, 0, 0, 40, -40, 0, 0, 40]
a7 = [0, 0, 40, 0, -40, -40, 0, -40, 0, 80]
a8 = [0, 0, 40, 0, 0, -40, -40, 0, 0, -40, 40, 0, 0, 40, -40, 0, 0, 40, 0, 0]
a9 = [0, -80, 40, 40, 0, 40, -40, 0, 0, -40, 40, 0, -40, 40]
al = [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9]
def ch(a):
    x = t.xcor()
    y = t.ycor()
    for n in range(0, len(a), 2):
        if (n == 0) or (n == len(a) - 2):
            x += a[n]
            y += a[n + 1]
            t.penup()
            t.goto(x, y)
            t.pendown()
        else:
            x += a[n]
            y += a[n + 1]
            t.goto(x, y)

x = -370
y = 0
t.penup()
t.goto(x, y)
t.pendown()
#141700
k = [1, 4, 1, 7, 0, 0]
for j in k:
    ch(al[j])
    x = t.xcor()
    y = t.ycor()
    t.penup()
    t.goto(x + 80, y)
    t.pendown()
t.exitonclick()
| [
"[email protected]"
]
| |
93106b10ac49c4459a2aba027ec3a84d8b8ae976 | ebb4b093fe5b0f1d806ad5b566d80eee0a63148e | /citydata/parse_census.py | a653771485315e3e460da679736118e3599a332a | []
| no_license | kris-samala/LBSN | b8d8e8660afed0e67870ba31ee9532dde72d3f4e | 9005e66df22cb2fb7ff8da64baddbb399e8c975c | refs/heads/master | 2020-12-24T13:36:00.649302 | 2012-06-07T06:27:10 | 2012-06-07T06:27:10 | 3,608,883 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import sys
import fileinput
import pickle
#python parse_census.py [state_abbr] [census_raw] census.out
out = open(sys.argv[3], 'wb')
census = {}
state_abb = {}
if len(sys.argv) < 4:
    print "Filename required."
else:
    for line in fileinput.input(sys.argv[1]):
        line = line.split(',')
        state_abb[line[0]] = line[1]
    for line in fileinput.input(sys.argv[2]):
        line = line.split(',')
        city = line[2].lstrip('"')
        city = city.replace('city','').replace('village','').replace('CDP','').replace('town','').replace('municipality','').replace('zona urbana','').rstrip()
        state = line[3].lstrip().rstrip('"')
        state = state_abb[state].rstrip()
        pop = line[4]
        loc = city + "," + state
        census[loc] = int(pop)
    for l in census:
        out.write(l + " = " + str(census[l]) + "\n")
    out.close()
    pickle.dump(census, open('census.p', 'wb'))
| [
"[email protected]"
]
| |
ed2d0c4a5ef120704b2f17e4f84a75fac344740a | 6c53b41340bcacd08b2cbb01214f20beab27fe6b | /env/bin/symilar | db333a323b6132593cc12b208bbc4246dfe55f95 | [
"MIT"
]
| permissive | daydroidmuchiri/News-Highlight | 8a9e71ed284622d78a1bdff0e1d4fc26bc999c89 | ab7a9ea7bd29c8ca37e8f923af310999fd4cecde | refs/heads/master | 2021-06-24T17:32:37.913494 | 2019-10-22T07:41:03 | 2019-10-22T07:41:03 | 214,965,319 | 0 | 0 | null | 2021-03-20T02:00:37 | 2019-10-14T06:31:26 | Python | UTF-8 | Python | false | false | 275 | #!/home/daniel/Desktop/python/projects/co/news-highlight/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
| [
"[email protected]"
]
| ||
b8d0505385eafd32d112698612fb8469b9c0428a | f06336d8ffcc6028679dff525c60f056f0436e92 | /ChatAj/ChatAj/asgi.py | c257802471be095579816096409cebce83be6805 | []
| no_license | AngelJadan/Primeras-Practicas-con-django | 440fc7efe9c6e49274294765714703ab1c936f8c | fe89c57b16eddb5dcce131212ac5dc3035f41303 | refs/heads/main | 2023-08-19T16:32:19.854395 | 2021-10-14T19:54:51 | 2021-10-14T19:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for ChatAj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChatAj.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
94a4be74fbb2627846ed9d68f324dbf2a692b318 | f749d098555c7be9e1693aab470ed260779baf1e | /函数/main.py | c6d78391b84e124ffaa1df7dcb2585cd93198ef4 | []
| no_license | es716/study-Python | 77ece1828aec0d383c5376eddcf4b7bc593c53c1 | 3a7879e23468f981801ee4428583e0cd13848b08 | refs/heads/master | 2021-01-11T00:20:56.205252 | 2016-10-11T04:08:05 | 2016-10-11T04:08:05 | 70,545,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#==============================================
from test import my_abs,power,add_end,calc,person,person1,person2
names = [1, 2, 3]
print(my_abs(-90))
print (power(25, 5))
print (power(25))
print (add_end())
print (add_end())
print (calc(1,2,3))
print (calc())
print (calc(*names))
person('es',16)
person('es',16,country='China')
person('es',16,country='China',city='Beijing')
person1('es',16,city='Beijing',job = 'studence')
#person1('Jack', 24, 'Beijing', 'Engineer')
person2('es',16,city='Beijing',job = 'studence')
person2('es',16)
'''
Fetching a web page:
import urllib.request
def getHtml(url):
    page = urllib.request.urlopen(url)
    html = page.read()
    return html.decode('UTF-8')
html = getHtml("https://movie.douban.com/")
print(html)
'''
| [
"[email protected]"
]
| |
6db8075e420794f1f1c675a8d8c85b56937e0f2f | 3240f07f724583313f154ca52d617447a582fa60 | /python-village/conditions-and-loops/ini4.py | 2c52ceaed225258b96b4f4581e6e097d3ae319fa | [
"MIT"
]
| permissive | uabua/rosalind | 65b7bf312a1d826e6863ff84f61b43a7c7b3d8bb | 37b3b1a1ef2a245f979ce1c2f08c4d7535d38195 | refs/heads/master | 2021-08-18T22:41:28.360916 | 2021-02-20T19:21:32 | 2021-02-20T19:21:32 | 245,150,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | """
ID: INI4
Title: Conditions and Loops
URL: http://rosalind.info/problems/ini4/
"""
def sum_of_odd_integers(start, end):
    """
    Counts the sum of all odd integers from start through end, inclusively.

    Args:
        start (int): starting number in range.
        end (int): ending number in range.

    Returns:
        int: the sum of all odd integers from start through end, inclusively.
    """
    if start % 2 == 0:
        start += 1
    sum_of_numbers = 0
    for number in range(start, end+1, 2):
        sum_of_numbers += number
    return sum_of_numbers
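# Example usage (illustrative, not in the original file): the odd integers
# between 100 and 200 sum to 7500, matching the Rosalind INI4 sample dataset.
if __name__ == "__main__":
    print(sum_of_odd_integers(100, 200))  # 7500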
| [
"[email protected]"
]
| |
faf52cb7ec3df25b917e3e95f90c424dd7835be9 | 6c516e6bfb610209b82fd5b97b8cc56613d46813 | /day8/dict01.py | cad85366580f9596c59e34bb69c6e77cbbc0226a | []
| no_license | Eric-cv/QF_Python | 7497d1629d24b78aad141d42de5a28b00da207a4 | 8832faaf63e6fbaaeb2d50befa53d86547c31042 | refs/heads/master | 2022-06-16T21:34:48.077318 | 2020-05-11T11:57:03 | 2020-05-11T11:57:03 | 263,021,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # dictionary
'''
Use case:
Diaochan    --- ['Dragon-Slaying Saber', 'grenade']                      800
Zhuge Liang --- ['goose-feather fan', 'Azure Blood Sword', '98k rifle']  300

Dictionary:
Characteristics:
1. Symbol: {}
2. Keyword: dict
3. Stored elements: key:value pairs

list   tuple   dict
[]     ()      {}
list   tuple   dict
ele    ele     key:value   # element
'''
# Definition
dict1 = {}  # empty dict
dict2 = dict()  # empty dict; likewise list() is an empty list, tuple() an empty tuple
dict3 = {'ID':'220821199601010018','name':'Eric','age':18}
dict4 = dict([('name','Eric'),('age',18)])  # 'name':'Eric','age':18
#dict5 = dict([(1,2,3),(4,5),(6,8),(9,0)])  # ValueError: (1,2,3) has three items, two expected
# Note: a list can be converted to a dict, but only if its elements come in pairs.

# Add/remove/update/look up on a dict
# Add -- format: dict[key] = value
# Behaviour: with the format above, if the key already exists in the dict,
# its value is overwritten; otherwise the key:value pair is added to the dict.
dict6 = {}
# format: dict6[key] = value
dict6['brand']='huawei'
print(dict6)  # {'brand':'huawei'}
dict6['brand']='mi'
dict6['type']='p30 pro'
dict6['price']=9000
dict6['color']='black'
print(dict6)
'''
Exercise:
a user registration feature
username
password
email
phone
'''
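# Sketch for the exercise above (illustrative values, not part of the lesson
# code): collect the four fields into one dict per user.
user = {}
user['username'] = 'eric'
user['password'] = '123456'
user['email'] = 'eric@example.com'
user['phone'] = '13800000000'
print(user)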
| [
"[email protected]"
]
| |
0800aa7da0792d7332654f3ccb4b3ad85fd99712 | 59216c8fa10e1b35b6defecd0d103cb29413a4b3 | /stupyde/fixes/utime.py | 770d09f1dd24cec15d64c648ddf3db69ee16d05b | [
"MIT"
]
| permissive | pmp-p/stupyde | d0ca43e1ea5dbec6ce074afc301df6f40985d2e3 | 725bfc790999589c20fb2eea5dc75e03fc5d7ff4 | refs/heads/master | 2021-07-18T03:44:13.272593 | 2020-05-10T04:56:57 | 2020-05-10T04:56:57 | 152,827,405 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | import time as _time
import sys

MICROPY_PY_UTIME_TICKS_PERIOD = 2**30

if sys.version_info[0:2] > (3, 7):
    _PASSTHRU = ("time", "sleep", "process_time", "localtime")

    def clock():
        return _time.process_time()
else:
    _PASSTHRU = ("time", "sleep", "clock", "localtime")

for f in _PASSTHRU:
    globals()[f] = getattr(_time, f)

def sleep_ms(t):
    _time.sleep(t / 1000)

def sleep_us(t):
    _time.sleep(t / 1000000)

def ticks_ms():
    return int(_time.time() * 1000) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)

def ticks_us():
    return int(_time.time() * 1000000) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)

ticks_cpu = ticks_us

def ticks_add(t, delta):
    return (t + delta) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)

def ticks_diff(a, b):
    return ((a - b + MICROPY_PY_UTIME_TICKS_PERIOD // 2) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)) - MICROPY_PY_UTIME_TICKS_PERIOD // 2

del f
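# Sanity note (illustrative, not in the original file): ticks_diff() stays
# correct across the 2**30 wrap-around, e.g.
# ticks_diff(5, MICROPY_PY_UTIME_TICKS_PERIOD - 5) == 10.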
| [
"[email protected]"
]
| |
60aab1d320ca746684132493414659925b08ba03 | e916c49c5fa662e54c9d9e07226bc2cd973d2bf1 | /ucf11/mobilenet_twostream2_max.py | a3016608854db500c3c5ee8969cc9ce7ca2bf52f | []
| no_license | Zumbalamambo/cnn-1 | 7111a3ff70344a9c118971f22539fedaffc394fb | 0cc6ef095f5b03152696a75f44109cb67d62cd0e | refs/heads/master | 2020-03-14T04:07:22.213824 | 2018-03-30T09:02:51 | 2018-03-30T09:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,298 | py | import keras
import sys
from keras.models import Model
from keras.layers import Dense, Conv2D, Activation, Reshape, Flatten, Input, ZeroPadding2D, Maximum
import get_data as gd
from keras import optimizers
import pickle
import random
import numpy as np
import config
# train: python mobilenet_two_stream.py train 32 1 101 0 0
# test: python mobilenet_two_stream.py test 32 1 101
# retrain: python mobilenet_two_stream.py retrain 32 1 101 1
if sys.argv[1] == 'train':
    train = True
    retrain = False
    old_epochs = 0
    spa_epochs = int(sys.argv[5])
    tem_epochs = int(sys.argv[6])
elif sys.argv[1] == 'retrain':
    train = True
    retrain = True
    old_epochs = int(sys.argv[5])
else:
    train = False
    retrain = False

opt_size = 2
batch_size = int(sys.argv[2])
epochs = int(sys.argv[3])
classes = int(sys.argv[4])
depth = 20
input_shape = (224,224,depth)

server = config.server()
if server:
    if train:
        out_file = '/home/oanhnt/thainh/data/database/train-opt2.pickle'
    else:
        out_file = '/home/oanhnt/thainh/data/database/test-opt2.pickle'
    valid_file = r'/home/oanhnt/thainh/data/database/valid-opt2.pickle'
else:
    if train:
        out_file = '/mnt/smalldata/database/train-opt2.pickle'
    else:
        out_file = '/mnt/smalldata/database/test-opt2.pickle'

# two_stream
model = keras.applications.mobilenet.MobileNet(
    include_top=True,
    dropout=0.5
)

# Disassemble layers
layers = [l for l in model.layers]

input_opt = Input(shape=input_shape)
x = ZeroPadding2D(padding=(1, 1), name='conv1_padx')(input_opt)
x = Conv2D(filters=32,
           kernel_size=(3, 3),
           padding='valid',
           use_bias=False,
           strides=(2,2),
           name='conv_new')(x)

for i in range(3, len(layers)-3):
    layers[i].name = str(i)
    x = layers[i](x)

x = Flatten()(x)
x = Dense(classes, activation='softmax')(x)

temporal_model = Model(inputs=input_opt, outputs=x)
if train & (not retrain):
    temporal_model.load_weights('weights/mobilenet_temporal22_{}e.h5'.format(tem_epochs))

# Spatial
model2 = keras.applications.mobilenet.MobileNet(
    include_top=True,
    input_shape=(224,224,3),
    dropout=0.5
)

y = Flatten()(model2.layers[-4].output)
y = Dense(classes, activation='softmax')(y)

spatial_model = Model(inputs=model2.input, outputs=y)
if train & (not retrain):
    spatial_model.load_weights('weights/mobilenet_spatial2_{}e.h5'.format(spa_epochs))

# Fusion
z = Maximum()([y, x])

# Final touch
result_model = Model(inputs=[model2.input,input_opt], outputs=z)

# Run
result_model.compile(loss='categorical_crossentropy',
                     optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                     metrics=['accuracy'])

if train:
    if retrain:
        result_model.load_weights('weights/mobilenet_twostream2_max_{}e.h5'.format(old_epochs))

    with open(out_file,'rb') as f1:
        keys = pickle.load(f1)
    len_samples = len(keys)

    if server:
        with open(valid_file,'rb') as f2:
            keys_valid = pickle.load(f2)
        len_valid = len(keys_valid)

    print('-'*40)
    print 'MobileNet Optical #{} stream only: Training'.format(opt_size)
    print('-'*40)
    print 'Number samples: {}'.format(len_samples)
    if server:
        print 'Number valid: {}'.format(len_valid)

    histories = []
    for e in range(epochs):
        print('-'*40)
        print('Epoch', e+1)
        print('-'*40)
        random.shuffle(keys)
        if server:
            history = result_model.fit_generator(
                gd.getTrainData(keys,batch_size,classes,5,'train'),
                verbose=1,
                max_queue_size=2,
                steps_per_epoch=len_samples/batch_size,
                epochs=1,
                validation_data=gd.getTrainData(keys_valid,batch_size,classes,5,'valid'),
                validation_steps=len_valid/batch_size
            )
            histories.append([
                history.history['acc'],
                history.history['val_acc'],
                history.history['loss'],
                history.history['val_loss']
            ])
        else:
            history = result_model.fit_generator(
                gd.getTrainData(keys,batch_size,classes,5,'train'),
                verbose=1,
                max_queue_size=2,
                steps_per_epoch=3,
                epochs=1
            )
            histories.append([
                history.history['acc'],
                history.history['loss']
            ])
        result_model.save_weights('weights/mobilenet_twostream2_max_{}e.h5'.format(old_epochs+1+e))

    print histories
    with open('data/trainHistoryTwoStreamMax2{}_{}_{}e'.format(2, old_epochs, epochs), 'wb') as file_pi:
        pickle.dump(histories, file_pi)
else:
    result_model.load_weights('weights/mobilenet_twostream2_max_{}e.h5'.format(epochs))

    with open(out_file,'rb') as f2:
        keys = pickle.load(f2)
    len_samples = len(keys)

    print('-'*40)
    print('MobileNet Optical+RGB stream: Testing')
    print('-'*40)
    print 'Number samples: {}'.format(len_samples)

    score = result_model.evaluate_generator(gd.getTrainData(keys,batch_size,classes,5,'test'), max_queue_size=3, steps=len_samples/batch_size)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
| [
"[email protected]"
]
| |
9a7a6e1d171bf14644f7c0a4335a2de37ac7d303 | cfb33f980471042e104a928a09c2e23b983b485f | /Subset/binsearch.py | a5b297922a5780e65cf3005ff88eedc028e2e2ec | []
| no_license | shilpchk/NetworkStructure | 22d819ed9bce217f04366b0f61533ef3e135848a | 5ea3126455ccfe5a8e7fc1e40fd08b9bd6f9e921 | refs/heads/master | 2021-01-19T11:09:24.447938 | 2017-04-11T12:55:52 | 2017-04-11T12:55:52 | 87,933,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | def binsearch(value, arr, N):
    low = 0; high = N;
    while(low < high):
        mid = low + int((high-low)/2);
        if(arr[mid] < value):
            low = mid+1;
        else:
            high = mid;
    return low
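# Example (illustrative, not in the original file): a lower-bound search.
# binsearch(7, [1, 3, 5, 7, 9], 5) returns 3, the first index i with arr[i] >= 7.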
| [
"[email protected]"
]
| |
468a0df45f7aa90ff9775925bbb385b03026e242 | e271c9699d07f4b627ac4cf71b4bb4c233af2eb1 | /packassembler/tests/base.py | 16139e28f40f950802e6ff24fb9993d5d3a3ac3e | [
"Apache-2.0"
]
| permissive | PackAssembler/PackAssembler | 379912ff59a00797a35e70ce51ac5cfc7db3b3fc | 284a5f31ec2f05f71fe30397a6da069a0a8cb45f | refs/heads/master | 2016-09-05T17:26:21.017491 | 2014-07-07T13:09:13 | 2014-07-07T13:09:13 | 11,825,806 | 0 | 1 | null | 2014-04-22T03:07:13 | 2013-08-01T19:30:34 | Python | UTF-8 | Python | false | false | 1,119 | py | from pyramid import testing
from copy import copy
class DummyRequest(testing.DummyRequest):
    session = {}

    def flash(self, msg):
        self.session['flash'] = [msg]

    def flash_error(self, msg):
        self.session['error_flash'] = [msg]


class BaseTest:
    def _get_test_class(self):
        pass

    def make_one(self, *args, **kw):
        return self._get_test_class()(*args, **kw)

    @classmethod
    def setup_class(cls):
        cls.config = testing.setUp()
        cls.config.include('packassembler')
        cls.config.include('pyramid_mailer.testing')

    @classmethod
    def teardown_class(cls):
        testing.tearDown()

    def authenticate(self, user):
        self.config.testing_securitypolicy(userid=user.username)


def match_request(params=None, **kwargs):
    return DummyRequest(matchdict=kwargs, params=params)


def create_rid(name):
    return name.replace(' ', '_')


def document_to_data(doc):
    data = copy(doc._data)
    data['submit'] = ''
    filtered = {}
    for k, v in data.items():
        if v is not None:
            filtered[k] = v
    return filtered
| [
"[email protected]"
]
| |
fce283892ba59dcf2ba42e224830b42612d88aa5 | ec3e9925af8742d578fd11aac6f000ced71aa9f5 | /crm_app/migrations/0001_initial.py | a8d2064e20aeff0443aad84487887d739acbfa32 | []
| no_license | amrit-kumar/CRM-Customer-relationship-management- | cfd3ec42a975e7b987d76abe465cb2ec9eec62b4 | d41b482166557e17825b2a010d24bb03ee469245 | refs/heads/master | 2021-06-25T06:37:51.721771 | 2017-08-12T09:43:23 | 2017-08-12T09:43:23 | 96,964,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-01-17 10:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MsgReports',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('request_id', models.CharField(blank=True, max_length=250, null=True)),
                ('user_id', models.CharField(blank=True, max_length=250, null=True)),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('discription', models.CharField(blank=True, max_length=250, null=True)),
                ('number', models.BigIntegerField(blank=True, null=True)),
                ('sender_id', models.CharField(blank=True, max_length=250, null=True)),
                ('campaign_name', models.CharField(blank=True, max_length=250, null=True)),
                ('status', models.CharField(blank=True, choices=[('1', '1'), ('2', '2'), ('3', '3')], max_length=250, null=True)),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
abdf9a3a6958959db50b3339852f2f49dff0d58a | cf57c29736ff6841b0024740201f7fe5dc9430da | /amiibo_comments/wsgi.py | f7000b8488316dbcb8098eccde6cf68555a4ee81 | []
| no_license | zedsousa/amiibo_comments | 9e224da4e1b48171536ba6919bf47527f940632e | 7254f4feb7cd61b38d7036e27a84b18169379abd | refs/heads/main | 2023-07-08T04:38:37.551955 | 2021-08-16T00:34:34 | 2021-08-16T00:34:34 | 396,533,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for amiibo_comments project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'amiibo_comments.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
3f23b2f1a0f51f385862ff8d5a5ea6f26947847e | b4cd6acd822dc2fbc97908aafc910e60bf597756 | /web_s/env/lib/python3.7/keyword.py | c829f53b9c895483ad9f7c5c7ba0a5b6f6bf08e7 | []
| no_license | vaibhavCodian/Stock-Prediction-Web-App | 868685786c43155ae4abcf7dd6c4590802faa168 | 54ca117150c71a2a017c0ba4b8d91324a7645a8b | refs/heads/master | 2021-02-12T14:06:53.216536 | 2020-04-28T17:50:24 | 2020-04-28T17:50:24 | 244,597,359 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | /home/vaibhav/anaconda3/lib/python3.7/keyword.py | [
"[email protected]"
]
| |
8255c837aa16fadcba7eb5b77f8cdb00c4d40c4e | 6b97237bfd9647f7a90c1d1c33b4453c07e56839 | /routingpolicy/peeringdb.py | 1785b3738e64605815f3565b2ae6a6f5cfd0589e | [
"MIT"
]
| permissive | 48ix/routingpolicy | 4e9803659daf84478e3bf41db90a8df642fb50e8 | fd3e9547a5c54bd78ee2144786f6b30fdf41d7ef | refs/heads/master | 2023-01-30T03:20:37.440933 | 2020-12-16T17:19:39 | 2020-12-16T17:19:39 | 295,359,872 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,392 | py | """Get Participant Information via the PeeringDB API."""
# Standard Library
from typing import Tuple, Sequence
# Third Party
from httpx import AsyncClient
# Project
from routingpolicy.log import log
async def max_prefixes(asn: int) -> Tuple[int, int]:
    """Search PeeringDB for an entry matching an ASN and return its max prefixes."""
    prefixes = (200, 20)
    async with AsyncClient(
        http2=True,
        verify=True,
        base_url="https://peeringdb.com",
        headers={"Accept": "application/json"},
    ) as client:
        log.debug("Getting max prefixes for AS{}", str(asn))
        res = await client.get("/api/net", params={"asn__contains": asn})
        res.raise_for_status()
        for data in res.json()["data"]:
            if "asn" in data and data["asn"] == asn:
                log.debug("Matched AS{} to {}", str(asn), data["name"])
                log.debug(
                    "AS{} PeeringDB Org ID {}, last updated {}",
                    str(asn),
                    str(data["org_id"]),
                    data["updated"],
                )
                prefixes = (
                    data.get("info_prefixes4", 200),
                    data.get("info_prefixes6", 20),
                )
    return prefixes


async def get_as_set(asn: str) -> Sequence[str]:
    """Search PeeringDB for an entry matching an ASN and return its IRR AS_Set."""
    result = []
    async with AsyncClient(
        http2=True,
        verify=True,
        base_url="https://peeringdb.com",
        headers={"Accept": "application/json"},
    ) as client:
        log.debug("Getting IRR AS-Set for AS{}", asn)
        res = await client.get("/api/net", params={"asn__contains": asn})
        res.raise_for_status()
        for data in res.json()["data"]:
            if "asn" in data and str(data["asn"]) == asn:
                log.debug("Matched AS{} to {}", str(asn), data["name"])
                log.debug(
                    "AS{} PeeringDB Org ID {}, last updated {}",
                    str(asn),
                    str(data["org_id"]),
                    data["updated"],
                )
                as_set = data.get("irr_as_set", "")
                if as_set != "":
                    result = as_set.split(" ")
                    log.debug("Found AS-Set(s) {} for {}", result, data["name"])
                break
    return result
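# Usage sketch (not part of the original module): run the coroutines under an
# event loop, e.g.
#
#   import asyncio
#   v4_max, v6_max = asyncio.run(max_prefixes(64496))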
| [
"[email protected]"
]
| |
0a32d2b6c410aca949535c18a0afdc1811fa82de | d77cee829ec56d2ef12446bf1ebc75cf3a1d8de8 | /src/confluence/urls.py | 11ca30b6e7eba5d7d393b109c004ba297c8ac408 | [
"MIT"
]
| permissive | thisisayush/Confluence | 6a508fdd96aebf38a9d063760fed7709c1a968f5 | a7e7b3b4d45ae9577f44d112c7383e4e101f3dd6 | refs/heads/master | 2021-04-15T08:02:05.097647 | 2017-03-02T19:15:49 | 2017-03-02T19:15:49 | 94,565,851 | 0 | 0 | null | 2017-06-16T17:15:55 | 2017-06-16T17:15:55 | null | UTF-8 | Python | false | false | 946 | py | """confluence URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
]
| [
"[email protected]"
]
| |
f1ea3f92b383d226f7bc949e68085f65407309e1 | 97fd76dd9f2fd29a6418e26b8f8d21f330b58a9c | /tests/test_main.py | 2025733d3b9de2147a4a03c1f3d06c4a418c6312 | [
"MIT"
]
| permissive | sudeep611/nepserate | d53d39ca4c204508621379787ac8d4cbb40a68d3 | b7247839ed1675eeaecf6cac4124507a35f6c8d3 | refs/heads/master | 2021-05-29T15:35:24.690452 | 2014-10-31T16:56:06 | 2014-10-31T16:56:06 | 25,625,531 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # Test Script
# If this test passes, then everything should work fine
from nepserate import ScrapeRate
import unittest
class TestScrapeRate(unittest.TestCase):
    def test_result(self):
        ns = ScrapeRate()
        # Check if the return type is list
        self.assertEqual(type(ns.getRate("ADBL")), type([]))

if __name__ == '__main__':
    unittest.main() | [
"[email protected]"
]
| |
c941709fbed0b9fa452dac0e4e3ea4916d99de51 | 3b630e8ffae16049b09ea90b3d4af4e2c7b9483b | /firstphy.py | 35ea0b20e4778b407114c119c477c625d43f2d8e | []
| no_license | shafifx/myhub | fe91a2d46c0ba7f7d58057e1d05aecc067989fc9 | a3939fe4743a80535af1334f1f7fc78f28482745 | refs/heads/main | 2023-06-06T22:34:09.271540 | 2021-07-08T16:17:53 | 2021-07-08T16:17:53 | 383,184,433 | 0 | 0 | null | 2021-07-08T16:17:53 | 2021-07-05T15:21:38 | Python | UTF-8 | Python | false | false | 43 | py | # hry python: https://github.com/shafifx/myhub
| [
"[email protected]"
]
| |
7c1cf7586a7cc9ff5c8a7ecd4890b9115290f894 | f578bf168e4f8df91007bae7a2352a31cd98d375 | /CraftProtocol/Protocol/v1_8/Packet/Play/ConfirmTransactionClientPacket.py | 784d4f7628593a4131223067b1b3c32efe08486b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Toranktto/CraftProtocol | 97a4e4f408e210494f6acbec0f30c477bb55f8fa | a6f4a67756c3868820ab76df5e148d76b020d990 | refs/heads/master | 2021-07-18T11:04:13.432733 | 2018-09-09T17:23:51 | 2018-09-09T17:23:51 | 144,491,218 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | #!/usr/bin/env python
from CraftProtocol.Protocol.Packet.BasePacket import BasePacket
from CraftProtocol.Protocol.Packet.PacketDirection import PacketDirection
from CraftProtocol.StreamIO import StreamIO
class ConfirmTransactionClientPacket(BasePacket):
    PACKET_ID = 0x32
    PACKET_DIRECTION = PacketDirection.CLIENTBOUND

    def __init__(self, window_id, transaction_id, accepted):
        BasePacket.__init__(self)
        self._window_id = int(window_id)
        self._transaction_id = int(transaction_id)
        self._accepted = bool(accepted)

    def get_window_id(self):
        return self._window_id

    def set_window_id(self, window_id):
        self._window_id = int(window_id)

    def get_transaction_id(self):
        return self._transaction_id

    def set_transaction_id(self, transaction_id):
        self._transaction_id = int(transaction_id)

    def is_accepted(self):
        return self._accepted

    def set_accepted(self, accepted):
        self._accepted = bool(accepted)

    @staticmethod
    def write(stream, packet):
        StreamIO.write_byte(stream, packet.get_window_id())
        StreamIO.write_short(stream, packet.get_transaction_id())
        StreamIO.write_bool(stream, packet.is_accepted())

    @staticmethod
    def read(stream, packet_size):
        window_id = StreamIO.read_byte(stream)
        transaction_id = StreamIO.read_short(stream)
        accepted = StreamIO.read_bool(stream)
        return ConfirmTransactionClientPacket(window_id, transaction_id, accepted)
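# Round-trip sketch (not part of the original module; assumes StreamIO reads
# and writes ordinary binary streams such as io.BytesIO):
#
#   import io
#   buf = io.BytesIO()
#   ConfirmTransactionClientPacket.write(buf, ConfirmTransactionClientPacket(1, 42, True))
#   buf.seek(0)
#   pkt = ConfirmTransactionClientPacket.read(buf, 0)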
| [
"[email protected]"
]
| |
61d903d2962755912aa8dac21eaa78b97774cbf8 | 5b473f7876104de55b0ac19616c9ef0976c2f224 | /cals/NoiseDiode/ND_atten_fit.py | ab7a122198042084db22e7ce4274e87374b7e8a6 | []
| no_license | SDRAST/Receivers_WBDC | 80f3c2481fb09b9875b9ecd1687a4cc194ab9005 | 3e1d49d3daf361608331fcdf3c6c3e41b4ad9de9 | refs/heads/master | 2022-12-23T20:21:22.281484 | 2020-09-30T23:00:19 | 2020-09-30T23:00:19 | 291,369,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | # -*- coding: utf-8 -*-
"""
These data are from the calibration log for Fri Apr 8 16:03:55 2011
Add ctrl_voltage as a method to ND. ND should probably be raised to a class
"""
import numpy as NP
from pylab import *
import scipy
def ctrl_voltage(ND):
    coefs = array([ 3.85013993e-18, -6.61616152e-15, 4.62228606e-12,
                   -1.68733555e-09, 3.43138077e-07, -3.82875899e-05,
                    2.20822016e-03, -8.38473034e-02, 1.52678586e+00])
    return scipy.polyval(coefs, ND)
data = NP.array([
[-6.00, -28.716],
[-5.75, -28.732],
[-5.50, -28.757],
[-5.25, -28.797],
[-5.00, -28.851],
[-4.75, -28.928],
[-4.50, -29.035],
[-4.25, -29.179],
[-4.00, -29.355],
[-3.75, -29.555],
[-3.50, -29.775],
[-3.25, -29.992],
[-3.00, -30.189],
[-2.75, -30.378],
[-2.50, -30.548],
[-2.25, -30.691],
[-2.00, -30.822],
[-1.75, -30.926],
[-1.50, -31.028],
[-1.25, -31.109],
[-1.00, -31.206],
[-0.75, -31.296],
[-0.50, -31.388],
[-0.25, -31.498],
[ 0.00, -31.612],
[ 0.25, -31.747],
[ 0.50, -31.880],
[ 0.75, -31.995],
[ 1.00, -32.078],
[ 1.25, -32.116],
[ 1.50, -32.136],
[ 1.75, -32.144]])
ctrlV = data[:,0]
pwr_dB = data[:,1]
pwr_W = pow(10.,pwr_dB/10)
min_pwr = pwr_W.min()
max_pwr = pwr_W.max()
gain = 320/min_pwr
TsysMax = gain*max_pwr # assuming the system was linear, which it was
print "Tsys with full ND =",TsysMax
NDmax = TsysMax-320
print "Tnd(max) =",NDmax
ND =gain*pwr_W - 320
plot(ND,ctrlV)
ylabel("Control Voltage (V)")
xlabel("Noise Diode (K)")
grid()
coefs = scipy.polyfit(ND,ctrlV, 8)
print coefs
vctrl_voltage = NP.vectorize(ctrl_voltage)
x = arange(0,350,10)
plot(x,vctrl_voltage(x),'ro')
show()
| [
"[email protected]"
]
| |
22eee1e5ce8c1bd1a482bcb953ffebe1d366e5d7 | 1c73ef51b70251ed6ed24ce7a9ea08e601b602dd | /insert_mysql.py | 2672456d4f9ab5af82e2e0d026f684b8fa32ba81 | []
| no_license | java2man/restful-client | 21487f0a8361c499277d6863e86d0fdf4060ff46 | d1cc2a3b3995214d9c71ad50a5149f145fd3063e | refs/heads/master | 2021-01-22T04:11:07.756990 | 2017-02-10T01:50:50 | 2017-02-10T01:50:50 | 81,513,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,067 | py | # -*- coding: utf-8 -*-
import sys
import mysql.connector
from mysql.connector import conversion
class DBOperator:
    def __init__(self, user, password, host, database):
        self.conn = mysql.connector.connect(user=user, password=password, host=host, database=database)
        self.cur = self.conn.cursor()

    def myprint(self, s):
        sys.stdout.buffer.write(s.encode('cp932', errors='replace'))

    def createTable(self, table_name, json):
        sql = "create table IF NOT EXISTS " + table_name + "("
        keys = json.keys()
        for key in keys:
            if(key == 'links'):
                continue
            if(key == 'group'):
                key = '_group'
            if(key == '_id'):
                sql = sql + key + " INT NOT NULL PRIMARY KEY,"
            else:
                sql = sql + key + " " + "TEXT,"
        sql = sql[:-1] + ")"
        #self.myprint(sql)
        self.cur.execute(sql)
        self.conn.commit()
        self.cur.close  # NOTE: missing parentheses, so this is a no-op; calling close() here would break later queries
        self.conn.close  # NOTE: missing parentheses, so this is a no-op

    def insertTable(self, table_name, json):
        sql_insert = "insert ignore into " + table_name + "("
        sql_values = "values("
        keys = json.keys()
        for key in keys:
            value = str(json[key])
            if(key == 'links'):
                continue
            if(key == 'group'):
                key = '_group'
            sql_insert = sql_insert + key + ","
            sql_values = sql_values + "'" + (value.replace("'", "''")).replace("\\", "\\\\") + "',"
        sql = sql_insert[:-1] + ") " + sql_values[:-1] + ")"
        #self.myprint(sql)
        self.addColumnIfNeed(table_name, sql)
        #self.cur.execute(sql)
        self.conn.commit()
        self.cur.close  # NOTE: missing parentheses, so this is a no-op
        self.conn.close  # NOTE: missing parentheses, so this is a no-op

    def alterTable(self, table_name, column_name):
        sql_alter = "ALTER TABLE " + table_name + " ADD COLUMN " + column_name + " TEXT"
        self.cur.execute(sql_alter)
        self.conn.commit()

    def addColumnIfNeed(self, table_name, sql):
        try:
            self.cur.execute(sql)
        except mysql.connector.ProgrammingError as e:
            str1 = "Unknown column '"
            str2 = "' in 'field list'"
            field = ''
            if(str1 in str(e) and str2 in str(e)):
                index1 = str(e).index(str1) + len(str1)
                field = str(e)[index1:len(str(e)) - len(str2)]
                print(field)
                self.alterTable(table_name, field)
                self.addColumnIfNeed(table_name, sql) | [
"[email protected]"
]
| |
3979ad9eea86bdb41c9f35d9812b87941eb06226 | 5ed4a4dc164791157d089568a6a256372262f6d4 | /7.9.aux_NM.py | 0f4e42b0ac10c712a35ad01de06f2b6434811b62 | []
| no_license | Valkyries12/algoritmos-programacion1 | 72cf6c2146ff8822b1ff8aa8bf6d7c4c677fc55e | 20a5861c85e2841fdba9574c4b08cec6b2200b15 | refs/heads/master | 2020-07-02T08:16:53.670119 | 2019-08-18T01:59:09 | 2019-08-18T01:59:09 | 201,470,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Exercise 7.9. Write a function that takes a string of space-separated words as a parameter and returns, as a result, how many words of more than five letters the given string contains.

def cantidad_mas_de_cinco(cadena):
    """ Returns how many words have more than 5 letters. """
    lista = cadena.split()
    cantidad = 0
    for elemento in lista:
        if len(elemento) > 5:
            cantidad += 1
    return cantidad

print(cantidad_mas_de_cinco("Habia una vez un barco chiquitito que nadaba sin atencion")) | [
"[email protected]"
]
| |
78f31a9c174255d188697506e1941c866f62891c | 8f949493064b77dd3f19ceeed1e86382ace176d6 | /posts/urls.py | 3f113ad6817989d01a71ca2970489a00507bc58f | []
| no_license | sudhanshu-jha/simplesocial | 44a19a1b1051dcc8577de5d87660a5b890b829d1 | 6d40293be75703d5498025150acf9e91bae6f77c | refs/heads/master | 2020-04-17T07:41:54.207867 | 2019-01-18T10:24:14 | 2019-01-18T10:24:14 | 135,698,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from django.conf.urls import url
from . import views
app_name = "posts"
urlpatterns = [
    url(r"^$", views.PostList.as_view(), name="all"),
    url(r"new/$", views.CreatePost.as_view(), name="create"),
    url(r"by/(?P<username>[-\w]+)/$", views.UserPosts.as_view(), name="for_user"),
    url(
        r"by/(?P<username>[-\w]+)/(?P<pk>\d+)/$",
        views.PostDetail.as_view(),
        name="single",
    ),
    url(r"delete/(?P<pk>\d+)/$", views.DeletePost.as_view(), name="delete"),
]
| [
"[email protected]"
]
| |
0ff0703817449a164cc4148e5e772d7aad82761d | 20a0bd0a9675f52d4cbd100ee52f0f639fb552ef | /transit_odp/data_quality/migrations/0010_auto_20191118_1604.py | 1dbd2499c70b6991917a996f3979d7d53de8b877 | []
| no_license | yx20och/bods | 2f7d70057ee9f21565df106ef28dc2c4687dfdc9 | 4e147829500a85dd1822e94a375f24e304f67a98 | refs/heads/main | 2023-08-02T21:23:06.066134 | 2021-10-06T16:49:43 | 2021-10-06T16:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | # Generated by Django 2.2.7 on 2019-11-18 16:04
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ("data_quality", "0009_auto_20191118_1029"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="service",
            name="report",
        ),
        migrations.AddField(
            model_name="service",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="service",
            name="reports",
            field=models.ManyToManyField(
                related_name="services", to="data_quality.DataQualityReport"
            ),
        ),
        migrations.AddField(
            model_name="servicelink",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="servicepattern",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="timingpattern",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="vehiclejourney",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
    ]
| [
"[email protected]"
]
| |
c833bef47a138873f53849c9dffd03c4f3bb8c82 | f54e711cb7fa9ec0295d1e5519fde39778299c48 | /blogProject/myApp/migrations/0004_comment_name.py | 0e5793a15f0e9b585a0ce290649d05cbf9f3c64e | []
| no_license | Sushma-RV99/blog-repo | bb9c795784fd82178384ede75ef369d64997fa1a | a1443c963fdcaaf38904b3f6faa90401a396564e | refs/heads/master | 2023-02-11T06:07:48.709194 | 2021-01-08T14:23:36 | 2021-01-08T14:23:36 | 327,924,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.2 on 2021-01-01 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myApp', '0003_comment'),
]
operations = [
migrations.AddField(
model_name='comment',
name='name',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
33eadd24b48302614418717e1f4b4966a2618001 | 5b6ec20f6a62e2daf46e13c8740e9d8c8f4ff0e2 | /mvloader/nrrd.py | 68430da6cbbd010a39c01834606f84bae565ca30 | [
"MIT"
]
| permissive | dichetao/mvloader | 946575ee2cad2daa2d4ae507ba44bf120e100966 | 4244ba30f4c8f92ccf7605dc0134ef32706a70a2 | refs/heads/master | 2021-09-23T14:23:26.151457 | 2018-09-24T14:11:24 | 2018-09-24T14:11:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,901 | py | #!/usr/bin/env python
# coding: utf-8
"""A module for reading NRRD files [NRRD1]_, basically a wrapper for calls on the pynrrd library [NRRD2]_.
References
----------
.. [NRRD1] http://teem.sourceforge.net/nrrd/format.html (20180212)
.. [NRRD2] https://github.com/mhe/pynrrd (20180212).
"""
import nrrd
import numpy as np
from mvloader.volume import Volume
def open_image(path, verbose=True):
"""
Open a 3D NRRD image at the given path.
Parameters
----------
path : str
The path of the file to be loaded.
verbose : bool, optional
If `True` (default), print some meta data of the loaded file to standard output.
Returns
-------
Volume
The resulting 3D image volume, with the ``src_object`` attribute set to the tuple `(data, header)` returned
by pynrrd's ``nrrd.read`` (where `data` is a Numpy array and `header` is a dictionary) and the desired
anatomical world coordinate system ``system`` set to "RAS".
Raises
------
IOError
If something goes wrong.
"""
try:
src_object = (voxel_data, hdr) = nrrd.read(path)
except Exception as e:
raise IOError(e)
if verbose:
print("Loading image:", path)
print("Meta data:")
for k in sorted(hdr.keys(), key=str.lower):
print("{}: {!r}".format(k, hdr[k]))
__check_data_kinds_in(hdr)
src_system = __world_coordinate_system_from(hdr) # No fixed world coordinates for NRRD images!
mat = __matrix_from(hdr) # Voxels to world coordinates
# Create new ``Volume`` instance
volume = Volume(src_voxel_data=voxel_data, src_transformation=mat, src_system=src_system, system="RAS",
src_object=src_object)
return volume
def save_image(path, data, transformation, system="RAS", kinds=None):
"""
Save the given image data as a NRRD image file at the given path.
Parameters
----------
path : str
The path for the file to be saved.
data : array_like
Three-dimensional array that contains the voxels to be saved.
transformation : array_like
:math:`4×4` transformation matrix that maps from ``data``'s voxel indices to the given ``system``'s anatomical
world coordinate system.
system : str, optional
The world coordinate system to which ``transformation`` maps the voxel data. Either "RAS" (default), "LAS", or
"LPS" (these are the ones supported by the NRRD format).
kinds : str or sequence of strings, optional
If given, the string(s) will be used to set the NRRD header's "kinds" field. If a single string is given, it
will be used for all dimensions. If multiple strings are given, they will be used in the given order. If
nothing is given (default), the "kinds" field will not be set. Note that all strings should either be "domain"
or "space".
"""
if data.ndim > 3:
raise RuntimeError("Currently, mvloader supports saving NRRD files with scalar data only!")
# Create the header entries from the transformation
space = system.upper()
space_directions = transformation[:3, :3].T.tolist()
space_origin = transformation[:3, 3].tolist()
options = {"space": space, "space directions": space_directions, "space origin": space_origin}
if kinds is not None:
kinds = (data.ndim * [kinds]) if isinstance(kinds, str) else list(kinds)
options["kinds"] = kinds
nrrd.write(filename=path, data=data, options=options)
def save_volume(path, volume, src_order=True, src_system=True, kinds=None):
"""
Save the given ``Volume`` instance as a NRRD image file at the given path.
Parameters
----------
path : str
The path for the file to be saved.
volume : Volume
The ``Volume`` instance containing the image data to be saved.
src_order : bool, optional
If `True` (default), order the saved voxels as in ``src_data``; if `False`, order the saved voxels as in
``aligned_data``. In any case, the correct transformation matrix will be chosen.
src_system : bool, optional
If `True` (default), try to use ``volume``'s ``src_system`` as the anatomical world coordinate system for
saving; if `False`, try to use ``volume``'s ``system`` instead. In either case, this works if the system is
either "RAS", "LAS", or "LPS" (these are the ones supported by the NRRD format). If a different system is
given, use "RAS" instead.
kinds : str or sequence of strings, optional
If given, the string(s) will be used to set the NRRD header's "kinds" field. If a single string is given, it
will be used for all dimensions. If multiple strings are given, they will be used in the given order. If
nothing is given (default), the "kinds" field will not be set. Note that all strings should either be "domain"
or "space".
"""
if volume.aligned_data.ndim > 3:
raise RuntimeError("Currently, mvloader supports saving NRRD files with scalar data only!")
system = volume.src_system if src_system else volume.system
system = system if system in ["RAS", "LAS", "LPS"] else "RAS"
if src_order:
data = volume.src_data
transformation = volume.get_src_transformation(system)
else:
data = volume.aligned_data
transformation = volume.get_aligned_transformation(system)
save_image(path, data=data, transformation=transformation, system=system, kinds=kinds)
def __check_data_kinds_in(header):
"""
Sanity check on the header's "kinds" field: are all entries either "domain" or "space" (i.e. are we really dealing
with scalar data on a spatial domain)?
Parameters
----------
header : dict
A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example).
Returns
-------
None
Simply return if everything is ok or the "kinds" field is not set.
Raises
------
IOError
If the "kinds" field contains entries other than "domain" or "space".
"""
kinds = header.get("kinds")
if kinds is None:
return
for k in kinds:
if k.lower() not in ["domain", "space"]:
raise IOError("At least one data dimension contains non-spatial data!")
def __world_coordinate_system_from(header):
"""
From the given NRRD header, determine the respective assumed anatomical world coordinate system.
Parameters
----------
header : dict
A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example).
Returns
-------
str
The three-character uppercase string determining the respective anatomical world coordinate system (such as
"RAS" or "LPS").
Raises
------
IOError
If the header is missing the "space" field or the "space" field's value does not determine an anatomical world
coordinate system.
"""
try:
system_str = header["space"]
except KeyError as e:
raise IOError("Need the header's \"space\" field to determine the image's anatomical coordinate system.")
if len(system_str) == 3:
# We are lucky: this is already the format that we need
return system_str.upper()
# We need to separate the string (such as "right-anterior-superior") at its dashes, then get the first character
# of each component. We cannot handle 4D data nor data with scanner-based coordinates ("scanner-...") or
# non-anatomical coordinates ("3D-...")
system_components = system_str.split("-")
if len(system_components) == 3 and not system_components[0].lower() in ["scanner", "3d"]:
system_str = "".join(c[0].upper() for c in system_components)
return system_str
raise IOError("Cannot handle \"space\" value {}".format(system_str))
def __matrix_from(header):
"""
Calculate the transformation matrix from voxel coordinates to the header's anatomical world coordinate system.
Parameters
----------
header : dict
A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example).
Returns
-------
numpy.ndarray
The resulting :math:`4×4` transformation matrix.
"""
try:
space_directions = header["space directions"]
space_origin = header["space origin"]
except KeyError as e:
raise IOError("Need the header's \"{}\" field to determine the mapping from voxels to world coordinates.".format(e))
# "... the space directions field gives, one column at a time, the mapping from image space to world space
# coordinates ... [1]_" -> list of columns, needs to be transposed
trans_3x3 = np.array(space_directions).T
trans_4x4 = np.eye(4)
trans_4x4[:3, :3] = trans_3x3
trans_4x4[:3, 3] = space_origin
return trans_4x4
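# Usage sketch (not part of the original module; "example.nrrd" is a
# hypothetical path): round-trip a volume through this wrapper.
#   volume = open_image("example.nrrd", verbose=False)
#   print(volume.aligned_data.shape)
#   save_volume("example_copy.nrrd", volume)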
| [
"[email protected]"
]
| |
22cd4aa937ae8cfd23745a3259f156cd50b64a4e | cb3583cc1322d38b1ee05cb1c081e0867ddb2220 | /donor/migrations/0014_auto_20210331_0404.py | b1189bdce3ff86f5f1436a2a55ec393aa74d80f9 | [
"MIT"
]
| permissive | iamgaddiel/codeupblood | 9e897ff23dedf5299cb59fd6c44d9bd8a645e9c6 | a0aa1725e5776d80e083b6d4e9e67476bb97e983 | refs/heads/main | 2023-05-07T23:34:27.475043 | 2021-04-24T20:49:08 | 2021-04-24T20:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Generated by Django 3.1.6 on 2021-03-31 11:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donor', '0013_auto_20210330_0743'),
]
operations = [
migrations.AlterField(
model_name='appointment',
name='d_id',
field=models.CharField(default='oiapGX', max_length=50),
),
]
| [
"[email protected]"
]
| |
b933acdeb2309ba593e6d9e9d9d667aff904c210 | 520c5877c4f7e33b66a955bde8eb0b613b99666a | /lyric_generation/embedding.py | 4970fcff859b9802952e2bb645b2b409cfea1c74 | []
| no_license | richardsavery/interactive-hiphop | 53db132369bb354c626d5a28635d1bba857a12d8 | 41bb1b7a7eb2a6bc1eb33a7f4cdf640e4cda7ff1 | refs/heads/master | 2022-02-23T20:30:52.354441 | 2022-02-10T03:58:17 | 2022-02-10T03:58:17 | 206,121,157 | 3 | 2 | null | 2022-02-11T03:00:55 | 2019-09-03T16:21:45 | Python | UTF-8 | Python | false | false | 829 | py | from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
import pickle
# GLOVE_FILE = "/Users/brianmodel/Desktop/gatech/VIP/interactive-hiphop/lyric_generation/glove.840B.300d.txt"
WORD2VEC_FILE = "/Users/brianmodel/Desktop/gatech/VIP/interactive-hiphop/lyric_generation/GoogleNews-vectors-negative300.bin"
def glove_to_word2vec():
glove_file = datapath(GLOVE_FILE)
tmp_file = get_tmpfile(WORD2VEC_FILE)
_ = glove2word2vec(glove_file, tmp_file)
# model = KeyedVectors.load_word2vec_format(tmp_file)
def get_embedding():
return KeyedVectors.load_word2vec_format(WORD2VEC_FILE, binary=True)
model = get_embedding()
with open('word2vec.model', 'wb') as model_file:
pickle.dump(model, model_file)
print(model)
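# Usage sketch (added for illustration, not in the original script): once
# loaded, KeyedVectors supports nearest-neighbour queries, e.g.
#   print(model.most_similar("music", topn=5))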
| [
"[email protected]"
]
| |
46e316c0cc99e56127b71e1ee886f2f946be6258 | 1b6da6feaeeaa3801279781ab8421e7294c5b393 | /python27/py_auto_test/test_cases/utility_mysql.py | f2253b0fd0e97f23add55b6d6364cf3cd8efa204 | []
| no_license | doorhinges0/my_projects | 703bbc92425e6c0604088d546b84be6dca37c0cd | f981ca0bfd79c3a119cd52155028f3f338378690 | refs/heads/master | 2021-01-13T12:00:06.992906 | 2015-12-28T12:00:42 | 2015-12-28T12:00:42 | 48,828,883 | 0 | 1 | null | 2015-12-31T02:24:37 | 2015-12-31T02:24:37 | null | UTF-8 | Python | false | false | 1,354 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import mysql.connector
# Note: set password to your own MySQL root password:
conn = mysql.connector.connect(user='root', password='trxhfly', database='account_system_db', use_unicode=True)
def get_user(type, id):
if type not in ('uid','acc','phone','email'):
return None
if not id:
return None
    user = None
    # The column name is safe to interpolate because it was validated against
    # the whitelist above; the value itself is bound as a query parameter so
    # the driver escapes it (no SQL injection).
    query = 'select * from user_info where {}=%s'.format(type)
    print('query=', query)
    cursor = conn.cursor(dictionary=True)
    cursor.execute(query, (id,))
    values = cursor.fetchall()
    # Close the cursor (keep the shared module-level connection open):
    cursor.close()
    if 1 == len(values):
        user = values[0]
    return user
if '__main__'==__name__:
'''
user=get_user('uid','100000')
print(user)
print(user['uid'])
print(user['acc'])
'''
user=get_user('acc','svqymidfc6m9')
if user:
print(user)
print(user['uid'])
print(user['acc'])
| [
"[email protected]"
]
| |
1b1ef729bfe6870880ec2b3f58f8d04117f29bc5 | ddf9d47a06ce85f9d06ec4923982f96996e028a7 | /Notebooks/Entrenamiento Modelo/CustomHyperModelImages.py | 2f49fceefd6a2ddbc8d07d8b4f3d7947bbe71e0f | []
| no_license | SebasPelaez/colombia-energy-forecast | f7b7a184026d3eb22a2087fda39249998ba1128e | 269d432dfda0e976aa06d1b9b7804945d9362af3 | refs/heads/master | 2023-04-14T18:36:14.294769 | 2021-04-21T14:01:58 | 2021-04-21T14:01:58 | 286,310,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,779 | py | import tensorflow as tf
import CustomMetrics
from kerastuner import HyperModel
class ArquitecturaI1(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_8", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
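# Tuning sketch (hypothetical input shape, data names, and trial budget; not
# part of the original module): each HyperModel in this file plugs into a
# keras-tuner search the same way, e.g.
#   from kerastuner.tuners import RandomSearch
#   hypermodel = ArquitecturaI1(input_shape=(12, 64, 64, 1), n_steps_out=24)
#   tuner = RandomSearch(hypermodel, objective="val_loss", max_trials=10)
#   tuner.search(x_train, y_train, validation_data=(x_val, y_val), epochs=5)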
class ArquitecturaI2(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_6", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_6_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI3(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_4", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_4_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI4(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI5(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI6(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI7(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_7", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_7",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_7",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_8", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_8_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI8(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_6", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_6",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_6",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_7", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_7_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI9(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_3", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_4",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_5", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_5_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI10(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_7", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_7",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_7",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI11(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_6", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_6",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_6",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI12(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_3", min_value=64, max_value=448, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_4",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI13(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=16, step=8, default=16
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_4", min_value=64, max_value=448, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(l1=0),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_5", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_5_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-5,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI14(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=16, step=8, default=16
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_4", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(l1=0),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-5,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model | [
"[email protected]"
]
| |
f427c925290c5a2a81db95be3c0f18e6c3e33066 | dccd1058e723b6617148824dc0243dbec4c9bd48 | /atcoder/abc048/a.py | 2a32b441150b9a7e79505fe4330cbbf200516869 | []
| no_license | imulan/procon | 488e49de3bcbab36c624290cf9e370abfc8735bf | 2a86f47614fe0c34e403ffb35108705522785092 | refs/heads/master | 2021-05-22T09:24:19.691191 | 2021-01-02T14:27:13 | 2021-01-02T14:27:13 | 46,834,567 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | for s in input().split():
print(s[0],end="")
print()
| [
"[email protected]"
]
| |
2d85e566ab46559127ff094934cff6b9e3b4a756 | e72db255e41332c113f929eb63815b2169038209 | /Chapter08/audio-encode-server-4/audio_encode_server/s3.py | 8585e1faf5d52e430754cde9e22635bf0eee6396 | [
"MIT"
]
| permissive | PacktPublishing/Hands-On-Reactive-Programming-with-Python | b196b971fe49a36da9f979790b8c31c98a659031 | 757d45e2023032c6074e26ad252530f3c89978bf | refs/heads/master | 2023-02-07T01:03:37.648175 | 2023-02-05T18:21:17 | 2023-02-05T18:21:38 | 128,761,473 | 75 | 19 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | import asyncio
from collections import namedtuple
from io import BytesIO
import reactivex as rx
import boto3
from boto3.session import Session
from cyclotron import Component
Source = namedtuple('Source', ['response'])
Sink = namedtuple('Sink', ['request'])
# Sink objects
Configure = namedtuple('Configure', [
'access_key', 'secret_key',
'bucket', 'endpoint_url', 'region_name'])
UploadObject = namedtuple('UploadObject', ['key', 'data', 'id'])
# Source objects
UploadReponse = namedtuple('UploadReponse', ['key', 'id'])
def make_driver(loop=None):
if loop is None:
loop = asyncio.get_event_loop()
def driver(sink):
def on_subscribe(observer, scheduler):
client = None
bucket = None
def on_next(item):
nonlocal client
nonlocal bucket
if type(item) is Configure:
session = Session(aws_access_key_id=item.access_key,
aws_secret_access_key=item.secret_key)
client = session.client(
's3',
endpoint_url=item.endpoint_url,
region_name=item.region_name)
bucket = item.bucket
elif type(item) is UploadObject:
data = BytesIO(item.data)
client.upload_fileobj(data, bucket, item.key)
loop.call_soon_threadsafe(observer.on_next, UploadReponse(
key=item.key,
id=item.id))
else:
                    loop.call_soon_threadsafe(observer.on_error, TypeError("unknown item: {}".format(type(item))))
sink.request.subscribe(
on_next=on_next,
on_error=lambda e: loop.call_soon_threadsafe(observer.on_error, e),
on_completed=lambda: loop.call_soon_threadsafe(observer.on_completed))
return Source(
response=rx.create(on_subscribe)
)
return Component(call=driver, input=Sink)
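# Usage sketch (hypothetical credentials, bucket, and payload; not part of the
# original driver): wire a request subject into the driver, configure it, then
# send uploads and observe the response stream.
#   import reactivex.subject
#   requests = reactivex.subject.Subject()
#   source = make_driver().call(Sink(request=requests))
#   source.response.subscribe(print)
#   requests.on_next(Configure(access_key="AK", secret_key="SK",
#                              bucket="audio", endpoint_url=None,
#                              region_name="us-east-1"))
#   requests.on_next(UploadObject(key="track.flac", data=b"...", id=1))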
| [
"[email protected]"
]
| |
f4d0fbd3015939c5f1fbedeb7e90834ae6473193 | b02a2c1e8cf778f8f810897c478abcec720b7220 | /ds_algos_primer/python/arrays_and_strings_solutions.py | a70325060b6be3c328ac7c8980a14ffffa4407b0 | []
| no_license | naraekwon/CodingInterviewMastery | d8596a4decb52086ea2eefa32ebccd4a25c6181a | c14ceaa19649269467160a5bf53e4a3d927e97a5 | refs/heads/main | 2023-09-05T16:28:25.253482 | 2021-11-04T18:56:19 | 2021-11-04T18:56:19 | 428,470,905 | 0 | 0 | null | 2022-02-13T19:54:09 | 2021-11-16T00:58:46 | null | UTF-8 | Python | false | false | 15,724 | py | """
Title: Arrays and Strings Solutions
This file contains the solutions for the Arrays and Strings exercises in
the DS & Algos Primer. If you have not already attempted these exercises,
we highly recommend you complete them before reviewing the solutions here.
Execution: python arrays_and_strings_solutions.py
*** IMPORTANT NOTE ***
Python provides a lot of inbuilt functions to accomplish certain tasks. If you
are aware of these, that's great.
HOWEVER, the goal of these exercises is to understand these data structures.
Therefore, you are discouraged from writing one- to two-line functions. Instead
you will learn a lot more by implementing these things manually.
In your interview, you may want to use these inbuilt functions, but while
learning, you will learn more by doing things the hard way.
"""
from typing import List
"""
Exercise 1.1: Write a function that takes an integer array and reverses
the values in place
Time Complexity: O(len(arr))
Space Complexity: O(1)
"""
def reverse_array(arr: List[int]):
# We will iterate to the midpoint of the array. For each value, we can
# get the index its supposed to swap with by computing arr.length-i-1
for i in range(len(arr)//2):
temp = arr[i]
arr[i] = arr[len(arr)-i-1]
arr[len(arr)-i-1] = temp
"""
Exercise 1.2: Given a 2D matrix, write a function to print the values in
the matrix in a clockwise spiral from outside to inside
Time Complexity: O(len(arr) * len(arr[0]))
Space Complexity: O(1)
"""
def print_spiral(arr: List[List[int]]):
# We need to keep track of the boundaries of the current layer of the
# spiral that we are traversing
min_row = 0
min_col = 0
max_row = len(arr)-1
max_col = len(arr[0])-1
# Once the mins and maxes converge, we are at the center of the spiral.
    # The spiral follows a fixed set of steps: we go right, down, left, up.
    # For each of these, we just iterate to the bounds, so we express each
# one explicitly.
while min_row < max_row and min_col < max_col:
# Go across the top
for col in range(min_col, max_col+1):
print(arr[min_row][col])
min_row = min_row+1
# Go down the right side
for row in range(min_row, max_row+1):
print(arr[row][max_col])
max_col = max_col-1
# Go across the bottom
for col in range(max_col, min_col-1, -1):
print(arr[max_row][col])
max_row = max_row-1
# Go up the left side
for row in range(max_row, min_row-1, -1):
print(arr[row][min_col])
        min_col = min_col+1
    # If a single row or column remains (as with odd-sized matrices), the loop
    # above exits without printing it, so handle that leftover here
    if min_row == max_row:
        for col in range(min_col, max_col+1):
            print(arr[min_row][col])
    elif min_col == max_col:
        for row in range(min_row, max_row+1):
            print(arr[row][min_col])
"""
Exercise 1.3: Given a 2D matrix, write a function to print the values in the
matrix in a zig-zag order
Time Complexity: O(len(arr) * len(arr[0]))
Space Complexity: O(1)
"""
def print_diagonals(arr: List[List[int]]):
row = 0
col = 0
# Like the spiral, we have clearly defined directions we need to go. In
# this case we either go up to the right or down to the left. We define
# each of these explicitly and just go back and forth between doing one
# and the other
while True:
# Go up to the right
while row > 0 and col < len(arr[0])-1:
print(arr[row][col])
row = row-1
col = col+1
# Without this we won't print the final value in the diagonal
print(arr[row][col])
            # Check whether we're at the bottom right corner
            if row == len(arr)-1 and col == len(arr[0])-1:
                break
            # We need to update our position differently depending on whether
            # we're still going along the top of the matrix or down the
            # righthand side
elif col+1 < len(arr[0]):
col = col+1
else:
row = row+1
# Go down to the left
while row < len(arr)-1 and col > 0:
print(arr[row][col])
row = row+1
col = col-1
# Without this we won't print the final value in the diagonal
print(arr[row][col])
            # Check whether we're at the bottom right corner
if row == len(arr)-1 and col == len(arr[0])-1:
break
# Are we going along the lefthand side or the bottom?
elif row+1 < len(arr):
row = row+1
else:
col = col+1
"""
Exercise 1.4: Write a function that takes in a string and removes every
even-indexed character
Time Complexity: O(len(s))
Space Complexity: O(len(s))
"""
def remove_even(s: str) -> str:
# Build the string as a list first and then join everything together
result = []
# Increment by 2 each time to only visit odd indices
for i in range(1, len(s), 2):
result.append(s[i])
return ''.join(result)
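# Worked example: remove_even("abcdef") returns "bdf"; the characters at even
# indices 0, 2, and 4 are dropped.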
"""
Exercises 1.5: Zig Zag Conversion
Full Problem Definition: https://leetcode.com/problems/zigzag-conversion/
Time Complexity: O(len(s))
Space Complexity: O(len(s))
"""
def zig_zag(s: str, num_rows: int) -> str:
# Compute each row and then merge them at the end
rows = [ [] for _ in range(num_rows)]
# We have 2 actions. First we iterate down over each row, then we iterate
# back up. Do one then the other
idx = 0
while idx < len(s):
# Iterate from row 0 to num_rows-1
i = 0
while i < len(rows) and idx < len(s):
rows[i].append(s[idx])
idx = idx+1
i = i+1
# Iterate back up from numRows-2 to 1. Make sure we go from numRows-2 to
# 1 and not numRows-1 to 0 because otherwise we'll add 2 characters to
# row 0 and 2 characters to row numRows-1
i = len(rows)-2
while i >= 1 and idx < len(s):
rows[i].append(s[idx])
idx = idx+1
i = i-1
# Combine everything together
result = []
for row in rows:
result.append(''.join(row))
return ''.join(result)
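# Worked example: zig_zag("PAYPALISHIRING", 3) fills the rows as "PAHN",
# "APLSIIG", and "YIR", then returns "PAHNAPLSIIGYIR".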
"""
Exercise 2.1: Given a string, print out all of the substrings
Time Complexity: O(len(s)^2)
Space Complexity: O(1)
"""
def print_substrings(s: str):
for i in range(len(s)):
for j in range(i+1, len(s)+1):
print(s[i:j])
"""
Exercise 2.2: Write a function to find all duplicates in an array. The array
will contain exactly 1 duplicated value
Time Complexity: O(len(arr)^2)
Space Complexity: O(1)
"""
def find_duplicates(arr: List[int]) -> int:
# Use 2 pointers to compare each pair of values
for i in range(len(arr)):
for j in range(i+1, len(arr)):
if arr[i] == arr[j]:
return arr[i]
"""
Exercise 2.3: Given a sorted array, find every pair of values in the
array that sum up to a given target
Time Complexity: O(len(arr))
Space Complexity: O(1)
"""
def two_sum(arr: List[int], target: int) -> List[List[int]]:
result = []
# We start our pointers at the beginning and move towards the center
i = 0
j = len(arr)-1
while i < j:
sum = arr[i] + arr[j]
# If we found the target, we add it to the result. Then we either
# increment i or decrement j. It doesn't matter which we do
if sum == target:
result.append([arr[i],arr[j]])
# We want to avoid including the same pair multiple times so we
# skip the pointer ahead to the next unique value. Since our
# array is sorted, we just keep incrementing until we see a
# new value
while arr[i] == arr[i+1]:
i = i+1
i = i+1
# We can find a larger sum by incrementing i. This makes the smaller
# value in our pair larger so the sum is larger
if sum < target:
i = i+1
# If it's too big, we do the opposite by decrementing j
if sum > target:
j = j-1
return result
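# Worked example: two_sum([1,2,2,2,3,4,5,6,6,6], 8) returns [[2, 6], [3, 5]];
# the duplicate-skipping above keeps repeated [2, 6] pairs out of the result.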
"""
Exercise 3.1: Given two arrays, compare them to see if they are equal
Time Complexity: O(len(arr1))
Space Complexity: O(1)
"""
def arrays_are_equal(arr1: List[int], arr2: List[int]) -> bool:
# If they're not the same length they can't be equal
if len(arr1) != len(arr2):
return False
# Compare each value. If they're not equal then the arrays are unequal
for i in range(len(arr1)):
if arr1[i] != arr2[i]:
return False
return True
"""
Exercise 3.2: Given two strings, determine if one string is the reverse of the
other string
Time Complexity: O(len(s1))
Space Complexity: O(1)
"""
def strings_are_opposite(s1: str, s2: str) -> bool:
# If they're not the same length they can't be opposites
if len(s1) != len(s2):
return False
# Compare the opposing characters in each string. We could also just
# reverse one of the strings and compare them, but that takes extra
# space whereas this does not
for i in range(len(s1)):
if s1[i] != s2[len(s2)-i-1]:
return False
return True
"""
Exercise 3.3: Given two strings, determine whether they are anagrams of
each other
Time Complexity: O(len(s1))
Space Complexity: O(len(s1))
"""
def are_anagrams(s1: str, s2: str) -> bool:
# If they're not the same length they can't be anagrams
if len(s1) != len(s2):
return False
# Count the number of occurrences of each character in s1
chars = {}
for c in s1:
chars[c] = chars.get(c, 0) + 1
# Subtract the chars in s2 from the count. We should end up with 0 of
# each character left over
for c in s2:
# s1 doesn't contain c at all
if c not in chars:
return False
# s1 contains fewer occurrences of c than s2
chars[c] = chars[c]-1
if chars[c] < 0:
return False
return True
"""
Exercise 4.1: Given an array, compute the sum of each length-k subarray
Time Complexity: O(len(arr))
Space Complexity: O(1)
"""
def subarray_sums(arr: List[int], k: int) -> List[int]:
result = []
# Compute the sum of the initial length-k subarray
sum = 0
for i in range(k):
sum = sum + arr[i]
result.append(sum)
# Use a sliding window to go through the remainder of the array without
# recomputing the sum for every subarray
left = 0
right = k-1
while right < len(arr)-1:
# The value at right+1 needs to be added to the sum and the value
# at left needs to be subtracted
right = right+1
sum = sum + arr[right]
sum = sum - arr[left]
left = left + 1
result.append(sum)
return result
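# Worked example: subarray_sums([1,2,3,4,5], 3) returns [6, 9, 12]; each new
# window adds arr[right] and subtracts arr[left] instead of re-summing.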
"""
Exercise 4.2: Given a string, find the longest substring of the string that does
not contain any repeated characters
Time Complexity: O(len(s))
Space Complexity: O(1)
"""
def no_repeated_chars(s: str) -> int:
# Track the characters in our current substring
in_substring = set()
max_substring = 0
left = 0
right = 0
# We expand right out as much as we can without getting duplicate chars. If
# we end up with duplicates, we increment left to shrink the substring until
# we no longer have duplicates
while right < len(s):
# We have a duplicate character, so increment left until the substring
# no longer contains duplicates
while s[right] in in_substring:
in_substring.remove(s[left])
left = left + 1
# We have a valid substring so is it the longest one?
max_substring = max(max_substring, right-left+1)
# Try expanding the substring again
in_substring.add(s[right])
right = right+1
return max_substring
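# Worked example: no_repeated_chars("abcdbea") returns 5; after the second 'b'
# forces left past index 1, the window grows again to cover "cdbea".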
"""
Exercise 4.3: Given two strings, s and p, find all occurrences of anagrams of p
in s. The output is the starting index of each anagram
Time Complexity: O(len(s))
Space Complexity: O(1)
"""
def find_all_anagrams(s: str, p: str) -> List[int]:
result = []
# This is another option for computing character counts instead of a dict
# since we know they're lowercase English chars. This is a little easier
# given the approach below than using a dict
chars = [0]*256
for c in p:
chars[ord(c)] = chars[ord(c)] + 1
# Do our sliding window
left = 0
right = 0
while right < len(s):
# Add in the right character to our current window. We account for this
# by removing it from the character count we have for p
right_char_ord = ord(s[right])
right = right + 1
chars[right_char_ord] = chars[right_char_ord] - 1
# If the value is negative, then we have too many of rightChar in our
# substring so we need to make it smaller until we no longer have too
# many of that character
while chars[right_char_ord] < 0:
chars[ord(s[left])] = chars[ord(s[left])] + 1
left = left + 1
# If we have the exact right number of occurrences of the character AND
# the substring is the right length, then this is a valid substring
if chars[right_char_ord] == 0 and right-left == len(p):
result.append(left)
return result
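# Worked example: find_all_anagrams("cbaebabacd", "abc") returns [0, 6],
# matching the anagram windows "cba" and "bac".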
"""
Exercise 4.4: Given two strings, s and p, find the smallest substring of s that
contains all the characters in p
Time Complexity: O(len(s))
Space Complexity: O(1)
"""
def smallest_substring(s: str, p: str) -> str:
# Same as 4.3, we use an array to store character count
chars = [0]*256
for c in p:
chars[ord(c)] = chars[ord(c)] + 1
left = 0
right = 0
# In addition to tracking left and right, we'll track the start and length
# of the string, as well as the count of characters from p that we have in
# our substring. The count allows us to quickly see whether our substring
# includes all the characters in p or not
count = 0
min_length = float('inf')
min_start = 0
while right < len(s):
# This is basically opposite of 4.3 where we WANT all the values to get
# to 0 or negative because we want the string to be inclusive of all the
# characters in p
right_char_ord = ord(s[right])
right = right + 1
chars[right_char_ord] = chars[right_char_ord] - 1
if chars[right_char_ord] >= 0:
count = count + 1
# If count == p.length we have a valid substring. In this case, keep
# shrinking it as much as we can by incrementing left
while count == len(p):
if right - left < min_length:
min_length = right - left
min_start = left
# If we have extra of a character, we don't decrement the count
# until we have fewer occurrences of that char than there are in p
left_char_ord = ord(s[left])
chars[left_char_ord] = chars[left_char_ord] + 1
if chars[left_char_ord] > 0:
count = count + 1
left = left + 1
# If we don't find a valid substring, return ""
if (min_length > len(s)):
return ""
return s[min_start : min_start + min_length]
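# Illustrative example (added note): smallest_substring("aabbccdd", "abc")
# returns "abbc", the shortest window covering one 'a', one 'b' and one 'c'.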
# Sample test cases
if __name__ == '__main__':
l = [1,2,3,4]
reverse_array(l)
print(l)
matrix = [[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16, 17,18,19,20]]
print_spiral(matrix)
print_diagonals(matrix)
print(remove_even("abcdef"))
print(zig_zag("PAYPALISHIRING", 3))
print_substrings("abcde")
print(find_duplicates([1,2,3,4,3,5]))
print(two_sum([1,2,2,2,3,4,5,6,6,6], 8))
print(arrays_are_equal([1,2,3,4], [1,2,3,4]))
print(strings_are_opposite("abcd", "dcba"))
print(are_anagrams("abcd", "cdab"))
print(subarray_sums([1,2,3,4,5], 3))
print(no_repeated_chars("abcdbea"))
print(find_all_anagrams("cbaebabacd", "abc"))
print(smallest_substring("aabbccdd", "abc"))
| [
"[email protected]"
]
| |
c82b677441afb16074f0386638f5da0f86f9303e | 56a8d1f72b005bd52560c3804541be729876aa9f | /rotation.py | 2f05ebde4daf3525b7c39a173e8cbb402cf3dc59 | []
| no_license | drrobotk/pycodilitytests | e5e13c9dd683207290e598e577d73555c0ef29ed | acb5a8ad52135fa327fb97d7c42f95ae23cb3389 | refs/heads/master | 2021-04-14T03:16:33.397722 | 2020-03-22T15:23:57 | 2020-03-22T15:23:57 | 249,203,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A, K):
k = 0
# write your code in Python 3.6
if len(A) != 0:
for i in range(K):
k = A[len(A)-1]
A.pop()
A.insert(0,k)
return A
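# A minimal alternative sketch (added for illustration, not part of the
# original Codility submission): slicing rotates in O(len(A)) regardless of K.
# The name solution_slicing is hypothetical.
def solution_slicing(A, K):
    if not A:
        return A
    k = K % len(A)  # rotating by a multiple of len(A) is a no-op
    return A[-k:] + A[:-k] if k else A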
if __name__ == '__main__':
A = []
K = 3
result = solution(A, K)
print(result) | [
"[email protected]"
]
| |
e8a033aa3f8f48fd75b3f9aca077478771f2bb75 | 01f09bdec77ee7823919824ac25cb1a3610790cb | /d2_Assignment_07_a.py | 8a77cb92183fcedb51f410582e51d30fbffb372e | []
| no_license | SYN2002/PYTHON-LAB | 3da5bda642d7a139ccf39e350750da8d4f5128d0 | dd93fa884415f423988375f2d3b0f058bc253135 | refs/heads/main | 2023-08-27T11:53:53.075132 | 2021-10-17T15:31:00 | 2021-10-17T15:31:00 | 406,604,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | n1=int(input("Enter the lower limit: "))
n2=int(input("Enter the uper limit: "))
i,j,c=1,0,0
print("The prime numbers are: ")
for i in range(n1,n2+1):
c=0
for j in range(1,i+1):
if(i%j==0):
c=c+1
if(c==2):
print(i,end=" ") | [
"[email protected]"
]
| |
261b9e2fe87ce74a8028d94c3c61852211f01d39 | 1d482878230a6c6cbef7680f3910561a4b35c35c | /element/scripts/migrations/0021_auto_20180218_0632.py | dacc71e2958595a88be73d43e5bd6e43cab8ed4d | []
| no_license | karthikvasudevan92/elem | ac5355fe029251b7de76428a558049ab949689df | f5dad5cdfaba736843d29c781ec253d2cee51ccd | refs/heads/master | 2021-04-28T03:47:23.643252 | 2018-03-17T13:10:07 | 2018-03-17T13:10:07 | 122,144,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # Generated by Django 2.0.1 on 2018-02-18 06:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scripts', '0020_auto_20180218_0621'),
]
operations = [
migrations.AlterField(
model_name='script_line',
name='script',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scripts.Script'),
),
]
| [
"[email protected]"
]
| |
0f0a43f2a910cb3bd27dccab958083608f47a592 | 0258e0c9595406ceb3de32067aff776bc2a58fa8 | /06_p12.py | a649f413d98bebdcef131856db0da2a3d6949b5d | []
| no_license | akromibn37/python_code | 72c016c361b3ba2e04c83e1d1a703171b0bd8819 | 41d1a09f8ec8696e37ad83c1a0cb6506c7f0f4f6 | refs/heads/master | 2020-03-21T22:57:25.111642 | 2018-06-29T14:14:33 | 2018-06-29T14:14:33 | 139,157,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | data = input().strip()
l = []
for x in range(len(data)):
l.append(data[x])
num = int(input().strip())
out = ""
i = 0
while i<num:
out = ""
command = [e for e in input().split()]
if command[0] == "in":
l.insert(int(command[2]),command[1])
elif command[0] == "out":
l.pop(int(command[1]))
elif command[0] == "swap":
x = l[int(command[1])]
y = l[int(command[2])]
l[int(command[1])] = y
l[int(command[2])] = x
for j in range(len(l)):
out += l[j]
print(out)
i+=1
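# Input format inferred from the code above (added note): the first line is a
# string, the second the number of commands, then one command per line, e.g.
#   in x 2    -> insert "x" at index 2
#   out 0     -> remove the character at index 0
#   swap 1 3  -> exchange the characters at indices 1 and 3
# The mutated string is printed after every command.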
| [
"[email protected]"
]
| |
d1a50b99473a4235042bb673ae4d5648722d7914 | 720dcd12b8fb7ab26125317a6f3d00c2623e5f13 | /chatbotQuery/__init__.py | fe8fcde48e539b7f3222f7e172a5b2d88236c54b | [
"MIT"
]
| permissive | tgquintela/chatbot_query | 78e6f21268e06572009295c271c277ef89f2dcbc | 4c5160992a444f828da019ae57a802467a13c2fa | refs/heads/master | 2021-01-01T18:00:46.261089 | 2017-10-13T18:03:32 | 2017-10-13T18:03:32 | 98,224,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,896 | py |
"""
TODO
----
Decorator for message collections
"""
import copy
class ChatbotMessage(dict):
"""
Compulsory elements
-------------------
- message
- collection
- from [user, bot]
"""
def __init__(self, message):
self.update({'message': '', 'collection': False})
self.update(message)
assert('from' in self)
assert('message' in self)
assert('collection' in self)
@classmethod
def from_message(cls, message):
if isinstance(message, ChatbotMessage):
return message
return cls(message)
@classmethod
def from_candidates_messages(cls, message):
message.update({'from': 'bot'})
if type(message['message']) == str:
message['collection'] = False
elif type(message['message']) == list:
message['collection'] = True
return cls(message)
@classmethod
def fake_user_message(cls):
return cls({'from': 'user'})
@property
def last_message_text(self):
if self['collection']:
return self['message'][-1]['message']
else:
return self['message']
def get_last_post(self):
_, last_post = self._filter_message_2_post()
for p in last_post:
yield p
def get_post(self):
posts, _ = self._filter_message_2_post()
for p in posts:
yield p
def get_all_messages(self):
for p in self.get_post():
yield p
for p in self.get_last_post():
yield p
def format_message(self, format_information):
if self['collection']:
self['message'][-1]['message'] =\
self['message'][-1]['message'].format(**format_information)
else:
self['message'] = self['message'].format(**format_information)
return self
def reflect_message(self, pre_message):
for key in pre_message:
if key not in ['message', 'from', 'time', 'answer_status',
'sending_status', 'collection', 'posting_status']:
self[key] = pre_message[key]
return self
def reflect_metadata(self, pre_message):
for key in pre_message:
if key not in self:
if key not in ['message', 'from', 'time', 'answer_status',
'sending_status', 'collection']:
self[key] = pre_message[key]
return self
def keep_query(self, pre_message):
if 'query' in pre_message:
if 'query' in self:
if self['query'] is None:
self['query'] = pre_message['query']
else:
self['query'] = pre_message['query']
return self
def _if_possible_send(self, message):
logi = True
logi = logi and (message['from'] == 'bot')
logi = logi and (message['message'] != '')
return logi
def _filter_message_2_post(self):
posts, last_post = [], []
if self['collection']:
messages = [m for m in self['message']
if self._if_possible_send(m)]
if len(messages):
last_post = [messages[-1]]
posts = messages[:-1]
else:
if self._if_possible_send(self):
last_post = [copy.copy(self)]
return posts, last_post
def _detect_message_sending_status(self):
if 'sending_status' in self:
return self['sending_status']
return True
def _preformat_collection_messages(self):
if not self._detect_message_sending_status():
if not self['collection']:
self['message'] = [copy.copy(self)]
self['collection'] = True
return self
return self
def _is_prepared(self, message):
if message['message'] == '':
return False
if 'sending_status' in self:
return self['sending_status']
if 'posting_status' in self:
return self['posting_status']
def is_prepared(self):
if self['collection']:
return any([self._is_prepared(e) for e in self['message']])
else:
return self._is_prepared(self)
return False
def add_tags(self, tags):
if tags is not None and (type(tags) in [list, str]):
tags = tags if type(tags) == list else [tags]
if 'tags' in self:
old_tags = self['tags']
old_tags += tags
old_tags = list(set(old_tags))
self['tags'] = old_tags
else:
self['tags'] = tags
if self['collection']:
if 'tags' in self['message'][-1]:
old_tags = self['message'][-1]['tags']
old_tags += tags
old_tags = list(set(old_tags))
self['message'][-1]['tags'] = old_tags
self['tags'] = old_tags
else:
self['message'][-1]['tags'] = tags
return self
def collapse_message(self, message):
self._preformat_collection_messages()
if self['collection']:
messagestext = copy.copy(self['message'])
if message['collection']:
messagestext += message['message']
else:
messagestext.append(message)
self.update(message)
self['message'] = messagestext
self['collection'] = True
self.check_message()
return self
else:
output_message = copy.copy(message)
output_message['collection'] = False
if 'query' in message:
output_message['query'] = message['query']
output_message =\
ChatbotMessage.from_candidates_messages(output_message)
output_message.check_message()
return output_message
def add_selector_types(self, selector_types):
## Store results in message
self['selector_types'] = selector_types
return self
def add_entry_to_last_message(self, entry_var, var):
self[entry_var] = var
if self['collection']:
self['message'][-1][entry_var] = var
return self
def structure_answer(self):
## Input selector types
if self['collection']:
self['message'][-1]['selector_types'] = self['selector_types']
self.check_message()
return self
def check_message(self):
if self['collection']:
assert(all([isinstance(m, dict) for m in self['message']]))
assert(all([isinstance(m['message'], str)
for m in self['message']]))
else:
assert(isinstance(self['message'], str))
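# Usage sketch (added for illustration; not part of the original module). The
# dict keys follow the compulsory elements listed in the class docstring.
def _example_usage():
    msg = ChatbotMessage({'from': 'bot', 'message': 'hello'})
    assert msg.last_message_text == 'hello'
    msg.add_tags('greeting')  # a bare string is wrapped into a list
    assert msg['tags'] == ['greeting']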
| [
"[email protected]"
]
| |
8ab81a05046b4fbe1d20f70062f9411fee994e8d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop255_pad20_jit15/Sob_k17_s001/pyr_4s/L4/step10_a.py | 75773149c2e2458db22e88582b00384156b134b7 | []
| no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,921 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
code_exe_path = os.path.realpath(__file__)                                   ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                            ### split the path so we can find which level kong_model sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")                      ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])          ### locate the kong_model2 dir
import sys                                                                   ### add kong_model2 to sys.path
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer               ### the middle -1 converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] would drop the leading "step1x_"; I later felt a meaningful name is fine to keep, so it became 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] would drop the leading "mask_", which was only added because a python module name cannot start with a digit; the automatic ordering turned out acceptable, so it became 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### for example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_4side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the "parent" folder name that determines result_dir! A nested exp_dir is fine too~
For example, with exp_dir = "6_mask_unet/your_chosen_name", every result_dir lives under:
    6_mask_unet/your_chosen_name/result_a
    6_mask_unet/your_chosen_name/result_b
    6_mask_unet/your_chosen_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_sobel_k17_loss_info_builder.set_loss_target("UNet_Mask").copy()] ### the z, y, x order is matched against step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that resul_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
# 1 3 6 10 15 21 28 36 45 55
# side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
ch032_1side_2__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
ch032_1side_3__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
ch032_1side_4__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 10 "15" 21 28 36 45 55
# side5 OK 35
ch032_1side_5__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
### Pressing F5 directly, or running "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it, takes this branch instead of falling through to the code below that serves step10_b_subprocss.py~~~
ch032_1side_1__2side_1__3side_1_4side_1.build().run()
# print('no argument')
sys.exit()
### The code below serves step10_b_subprocess.py; it is equivalent to typing "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" in cmd
eval(sys.argv[1])
| [
"[email protected]"
]
| |
de47d3adc3f532d09cc14eab66da496c3a9dfa6a | 5f82cd4c97e3bc950799f3d2feedd8e5f800dc4c | /FNN.py | 7e0a73075675825258d17f22f9d8062db01426e9 | []
| no_license | Zhetuo-Zhao/deepLearning_template | 77f0b79d229999f009de61fe43c5d80a85ce7743 | c477a4eccb24cd833e2cbdd9840923f5d3f6ebb1 | refs/heads/master | 2022-12-19T05:19:28.724019 | 2020-09-16T06:11:17 | 2020-09-16T06:11:17 | 295,637,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | # %%
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("/tmp/data/", one_hot=True)
n_nodes_hl1=500
n_nodes_hl2=500
n_nodes_hl3=500
n_classes=10
batch_size=100
x=tf.placeholder('float',[None, 784])
y=tf.placeholder('float')
def neural_network_model(data):
hidden_1_layer={'weights':tf.Variable(tf.random_normal([784,n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'biases': tf.Variable(tf.random_normal([n_classes]))}
l1=tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1=tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
return output
def train_neural_network(x,y):
prediction = neural_network_model(x)
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
optimizer=tf.train.AdamOptimizer().minimize(cost)
hm_epochs=10
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())  # initialize_all_variables() in the original is the deprecated pre-TF-1.0 name for this
for epoch in range(hm_epochs):
epoch_loss=0
for _ in range(int(mnist.train.num_examples/batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
_, c=sess.run([optimizer,cost],feed_dict={x:epoch_x, y:epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct= tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy =tf.reduce_mean(tf.cast(correct,'float'))
print('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
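# Note (added): this file uses the TensorFlow 1.x API (placeholders and
# sessions). Under TensorFlow 2.x it only runs through the compatibility
# layer, e.g.:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()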
train_neural_network(x,y) | [
"[email protected]"
]
| |
d1193d44c5e75ff605a3a7007ffa13f5294f8fb5 | e6120961ab5a2005e86cf772e56d694878b8cb35 | /Finance/Old_Finance/MyFinance8.py | c3c87e529104e1875500a06c8783c14c4d4e7ac1 | []
| no_license | marcelo-alves87/STN-PythonLearning | a81e1d379dc28fd0334883dc7f930c7aadc6f047 | c2d31c0ae55a302d8cd35636ed02673452536f8e | refs/heads/master | 2023-08-22T09:05:56.313556 | 2023-08-20T21:33:16 | 2023-08-20T21:33:16 | 139,202,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | import numpy as np
import pandas as pd
import pandas_datareader.data as web
from collections import Counter
from sklearn import svm, cross_validation, neighbors
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
def process_data_for_labels(ticker):
hm_days = 7
df = pd.read_csv('ibovespa_joined_closes.csv', index_col=0)
tickers = df.columns.values.tolist()
df.fillna(0, inplace=True)
for i in range(1, hm_days+1):
df['{}_{}d'.format(ticker, i)] = (df[ticker].shift(-i) - df[ticker]) / df[ticker]
df.fillna(0, inplace=True)
return tickers, df
def buy_sell_hold(*args):
cols = [c for c in args]
requirement = 0.02
for col in cols:
if col > requirement:
return 1
if col < -requirement:
return -1
return 0
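# Illustrative example (added note): with requirement = 0.02,
# buy_sell_hold(0.01, 0.03) returns 1 (first column above +2%), while
# buy_sell_hold(0.01, -0.05) returns -1 and buy_sell_hold(0.0, 0.01) returns 0.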
def extract_featuresets(ticker):
tickers, df = process_data_for_labels(ticker)
df['{}_target'.format(ticker)] = list(map( buy_sell_hold,
df['{}_1d'.format(ticker)],
df['{}_2d'.format(ticker)],
df['{}_3d'.format(ticker)],
df['{}_4d'.format(ticker)],
df['{}_5d'.format(ticker)],
df['{}_6d'.format(ticker)],
df['{}_7d'.format(ticker)]))
vals = df['{}_target'.format(ticker)].values.tolist()
str_vals = [str(i) for i in vals]
print('Data spread:', Counter(str_vals))
df.fillna(0, inplace=True)
df = df.replace([np.inf, -np.inf], np.nan)
df.dropna(inplace=True)
df_vals = df[[ticker for ticker in tickers]].pct_change()
df_vals = df_vals.replace([np.inf, -np.inf], 0)
df_vals.fillna(0, inplace=True)
X = df_vals.values
y = df['{}_target'.format(ticker)].values
return X, y, df
def do_ml(ticker):
X, y, df = extract_featuresets(ticker)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
clf = VotingClassifier([('lsvc', svm.LinearSVC()),
('knn', neighbors.KNeighborsClassifier()),
('rfor', RandomForestClassifier())])
clf.fit(X_train, y_train)
confidence = clf.score(X_test, y_test)
print('accuracy:', confidence)
predictions = clf.predict(X_test)
print('predicted class counts:', Counter(predictions))
return confidence
do_ml('ABEV3')
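# Hedged usage sketch: score a few more tickers and average the accuracies.
# The extra ticker symbols below are illustrative IBOVESPA names, i.e.
# assumptions, not taken from the original file.
if __name__ == '__main__':
    accuracies = [do_ml(ticker) for ticker in ('PETR4', 'VALE3')]
    print('mean accuracy:', sum(accuracies) / len(accuracies))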
| [
"[email protected]"
]
| |
e6417d749e8b17495ea9739ff8246df8c4a1a1eb | f5fe67aada0fca0ebc71d605e4a2dd3d60695a25 | /Evelyns Dateien/Grundpraktikum 2/US3/profil_streu_34.py | 2ee8df1f9cba6afa10dd8360dd49b71308ac5b84 | []
| no_license | rkallo/APWS1718 | 25eb10a88783434c81ebf1902936ceb6fc06ab0d | 5e124d5342ef2fd333311ddb4d323744323b68b8 | refs/heads/master | 2021-08-28T06:45:08.518688 | 2019-11-21T18:05:29 | 2019-11-21T18:05:29 | 223,233,392 | 2 | 1 | null | 2021-08-22T19:20:50 | 2019-11-21T17:59:30 | TeX | UTF-8 | Python | false | false | 479 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from pylab import figure, axes, pie, title, show
from fractions import *
x, y = np.loadtxt('streu_34.txt', unpack=True, delimiter=',')
plt.figure(1)
plt.plot(x, y, 'rx', label='Measured data')
plt.ylabel(r'$Intensity\,\,I$ $/ \, \% $')
plt.xlabel(r'$Penetration\,\,depth\,\,x$ $/ \, mm$')
plt.grid()
plt.legend()
plt.savefig('streu_34.pdf')
print('Done')
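# Hedged extension: curve_fit is imported above but never used. A typical
# next step for this ultrasound attenuation data would be an exponential
# fit I(x) = I0 * exp(-mu * x); the model and the start values are
# assumptions, not taken from the original script.
def attenuation(x, I0, mu):
    return I0 * np.exp(-mu * x)

params, cov = curve_fit(attenuation, x, y, p0=(100.0, 0.1))
print('I0 = %.2f %%, mu = %.4f 1/mm' % (params[0], params[1]))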
"[email protected]"
]
| |
de220ce4ab36c212e05c19db10caeba92cbbe9e1 | 9f0d913822de7ebb86d9a2634e3b99d452db83ee | /py/folder2zip.py | 83bec69e42cb6e9818dbedca25d2eeee81ab084e | []
| no_license | esabox/code-desu | 3029e71d031a26c4da7687a394e4f497e09acabf | 63b3756035163cc3f480a0e87127eed93b090dfb | refs/heads/master | 2023-03-30T09:02:55.631388 | 2021-04-09T04:58:04 | 2021-04-09T04:58:04 | 222,210,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Turn every folder inside a folder into an uncompressed zip
import os
import glob
import re
import sys
import shutil
gomibako = '/Users/kazoku/.Trash/'
def main():
print(sys.version)
print(sys.argv)
work_dir = '/Users/kazoku/Desktop/book/'
    # use the work-dir argument if one was given
if 1 < len(sys.argv):
work_dir = sys.argv[1]
os.chdir(work_dir)
print('pwd :'+os.getcwd())
    ls = glob.glob('*/')  # directories only
zipnisuru(ls)
res = input('\n[rename? Yes="" / No=other ]:')
if res == '':
        print('Executing rename')
zipnisuru(ls, True)
    print('Done')
def zipnisuru(ls, write=False):
for f in ls:
# print(f)
nf = re.sub('/', '.zip', f)
print('new: '+nf)
cmd = 'zip -r --quiet -0 "' + nf + '" "' + f + '"'
        # -r (recurse) is required when zipping a directory, even a single one
if write:
os.system(cmd)
print(cmd)
        # os.remove(f)  # only handles a single file or an empty folder
        # shutil.rmtree(f)  # deletes outright instead of going via the trash
shutil.move(f, gomibako)
pass
# zip -r -n ".jpg:.JPG:.jpeg:.JPEG:.gif:.GIF" "$zipf" "$zipd" -x ".DS_Store"
# -r オプションは、ZIPの入力ファイルにディレクトリがある場合、再帰的にたどっていくことを指示します。
# -n オプションは、引数として与えられた拡張子を持つファイルを、圧縮しないように指示します。tiffやpngも入れてもいいかもしれません。大文字小文字の違いを無視できればよいのですが、ちょっと冗長になっています。私は、テキストファイルなどは圧縮したいので、このオプションを使っています。
# -nオプションの代わりに、-0オプションを使えばすべてのファイルが無圧縮になります。-# (#は0から9の数字が入ります)の形式のオプションは、圧縮スピードを相対的に指定するのですが、-0は無圧縮の指定になっています。-0の方が無圧縮ファイル作成の目的に合う人はこちらの方がよいと思います。
# $zipfは、作成するZIPファイル名が入った変数です。
# $zipdは、作成対象のディレクトリ名が入った変数です。両方共ダブルクォーテーションで囲んであるのは、文字列中にスペースがあるとシェルが別々の引数として処理するため、予防的に入れています。
# 最後の-xオプションは、ZIPの対象としないファイルを指定します。.DS_StoreはFinderが不可視ファイルとして作る場合がありますが、今回はZIPファイルに含める必要はないため除外するようにしています。
# zipコマンドの詳細はターミナルからmanコマンドで調べることもできます。
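import subprocess

def zip_folder_no_shell(zip_name, folder):
    # Hedged sketch: the same zip invocation as in zipnisuru above, but run
    # without a shell, so names containing quotes or spaces need no manual
    # escaping. It mirrors the flags used there and is an illustration, not
    # a drop-in replacement used by the original script.
    subprocess.run(['zip', '-r', '--quiet', '-0', zip_name, folder], check=True)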
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
2d5543f03917a6065eba621b789a7e27e75d8cf2 | 21c1da4a354037f8aed1fb323d64295f1d40d0c6 | /additive-number/solution.py | 3f0c8e98f8f5ab4e12f51b90f3a53185f8d49432 | []
| no_license | hsinhoyeh/leecode | b0de2334a1bcd9277335fba4ae7e3519775da8f9 | 7760d44f7a9038f48e12eabb6d5bafd182a0a8f6 | refs/heads/master | 2021-01-10T10:11:40.259319 | 2017-08-11T01:31:22 | 2017-08-11T01:31:22 | 45,776,150 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | import math
class NextNum(object):
def __init__(self, num, start_index, max_len=None):
self.num = num
self.start_index = start_index
self.end_index = start_index
self.max_len = max_len
self.last_val = None
def next(self, nlen=1): # default, lookup the number with len=1
# none of value is leading with 0
# make sure that we won't go further
if self.last_val == 0:
return None
self.last_val = self._next(nlen)
return self.last_val
def _next(self, nlen=1):
self.end_index += nlen
if self.end_index > len(self.num):
return None
if self.start_index > len(self.num):
return None
if self.max_len:
if int(math.fabs(self.end_index - self.start_index)) > self.max_len:
return None
if self.num[self.start_index] == '0':
return 0
return int(self.num[self.start_index: self.end_index])
class Solution(object):
def findDigits(self, anum):
return int(math.log(anum, 10)) + 1
# return the last index of found
# otherwise, return None
def findLast(self, num, lst_first_two_nums):
sum_of_2 = sum(lst_first_two_nums)
digits = self.findDigits(sum_of_2)
nn2 = NextNum(num, 0)
if nn2.next(digits) == sum_of_2:
return nn2.end_index
return None
def isAdditiveNumber(self, num):
"""
:type num: str
:rtype: bool
"""
# fix the first two numbers
# NOTE: the length of the first number and second shouldn't exceed n/2
# since first + second = thrid.
half_num = int(len(num)/2)
nn0 = NextNum(num, 0, half_num)
val0 = nn0.next()
while val0 != None:
# number 2 is start from the end of number 1
nn1 = NextNum(num, nn0.end_index, half_num)
val1 = nn1.next()
while val1 != None:
digits = self.findDigits(val0 + val1)
if len(num) < nn1.end_index + digits:
# no need to check
break
index = self.findLast(num[nn1.end_index:], [val0, val1])
if index:
index = index + nn1.end_index
tval0, tval1 = val0, val1
while index != len(num): # not end, we should keep looking
if index == None:
break
tval0, tval1 = tval1, tval0 + tval1
subindex = self.findLast(num[index:], [tval0, tval1])
if subindex:
index = index + subindex
else:
index = subindex
if index == len(num):
return True
val1 = nn1.next()
val0 = nn0.next()
return False
| [
"[email protected]"
]
| |
d4db6548602d798752d08f491cc5d988c7ab352a | ce46bd76dac66e5ff6cfa6556c9d549af324f48e | /ml-progress-bot/download_media.py | 41ffb527fb8f60c7b463737282c8784264a7a0f9 | [
"MIT"
]
| permissive | yell/kaggle-camera | f21b56277c278395496dc78bafbdb41de60439de | 7b471c3631343f6f7fd7adf1a80b2edb46d62f0b | refs/heads/master | 2023-08-26T04:49:11.158767 | 2021-11-05T14:54:58 | 2021-11-05T14:54:58 | 117,623,865 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from telethon.sync import TelegramClient
api_id = 'TODO'
api_hash = 'TODO'
client = TelegramClient('test_session', api_id, api_hash)
client.start()
print(dir(client))
for message in client.get_messages('ml_progress_bot', limit=10000):
client.download_media(message)
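    # Hedged variant: download_media also accepts a target path, e.g. a
    # folder (the folder name below is an assumption):
    # client.download_media(message, file='downloads/')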
| [
"[email protected]"
]
| |
a70ed864f8709eca7cb6f56bd0f9445ad1b82d1b | b828fc06f40d1754dc5d6ab87b7360b97dff2938 | /intrinio_sdk/models/zacks_long_term_growth_rate.py | 38b6c21e356c2fd74ea5df4aca5ee4409d3c9166 | []
| no_license | dhruvsagar/python-sdk | 90302f3727022b9bc2dea83c7df2268bac180281 | 792f8b47a5d3238a92f62b40d164639850d9c4cb | refs/heads/master | 2022-06-04T20:38:51.263726 | 2020-05-05T20:24:29 | 2020-05-05T20:24:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,035 | py | # coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.13.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from intrinio_sdk.models.security_summary import SecuritySummary # noqa: F401,E501
class ZacksLongTermGrowthRate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'mean': 'float',
'median': 'float',
'count': 'int',
'high': 'float',
'low': 'float',
'std_dev': 'str',
'revisions_upward': 'int',
'revisions_downward': 'int',
'mean_7_days_ago': 'str',
'mean_30_days_ago': 'str',
'mean_60_days_ago': 'str',
'mean_90_days_ago': 'str',
'revisions_upward_last_7_days': 'str',
'revisions_downward_last_7_days': 'str',
'revisions_upward_last_30_days': 'str',
'revisions_downward_last_30_days': 'str',
'revisions_upward_last_60_days': 'str',
'revisions_downward_last_60_days': 'str',
'revisions_upward_last_90_days': 'str',
'revisions_downward_last_90_days': 'str',
'revisions_upward_last_120_days': 'str',
'revisions_downward_last_120_days': 'str',
'revisions_upward_last_150_days': 'str',
'revisions_downward_last_150_days': 'str',
'security': 'SecuritySummary'
}
attribute_map = {
'mean': 'mean',
'median': 'median',
'count': 'count',
'high': 'high',
'low': 'low',
'std_dev': 'std_dev',
'revisions_upward': 'revisions_upward',
'revisions_downward': 'revisions_downward',
'mean_7_days_ago': 'mean_7_days_ago',
'mean_30_days_ago': 'mean_30_days_ago',
'mean_60_days_ago': 'mean_60_days_ago',
'mean_90_days_ago': 'mean_90_days_ago',
'revisions_upward_last_7_days': 'revisions_upward_last_7_days',
'revisions_downward_last_7_days': 'revisions_downward_last_7_days',
'revisions_upward_last_30_days': 'revisions_upward_last_30_days',
'revisions_downward_last_30_days': 'revisions_downward_last_30_days',
'revisions_upward_last_60_days': 'revisions_upward_last_60_days',
'revisions_downward_last_60_days': 'revisions_downward_last_60_days',
'revisions_upward_last_90_days': 'revisions_upward_last_90_days',
'revisions_downward_last_90_days': 'revisions_downward_last_90_days',
'revisions_upward_last_120_days': 'revisions_upward_last_120_days',
'revisions_downward_last_120_days': 'revisions_downward_last_120_days',
'revisions_upward_last_150_days': 'revisions_upward_last_150_days',
'revisions_downward_last_150_days': 'revisions_downward_last_150_days',
'security': 'security'
}
def __init__(self, mean=None, median=None, count=None, high=None, low=None, std_dev=None, revisions_upward=None, revisions_downward=None, mean_7_days_ago=None, mean_30_days_ago=None, mean_60_days_ago=None, mean_90_days_ago=None, revisions_upward_last_7_days=None, revisions_downward_last_7_days=None, revisions_upward_last_30_days=None, revisions_downward_last_30_days=None, revisions_upward_last_60_days=None, revisions_downward_last_60_days=None, revisions_upward_last_90_days=None, revisions_downward_last_90_days=None, revisions_upward_last_120_days=None, revisions_downward_last_120_days=None, revisions_upward_last_150_days=None, revisions_downward_last_150_days=None, security=None): # noqa: E501
"""ZacksLongTermGrowthRate - a model defined in Swagger""" # noqa: E501
self._mean = None
self._median = None
self._count = None
self._high = None
self._low = None
self._std_dev = None
self._revisions_upward = None
self._revisions_downward = None
self._mean_7_days_ago = None
self._mean_30_days_ago = None
self._mean_60_days_ago = None
self._mean_90_days_ago = None
self._revisions_upward_last_7_days = None
self._revisions_downward_last_7_days = None
self._revisions_upward_last_30_days = None
self._revisions_downward_last_30_days = None
self._revisions_upward_last_60_days = None
self._revisions_downward_last_60_days = None
self._revisions_upward_last_90_days = None
self._revisions_downward_last_90_days = None
self._revisions_upward_last_120_days = None
self._revisions_downward_last_120_days = None
self._revisions_upward_last_150_days = None
self._revisions_downward_last_150_days = None
self._security = None
self.discriminator = None
if mean is not None:
self.mean = mean
if median is not None:
self.median = median
if count is not None:
self.count = count
if high is not None:
self.high = high
if low is not None:
self.low = low
if std_dev is not None:
self.std_dev = std_dev
if revisions_upward is not None:
self.revisions_upward = revisions_upward
if revisions_downward is not None:
self.revisions_downward = revisions_downward
if mean_7_days_ago is not None:
self.mean_7_days_ago = mean_7_days_ago
if mean_30_days_ago is not None:
self.mean_30_days_ago = mean_30_days_ago
if mean_60_days_ago is not None:
self.mean_60_days_ago = mean_60_days_ago
if mean_90_days_ago is not None:
self.mean_90_days_ago = mean_90_days_ago
if revisions_upward_last_7_days is not None:
self.revisions_upward_last_7_days = revisions_upward_last_7_days
if revisions_downward_last_7_days is not None:
self.revisions_downward_last_7_days = revisions_downward_last_7_days
if revisions_upward_last_30_days is not None:
self.revisions_upward_last_30_days = revisions_upward_last_30_days
if revisions_downward_last_30_days is not None:
self.revisions_downward_last_30_days = revisions_downward_last_30_days
if revisions_upward_last_60_days is not None:
self.revisions_upward_last_60_days = revisions_upward_last_60_days
if revisions_downward_last_60_days is not None:
self.revisions_downward_last_60_days = revisions_downward_last_60_days
if revisions_upward_last_90_days is not None:
self.revisions_upward_last_90_days = revisions_upward_last_90_days
if revisions_downward_last_90_days is not None:
self.revisions_downward_last_90_days = revisions_downward_last_90_days
if revisions_upward_last_120_days is not None:
self.revisions_upward_last_120_days = revisions_upward_last_120_days
if revisions_downward_last_120_days is not None:
self.revisions_downward_last_120_days = revisions_downward_last_120_days
if revisions_upward_last_150_days is not None:
self.revisions_upward_last_150_days = revisions_upward_last_150_days
if revisions_downward_last_150_days is not None:
self.revisions_downward_last_150_days = revisions_downward_last_150_days
if security is not None:
self.security = security
@property
def mean(self):
"""Gets the mean of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate # noqa: E501
:return: The mean of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._mean
@property
def mean_dict(self):
"""Gets the mean of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The mean of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.mean
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean': value }
return result
@mean.setter
def mean(self, mean):
"""Sets the mean of this ZacksLongTermGrowthRate.
The mean long term growth estimate # noqa: E501
:param mean: The mean of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._mean = mean
@property
def median(self):
"""Gets the median of this ZacksLongTermGrowthRate. # noqa: E501
The median long term growth estimate # noqa: E501
:return: The median of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._median
@property
def median_dict(self):
"""Gets the median of this ZacksLongTermGrowthRate. # noqa: E501
The median long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The median of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.median
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'median': value }
return result
@median.setter
def median(self, median):
"""Sets the median of this ZacksLongTermGrowthRate.
The median long term growth estimate # noqa: E501
:param median: The median of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._median = median
@property
def count(self):
"""Gets the count of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimates # noqa: E501
:return: The count of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
return self._count
@property
def count_dict(self):
"""Gets the count of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimates as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The count of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
result = None
value = self.count
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'count': value }
return result
@count.setter
def count(self, count):
"""Sets the count of this ZacksLongTermGrowthRate.
The number of long term growth estimates # noqa: E501
:param count: The count of this ZacksLongTermGrowthRate. # noqa: E501
:type: int
"""
self._count = count
@property
def high(self):
"""Gets the high of this ZacksLongTermGrowthRate. # noqa: E501
The high long term growth estimate # noqa: E501
:return: The high of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._high
@property
def high_dict(self):
"""Gets the high of this ZacksLongTermGrowthRate. # noqa: E501
The high long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The high of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.high
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'high': value }
return result
@high.setter
def high(self, high):
"""Sets the high of this ZacksLongTermGrowthRate.
The high long term growth estimate # noqa: E501
:param high: The high of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._high = high
@property
def low(self):
"""Gets the low of this ZacksLongTermGrowthRate. # noqa: E501
The low long term growth estimate # noqa: E501
:return: The low of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._low
@property
def low_dict(self):
"""Gets the low of this ZacksLongTermGrowthRate. # noqa: E501
The low long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The low of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.low
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'low': value }
return result
@low.setter
def low(self, low):
"""Sets the low of this ZacksLongTermGrowthRate.
The low long term growth estimate # noqa: E501
:param low: The low of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._low = low
@property
def std_dev(self):
"""Gets the std_dev of this ZacksLongTermGrowthRate. # noqa: E501
The standard deviation long term growth estimate # noqa: E501
:return: The std_dev of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._std_dev
@property
def std_dev_dict(self):
"""Gets the std_dev of this ZacksLongTermGrowthRate. # noqa: E501
The standard deviation long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The std_dev of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.std_dev
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'std_dev': value }
return result
@std_dev.setter
def std_dev(self, std_dev):
"""Sets the std_dev of this ZacksLongTermGrowthRate.
The standard deviation long term growth estimate # noqa: E501
:param std_dev: The std_dev of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._std_dev = std_dev
@property
def revisions_upward(self):
"""Gets the revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward # noqa: E501
:return: The revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
return self._revisions_upward
@property
def revisions_upward_dict(self):
"""Gets the revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
result = None
value = self.revisions_upward
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward': value }
return result
@revisions_upward.setter
def revisions_upward(self, revisions_upward):
"""Sets the revisions_upward of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward # noqa: E501
:param revisions_upward: The revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
:type: int
"""
self._revisions_upward = revisions_upward
@property
def revisions_downward(self):
"""Gets the revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward # noqa: E501
:return: The revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
return self._revisions_downward
@property
def revisions_downward_dict(self):
"""Gets the revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
result = None
value = self.revisions_downward
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward': value }
return result
@revisions_downward.setter
def revisions_downward(self, revisions_downward):
"""Sets the revisions_downward of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward # noqa: E501
:param revisions_downward: The revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
:type: int
"""
self._revisions_downward = revisions_downward
@property
def mean_7_days_ago(self):
"""Gets the mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 7 days ago # noqa: E501
:return: The mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_7_days_ago
@property
def mean_7_days_ago_dict(self):
"""Gets the mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 7 days ago as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_7_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_7_days_ago': value }
return result
@mean_7_days_ago.setter
def mean_7_days_ago(self, mean_7_days_ago):
"""Sets the mean_7_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 7 days ago # noqa: E501
:param mean_7_days_ago: The mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_7_days_ago = mean_7_days_ago
@property
def mean_30_days_ago(self):
"""Gets the mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 30 days ago # noqa: E501
:return: The mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_30_days_ago
@property
def mean_30_days_ago_dict(self):
"""Gets the mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 30 days ago as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_30_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_30_days_ago': value }
return result
@mean_30_days_ago.setter
def mean_30_days_ago(self, mean_30_days_ago):
"""Sets the mean_30_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 30 days ago # noqa: E501
:param mean_30_days_ago: The mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_30_days_ago = mean_30_days_ago
@property
def mean_60_days_ago(self):
"""Gets the mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 60 days ago # noqa: E501
:return: The mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_60_days_ago
@property
def mean_60_days_ago_dict(self):
"""Gets the mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 60 days ago as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_60_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_60_days_ago': value }
return result
@mean_60_days_ago.setter
def mean_60_days_ago(self, mean_60_days_ago):
"""Sets the mean_60_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 60 days ago # noqa: E501
:param mean_60_days_ago: The mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_60_days_ago = mean_60_days_ago
@property
def mean_90_days_ago(self):
"""Gets the mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 90 days ago # noqa: E501
:return: The mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_90_days_ago
@property
def mean_90_days_ago_dict(self):
"""Gets the mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 90 days ago as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_90_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_90_days_ago': value }
return result
@mean_90_days_ago.setter
def mean_90_days_ago(self, mean_90_days_ago):
"""Sets the mean_90_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 90 days ago # noqa: E501
:param mean_90_days_ago: The mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_90_days_ago = mean_90_days_ago
@property
def revisions_upward_last_7_days(self):
"""Gets the revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 7 days # noqa: E501
:return: The revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_7_days
@property
def revisions_upward_last_7_days_dict(self):
"""Gets the revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 7 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_7_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_7_days': value }
return result
@revisions_upward_last_7_days.setter
def revisions_upward_last_7_days(self, revisions_upward_last_7_days):
"""Sets the revisions_upward_last_7_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 7 days # noqa: E501
:param revisions_upward_last_7_days: The revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_7_days = revisions_upward_last_7_days
@property
def revisions_downward_last_7_days(self):
"""Gets the revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 7 days # noqa: E501
:return: The revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_7_days
@property
def revisions_downward_last_7_days_dict(self):
"""Gets the revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 7 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_7_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_7_days': value }
return result
@revisions_downward_last_7_days.setter
def revisions_downward_last_7_days(self, revisions_downward_last_7_days):
"""Sets the revisions_downward_last_7_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 7 days # noqa: E501
:param revisions_downward_last_7_days: The revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_7_days = revisions_downward_last_7_days
@property
def revisions_upward_last_30_days(self):
"""Gets the revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 30 days # noqa: E501
:return: The revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_30_days
@property
def revisions_upward_last_30_days_dict(self):
"""Gets the revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 30 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_30_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_30_days': value }
return result
@revisions_upward_last_30_days.setter
def revisions_upward_last_30_days(self, revisions_upward_last_30_days):
"""Sets the revisions_upward_last_30_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 30 days # noqa: E501
:param revisions_upward_last_30_days: The revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_30_days = revisions_upward_last_30_days
@property
def revisions_downward_last_30_days(self):
"""Gets the revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 30 days # noqa: E501
:return: The revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_30_days
@property
def revisions_downward_last_30_days_dict(self):
"""Gets the revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 30 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_30_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_30_days': value }
return result
@revisions_downward_last_30_days.setter
def revisions_downward_last_30_days(self, revisions_downward_last_30_days):
"""Sets the revisions_downward_last_30_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 30 days # noqa: E501
:param revisions_downward_last_30_days: The revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_30_days = revisions_downward_last_30_days
@property
def revisions_upward_last_60_days(self):
"""Gets the revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 60 days # noqa: E501
:return: The revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_60_days
@property
def revisions_upward_last_60_days_dict(self):
"""Gets the revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 60 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_60_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_60_days': value }
return result
@revisions_upward_last_60_days.setter
def revisions_upward_last_60_days(self, revisions_upward_last_60_days):
"""Sets the revisions_upward_last_60_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 60 days # noqa: E501
:param revisions_upward_last_60_days: The revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_60_days = revisions_upward_last_60_days
@property
def revisions_downward_last_60_days(self):
"""Gets the revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 60 days # noqa: E501
:return: The revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_60_days
@property
def revisions_downward_last_60_days_dict(self):
"""Gets the revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 60 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_60_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_60_days': value }
return result
@revisions_downward_last_60_days.setter
def revisions_downward_last_60_days(self, revisions_downward_last_60_days):
"""Sets the revisions_downward_last_60_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 60 days # noqa: E501
:param revisions_downward_last_60_days: The revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_60_days = revisions_downward_last_60_days
@property
def revisions_upward_last_90_days(self):
"""Gets the revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 90 days # noqa: E501
:return: The revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_90_days
@property
def revisions_upward_last_90_days_dict(self):
"""Gets the revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 90 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_90_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_90_days': value }
return result
@revisions_upward_last_90_days.setter
def revisions_upward_last_90_days(self, revisions_upward_last_90_days):
"""Sets the revisions_upward_last_90_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 90 days # noqa: E501
:param revisions_upward_last_90_days: The revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_90_days = revisions_upward_last_90_days
@property
def revisions_downward_last_90_days(self):
"""Gets the revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 90 days # noqa: E501
:return: The revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_90_days
@property
def revisions_downward_last_90_days_dict(self):
"""Gets the revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 90 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_90_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_90_days': value }
return result
@revisions_downward_last_90_days.setter
def revisions_downward_last_90_days(self, revisions_downward_last_90_days):
"""Sets the revisions_downward_last_90_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 90 days # noqa: E501
:param revisions_downward_last_90_days: The revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_90_days = revisions_downward_last_90_days
@property
def revisions_upward_last_120_days(self):
"""Gets the revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 120 days # noqa: E501
:return: The revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_120_days
@property
def revisions_upward_last_120_days_dict(self):
"""Gets the revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 120 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_120_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_120_days': value }
return result
@revisions_upward_last_120_days.setter
def revisions_upward_last_120_days(self, revisions_upward_last_120_days):
"""Sets the revisions_upward_last_120_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 120 days # noqa: E501
:param revisions_upward_last_120_days: The revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_120_days = revisions_upward_last_120_days
@property
def revisions_downward_last_120_days(self):
"""Gets the revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 120 days # noqa: E501
:return: The revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_120_days
@property
def revisions_downward_last_120_days_dict(self):
"""Gets the revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 120 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_120_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_120_days': value }
return result
@revisions_downward_last_120_days.setter
def revisions_downward_last_120_days(self, revisions_downward_last_120_days):
"""Sets the revisions_downward_last_120_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 120 days # noqa: E501
:param revisions_downward_last_120_days: The revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_120_days = revisions_downward_last_120_days
@property
def revisions_upward_last_150_days(self):
"""Gets the revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 150 days # noqa: E501
:return: The revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_150_days
@property
def revisions_upward_last_150_days_dict(self):
"""Gets the revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 150 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_150_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_150_days': value }
return result
@revisions_upward_last_150_days.setter
def revisions_upward_last_150_days(self, revisions_upward_last_150_days):
"""Sets the revisions_upward_last_150_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 150 days # noqa: E501
:param revisions_upward_last_150_days: The revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_150_days = revisions_upward_last_150_days
@property
def revisions_downward_last_150_days(self):
"""Gets the revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 150 days # noqa: E501
:return: The revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_150_days
@property
def revisions_downward_last_150_days_dict(self):
"""Gets the revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 150 days as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_150_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_150_days': value }
return result
@revisions_downward_last_150_days.setter
def revisions_downward_last_150_days(self, revisions_downward_last_150_days):
"""Sets the revisions_downward_last_150_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 150 days # noqa: E501
:param revisions_downward_last_150_days: The revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_150_days = revisions_downward_last_150_days
@property
def security(self):
"""Gets the security of this ZacksLongTermGrowthRate. # noqa: E501
The Security of the Zacks long term growth estimate # noqa: E501
:return: The security of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: SecuritySummary
"""
return self._security
@property
def security_dict(self):
"""Gets the security of this ZacksLongTermGrowthRate. # noqa: E501
The Security of the Zacks long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The security of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: SecuritySummary
"""
result = None
value = self.security
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'security': value }
return result
@security.setter
def security(self, security):
"""Sets the security of this ZacksLongTermGrowthRate.
The Security of the Zacks long term growth estimate # noqa: E501
:param security: The security of this ZacksLongTermGrowthRate. # noqa: E501
:type: SecuritySummary
"""
self._security = security
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ZacksLongTermGrowthRate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
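# Hedged usage sketch, kept outside the generated class in line with the
# "do not edit the class manually" note above. The field values are made up
# for illustration.
if __name__ == '__main__':
    growth = ZacksLongTermGrowthRate(mean=0.12, median=0.11, count=8,
                                     high=0.20, low=0.05)
    print(growth.mean_dict)            # {'mean': 0.12}
    print(growth.to_dict()['median'])  # 0.11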
| [
"[email protected]"
]
| |
835fa05e3acbfa8beb25f0ef8b975295d4668c90 | 20002b0c41f0ff67553ea7ffb6568975792d8c95 | /main.py | 37a235d422efc3a4b23733caa5426aac6eee1393 | []
| no_license | hugos0910/Data_Science_London | be18667fd0121ba0b2549f02263fcad6d2a54448 | 74dcdd7d2504f0ba9be9b58acb01d2e305827b12 | refs/heads/master | 2021-01-18T06:00:53.529998 | 2016-09-21T17:17:48 | 2016-09-21T17:17:48 | 68,791,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py |
import numpy as np
import pandas as pd
import util
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.svm import SVC
# Import data
print('Importing data...')
train = pd.read_csv('train.csv', header = None)
test = pd.read_csv('test.csv', header = None)
label = pd.read_csv('trainLabels.csv', header = None)
label = np.ravel(label)
# Cleaning data
print('Sanitizing data...')
pca = PCA(n_components = 12, whiten = True)
train = pca.fit_transform(train)
test = pca.transform(test)
# # Obtain best parameters
# num_processor = -1
# util.obtain_parameters('RF', train, label, num_processor)
# util.obtain_parameters('ET', train, label, num_processor)
# util.obtain_parameters('SVM', train, label, num_processor)
# util.obtain_parameters('KNN', train, label, num_processor)
# util.obtain_parameters('LR', train, label, num_processor)
# Training classifier
'''
classifier abbreviations:
RF - Random Forest
ET - Extra Trees
SVM - Support Vector Machine
KNN - K Nearest Neighbors
LR - Logistic Regression
'''
classifier_name = 'SVM'
print('Training and prediction with %s classifier...' %classifier_name)
prediction = util.classify(classifier_name, train, label, test)
# Exporting solution
index = list(range(1, len(test) + 1))
print('Writing data to CSV file...')
df_prediction = pd.DataFrame(data = prediction, index = index, columns = ['Solution'])
df_prediction_csv = df_prediction.to_csv('prediction_%s.csv' % classifier_name, index_label = ["Id"])
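# Hedged sketch of the local util.classify helper used above. The real util
# module is not shown in this file, so the body below is an assumption that
# mirrors the call signature (classifier_name, train, label, test).
def _classify_sketch(classifier_name, train, label, test):
    from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.neighbors import KNeighborsClassifier
    models = {
        'RF': RandomForestClassifier(),
        'ET': ExtraTreesClassifier(),
        'SVM': SVC(),
        'KNN': KNeighborsClassifier(),
        'LR': LogisticRegression(),
    }
    model = models[classifier_name]
    model.fit(train, label)
    return model.predict(test)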
| [
"[email protected]"
]
| |
67cd9c4d1cbc6de5a7be578e14c812fc18dd3f18 | 2edbe77571e522722a759921cd45cf6ff540e87d | /quene/marketmodel.py | da9893a00121afcdaeb56f737460e67203feb162 | []
| no_license | nasty11pig/pyStudy | 5253da4334c3e51bff938d1343a85ff76cd1c0d6 | da33c6c59bc84689c4aae8771be4ad36671ab5bf | refs/heads/master | 2020-03-17T01:08:35.501268 | 2018-05-26T15:51:38 | 2018-05-26T15:51:38 | 133,141,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | # -*- coding: utf-8 -*-
class MarketModel():
def __init__(self, lengthofSimulation, averageTimePerCus,
probabilityofNewArrival):
self._probabilityofNewArrival = probabilityofNewArrival
self._averageTimeperCus = averageTimePerCus
self._lenthofSimulation = lengthofSimulation
self._cashier = Cashier()
def runSimulation(self):
for currentTime in range(self._lengthofSimulation):
customer = Customer.generateCustomer(
self._probabilityofNewArrival,
currentTime,
                self._averageTimePerCus)
if customer != None:
self._cashier.addCustomer(customer)
self._cashier.serveCustomers(currentTime)
def __str__(self):
return str(self._cashier)
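# Hedged sketch: Cashier and Customer are referenced above but not defined in
# this file (they presumably live elsewhere in the repository). The minimal
# stand-ins below are assumptions that let the module run on its own.
import random
from collections import deque

class Customer(object):
    """Stand-in customer: arrival time plus remaining service time."""
    @classmethod
    def generateCustomer(cls, probabilityOfNewArrival, currentTime, averageTimePerCus):
        # With the given probability, a new customer arrives at currentTime.
        if random.random() <= probabilityOfNewArrival:
            return cls(currentTime, averageTimePerCus)
        return None
    def __init__(self, arrivalTime, serviceNeeded):
        self._arrivalTime = arrivalTime
        self._serviceNeeded = serviceNeeded

class Cashier(object):
    """Stand-in cashier: serves one customer at a time from a FIFO queue."""
    def __init__(self):
        self._queue = deque()
        self._current = None
        self._customersServed = 0
        self._totalWaitTime = 0
    def addCustomer(self, customer):
        self._queue.append(customer)
    def serveCustomers(self, currentTime):
        # Pick up the next waiting customer, then serve one time unit.
        if self._current is None:
            if not self._queue:
                return
            self._current = self._queue.popleft()
            self._totalWaitTime += currentTime - self._current._arrivalTime
            self._customersServed += 1
        self._current._serviceNeeded -= 1
        if self._current._serviceNeeded <= 0:
            self._current = None
    def __str__(self):
        return ('Customers served: %d, total wait time: %d' %
                (self._customersServed, self._totalWaitTime))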
| [
"[email protected]"
]
| |
ab7427ee9ab9e98e843967f814b678f41e26f819 | a0406e59552037a3277d51eb9994565d3f557d7f | /book_lib/presentation/resource/book.py | c60de1f3f7e52df92f27bfa9d0b5430f5ae8b497 | []
| no_license | eezhal92/flask-sqlalchemy | 84bbf18fc041a2339842d56a7924a10f0332b975 | 7092651a5b5b73f1366c1c2473aab9b1c4eedea5 | refs/heads/master | 2021-07-12T10:10:54.572392 | 2017-10-17T11:47:39 | 2017-10-17T11:47:39 | 106,980,363 | 0 | 1 | null | 2017-10-17T11:47:40 | 2017-10-15T03:19:24 | Python | UTF-8 | Python | false | false | 454 | py | """Books Controller."""
from flask import jsonify
from flask_restful import Resource
from book_lib.infrastructure.repository.book import BookRepository
class Book(Resource):
"""."""
def __init__(self, **kwargs):
"""."""
self.db = kwargs['db']
self.book_repo = BookRepository(self.db)
def get(self):
"""."""
books = [b.serialize() for b in self.book_repo.find_all()]
return jsonify(books)
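# Hedged wiring sketch: flask_restful passes constructor kwargs through
# resource_class_kwargs, which matches the **kwargs handling in __init__
# above. The app setup and the db import path are assumptions.
# from flask import Flask
# from flask_restful import Api
# from book_lib.infrastructure.db import db  # hypothetical import path
#
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(Book, '/books', resource_class_kwargs={'db': db})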
| [
"[email protected]"
]
| |
f354b13480738cfc382c8ee67d5d250310c01f13 | eebeeb2c31dc90b21878196502efec9086b87c46 | /07/vmparser.py | b63709acbddb5f5c3126b6dab11a6926fae4627c | []
| no_license | festa78/nand2tetris | 6f07673d69277125331f7536f18214dce64d8008 | 815f887c584d3de91591abe44123ee5c3b006575 | refs/heads/master | 2023-03-06T00:46:01.412600 | 2021-02-07T12:35:33 | 2021-02-22T08:53:08 | 328,305,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | class Parser:
COMMAND_TYPES = (
'C_ARITHMETIC',
'C_PUSH',
'C_POP',
'C_LABEL',
'C_GOTO',
'C_IF',
'C_FUNCTION',
'C_RETURN',
'C_CALL',
)
COMMAND_TO_TYPES = {
'and': COMMAND_TYPES[0],
'or': COMMAND_TYPES[0],
'not': COMMAND_TYPES[0],
'add': COMMAND_TYPES[0],
'sub': COMMAND_TYPES[0],
'neg': COMMAND_TYPES[0],
'eq': COMMAND_TYPES[0],
'lt': COMMAND_TYPES[0],
'gt': COMMAND_TYPES[0],
'push': COMMAND_TYPES[1],
'pop': COMMAND_TYPES[2],
}
def __init__(self, vmpath):
if not vmpath.endswith('.vm'):
raise ValueError('Not an .vm file but {}'.format(vmpath))
self.commands = []
with open(vmpath, 'r') as f:
for line in f.read().splitlines():
line = line.strip()
if not line:
continue
if line.startswith('//'):
continue
if '//' in line:
line = line.split('//')[0].strip()
self.commands.append(line)
self.index = 0
def hasMoreCommands(self):
return self.index + 1 < len(self.commands)
def advance(self):
assert self.hasMoreCommands()
self.index += 1
def commandType(self):
current_command = self.commands[self.index]
command_name = current_command.split(' ')[0]
if command_name not in self.COMMAND_TO_TYPES.keys():
raise AttributeError('Unsupported command')
return self.COMMAND_TO_TYPES[command_name]
def arg1(self):
command_type = self.commandType()
assert command_type != self.COMMAND_TYPES[7], 'Not for C_RETURN'
current_command = self.commands[self.index]
        if command_type == self.COMMAND_TYPES[0]:
# C_ARITHMETIC.
return current_command.split(' ')[0]
return current_command.split(' ')[1]
def arg2(self):
command_type = self.commandType()
current_command = self.commands[self.index]
if command_type == self.COMMAND_TYPES[1]:
# C_PUSH.
return current_command.split(' ')[2]
elif command_type == self.COMMAND_TYPES[2]:
# C_POP.
return current_command.split(' ')[2]
elif command_type == self.COMMAND_TYPES[6]:
# C_FUNCTION.
return current_command.split(' ')[2]
elif command_type == self.COMMAND_TYPES[8]:
# C_CALL.
return current_command.split(' ')[2]
else:
            raise ValueError('Unsupported command type.')
| [
"[email protected]"
]
| |
58529c5f530f1fe4e00a6432565eb83f72ad4840 | f9c3c75718f5cfb3827658f8cedaa1fb8e289011 | /BujaMovies/migrations/0018_auto_20190903_2229.py | 77723265ca8edf07a4838817bed39093d340faac | []
| no_license | twizy/BullyHoodyGit | 7559c70269cd3b0abafb662a56fa33d95df9d6ff | 1a75617954428c70c82cf355f5aab0cb822ebc7a | refs/heads/master | 2020-07-12T21:25:29.609503 | 2019-09-03T21:50:57 | 2019-09-03T21:50:57 | 204,909,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # Generated by Django 2.2.3 on 2019-09-03 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BujaMovies', '0017_auto_20190903_2118'),
]
operations = [
migrations.AddField(
model_name='films',
name='film',
field=models.FileField(blank=True, null=True, upload_to='Videos/'),
),
migrations.AlterField(
model_name='films',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='Covers/'),
),
]
| [
"[email protected]"
]
| |
79445dc9be69e70168bbf832fc269c16f8377373 | c5859d1bdf44c8452563f856dc4191b74e85ce21 | /custom_components/image_processing/tagbox.py | 163ce385bf2c8182fd5f439a3f58b3d206199a0e | []
| no_license | balloob/homeassistant-config | 46774ea88ced4414e48e4f1f40af63ff67b6f990 | 9f341e4b695db56f3c4af7299a336d5a0f60cdcf | refs/heads/master | 2020-03-21T03:10:31.729526 | 2018-06-18T18:27:54 | 2018-06-18T18:27:54 | 138,039,924 | 11 | 0 | null | 2018-06-20T13:56:12 | 2018-06-20T13:56:12 | null | UTF-8 | Python | false | false | 4,157 | py | """
Component that will search images for tagged objects via a local
machinebox instance.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.tagbox
"""
import base64
import requests
import logging
import time
import voluptuous as vol
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
PLATFORM_SCHEMA, ImageProcessingEntity, CONF_SOURCE, CONF_ENTITY_ID,
CONF_NAME, DOMAIN)
_LOGGER = logging.getLogger(__name__)
CONF_ENDPOINT = 'endpoint'
CONF_TAGS = 'tags'
ROUNDING_DECIMALS = 2
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ENDPOINT): cv.string,
vol.Optional(CONF_TAGS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the classifier."""
entities = []
for camera in config[CONF_SOURCE]:
entities.append(Tagbox(
camera.get(CONF_NAME),
config[CONF_ENDPOINT],
camera[CONF_ENTITY_ID],
config[CONF_TAGS],
))
add_devices(entities)
class Tagbox(ImageProcessingEntity):
"""Perform a tag search via a Tagbox."""
def __init__(self, name, endpoint, camera_entity, tags):
"""Init with the API key and model id"""
super().__init__()
if name: # Since name is optional.
self._name = name
else:
self._name = "Tagbox {0}".format(
split_entity_id(camera_entity)[1])
self._camera = camera_entity
self._default_tags = {tag: 0.0 for tag in tags}
self._tags = self._default_tags
self._url = "http://{}/tagbox/check".format(endpoint)
self._state = "no_processing_performed"
self._response_time = None
def process_image(self, image):
"""Process an image."""
timer_start = time.perf_counter()
try:
response = requests.post(
self._url,
json=self.encode_image(image)
).json()
except:
response = {'success': False}
if response['success']:
elapsed_time = time.perf_counter() - timer_start
self._response_time = round(elapsed_time, ROUNDING_DECIMALS)
self._tags, self._state = self.process_response(response)
else:
self._state = "Request_failed"
self._tags = self._default_tags
def encode_image(self, image):
"""base64 encode an image stream."""
base64_img = base64.b64encode(image).decode('ascii')
return {"base64": base64_img}
def process_response(self, response):
"""Process response data, returning the processed tags and state."""
tags = self._default_tags.copy()
tags.update(self.process_tags(response['tags']))
if response['custom_tags']:
tags.update(self.process_tags(response['custom_tags']))
# Default tags have probability 0.0 and cause an exception.
try:
state = max(tags.keys(), key=(lambda k: tags[k]))
except:
state = "No_tags_identified"
return tags, state
def process_tags(self, tags_data):
"""Process tags data, returning the tag and rounded confidence."""
processed_tags = {
tag['tag'].lower(): round(tag['confidence'], ROUNDING_DECIMALS)
for tag in tags_data
}
return processed_tags
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attr = self._tags.copy()
attr.update({'response_time': self._response_time})
return attr
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
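# Example configuration.yaml entry (an illustrative sketch; the endpoint and
# entity id below are placeholders, not values taken from this repository):
#
#   image_processing:
#     - platform: tagbox
#       endpoint: localhost:8080
#       tags:
#         - dog
#         - cat
#       source:
#         - entity_id: camera.front_door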
| [
"[email protected]"
]
| |
a39cbb706ac3420712b45eb050eae01efddba13e | 1e3f458b297b349eb875aebab254e05cdad2458e | /guessno.py | 6d6b1cee5d25c4ad5b2e5dd171bb21ffbf8c8694 | []
| no_license | mezeru/Python_Coding | 899169e162d01a2a1f6f043e45f3b07dc68e1001 | 99941431025b5c35731903dabb6c9e6106f59fcc | refs/heads/master | 2023-07-04T11:51:28.174018 | 2021-08-06T20:05:58 | 2021-08-06T20:05:58 | 255,226,334 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import random
def rnum():
return (random.randint(0,10))
fnum=rnum()
cou = 0
while True:
    print("Guess the number:")
    cou = cou + 1
    G = int(input())
    if fnum == G:
        print("You guessed right in " + str(cou) + " guesses")
        break
    elif fnum > G:
        print("You guessed low")
        continue
    else:
        print("You guessed high")
        continue
| [
"[email protected]"
]
| |
c261e60b78e10d19087b88625ff766401bcafd78 | 0bcd538401c0f7ffa61047c44ca5f497afc56f82 | /datasets/flowers.py | ed865e8b5d968ed02fc87f1c1cd0d06a01e91ad0 | [
"Apache-2.0"
]
| permissive | Ningchen-Wang/DCGAN | fae14ae1bb6147caf600396d1689cc3877dacb37 | 4ba9fd4cdb89c809e90511427f85e88a589671be | refs/heads/master | 2021-08-10T11:09:44.577277 | 2017-11-12T13:36:22 | 2017-11-12T13:36:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the flowers dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/slim/datasets/download_and_convert_flowers.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
_FILE_PATTERN = 'flowers_%s_*.tfrecord'
SPLITS_TO_SIZES = {'train': 3320, 'validation': 350}
_NUM_CLASSES = 5
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying size.',
'label': 'A single integer between 0 and 4',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading flowers.
Args:
split_name: A train/validation split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/validation split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
  print('Reading TFRecords matching: %s' % file_pattern)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
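def _example_provider(dataset_dir='/tmp/flowers'):
  """Usage sketch (an assumption, not part of the original file).

  Feeds the split into a slim DatasetDataProvider to obtain image/label
  tensors; dataset_dir is a placeholder path to the converted TFRecords.
  """
  dataset = get_split('train', dataset_dir)
  provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
  image, label = provider.get(['image', 'label'])
  return image, label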
| [
"[email protected]"
]
| |
a3832070b1ec7002d6f2dd0a9f5bd280d29a3962 | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/keras/layers/cudnn_recurrent 2.py | 96ae66c775e623fff4738688d4f11005c5261b33 | []
| no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:52c49577848819c4116b99c29c11e765e7a2d686e7ccb4dc7b84454bdf31510f
size 20854
| [
"[email protected]"
]
| |
088293b7dea6a8f0f04a083b5246bc45e276a471 | c584fd0c13d2f396aaf940e9d4e774f01ce5e2ce | /apps/users/apps.py | 5838b89ccd4979ad020b44afbd2652bfef00306a | []
| no_license | wxrapha/RoadBicycle | ee07601458b5af72b1a853b2c4c969d6cdf81ae8 | 0f990267a47b738e97880c999ed3bc054c2889c7 | refs/heads/master | 2021-07-10T13:30:25.484884 | 2017-10-10T23:52:14 | 2017-10-10T23:52:20 | 103,798,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'apps.users'
    verbose_name = u'用户信息'  # "User information"
| [
"[email protected]"
]
| |
170eb1cf38678e8baf10258b548535244e7f2996 | 12df1e58fe493c4a929e6d54a938f9b357964701 | /Day-5 Closest Value in BST.py | 68d748a07e7b9618aa7fce5bd8d1b7190170c74e | []
| no_license | Anshul-Dagar/100-Day-Coding-Challenge | 132dadc50b572428c7e33ceda329770d8766965a | 33f10cc6357d4ca3fa8a16cc954f6559f39e73bb | refs/heads/main | 2023-02-12T04:04:12.389433 | 2021-01-09T13:56:36 | 2021-01-09T13:56:36 | 326,140,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 19:30:49 2021
@author: ironman
"""
class BST:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
def insert(self,value):
currentnode=self
while True:
if value<currentnode.value:
if currentnode.left is None:
currentnode.left=BST(value)
break
else:
currentnode=currentnode.left
else:
if currentnode.right is None:
currentnode.right=BST(value)
break
else:
currentnode=currentnode.right
return self
def contain(self,value):
currentnode=self
while currentnode is not None:
if value<currentnode.value:
currentnode=currentnode.left
elif value>currentnode.value:
currentnode=currentnode.right
else:
return True
return False
def findClosestValueInBst(target,tree):
return findClosestValueInBstHelper(target,tree,float("inf"))
def findClosestValueInBstHelper(target,tree,closest):
currentnode=tree
while currentnode is not None:
if abs(target-closest)>abs(target-currentnode.value):
closest=currentnode.value
if target>currentnode.value:
currentnode=currentnode.right
elif target<currentnode.value:
currentnode=currentnode.left
else:
break
return closest
tree=BST(10)
tree.insert(5)
tree.insert(15)
tree.insert(2)
tree.insert(5)
tree.insert(1)
ans=findClosestValueInBst(9,tree)
print(ans) | [
"[email protected]"
]
| |
889f0a628633729df0d4a4bc22d12bf0d72f43b7 | 548e34ebef0904ad21efa3fd53a10b6af1011152 | /uva/10180 - geom/gen_data.py | 8b3c2f71c5cacf2a069b847041645d4e8e623ae2 | []
| no_license | eric7237cire/CodeJam | 1646f43ec34b87edf8029b839b01fb0d667758ce | ca9dd1ddad5c42a02731c3c7c8e4342bd5c9ee9d | refs/heads/master | 2021-06-08T17:47:53.204535 | 2018-01-01T15:34:29 | 2018-01-01T15:34:29 | 1,331,950 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | import sys
import random
import math
random.seed()
T = 30000
MAX = .1
print( str(T) )
for t in range(T):
r = random.random() * MAX
    pts = []  # renamed from `list` to avoid shadowing the built-in
    while len(pts) < 4:
        x = -MAX + random.random() * MAX * 2
        y = -MAX + random.random() * MAX * 2
        if math.hypot(x, y) <= r + .00001:
            continue
        pts.append(x)
        pts.append(y)
    pts.append(r)
    print( " ".join([ "%.5f" % v for v in pts ] ) )
| [
"[email protected]"
]
| |
4f763a66d6c6077358c6dadee57b52bddcadf918 | 2a9572e6f1cfb329a12d6835071483ec89ec6538 | /flask_test/flask_blog.py | 1ceec00beaf74f9b274f237f1860dfff21615f7f | []
| no_license | Cezar04/petproject | c8a4c810a8b05d0645dc36601539034dc35be6b5 | 9093d2435f779235db5f9e79417395e4dd13e8b0 | refs/heads/master | 2022-11-13T11:12:29.785362 | 2020-07-03T11:14:25 | 2020-07-03T11:14:25 | 276,875,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | from flask import Flask, render_template, url_for, flash, redirect, request
from forms import registration_form, login_form, post_form
import data_manager
app = Flask(__name__)
app.config['SECRET_KEY'] = 'haker'
posts = [
{"author":"Gigel",
"title": "blog post 1",
"content":"First post content",
"date_posted": "marite 200001"},
{"author":"Gina gaina",
"title": "blog post 2",
"content":"First post content",
"date_posted": "marite 202"}
]
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html', posts=posts)
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/register', methods=["GET", "POST"])
def register():
form = registration_form()
if form.validate_on_submit():
flash(f"Account created for {form.username.data}!", "success")
return redirect(url_for("home"))
return render_template("register.html", title="Register", form=form)
@app.route('/login', methods=["GET", "POST"])
def login():
form = login_form()
if form.validate_on_submit():
if form.email.data == "[email protected]" and form.password.data == "1234":
flash('You are logged in!', 'success')
return redirect(url_for('home'))
else:
flash("Login failed, check username and password", 'danger')
return render_template("login.html", title="Login", form=form)
@app.route('/post/new', methods=["GET", "POST"])
def new_post():
form = post_form()
if form.validate_on_submit():
# post = posts(title=form.title.data, author=form.content.data, content=form.content.data)
flash("Post Created", "success")
return redirect(url_for("home"))
return render_template("create_post.html", title="New Post", form=form)
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
]
| |
6e40ec6f6b3b14aa33b9e1e5a07f218ba7ee36e0 | 00d2f3fde2c3d9e03a1babc958e35285d5798352 | /removedependent.py | 626bf7416873208dd75191cd10f065def3a4c318 | []
| no_license | N-S-Krishnan/Database-GUI-Elmasri-and-Navathe- | 14043e90c2e25e6c5ab080cc5efe985731479b93 | f8a60edad75505ad0587f3a3562cfc14cc0d018f | refs/heads/main | 2023-04-22T07:34:54.141788 | 2021-04-26T01:07:05 | 2021-04-26T01:07:05 | 361,572,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,474 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'removedependent.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QTableWidgetItem
import mysql.connector
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QRegExpValidator
class Ui_RemoveDependent(object):
passedssn = -1
deldepname = ""
db = None # mysql connection
def __init__(self, obj):
self.passedssn = obj.textEdit.text()
    def setupUi(self, RemoveDependent):
        RemoveDependent.setObjectName("RemoveDependent")
        RemoveDependent.resize(700, 505)
        # Keep a reference to the dialog widget so reject() can close it later.
        self.dialog = RemoveDependent
self.buttonBox = QtWidgets.QDialogButtonBox(RemoveDependent)
self.buttonBox.setGeometry(QtCore.QRect(310, 420, 321, 31))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayoutWidget = QtWidgets.QWidget(RemoveDependent)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 80, 641, 201))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.tabdependents = QtWidgets.QTableWidget(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabdependents.sizePolicy().hasHeightForWidth())
self.tabdependents.setSizePolicy(sizePolicy)
self.tabdependents.setMinimumSize(QtCore.QSize(639, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.tabdependents.setFont(font)
self.tabdependents.setAutoFillBackground(True)
self.tabdependents.setGridStyle(QtCore.Qt.SolidLine)
self.tabdependents.setRowCount(10)
self.tabdependents.setColumnCount(4)
self.tabdependents.setObjectName("tabdependents")
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(3, item)
self.tabdependents.horizontalHeader().setSortIndicatorShown(False)
self.verticalLayout.addWidget(self.tabdependents)
self.label_2 = QtWidgets.QLabel(RemoveDependent)
self.label_2.setGeometry(QtCore.QRect(30, 360, 161, 16))
self.label_2.setObjectName("label_2")
self.empssn = QtWidgets.QLineEdit(RemoveDependent)
self.empssn.setGeometry(QtCore.QRect(90, 20, 101, 31))
self.empssn.setObjectName("empssn")
self.gobutton = QtWidgets.QPushButton(RemoveDependent)
self.gobutton.setGeometry(QtCore.QRect(40, 420, 93, 28))
self.gobutton.setObjectName("gobutton")
self.dname = QtWidgets.QTextEdit(RemoveDependent)
self.dname.setGeometry(QtCore.QRect(230, 350, 271, 31))
self.dname.setObjectName("dname")
self.label = QtWidgets.QLabel(RemoveDependent)
self.label.setGeometry(QtCore.QRect(30, 20, 71, 21))
self.label.setObjectName("label")
self.empname = QtWidgets.QLabel(RemoveDependent)
self.empname.setGeometry(QtCore.QRect(240, 20, 71, 21))
self.empname.setObjectName("empname")
self.empname_2 = QtWidgets.QTextEdit(RemoveDependent)
self.empname_2.setGeometry(QtCore.QRect(310, 20, 261, 31))
self.empname_2.setObjectName("empname_2")
self.label_6 = QtWidgets.QLabel(RemoveDependent)
self.label_6.setGeometry(QtCore.QRect(30, 310, 121, 16))
self.label_6.setObjectName("label_6")
self.depcount = QtWidgets.QTextEdit(RemoveDependent)
self.depcount.setGeometry(QtCore.QRect(210, 300, 31, 31))
self.depcount.setObjectName("depcount")
self.retranslateUi(RemoveDependent)
self.buttonBox.rejected.connect(RemoveDependent.reject)
QtCore.QMetaObject.connectSlotsByName(RemoveDependent)
self.empssn.setText(self.passedssn)
self.empssn.setDisabled(True)
self.select_data()
self.tabdependents.clicked.connect(self.select_depname)
self.gobutton.clicked.connect(self.processdelete)
def retranslateUi(self, RemoveDependent):
_translate = QtCore.QCoreApplication.translate
RemoveDependent.setWindowTitle(_translate("RemoveDependent", "RemoveDependent"))
self.tabdependents.setSortingEnabled(True)
item = self.tabdependents.horizontalHeaderItem(0)
item.setText(_translate("RemoveDependent", "Name"))
item = self.tabdependents.horizontalHeaderItem(1)
item.setText(_translate("RemoveDependent", "Sex"))
item = self.tabdependents.horizontalHeaderItem(2)
item.setText(_translate("RemoveDependent", "Date of Birth"))
item = self.tabdependents.horizontalHeaderItem(3)
item.setText(_translate("RemoveDependent", "Relationship"))
self.label_2.setText(_translate("RemoveDependent", "Name to Delete:"))
self.gobutton.setText(_translate("RemoveDependent", "Delete"))
self.label.setText(_translate("RemoveDependent", "Emp SSN"))
self.empname.setText(_translate("RemoveDependent", "Emp Name"))
self.label_6.setText(_translate("RemoveDependent", "Dependent Count"))
def select_data(self):
# Retrieve data on existing dependents that correspond to an ssn value given
try:
#print("select dependents")
self.db = mysql.connector.connect(option_files='mydb.conf')
cursor = self.db.cursor()
cursor.execute("select concat(fname, ' ', minit,' ', lname) empname from employee where ssn =" + str(self.passedssn) +
" for update ")
for row in cursor:
self.empname_2.setText(row[0])
#print(row)
self.empname_2.setDisabled(True)
nrows = cursor.rowcount
#print('nrows', nrows)
if nrows <= 0 :
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("No employee with ssn "+ str(self.passedssn))
msg.setWindowTitle("Add Dependent")
msg.exec()
self.reject()
cursor.execute("select dependent_name, sex, bdate, relationship from dependent where essn =" + str(self.passedssn) +
" for update ")
result = cursor.fetchall()
nrows = cursor.rowcount
self.depcount.setText(str(nrows))
self.depcount.setDisabled(True)
self.tabdependents.setRowCount(0)
for rnum, rdata in enumerate(result):
self.tabdependents.insertRow(rnum)
for colnum, cdata in enumerate(rdata):
self.tabdependents.setItem(rnum, colnum,QTableWidgetItem(str(cdata)))
#self.tabdependents.setDisabled(True)
except mysql.connector.Error as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("SQL Error "+ str(e.msg))
msg.setWindowTitle("Add Dependent")
msg.exec()
def select_depname(self, item):
cellContent = item.data()
#print(cellContent) # test
#sf = "You clicked on {} {}".format(item.row(), item.column())
#print(sf)
myrow = item.row()
mycol = item.column()
if mycol == 0:
self.dname.setText(cellContent)
self.deldepname = cellContent
def processdelete(self, item):
        # Compare the selected name, not the QTextEdit widget itself
        # (a widget compared against "" is always truthy).
        if self.deldepname != "":
self.db = mysql.connector.connect(option_files='mydb.conf')
cursor = self.db.cursor()
# The number of variables we pass to the delete query is small enough where we can place them directly into
# the string that forms the sql query
cursor.execute("delete from dependent where essn =" + str(self.passedssn)+" and dependent_name = '"+self.deldepname+"'")
self.db.commit()
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("Deleted dependent with essn "+ str(self.passedssn) + " dep name '" + self.deldepname +"'")
msg.setWindowTitle("Delete Dependent")
msg.exec()
self.dname.setText("")
self.select_data()
    def reject(self):
        #print("in reject")
        # Commit pending changes, then dismiss the generated dialog widget.
        # The original called an undefined self._close() and QDialog.reject(self)
        # on a non-QDialog object; delegate to the stored dialog instead.
        self.db.commit()
        self.dialog.reject()
| [
"[email protected]"
]
| |
574dd0cb3d9db506563978325c04ba1163e9ff93 | 6b00aa745fbd266b0eb5413eed4852aef9294b2b | /voice2voice_pytorch/voice2voice.py | 6c5131e12ca04272744b97ad106d85fb03e1d4c5 | [
"MIT"
]
| permissive | 19ai/voice2voice | 86172969917e5114d1c6eefc15b32f0216e10b6f | 1938737b9c1a15b045a49460e026b533eff62019 | refs/heads/master | 2020-04-23T03:13:21.369798 | 2018-09-22T08:00:13 | 2018-09-22T08:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,630 | py | # coding: utf-8
import torch
from torch import nn
from torch.nn import functional as F
import math
import numpy as np
from .modules import Conv1d, ConvTranspose1d, Embedding, Linear, GradMultiply
from .modules import get_mask_from_lengths, SinusoidalEncoding, Conv1dGLU
def expand_speaker_embed(inputs_btc, speaker_embed=None, tdim=1):
if speaker_embed is None:
return None
# expand speaker embedding for all time steps
# (B, N) -> (B, T, N)
ss = speaker_embed.size()
speaker_embed_btc = speaker_embed.unsqueeze(1).expand(
ss[0], inputs_btc.size(tdim), ss[-1])
return speaker_embed_btc
class Encoder(nn.Module):
def __init__(self, in_dim=513, speaker_style_dim=16,
convolutions=((64, 5, 1), (32, 5, 3), (16, 5, 9)), downsample_t = 4,
dropout=0.1, apply_grad_scaling=False):
super(Encoder, self).__init__()
self.dropout = dropout
self.num_attention_layers = None
self.apply_grad_scaling = apply_grad_scaling
self.downsample_t = downsample_t
# Non causual convolution blocks
in_channels = 64
self.convolutions = nn.ModuleList()
self.speaker_fc1 = Linear(in_dim, in_channels, dropout=dropout)
self.convolutions = nn.ModuleList()
std_mul = 1.0
for (out_channels, kernel_size, dilation) in convolutions:
if in_channels != out_channels:
# Conv1d + ReLU
self.convolutions.append(
Conv1d(in_channels, out_channels, kernel_size=1, padding=0,
dilation=1, std_mul=std_mul))
self.convolutions.append(nn.ReLU(inplace=True))
in_channels = out_channels
std_mul = 2.0
self.convolutions.append(
Conv1dGLU(1, speaker_style_dim,
in_channels, out_channels, kernel_size, causal=False,
dilation=dilation, dropout=dropout, std_mul=std_mul,
residual=True))
in_channels = out_channels
std_mul = 4.0
# Last 1x1 convolution
self.convolutions.append(Conv1d(in_channels, convolutions[len(convolutions)-1][0], kernel_size=1,
padding=0, dilation=1, std_mul=std_mul,
dropout=dropout))
def forward(self, input):
# downsample inputs
x = input[:, 0::self.downsample_t, :].contiguous()
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.speaker_fc1(x)
# B x T x C -> B x C x T
x = x.transpose(1, 2)
# 1D conv blocks
for f in self.convolutions:
x = f(x, None) if isinstance(f, Conv1dGLU) else f(x)
# Back to B x T x C
x = x.transpose(1, 2)
x = x[:, int(0.25*x.size(1)):int(0.75*x.size(1)), :]
x = x.mean(1, True).reshape((x.size(0), x.size(2))).contiguous()
return x
class Decoder(nn.Module):
def __init__(self, in_dim=513, speaker_style_dim=16,
convolutions=((512, 5, 3),)*4,
dropout=0.1, apply_grad_scaling=False):
super(Decoder, self).__init__()
self.dropout = dropout
self.num_attention_layers = None
self.apply_grad_scaling = apply_grad_scaling
self.speaker_fc1 = Linear(speaker_style_dim, in_dim, dropout=dropout)
self.speaker_fc2 = Linear(speaker_style_dim, in_dim, dropout=dropout)
# Non causual convolution blocks
in_channels = in_dim
self.convolutions = nn.ModuleList()
self.convolutions = nn.ModuleList()
std_mul = 1.0
for (out_channels, kernel_size, dilation) in convolutions:
if in_channels != out_channels:
# Conv1d + ReLU
self.convolutions.append(
Conv1d(in_channels, out_channels, kernel_size=1, padding=0,
dilation=1, std_mul=std_mul))
self.convolutions.append(nn.ReLU(inplace=True))
in_channels = out_channels
std_mul = 2.0
self.convolutions.append(
Conv1dGLU(2, speaker_style_dim,
in_channels, out_channels, kernel_size, causal=False,
dilation=dilation, dropout=dropout, std_mul=std_mul,
residual=True))
in_channels = out_channels
std_mul = 4.0
# Last 1x1 convolution
self.convolutions.append(Conv1d(in_channels, in_dim, kernel_size=1,
padding=0, dilation=1, std_mul=std_mul,
dropout=dropout))
def forward(self, x, style):
# x
x = F.dropout(x, p=self.dropout, training=self.training)
# expand speaker style for all time steps
speaker_style_btc = expand_speaker_embed(x, style)
if speaker_style_btc is not None:
speaker_style_btc = F.dropout(speaker_style_btc, p=self.dropout, training=self.training)
x = x + F.softsign(self.speaker_fc1(speaker_style_btc))
# B x T x C -> B x C x T
x = x.transpose(1, 2)
# 1D conv blocks
for f in self.convolutions:
x = f(x, speaker_style_btc) if isinstance(f, Conv1dGLU) else f(x)
# Back to B x T x C
x = x.transpose(1, 2)
if speaker_style_btc is not None:
x = x + F.softsign(self.speaker_fc2(speaker_style_btc))
return torch.sigmoid(x)
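# Shape-check sketch (an assumption, not part of the original file): pushes a
# random spectrogram-like batch through Encoder/Decoder with the default sizes
# (in_dim=513, speaker_style_dim=16); requires the custom layers in .modules.
def _shape_check():
    spec = torch.randn(2, 64, 513)    # (batch, frames, freq bins)
    encoder, decoder = Encoder(), Decoder()
    style = encoder(spec)             # (2, 16) speaker style vector
    out = decoder(spec, style)        # (2, 64, 513), sigmoid-bounded
    return style.shape, out.shape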
| [
"[email protected]"
]
| |
de3fe45a87e82c646b0708bb94ef18a5f539f842 | 4d675034878c4b6510e1b45b856cc0a71af7f886 | /mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py | 06c1de2b9010fef13bd2322bbd3352d82a1f3e2f | [
"Apache-2.0",
"BSD-2-Clause-Views",
"MIT",
"BSD-2-Clause"
]
| permissive | shinya7y/UniverseNet | 101ebc2ad8f15482ee45ea8d6561aa338a0fa49e | 3652b18c7ce68122dae7a32670624727d50e0914 | refs/heads/master | 2023-07-22T08:25:42.646911 | 2023-07-08T18:09:34 | 2023-07-08T18:09:34 | 263,555,721 | 407 | 58 | Apache-2.0 | 2023-01-27T01:13:31 | 2020-05-13T07:23:43 | Python | UTF-8 | Python | false | false | 4,482 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
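# Config sketch (illustrative, following the usual mmdet config convention;
# not taken from this repository's config files):
#
#   panoptic_fusion_head=dict(
#       type='HeuristicFusionHead',
#       num_things_classes=80,
#       num_stuff_classes=53),
#   test_cfg=dict(mask_thr_binary=0.5, mask_overlap=0.5, stuff_area_limit=4096)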
| [
"[email protected]"
]
| |
69ef378642a90c904e60bcd86fa6932e967ed311 | 032117bbf248a76abd25fcc2355bc8ade84fa76a | /inheritance_4.py | b62203cddf2bf1a42b3576a58752aaab34cfb71a | []
| no_license | shefaligoel136/python_summer_training | ba8f28f6af008584b4239c73d466e4e9d35b4b01 | 0b97fea050342fe4ed95b18c5f7ed885a6c8ca23 | refs/heads/master | 2022-11-13T07:22:32.855717 | 2020-07-06T08:33:19 | 2020-07-06T08:33:19 | 277,480,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # using super
class a:
def __init__(self):
print("initof A")
def feature1(self):
print("feature 1 is working")
def feature2(self):
print("feature 2 is working")
class b(a):
def __init__(self):
super().__init__()
print("initof B")
def feature3(self):
print("feature 3 is working")
def feature4(self):
print("feature 4 is working")
k = b()
k.feature1()
| [
"[email protected]"
]
| |
bded7a0abc4bf1dc4955561f7e0715bcba19006f | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /codeforces/cf326-350/cf334/b.py | 3d79209e1a77d7ad5f7c126cf1c70b802e0ece89 | []
| no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | # -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
mod = 10**9 + 7
class UnionFind:
def __init__(self, size):
self.rank = [0] * size
self.par = range(size)
self.g_num = size
def find(self, x):
if x == self.par[x]: return x
self.par[x] = self.find(self.par[x])
return self.par[x]
def same(self, x, y):
return self.find(x) == self.find(y)
def unite(self, x, y):
x, y = self.find(x), self.find(y)
if x == y: return
self.g_num -= 1
if (self.rank[x] > self.rank[y]):
self.par[y] = x
else:
self.par[x] = y
if (self.rank[x] == self.rank[y]): self.rank[y] += 1
def group_num(self):
return self.g_num
#prime = [1] * 1000005
#prime[0] = prime[1] = 0
#for i in xrange(int(1000005**0.5) + 1):
# if prime[i]:
# prime[2*i::i] = [0] * len(prime[2*i::i])
p, k = map(int, raw_input().split())
if k == 0:
print pow(p, p - 1, mod)
exit()
uf = UnionFind(p)
cnt = 0
for x in xrange(p):
if x == k*x % p:
if k > 1:
cnt += 1
else:
uf.unite(x, k*x % p)
ans = pow(p, uf.group_num() - cnt, mod)
print ans
| [
"[email protected]"
]
| |
9f173ba385b704082ea7dd42c3c5fcdda3c25bb0 | 8e6c4def374ba21c934f6856c0333a1e8bff69db | /190415/randomTest2.py | 059ba08f9ea23984706e9c2cf9d8f94c9a6c3e4f | []
| no_license | inuse918/Python_Practice_2 | d5a930a95b51181330abc6601d80f71b67780740 | de4dd6ec8d96e9d259566916b9e7f08402e7917d | refs/heads/master | 2020-05-06T13:20:08.153295 | 2019-12-25T23:07:47 | 2019-12-25T23:07:47 | 180,128,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import random
time=random.randint(1,24)
sunny=random.choice([True,False])
print("지금 시각은 ",str(time),"시 입니다.")
#if (sunny==True)and((time>=6)and(time<=9)):
# print("현재 날씨는 화창합니다.")
if sunny:
print ("현재 날씨는 화창합니다.")
else :
print("현재 날씨는 화창하지 않습니다.")
# 종달새가 노래를 할 것인지를 판단해보자
if time>=6 and time<9 and sunny:
print("종달새가 노래를 합니다.")
else:
print("종달새가 노래를 하지 않습니다.")
| [
"[email protected]"
]
| |
be7ba6113a8c980e9031c22cdb4c40498940dcc3 | c6bded1f43beb191c9c5fd7a96edc6f6bc366781 | /src/crawl_CF.py | d56d19a0593dac3dfdc11e4f72b330cb8d5b56d0 | []
| no_license | Renjerhady/EloR | 13d7a432504367755cda2200d08fd209fe7504a8 | 51edfaa260b491a7310815bbf961a3a099becbe7 | refs/heads/master | 2022-02-23T15:59:27.078638 | 2019-10-21T06:43:36 | 2019-10-21T06:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | from bs4 import BeautifulSoup
import requests
import itertools
def get_soup(url):
headers = {
"User-Agent": "Pied Piper (www.arameb.com)",
"From": "Aram Ebtekar"
}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, "html5lib")
return soup
def generate_hrefs(soup, prefix):
for href_tag in soup.find_all("a"):
href = href_tag["href"]
if href.startswith(prefix) and href.find("#") == -1:
href_suffix = href[len(prefix):]
if href_suffix.find("/") == -1:
yield href_suffix
def get_rated_contests(num_pages):
contests = []
for page in range(1, 1 + num_pages):
# Use ru because contests [541,648,649,780,904] were only made available in Russian
page_soup = get_soup(f"https://codeforces.com/contests/page/{page}?locale=ru")
for contest in generate_hrefs(page_soup, "/contest/"):
ratings_soup = get_soup(f"https://codeforces.com/contest/{contest}/ratings")
participants = ratings_soup.find_all(lambda tag: tag.name == "tr" and tag.has_attr("id"))
# Check that there is at least one *rated* participant
if len(participants) != 0:
contests.append(int(contest))
print(contest, flush=True)
list.reverse(contests)
print(f"The full list of {len(contests)} contests is {contests}", flush=True)
return contests
def participant_info(participant):
rank, handle = participant.find_all("td")[:2]
return handle.a.text, int(rank.text)
def save_contest_standings(contests, directory):
for contest in contests:
standings = []
tie_intervals = dict()
for page in itertools.count(1):
page_soup = get_soup(f"https://codeforces.com/contest/{contest}/ratings/page/{page}")
participants = page_soup.find_all(lambda tag: tag.name == "tr" and tag.has_attr("id"))
if page == 1:
title = page_soup.find(attrs={"class": "title"}).a.text.strip()
elif participant_info(participants[0]) == standings[100 * page - 200]:
break
for r, participant in enumerate(participants, len(standings) + 1):
handle, rank = participant_info(participant)
if len(standings) > 0 and standings[-1][1] == rank:
assert rank < r
else:
assert rank == r
standings.append((handle, rank))
tie_intervals[rank] = r
with open(f"{directory}/{contest}.txt", "w+") as standings_file:
standings_file.write(f"{len(standings)} {title}\n")
for handle, rank in standings:
standings_file.write(f"{handle} {rank} {tie_intervals[rank]}\n")
print(f"Standings saved to {contest}.txt")
def save_contests(contests, file):
with open(file, "w+") as contests_file:
contests_file.write(f"{len(contests)}\n")
for contest in contests:
contests_file.write(f"{contest}\n")
print(f"List of contests saved to {file}")
def get_contests(file):
contests_file = open(file, 'r')
return [int(contest) for contest in contests_file][1:]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--pages', type=int, default=1,
help='Number of pages of users whose histories to search.')
args = parser.parse_args()
contests = get_rated_contests(args.pages)[-3:]
# contests = get_contests("../data/all_contests.txt")[-2:]
save_contest_standings(contests, "../standings")
| [
"[email protected]"
]
| |
cd65aa3b646cd5e825a104e8767b2dbe1068af20 | b741252e3677dd2e981d452a14b41b182ebac18b | /hexomap/virtualdiffractor.py | eaab8a5029aa9b9ea1c36e5495136a499c652b89 | [
"BSD-3-Clause"
]
| permissive | KedoKudo/HEXOMAP | 01960559c2c88fc37962f966ed43b13b169bc90f | 81f1c200fe5a3ad035adf22e941e08588192d513 | refs/heads/master | 2020-04-29T05:41:27.724239 | 2019-08-26T19:06:24 | 2019-08-26T19:06:24 | 175,891,640 | 0 | 0 | null | 2019-03-15T21:10:54 | 2019-03-15T21:10:53 | null | UTF-8 | Python | false | false | 7,181 | py | #!/usr/bin/env python
"""
Module of components for virtual diffraction.
"""
import os
import yaml
import numpy as np
from dataclasses import dataclass
from itertools import product
from hexomap.orientation import Frame
from hexomap.npmath import norm
from hexomap.utility import iszero
# -- Define standard frames commmonly used for NF/FF-HEDM --
STD_FRAMES = {
'APS': Frame(
e1=np.array([ 1, 0, 0]), # APS_X
e2=np.array([ 0, 1, 0]), # APS_Y
e3=np.array([ 0, 0, 1]), # APS_Z
o =np.array([ 0, 0, 0]), # rotation stage center
name='aps'
),
"Detector": Frame(
e1=np.array([-1, 0, 0]), # detector_j
e2=np.array([ 0,-1, 0]), # detector_k
e3=np.array([ 0, 0, 1]), # detector_n, norm
o =np.array([ 0, 0, 5]), # rotation stage center, assuming 5mm detector distance
name='detector_1'
),
}
# -- Define the materials data folder direction
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
MATS_DIR = os.path.join(DATA_DIR, 'materials')
@dataclass
class Detector:
frame: "Frame" = STD_FRAMES["Detector"]
resolution: tuple = (2048, 2048) # number of pixels
pix_size: tuple = (0.00148, 0.00148) # mm or m?
# Move
def transform_detector(self, m: np.ndarray) -> None:
"""
Description
-----------
Transfer detector frame using given transformation matrix.
Parameters
----------
m: np.ndarray, (4, 4)
Transformation matrix containing both translation and rotation
Returns
-------
None
"""
pass
# IntersectoinIdx
def acquire_signal(self,
scatter_vec: np.ndarray,
bragg_angle: float,
eta: float,
) -> tuple:
"""
Description
-----------
Parameters
----------
Returns
-------
"""
pass
# BackProj
def back_projection(self,
signal_position: tuple, # (J,K) in pixels
omega: float,
bragg_angle: float,
eta: float,
target_frame: "Frame"
) -> tuple:
"""
"""
pass
@dataclass
class Crystal:
name: str
atoms: list
atomz: list
lattice: str
lattice_constant: list
def __post_init__(self):
# construct the unit cell (prism) for given crystal
self.prism = Crystal.prism_from_lattice_constant(self.lattice_constant)
def structure_factor(self, hkl):
"""Calculate structure factor"""
return np.dot(self.atomz,
np.exp(-2*np.pi*1j*np.dot(np.array(self.atoms), np.array(hkl).reshape((3, 1)))),
)
def scatter_vecs(self, q_max: int) -> list:
"""Generate scattering vectors with Eward sphere capped at q_max"""
recip_prism = Crystal.prism_to_reciprocal(self.prism)
h_max, k_max, l_max = (q_max/norm(recip_prism, axis=0)).astype(int)
hkls = product(range(-h_max, h_max+1),
range(-k_max, k_max+1),
range(-l_max, l_max+1),
)
return [
np.dot(recip_prism, hkl)
for hkl in hkls
if not iszero(sum(map(abs, hkl))) # hkl != [000]
if norm(hkl) <= q_max # within Eward sphere
if not iszero(self.structure_factor(hkl)) # non-vanishing
]
@staticmethod
def load(element:str, name: str) -> 'Crystal':
"""
Description
-----------
Load material config for given materials from data base
Parameters
----------
element: str
main element, for example, titanium for Ti64
name: str
abbreviation for target material, for example Ti64 for Ti-6Al-4V
Returns
-------
Crystal
"""
with open(os.path.join(MATS_DIR, f"{element}.yml"), 'r') as f:
mat_dict = yaml.safe_load(f)['variants'][name]
return Crystal(
name,
[me['pos'] for me in mat_dict['atoms']],
[me['atomic_number'] for me in mat_dict['atoms']],
mat_dict['crystal_structure'],
[val for _, val in mat_dict['lattice_constant'].items()]
)
@staticmethod
def prism_from_lattice_constant(lattice_constant: list,
in_degrees=True,
) -> np.ndarray:
"""
Description
-----------
Calculate the unit cell prism expressed in crystal Frame
Parameters
----------
lattice_constat: list
lattice constants for target crystal
in_degrees: bool
unit of alpha, beta, gamma in lattice constants
Returns
-------
np.ndarray
column-stacked base vectors for the unit cell prism expressed in
crystal frame
"""
a, b, c, alpha, beta, gamma = lattice_constant
if in_degrees:
alpha, beta, gamma = np.radians([alpha, beta, gamma])
# compute unit cell from lattice constants
# ref:
# https://github.com/KedoKudo/cyxtal/blob/master/documentation/dev/development.pdf
c_a, c_b, c_g = np.cos([alpha, beta, gamma])
s_g = np.sin(gamma)
factor = 1 + 2*c_a*c_b*c_g - c_a**2 - c_b**2 - c_g**2
v_cell = a*b*c*np.sqrt(factor)
v1 = [a, 0, 0]
v2 = [b*c_g, b*s_g, 0.0]
v3 = [c*c_b, c*(c_a-c_b*c_g)/(s_g), v_cell/(a*b*s_g)]
return np.column_stack((v1,v2,v3))
@staticmethod
def prism_to_reciprocal(prism: np.ndarray) -> np.ndarray:
"""
Description
-----------
Calcualte the reciprocal dual of given prism (column stacked)
ref:
https://en.wikipedia.org/wiki/Reciprocal_lattice
Parameters
----------
prism: np.ndarray
unit cell prism
Returns
-------
np.ndarray
Reciprocal dual of the unit cell prism
NOTE:
use pinv to avoid singular matrix from ill-positioned problem
"""
return np.transpose(2*np.pi*np.linalg.pinv(prism))
# TODO:
# Finish the collector after the Detector and Crystal class refactor is complete
def collect_virtual_patterns(detector: 'Detector',
xtal: 'Crystal',
):
"""
Generate list of peaks (HEDM patterns) for given crystal(sample) on the target detector
"""
pass
if __name__ == "__main__":
# example_1:
xtal = Crystal.load('gold', 'gold_fcc')
print(xtal.prism)
print(Crystal.prism_to_reciprocal(xtal.prism))
print(norm(Crystal.prism_to_reciprocal(xtal.prism), axis=0))
print(xtal.scatter_vecs(3))
| [
"[email protected]"
]
| |
f4777bda143cb4bb504692f3c4f72056032d0fb3 | ce7c501af175bcf7834d2f2b896bb6b7f8527bce | /main.py | 290f602c537818c5f2dc519cb94786b326e956aa | []
| no_license | Harikrishnan6336/Python_Learn_From_Home | b167657c8a8661dbb87e4c9263f9ab2555af4426 | 7d2567e11e6c45a44627108b194cbbd74c963cd7 | refs/heads/master | 2021-03-30T09:45:01.294468 | 2020-03-17T19:54:10 | 2020-03-17T19:54:10 | 248,039,844 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,640 | py | #This code is meant to be submitted to Python Learn From Home program by TinkerHub
class Tech:
info = {}
#The key of the dictionary is the name of the participant.
#And the value is a list comprising of [stack, designation, available time]
# Adds the tech stack of the participant
def addStacks(self,name):
stack=input("\nThe available Stacks are : Python, GO, Web, UI/UX, Flutter \nEnter a stack you are expert at/interested in[Case sensitive] : ")
self.info[name] = [None, None, None]
if name in self.info:
self.info[name][0] = stack
return
# Sets a participant as a mentor or a learner
# 1 denotes Mentor and 2 denotes Learner
def setMentorOrLearner(self,name):
desig = int(input("\nAre you a \n1.Mentor\n2.Learner\n\nEnter your choice : "))
if name in self.info:
self.info[name][1] = desig
return
# Sets the available time for a mentor
def setAvailableTime(self,name):
if self.info[name][1] == 1 :
available_time=int(input("\nEnter available time(in minutes) : "))
if name in self.info:
self.info[name][2] = available_time
return
    #Gives the mentors satisfying the given specifications
    def getMentor(self,stack,time):
        flag = 0
        print("\nThe available mentors are : ")
        for mentor in self.info:
            # Only mentors (designation 1) have an available time; learners
            # keep None there, and None must not be compared with an int.
            if (self.info[mentor][1] == 1 and self.info[mentor][0] == stack
                    and self.info[mentor][2] is not None
                    and self.info[mentor][2] >= time):
                print("{} ".format(mentor))
                flag = 1
if flag == 0:
print("None")
return
obj = Tech()
go = True
while go:
# A menu-driven program
print("\nWELCOME Tech learner/mentor")
print("\nMENU")
print("\n[1].Enter the details of a participant")
print("[2].Check the availablity of mentors")
print("[3].EXIT")
choice = int(input("\nEnter your choice : "))
if(choice == 1):
name = input("\nEnter your name : ")
obj.addStacks(name)
obj.setMentorOrLearner(name)
obj.setAvailableTime(name)
elif(choice == 2):
stack=input("\nThe available Stacks are : Python, GO, Web, UI/UX, Flutter,\nEnter a stack you are interested in learning [Case sensitive] : ")
time=int(input("Enter the required time you need mentoring for : "))
obj.getMentor(stack,time)
elif(choice == 3):
print("\nExiting \nThank You")
break
else:
print("INVALID CHOICE!!!")
flag = input("\nDo you want to continue (Y/N)? ")
if(flag == 'n' or flag == 'N'):
print("\nExiting \nThank You")
go = False
| [
"[email protected]"
]
| |
92f03d5304cd9df07112f72bed3d35156851705e | f80f7529a68c168fd19dda179ad5ea762a7faa90 | /alembic/versions/ec62bcefe99d_create_categories_table.py | 6205d6232105826d204aa36c96074971f3eba8f4 | [
"MIT"
]
| permissive | luisignaciocc/python-books-scraper | 76b9509444ca52af65411ca821ea9ffd8ff23a03 | a8dba19c1fd49a746711235219eb528a9743d108 | refs/heads/master | 2023-05-06T07:07:03.641242 | 2021-05-26T13:49:19 | 2021-05-26T13:49:19 | 349,744,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """Create categories table
Revision ID: ec62bcefe99d
Revises:
Create Date: 2021-03-28 14:19:59.387880
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ec62bcefe99d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('url', sa.String(length=100), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('url')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('categories')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
098998f8d95c610204722f8f0990286191492db1 | e9a0efee2089b1c3bf843633c7b226638bc09e0d | /DataStructures and Algorithms/Ammortization onArrays/CaesarCipher.py | 5bb577ea9668f61442f19fefda679a1816f4a8c4 | [
"MIT"
]
| permissive | abhishekratnam/Datastructuresandalgorithmsinpython | 41226cf41d288e24dbe9cd9643650151cb2a1037 | 9339319f441755797f4d2818ac9cf742a63ab5ea | refs/heads/master | 2020-04-15T03:16:24.337787 | 2019-02-01T23:47:52 | 2019-02-01T23:47:52 | 164,342,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | class CaesarCipher:
"""Class for doing encryption and decryption using a Caesar Cipher."""
def __init__(self,shift):
"""Construct Caesar Cipher using given integer shift for rotation."""
encoder = [None] * 26
decoder = [None] * 26
for k in range(26):
encoder[k] = chr((k + shift)%26 + ord('A'))
decoder[k] = chr((k - shift)%26 + ord('A'))
self._forward = ''.join(encoder)
self._backward = ''.join(decoder)
def encrypt(self, message):
"""Return string representing encripted message."""
return self._transform(message, self._forward)
def decrypt(self, secret):
"""Returns the decrypted message with given secret."""
return self._transform(secret, self._backward)
def _transform(self, original, code):
"""Utility to perform transformation based on given code string."""
msg = list(original)
for k in range(len(msg)):
if msg[k].isupper():
j = ord(msg[k]) - ord('A')
msg[k] = code[j]
return ''.join(msg)
if __name__ == '__main__':
cipher = CaesarCipher(3)
message = "THE EAGLE IS IN PLAY; MEET AT JOE'S."
coded = cipher.encrypt(message)
print('Secret:', coded)
answer = cipher.decrypt(coded)
print('Message: ', answer)
| [
"[email protected]"
]
| |
5c0d30018cbe2c3ef11519938d2dcc3bbcfa328b | 267ab87884d6c74f8d676c1b6cfebf7e217e2ea7 | /index/views.py | 79a1320fcddf6b714ccc0465ccd2299e1bfd4d22 | []
| no_license | Emehinola/charlotte | 0d564181de1f5419a67c06e7dba5cd81796cb1aa | c3175757f5ce7d3ceab272dad9a866c4bea4bd1d | refs/heads/master | 2023-04-23T00:38:18.965089 | 2021-04-30T19:34:17 | 2021-04-30T19:34:17 | 363,119,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from django.shortcuts import render
from django.views import generic
from blog.models import Article, categories
# Create your views here.
class Home(generic.ListView):
model = Article
paginate_by = 30
template_name = 'index/home.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)  # keep pagination vars
        context.update({
            'must_read': Article.objects.filter(must_read=True)[:5],
            'articles': Article.objects.all(),
            'categories': get_category(),  # call explicitly; don't pass the function
        })
        return context
def get_category(): # return a list of blog categories
raw = []
readable = []
for i in categories:
raw.append(i[0]) # gets the first item of the list of tuples
readable.append(i[1]) # gets the second item of the list of tuples
output = zip(raw, readable)
return output
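# URLconf sketch (an assumption; the project's real urls.py is not shown here):
#
#   from django.urls import path
#   from .views import Home
#
#   urlpatterns = [
#       path('', Home.as_view(), name='home'),
#   ]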
| [
"[email protected]"
]
| |
2c003bc352ae9a2de24edeacf73478e603742dce | d27e62cb69beddbb52c63be4a7a1f54cf258ba67 | /services/users/project/api/models.py | 548b3d7c296c65573824fe23d1bb24b316ab824c | []
| no_license | gavinest/testdriven-app | 7f7f04278f148e42d30fa3b33b84c63bde0888ed | 44d83fc087e2adedeba48adbe739875d427a1de2 | refs/heads/master | 2020-03-29T18:17:21.734301 | 2018-10-24T12:59:35 | 2018-10-24T12:59:35 | 140,979,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | import datetime
from flask import current_app
from sqlalchemy.sql import func
import jwt
from project import db, bcrypt
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(128), unique=True, nullable=False)
email = db.Column(db.String(128), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
active = db.Column(db.Boolean(), default=True, nullable=False)
created_date = db.Column(db.DateTime, default=func.now(), nullable=False)
admin = db.Column(db.Boolean, default=False, nullable=False)
def __init__(self, username, email, password, admin=False):
self.username = username
self.email = email
self.password = bcrypt.generate_password_hash(password).decode()
self.admin = admin
def to_json(self):
return {
'id': self.id,
'username': self.username,
'email': self.email,
'active': self.active,
'admin': self.admin
}
def encode_auth_token(self, user_id):
'''generates the auth token'''
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(
days=current_app.config.get('TOKEN_EXPIRATION_DAYS'),
seconds=current_app.config.get('TOKEN_EXPIRATION_SECONDS')
),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
current_app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
'''
decodes the auth token - :param auth token: - :return: integer|string
'''
try:
payload = jwt.decode(
auth_token, current_app.config.get('SECRET_KEY'))
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
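def _demo_token_roundtrip(app, user):
    """Sketch (an assumption, not part of the original file).

    Inside an app context the encoder and decoder should invert each other
    for any persisted `user` row.
    """
    with app.app_context():
        token = user.encode_auth_token(user.id)
        return User.decode_auth_token(token) == user.id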
| [
"[email protected]"
]
| |
782847f05170889d6029e3cdac7acaa33f694b98 | 075d1037a4fc26dfb6a2255f439d80b634cfcd62 | /DataModel.py | 31ddfe0e2b304a5d8b55833ba0ba7f0b4651f863 | []
| no_license | ducdan/fruitsandgreens | bffe45acb5b1a2570c56238326f4bcc0dc1c3ace | d6b8a8fab16acdb6cc5f4bb5a1983b4cb8cbc756 | refs/heads/master | 2021-01-20T06:05:34.055289 | 2017-04-30T10:19:00 | 2017-04-30T10:19:00 | 89,843,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,957 | py | from flask import Flask, render_template,request
from flask_sqlalchemy import SQLAlchemy, Model
from sqlalchemy import Column, ForeignKey, Integer, String, Text, CHAR, DATE, Float, DECIMAL
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
db = SQLAlchemy(app)
class NCC(db.Model):  # Supplier
ID_NCC = db.Column(db.Integer, autoincrement=True, primary_key=True)
MANCC = db.Column(db.CHAR(7), nullable=False, unique=True)
TENNCC = db.Column(db.Text, nullable=False, unique=True)
DIACHI = db.Column(db.Text)
NGUOILH = db.Column(db.Text)  # Contact person
SDT = db.Column(db.String(11))
Hopdong = db.relationship('HOPDONG', backref='owner_hopdong_ncc', lazy='dynamic')
def __init__(self, MANCC, TENNCC, DIACHI, NGUOILH, SDT):
self.MANCC = MANCC
self.TENNCC = TENNCC
self.DIACHI = DIACHI
self.NGUOILH = NGUOILH
self.SDT = SDT
# ok
class HOPDONG(db.Model):  # Contract
ID_HD = Column(Integer, autoincrement=True, primary_key=True)
MAHD = Column(CHAR(7), nullable=False, unique=True)
TENHD = Column(Text, nullable=False, unique=True)
Id_ncc = Column(Integer, ForeignKey('NCC.ID_NCC'))
phieunhap = db.relationship('PHIEUNHAP', backref='owner_phieunhap_hopdong', lazy='dynamic')
def __init__(self, MAHD, TENHD, idncc):
self.MAHD = MAHD
self.TENHD = TENHD
self.Id_ncc = idncc
# ok
class LOAISP(db.Model):  # Product category
ID_LOAISP = Column(Integer, autoincrement=True, primary_key=True)
MALOAI = Column(String(30), nullable=False, unique=True)
TENLOAI = Column(Text, nullable=False, unique=True)
sanphamnhap = db.relationship('SPN', backref='owner_sanphamnhap_loaisp', lazy='dynamic')
sanphamxuat = db.relationship('SPX', backref='owner_sanphamxuat_loaisp', lazy='dynamic')
def __init__(self, MALOAI, TENLOAI):
self.MALOAI = MALOAI
self.TENLOAI = TENLOAI
# ok
class SPN(db.Model):  # Incoming (purchased) product
ID_SPN = Column(Integer, autoincrement=True, primary_key=True)
MASPN = Column(String(30), nullable=False, unique=True)
TENSPN = Column(Text, nullable=False, unique=True)
Id_loaisp = Column(Integer, ForeignKey('LOAISP.ID_LOAISP'))
phieunhap = db.relationship('PHIEUNHAP', backref='owner_phieunhap_spn', lazy='dynamic')
def __init__(self, MASPN, TENSPN, idlsp):
self.MASPN = MASPN
self.TENSPN = TENSPN
self.Id_loaisp = idlsp
# ok
class SPX(db.Model):  # Outgoing (sold) product
ID_SPX = Column(Integer, autoincrement=True, primary_key=True)
MASPX = Column(String(30), nullable=False, unique=True)
TENSPX = Column(Text, nullable=False, unique=True)
Id_loaisp = Column(Integer, ForeignKey('LOAISP.ID_LOAISP'))
phieuxuat = db.relationship('PHIEUXUAT', backref='owner_phieuxuat_spx', lazy='dynamic')
def __init__(self, MASPX, TENSPX, idlsp):
self.MASPX = MASPX
self.TENSPX = TENSPX
self.Id_loaisp = idlsp
# ok
class LOAIHINHNHAP(db.Model):  # Import type
ID_LHN = Column(Integer, autoincrement=True, primary_key=True)
MALHN = Column(CHAR(7), nullable=False, unique=True)
TENLHN = Column(Text, nullable=False, unique=True)
phieunhap = db.relationship('PHIEUNHAP', backref='owner_phieunhap_loaihinhnhap', lazy='dynamic')
def __init__(self, MALHN, TENLHN):
self.MALHN = MALHN
self.TENLHN = TENLHN
# ok
class PHIEUNHAP(db.Model):  # Goods-receipt note
ID_PN = Column(Integer, autoincrement=True, primary_key=True)
MAPN = Column(CHAR(7), nullable=False, unique=True)
SLNHAP = Column(Float)
SLNTHUC = Column(Float)
DVT = Column(String(20))
CONTAINER_NO = Column(String(20))
NGAYNHAP = Column(DATE)
PRICE = Column(DECIMAL)
TONGTIEN = Column(DECIMAL)
Id_lhn = Column(Integer, ForeignKey('LOAIHINHNHAP.ID_LHN'))
Id_hd = Column(Integer, ForeignKey('HOPDONG.ID_HD'))
Id_nv = Column(Integer, ForeignKey('NHANVIEN.ID_NV'))
Id_spn = Column(Integer, ForeignKey('SPN.ID_SPN'))
kho = db.relationship('KHO', backref='owner_kho_phieunhap', lazy='dynamic')
def __init__(self, MAPN, DVT, SLNHAP, SLNTHUC, CONTAINER_NO, NGAYNHAP, PRICE, TONGTIEN, idlhn, idhd, idnv, idspn):
self.MAPN = MAPN
self.SLNHAP = SLNHAP
self.SLNTHUC = SLNTHUC
self.DVT = DVT
self.CONTAINER_NO = CONTAINER_NO
self.NGAYNHAP = NGAYNHAP
self.PRICE = PRICE
self.TONGTIEN = TONGTIEN
self.Id_lhn = idlhn
self.Id_hd = idhd
self.Id_nv = idnv
self.Id_spn = idspn
# ok
class PHIEUXUAT(db.Model):  # Goods-issue note
ID_PX = Column(db.Integer, autoincrement=True, primary_key=True)
MAPX = Column(CHAR(7), nullable=False, unique=True)
NGAYDATHANG = Column(DATE)
NGAYGIAO = Column(DATE)
PHAN_TRAM_DU_THIEU = Column(Text)
TRANGTHAI = Column(Text)  # Carton or KG
POST_OF_DISCHARGE = Column(Text)
SLXUAT = Column(Text)
SLXTHUC = Column(Text)
DVT = Column(String(20))
PRICE = Column(DECIMAL)
TONGTIEN = Column(DECIMAL)
Id_nv = Column(Integer, ForeignKey('NHANVIEN.ID_NV'))
Id_kh = Column(Integer, ForeignKey('KHACHHANG.ID_KH'))
Id_pt = Column(Integer, ForeignKey('PHUONGTIEN.ID_PT'))
Id_spx = Column(Integer, ForeignKey('SPX.ID_SPX'))
kho = db.relationship('KHO', backref='owner_kho_phieuxuat', lazy='dynamic')
def __init__(self, MAPX, NGAYDATHANG, NGAYGIAO, PHANTRAMDUTHIEU, TRANGTHAI,POST_OF_DISCHARGE, SLXUAT, SLXTHUC, DVT,
PRICE, TONGTIEN, idnv, idkh, idpt, idspx):
self.MAPX = MAPX
self.NGAYDATHANG = NGAYDATHANG
self.NGAYGIAO = NGAYGIAO
self.PHAN_TRAM_DU_THIEU = PHANTRAMDUTHIEU
self.TRANGTHAI = TRANGTHAI
self.POST_OF_DISCHARGE = POST_OF_DISCHARGE
self.SLXUAT = SLXUAT
self.SLXTHUC = SLXTHUC
self.DVT = DVT
self.PRICE = PRICE
self.TONGTIEN = TONGTIEN
self.Id_nv = idnv
self.Id_kh = idkh
self.Id_pt = idpt
self.Id_spx = idspx
# ok
class KHO(db.Model):  # Warehouse
ID_KHO = Column(Integer, autoincrement=True, primary_key=True)
MAKHO = Column(String(10), nullable=False, unique=True)
TENKHO = Column(Text, nullable=False, unique=True)
DIACHI = Column(Text)
SDT = Column(String(11))
Id_pn = Column(Integer, ForeignKey('PHIEUNHAP.ID_PN'))
Id_px = Column(Integer, ForeignKey('PHIEUXUAT.ID_PX'))
def __init__(self, MAKHO, TENKHO, DIACHI, SDT, idpn, idpx):
self.MAKHO = MAKHO
self.TENKHO = TENKHO
self.DIACHI = DIACHI
self.SDT = SDT
self.Id_pn = idpn
self.Id_px = idpx
# ok
class KHACHHANG(db.Model):  # Customer
ID_KH = Column(Integer, autoincrement=True, primary_key=True)
MA_KH = Column(CHAR(7), nullable=False, unique=True)
TENKH = Column(Text, nullable=False, unique=True)
DIACHI = Column(Text)
SDT = Column(String(11))
NGUOILH = Column(Text)  # Contact person
phieuxuat = db.relationship('PHIEUXUAT', backref='owner_phieuxuat_khachhang', lazy='dynamic')
def __init__(self, MA_KH, TENKH, DIACHI, SDT, NGUOILH):
self.MA_KH = MA_KH
self.TENKH = TENKH
self.DIACHI = DIACHI
self.SDT = SDT
self.NGUOILH = NGUOILH
# ok
class PHUONGTIEN(db.Model):  # Vehicle (means of transport)
ID_PT = Column(Integer, autoincrement=True, primary_key=True)
MAPT = Column(CHAR(7), nullable=False, unique=True)
TenPT = Column(Text, nullable=False, unique=True)
phieuxuat = db.relationship('PHIEUXUAT', backref='owner_phieuxuat_phuongtien', lazy='dynamic')
def __init__(self, MAPT, TenPT):
self.MAPT = MAPT
self.TenPT = TenPT
# ok
class NHANVIEN(db.Model):  # Employee
ID_NV = Column(Integer, autoincrement=True, primary_key=True)
MANV = Column(CHAR(7), nullable=False, unique=True)
TENNV = Column(Text, nullable=False, unique=True)
phieunhap = db.relationship('PHIEUNHAP', backref='owner_phieunhap_nhanvien', lazy='dynamic')
phieuxuat = db.relationship('PHIEUXUAT', backref='owner_phieuxuat_nhanvien', lazy='dynamic')
def __init__(self, MANV, TENNV):
self.MANV = MANV
self.TENNV = TENNV
# ok
class XNK(db.Model):  # Import/Export
ID_XNK = Column(Integer, autoincrement=True, primary_key=True)
Shipper = Column(Text)
Cosignee = Column(Text)
ETA = Column(DATE)
Port_of_Discharge = Column(Text)
Invoice = Column(String(10))
Container_No = Column(Text)
Goods = Column(Text)
Carton = Column(Integer)
Price = Column(DECIMAL)
Amount_invoice = Column(DECIMAL)  # fixed: the Column() wrapper was missing
payment_from_Fruits_and_Greens = Column(DECIMAL)
Date_Payment = Column(DATE)
Credit_note = Column(Text)
Balance = Column(DECIMAL)
NOTE = Column(Text)
Load_N0 = Column(Text)
def __init__(self,Shipper,Cosignee,ETA,Port_of_Discharge,Invoice,Container_No,Goods,Carton,Price,Amount_invoice,
payment_from_Fruits_and_Greens,Date_Payment,Credit_note,Balance,NOTE,Load_N0):
self.Shipper = Shipper
self.Cosignee = Cosignee
self.ETA = ETA
self.Port_of_Discharge = Port_of_Discharge
self.Invoice = Invoice
self.Container_No = Container_No
self.Goods = Goods
self.Carton = Carton
self.Price = Price
self.Amount_invoice = Amount_invoice
self.payment_from_Fruits_and_Greens = payment_from_Fruits_and_Greens
self.Date_Payment = Date_Payment
self.Credit_note = Credit_note
self.Balance = Balance
self.NOTE = NOTE
self.Load_N0 = Load_N0
def add_NCC(MANCC, TENNCC, DIACHI, NGUOILH, SDT):
ncc = NCC(MANCC, TENNCC, DIACHI, NGUOILH, SDT)
db.session.add(ncc)
db.session.commit()
def add_HD(MAHD, TENHD, idncc):
hd = HOPDONG(MAHD, TENHD, idncc)
db.session.add(hd)
db.session.commit()
def add_LOAISP(MALOAI, TENLOAI):
lsp = LOAISP(MALOAI, TENLOAI)
db.session.add(lsp)
db.session.commit()
def add_SPN(MASPN, TENSPN, idlsp):
spn = SPN(MASPN, TENSPN, idlsp)
db.session.add(spn)
db.session.commit()
def add_SPX(MASPX, TENSPX, idlsp):
spx = SPX(MASPX, TENSPX, idlsp)
db.session.add(spx)
db.session.commit()
def add_LHN(MALHN, TENLHN):
lhn = LOAIHINHNHAP(MALHN, TENLHN)
db.session.add(lhn)
db.session.commit()
def add_PN(MAPN, DVT, SLNHAP, SLNTHUC, CONTAINER_NO, NGAYNHAP, PRICE, TONGTIEN, idlhn, idhd, idnv, idspn):
pn = PHIEUNHAP(MAPN, DVT, SLNHAP, SLNTHUC, CONTAINER_NO, NGAYNHAP, PRICE, TONGTIEN, idlhn, idhd, idnv, idspn)
db.session.add(pn)
db.session.commit()
def add_PX(MAPX, NGAYDATHANG, NGAYGIAO, PHANTRAMDUTHIEU, TRANGTHAI,POST_OF_DISCHARGE, SLXUAT,SLXTHUC, DVT, PRICE,
TONGTIEN, idnv, idkh, idpt, idspx):
px = PHIEUXUAT(MAPX, NGAYDATHANG, NGAYGIAO, PHANTRAMDUTHIEU, TRANGTHAI,POST_OF_DISCHARGE,SLXUAT, SLXTHUC, DVT,
PRICE, TONGTIEN, idnv, idkh, idpt, idspx)
db.session.add(px)
db.session.commit()
def add_KHO(MAKHO, TENKHO, DIACHI, SDT, idpn, idpx):
kho = KHO(MAKHO, TENKHO, DIACHI, SDT, idpn, idpx)
db.session.add(kho)
db.session.commit()
def add_KH(MA_KH, TENKH, DIACHI, SDT, NGUOILH):
kh = KHACHHANG(MA_KH, TENKH, DIACHI, SDT, NGUOILH)
db.session.add(kh)
db.session.commit()
def add_PT(MAPT, TenPT):
pt = PHUONGTIEN(MAPT, TenPT)
db.session.add(pt)
db.session.commit()
def add_NV(MANV, TENNV):
nv = NHANVIEN(MANV, TENNV)
db.session.add(nv)
db.session.commit()
def add_XNK(Shipper,Cosignee,ETA,Port_of_Discharge,Invoice,Container_No,Goods,Carton,Price,Amount_invoice,
payment_from_Fruits_and_Greens,Date_Payment,Credit_note,Balance,NOTE,Load_N0):
xnk = XNK(Shipper,Cosignee,ETA,Port_of_Discharge,Invoice,Container_No,Goods,Carton,Price,Amount_invoice,
payment_from_Fruits_and_Greens,Date_Payment,Credit_note,Balance,NOTE,Load_N0)
db.session.add(xnk)
db.session.commit()
def hello_world():
print(db)
# db.create_all()
# add_NCC('NCC001', 'Fresh', '123 DBP, Q.1, TP.HCM', 'An', '0838383838')
# add_NCC('NCC002', 'Fruits', '456 DBP, Q.1, TP.HCM', 'Anh', '0837373737')
# add_LOAISP('CA', 'Cam')
# add_LOAISP('TA', 'Tao')
# add_LHN('LHN01', 'Nhap tai Cong ty')
# add_LHN('LHN02', 'Cang')
# add_KH('KH001', 'QUEEN LAND Q2', 'Q.2-TP.HCM', '0147852369', 'Bao')
# add_KH('KH002', 'VINMART A12 PHAN VĂN TRỊ', 'Phan Van Tri-TP.HCM', '0123456789', 'Binh')
# add_PT('XM', 'Xe May')
# add_PT('XT', 'Xe Tai')
# add_NV('NV001', 'Long')
# add_NV('NV002', 'Hung')
# add_HD('HD001', 'Hop dong dai han', 1)
# add_HD('HD002', 'Hop dong ngan han',2)
# add_SPN('CAMYN-40', 'Cam navel My size 40', 1)
# add_SPN('TAMYDOK20-100', 'Tao Đo My King Size 100 thung 20kg', 2)
# add_SPX('CAUCN-48', 'Cam navel Uc size 48', 1)
# add_SPX('TAMYDOK20-113', 'Tao Đo My King Size 113 thung 20kg', 2)
# add_PN('N0001', 'Thung', '50', '50', 'TCLU 1230412', '2017/04/20', '50', '2500',1,1,1,1)
# add_PN('N0002', 'Thung', '60', '60', 'TCLU 1230413', '2017/04/21', '60', '3600',2,2,2,2)
# add_PX('X0001', '2017/04/21', '2017/04/23', '100%', 'Thuc Nhan', 'HCM', '40', '40', 'Thung', '70', '2800',1,1,1,1)
# add_PX('X0002', '2017/04/22', '2017/04/24', '100%', 'Thuc Nhan', 'HaNoi', '50', '50', 'Thung', '80', '4000',2,2,2,2)
# add_KHO('K001', 'Kho Thu Duc', 'So 12 duong 6, P.Linh Chieu, Q.Thu Duc', '0838379828',1,1)
# add_KHO('K002', 'Kho Tran Dinh Xu', '137/31 Tran Dinh Xu, P.Nguyen Cu Trinh, Q.1', '0838385916',2,2)
add_XNK('DOMEX','F&G','2015/11/25','HCM','477641','TCLU 1230412','Apple Granny Smith 100','855','60','51300',None,None,None,None,None,None)
add_XNK('Freshmax','F&G','2015/11/25','Hà Nội','51797','176-17889885','Cherry Lapin 28-30mm (5kg/ctn)','168',None,None,None,None,None,None,None,None)
return 'Hello World!' | [
"[email protected]"
]
| |
aa455216150fa5289d9f7ee746c32197e99a6b16 | 474d8f5a63af01233bec76f154ddf05f9114cab0 | /pysleuth/controllers/components/keylogger/_controller.py | 0fb6061a6d3305fe0d0ab093b58a6bfe959731ae | []
| no_license | yuhisern7/PySleuth | 50cb94983b5c9ba0ed42a1b998252882ef4159e4 | 5235536c6be4815e03b47bf896bbeec323917b25 | refs/heads/master | 2023-03-27T22:56:55.952649 | 2020-11-13T07:04:02 | 2020-11-13T07:04:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | from ....core.components.keylogger import KeyLogger
from ..._base import BaseController
from ....core import output
logger = output.Loggers.getMaster(__name__)
class KeyLoggerController(BaseController):
def __init__(self):
super(KeyLoggerController, self).__init__()
self.worker = KeyLogger()
logFile = output.Path().getRootDir() / "keys.log"
self.worker.initLogger("keys.log", logFile)
self.connectSlots()
def startWorker(self):
self.worker.start()
logger.info("Keylogger active")
self.isWorkerActive = True
def connectSlots(self):
self.worker.SIG_KeyPressed.connect(self, "onKeyPress")
def onKeyPress(self, key):
self.worker.log(key)
logger.debug("key press triggered")
| [
"[email protected]"
]
| |
8543bcbeaead0694113b144c40525f0d8ca0ac1d | cc54cf98ec7c1dc88eae06ad12b9c66dc8d500b9 | /intrinio_sdk/models/api_response_crypto_true_strength_index.py | bd1b9566f61dd243b6253547bf33ae54da7f8950 | []
| no_license | sanderbrauwers/python-sdk | 0b3caef4c51c7f4192d315a4636e7278de2dc252 | 81f6facb30e7781c70ba0000485a0d994a82dbf8 | refs/heads/master | 2020-05-27T09:57:51.492025 | 2019-05-24T10:53:21 | 2019-05-24T10:53:21 | 188,575,545 | 0 | 0 | null | 2019-05-25T14:24:44 | 2019-05-25T14:24:44 | null | UTF-8 | Python | false | false | 14,157 | py | # coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.6.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from intrinio_sdk.models.crypto_exchange_summary import CryptoExchangeSummary # noqa: F401,E501
from intrinio_sdk.models.crypto_pair_summary import CryptoPairSummary # noqa: F401,E501
from intrinio_sdk.models.technical_indicator import TechnicalIndicator # noqa: F401,E501
from intrinio_sdk.models.true_strength_index_technical_value import TrueStrengthIndexTechnicalValue # noqa: F401,E501
class ApiResponseCryptoTrueStrengthIndex(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'technicals': 'list[TrueStrengthIndexTechnicalValue]',
'indicator': 'TechnicalIndicator',
'pair': 'CryptoPairSummary',
'exchange': 'CryptoExchangeSummary',
'timeframe': 'str',
'next_page': 'str'
}
attribute_map = {
'technicals': 'technicals',
'indicator': 'indicator',
'pair': 'pair',
'exchange': 'exchange',
'timeframe': 'timeframe',
'next_page': 'next_page'
}
def __init__(self, technicals=None, indicator=None, pair=None, exchange=None, timeframe=None, next_page=None): # noqa: E501
"""ApiResponseCryptoTrueStrengthIndex - a model defined in Swagger""" # noqa: E501
self._technicals = None
self._indicator = None
self._pair = None
self._exchange = None
self._timeframe = None
self._next_page = None
self.discriminator = None
if technicals is not None:
self.technicals = technicals
if indicator is not None:
self.indicator = indicator
if pair is not None:
self.pair = pair
if exchange is not None:
self.exchange = exchange
if timeframe is not None:
self.timeframe = timeframe
if next_page is not None:
self.next_page = next_page
@property
def technicals(self):
"""Gets the technicals of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:return: The technicals of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: list[TrueStrengthIndexTechnicalValue]
"""
return self._technicals
@property
def technicals_dict(self):
"""Gets the technicals of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:return: The technicals of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: list[TrueStrengthIndexTechnicalValue]
"""
result = None
value = self.technicals
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'technicals': value }
return result
@technicals.setter
def technicals(self, technicals):
"""Sets the technicals of this ApiResponseCryptoTrueStrengthIndex.
:param technicals: The technicals of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:type: list[TrueStrengthIndexTechnicalValue]
"""
self._technicals = technicals
@property
def indicator(self):
"""Gets the indicator of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
The name and symbol of the technical indicator # noqa: E501
:return: The indicator of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: TechnicalIndicator
"""
return self._indicator
@property
def indicator_dict(self):
"""Gets the indicator of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
The name and symbol of the technical indicator as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The indicator of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: TechnicalIndicator
"""
result = None
value = self.indicator
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'indicator': value }
return result
@indicator.setter
def indicator(self, indicator):
"""Sets the indicator of this ApiResponseCryptoTrueStrengthIndex.
The name and symbol of the technical indicator # noqa: E501
:param indicator: The indicator of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:type: TechnicalIndicator
"""
self._indicator = indicator
@property
def pair(self):
"""Gets the pair of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:return: The pair of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: CryptoPairSummary
"""
return self._pair
@property
def pair_dict(self):
"""Gets the pair of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:return: The pair of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: CryptoPairSummary
"""
result = None
value = self.pair
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'pair': value }
return result
@pair.setter
def pair(self, pair):
"""Sets the pair of this ApiResponseCryptoTrueStrengthIndex.
:param pair: The pair of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:type: CryptoPairSummary
"""
self._pair = pair
@property
def exchange(self):
"""Gets the exchange of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:return: The exchange of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: CryptoExchangeSummary
"""
return self._exchange
@property
def exchange_dict(self):
"""Gets the exchange of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:return: The exchange of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: CryptoExchangeSummary
"""
result = None
value = self.exchange
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'exchange': value }
return result
@exchange.setter
def exchange(self, exchange):
"""Sets the exchange of this ApiResponseCryptoTrueStrengthIndex.
:param exchange: The exchange of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:type: CryptoExchangeSummary
"""
self._exchange = exchange
@property
def timeframe(self):
"""Gets the timeframe of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
The time interval for the crypto currency prices # noqa: E501
:return: The timeframe of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: str
"""
return self._timeframe
@property
def timeframe_dict(self):
"""Gets the timeframe of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
The time interval for the crypto currency prices as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The timeframe of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: str
"""
result = None
value = self.timeframe
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'timeframe': value }
return result
@timeframe.setter
def timeframe(self, timeframe):
"""Sets the timeframe of this ApiResponseCryptoTrueStrengthIndex.
The time interval for the crypto currency prices # noqa: E501
:param timeframe: The timeframe of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:type: str
"""
self._timeframe = timeframe
@property
def next_page(self):
"""Gets the next_page of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
The token required to request the next page of the data # noqa: E501
:return: The next_page of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: str
"""
return self._next_page
@property
def next_page_dict(self):
"""Gets the next_page of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
The token required to request the next page of the data as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The next_page of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:rtype: str
"""
result = None
value = self.next_page
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'next_page': value }
return result
@next_page.setter
def next_page(self, next_page):
"""Sets the next_page of this ApiResponseCryptoTrueStrengthIndex.
The token required to request the next page of the data # noqa: E501
:param next_page: The next_page of this ApiResponseCryptoTrueStrengthIndex. # noqa: E501
:type: str
"""
self._next_page = next_page
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiResponseCryptoTrueStrengthIndex):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
0e28bd12d0e6eb69f2357634329e50e95b087d15 | 8444ea5cd42c09a7061b481fcb8135f72201d57e | /FileMiner/setup.py | 34c5deb70de7b44f9bacfa68b8bc8558705ba4a0 | [
"MIT"
]
| permissive | Igerald/FileMiner-PythonPackageConcept | 9c7489bd5b4f75da713756c3a296cc5f6cd6c7d3 | 77ab9884a0e3848613fa75a5a143072cd3e6122c | refs/heads/master | 2020-09-20T13:13:53.682202 | 2019-11-27T18:26:47 | 2019-11-27T18:26:47 | 224,491,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | import setuptools
with open("README.md",'r') as f:
long_text = f.read()
setuptools.setup(
name = "FileMiner",
version = "1.0.0",
author = "Isaiah Gerald",
author_email = "[email protected]",
description = "pkg-template-description",
long_description = long_text,
long_description_content_type = "text/markdown",
url = "https://github.com/pypa/sampleproject",
packages = setuptools.find_packages(),
classifiers = ["Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",],
)
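# --- Added usage note (assumed, not from the original file) -------------------
# Typical commands for a setuptools project laid out like this:
#   pip install .            # install the package locally
#   python setup.py sdist    # build a source distribution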
| [
"[email protected]"
]
| |
a477f4d1d68a1a8e7fa815af261bde37f854edfe | 15298143f536b3f253ddbbe08c49593c225bfa0f | /tools/docker_deployement/main.py | 6d89429a4ed415af57c17592de618e8f71b0ec6e | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | djangobrunink/CellProfiler | dfe3bc0c20681729ddd8cc782dedb3ea89341e36 | cf845a1be4168a799a8bc2b25438b0cff6f1e275 | refs/heads/master | 2023-01-03T06:18:52.871747 | 2020-08-28T22:57:31 | 2020-08-28T22:57:31 | 285,306,031 | 0 | 0 | NOASSERTION | 2020-08-05T14:05:26 | 2020-08-05T14:05:26 | null | UTF-8 | Python | false | false | 1,322 | py | import glob
import os
from pathlib import Path
import docker
import pickle
def main():
# Path to the docker files.
docker_file_path = Path() / "docker_image_files"
# The name under which the docker image is stored.
image_tag = "j0rd1smit/detectron2"
# the path to the image files
images_path = Path() / "images"
volumes = [f"{images_path.absolute()}:/app/images"]
# Activate docker.
client = docker.from_env()
# Build the docker image (Docker's layer cache makes this cheap if it already exists).
image = client.images.build(
path=str(docker_file_path.absolute()),
rm=True,
tag=image_tag,
quiet=False,
)
# Run the container to make the predictions
client.containers.run(
image_tag,
volumes=volumes,
)
# Fetch the predictions from disk.
data = []
for path in glob.glob("images/*.p"):
# read the binary results from disk.
with open(path, "rb") as f:
prediction = pickle.load(f)
data.append(prediction)
# clean up the binary results from disk
os.remove(path)
# Output the predictions or do something else with it.
for prediction in data:
print(prediction["file_name"], prediction["num_instances"], prediction["scores"])
if __name__ == '__main__':
main()
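# --- Added usage note (assumptions, not from the original file) ---------------
# This script presumes a running Docker daemon, a Dockerfile under
# ./docker_image_files, and a container entrypoint that writes one pickle per
# input image into the mounted ./images directory; then simply:
#   python main.py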
| [
"[email protected]"
]
| |
7eb92eae0eafa08bc10dc542f03fc7ae30bbba8b | 711a550352cb39cc551118f0f4902f5dae8e94ed | /group/settings.py | be433a50b2e743f28f33b147e0801f65270f13d8 | []
| no_license | zj199664/lingshi | db9470e697f65890ffc0b9416d6e7ecc2d716c0c | a9a958b4963df97078f64366c51b1c246220a5cb | refs/heads/master | 2020-04-17T22:20:40.151045 | 2019-01-25T06:06:36 | 2019-01-25T06:06:36 | 166,990,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,077 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kbj19x!%9-l$c2$b861a^^e(t$+4ff%6h74g!*+hr628ky$0-m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# App registration
SYS_APPS = ['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
# Third-party module registration
EXT_APPS = [
'xadmin',
'crispy_forms',
'reversion'
]
# Custom app registration
CUSTOM_APPS = [
'apps.account',
'apps.index',
'apps.detail',
'apps.order',
'apps.search',
'apps.car',
]
INSTALLED_APPS = SYS_APPS + EXT_APPS + CUSTOM_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'group.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'apps.account.context_processors.shop_count',
],
},
},
]
WSGI_APPLICATION = 'group.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'lingshi',
'USER': 'root',
'PASSWORD': 'root',
'PORT': '3306',
'HOST': '127.0.0.1',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# Static files directory configuration
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'apps/account/static'),
)
# Configure the path for serving media files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Global login configuration
LOGIN_URL = '/account/login/'
# Specify the custom user model
AUTH_USER_MODEL = 'index.User'
# ============== Email configuration =============
# Address of the outgoing mail server
EMAIL_HOST = 'smtp.163.com'
# Outgoing mail port
EMAIL_PORT = 25
# Default sender account
EMAIL_HOST_USER = '[email protected]'
# Authorization code (SMTP app password)
EMAIL_HOST_PASSWORD = 'qwe123'
# Whether to enable the TLS security protocol
EMAIL_USE_TLS = True
# Whether to enable the SSL security protocol
# EMAIL_USE_SSL = True
# Send timeout
# EMAIL_TIMEOUT =
# ============= Cache configuration ==============
CACHES = {
'default': {
# Use redis as the cache backend
'BACKEND': 'django_redis.cache.RedisCache',
# Store the cached data at this location
# Cache address
'LOCATION': 'redis://127.0.0.1/1',
# rediss: //[:password]@localhost:6379 / 0
'TIMEOUT': 300,
'OPTIONS': {
# "PASSWORD": ""
# Whether to compress cached data
# "COMPRESSOR": "django_redis.compressors.zlib.ZlibCompressor",
# Connection pool configuration
"CONNECTION_POOL_KWARGS": {"max_connections": 100, "retry_on_timeout": True}
}
},
'session': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://127.0.0.1/2',
'TIMEOUT': 300,
'OPTIONS': {
"CONNECTION_POOL_KWARGS": {"max_connections": 100, "retry_on_timeout": True}
}
}
}
# ========== Session cache configuration ============
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
# Session expiry time: 7 days
SESSION_COOKIE_AGE = 7 * 24 * 60 * 60  # session cookie lifetime (the Django default is 2 weeks, 1209600 seconds)
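# --- Added usage note (assumption, not from the original file) ----------------
# With the configuration above, ordinary cache calls go to redis db 1 and
# session data to redis db 2, e.g.:
#   from django.core.cache import cache
#   cache.set('greeting', 'hello', timeout=60)
#   cache.get('greeting')  # -> 'hello'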
| [
"[email protected]"
]
| |
f2ad5621b9935aa53f1cff63460954ae5de1d0e6 | 6a74988075c2d33a5264b238d292c898f21ec792 | /application/controller/index_controller.py | 014fe93db65f61ccb2cf341ea8b1080d2209d080 | []
| no_license | gabsgc/covid-19-journal | 09caa7e25868b2678981c922f4740243526ea49a | 8821a54c3224ea55d085f0bcb82b799a4d477c6d | refs/heads/master | 2023-04-10T22:52:11.200175 | 2021-05-01T19:42:26 | 2021-05-01T19:42:26 | 363,299,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from application import app
from flask import render_template
from application.model.entity.estado import Estado
from application.model.dao.estado_dao import EstadoDAO
from application.model.entity.noticia import Noticia
@app.route("/")
def index():
estado_list = EstadoDAO().findAll()
noticia1 = EstadoDAO().findNoticiaById(3)
noticia2 = EstadoDAO().findNoticiaById(2)
noticia3 = EstadoDAO().findNoticiaById(1)
noticia_list = [noticia1, noticia2, noticia3]
return render_template('index.html', estado_list = estado_list, noticia_list = noticia_list) | [
"[email protected]"
]
| |
8eefdcd0f560f9474b98e085a4292b064e7dce77 | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/__init__.py | 21732f34697d6d2ac9444bb3316752278e827cf6 | [
"Apache-2.0"
]
| permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 11,048 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import subTLVs_
class subTLVs(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix/subTLVs. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS prefix sub-TLVs.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__subTLVs',)
_yang_name = 'subTLVs'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'mt-ipv6-reachability', u'prefixes', u'prefix', u'subTLVs']
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """subTLVs must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
})
self.__subTLVs = t
if hasattr(self, '_set'):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
subTLVs = __builtin__.property(_get_subTLVs)
_pyangbind_elements = {'subTLVs': subTLVs, }
import subTLVs_
class subTLVs(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-ipv6-reachability/prefixes/prefix/subTLVs. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS prefix sub-TLVs.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__subTLVs',)
_yang_name = 'subTLVs'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'levels', u'level', u'link-state-database', u'lsp', u'tlvs', u'tlv', u'mt-ipv6-reachability', u'prefixes', u'prefix', u'subTLVs']
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_ipv6_reachability/prefixes/prefix/subTLVs/subTLVs (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: List of subTLV types in the LSDB for the specified TLV.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """subTLVs must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
})
self.__subTLVs = t
if hasattr(self, '_set'):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(base=YANGListType("subtlv_type",subTLVs_.subTLVs, yang_name="subTLVs", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='subtlv-type', extensions=None), is_container='list', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)
subTLVs = __builtin__.property(_get_subTLVs)
_pyangbind_elements = {'subTLVs': subTLVs, }
| [
"[email protected]"
]
| |
f53cb9f9b4e947f1003e4e98baf2915ae2275bf4 | af60902eefaf2310f80ece1ac74885534aa43155 | /back/accounts/urls.py | 3e250509de14976bf9a60de4ee141f0238d919de | []
| no_license | uutaein/s1_netflixa | 9a773cbc2e4e16b31773284ea2a81db88c20e990 | 97ca2c3c04ec2672c6cca620f36915ae5cb4c9e7 | refs/heads/master | 2022-12-06T00:40:51.422333 | 2020-08-17T04:49:26 | 2020-08-17T04:49:26 | 274,104,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('', views.user_list, name='user_list'),
path('<int:user_pk>/', views.detail, name='detail'),
# path('delete/', views.delete, name='delete'),
path('<int:user_pk>/follow/', views.follow, name='follow'),
# path('<int:user_pk>/update/', views.update, name='update'),
path('<str:user_name>/getname/', views.getname, name='getname')
] | [
"[email protected]"
]
| |
92329172eff2e4b3144e1abe0fa2c4fa8e730893 | 947d4102433b136ac65e6bbebd28ca51c53d1f5f | /ansible/library/reduce_and_add_sonic_images.py | baf99702c89fb32e4cb5718a65e1c501a6c3a86c | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
]
| permissive | SW-CSA/sonic-mgmt | eac574040a20cea724208a442f7492f840dd1ec3 | d50a683dc3ab0206e1fef9230c136b4c19b084f1 | refs/heads/201811 | 2023-09-01T13:14:29.166752 | 2019-03-21T08:08:43 | 2019-03-21T08:08:43 | 142,762,730 | 2 | 5 | NOASSERTION | 2019-11-05T06:55:46 | 2018-07-29T13:21:25 | Python | UTF-8 | Python | false | false | 4,535 | py | #!/usr/bin/python
DOCUMENTATION = '''
module: reduce_and_add_sonic_images
version_added: "1.0"
short_description: remove excessive sonic images and install new image if specified
description: remove excessive sonic images from the target device.
Note that this version doesn't guarantee to remove older images. Images
in the 'Available' list that are not 'Current' or 'Next' will be subject to
removal.
Options:
- option-name: disk_used_pcent
description: maximum disk used percentage after removing old images
required: False
Default: 8
- option-name: new_image_url
description: url pointing to the new image
required: False
Default: None
'''
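# A hedged companion to the DOCUMENTATION block (added; the playbook values
# below are illustrative assumptions only):
EXAMPLES = '''
- name: keep /host below the disk threshold and install a new image
  reduce_and_add_sonic_images:
    disk_used_pcent: 50
    new_image_url: http://example.com/sonic-broadcom.bin
'''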
import sys
from ansible.module_utils.basic import *
def exec_command(module, cmd, ignore_error=False, msg="executing command"):
rc, out, err = module.run_command(cmd)
if not ignore_error and rc != 0:
module.fail_json(msg="Failed %s: rc=%d, out=%s, err=%s" %
(msg, rc, out, err))
return out
def get_sonic_image_removal_candidates(module):
keep = set()
images = set()
out = exec_command(module, cmd="sonic_installer list",
msg="listing sonic images")
lines = out.split('\n')
for line in lines:
line = line.strip()
if line.startswith("Current:") or line.startswith("Next:"):
keep.add(line.split()[1].strip())
elif line != "Available:" and len(line) > 0:
images.add(line)
return (images - keep)
def get_disk_free_size(module, partition):
out = exec_command(module, cmd="df -BM --output=avail %s" % partition,
msg="checking disk available size")
avail = int(out.split('\n')[1][:-1])
return avail
def get_disk_used_percent(module, partition):
out = exec_command(module, cmd="df -BM --output=pcent %s" % partition,
msg="checking disk available percent")
pcent = int(out.split('\n')[1][:-1])
return pcent
def reduce_installed_sonic_images(module, disk_used_pcent):
images = get_sonic_image_removal_candidates(module)
while len(images) > 0:
pcent = get_disk_used_percent(module, "/host")
if pcent < disk_used_pcent:
break
# Randomly choose an old image to remove. On a system with
# developer built images and offical build images mix-installed
# it is hard to compare image tag to find 'oldest' image.
img = images.pop()
exec_command(module, cmd="sonic_installer remove %s -y" % img,
ignore_error=True)
def install_new_sonic_image(module, new_image_url):
if not new_image_url:
return
avail = get_disk_free_size(module, "/host")
if avail >= 1500:
# There is enough space to install directly
exec_command(module,
cmd="sonic_installer install %s -y" % new_image_url,
msg="installing new image")
else:
# Create a tmpfs partition to download image to install
exec_command(module, cmd="mkdir -p /tmp/tmpfs", ignore_error=True)
exec_command(module, cmd="umount /tmp/tmpfs", ignore_error=True)
exec_command(module,
cmd="mount -t tmpfs -o size=1000M tmpfs /tmp/tmpfs",
msg="mounting tmpfs")
exec_command(module,
cmd="curl -o /tmp/tmpfs/sonic-image %s" % new_image_url,
msg="downloading new image")
exec_command(module,
cmd="sonic_installer install /tmp/tmpfs/sonic-image -y",
msg="installing new image")
exec_command(module, cmd="sync", ignore_error=True)
exec_command(module, cmd="umount /tmp/tmpfs", ignore_error=True)
exec_command(module, cmd="rm -rf /tmp/tmpfs", ignore_error=True)
def main():
module = AnsibleModule(
argument_spec=dict(
disk_used_pcent=dict(required=False, type='int', default=8),
new_image_url=dict(required=False, type='str', default=None),
),
supports_check_mode=False)
disk_used_pcent = module.params['disk_used_pcent']
new_image_url = module.params['new_image_url']
try:
reduce_installed_sonic_images(module, disk_used_pcent)
install_new_sonic_image(module, new_image_url)
except:
err = str(sys.exc_info())
module.fail_json(msg="Error: %s" % err)
module.exit_json()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
780b80fa6885808a6cd823a419b5f2faea5eb627 | 48d6f70f2a97659ccc60e50c222477b98adb57b4 | /XOR/tensorflow_xor.py | c7046d300ce77228c508b6219a4ed9e646ea3e81 | []
| no_license | Anosy/tensorflow_DL | e1953112fb11f44fe2ac879b8e142a7e428478c0 | 5b64d3d1a3dbb481b7382100fb00006ab021408a | refs/heads/master | 2020-03-25T22:51:30.972034 | 2018-09-18T02:29:22 | 2018-09-18T02:29:22 | 144,246,341 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,600 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Define the learning rate and the input samples
learning_rate = 0.001
x_data = np.array([[0., 0.], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
y_data = np.array([[0], [1], [0], [1]])
# Define placeholders
x = tf.placeholder(dtype=tf.float32, shape=(None, 2))
y = tf.placeholder(dtype=tf.float32, shape=(None, 1))
# 定义权重
weights = {
'w1': tf.Variable(tf.random_normal([2, 16])),
'w2': tf.Variable(tf.random_normal([16, 1]))
}
# Define the biases
biases = {
'b1': tf.Variable(tf.zeros([16])),
'b2': tf.Variable(tf.zeros([1]))
}
# Define the network structure
def dnn(X, weights, biases):
d1 = tf.matmul(X, weights['w1']) + biases['b1']  # fixed: use the X argument rather than the global placeholder
d1 = tf.nn.relu(d1)
d2 = tf.matmul(d1, weights['w2']) + biases['b2']
# d2 = tf.nn.sigmoid(d2)
return d2
# Prediction
pred = dnn(x, weights, biases)
# Define the loss function (several alternatives shown)
# cost = tf.reduce_mean(tf.square(y - pred)) # mean squared error loss
# cost = -tf.reduce_mean(y * tf.log(tf.clip_by_value(pred, 1e-10, 1.0)) + (1-y)*tf.log(tf.clip_by_value(1-pred, 1e-10, 1.0))) # hand-written sigmoid cross-entropy loss
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=y)) # sigmoid cross-entropy loss
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) # softmax cross-entropy loss; not suitable for this single-logit binary problem
# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Compute the accuracy
correct_pred = tf.equal(tf.sigmoid(pred) > 0.5, y > 0.5)  # threshold the probability, not the raw logit
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize variables (global_variables_initializer replaces the deprecated initialize_all_variables)
init = tf.global_variables_initializer()
# Start a session
with tf.Session() as sess:
sess.run(init)
for i in range(2500):
sess.run(optimizer, feed_dict={x: x_data, y: y_data})
acc = sess.run(accuracy, feed_dict={x: x_data, y: y_data})
loss = sess.run(cost, feed_dict={x: x_data, y: y_data})
if (i % 100 == 0):
print("Step " + str(i) + " loss " + "{:.6f}".format(loss))
print("Step " + str(i) + " acc " + "{:.6f}".format(acc))
print('predict:\n', sess.run(pred, feed_dict={x: x_data}))
print("Optimization Finished!")
# 绘制图片
xx, yy = np.mgrid[-0.1:1.1:.05, -0.1:1.1:.05]
grid = np.c_[xx.ravel(), yy.ravel()]
probs = sess.run(pred, feed_dict={x: grid})
probs = probs.reshape(xx.shape)
plt.scatter(x_data[:, 0], x_data[:, 1], c=np.squeeze(y_data), cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
plt.contour(xx, yy, probs, levels=[.5], cmap="Greys", vmin=0, vmax=.1)
plt.show()
| [
"[email protected]"
]
| |
d3ee284525c5c73036ffb401d8d4eb660df05e36 | 0017cece1870cafa316a8a72b5ff0551ca508385 | /agent_code/new_agent/transition_creator.py | 5a18a929c6aff058fe21969ac488a1245c716bd7 | []
| no_license | christian-lang-git/IFML-project | a43a6ee7421be4203b590ce30c82c72f4b3a7a99 | ec78df3f8290acd350990ade1f50e823080a5ad9 | refs/heads/master | 2023-03-30T16:01:15.486123 | 2021-03-28T19:32:31 | 2021-03-28T19:32:31 | 345,751,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,541 | py | from .preprocessing import *
def generate_8_transitions(old_state, action_index, new_state):
"""
Generates 8 transitions via rotation and mirroring.
Reward and termination flag are not included since they are constant for all 8 transitions.
"""
#the original transition
original = [old_state, action_index, new_state]
#counter clockwise rotations
rot_90 = rotate_90_linear(original)
rot_180 = rotate_90_linear(rot_90)
rot_270 = rotate_90_linear(rot_180)
#mirroring
original_m = mirror_linear(original)
rot_90_m = mirror_linear(rot_90)
rot_180_m = mirror_linear(rot_180)
rot_270_m = mirror_linear(rot_270)
#generate and return list of all 8 transitions
transition_list = [
original,
rot_90,
rot_180,
rot_270,
original_m,
rot_90_m,
rot_180_m,
rot_270_m
]
return transition_list
def rotate_90_linear(transition):
"""
Rotates a transition for processing type PROCESS_LINEAR counter clockwise.
Reward and termination flag are not included since they are constant for all 8 transitions.
"""
#extract components of transition
old_features = transition[0]
action_index = transition[1]
new_features = transition[2]
#apply rotation to components of transition
old_features_rot = rotate_90_linear_features(old_features)
action_index_rot = rotate_90_action(action_index)
new_features_rot = rotate_90_linear_features(new_features)
#return transition as list
return [old_features_rot, action_index_rot, new_features_rot]
def rotate_90_action(action_index):
"""
Rotates an action counter clockwise.
"""
action = ACTIONS[action_index]
if action == ACTION_LEFT:
action_rot = ACTION_DOWN
elif action == ACTION_DOWN:
action_rot = ACTION_RIGHT
elif action == ACTION_RIGHT:
action_rot = ACTION_UP
elif action == ACTION_UP:
action_rot = ACTION_LEFT
else:
return action_index
action_rot_index = INVERSE_ACTIONS[action_rot]
return action_rot_index
def rotate_90_linear_features(features):
"""
Rotates features for processing type PROCESS_LINEAR counter clockwise.
"""
features_rot = np.copy(features)
try:
for offset in LINEAR_LIST_PLAYER_INDICES:
#the data in the linear processing is always in the order player=0, left=1, right=2, up=3, down=4
#left=1 element stores up=3
features_rot[offset+1] = features[offset+3]
#right=2 element stores down=4
features_rot[offset+2] = features[offset+4]
#up=3 element stores right=2
features_rot[offset+3] = features[offset+2]
#down=4 element stores left=1
features_rot[offset+4] = features[offset+1]
except:
print("ERROR: ", features)
return features_rot
def mirror_linear(transition):
"""
Mirrors a transition along the horizontal axis (vertical flip) for processing type PROCESS_LINEAR.
Reward and termination flag are not included since they are constant for all 8 transitions.
"""
#extract components of transition
old_features = transition[0]
action_index = transition[1]
new_features = transition[2]
#apply mirroring to components of transition
old_features_m = mirror_linear_features(old_features)
action_index_m = mirror_action(action_index)
new_features_m = mirror_linear_features(new_features)
#return transition as list
return [old_features_m, action_index_m, new_features_m]
def mirror_action(action_index):
"""
Mirrors an action along the horizontal axis (vertical flip).
"""
action = ACTIONS[action_index]
if action == ACTION_UP:
action_m = ACTION_DOWN
elif action == ACTION_DOWN:
action_m = ACTION_UP
else:
return action_index
action_m_index = INVERSE_ACTIONS[action_m]
return action_m_index
def mirror_linear_features(features):
"""
Mirrors features along the horizontal axis (vertical flip) for processing type PROCESS_LINEAR.
"""
features_m = np.copy(features)
for offset in LINEAR_LIST_PLAYER_INDICES:
#the data in the linear processing is always in the order player=0, left=1, right=2, up=3, down=4
#up=3 element stores down=4
features_m[offset+3] = features[offset+4]
#down=4 element stores up=3
features_m[offset+4] = features[offset+3]
return features_m | [
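# --- Added usage sketch (assumptions, not from the original file) -------------
# Using the constants this module imports from .preprocessing (ACTIONS,
# INVERSE_ACTIONS, LINEAR_LIST_PLAYER_INDICES), one experience fans out into
# its 8 dihedral symmetries; reward and termination flag stay constant:
#
#   for old_f, a_idx, new_f in generate_8_transitions(old_f0, a_idx0, new_f0):
#       replay_buffer.append((old_f, a_idx, reward, new_f, done))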
"[email protected]"
]
| |
91cb763aa8be76ec2433bcc71f0dbe10801c7b84 | daf6302fc060c45832b377c91a099a2c59fe878f | /loss.py | 8a486863754effdb7468e7d8c4ba3d5df1fe810b | []
| no_license | BUCT-Vision/Vessel-wgan-pytorch | 2277af4b5cb5a386550d03f431f00ed4df508b00 | d78a3e95a41e19e809c4ae10ebb88b57e20cf468 | refs/heads/master | 2020-03-18T23:39:26.929120 | 2018-05-29T15:11:54 | 2018-05-29T15:11:54 | 135,416,942 | 1 | 0 | null | 2018-05-30T09:03:37 | 2018-05-30T09:03:37 | null | UTF-8 | Python | false | false | 619 | py | import torch
import torch.nn.functional as F
import torch.nn as nn
# Recommended: 2D cross entropy for dense (per-pixel) prediction
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
        # nn.NLLLoss2d was folded into nn.NLLLoss in later PyTorch releases;
        # together with the log_softmax in forward() this is a 2D cross entropy
        self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
#print(inputs.size())
return self.nll_loss(F.log_softmax(inputs,dim=1), targets)
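# Minimal smoke test, assuming a PyTorch version where nn.NLLLoss2d is still
# available; logits are [N, C, H, W], targets are integer classes [N, H, W]:
if __name__ == "__main__":
    _logits = torch.randn(2, 3, 4, 4)                 # N=2, C=3 classes
    _targets = torch.randint(0, 3, (2, 4, 4))
    print(CrossEntropyLoss2d()(_logits, _targets))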
class BCE_Loss(nn.Module):
    def __init__(self):
        super(BCE_Loss, self).__init__()
        self.bce = nn.BCELoss()
    def forward(self, inputs, targets):
return self.bce(inputs,targets) | [
"[email protected]"
]
| |
668657bcff004b73d7f1774f4f953091a5bf649f | 3f55607c033fef615f8d0f9ef8d284f43d1709a1 | /shop/shop/settings.py | 04d5a80fe47afc58d6e082ce02f49aedb74d8b9d | []
| no_license | aakashres/shoppingcart | d37f7425f8585ac0463153a90ae4f1d2ed49c460 | 2060fac698130b78860072f5fcc0532ec716d087 | refs/heads/master | 2022-11-09T15:55:27.061262 | 2017-04-04T15:00:08 | 2017-04-04T15:00:08 | 273,651,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | """
Django settings for shop project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_bai2f0i6@h=+dy+x1b(&i5$83kg0+g(rq6s5djrt=g+uunlvc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account',
'cart',
'messaging',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'shop',
'USER': 'prixa',
'PASSWORD': 'prixatech',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kathmandu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "root", "static_cdn")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "root", "media_cdn")
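# MEDIA_URL/MEDIA_ROOT only define where uploads live; serving them in
# development is usually wired up in shop/urls.py. A sketch (not part of this
# settings module, shown here for reference only):
#
#   from django.conf import settings
#   from django.conf.urls.static import static
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)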
| [
"[email protected]"
]
| |
d9268d7f1a2b9e313336b8fa0a6b1d32d9ac827f | f1a80d9a4c4ff74142dd62096843cd0318389573 | /handlers/verifycode.py | 8bd4bae53f87446b9395d175695bb1b1448ec73d | []
| no_license | jenifly/tornado_Restful | ccc354a4034804ff47d19911ae4ef4d555d6b0b5 | ddd8cf49268049c23878b4ebcd84a87ed622b0c2 | refs/heads/master | 2020-09-04T11:24:32.140200 | 2020-04-20T01:04:58 | 2020-04-20T01:04:58 | 219,719,866 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | import logging
import orjson
from typing import (Union, Dict)
from handlers import ApiHandler
from tornado.escape import utf8
from tornado.util import unicode_type
from config import IMG_CODE_EXPIRES_SECONDS
from utils.captcha.captcha import captcha
class ImageCodeHandler(ApiHandler):
route = r'/api/imgcode'
async def get(self):
pre_code_id = self.get_argument('pre', None)
cur_code_id = self.get_argument('cur', None)
if not cur_code_id:
return self.write(4004)
text, pic = captcha.generate_captcha()
try:
if pre_code_id:
await self.redis.delete(f'img_code_{pre_code_id}')
await self.redis.setex(f'img_code_{cur_code_id}', IMG_CODE_EXPIRES_SECONDS, text)
except Exception as e:
logging.error(e)
            # return here so a failed cache write does not still serve the image
            return self.write(4006)
self.write_img(pic)
async def post(self):
code = self.json_args.get('imageCode', None)
code_id = self.json_args.get('imageCodeId', None)
if not code or not code_id:
return self.write(4004)
try:
            # must use the same 'img_code_' prefix that get() stores under,
            # otherwise the lookup below always misses
            real_img_code = await self.redis.get(f'img_code_{code_id}')
            await self.redis.delete(f'img_code_{code_id}')
except Exception as e:
logging.error(e)
return self.write(4001)
if not real_img_code:
return self.write(4600)
if real_img_code != code.upper():
return self.write(4601)
self.write()
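    # Request-flow sketch for the two handlers above (the numeric response
    # codes are whatever the surrounding ApiHandler maps them to; that mapping
    # lives outside this file):
    #   GET  /api/imgcode?cur=<uuid>  -> image bytes, text cached as img_code_<uuid>
    #   POST /api/imgcode {"imageCode": "AB12", "imageCodeId": "<uuid>"}
    #        -> 4600 when the code expired, 4601 on mismatch, success otherwise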
def write_img(self, img_bytes: bytes) -> None:
if self._finished:
raise RuntimeError('Cannot write() after finish()')
self.set_header('Content-Type', 'image/jpg')
self._write_buffer.append(img_bytes) | [
"[email protected]"
]
| |
eb99dc50f86c3cd3d0216e7cd42345dc5877551e | c6348569359f6e2412ee2f3ef918e99601112629 | /RetoLiver/models.py | d111ef7b043a9e434feeb4e9b888fb8a1f873ccc | []
| no_license | MarcoBosglez/Reto_Liverpool | 9592434e23c55c9a83c2395b927a329fd187f487 | edd87fb273fa6169dd253ab8f3eb5e6f63eb6043 | refs/heads/main | 2023-05-02T00:37:31.481108 | 2021-04-30T23:53:20 | 2021-04-30T23:53:20 | 358,956,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.db import models
# Create your models here.
class Reto(models.Model):
    phone = models.IntegerField()  # note: an integer drops leading zeros; a CharField is safer for phone numbers
email = models.CharField(max_length=30) | [
"[email protected]"
]
| |
c20219e71fe96fd7ae87fe46a2305941c63e2a61 | 161f02552f5b50d395aedb2a8354b830efe09ef3 | /setup.py | ecc057a51d2f31c09576bd383975eb71a6d7869d | [
"MIT"
]
| permissive | DuongVu39/HAM_Python | 8f79f8d6ca383b0b47ec9ae7cd3884d7313c3026 | 890229beb71848697c27baa767556265d60657c2 | refs/heads/master | 2021-05-02T07:30:01.436525 | 2018-04-18T00:35:15 | 2018-04-18T00:35:15 | 120,830,699 | 0 | 0 | MIT | 2018-04-05T06:42:09 | 2018-02-08T23:26:58 | Python | UTF-8 | Python | false | false | 336 | py | from setuptools import setup
setup(
name='HAM_Python',
version='v1.0',
author='Duong Vu, Jordan Dubchak, Linsey Yuo',
long_description=open('README.md').read(),
install_requires=['setuptools', 'pandas','numpy', 'seaborn', 'matplotlib'],
include_package_data=True,
license='MIT License'
) | [
"[email protected]"
]
| |
d2e145a737723d90d40cb49ba1513f4ce09da229 | d0fcc2198f1caf5633c4fc0d004ba68714396f1b | /bc4py/utils.py | d1c4a85cb4d9f0df6c85fb081bee3a4001b51119 | [
"MIT"
]
| permissive | webclinic017/bc4py | 4bfce04b666c2aaadda4b7ecc2a8270839231850 | 620b7d855ec957b3e2b4021cf8069d9dd128587a | refs/heads/master | 2022-12-09T22:23:49.842255 | 2019-06-21T14:24:17 | 2019-06-21T14:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,100 | py | from bc4py.config import C, V
from bc4py.gittool import get_current_branch
from bc4py.chain.utils import GompertzCurve
from Cryptodome.Cipher import AES
from Cryptodome import Random
from Cryptodome.Hash import SHA256
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from logging import getLogger, DEBUG, INFO, WARNING, ERROR
import multiprocessing
import os
import psutil
import sys
WALLET_VERSION = 0
log = getLogger('bc4py')
NAME2LEVEL = {
'DEBUG': DEBUG,
'INFO': INFO,
'WARNING': WARNING,
'ERROR': ERROR,
}
def set_database_path(sub_dir=None):
V.DB_HOME_DIR = os.path.join(os.path.expanduser("~"), 'blockchain-py')
if not os.path.exists(V.DB_HOME_DIR):
os.makedirs(V.DB_HOME_DIR)
if sub_dir:
V.DB_HOME_DIR = os.path.join(V.DB_HOME_DIR, sub_dir)
if not os.path.exists(V.DB_HOME_DIR):
os.makedirs(V.DB_HOME_DIR)
V.DB_ACCOUNT_PATH = os.path.join(V.DB_HOME_DIR, 'wallet.ver{}.dat'.format(WALLET_VERSION))
def set_blockchain_params(genesis_block, params):
assert 'spawn' in multiprocessing.get_all_start_methods(), 'Not found spawn method'
V.GENESIS_BLOCK = genesis_block
V.GENESIS_PARAMS = params
V.BECH32_HRP = params.get('hrp')
V.BLOCK_GENESIS_TIME = params.get('genesis_time')
V.BLOCK_MINING_SUPPLY = params.get('mining_supply')
V.BLOCK_TIME_SPAN = params.get('block_span')
V.BLOCK_REWARD = params.get('block_reward')
V.COIN_DIGIT = params.get('digit_number')
V.COIN_MINIMUM_PRICE = params.get('minimum_price')
V.BLOCK_CONSENSUSES = params.get('consensus')
GompertzCurve.k = V.BLOCK_MINING_SUPPLY
V.BRANCH_NAME = get_current_branch()
def check_already_started():
assert V.DB_HOME_DIR is not None
# check already started
pid_path = os.path.join(V.DB_HOME_DIR, 'pid.lock')
if os.path.exists(pid_path):
with open(pid_path, mode='r') as fp:
pid = int(fp.read())
if psutil.pid_exists(pid):
raise RuntimeError('Already running blockchain-py pid={}'.format(pid))
new_pid = os.getpid()
with open(pid_path, mode='w') as fp:
fp.write(str(new_pid))
log.info("create new process lock file pid={}".format(new_pid))
def console_args_parser():
"""get help by `python publicnode.py -h`"""
p = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--p2p',
help='p2p server bind port',
default=2000,
type=int)
p.add_argument('--rest',
help='REST API bind port',
default=3000,
type=int)
p.add_argument('--host',
help='REST API bind host',
default='127.0.0.1',
type=str)
p.add_argument('--user', '-u',
help='API user name',
default='user',
type=str)
p.add_argument('--password', '-p',
help='API password',
default='password',
type=str)
p.add_argument('--sub-dir',
help='setup blockchain folder path',
default=None)
p.add_argument('--log-level',
help='logging level',
choices=list(NAME2LEVEL),
default='INFO')
p.add_argument('--log-path',
help='recode log file path',
default=None,
type=str)
p.add_argument('--remove-log',
help='remove old log file when start program',
action='store_true')
p.add_argument('--daemon',
help='make process daemon',
action='store_true')
p.add_argument('--staking',
help='enable coin base staking',
action='store_true')
p.add_argument('--solo-mining',
help='solo mining for debug or testnet',
action='store_true')
return p.parse_args()
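# Example invocation mirroring the defaults above (values are placeholders;
# the entry script name comes from the docstring):
#   python publicnode.py --p2p 2000 --rest 3000 --host 127.0.0.1 \
#       --user user --password password --log-level DEBUG --staking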
def check_process_status(f_daemon):
if sys.platform == 'win32':
# windows
if f_daemon:
if sys.executable.endswith("pythonw.exe"):
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
else:
print("ERROR: Please execute by `pythonw.exe` not `python.exe` if you enable daemon flag")
sys.exit()
else:
if sys.executable.endswith("pythonw.exe"):
print("ERROR: Please execute by `python.exe`")
sys.exit()
else:
# stdin close to prevent lock on console
sys.stdin.close()
else:
# other
if f_daemon:
pid = os.fork()
if pid == 0:
# child process (daemon)
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
else:
# main process
print("INFO: Make daemon process pid={}".format(pid))
sys.exit()
else:
# stdin close to prevent lock on console
sys.stdin.close()
class AESCipher:
@staticmethod
def create_key():
return os.urandom(AES.block_size)
@staticmethod
def encrypt(key, raw):
assert isinstance(key, bytes)
assert isinstance(raw, bytes), "input data is bytes"
key = SHA256.new(key).digest()[:AES.block_size]
raw = AESCipher._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
return iv + cipher.encrypt(raw)
@staticmethod
def decrypt(key, enc):
assert isinstance(key, bytes)
assert isinstance(enc, bytes), 'Encrypt data is bytes'
key = SHA256.new(key).digest()[:AES.block_size]
iv = enc[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
raw = AESCipher._unpad(cipher.decrypt(enc[AES.block_size:]))
if len(raw) == 0:
raise ValueError("AES decryption error, not correct key")
else:
return raw
@staticmethod
def _pad(s):
        # PKCS#7-style padding: append `pad` bytes, each holding the pad length
        pad = AES.block_size - len(s) % AES.block_size
        return s + pad * pad.to_bytes(1, 'little')
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s) - 1:])]
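# Round-trip sketch; runs only when this module is executed directly and the
# bc4py package is importable:
if __name__ == "__main__":
    _key = AESCipher.create_key()
    _enc = AESCipher.encrypt(_key, b"hello world")
    assert AESCipher.decrypt(_key, _enc) == b"hello world"
    print("AESCipher round trip OK")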
class ProgressBar:
"""
terminal progressbar
original: https://github.com/bozoh/console_progressbar
author: Carlos Alexandre S. da Fonseca
"""
def __init__(self, prefix, default_suffix='', total=100, decimals=0, length=50, fill='X', zfill='-'):
self.prefix = prefix
self.default_suffix = default_suffix
self.__decimals = decimals
self.__length = length
self.__fill = fill
self.__zfill = zfill
self.__total = total
def _generate_bar(self, iteration, suffix=None):
percent = ("{0:." + str(self.__decimals) + "f}")
percent = percent.format(100 * (iteration / float(self.__total)))
filled_length = int(self.__length * iteration // self.__total)
bar = self.__fill * filled_length + self.__zfill * (self.__length - filled_length)
return '{0} |{1}| {2}% {3}'.format(self.prefix, bar, percent, suffix or self.default_suffix)
def print_progress_bar(self, iteration, suffix=None):
print('\r%s' % (self._generate_bar(iteration, suffix)), end='')
sys.stdout.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.print_progress_bar(self.__total, 'Complete')
print()
else:
print()
sys.stdout.flush()
log.error('Error on progress, {}'.format(exc_val))
return True
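# Typical usage, as a sketch; note that __exit__ returns True, so an exception
# raised inside the with-block is logged and suppressed rather than re-raised:
#
#   with ProgressBar('sync', total=500) as pb:
#       for i in range(500):
#           pb.print_progress_bar(i, suffix='importing blocks')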
__all__ = [
"set_database_path",
"set_blockchain_params",
"check_already_started",
"console_args_parser",
"check_process_status",
"AESCipher",
"ProgressBar",
]
| [
"[email protected]"
]
| |
b19b553273708892d03cdfc7f70263dea147d7d4 | fcd1ac6d7349aea4219a22b6e3ae82797c98b5da | /src/components/action_selectors.py | 16ad8ef4056972b1c1f2af98029a0df20ce6f9e0 | [
"Apache-2.0"
]
| permissive | multiagent-arn/ARN | ce339a1be37fa457cd29b68dde25a6d55ad72761 | eb566fcab409965ea4684b6a18c433d34562d0d6 | refs/heads/master | 2022-12-08T13:40:33.362080 | 2019-05-23T03:08:16 | 2019-05-23T03:08:16 | 185,226,977 | 2 | 0 | Apache-2.0 | 2022-11-22T03:14:23 | 2019-05-06T15:49:09 | Python | UTF-8 | Python | false | false | 2,337 | py | import torch as th
from torch.distributions import Categorical
from .epsilon_schedules import DecayThenFlatSchedule
REGISTRY = {}
class MultinomialActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
self.test_greedy = getattr(args, "test_greedy", True)
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
masked_policies = agent_inputs.clone()
# modify
if self.args.legal_action:
masked_policies[avail_actions == 0.0] = 0.0
self.epsilon = self.schedule.eval(t_env)
if test_mode and self.test_greedy:
picked_actions = masked_policies.max(dim=2)[1]
else:
picked_actions = Categorical(masked_policies).sample().long()
return picked_actions
REGISTRY["multinomial"] = MultinomialActionSelector
class EpsilonGreedyActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        # Assuming agent_inputs is a batch of Q-values shaped [batch, agents, actions]
self.epsilon = self.schedule.eval(t_env)
if test_mode:
# Greedy action selection only
self.epsilon = 0.0
# mask actions that are excluded from selection
masked_q_values = agent_inputs.clone()
# modify
if self.args.legal_action:
masked_q_values[avail_actions == 0.0] = -float("inf") # should never be selected!
random_numbers = th.rand_like(agent_inputs[:, :, 0])
pick_random = (random_numbers < self.epsilon).long()
random_actions = Categorical(avail_actions.float()).sample().long()
picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
return picked_actions
REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
| [
"[email protected]"
]
|