blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
779e7b1fc2bfe837f10a8070b3600f71ae8cdf3a | ece7ba486d29d4bc3e87c2046db2c31140e2d86a | /suitcase/mongo_normalized/tests/tests.py | 75f4046965a77899a78b88195844aeadf0dfc188 | [] | no_license | ke-zhang-rd/suitcase-mongo | 31b97bb13b9e6089248f888a6c33824b835de141 | c938bae589ab2fba301814c846c5d5339eb90fb8 | refs/heads/master | 2020-05-31T10:29:15.458932 | 2019-10-18T17:33:03 | 2019-10-18T17:33:03 | 190,241,607 | 0 | 0 | null | 2019-06-04T16:38:12 | 2019-06-04T16:38:11 | null | UTF-8 | Python | false | false | 451 | py | # Tests should generate (and then clean up) any files they need for testing. No
# binary files should be included in the repository.
from suitcase.mongo_normalized import Serializer
def test_export(db_factory, example_data):
documents = example_data()
metadatastore_db = db_factory()
asset_registry_db = db_factory()
serializer = Serializer(metadatastore_db, asset_registry_db)
for item in documents:
serializer(*item)
| [
"[email protected]"
] | |
475d3709a36d6d7c776027c6f5b21474c5c96e8b | 87b6cae5f0bc49f86735619cda0e676486d3f143 | /tic-toc.py | 14e99adb7b24b660e1f46d8891c695a7ca3cd60b | [] | no_license | nmkolp/Python-scripts | a8dac713fd39c2a19e43aba3a7a4d609661fc64b | a69197f97707853ae68ac74ec0136a3082192ad7 | refs/heads/master | 2020-05-18T02:00:53.038116 | 2019-09-12T20:39:00 | 2019-09-12T20:39:00 | 184,104,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | import copy
def check_win(board):
for y in range(3):
if board[0][y] == board[1][y] == board[2][y] != 0:
return True
for x in range(3):
if board[x][0] == board[x][1] == board[x][2] != 0:
return True
if board[0][0] == board[1][1] == board[2][2] != 0:
return True
if board[0][2] == board[1][1] == board[2][0] != 0:
return True
return False
def check_no_moves_left(board):
for x in range(3):
for y in range(3):
if board[x][y] == 0:
return False
return True
def get_coords(i):
if i < 1 or i > 9:
return False
return [(i - 1) % 3, 2 - (i - 1) // 3]
def print_board(board):
for y in range(3):
for x in range(3):
if board[x][y] == 0:
print("_", end='')
elif board[x][y] == 1:
print("x", end='')
else:
print("o", end='')
if x != 2:
print(" ", end='')
print("")
print("")
def eval_game(board, player):
if check_no_moves_left(board):
return [0]
for x in range(3):
for y in range(3):
if board[x][y] == 0:
nb = copy.deepcopy(board)
nb[x][y] = player
if check_win(nb):
return [player, x, y]
eval_result = eval_game(nb, -player)
if eval_result[0] == player:
return [player, x, y]
if eval_result[0] == 0:
ret_val = [0, x, y]
elif 'ret_val' not in vars():
ret_val = [-player, x, y]
return ret_val
def player_move(board, player):
while True:
inp = input("Enter: ")
if inp.isdigit() and int(inp) != 0:
coords = get_coords(int(inp))
x = coords[0]
y = coords[1]
if board[x][y] == 0:
board[x][y] = player
break
def ai_move(board, player):
eval_result = eval_game(board, player)
x = eval_result[1]
y = eval_result[2]
board[x][y] = player
play_game = True
while play_game:
board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
player = 1
ai_turn = False
while True:
first = input("Play first? (Y/N): ")
if first == "y" or first == "Y":
break
elif first == "n" or first == "N":
ai_turn = True
break
print_board(board)
while True:
if ai_turn:
ai_move(board, player)
else:
player_move(board, player)
print_board(board)
if check_win(board):
if ai_turn:
print("You lost")
else:
print("Congratulations")
break
if check_no_moves_left(board):
print("Draw")
break
ai_turn = not ai_turn
player = -player
print("")
while True:
first = input("Play again? (Y/N): ")
if first == "y" or first == "Y":
break
elif first == "n" or first == "N":
play_game = False
break
| [
"[email protected]"
] | |
54e45b5adf5e30e8719f78c545a9e56a6627a681 | cf84ca819873ec61fcc3411316681b0900de75e8 | /UriOnlineJudge/uri1001.py | f80cb6b915d1a3bc490b7dc643d3471b3b055942 | [] | no_license | gabriellll00/hello-world | 8d563e78be14b006d2064bbd298514eacb1afdb7 | fc258b99f43c70bfd0811db0176a534d026eb83e | refs/heads/main | 2023-07-21T20:08:19.009397 | 2021-09-04T20:37:25 | 2021-09-04T20:37:25 | 388,102,882 | 0 | 0 | null | 2021-07-21T12:21:41 | 2021-07-21T12:02:57 | null | UTF-8 | Python | false | false | 62 | py | a = int(input())
b = int(input())
x = a + b
print(f'X = {x}')
| [
"[email protected]"
] | |
387575509aa4d79b183e9aab89214994f4aa8615 | 31c22696e8fffd9016e2f11a6ac7aa104a17c5f7 | /bitcoin.py | f047394ab01f60c44f5eed79167304643d843784 | [] | no_license | stiipast/bitcoin-analyzer | a33f14a523d14a02855a6ada185bf50103c63775 | f1bec29da55d725ee0424145230348fe1cb669b3 | refs/heads/master | 2020-04-10T13:05:09.769392 | 2018-03-07T18:26:19 | 2018-03-07T18:26:19 | 124,274,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,321 | py | #!/usr/bin/python
import urllib, json, time, sys
import mysql.connector
def obtindre_block(block_index):
# Guardarem els valors en llistes per despres poder utilitzar els valors per fer estadistiques (encara no els he utilitzat)
in_tx=[]
out_tx=[]
fee=[]
temps=[]
conndb = mysql.connector.connect(user='bitcoin', database='bitcoin') #fem la connexio amb la DB
cursor = conndb.cursor() # fem un cursor per a insertar les dades a la DB
data = json.loads(urllib.urlopen("http://blockchain.info/rawblock/" + block_index).read()) # Descarreguem el bloc
# Obtenim la data del block en format llegible
block_date = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(data['time'])))
block_received_time = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(data['received_time'])))
for t in range(len(data["tx"])): # recorrem el bloc, la variable t recorre cada trasaccio
in_tx_temp = 0 # inicialitzem el sumatori del valor dels inputs de la transaccio t
out_tx_temp = 0 # inicialitzem el sumatori del valor dels outputs de la transaccio t
fee_temp = 0
temps_temp = 0
i=0 # variable per a recorrer els inputs
j=0 # variable per a recorrer els outputs
for i in range(len(data['tx'][t]['inputs'])):
if(t!=0):
in_tx_temp=in_tx_temp + data['tx'][t]['inputs'][i]['prev_out']['value'] # sumem al valor de input el nou valor per a cada input
in_tx.append(in_tx_temp)
for j in range(len(data['tx'][t]['out'])):
out_tx_temp = out_tx_temp + data['tx'][t]['out'][j]['value'] # sumem els outputs
out_tx.append(out_tx_temp)
# fee = (in_tx - out_tx) / 100000000.0 # fem la resta per obtindre la diferencia que son les fees i dividim per obtindre el valor en BTC
if(t==0):
fee_temp = out_tx_temp
else:
fee_temp = in_tx_temp - out_tx_temp
fee.append(fee_temp)
temps_temp = data['time'] - data['tx'][t]['time']
temps.append(temps_temp) # Temps en segons que triga la transaccio en fer-se efectiva (temps de bloc - temps de tx)
# print "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s" %(data['block_index'], data['height'], data['hash'], t, in_tx[t], out_tx[t], fee[t], temps[t])
tx_date = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(data['tx'][t]['time'])))
# Construim les dades que introduim a la DB
add_tx = ("INSERT INTO transaccions "
"(block_index, block_date, altura, hash, tx_hash, tx_index, relayed_by, n_inputs, input, n_outputs, output, tx_date, fee, temps) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
data_tx = (data['block_index'], block_date, data['height'], data['hash'], data['tx'][t]['hash'], t, data['tx'][t]['relayed_by'], len(data['tx'][t]['inputs']), in_tx[t], len(data['tx'][t]['out']), out_tx[t], tx_date, fee[t], temps[t])
cursor.execute(add_tx, data_tx)
# Una volta hem fet totes les tx del block enviem les dades a la DB i tamquem el cursor i la connexio
add_block = ("INSERT INTO blocks "
"(block_index, block_date, block_received_time, height, hash, bits, n_tx, fee, size, main_chain, relayed_by) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
data_block = (data['block_index'], block_date, block_received_time, data['height'], data['hash'], data['bits'], data['n_tx'], data['fee'], data['size'], data['main_chain'], data['relayed_by'])
cursor.execute(add_block, data_block)
conndb.commit()
cursor.close()
conndb.close()
return data['prev_block'] # Tornem el hash del bloc anterior al actual
# Cos principal del programa
if (len(sys.argv)) < 2:
latest_block = json.loads(urllib.urlopen("http://blockchain.info/latestblock").read())
block_index=str(latest_block["block_index"]) # Obtenim el index del ultim bloc generat
else:
if (len(sys.argv[1])) != 64:
print "El hash es incorrecte"
exit()
else:
block_index = sys.argv[1]
print "Block_index \t Altura \t Hash \t Tx_Index \t input \t output \t fee \t temps"
z = 0
while z < 100: #obtenim els 100 primers blocks de la cadena
block_index = obtindre_block(block_index)
z += 1 | [
"[email protected]"
] | |
ddc32b1926560d046349ee35ff5707643abd8afe | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/_exercises/_templates/temp/Mastering GUI Programming with Python/Chapter 3 Handling Events with Signals and Slots/signal_slots_demo.py | f79d2febefd50d50434b21a86eb7d099cee6be09 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,488 | py | # ______ ___
# ____ ? ______ ?W.. __ qtw
# ____ ? ______ ?C.. __ qtc
#
#
# c_ MainWindow ?.?W..
#
# ___ -
# s_. -
# sL.. ?.?VBL..
#
# # connecting a signal to a slot
# quitbutton _ ?.?PB.. Quit
# ?.c__.c.. cl..
# la__ .aW.. ?
#
# # connecting a signal with data to a slot that receives data
# entry1 _ ?.?LE..
# entry2 _ ?.?LE..
# la__ .aW.. ?
# la__ .aW.. ?
# _1.tC...c.. _2.sT.
#
# # connecting a signal to a python callable
# _2.tC...c.. pr..
#
# # Connecting a signal to another signal
# _1.eF__.c.. l___ print editing finished
# _2.rP__.c.. _1.eF__
#
# # This call will fail, because the signals have different argument types
# #self.entry1.textChanged.connect(self.quitbutton.clicked)
#
# # This won't work, because of signal doesn't send enough args
# badbutton _ ?.?PB.. Bad
# la__ .aW.. ?
# ?.c__.c.. n_a..
#
# # This will work, even though the signal sends extra args
# goodbutton _ ?.?PB.. Good
# la__ .aW.. ?
# ?.c__.c.. n_a..
#
#
# s..
#
# ___ needs_args arg1, arg2, arg3
# p..
#
# ___ no_args
# print('I need no arguments')
#
# __ ______ __ ______
# app _ ?.?A.. ___.a..
# # it's required to save a reference to MainWindow.
# # if it goes out of scope, it will be destroyed.
# mw _ ?
# ___.e.. ?.e..
| [
"[email protected]"
] | |
5705fd2fedee9caeaeaa41e9e65f89a975c95792 | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/convolutional.py | 1688b79891c2bcd3cce1b6bb7355c216736014a3 | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 182 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/convolutional.py | [
"[email protected]"
] | |
388a6eb4b8b486a5c9c706692097b3b4c38187c7 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/pandas/_config/display.py | 57b7af184346cd2f68442d22a2bd7a489047ecad | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f6ba130797f4f1ce2395562efa48f788ebd3a352e26f7c79209f476a3d300866
size 1756
| [
"[email protected]"
] | |
b35ccc9994ce54f39ce1781c925b783dfcee3c12 | e20e027fc4bc03bdcda6c73a77e07eab7ce9d4e9 | /Numpy Assignment 1.py | 38490e0ef83da9c619c925fad6a64132fef4e599 | [] | no_license | aparna31ar/Numpy-Assignment-1 | 47eae1bbe741e3e2cbfb439aa5c761b552eb85fe | 2d674587a85470a841d41d0335120902fbdcd566 | refs/heads/main | 2023-07-17T14:13:29.917923 | 2021-08-30T19:21:40 | 2021-08-30T19:21:40 | 401,460,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!/usr/bin/env python
# coding: utf-8
# ### Q1. Use numpy to generate array of 25 random numbers sampled from a standard normal distribution
#
# In[4]:
import numpy as np
a=np.random.normal(0,1,25)
print("25 random numbers from a standard normal distribution:")
print(a)
# ### Q2. Create a random vector of size 30 and find the mean value.
#
# In[11]:
import numpy as np
a=np.random.seed(8)
a=np.random.rand(30)
a
# ### Q3. Insert 1 to 100 numbers in a numpy array and reshape it to 10*10 matrix.
#
# In[25]:
import numpy as np
a = np.arange(1,101)
a.reshape((10,10))
# ### Q4. Create a 10x10 array with random values and find the minimum and maximum values.
# In[49]:
import numpy as np
a=np.random.seed(8)
a = np.random.randint(100,size=(10,10))
print("The array of 10 x 10 matrix is:","\n",a)
print("The minimum value is:", np.min(a))
print("The maximum value is:", np.max(a))
# ### Q5. Find Dot product of two arrays
#
# f = np.array([1,2])
#
# g = np.array([4,5])
#
#
# In[50]:
f = np.array([1,2])
g = np.array([4,5])
print(f)
print(g)
np.dot(f,g)
# ### 6) Concatenate following arrays along axis=0
#
# x=np.array([[1,2],
# [3,4]])
# y=np.array([[5,6]])
#
# In[54]:
x=np.array([[1,2],
[3,4]])
y=np.array([[5,6]])
np.concatenate((x,y),axis=0)
# ### 7) How to get the common items between two python NumPy arrays?
# a = np.array([1,2,3,2,3,4,3,4,5,6])
# b = np.array([7,2,10,2,7,4,9,4,9,8])
#
# In[55]:
a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
np.intersect1d(a,b)
# ### Q8. Sort the numpy array:
#
# arr = np.array([10,5,8,4,7,2,3,1])
# In[56]:
arr = np.array([10,5,8,4,7,2,3,1])
np.sort(arr)
# In[ ]:
| [
"[email protected]"
] | |
1357ba73d00221123a4df957e5fb2229a2b6f843 | 33c9398eb53301cc3e3a71e29f610e8ab0fc8c55 | /colorann.py | af7da25012968e3b99ae9f4fbe8414174e4dcffd | [] | no_license | denzel-bogues/python-color-project | 2cc45acf358812c798a607f8de73c7e0e4067113 | c510512b4af73f544fb9859a483791f0deef649c | refs/heads/master | 2020-06-26T22:05:04.605705 | 2019-08-13T23:53:17 | 2019-08-13T23:53:17 | 199,769,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | import sys
import pandas as p
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import AppWindow
AppWindow.call_ui()
data_read = p.read_csv("colors.csv", delimiter = ',', names=['Color names', 'Hex', 'R', 'G', 'B',])
# data_red = data_read[['R', 'G', 'B']]
R = input('Enter red value ')
G = input('Enter green value ')
B = input('Enter blue value ')
userdata = [R, G, B]
user_df = p.DataFrame(userdata)
in_read = p.DataFrame.transpose(p.DataFrame(user_df))
in_read.columns = ['R', 'G', 'B']
in_read['R'] = in_read['R'].astype(int)
in_read['G'] = in_read['G'].astype(int)
in_read['B'] = in_read['B'].astype(int)
desired_df = p.merge(data_read, in_read, on=['R', 'G', 'B'], how='inner')
print(desired_df['Color names'])
"""
print(in_read)
print(is_string_dtype(in_read['G']))
print(is_numeric_dtype(in_read['G']))
print(p.merge(data_read, in_read, on=['R', 'G', 'B'], how='inner'))
"""
| [
"[email protected]"
] | |
47b129f352e4fa6c43d2569a27328004ab5b8e7f | 9a6ff88fb3bf3f69ade803105ee9f401ef57b11f | /Lab. 9/Лаб.9 Завд. 2.py | 0003202d2b180beaab4707e490b9b8ca49bf1ebe | [] | no_license | IvanyukAndrew/Labs | 4647ce455742ed12a96bb132c48350e96ce636ee | 4c358ebb7072444229f161579f30d6080e7ba0b0 | refs/heads/main | 2023-02-05T10:48:41.189068 | 2021-01-03T09:18:41 | 2021-01-03T09:18:41 | 320,326,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | array_of_CookDict = []
def serch(choose, criterial):
if choose == 1:
for i in range(len(array_of_CookDict)):
if array_of_CookDict[i]["Name_of_dish"] == criterial:
print(array_of_CookDict[i])
if choose == 2:
for i in range(len(array_of_CookDict)):
if array_of_CookDict[i]["Number_of_components"] == criterial:
print(array_of_CookDict[i])
if choose == 3:
for i in range(len(array_of_CookDict)):
if array_of_CookDict[i]["List_of_components"] == criterial:
print(array_of_CookDict[i])
if choose == 4:
for i in range(len(array_of_CookDict)):
if array_of_CookDict[i]["Time_for_cook"] == criterial:
print(array_of_CookDict[i])
while True:
print("\n")
print("1. Вивести всю інформацію\n"
"2. Вести дані про страву\n"
"3. Кінець\n")
choose = int(input("Напишітть цифру:"))
if choose == 1:
for i in range(len(array_of_CookDict)):
print(array_of_CookDict[i])
if choose == 2:
Name_of_dish = input("Name of dish: ")
Number_of_components = int(input("Number of components: "))
List_of_components = input("List of components: ")
Time_for_cook = int(input("Time for cook: "))
CookDict = {"Name_of_dish": Name_of_dish, "Number_of_components": Number_of_components,
"List_of_components": List_of_components, "Time_for_cook": Time_for_cook}
array_of_CookDict.append(CookDict)
elif choose == 3:
break
else:
print("Ведіть коректне число\n")
| [
"[email protected]"
] | |
3a40a1e42f60c1c9f14a8869461d90cc62d7f560 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /tests/test_rand_affine.py | 1e1a23bc0915f7025bb7fdc388ed9593b196b866 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 5,638 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import RandAffine
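# Each test case is [RandAffine constructor kwargs, call-time kwargs, expected output array/tensor].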
TEST_CASES = [
[
dict(as_tensor_output=False, device=None),
{"img": torch.arange(27).reshape((3, 3, 3))},
np.arange(27).reshape((3, 3, 3)),
],
[
dict(as_tensor_output=False, device=None, spatial_size=-1),
{"img": torch.arange(27).reshape((3, 3, 3))},
np.arange(27).reshape((3, 3, 3)),
],
[
dict(as_tensor_output=False, device=None),
{"img": torch.arange(27).reshape((3, 3, 3)), "spatial_size": (2, 2)},
np.array([[[2.0, 3.0], [5.0, 6.0]], [[11.0, 12.0], [14.0, 15.0]], [[20.0, 21.0], [23.0, 24.0]]]),
],
[
dict(as_tensor_output=True, device=None),
{"img": torch.ones((1, 3, 3, 3)), "spatial_size": (2, 2, 2)},
torch.ones((1, 2, 2, 2)),
],
[
dict(as_tensor_output=True, device=None, spatial_size=(2, 2, 2), cache_grid=True),
{"img": torch.ones((1, 3, 3, 3))},
torch.ones((1, 2, 2, 2)),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
as_tensor_output=True,
padding_mode="zeros",
spatial_size=(2, 2, 2),
device=None,
),
{"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"},
torch.tensor([[[[0.3658, 1.0000], [1.0000, 1.0000]], [[1.0000, 1.0000], [1.0000, 0.9333]]]]),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
as_tensor_output=True,
padding_mode="zeros",
spatial_size=(2, 2, 2),
cache_grid=True,
device=None,
),
{"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"},
torch.tensor([[[[0.3658, 1.0000], [1.0000, 1.0000]], [[1.0000, 1.0000], [1.0000, 0.9333]]]]),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
scale_range=[0.1, 0.2],
as_tensor_output=True,
device=None,
),
{"img": torch.arange(64).reshape((1, 8, 8)), "spatial_size": (3, 3)},
torch.tensor([[[18.7362, 15.5820, 12.4278], [27.3988, 24.2446, 21.0904], [36.0614, 32.9072, 29.7530]]]),
],
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
scale_range=[0.1, 0.2],
spatial_size=(3, 3),
cache_grid=True,
as_tensor_output=True,
device=None,
),
{"img": torch.arange(64).reshape((1, 8, 8))},
torch.tensor([[[18.7362, 15.5820, 12.4278], [27.3988, 24.2446, 21.0904], [36.0614, 32.9072, 29.7530]]]),
],
]
ARR_NUMPY = np.arange(9 * 10).reshape(1, 9, 10)
ARR_TORCH = torch.Tensor(ARR_NUMPY)
TEST_CASES_SKIPPED_CONSISTENCY = []
for im in (ARR_NUMPY, ARR_TORCH):
for as_tensor_output in (True, False):
for in_dtype_is_int in (True, False):
TEST_CASES_SKIPPED_CONSISTENCY.append((im, as_tensor_output, in_dtype_is_int))
class TestRandAffine(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_rand_affine(self, input_param, input_data, expected_val):
g = RandAffine(**input_param)
g.set_random_state(123)
result = g(**input_data)
if input_param.get("cache_grid", False):
self.assertTrue(g._cached_grid is not None)
self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
if isinstance(result, torch.Tensor):
np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
else:
np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
def test_ill_cache(self):
with self.assertWarns(UserWarning):
RandAffine(cache_grid=True)
with self.assertWarns(UserWarning):
RandAffine(cache_grid=True, spatial_size=(1, 1, -1))
@parameterized.expand(TEST_CASES_SKIPPED_CONSISTENCY)
def test_skipped_transform_consistency(self, im, as_tensor_output, in_dtype_is_int):
t1 = RandAffine(prob=0, as_tensor_output=as_tensor_output)
t2 = RandAffine(prob=1, spatial_size=(10, 11), as_tensor_output=as_tensor_output)
# change dtype to int32 or float32
if in_dtype_is_int:
im = im.astype("int32") if isinstance(im, np.ndarray) else im.int()
else:
im = im.astype("float32") if isinstance(im, np.ndarray) else im.float()
out1 = t1(im)
out2 = t2(im)
# check same type
self.assertEqual(type(out1), type(out2))
# check matching dtype
self.assertEqual(out1.dtype, out2.dtype)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
1aef2eefec3ad88d9b7f8e6eeba325b3603c0c46 | 603488a6cde44b30732260010afe12b089d01c3d | /useful_functions.py | f355b0c3bc4eb97f054038f97c85f98621d7ff92 | [] | no_license | Zahra-Kader/ksz_21cm_signal | 172707ccda3aa4b61b7de4e046c8af9e2d3c034d | 897a5e36a493f0282fb1d72acee7b0425e0f4a41 | refs/heads/master | 2021-07-09T05:22:18.673237 | 2021-03-26T18:14:01 | 2021-03-26T18:14:01 | 231,374,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,009 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 12:53:47 2018
@author: zahra
"""
import distance as cd
from scipy.interpolate import interp1d
import numpy as np
import perturbation as cp
import density as den
import constants as cc
import matplotlib.pyplot as plt
import scipy as sp
import pylab
from matplotlib.colors import LogNorm
#import perturbation as cp
b_HI=1.0
omega_HI=0.8e-3
n_points=100
nu_21=1420.
cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'omega_k_0':0.0, 'h':0.67, 'omega_b_0' : 0.049, 'omega_n_0' : 0.0,
'N_nu' : 0, 'n' : 1.0, 'sigma_8' : 0.9, 'baryonic_effects' : False,'X_H':.75}
H0=cc.H100_s*cosmo['h']
#z=np.logspace(-10,np.log(2000),2000)
#z=np.linspace(1e-4,10,n_points)
z=np.geomspace(1e-4,10,n_points)
kabs,P= np.genfromtxt('/home/zahra/python_scripts/kSZ_21cm_signal/camb_63347152_matterpower_z0_16000_kmax.dat', dtype=float,
unpack=True)
#interpolate the matter power spec
Mps_interpf = interp1d(kabs, P, bounds_error=False,fill_value="extrapolate")
k=np.linspace(1.e-4,10.,10)
Mps_interpf_div_ksq=interp1d(kabs, P/kabs**2, bounds_error=False,fill_value=0.)
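# zed(chi) inverts the comoving-distance relation chi(z) by interpolation,
# so a redshift can be recovered from a given comoving distance.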
def zed(chi_in):
chi_full = cd.comoving_distance(z, **cosmo)
f=interp1d(chi_full,z,bounds_error=False,fill_value=0.)
return f(chi_in)
def chi(z):
chi_full = cd.comoving_distance(z, **cosmo)
return chi_full
def H(z):
H=cd.hubble_z(z,**cosmo)
return H
def D_1(z):
D_1=cp.fgrowth(z,cosmo['omega_M_0'],0)
return D_1
#plt.plot(z,D_1(z))
#plt.show()
chi_m=chi(1100)
chi_array=np.linspace(0,chi_m,2000)
#plt.plot(chi_array,D_1(zed(chi_array)))
#plt.show()
def f(z):
f=(den.omega_M_z(z,**cosmo))**(cc.gamma)
return f
#plt.plot(den.omega_M_z(z,**cosmo),f(z))
#plt.show()
def r(z):
r=cc.c_light_Mpc_s*(1+z)**2/H(z)
return r
def kpar(y,z):
kpar=y/r(z)
return kpar
def T_mean(z):
T_mean=566.*cosmo['h']*H0*omega_HI*(1+z)**2/(H(z)*0.003) #\mu K, microkelvin
return T_mean
def kpar_min(z,delta_z):
z_max=z+delta_z
z_min=z-delta_z
nu_min=nu_21/(1+z_max)
nu_max=nu_21/(1+z_min)
delta_nu_dimless=(nu_max-nu_min)/nu_21
return 2.*np.pi/r(z)/delta_nu_dimless
def ell_lims(z,Dmin,Dmax): #D=Dmin for kperp_min and D=Dmax for kperp_max
nu=nu_21/(1+z)*1.e6
c_metres=cc.c_light_cm_s/100.
lam=c_metres/nu
u_min=Dmin/lam
u_max=Dmax/lam
return 2.*np.pi*u_min, 2.*np.pi*u_max
def P_delta_delta(kperp,kpar):
Kperp,Kpar=np.meshgrid(kperp,kpar)
k=np.sqrt(Kpar**2+Kperp**2)
return k**3*Mps_interpf(k)
def P_vv(kperp,kpar,z):
Kperp,Kpar=np.meshgrid(kperp,kpar)
k=np.sqrt(Kpar**2+Kperp**2)
mu_k=Kpar/k
Pvv=f(z)**2*H(z)**2*Mps_interpf(k)*mu_k**2/((1+z)**2*k**2)/cc.c_light_Mpc_s**2
return k**3*Pvv
#return k**3*Mps_interpf(k)/k**4-----------USING THIS GIVES THE SAME AMPLITUDES THAT UE LI HAD IN HIS PAPER
def P_delta_v(kperp,kpar,z):
Kperp,Kpar=np.meshgrid(kperp,kpar)
k=np.sqrt(Kpar**2+Kperp**2)
mu_k=Kpar/k
Pdeltav=f(z)*H(z)*Mps_interpf(k)*mu_k/((1+z)*k)/cc.c_light_Mpc_s
return k**3*Pdeltav
kpar=np.geomspace(5.e-3,1.,30)
kperp=np.geomspace(5.e-3,1.,30)
#k=np.sqrt(kpar**2+kperp**2)
#k=np.linspace(1.e-2,110,100)
#P=P_delta_delta(k)*P_vv(k,1.)+P_delta_v(k,1.)**2
#plt.semilogy(k,P)
#plt.plot(k,P_vv(k,1.))
#plt.plot(k,P_delta_v(k,1.))
'''
#plt.show()
print (P_delta_delta(kperp,kpar).max())
print (P_vv(kperp,kpar,1.).max())
pylab.pcolormesh(kperp,kpar,P_delta_delta(kperp,kpar),cmap='Blues',norm=LogNorm()) ; cbar=plt.colorbar();
plt.tick_params(axis='both', which='major');
#pylab.xlim([np.min(kperp),np.max(kperp)])
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$k_\perp$',fontsize=12); plt.ylabel(r'$k_\parallel$',fontsize=12)
plt.title(r'$P_{\delta \delta}$')
pylab.show()
pylab.pcolormesh(kperp,kpar,P_vv(kperp,kpar,1.),cmap='Blues',norm=LogNorm()) ; cbar=plt.colorbar()
#pylab.xlim([np.min(kperp),.5])
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$k_\perp$',fontsize=12); plt.ylabel(r'$k_\parallel$',fontsize=12)
plt.title(r'$P_{vv}$')
pylab.show()
pylab.pcolormesh(kperp,kpar,P_delta_v(kperp,kpar,1.),cmap='Blues',norm=LogNorm()) ; cbar=plt.colorbar()
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$k_\perp$',fontsize=12); plt.ylabel(r'$k_\parallel$',fontsize=12)
plt.title(r'$P_{\delta v}$')
pylab.show()
'''
'''
plt.loglog(k,k**3*Mps_interpf(k),label=r'$\rm{P_{\delta \delta}}$')
plt.loglog(k,k**3*P_delta_v(k,1.),label=r'$\rm{P_{\delta v}}$')
plt.loglog(k,k**3*P_vv(k,1.),label=r'$\rm{P_{vv}}$')
plt.xlabel('k')
plt.ylabel(r'$\rm{k^3 P(k,z=1)}$')
plt.legend()
plt.show()
'''
#plt.plot(z,T_mean(z))
#plt.xlabel('z')
#plt.ylabel('T(z)')
#plt.show()
##print (z)
'''
def chi_flat():
for i in enumerate(z):
chi =2*(1-(1/np.sqrt(1+z)))/H0
return chi
#chi_f=chi_flat()
##print ("Comoving distance to z is %.1f Mpc" % (chi))
##print (chi)
##print (z)
#return res
#result=zed()
##plt.loglog(chi,b(chi))
##plt.show()
##plt.loglog(chi_f,z)
##plt.show()
##print (b(chi))
#f=cp.fgrowth(b(chi), omega_M_0=0.27, unnormed=False)
##print (f)
##plt.loglog(b(chi),f)
'''
delta_z=2.
z_r=10.
z_ri=z_r-delta_z/2
z_rf=z_r+delta_z/2
chi_ri=chi(z_ri)
chi_rf=chi(z_rf)
delta_chi=chi_rf-chi_ri
r_H=2*cc.c_light_Mpc_s/(3*H0*np.sqrt(cosmo['omega_M_0'])*(1+z_r)**1.5)
#r_H=cd.light_travel_distance(z_r,0.0,**cosmo)
chi_r=chi(z_r)
theta=r_H/cd.angular_diameter_distance(z_r,0,**cosmo)
#print (theta)
import reionization as cr
def tau_ind(z):
tau=cr.integrate_optical_depth(z,x_ionH=1.0, x_ionHe=1.0, **cosmo)
return tau
def tau_inst(z):
tau_r=cr.optical_depth_instant(z, x_ionH=1.0, x_ionHe=1.0, z_rHe = None,return_tau_star=False, verbose=0, **cosmo)
return tau_r
#print (tau_r)
#cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'omega_k_0':0.0, 'h':0.72, 'omega_b_0' : 0.045, 'omega_n_0' : 0.0,
# 'N_nu' : 0, 'n' : 1.0, 'sigma_8' : 0.9, 'baryonic_effects' : False}
#I=cr.ionization_from_collapse(z=6, coeff_ion=1, temp_min=1e4, passed_min_mass = False,**cosmo)
| [
"[email protected]"
] | |
bae7db4680fc0354a644d46c840930244f86ed2a | b10b88230493c89cba76077c1593ca035dc1b2b2 | /NaiveBayes.py | 9f5ad4ad59fb08bd6f1e48faf2d160b51a257a07 | [] | no_license | rohandeb24/Text-Classification | ebea371bcd34a95375273ee41b5654251dec671e | 366a5622373f6f4dad4cfd47aab2203912d6c001 | refs/heads/master | 2020-03-23T18:27:06.333094 | 2018-07-22T16:21:58 | 2018-07-22T16:21:58 | 141,909,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
import Data
x_train, x_test, y_train, y_test = Data.process()
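# Two Multinomial Naive Bayes classifiers are trained on the same split:
# model1 on tf-idf features (vec1) and model2 on bag-of-words counts (vec2).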
vec1 = Data.tfidf(x_train)
x_train1 = vec1.transform(x_train)
model1 = MultinomialNB()
model1.fit(x_train1,y_train)
vec2 = Data.bag_of_words(x_train)
x_train2 = vec2.transform(x_train)
model2 = MultinomialNB()
model2.fit(x_train2,y_train)
def test(x=x_test):
    x_test1 = vec1.transform(x)
    x_test2 = vec2.transform(x)
pred1 = model1.predict(x_test1)
pred2 = model2.predict(x_test2)
return pred1,pred2
def accuracy(predictions, y=y_test):
    return accuracy_score(y, predictions)
def train_outputs():
pred1 = model1.predict(x_train1)
pred2 = model2.predict(x_train2)
return pred1,pred2
def predict(x):
x = vec1.transform(x)
pred1 = model1.predict(x)
pred2 = model2.predict(x)
return pred1,pred2
| [
"[email protected]"
] | |
4290f33117641c516843aeaf64025823ad951026 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/f8052e4261238ff6c93465b3f0d0f22457f127ce-<container_run>-fix.py | d32a173f5a709bd873f8aaaa81b4fc29a4a7aeb0 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | def container_run(platform: str, nvidia_runtime: bool, docker_registry: str, shared_memory_size: str, local_ccache_dir: str, command: List[str], cleanup: Cleanup, dry_run: bool=False) -> int:
'Run command in a container'
container_wait_s = 600
environment = {
'CCACHE_MAXSIZE': '500G',
'CCACHE_TEMPDIR': '/tmp/ccache',
'CCACHE_DIR': '/work/ccache',
'CCACHE_LOGFILE': '/tmp/ccache.log',
}
jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG']
environment.update({k: os.environ[k] for k in jenkins_env_vars if (k in os.environ)})
environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if (k in os.environ)})
tag = get_docker_tag(platform=platform, registry=docker_registry)
mx_root = get_mxnet_root()
local_build_folder = buildir()
os.makedirs(local_build_folder, exist_ok=True)
os.makedirs(local_ccache_dir, exist_ok=True)
logging.info('Using ccache directory: %s', local_ccache_dir)
docker_client = docker.from_env()
docker_cmd_list = [get_docker_binary(nvidia_runtime), 'run', '--cap-add', 'SYS_PTRACE', '--rm', '--shm-size={}'.format(shared_memory_size), '-v', '{}:/work/mxnet'.format(mx_root), '-v', '{}:/work/build'.format(local_build_folder), '-v', '{}:/work/ccache'.format(local_ccache_dir), '-u', '{}:{}'.format(os.getuid(), os.getgid()), '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']), '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']), '-e', 'CCACHE_DIR={}'.format(environment['CCACHE_DIR']), '-e', 'CCACHE_LOGFILE={}'.format(environment['CCACHE_LOGFILE']), '-ti', tag]
docker_cmd_list.extend(command)
docker_cmd = ' \\\n\t'.join(docker_cmd_list)
logging.info('Running %s in container %s', command, tag)
logging.info('Executing the equivalent of:\n%s\n', docker_cmd)
ret = 0
if (not dry_run):
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
runtime = None
if nvidia_runtime:
runtime = 'nvidia'
container = docker_client.containers.run(tag, runtime=runtime, detach=True, command=command, shm_size=shared_memory_size, user='{}:{}'.format(os.getuid(), os.getgid()), cap_add='SYS_PTRACE', volumes={
mx_root: {
'bind': '/work/mxnet',
'mode': 'rw',
},
local_build_folder: {
'bind': '/work/build',
'mode': 'rw',
},
local_ccache_dir: {
'bind': '/work/ccache',
'mode': 'rw',
},
}, environment=environment)
try:
logging.info('Started container: %s', trim_container_id(container.id))
cleanup.add_container(container)
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
stream = container.logs(stream=True, stdout=True, stderr=True)
sys.stdout.flush()
for chunk in stream:
sys.stdout.buffer.write(chunk)
sys.stdout.buffer.flush()
sys.stdout.flush()
stream.close()
try:
logging.info('Waiting for status of container %s for %d s.', trim_container_id(container.id), container_wait_s)
wait_result = container.wait(timeout=container_wait_s)
logging.info('Container exit status: %s', wait_result)
ret = wait_result.get('StatusCode', 200)
except Exception as e:
logging.exception(e)
ret = 150
try:
logging.info('Stopping container: %s', trim_container_id(container.id))
container.stop()
except Exception as e:
logging.exception(e)
ret = 151
try:
logging.info('Removing container: %s', trim_container_id(container.id))
container.remove()
except Exception as e:
logging.exception(e)
ret = 152
cleanup.remove_container(container)
containers = docker_client.containers.list()
if containers:
logging.info('Other running containers: %s', [trim_container_id(x.id) for x in containers])
except docker.errors.NotFound as e:
logging.info('Container was stopped before cleanup started: %s', e)
return ret | [
"[email protected]"
] | |
bb0fd3227823ae168714b2d609f75a815db3c820 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pynamodb/connection/__init__.pyi | f649861fa13ab72cf0f93c2e820af2c7a7f8dc10 | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | pyi | /home/runner/.cache/pip/pool/b4/87/2e/b11aa30f971bc3d814440e95ea0252de4a2d77aa81c6e6be59eb9449f8 | [
"[email protected]"
] | |
226118c526c576d8edfde2c75a1994b83da6395a | 5c7bd453ac9461062436814db502154da3c38d77 | /scripts/make-release.py | d0e419d5844edda53a841ed631f399086b17a194 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | Earth4Us/flopy | 51a425862035341b03a08643678e2144343a1967 | af42e7827fe053af911efef0f37dcb76dad7e9c0 | refs/heads/develop | 2022-10-05T03:40:42.428197 | 2022-09-08T19:50:23 | 2022-09-08T19:50:23 | 79,733,250 | 0 | 0 | null | 2017-01-22T17:33:07 | 2017-01-22T17:33:06 | null | UTF-8 | Python | false | false | 11,864 | py | #!/usr/bin/python
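# Release helper for FloPy: bumps flopy/version.py and propagates the new
# version, citation, and disclaimer text into README.md, code.json,
# PyPI_release.md, DISCLAIMER.md, and notebook_examples.md.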
import datetime
import json
import os
import subprocess
import sys
from importlib.machinery import SourceFileLoader
# file_paths dictionary has file names and the path to the file. Enter '.'
# as the path if the file is in the root repository directory
file_paths = {
"version.py": "../flopy",
"README.md": "../",
"PyPI_release.md": "../docs",
"code.json": "../",
"DISCLAIMER.md": "../flopy",
"notebook_examples.md": "../docs",
}
pak = "flopy"
# local import of package variables in flopy/version.py
loader = SourceFileLoader("version", os.path.join("..", "flopy", "version.py"))
version_mod = loader.load_module()
# build authors list for Software/Code citation for FloPy
authors = []
for key in version_mod.author_dict.keys():
t = key.split()
author = f"{t[-1]}"
for str in t[0:-1]:
author += f" {str}"
authors.append(author)
approved = """Disclaimer
----------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review, the USGS
reserves the right to update the software as needed pursuant to further analysis
and review. No warranty, expressed or implied, is made by the USGS or the U.S.
Government as to the functionality of the software and related material nor
shall the fact of release constitute any such warranty. Furthermore, the
software is released on condition that neither the USGS nor the U.S. Government
shall be held liable for any damages resulting from its authorized or
unauthorized use.
"""
preliminary = """Disclaimer
----------
This software is preliminary or provisional and is subject to revision. It is
being provided to meet the need for timely best science. The software has not
received final approval by the U.S. Geological Survey (USGS). No warranty,
expressed or implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the fact of release
constitute any such warranty. The software is provided on the condition that
neither the USGS nor the U.S. Government shall be held liable for any damages
resulting from the authorized or unauthorized use of the software.
"""
def get_disclaimer():
# get current branch
branch = get_branch()
if branch.lower().startswith("release") or "master" in branch.lower():
disclaimer = approved
is_approved = True
else:
disclaimer = preliminary
is_approved = False
return is_approved, disclaimer
def get_branch():
branch = None
# determine if branch defined on command line
for argv in sys.argv:
if "master" in argv:
branch = "master"
elif "develop" in argv.lower():
branch = "develop"
if branch is None:
try:
# determine current branch
b = subprocess.Popen(
("git", "status"),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).communicate()[0]
if isinstance(b, bytes):
b = b.decode("utf-8")
for line in b.splitlines():
if "On branch" in line:
branch = line.replace("On branch ", "").rstrip()
except:
msg = "Could not determine current branch. Is git installed?"
raise ValueError(msg)
return branch
def get_version_str(v0, v1, v2):
version_type = (f"{v0}", f"{v1}", f"{v2}")
version = ".".join(version_type)
return version
def get_tag(v0, v1, v2):
tag_type = (f"{v0}", f"{v1}", f"{v2}")
tag = ".".join(tag_type)
return tag
def get_software_citation(version, is_approved):
now = datetime.datetime.now()
sb = ""
if not is_approved:
sb = " — release candidate"
# format author names
line = "["
for ipos, author in enumerate(authors):
if ipos > 0:
line += ", "
if ipos == len(authors) - 1:
line += "and "
sv = author.split()
tauthor = f"{sv[0]}"
if len(sv) < 3:
gname = sv[1]
if len(gname) > 1:
tauthor += f", {gname}"
else:
tauthor += f", {gname[0]}."
else:
tauthor += f", {sv[1][0]}. {sv[2][0]}."
# add formatted author name to line
line += tauthor
# add the rest of the citation
line += (
f", {now.year}, FloPy v{version}{sb}: "
f"U.S. Geological Survey Software Release, {now:%d %B %Y}, "
"https://doi.org/10.5066/F7BK19FH]"
"(https://doi.org/10.5066/F7BK19FH)"
)
return line
def update_version():
name_pos = None
try:
file = "version.py"
fpth = os.path.join(file_paths[file], file)
vmajor = 0
vminor = 0
vmicro = 0
lines = [line.rstrip("\n") for line in open(fpth, "r")]
for idx, line in enumerate(lines):
t = line.split()
if "major =" in line:
vmajor = int(t[2])
elif "minor =" in line:
vminor = int(t[2])
elif "micro =" in line:
vmicro = int(t[2])
elif "__version__" in line:
name_pos = idx + 1
except:
raise OSError("There was a problem updating the version file")
try:
# write new version file
f = open(fpth, "w")
f.write(
(
f"# {pak} version file automatically created "
f"using...{os.path.basename(__file__)}\n"
)
)
f.write(
f"# created on...{datetime.datetime.now():%B %d, %Y %H:%M:%S}\n"
)
f.write("\n")
f.write(f"major = {vmajor}\n")
f.write(f"minor = {vminor}\n")
f.write(f"micro = {vmicro}\n")
f.write('__version__ = f"{major}.{minor}.{micro}"\n')
# write the remainder of the version file
if name_pos is not None:
for line in lines[name_pos:]:
f.write(f"{line}\n")
f.close()
print("Successfully updated version.py")
except:
raise OSError("There was a problem updating the version file")
# update README.md with new version information
update_readme_markdown(vmajor, vminor, vmicro)
# update notebook_examples.md
update_notebook_examples_markdown()
# update code.json
update_codejson(vmajor, vminor, vmicro)
# update PyPI_release.md
update_PyPI_release(vmajor, vminor, vmicro)
def update_codejson(vmajor, vminor, vmicro):
# define json filename
file = "code.json"
json_fname = os.path.join(file_paths[file], file)
# get branch
branch = get_branch()
# create version
version = get_tag(vmajor, vminor, vmicro)
# load and modify json file
with open(json_fname, "r") as f:
data = json.load(f)
# modify the json file data
now = datetime.datetime.now()
sdate = now.strftime("%Y-%m-%d")
data[0]["date"]["metadataLastUpdated"] = sdate
if branch.lower().startswith("release") or "master" in branch.lower():
data[0]["version"] = version
data[0]["status"] = "Production"
else:
data[0]["version"] = version
data[0]["status"] = "Release Candidate"
# rewrite the json file
with open(json_fname, "w") as f:
json.dump(data, f, indent=4)
f.write("\n")
return
def update_readme_markdown(vmajor, vminor, vmicro):
# create disclaimer text
is_approved, disclaimer = get_disclaimer()
# define branch
if is_approved:
branch = "master"
else:
branch = "develop"
# create version
version = get_tag(vmajor, vminor, vmicro)
# read README.md into memory
file = "README.md"
fpth = os.path.join(file_paths[file], file)
with open(fpth, "r") as file:
lines = [line.rstrip() for line in file]
# rewrite README.md
terminate = False
f = open(fpth, "w")
for line in lines:
if "### Version " in line:
line = f"### Version {version}"
if not is_approved:
line += " — release candidate"
elif "[flopy continuous integration]" in line:
line = (
"[](https://github.com/modflowpy/flopy/actions/"
"workflows/commit.yml)".format(branch)
)
elif "[Read the Docs]" in line:
line = (
"[]"
"(https://github.com/modflowpy/flopy/actions/"
"workflows/rtd.yml)".format(branch)
)
elif "[Coverage Status]" in line:
line = (
"[]"
"(https://coveralls.io/github/modflowpy/"
"flopy?branch={0})".format(branch)
)
elif "[Binder]" in line:
# [](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop)
line = (
"[]"
"(https://mybinder.org/v2/gh/modflowpy/flopy.git/"
"{})".format(branch)
)
elif "doi.org/10.5066/F7BK19FH" in line:
line = get_software_citation(version, is_approved)
elif "Disclaimer" in line:
line = disclaimer
terminate = True
f.write(f"{line}\n")
if terminate:
break
f.close()
# write disclaimer markdown file
file = "DISCLAIMER.md"
fpth = os.path.join(file_paths[file], file)
f = open(fpth, "w")
f.write(disclaimer)
f.close()
return
def update_notebook_examples_markdown():
# create disclaimer text
is_approved, disclaimer = get_disclaimer()
# define branch
if is_approved:
branch = "master"
else:
branch = "develop"
# read notebook_examples.md into memory
file = "notebook_examples.md"
fpth = os.path.join(file_paths[file], file)
with open(fpth, "r") as file:
lines = [line.rstrip() for line in file]
# rewrite notebook_examples.md
terminate = False
f = open(fpth, "w")
for line in lines:
if "[Binder]" in line:
# [](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop)
line = (
"[]"
"(https://mybinder.org/v2/gh/modflowpy/flopy.git/"
"{})".format(branch)
)
f.write(f"{line}\n")
f.close()
def update_PyPI_release(vmajor, vminor, vmicro):
# create disclaimer text
is_approved, disclaimer = get_disclaimer()
# create version
version = get_tag(vmajor, vminor, vmicro)
# read README.md into memory
file = "PyPI_release.md"
fpth = os.path.join(file_paths[file], file)
with open(fpth, "r") as file:
lines = [line.rstrip() for line in file]
# rewrite README.md
terminate = False
f = open(fpth, "w")
for line in lines:
if "doi.org/10.5066/F7BK19FH" in line:
line = get_software_citation(version, is_approved)
elif "Disclaimer" in line:
line = disclaimer
terminate = True
f.write(f"{line}\n")
if terminate:
break
f.close()
return
if __name__ == "__main__":
update_version()
get_software_citation("3.1.1", True)
| [
"[email protected]"
] | |
fdd8e9b78b9290cfe113da295706a008023d84a6 | 0280c9cdab7763ef6710e8f7ed6e94740dfda374 | /visualization/Python_Vis/lr_scheduler.py | 602c362be38f2bf6aa4c346a1a52807470e86ccb | [] | no_license | Sandy-Zeng/RandomLR | 73181bbd6d946d1163b7b337524b070285b61f20 | ec0f2ff7bf312c015e54b39815d140b8855ae145 | refs/heads/master | 2020-04-12T08:00:19.208337 | 2019-08-25T14:29:28 | 2019-08-25T14:29:28 | 162,378,477 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,490 | py | import numpy as np
import time
import math
from keras.callbacks import *
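# Random learning-rate samplers used by the schedulers below:
# U draws a rate uniformly in (0, lr * random_range), UA adds a uniform
# perturbation on top of lr, and N adds an |Normal(mu, sigma)|-scaled increment.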
def U(tmp_lr,random_range):
np.random.seed(int(time.time()))
tmp_lr = np.random.random() * tmp_lr * random_range
# tmp_lr = tmp_lr + tmp_lr * np.random.random()
return tmp_lr
def UA(tmp_lr,random_range):
np.random.seed(int(time.time()))
tmp_lr = tmp_lr + tmp_lr * np.random.random() * random_range
return tmp_lr
def N(tmp_lr, mu=4, sigma=1):
np.random.seed(int(time.time()))
tmp_lr_factor = np.random.normal(mu, sigma)
tmp_lr_factor = abs(tmp_lr_factor) * tmp_lr
tmp_lr = tmp_lr + tmp_lr_factor
return tmp_lr
class StepDecay(Callback):
def __init__(self,epochs=200,init_lr=1e-3,distribution_method='N',random_potion=0.3,random_range=10):
super(StepDecay, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
self.count_down = 19
self.count = 0
self.random_lr = init_lr
self.last_lr = init_lr
self.beta = 0.5
def lr_schedule(self,epoch):
#Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs * 0.4
if epoch > self.epochs * 0.9:
lr *= 0.5e-3
left = self.epochs * 0.9
right = self.epochs
elif epoch > self.epochs * 0.8:
lr *= 1e-3
left = self.epochs * 0.8
right = self.epochs * 0.9
elif epoch > self.epochs * 0.6:
lr *= 1e-2
left = self.epochs * 0.6
right = self.epochs * 0.8
elif epoch > self.epochs * 0.4:
lr *= 1e-1
left = self.epochs * 0.4
right = self.epochs * 0.6
if epoch == self.epochs * 0.9+1:
self.last_lr = self.linear_init_lr * 0.5e-3
elif epoch == self.epochs * 0.8+1:
self.last_lr = self.linear_init_lr * 1e-3
elif epoch == self.epochs * 0.6+1:
self.last_lr = self.linear_init_lr * 1e-2
elif epoch == self.epochs * 0.4+1:
self.last_lr = self.linear_init_lr * 1e-1
bounder = left + int((right - left) * self.random_potion)
if epoch < bounder:
print('Bounder:', bounder)
if self.distribution_method == 'U':
# if (epoch - left) < ((right - left)*(self.random_potion/2)):
# adaptive_range = (epoch-left)/float((right - left) * (self.random_potion)/2) * self.random_range + 0.1
# lr = U(lr,adaptive_range)
# else:
# lr = U(lr,self.random_range+0.1)
# adaptive_range = (right - epoch) / float(
# (right - left)) * self.random_range + 0.1
# lr = U(lr, adaptive_range)
lr = U(lr, self.random_range)
# lr = (lr + self.last_lr)/2
lr = self.beta * self.last_lr + (1-self.beta)*lr
self.last_lr = lr
if self.distribution_method == 'UC':
if self.count == 0:
lr = U(lr,self.random_range)
self.random_lr = lr
self.count = self.count_down
else:
lr = self.random_lr
self.count -= 1
if self.distribution_method == 'N':
lr = N(tmp_lr=lr,mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class StepDecayPost(Callback):
def __init__(self, epochs=200, init_lr=1e-3, distribution_method='N', random_portion=0.3, random_range=10):
super(StepDecayPost, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_portion = random_portion
self.random_range = random_range
self.count_down = 19
self.count = 0
self.random_lr = init_lr
def lr_schedule(self,epoch):
#Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs * 0.4
if epoch > self.epochs * 0.9:
lr *= 0.5e-3
left = self.epochs * 0.9
right = self.epochs
elif epoch > self.epochs * 0.8:
lr *= 1e-3
left = self.epochs * 0.8
right = self.epochs * 0.9
elif epoch > self.epochs * 0.6:
lr *= 1e-2
left = self.epochs * 0.6
right = self.epochs * 0.8
elif epoch > self.epochs * 0.4:
lr *= 1e-1
left = self.epochs * 0.4
right = self.epochs * 0.6
bounder = left + int((right - left) * self.random_portion)
if epoch < bounder and epoch>self.epochs*0.4:
print('Bounder:', bounder)
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'UA':
lr = UA(lr,self.random_range)
if self.distribution_method == 'UC':
if self.count == 0:
lr = U(lr,self.random_range)
self.random_lr = lr
self.count = self.count_down
else:
lr = self.random_lr
self.count -= 1
if self.distribution_method == 'N':
lr = N(lr)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class BatchRLR(Callback):
def __init__(self,epochs=200,init_lr=1e-3,distribution_method='N',random_potion=0.3,random_range=10):
super(BatchRLR, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
self.count_down = 19
self.count = 0
self.last_lr = init_lr
self.beta = 0.7
self.base_lr = init_lr
def lr_schedule(self,batch):
#Learning Rate Schedule
lr = self.base_lr
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
lr = self.beta * self.last_lr + (1-self.beta) * lr
if self.distribution_method == 'N':
            lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
return lr
def on_batch_begin(self, batch, logs=None):
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(batch=batch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_begin(self, epoch, logs=None):
if epoch > self.epochs * 0.9:
self.base_lr *= 0.5e-3
elif epoch > self.epochs * 0.8:
self.base_lr *= 1e-3
elif epoch > self.epochs * 0.6:
self.base_lr *= 1e-2
elif epoch > self.epochs * 0.4:
self.base_lr *= 1e-1
if epoch == self.epochs * 0.9 + 1:
self.last_lr = self.linear_init_lr * 0.5e-3
elif epoch == self.epochs * 0.8 + 1:
self.last_lr = self.linear_init_lr * 1e-3
elif epoch == self.epochs * 0.6 + 1:
self.last_lr = self.linear_init_lr * 1e-2
elif epoch == self.epochs * 0.4 + 1:
self.last_lr = self.linear_init_lr * 1e-1
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class Constant(Callback):
def __init__(self,epochs=200,init_lr=1e-3,distribution_method='N',random_potion=0.3,random_range=10):
super(Constant, self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
def lr_schedule(self,epoch):
#Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs
bounder = left + int((right - left) * self.random_potion)
if epoch < bounder:
print('Bounder:', bounder)
if self.distribution_method == 'U':
lr = U(lr,self.random_range)
if self.distribution_method == 'N':
lr = N(lr,mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class DenseNetSchedule(Callback):
def __init__(self,epochs=300,init_lr=1e-3,distribution_method='N',random_range=10,random_potion=0.3):
super(DenseNetSchedule,self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_range = random_range
self.random_potion = random_potion
def lr_schedule(self,epoch):
# Learning Rate Schedule
lr = self.linear_init_lr
left = 0
right = self.epochs * 0.5
if epoch >= self.epochs * 0.75:
lr *= 1e-2
left = self.epochs * 0.75
right = self.epochs
elif epoch >= self.epochs * 0.5:
lr *= 1e-1
left = self.epochs * 0.5
right = self.epochs * 0.75
bounder = left + int((right - left) * self.random_potion)
if epoch < bounder and epoch>= self.epochs*0.5:
print('Bounder:', bounder)
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
# lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class Warm_Start_Scheduler(Callback):
def __init__(self,init_lr=1e-3,Te=10,multFac=2,distribution_method='N',random_range=10,random_potion=0.5,epochs=200):
super(Warm_Start_Scheduler,self).__init__()
self.Te = Te
self.tt = 0
self.t0 = math.pi / 2.0
self.TeNext = Te
self.multFactor = multFac
self.init_lr = init_lr
self.distribution_method = distribution_method
self.random_range = random_range
self.random_potion = random_potion
self.epochs = epochs
self.iscycle = True
self.last_lr = init_lr
def lr_schedule(self,epoch):
def WRSGN(epoch, tmp_lr):
dt = 2.0 * math.pi / float(2.0 * self.Te)
self.tt = self.tt + float(dt)
if self.tt >= math.pi:
self.tt = self.tt - math.pi
curT = self.t0 + self.tt
new_lr = tmp_lr * (1.0 + math.sin(curT)) / 2.0 # lr_min = 0, lr_max = lr
if (epoch + 1 == self.TeNext): # time to restart
self.tt = 0 # by setting to 0 we set lr to lr_max, see above
self.Te = self.Te * self.multFactor # change the period of restarts
self.TeNext = self.TeNext + self.Te # note the next restart's epoch
if self.TeNext > self.epochs:
self.iscycle = False
self.last_lr = new_lr
return new_lr
lr = self.init_lr
if self.iscycle:
lr = WRSGN(epoch, lr)
else:
lr = self.last_lr
if epoch < self.epochs * self.random_potion and epoch>80 and epoch<130:
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
class Exp(Callback):
def __init__(self,epochs=200,init_lr=1e-3,decay_rate=0.96,decay_step=1000,distribution_method='N',random_potion=0.3,random_range=10):
super(Exp,self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_potion = random_potion
self.random_range = random_range
self.decay_rate = decay_rate
self.global_step = 0.
self.decay_step = decay_step
self.history = {}
self.israndom = False
def lr_schedule(self):
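        # Exponential decay per batch: lr = init_lr * decay_rate ** floor(global_step / decay_step),
        # optionally perturbed by the U/N distribution while self.israndom is set (epochs 81-129).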
lr = self.linear_init_lr
lr = lr * math.pow(self.decay_rate,math.floor(self.global_step/ self.decay_step))
if self.israndom == True:
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
# print('Learning rate: ', lr)
return lr
def on_train_begin(self, logs={}):
logs = logs or {}
print(self.global_step)
if self.global_step == 0:
print(self.linear_init_lr)
K.set_value(self.model.optimizer.lr, self.linear_init_lr)
else:
K.set_value(self.model.optimizer.lr, self.lr_schedule())
def on_batch_end(self, epoch, logs=None):
# lr = float(K.get_value(self.model.optimizer.lr))
logs = logs or {}
self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
self.history.setdefault('iterations', []).append(self.global_step)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
self.global_step = self.global_step + 1
lr = self.lr_schedule()
K.set_value(self.model.optimizer.lr, lr)
# def on_epoch_end(self, epoch, logs=None):
# logs = logs or {}
# logs['lr'] = K.get_value(self.model.optimizer.lr)
def on_epoch_begin(self, epoch, logs=None):
logs = logs or {}
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = float(K.get_value(self.model.optimizer.lr))
logs['lr'] = lr
print('Learning Rate:',lr)
if epoch > 80 and epoch<130:
self.israndom = True
else:
self.israndom = False
class RetinaSchedule(Callback):
def __init__(self,epochs=150,init_lr=1e-1,distribution_method='N',random_range=10):
super(RetinaSchedule,self).__init__()
self.epochs = epochs
self.linear_init_lr = init_lr
self.distribution_method = distribution_method
self.random_range = random_range
def lr_schedule(self,epoch):
# Learning Rate Schedule
lr = self.linear_init_lr
if epoch > 140:
lr *= 1e-2
elif epoch > 120:
lr *= 1e-1
if epoch>120:
if self.distribution_method == 'U':
lr = U(lr, self.random_range)
if self.distribution_method == 'N':
lr = N(lr, mu=self.random_range)
elif self.distribution_method == 'Base':
lr = lr
print('Learning rate: ', lr)
return lr
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
# lr = float(K.get_value(self.model.optimizer.lr))
lr = self.lr_schedule(epoch=epoch)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
| [
"[email protected]"
] | |
f6b28e44289b1bbd336d17842f7005fa46b1940f | 0033cac5a4a233b00230912f2efd4a6b7971a5ea | /restapi/config/urls.py | fa6a19e7a44d4c4613ce2ed5ff6f0223c6d32991 | [] | no_license | RamyaSaba/restapi | c2460008bdd7018c99eb32195b9bd86bd75fe0fa | 594f99532746fdde84e1d132eaea4f68fe95134c | refs/heads/main | 2023-07-29T03:52:38.114933 | 2021-09-07T11:18:35 | 2021-09-07T11:18:35 | 403,935,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
]
| [
"[email protected]"
] | |
087c4004b3983645a25c445cf140502e90ea48cb | 5bd28b96831fe60aced347d5b6c2de71689fcfd7 | /CENG114_HW2_250201073/250201073_HW2.py | e6c79d688140a77e1cb883f234eaae659d79383f | [] | no_license | kturk/Probability-and-Statistics-Assignments | 11e0053c1a8c6f9a9211b61c64a10a5313b9e707 | 95d0e29d442ae76e0abb341b645fc14d90ee1cdf | refs/heads/master | 2022-10-21T23:50:25.778275 | 2020-06-12T08:40:02 | 2020-06-12T08:40:02 | 271,749,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | """
ID = 250201073
"""
import numpy as np
from matplotlib import pyplot as plt
# Part a (Inverse Transform Method)
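# Inverse transform: if U ~ Uniform(0,1), then X = sqrt(U) has CDF F(x) = x^2, i.e. pdf f(x) = 2x on [0,1]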
U = []
Xa = []
av_Xa = []
vr_Xa = []
counterA = 0
varianceSumA = 0
# Populate the given arrays.
while counterA < 50000:
u = np.random.rand()
U.append(u)
x = u ** (1/2)
Xa.append(x)
if len(av_Xa) == 0:
av_Xa.append(x) # If list is empty average = first number
else:
av_Xa.append((av_Xa[counterA-1] * len(av_Xa) + x) / (len(av_Xa) + 1) ) # Calculating new average and adding it to the list
counterA = counterA + 1
for i in range(len(Xa)):
varianceSumA = varianceSumA + ((Xa[i] - av_Xa[i]) ** 2)
vr_Xa.append(varianceSumA / (i+1)) # Adding the variance to the list
# Inspect the following plots.
plt.figure()
for i in range(len(Xa)):
plt.plot([Xa[i],U[i]],[1,1.2])
plt.figure()
hU = plt.hist(U,100,alpha=0.5,normed=True)
hXa = plt.hist(Xa,100,alpha=0.5,normed=True)
plt.figure()
plt.plot(np.cumsum(hU[0]))
plt.plot(np.cumsum(hXa[0]))
# Plot the average and variance values.
plt.figure()
plt.plot(av_Xa)
plt.title("Figure 4")
plt.figure()
plt.plot(vr_Xa)
plt.title("Figure 5")
# Part b (Rejection Method)
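# Rejection method: draw (x, y) uniformly on the unit square and accept x whenever 2*y <= f(x) = 2x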
Xb = []
av_Xb = []
vr_Xb = []
counterB = 0
varianceSumB = 0
pdfX = 0
# Populate the given arrays.
while counterB < 50000:
xB = np.random.rand()
y = np.random.rand()
pdfX = xB * 2
if 2 * y <= pdfX: # Accepting the value
Xb.append(xB)
if len(av_Xb) == 0:
av_Xb.append(xB) # If list is empty average = first number
else:
av_Xb.append((av_Xb[counterB-1] * len(av_Xb) + xB) / (len(av_Xb) + 1) ) # Calculating new average and adding it to the list
counterB = counterB + 1
for i in range(len(Xb)):
varianceSumB = varianceSumB + ((Xb[i] - av_Xb[i]) ** 2)
vr_Xb.append(varianceSumB / (i+1)) # Adding the variance to the list
# Inspect the following plots.
plt.figure()
hXb = plt.hist(Xb,100,normed=True)
plt.figure()
plt.plot(np.cumsum(hXb[0]))
# Plot the average and variance values.
plt.figure()
plt.plot(av_Xb)
plt.title("Figure 8")
plt.figure()
plt.plot(vr_Xb)
plt.title("Figure 9")
| [
"[email protected]"
] | |
0b086d7aa26f41565a6e64ed16dc3a0147df5f7b | 9e8ee26fdab9313df81a22dae700f8417ed60722 | /slideshow.py | ad1248130c4428f50a6a64d5acbd7038a07f9826 | [] | no_license | neelneelpurk/slideshow | fe80b812225f8f51f3bc7006b12a94f9834b03d0 | 5248134943fed11349184a008fef37c1e0baaedc | refs/heads/master | 2021-01-19T13:37:30.582857 | 2017-02-18T18:50:09 | 2017-02-18T18:50:09 | 82,403,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | import numpy as np
import cv2
import imutils
import glob
print "Give your file location:"
string = str(raw_input())
images = glob.glob(string + '*.jpg')
ch_img = cv2.imread(images[0])
ch_img = imutils.resize(ch_img, width=640, height = 540)
for fn in images:
img = cv2.imread(fn)
gray = imutils.resize(img, width=640, height = 540)
for i in range(10) :
j = i/(10.0)
dst = cv2.addWeighted(gray,j,ch_img,(1-j),0)
cv2.imshow('Slideshow',dst)
if cv2.waitKey(150) & 0xFF == ord('q'):
break
ch_img = cv2.imread(fn)
ch_img = imutils.resize(ch_img, width=640, height = 540)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
637a199f53ea8c73496a92bc9bfdb4cf51269691 | ef59fd73399600e4997ff058c69ef0fc1bebecf5 | /buttonTime.py | 00166d1cabda6b65a24e590d181e3187b402fd9b | [] | no_license | jimTheSTEAMClown/Python-Code | cfae6d5e1fde5229d7bca7cbd9ef5973845c24e1 | e7bde6b09b951d24d66ad09a305b9a8d240f1d45 | refs/heads/master | 2022-05-16T18:20:49.150597 | 2022-05-02T00:51:55 | 2022-05-02T00:51:55 | 80,686,095 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | # Button & Time
# 3.3v = 1,17, 5.0v =2,4 GND = 6,9,14,20,25,30,34,39
# I/O = 3,5,7,8,10,11,12,13,15,16,18,19,21,22,23,24,
# More I/O =26,27,28,29,31,32,33,35,36,37,38,40
import RPi.GPIO as GPIO
import time
from time import sleep
GPIO.setmode(GPIO.BOARD)
timeButton = 18
gotTimeLED = 5
GPIO.setup(gotTimeLED, GPIO.OUT)
GPIO.setup(timeButton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
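# the internal pull-down keeps the input low, so GPIO.input() reads True only while the button drives the pin high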
# Your Code Here
# init states
timeButtonState = False
print('starting to check Button pressed and print time in milliseconds')
startTimeMilliSeconds = int(round(time.time() * 1000))
print('Start time = ',startTimeMilliSeconds)
# Infinite Loop
while True:
# reset Button check
print('checking if Time button is pushed')
while timeButtonState == False:
timeButtonState = GPIO.input(timeButton)
#print(resetButtonState)
if timeButtonState == True:
print('Time Button Pressed')
        # Ask for the current time in milliseconds
        currentMilliSeconds = int(round(time.time() * 1000))
        print('Button Pushed at ',currentMilliSeconds)
        timeDifference = currentMilliSeconds - startTimeMilliSeconds
        print('Start to Button Pushed difference = ',timeDifference)
if timeDifference > 10000 :
print('----------------- Times up ---------------')
            print('starting to check Button pressed and print time in milliseconds')
startTimeMilliSeconds = int(round(time.time() * 1000))
print('Start time = ',startTimeMilliSeconds)
sleep(.05)
timeButtonState = False
| [
"[email protected]"
] | |
d835a6c951beb5c578bf0721b074f492301e078a | 747a43e9e8e69f870d8d693214a89f4da6859176 | /examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/dataloader/ensemble_dataset.py | 72e0b8c58e739ffd226d7b02c1d7d81eb35c1e55 | [
"MIT",
"Apache-2.0"
] | permissive | AprLie/ogb | 32a8ae331e8ebfa287b81015d88ab996b6ddb9b3 | 7e4f25bbc013e76c8f04990e1d9d659a67f5f491 | refs/heads/master | 2023-06-04T14:05:55.669928 | 2021-06-16T02:30:51 | 2021-06-16T02:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,277 | py | from ogb.lsc import WikiKG90MDataset
from .KGDataset import KGDataset
import numpy as np
import os.path as osp
class WikiKG90MDatasetEnsemble(WikiKG90MDataset):
def __init__(self, root: str = 'dataset'):
super(WikiKG90MDatasetEnsemble, self).__init__(root)
self._other_entity_feat = None
self._other_nfeat_valid = None
self._other_nfeat_test = None
self._train_val_hrt = None
self._train_fewer_hrt = None
self._train_upsample_hrt = None
self._train_hrt = np.concatenate((self._train_hrt, np.load(osp.join(self.processed_dir, 'trian_val_topk_add_h.npy'))))
@property
def train_val_hrt(self) -> np.ndarray:
        '''
        Training triples concatenated with the validation triples and the upsampled
        validation triples (loaded from val_hrt_wyk.npy and upsample_on_val_wyk.npy).
        '''
if self._train_val_hrt is None:
path2 = osp.join(self.processed_dir, 'val_hrt_wyk.npy')
path3 = osp.join(self.processed_dir, 'upsample_on_val_wyk.npy')
self._train_val_hrt = np.concatenate((self._train_hrt, np.load(path2), np.load(path3)))
print("Training dataset with validation have %d samples" % self._train_val_hrt.shape[0])
return self._train_val_hrt
@property
def train_upsample_hrt(self) -> np.ndarray:
'''
using upsample train data for training
'''
if self._train_upsample_hrt is None:
self._train_upsample_hrt = self._train_hrt
print("Training dataset with filter have %d samples" % self._train_upsample_hrt.shape[0])
return self._train_upsample_hrt
@property
def num_feat_dims(self) -> int:
'''
Dimensionality of relation and entity features obtained by roberta
'''
return 200
@property
def entity_feat(self) -> np.ndarray:
'''
Entity feature
- np.ndarray of shape (num_entities, num_feat_dims)
i-th row stores the feature of i-th entity
* Loading everything into memory at once
* saved in np.float16
'''
if self._entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._entity_feat = np.load(path, mmap_mode='r')
return self._entity_feat
@property
def other_entity_feat(self) -> np.ndarray:
if self._other_entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._other_entity_feat = np.load(path, mmap_mode='r')
return self._other_entity_feat
@property
def other_nfeat_valid(self) -> np.ndarray:
if self._other_nfeat_valid is None:
path = osp.join(self.processed_dir, 'val_cand_occur_feat2.npy')
self._other_nfeat_valid = np.load(path, mmap_mode='r')
return self._other_nfeat_valid
@property
def other_nfeat_test(self) -> np.ndarray:
if self._other_nfeat_test is None:
path = osp.join(self.processed_dir, 'test_cand_occur_feat.npy')
self._other_nfeat_test = np.load(path, mmap_mode='r')
return self._other_nfeat_test
@property
def other_nfeat_train(self) -> np.ndarray:
if self._other_nfeat_test is None:
path = osp.join(self.processed_dir, 'train_cand_occur_feat.npy')
self._other_nfeat_test = np.load(path, mmap_mode='r')
return self._other_nfeat_test
@property
def all_entity_feat(self) -> np.ndarray:
if self._all_entity_feat is None:
path = osp.join(self.original_root, 'entity_feat.npy')
self._all_entity_feat = np.load(path)
return self._all_entity_feat
class WikiKG90MDatasetEnsembleTrainNFeat(WikiKG90MDataset):
def __init__(self, root: str = 'dataset'):
super(WikiKG90MDatasetEnsembleTrainNFeat, self).__init__(root)
self._other_entity_feat = None
self._other_nfeat_valid = None
self._other_nfeat_test = None
@property
def num_feat_dims(self) -> int:
'''
Dimensionality of relation and entity features obtained by roberta
'''
return 200
@property
def entity_feat(self) -> np.ndarray:
'''
Entity feature
- np.ndarray of shape (num_entities, num_feat_dims)
i-th row stores the feature of i-th entity
* Loading everything into memory at once
* saved in np.float16
'''
if self._entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._entity_feat = np.load(path, mmap_mode='r')
return self._entity_feat
@property
def other_entity_feat(self) -> np.ndarray:
if self._other_entity_feat is None:
path = osp.join(self.processed_dir, 'entity_feat.npy')
self._other_entity_feat = np.load(path, mmap_mode='r')
return self._other_entity_feat
@property
def other_nfeat_valid(self) -> np.ndarray:
if self._other_nfeat_valid is None:
path = osp.join(self.processed_dir, 'valid_nfeat.npy')
self._other_nfeat_valid = np.load(path, mmap_mode='r')
return self._other_nfeat_valid
@property
def other_nfeat_test(self) -> np.ndarray:
if self._other_nfeat_test is None:
path = osp.join(self.processed_dir, 'test_nfeat.npy')
self._other_nfeat_test = np.load(path, mmap_mode='r')
return self._other_nfeat_test
@property
def other_nfeat_train(self) -> np.ndarray:
if self._other_nfeat_test is None:
path = osp.join(self.processed_dir, 'train_nfeat.npy')
self._other_nfeat_test = np.load(path, mmap_mode='r')
return self._other_nfeat_test
@property
def all_entity_feat(self) -> np.ndarray:
if self._all_entity_feat is None:
path = osp.join(self.original_root, 'entity_feat.npy')
self._all_entity_feat = np.load(path)
return self._all_entity_feat
class KGDatasetWikiEnsembleNFeat(KGDataset):
    '''Load the WikiKG90M knowledge graph for the ensemble setting (with extra node features).
    Training triples, the valid/test candidate dictionaries and the entity/relation
    features are all taken from WikiKG90MDatasetEnsembleTrainNFeat.
    The triples are stored as (head_nid, relation_id, tail_nid).
    '''
def __init__(self, sys_args, name='wikikg90m'):
self.name = name
path = "/disk4/ogb/link_level/dataset/"
self.dataset = WikiKG90MDatasetEnsembleTrainNFeat(path)
self.train = self.dataset.train_hrt.T
self.n_entities = self.dataset.num_entities
self.n_relations = self.dataset.num_relations
self.valid = None
self.test = None
self.valid_dict = self.dataset.valid_dict
self.test_dict = self.dataset.test_dict
self.entity_feat = self.dataset.entity_feat
self.relation_feat = self.dataset.relation_feat
# self.other_entity_feat_train = self.dataset.other_entity_feat_train
self.other_nfeat_train = self.dataset.other_nfeat_train
self.other_nfeat_valid = self.dataset.other_nfeat_valid
self.other_nfeat_test = self.dataset.other_nfeat_test
print(f'sys_args.use_valid_nfeat: {sys_args.use_valid_nfeat}, sys_args.train_mode: {sys_args.train_mode}')
self.other_nfeat_train = self.dataset.other_nfeat_train
self.other_nfeat_valid = self.dataset.other_nfeat_valid
self.other_nfeat_test = self.dataset.other_nfeat_test
if 't,r->h' in self.valid_dict:
del self.valid_dict['t,r->h']
        if 't,r->h' in self.test_dict:
            del self.test_dict['t,r->h']
@property
def emap_fname(self):
return None
@property
def rmap_fname(self):
return None
class KGDatasetWikiEnsemble(KGDataset):
    '''Load the WikiKG90M knowledge graph for the ensemble setting.
    Training triples (optionally concatenated with validation or upsampled triples),
    the valid/test candidate dictionaries and the entity/relation features are all
    taken from WikiKG90MDatasetEnsemble.
    The triples are stored as (head_nid, relation_id, tail_nid).
    '''
def __init__(self, sys_args, name='wikikg90m'):
self.name = name
path = "/disk4/ogb/link_level/dataset/"
self.dataset = WikiKG90MDatasetEnsemble(path)
if sys_args.train_with_val:
self.train = self.dataset.train_val_hrt.T
elif sys_args.train_upsample:
self.train = self.dataset.train_upsample_hrt.T
else:
self.train = self.dataset.train_hrt.T
self.n_entities = self.dataset.num_entities
self.n_relations = self.dataset.num_relations
self.valid = None
self.test = None
self.valid_dict = self.dataset.valid_dict
self.test_dict = self.dataset.test_dict
self.entity_feat = self.dataset.entity_feat
self.relation_feat = self.dataset.relation_feat
self.other_entity_feat = self.dataset.other_entity_feat
print(f'sys_args.use_valid_nfeat: {sys_args.use_valid_nfeat}, sys_args.train_mode: {sys_args.train_mode}')
if sys_args.use_valid_nfeat:
if sys_args.train_mode == 'valid':
print('use features on validation')
self.other_nfeat_valid = self.dataset.other_nfeat_valid
else:
print('use features on test')
self.other_nfeat_valid = self.dataset.other_nfeat_test
else:
self.other_nfeat_valid = None
if 't,r->h' in self.valid_dict:
del self.valid_dict['t,r->h']
        if 't,r->h' in self.test_dict:
            del self.test_dict['t,r->h']
@property
def emap_fname(self):
return None
@property
def rmap_fname(self):
return None
| [
"[email protected]"
] | |
17a9404fe58bde1a11ae138c1925ab4fe91f325f | 002010b7cf7bf0c674c081f5d86b84dc67122048 | /sales/migrations/0015_installationpaymentreceipt_pointofsalesreceipt.py | c10fc9d16e8468e79b6d0706eb21a5f88a6fed6e | [] | no_license | Afotey-AntyGravity/Receipt-number | 824e2f2099cb458aaf54ad25c973849bed7543eb | 1f33694d9bdfe2bbdd1e0fc68af37dbf71708f3f | refs/heads/main | 2023-05-27T11:14:02.960247 | 2021-06-10T00:46:10 | 2021-06-10T00:46:10 | 375,331,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | # Generated by Django 3.1.3 on 2021-04-22 19:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tools', '0007_tax'),
('sales', '0014_auto_20210422_1741'),
]
operations = [
migrations.CreateModel(
name='PointofSalesReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference_Number', models.CharField(max_length=200, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('discount_Rate', models.FloatField(null=True)),
('unit_Price', models.FloatField(null=True)),
('quantity', models.PositiveIntegerField(default=0)),
('goods_Pending', models.BooleanField()),
('currency', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.currency')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.customer')),
('material_Color', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.materialcolourinformation')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.product')),
('sales_Officer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.salesmaninformation')),
],
),
migrations.CreateModel(
name='InstallationPaymentReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reference_Number', models.CharField(max_length=200, null=True)),
('installation_Amount', models.FloatField(null=True)),
('amount_Paid', models.FloatField(null=True)),
('exfactory_Amount', models.FloatField(null=True)),
('PFI_Number', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sales.proformareceipt')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.customer')),
],
),
]
| [
"[email protected]"
] | |
2e18eb6be957873c745d543ad85b0034148f4db7 | a305d9a74d2332fdfaacc1d041a5ec8a22dae2db | /square.py | ab50b0bb35101b31be278505919126ef1b588e57 | [] | no_license | CodArtist/Fractals | d340fd124ec70b12f3dc63f93d086614278869c6 | 2c1e98db3414c8fb7830efa88973de43a49f6315 | refs/heads/main | 2022-12-27T00:36:58.890657 | 2020-10-12T05:15:31 | 2020-10-12T05:15:31 | 303,283,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import turtle
bob =turtle.Turtle()
bob.speed(100000)
bob.penup()
bob.forward(-150)
bob.pendown()
color = ["green","blue","red"]
i=0
def star(turtle,size,col):
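    # recursively draws a filled square, drawing a half-size square at each corner until size <= 10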
if size <=10:
return
else:
turtle.fillcolor(col)
turtle.begin_fill()
for i in range(4):
turtle.forward(size)
star(bob,size/2,col)
turtle.left(90)
turtle.end_fill()
star(bob,250,color[0])
turtle.done() | [
"[email protected]"
] | |
98f212d0387b519b346a5a4365cc5f62ecaf13bd | 4596e3f0097723e402c8fe21933f016d99cdb08b | /two_strings_equal.py | ae0844187de26d825498e9b5244b34bb1d530c1c | [] | no_license | Harshavardhanteja7/Python-Assignments | 3b0af25d088e42c24407be53ccd78064df5244db | cae8657618ea44ff62268e315e26adb655c9cbbf | refs/heads/master | 2022-11-09T06:19:37.462857 | 2020-06-27T17:25:30 | 2020-06-27T17:25:30 | 273,303,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
"""two_strings_equal.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1tWdMpgZwyMC_Sbp5iSaa9SRuARW-oXWx
"""
str_1=str(input("Enter the 1st string: "))
str_2=str(input("Enter the 2nd string: "))
if str_1==str_2:
print("both strings are equal")
else:
print("both strings are not equal") | [
"[email protected]"
] | |
0a46787003d24c0133f2ba04614a8d583391bd69 | 419c703dd00a6f2219a8f81408364d7f4fa9e3db | /cgi-bin/model.py | 52d5878dce25f269db670d1cac2e35f35a2b0963 | [] | no_license | borhanreo/digit_predict | 3897663f1a2689e915551c94194592baade81ec4 | 81a322b39c60e9793c3df1f857112651b2eb5f5e | refs/heads/master | 2020-04-20T06:26:53.609897 | 2019-02-01T11:10:03 | 2019-02-01T11:10:03 | 168,684,380 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | """
Define Convolutional Neural Network model for MNIST input
"""
from tflearn import DNN
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
loss='categorical_crossentropy', name='target')
# Define model
model = DNN(network, tensorboard_verbose=0)
| [
"[email protected]"
] | |
63050037ff16f231d2e413b0f7308febc154a77d | ddbb862a813d28154547f46bf3f9af9297e355f7 | /Monte Carlo1.1.py | 39f1e4ee233fdf5e16fbaff8e7998b2b5a73bd50 | [] | no_license | YizhuoLu/EE511project4 | 23aa001f18ec63ed3762d843eed4a9437769ba15 | 3761e43418399d513afac53e690628cd6e20fc07 | refs/heads/master | 2020-03-23T11:58:24.831547 | 2018-07-19T05:39:14 | 2018-07-19T05:39:14 | 141,529,251 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | import numpy as np
import matplotlib.pyplot as plt
import math
s = []
N = 100
def uniformSample(N):
for i in range(N):
x = np.random.uniform(0, 1)
y = np.random.uniform(0, 1)
s.append([x, y])
return s
z = np.array(uniformSample(N))
count = 0
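# a point (x, y) falls inside the quarter unit circle when y <= sqrt(1 - x^2); the hit fraction estimates its area, pi/4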
for i in range(len(z)):
if math.sqrt(1-z[i,0]**2) >= z[i,1]:
count = count + 1
# print('The number of samples that fall within the quarter unit-circle is:', count)
area = count / N
print("The estimated area of the inscribed quarter circle is:", area)
pi = 4 * area
print('The estimated value of pi is:', pi)
fig = plt.figure(1)
ax = fig.add_subplot(1, 1, 1)
circ = plt.Circle((0, 0), radius=1, edgecolor='r', facecolor='white')
sca = plt.scatter(z[:, 0], z[:, 1], s=7, c='b')
ax.add_artist(circ)
ax.add_artist(sca)
plt.title('scatter plot of 100 uniform distributed samples')
plt.xlabel('X')
plt.ylabel('Y')
plt.show() | [
"[email protected]"
] | |
b90a0305484644a6728e50d68732ee9e6989bb14 | 478fad340a97fc14d365b95bbd6f8ac1dcc71953 | /121/Solution.py | d76a39e78ef9cadd8e4004cc32002f4a3d0d5986 | [] | no_license | sandyg05/leetcode | 93cca3b3ce4f38cf1ea1c6d3e8400d7b6b776c37 | e9d8036e2be6dbd1b8c958431e07dc35b88ebfa8 | refs/heads/master | 2022-07-16T10:03:59.529470 | 2020-05-13T05:35:49 | 2020-05-13T05:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | """
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.
Note that you cannot sell a stock before you buy one.
Example 1:
Input: [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Not 7-1 = 6, as selling price needs to be larger than buying price.
Example 2:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
class Solution:
def maxProfit(self, prices):
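        # one pass: track the lowest price seen so far and the best profit from selling at the current price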
if not prices:
return 0
min_price = prices[0]
max_profit = 0
for num in prices:
if num < min_price:
min_price = num
if num - min_price > max_profit:
max_profit = num - min_price
return max_profit | [
"[email protected]"
] | |
a5075c05b906fd9b22238fdec92901e48a23a4c7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02817/s121273903.py | c74297c5df6c42af00d7dd1b1408fea1fb86e8a6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | x=list(input().split())
print(x[1]+x[0])
| [
"[email protected]"
] | |
04a2fa5b79e53367d1fa702e2c9297adc459942f | 16f9faf6665f5189a8561534bb4bd8b0951ba1aa | /codes/metrics/__init__.py | d2cda599af5afa1f5e55bab4d4b114afd37eab3e | [] | no_license | azuryl/LPTN | 4b36dba2a7f5b2bcc7dc35ac3734839054069ca2 | a1b2db50117a842abc1f44d805291032651014ab | refs/heads/main | 2023-07-01T02:59:17.916730 | 2021-08-12T19:49:46 | 2021-08-12T19:49:46 | 395,425,328 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from .psnr_ssim import calculate_psnr, calculate_ssim
__all__ = ['calculate_psnr', 'calculate_ssim']
| [
"[email protected]"
] | |
62de88d56a77477d8991a96a5087929d2d3d2770 | 55d6de252e61c4b60688ebd8b1f637807acc1e7c | /usl_recived_forigin_purchased/models/inharitstockpicking.py | 0eebab826a9be7c89947980bd5f2d26cbf056f25 | [] | no_license | mosadiqit/eerna_erp_uslbd | b707a1d49a4fce7c1543b63e0120e8f9b77b26ce | 73e3994a9e32df7809d244eb6592513162ab7853 | refs/heads/main | 2023-06-30T14:53:04.837197 | 2021-08-04T11:30:46 | 2021-08-04T11:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,242 | py | from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from odoo.osv.osv import osv
from odoo.tools.float_utils import float_compare, float_is_zero, float_round
class StockPickingInharit(models.Model):
_inherit = 'stock.picking'
@api.onchange('commercial_invoice')
def onchange_commercial_invoice(self):
if self.commercial_invoice:
move_id = self.env['account.move'].search([('id','=',self.commercial_invoice.id)])
move_line_id = self.env['account.move.line'].search([('move_id','=',move_id.id),('account_internal_type','=','other')])
for rec in self:
lines = list()
for line in move_line_id:
vals = {
'product_id':line.product_id.id,
'branch_id':self.env.user.branch_id.id,
'product_uom_qty':line.quantity,
'reserved_availability':0,
'quantity_done':0,
'name':line.name,
'product_uom':line.product_id.uom_id.id
}
lines.append((0,0,vals))
rec.move_ids_without_package = lines
print('Hello')
def button_validate(self):
self.ensure_one()
if not self.move_lines and not self.move_line_ids:
raise UserError(_('Please add some items to move.'))
# Clean-up the context key at validation to avoid forcing the creation of immediate
# transfers.
# for rec in self.move_line_ids_without_package.lot_id:
# stock_reserved_check = self.env['stock.quant'].search([('lot_id','=',rec.id),('location_id','=',self.location_id.id)])
# if stock_reserved_check.reserved_quantity == 0:
# print(rec)
ctx = dict(self.env.context)
ctx.pop('default_immediate_transfer', None)
self = self.with_context(ctx)
# add user as a follower
self.message_subscribe([self.env.user.partner_id.id])
# If no lots when needed, raise error
picking_type = self.picking_type_id
precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')
no_quantities_done = all(float_is_zero(move_line.qty_done, precision_digits=precision_digits) for move_line in self.move_line_ids.filtered(lambda m: m.state not in ('done', 'cancel')))
no_reserved_quantities = all(float_is_zero(move_line.product_qty, precision_rounding=move_line.product_uom_id.rounding) for move_line in self.move_line_ids)
if no_reserved_quantities and no_quantities_done:
            raise UserError(_('You cannot validate a transfer if no quantities are reserved nor done. To force the transfer, switch to edit mode and encode the done quantities.'))
if picking_type.use_create_lots or picking_type.use_existing_lots:
lines_to_check = self.move_line_ids
if not no_quantities_done:
lines_to_check = lines_to_check.filtered(
lambda line: float_compare(line.qty_done, 0,
precision_rounding=line.product_uom_id.rounding)
)
for line in lines_to_check:
product = line.product_id
if product and product.tracking != 'none':
if not line.lot_name and not line.lot_id:
raise UserError(_('You need to supply a Lot/Serial number for product %s.') % product.display_name)
# Propose to use the sms mechanism the first time a delivery
# picking is validated. Whatever the user's decision (use it or not),
# the method button_validate is called again (except if it's cancel),
# so the checks are made twice in that case, but the flow is not broken
sms_confirmation = self._check_sms_confirmation_popup()
if sms_confirmation:
return sms_confirmation
if no_quantities_done:
view = self.env.ref('stock.view_immediate_transfer')
wiz = self.env['stock.immediate.transfer'].create({'pick_ids': [(4, self.id)]})
return {
'name': _('Immediate Transfer?'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'stock.immediate.transfer',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'res_id': wiz.id,
'context': self.env.context,
}
if self._get_overprocessed_stock_moves() and not self._context.get('skip_overprocessed_check'):
view = self.env.ref('stock.view_overprocessed_transfer')
wiz = self.env['stock.overprocessed.transfer'].create({'picking_id': self.id})
return {
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'stock.overprocessed.transfer',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'res_id': wiz.id,
'context': self.env.context,
}
# Check backorder should check for other barcodes
if self._check_backorder():
return self.action_generate_backorder_wizard()
self.action_done()
return
@api.onchange('is_nonsalealewarehouse_transfar')
def select_nonsale_ale_stock(self):
"""
        This method is used on the transfer page: when LIM transfer is selected, only the
        non-saleable (LIM) warehouse's internal transfer picking types are shown.
:return:
"""
self.branch_id = self.env.user.branch_id
if self.is_nonsalealewarehouse_transfar:
self.is_nonsalealewarehouse_transfar = True
print('come to condition is_nonsalealewarehouse_transfar')
warehouse = self.env['stock.warehouse'].sudo().search([('is_non_saleable_warehouse', '=', True),('company_id', '=',self.env.user.company_id.id)], limit=1)
print(warehouse.id)
picking_type = self.env['stock.picking.type'].sudo().search(
[('warehouse_id', '=', warehouse.id), ('sequence_code', '=', 'INT')])
print(picking_type)
print(picking_type.warehouse_id.name)
self.picking_type_id = picking_type.id
return {
'domain': {
'picking_type_id': [('warehouse_id', '=', warehouse.id), ('sequence_code', '=', 'INT')]
},
# 'default_picking_type_id': [('warehouse_id', '=', warehouse.id), ('sequence_code', '=', 'INT')]
# lambda self: self.env['stock.picking.type'].browse(self._context.get('default_picking_type_id')).default_location_src_id
}
else:
return {
'domain': {
'picking_type_id': []
}
}
# def _do_partial_func_unreserved(self):
# print('_do_partial_unreserved')
# @api.onchange('fpo_order_id')
# def fpo_fall_into(self):
# print('work')
    is_nonsalealewarehouse_transfar = fields.Boolean(string='LIM transfer', default=False)
commercial_invoice = fields.Many2one('account.move',domain=[('type','=','in_invoice')],string="Commercial Invoice")
def action_assign(self):
""" Check availability of picking moves.
This has the effect of changing the state and reserve quants on available moves, and may
also impact the state of the picking as it is computed based on move's states.
@return: True
"""
res = {}
self.filtered(lambda picking: picking.state == 'draft').action_confirm()
moves = self.mapped('move_lines').filtered(lambda move: move.state not in ('draft', 'cancel', 'done'))
if not moves:
raise UserError(_('Nothing to check the availability for.'))
# If a package level is done when confirmed its location can be different than where it will be reserved.
# So we remove the move lines created when confirmed to set quantity done to the new reserved ones.
package_level_done = self.mapped('package_level_ids').filtered(
lambda pl: pl.is_done and pl.state == 'confirmed')
package_level_done.write({'is_done': False})
is_raise_validation_error = moves._action_assign()
package_level_done.write({'is_done': True})
if is_raise_validation_error:
# message = 'product is no available '
# raise osv.except_osv(_('warning'), _(message))
# res['warning'] = {'title': _('Warning'), 'message': message}
# raise ValueError('product not available')
            raise ValidationError('product is not available')
return True
# fpo_order_id = fields.Many2one('foreign.purchase.order', string= 'Foreign purchase order ')
# @api.onchange('move_ids_without_package.product_uom_qty')
# # def test(self):
# # print('***********************')
# # print('***********************')
# # print('***********************')
| [
"[email protected]"
] | |
75f85c94fa15463111f270dbb6aaaac6ab4a7186 | 257564cbf0f0482428e029c9129b1fb3688aabab | /personal/views.py | 1c21b7e7f6eea830ff4e12c8b18c508be2462b4e | [] | no_license | ash018/mysite | d3c1516c66a27057b90911ec641ad0344edf25cd | 635872b7870baf6ac70415d0607eecbfe20c0fdf | refs/heads/master | 2020-09-15T22:41:26.750365 | 2016-09-23T08:48:15 | 2016-09-23T08:48:15 | 67,899,564 | 0 | 0 | null | 2016-09-10T23:36:16 | 2016-09-10T23:28:08 | Python | UTF-8 | Python | false | false | 311 | py | from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return render(request,'personal/home.html')
def contact(request):
return render(request,'personal/basic.html',{'content':['If you like to contact call me @ 01681355216 Or mail me @ [email protected]']})
| [
"[email protected]"
] | |
39382fd79e7090be05bd74fab26cc30b09251711 | fbaf479c2ebddeee35f548d516a7adade35f64d5 | /csp/finite_set.py | caf92867d0bea8e3448b42584307c58f363d593d | [] | no_license | modelcheckutp/CSP-Z3 | 7b93b30c4525acd3cbdbf1b628ef44990a3d1015 | 94fd3735c239209f54ab8ad7af6b57f0e5c66b56 | refs/heads/master | 2020-03-20T14:33:23.908921 | 2018-06-28T22:10:39 | 2018-06-28T22:10:39 | 137,487,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | ##################################################################
# The finite set theory based on BitVec
# Kun Wei 17/05/2017
##################################################################
from z3 import *
class FSetDecl():
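    # Encodes a finite set over the alphabet l as a BitVec of width len(l):
    # element i corresponds to bit i, so union/intersection/complement become bitwise |, &, ~.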
def __init__(self, l):
self.alphabet = l
self.size = len(l)
def declare(self, name):
return BitVec(name, self.size)
def union(self, s1, s2):
assert (s1.sort() == s2.sort())
return s1|s2
def intersection(self, s1, s2):
assert (s1.sort() == s2.sort())
return s1&s2
def complement(self, s):
return ~s
def difference(self, s1, s2):
assert (s1.sort() == s2.sort())
return self.intersection(s1, self.complement(s2))
def member(self, e, s):
index = self.alphabet.index(e)
be = BitVecVal(1, self.size)<<index
#print(be)
return (be & s)!= 0
def add(self, e, s):
index = self.alphabet.index(e)
be = BitVecVal(1, self.size) << index
#print(be)
return (be | s)
def emptyset(self):
return BitVecVal(0, self.size)
def fullset(self):
return ~BitVecVal(0, self.size)
def toElements(self, b):
s = []
be = BitVecVal(1,self.size)
for i in range(self.size):
t = simplify(b&(be<<i))
if not (t == 0):
s.append(self.alphabet[i])
return s
def toSet(self,l):
s = self.emptyset()
for i in range(len(l)):
s = self.add(l[i], s)
return s
# define a finite set sort
def FSetSort(l): # l is a list of all elements in the finite set
return BitVecSort(len(l))
### for testing
#Channel, (a,b,c,d) = EnumSort('Channel', ('a','b','c','d'))
#FSet = FSetDecl([a,b,c,d])
#print(simplify(FSet.toSet([a,b,c])))
#s1 = FSet.declare('s1')
#s2 = FSet.declare('s2')
#s = Solver()
#s.add(s1== FSet.add(b,FSet.add(a,FSet.emptyset())))
#s.add(s2== FSet.add(c,FSet.add(a,FSet.emptyset())))
#print(FSet.toElements(BitVecVal(14,4)))
#s.add(FSet.union(s1,s2) == FSet.add(c, FSet.add(b,FSet.add(a,FSet.emptyset()))))
#s.add(FSet.intersection(s1,s2) == FSet.add(a,FSet.emptyset()) )
#s.add(FSet.complement(s1) == FSet.add(c, FSet.add(d, FSet.emptyset())))
#s.add(FSet.difference(s1,s2) == FSet.add(b, FSet.emptyset()))
#print(s.check()) | [
"[email protected]"
] | |
c64bb122fa1b142b05e8315ac85b8ea4cec85786 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2019_03_08_14_00_41_432668.py | 4bdc1e00e92765b8d5b29e95dceff6a7256f3781 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | import math
def calcula_gaussiana(x, mi, sigma):
if (sigma == 1 and x == 0 and mi == 0):
return 0
if (sigma == 0 or sigma == - math.sqrt(2*math.pi) or sigma == 1/math.sqrt(2*math.pi)):
return 0
    return (1 / (sigma * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - mi) / sigma) ** 2) | [
"[email protected]"
] | |
6f4373a988fbcd023ca39c1755c9d361c3e7daff | 2fd14347b7f43864d8153bd1c6d79198302d21ea | /ex.002 root finding/nr_problem_case.py | 3d33bede021e71d689a6e8c5cd4a3b1edf781a2e | [] | no_license | family9od/ECAre | 0fe27ff290eaa702c754fedef8953260a67592fc | ea875ea14be9d99a5e4f2191382e6eedc702b557 | refs/heads/master | 2020-06-17T02:33:30.651909 | 2016-11-15T07:45:31 | 2016-11-15T07:45:31 | 75,047,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # -*- coding: utf8 -*-
# 2010112033 이상형 9/20
"""
Among the root-finding methods for single-variable equations, this uses the Newton-Raphson
method to find a root of some function g(x).
The example below is a case where the Newton-Raphson method is hard to apply.
"""
# import the rootfinding module, which gathers functions for finding roots of single-variable equations
import rootfinding as rf
def g(x):
    # the function whose root we want to find
return x ** 3 - 2 * x + 2
def dgdx(x):
    # derivative of g(x) with respect to x
return 3.0 * x ** 2.0 - 2.0
if "__main__" == __name__:
    # starting from the given initial value, look for x such that g(x) = 0
    # this may take longer than expected
x_nr = rf.newton(g, dgdx, 0)
print('x = %g, f(%g) = %g' % (x_nr, x_nr, g(x_nr)))
| [
"CAD Client"
] | CAD Client |
60683c2d38937f8deb20ebb916a8f5c41457bf7a | 1a597ec7f4a295e98aa231ad615dc5b03a17ef26 | /Regression/Random_Forest_Regression.py | ae87949dc47ed487ce7af60f70ba40ea46ca0218 | [] | no_license | GrismPatel/Machine_Learning_Python | 9039fdf946e2a24d6194f21b4308c38e381c2ec1 | f6e22600b052cffd00101a01f69127042005ef40 | refs/heads/master | 2021-01-20T15:54:31.055806 | 2018-01-30T01:47:40 | 2018-01-30T01:47:40 | 90,802,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
x = dataset.iloc[:,1:2].values
y = dataset.iloc[:,2].values
from sklearn.ensemble import RandomForestRegressor
a = RandomForestRegressor(n_estimators = 300,random_state = 0)
a.fit(x,y)
y_predict = a.predict(6.5)
x_grid = np.arange(min(x),max(x),0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(x,y,color = 'red')
plt.plot(x_grid, a.predict(x_grid),color = 'black')
plt.title('Position vs Salaries')
plt.xlabel('Position')
plt.ylabel('Salaries')
plt.show()
# [email protected]
# [email protected] | [
"[email protected]"
] | |
7d5b09b7c1f6d62b3cf5a4410be34cf296b3c832 | d3f559c122f2c0fea41d26a558859ef5ede8799c | /model_7_copy.py | 3c6feab6fff4320d1ebf9455b698d5934d060197 | [] | no_license | yifengyiye/PythonModels | df05c47e2f9085ee5c3a45f18da3b5c976ed8876 | 086212b2ef9f58830816dd8313de39c974bfcb3e | refs/heads/master | 2020-12-11T07:25:49.430579 | 2016-08-16T09:28:22 | 2016-08-16T09:28:22 | 48,640,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | # coding: utf-8
"""
Problem: copy the data of one list into another list.
Program analysis: use the list slice [:].
"""
a = [1,3,4,5,67,7,8,5,23,2,24542,2]
b = a[:]
print b | [
"[email protected]"
] | |
4be1a74484158c614d1bc32607a891d1931f5e2c | 10f1620a694ba283ce64e16d40b77cf4b51e90a8 | /dataProcessor.py | 06d86d6c73f79801fecf2fad314c6a88d7c57db8 | [] | no_license | pandeyGCt/Reliablity-over-UDP | 429653b57a047c081f962b7639cbba0b3ebcaa7e | 1ab95ec21ccdc40c528a11ed7f587cbaf9dd4909 | refs/heads/main | 2023-06-07T19:29:06.340277 | 2021-06-28T16:37:33 | 2021-06-28T16:37:33 | 381,097,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | '''
Saksham Pandey 2018A7PS0259H
Vanshaj Aggarwal 2018A7PS0309H
Arpit Adlakha 2018A7PS0250H
Surinder Singh Virk 2018A7PS0234H
Aditya Sharma 2018A7PS0315H
'''
import struct
import socket
from array import array
def myCheckSum(data):
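    # 16-bit ones'-complement checksum (Internet checksum style): pad to an even
    # length, sum the 16-bit words, fold the carry bits back in, then invert.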
if len(data) % 2:
data += b'\x00'
s = sum(array('H',data))
s = (s & 0xffff) + (s >> 16)
s += (s >> 16)
return socket.ntohs(~s & 0xffff)
def getFileData(name):
'''
This method gets the data and breaks it into chunks.
'''
try:
f=open(name,"rb")
file_data=f.read()
file_data_size=len(file_data)
pack_size=1000
data=[]
for i in range(0,file_data_size,pack_size):
if(file_data_size-i>pack_size):
data.append(file_data[i:i+pack_size])
else:
data.append(file_data[i:file_data_size])
return data
except IOError:
print("Filen not found or incorrect path")
finally:
print("EXIT")
def makePacketArr(name):
'''
This method creates a list containing packets to be sent.
'''
data=getFileData(name)
packet_array=[]
for i in range(0,len(data)):
packer = struct.Struct('I I {}s'.format(len(data[i])))
frame=(i,myCheckSum(data[i]+bytes(i)),data[i])
packet_array.append(packer.pack(*frame))
return packet_array
def convertString(seq,string):
'''
    This method packs a given seq and string into a packet to be sent to the server.
'''
string= string.encode('UTF-8')
packer = struct.Struct('I I {}s'.format(len(string)))
frame=(seq,myCheckSum(string),string)
d=packer.pack(*frame)
return d
def convertFilename(string):
string=string.encode('UTF-8')
packer=struct.Struct('I {}s'.format(len(string)))
frame=(myCheckSum(string),string)
d=packer.pack(*frame)
return d
| [
"[email protected]"
] | |
b67056872a7437bd215bbd55010776a5e3c4c513 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DECHUB900-HRIP-MIB-V3-0.py | 4affb4dd03a0dfee8d6e74ef3a888a878b9e33bf | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 11,491 | py | #
# PySNMP MIB module DECHUB900-HRIP-MIB-V3-0 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DECHUB900-HRIP-MIB-V3-0
# Produced by pysmi-0.3.4 at Wed May 1 12:37:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Counter32, IpAddress, NotificationType, Counter64, TimeTicks, ModuleIdentity, Unsigned32, Integer32, Gauge32, MibIdentifier, ObjectIdentity, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "Counter32", "IpAddress", "NotificationType", "Counter64", "TimeTicks", "ModuleIdentity", "Unsigned32", "Integer32", "Gauge32", "MibIdentifier", "ObjectIdentity", "iso", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
dec = MibIdentifier((1, 3, 6, 1, 4, 1, 36))
ema = MibIdentifier((1, 3, 6, 1, 4, 1, 36, 2))
decMIBextension = MibIdentifier((1, 3, 6, 1, 4, 1, 36, 2, 18))
decHub900 = MibIdentifier((1, 3, 6, 1, 4, 1, 36, 2, 18, 11))
mgmtAgent = MibIdentifier((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1))
mgmtAgentVersion1 = MibIdentifier((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1))
hrip = MibIdentifier((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2))
hripPubRingCfgTable = MibTable((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 1), )
if mibBuilder.loadTexts: hripPubRingCfgTable.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubRingCfgTable.setDescription('Defines a table for ring speeds. The table has 2 rows. Row 1 defines ring speed for ring A and row 2 defines the ring speed for ring B.')
hripPubRingCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 1, 1), ).setIndexNames((0, "DECHUB900-HRIP-MIB-V3-0", "hripRingCfgIndex"))
if mibBuilder.loadTexts: hripPubRingCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubRingCfgEntry.setDescription('An entry in the hripPubRingCfgTable.')
hripRingCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ringA", 1), ("ringB", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripRingCfgIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hripRingCfgIndex.setDescription('Identifies the ring being accessed ie the row of the table being referred to.')
hripRingCfgSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("speed4", 2), ("speed16", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hripRingCfgSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: hripRingCfgSpeed.setDescription('The speed of each of the token rings on the backplane. speed4(1) indicates a speed of 4 Mbits per second while speed16(2) indicates 16 Mbits per second. The value of this object is maintained across power cycles and resets.')
hripPubSlotCfgTable = MibTable((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 2), )
if mibBuilder.loadTexts: hripPubSlotCfgTable.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubSlotCfgTable.setDescription('Defines a table for Slot Configurations. Each row in the table corresponds to a backplane slot (hripSlotIndex).')
hripPubSlotCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 2, 1), ).setIndexNames((0, "DECHUB900-HRIP-MIB-V3-0", "hripSlotCfgIndex"))
if mibBuilder.loadTexts: hripPubSlotCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubSlotCfgEntry.setDescription('An entry in the hripPubSlotCfgTable.')
hripSlotCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripSlotCfgIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotCfgIndex.setDescription('Index into the table of slot configurations.')
hripSlotCfgDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enabled-1", 1), ("disabled-1", 2), ("enabled-2", 3), ("disabled-4", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hripSlotCfgDisable.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotCfgDisable.setDescription('Locks out the corresponding backplane port in that slot. -2 is used for linecards like the MIPPY that have multiple physical token ring backplane ports. The default setting is enable (for ports 1 & 2) The value of this object is maintained across power cycles and resets.')
hripSlotCfgForce = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noForce-1", 1), ("forceRingA-1", 2), ("forceRingB-1", 3), ("noForce-2", 4), ("forceRingA-2", 5), ("forceRingB-2", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hripSlotCfgForce.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotCfgForce.setDescription('Describes a slot/ring pairing. -2 is used for linecards like the MIPPY that have multiple physical token ring backplane ports. The value of this object is maintained across power cycles and resets.')
hripPubRingStatTable = MibTable((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 3), )
if mibBuilder.loadTexts: hripPubRingStatTable.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubRingStatTable.setDescription('A table describing the number of modules on each ring.')
hripPubRingStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 3, 1), ).setIndexNames((0, "DECHUB900-HRIP-MIB-V3-0", "hripRingStatIndex"))
if mibBuilder.loadTexts: hripPubRingStatEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubRingStatEntry.setDescription('An entry describing the number of modules on each ring.')
hripRingStatIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ringA", 1), ("ringB", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripRingStatIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hripRingStatIndex.setDescription('An index into the hripPubRingStatTable.')
hripRingStatNumModInserted = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripRingStatNumModInserted.setStatus('mandatory')
if mibBuilder.loadTexts: hripRingStatNumModInserted.setDescription('The number of modules inserted onto the ring.')
hripPubSlotStatTable = MibTable((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4), )
if mibBuilder.loadTexts: hripPubSlotStatTable.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubSlotStatTable.setDescription('The status of modules inserted on each slot of backplane.')
hripPubSlotStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4, 1), ).setIndexNames((0, "DECHUB900-HRIP-MIB-V3-0", "hripSlotStatIndex"))
if mibBuilder.loadTexts: hripPubSlotStatEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hripPubSlotStatEntry.setDescription('An entry in the hripPubSlotStatTable.')
hripSlotStatIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripSlotStatIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotStatIndex.setDescription('The index into slot status table.')
hripSlotStatRingAInsertCount = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripSlotStatRingAInsertCount.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotStatRingAInsertCount.setDescription('The number of times that the module has transitioned between inserted/wrapped states on backplane ring A, since the module was last reset/power-cycled.')
hripSlotStatRingBInsertCount = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripSlotStatRingBInsertCount.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotStatRingBInsertCount.setDescription('The number of times that the module has transitioned between inserted/wrapped states on backplane ring B, since the module was last reset/power-cycled.')
hripSlotStatTcuA = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inserted", 1), ("wrapped", 2), ("notTR", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripSlotStatTcuA.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotStatTcuA.setDescription('Status of the TCU on ring A. If there is a non Token Ring linecard plugged into the hub, the value reported should be nonTR(3). For a Token Ring line-card the value is inserted or wrapped')
hripSlotStatTcuB = MibTableColumn((1, 3, 6, 1, 4, 1, 36, 2, 18, 11, 1, 1, 2, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inserted", 1), ("wrapped", 2), ("notTR", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hripSlotStatTcuB.setStatus('mandatory')
if mibBuilder.loadTexts: hripSlotStatTcuB.setDescription('Status of the TCU on ring B. If there is a non Token Ring linecard plugged into the hub, the value reported should be nonTR(3). For a Token Ring line-card the value is inserted or wrapped ')
mibBuilder.exportSymbols("DECHUB900-HRIP-MIB-V3-0", hripRingStatIndex=hripRingStatIndex, hripRingCfgIndex=hripRingCfgIndex, hripPubSlotStatTable=hripPubSlotStatTable, decMIBextension=decMIBextension, hripPubSlotStatEntry=hripPubSlotStatEntry, mgmtAgentVersion1=mgmtAgentVersion1, hripRingStatNumModInserted=hripRingStatNumModInserted, dec=dec, hripPubRingStatTable=hripPubRingStatTable, hrip=hrip, hripSlotStatRingAInsertCount=hripSlotStatRingAInsertCount, hripSlotStatTcuB=hripSlotStatTcuB, mgmtAgent=mgmtAgent, hripSlotStatIndex=hripSlotStatIndex, ema=ema, hripSlotCfgDisable=hripSlotCfgDisable, hripRingCfgSpeed=hripRingCfgSpeed, hripSlotStatRingBInsertCount=hripSlotStatRingBInsertCount, hripPubSlotCfgEntry=hripPubSlotCfgEntry, hripSlotCfgForce=hripSlotCfgForce, hripPubRingStatEntry=hripPubRingStatEntry, decHub900=decHub900, hripPubRingCfgEntry=hripPubRingCfgEntry, hripSlotStatTcuA=hripSlotStatTcuA, hripPubSlotCfgTable=hripPubSlotCfgTable, hripSlotCfgIndex=hripSlotCfgIndex, hripPubRingCfgTable=hripPubRingCfgTable)
| [
"[email protected]"
] | |
81f7ea6ba42d4ee18d39650fa7de3474ed999af4 | f656bae2f0fbfe58a980612729b7a54dba7b9873 | /4. Información_celular.py | 0f5d1b0dd232c2ebad3d7871a9446fcbd3368c91 | [] | no_license | neamedina73/Ejercicios_java | a5715ed4deb638c0e0e700f02aee8bd7a24b596e | 427cf54d241945d15a1f05bfbffcdcb4431d86de | refs/heads/main | 2023-05-31T18:24:12.873844 | 2021-07-03T00:30:34 | 2021-07-03T00:30:34 | 382,456,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 09:39:41 2021
@author: Alejandro AJ
"""
class celular:
def __init__(self, marca, modelo, tamaño, color, peso):
self.marca = marca
self.modelo = modelo
self.tamaño = tamaño
self.color = color
self.peso = peso
def gama(self):
print('Su celular es de gama alta.')
def estado(self):
print('Su celular se encuentra en perfecto estado')
    def precio(self):
        if self.peso > 200:
            print(f'su celular {self.marca} es pesado.')
        else:
            print(f'su celular {self.marca} es liviano.')
micelu = celular("Iphone","11 PRO", "7 pulgadas", "gris", 130) # Instantiating the celular() class
print(micelu.marca) # Print the "marca" attribute of the "celular" object
print(micelu.modelo) # Print the "modelo" attribute of the "celular" object
print(micelu.tamaño) # Print the "tamaño" attribute of the "celular" object
print(micelu.color) # Print the "color" attribute of the "celular" object
print(micelu.peso) | [
"[email protected]"
] | |
8d838ad1b17dd0480a189e316ae027e1fd5cb5b4 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /LR98GCwLGYPSv8Afb_1.py | 51545319e53c872c2a1520d669972a99be80e25f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py |
def pluralize(lst):
return {i+'s' if lst.count(i)>1 else i for i in lst}
| [
"[email protected]"
] | |
8fa3d155dfb3ad674ecdfb942b55ba95f138f59b | f1efa3f00d06a12e4bce5ac8742dbe45a82d8553 | /MarkovModel.py | 6d62ff7cd5d4793627aceef34b8fe720772324c0 | [] | no_license | kd536/HPM573S18_DESOBRY_HW10- | 655ac942f00ea221181e069ce0f4835a0472d8e2 | 9b9e643700b292e626f1b494767a50bbbe3f32b5 | refs/heads/master | 2020-03-11T11:55:17.486633 | 2018-04-18T03:46:57 | 2018-04-18T03:46:57 | 129,982,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,071 | py | import scr.SamplePathClasses as PathCls
import scr.StatisticalClasses as StatCls
import scr.RandomVariantGenerators as rndClasses
import ParameterClasses as P
import InputData as Data
# patient class simulates patient, patient monitor follows patient, cohort simulates a cohort,
# cohort outcome extracts info from simulation and returns it back
class Patient: # when you store in self then all the things in that class have access to it
def __init__(self, id, parameters):
""" initiates a patient
:param id: ID of the patient
:param parameters: parameter object
"""
self._id = id
# random number generator
self._rng = None
# parameters
self._param = parameters
# state monitor
self._stateMonitor = PatientStateMonitor(parameters)
# simulate time step
self._delta_t = parameters.get_delta_t() # length of time step!
def simulate(self, sim_length):
""" simulate the patient over the specified simulation length """
# random number generator for this patient
self._rng = rndClasses.RNG(self._id) # from now on use random number generator from support library
k = 0 # current time step
# while the patient is alive and simulation length is not yet reached
while self._stateMonitor.get_if_alive() and k*self._delta_t < sim_length:
# find transition probabilities of future state
trans_prob = self._param.get_transition_prob(self._stateMonitor.get_current_state())
# create an empirical distribution
empirical_dist = rndClasses.Empirical(trans_prob)
# sample from the empirical distribution to get a new state
# (return an intger from {0, 1, 2, ...}
new_state_index = empirical_dist.sample(self._rng) # pass RNG
# update health state
self._stateMonitor.update(k, P.HealthStats(new_state_index))
# increment time step
k += 1
def get_survival_time(self):
""" returns the patient's survival time"""
return self._stateMonitor.get_survival_time()
def get_number_of_strokes(self):
""" returns the patient's time to the POST_STROKE state """
return self._stateMonitor.get_num_of_STROKE()
class PatientStateMonitor:
""" to update patient outcomes (years survived, cost, etc.) throughout the simulation """
def __init__(self, parameters):
"""
:param parameters: patient parameters
"""
# current health state
self._currentState = parameters.get_initial_health_state()
self._delta_t = parameters.get_delta_t()
self._survivalTime = 0
self._ifDevelopedStroke = False
self._strokecount = 0
def update(self, k, next_state):
"""
:param k: current time step
:param next_state: next state
"""
# updates state of patient
# if the patient has died, do nothing
if not self.get_if_alive():
return
# update survival time
if next_state is P.HealthStats.DEATH:
            self._survivalTime = (k+0.5) * self._delta_t  # k is the number of steps elapsed; delta_t is the length of a time
            # step; the 0.5 is a half-cycle correction
if self._currentState == P.HealthStats.STROKE:
self._ifDevelopedStroke = True
self._strokecount += 1
self._currentState = next_state
def get_if_alive(self):
result = True
if self._currentState == P.HealthStats.DEATH:
result = False
return result
def get_current_state(self):
return self._currentState
def get_survival_time(self):
""" returns the patient survival time """
# return survival time only if the patient has died
if not self.get_if_alive():
return self._survivalTime
else:
return None
def get_num_of_STROKE(self):
return self._strokecount
class Cohort:
def __init__(self, id, therapy):
""" create a cohort of patients
:param id: an integer to specify the seed of the random number generator
"""
self._initial_pop_size = Data.POP_SIZE
self._patients = [] # list of patients
# populate the cohort
for i in range(self._initial_pop_size):
# create a new patient (use id * pop_size + i as patient id)
patient = Patient(id * self._initial_pop_size + i, P.ParametersFixed(therapy))
# add the patient to the cohort
self._patients.append(patient)
def simulate(self):
""" simulate the cohort of patients over the specified number of time-steps
:returns outputs from simulating this cohort
"""
# simulate all patients
for patient in self._patients:
patient.simulate(Data.SIM_LENGTH)
# return the cohort outputs
return CohortOutputs(self)
def get_initial_pop_size(self):
return self._initial_pop_size
def get_patients(self):
return self._patients
class CohortOutputs:
def __init__(self, simulated_cohort):
""" extracts outputs from a simulated cohort
:param simulated_cohort: a cohort after being simulated
"""
self._survivalTimes = [] # patients' survival times
self._times_to_Stroke = [] # patients' times to stroke
self._count_strokes = []
# survival curve
self._survivalCurve = \
PathCls.SamplePathBatchUpdate('Population size over time', id, simulated_cohort.get_initial_pop_size())
# find patients' survival times
for patient in simulated_cohort.get_patients():
# get the patient survival time
survival_time = patient.get_survival_time()
if not (survival_time is None):
self._survivalTimes.append(survival_time) # store the survival time of this patient
self._survivalCurve.record(survival_time, -1) # update the survival curve
count_strokes = patient.get_number_of_strokes()
self._count_strokes.append(count_strokes)
# summary statistics
self._sumStat_survivalTime = StatCls.SummaryStat('Patient survival time', self._survivalTimes)
self._sumState_number_strokes = StatCls.SummaryStat('Time until stroke', self._count_strokes)
def get_if_developed_stroke(self):
return self._count_strokes
def get_survival_times(self):
return self._survivalTimes
def get_sumStat_survival_times(self):
return self._sumStat_survivalTime
def get_survival_curve(self):
return self._survivalCurve
def get_sumStat_count_strokes(self):
return self._sumState_number_strokes
| [
"[email protected]"
] | |
83abbe58f6fc2055852b647c54c5920e08777d4d | c214a3d6fbfddcb5473e4499b948e24c367f6746 | /Cesear.py | ef4797181e8b727e7ce883f4c4b52fded4e00460 | [] | no_license | kamouzougan/Cybersecurity | ff2d8df944e2f3513bd4dbd84a5e4e1612dac29e | c6b1a2c21753d1f1a07fb1a1ace3c64cbbe41205 | refs/heads/master | 2020-05-05T08:54:43.947649 | 2020-04-09T11:30:46 | 2020-04-09T11:30:46 | 179,883,232 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | MAX_KEY_SIZE = 26
def getMode():
while True :
print("Do you wish to encrypt or decrypt a message?")
mode = input().lower()
if mode in 'encrypt e decrypt d'.split():
return mode
else :
print('Enter either "encrypt" or "e" or "decrypt" or "d".')
def getMessage():
data = open("plaintext.txt","r")
if data.mode == "r" :
contents = data.read()
print(contents)
return contents
def getKey():
key = 0
while True:
print('Enter the key number (1-%s)' % (MAX_KEY_SIZE))
key = int(input())
if (key >= 1 and key <= MAX_KEY_SIZE):
return key
def getTranslatedMessage(mode, message, key):
if mode[0] == 'd':
key = -key
translated = ''
for symbol in message:
if symbol.isalpha():
num = ord(symbol)
num += key
if symbol.isupper():
if num > ord('Z'):
num -= 26
elif num < ord('A'):
num += 26
elif symbol.islower():
if num > ord('z'):
num -= 26
elif num < ord('a'):
num += 26
translated += chr(num)
else:
translated += symbol
return translated
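# Worked example (illustrative only, not part of the original script):
# getTranslatedMessage('encrypt', 'HELLO', 3) shifts each letter forward by 3 and
# returns 'KHOOR'; getTranslatedMessage('decrypt', 'KHOOR', 3) restores 'HELLO'.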
mode = getMode()
message = getMessage()
key = getKey()
print('Your translated text is:')
print(getTranslatedMessage(mode, message, key))
| [
"[email protected]"
] | |
c275d436f6f6dec21e35cd80e4f9de92952b5921 | 325c97b94b84f54df18c0a770bbf90cb2cd87186 | /pc/gui/image_viewer.py | a5914c19088dcd8cdd1b0c858adc35e49a7797af | [] | no_license | alexbaraker/DroneGui | c289716e1a61ec1795017529148b8a7f1d1fcedb | 0945567dd0c0d4ed0d59cf0e492f039efa733246 | refs/heads/master | 2020-06-20T12:06:24.679541 | 2019-07-23T18:11:23 | 2019-07-23T18:11:23 | 197,117,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | #######################################################################################################################################
# By: SupremeDrones Team; Alex Baraker, Dean, Kelsey, Hammad
# Date: 3/06/2019
# Info: Widget for displaying loaded image
#######################################################################################################################################
from threading import Thread
import time
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from gui.opencv_image import OpenCvImageWidget
class ImageViewerWidget(QWidget):
def __init__(self, parent):
super(QWidget, self).__init__(parent)
self.v_layout = QVBoxLayout()
self.opencv_image = OpenCvImageWidget(self)
#self.load_image_btn = QPushButton("Load Image")
#self.load_image_btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
#self.load_image_btn.clicked[bool].connect(self.load_file_button_clicked)
self.v_layout.addWidget(self.opencv_image)
#self.v_layout.addWidget(self.load_image_btn)
self.setLayout(self.v_layout)
# self.thread = Thread(target=self.display_loop, args=())
# self.thread.daemon = True
# self.thread.start()
#def load_file_button_clicked(self):
# self.opencv_image.open_cv_image()
#def display_loop(self):
# while True:
# self.opencv_image.refresh_image()
# time.sleep(0.05)
def strName_out(self):
self.opencv_image.strName() | [
"[email protected]"
] | |
ed64e352839fee277680c8be39d3058c38d029a5 | d570fc2e36f0842605ad6e9dda3cbd4910160a07 | /src/webdav/Resource.py | 5b3121865ca3ace9d66cf08ff6f649d0b1b59b89 | [
"ZPL-2.1"
] | permissive | zopefoundation/ZServer | 8540fc7c411a7857abf4034068f75f2f1c7ba98c | eb047c795a278c22ae77f5af4284411e4689025e | refs/heads/master | 2023-06-21T20:54:53.580461 | 2023-02-10T09:43:55 | 2023-02-10T09:43:55 | 65,092,325 | 6 | 9 | NOASSERTION | 2020-09-17T07:25:50 | 2016-08-06T16:47:48 | Python | UTF-8 | Python | false | false | 27,157 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""WebDAV support - resource objects.
"""
import mimetypes
import sys
import re
from urllib import unquote
from AccessControl import getSecurityManager
from AccessControl import ClassSecurityInfo
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import delete_objects
from AccessControl.Permissions import manage_properties
from AccessControl.Permissions import view as View
from AccessControl.Permissions import webdav_lock_items
from AccessControl.Permissions import webdav_unlock_items
from AccessControl.Permissions import webdav_access
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.Common import rfc1123_date
from ExtensionClass import Base
from OFS.event import ObjectClonedEvent
from OFS.event import ObjectWillBeMovedEvent
from OFS.interfaces import IWriteLock
from OFS.Lockable import LockableItem
from OFS.Lockable import wl_isLockable
from OFS.Lockable import wl_isLocked
from OFS.subscribers import compatibilityCall
from zExceptions import BadRequest
from zExceptions import Forbidden
from zExceptions import MethodNotAllowed
from zExceptions import NotFound
from zExceptions import Unauthorized
import ZServer.Zope2.Startup.config
from ZPublisher.HTTPRangeSupport import HTTPRangeInterface
from zope.interface import implementer
from zope.event import notify
from zope.lifecycleevent import ObjectCopiedEvent
from zope.lifecycleevent import ObjectMovedEvent
from zope.container.contained import notifyContainerModified
from webdav.common import absattr
from webdav.common import Conflict
from webdav.common import IfParser
from webdav.common import isDavCollection
from webdav.common import Locked
from webdav.common import PreconditionFailed
from webdav.common import tokenFinder
from webdav.common import urlbase
from webdav.common import urlfix
from webdav.interfaces import IDAVResource
ms_dav_agent = re.compile("Microsoft.*Internet Publishing.*")
@implementer(IDAVResource)
class Resource(Base, LockableItem):
"""The Resource mixin class provides basic WebDAV support for
non-collection objects. It provides default implementations
for most supported WebDAV HTTP methods, however certain methods
such as PUT should be overridden to ensure correct behavior in
the context of the object type."""
__dav_resource__ = 1
__http_methods__ = ('GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS',
'TRACE', 'PROPFIND', 'PROPPATCH', 'MKCOL', 'COPY',
'MOVE', 'LOCK', 'UNLOCK',
)
security = ClassSecurityInfo()
security.setPermissionDefault(webdav_access, ('Authenticated', 'Manager'))
def dav__init(self, request, response):
# Init expected HTTP 1.1 / WebDAV headers which are not
# currently set by the base response object automagically.
#
# We sniff for a ZServer response object, because we don't
# want to write duplicate headers (since ZS writes Date
# and Connection itself).
if not hasattr(response, '_server_version'):
response.setHeader('Connection', 'close')
response.setHeader('Date', rfc1123_date(), 1)
# HTTP Range support
if HTTPRangeInterface.providedBy(self):
response.setHeader('Accept-Ranges', 'bytes')
else:
response.setHeader('Accept-Ranges', 'none')
def dav__validate(self, object, methodname, REQUEST):
msg = ('<strong>You are not authorized '
'to access this resource.</strong>')
method = None
if hasattr(object, methodname):
method = getattr(object, methodname)
else:
try:
method = object.aq_acquire(methodname)
except Exception:
method = None
if method is not None:
try:
return getSecurityManager().validate(None, object,
methodname,
method)
except Exception:
pass
raise Unauthorized(msg)
def dav__simpleifhandler(self, request, response, method='PUT',
col=0, url=None, refresh=0):
ifhdr = request.get_header('If', None)
lockable = wl_isLockable(self)
if not lockable:
# degenerate case, we shouldnt have even called this method.
return None
locked = self.wl_isLocked()
if locked and (not ifhdr):
raise Locked('Resource is locked.')
if not ifhdr:
return None
# Since we're a simple if handler, and since some clients don't
# pass in the port information in the resource part of an If
# header, we're only going to worry about if the paths compare
if url is None:
url = urlfix(request['URL'], method)
url = urlbase(url) # Gets just the path information
# if 'col' is passed in, an operation is happening on a submember
# of a collection, while the Lock may be on the parent. Lob off
# the final part of the URL (ie '/a/b/foo.html' becomes '/a/b/')
if col:
url = url[:url.rfind('/') + 1]
found = 0
resourcetagged = 0
taglist = IfParser(ifhdr)
for tag in taglist:
if not tag.resource:
# There's no resource (url) with this tag
tag_list = map(tokenFinder, tag.list)
wehave = [t for t in tag_list if self.wl_hasLock(t)]
if not wehave:
continue
if tag.NOTTED:
continue
if refresh:
for token in wehave:
self.wl_getLock(token).refresh()
resourcetagged = 1
found = 1
break
elif urlbase(tag.resource) == url:
resourcetagged = 1
tag_list = map(tokenFinder, tag.list)
wehave = [t for t in tag_list if self.wl_hasLock(t)]
if not wehave:
continue
if tag.NOTTED:
continue
if refresh:
for token in wehave:
self.wl_getLock(token).refresh()
found = 1
break
if resourcetagged and (not found):
raise PreconditionFailed('Condition failed.')
elif resourcetagged and found:
return 1
else:
return 0
# WebDAV class 1 support
security.declareProtected(View, 'HEAD')
def HEAD(self, REQUEST, RESPONSE):
"""Retrieve resource information without a response body."""
self.dav__init(REQUEST, RESPONSE)
content_type = None
if hasattr(self, 'content_type'):
content_type = absattr(self.content_type)
if content_type is None:
url = urlfix(REQUEST['URL'], 'HEAD')
name = unquote(filter(None, url.split('/')[-1]))
content_type, encoding = mimetypes.guess_type(name)
if content_type is None:
if hasattr(self, 'default_content_type'):
content_type = absattr(self.default_content_type)
if content_type is None:
content_type = 'application/octet-stream'
RESPONSE.setHeader('Content-Type', content_type.lower())
if hasattr(aq_base(self), 'get_size'):
RESPONSE.setHeader('Content-Length', absattr(self.get_size))
if hasattr(self, '_p_mtime'):
mtime = rfc1123_date(self._p_mtime)
RESPONSE.setHeader('Last-Modified', mtime)
if hasattr(aq_base(self), 'http__etag'):
etag = self.http__etag(readonly=1)
if etag:
RESPONSE.setHeader('Etag', etag)
RESPONSE.setStatus(200)
return RESPONSE
def PUT(self, REQUEST, RESPONSE):
"""Replace the GET response entity of an existing resource.
Because this is often object-dependent, objects which handle
PUT should override the default PUT implementation with an
object-specific implementation. By default, PUT requests
fail with a 405 (Method Not Allowed)."""
self.dav__init(REQUEST, RESPONSE)
raise MethodNotAllowed('Method not supported for this resource.')
security.declarePublic('OPTIONS')
def OPTIONS(self, REQUEST, RESPONSE):
"""Retrieve communication options."""
self.dav__init(REQUEST, RESPONSE)
RESPONSE.setHeader('Allow', ', '.join(self.__http_methods__))
RESPONSE.setHeader('Content-Length', 0)
RESPONSE.setHeader('DAV', '1,2', 1)
# Microsoft Web Folders compatibility, only enabled if
# User-Agent matches.
if ms_dav_agent.match(REQUEST.get_header('User-Agent', '')):
if ZServer.Zope2.Startup.config.ZSERVER_ENABLE_MS_PUBLIC_HEADER:
RESPONSE.setHeader('Public', ', '.join(self.__http_methods__))
RESPONSE.setStatus(200)
return RESPONSE
security.declarePublic('TRACE')
def TRACE(self, REQUEST, RESPONSE):
"""Return the HTTP message received back to the client as the
        entity-body of a 200 (OK) response. This will usually
be intercepted by the web server in use. If not, the TRACE
request will fail with a 405 (Method Not Allowed), since it
is not often possible to reproduce the HTTP request verbatim
from within the Zope environment."""
self.dav__init(REQUEST, RESPONSE)
raise MethodNotAllowed('Method not supported for this resource.')
security.declareProtected(delete_objects, 'DELETE')
def DELETE(self, REQUEST, RESPONSE):
"""Delete a resource. For non-collection resources, DELETE may
return either 200 or 204 (No Content) to indicate success."""
self.dav__init(REQUEST, RESPONSE)
ifhdr = REQUEST.get_header('If', '')
url = urlfix(REQUEST['URL'], 'DELETE')
name = unquote(filter(None, url.split('/')[-1]))
parent = aq_parent(aq_inner(self))
# Lock checking
if wl_isLocked(self):
if ifhdr:
self.dav__simpleifhandler(REQUEST, RESPONSE, 'DELETE')
else:
# We're locked, and no if header was passed in, so
# the client doesn't own a lock.
raise Locked('Resource is locked.')
elif IWriteLock.providedBy(parent) and parent.wl_isLocked():
if ifhdr:
parent.dav__simpleifhandler(REQUEST, RESPONSE, 'DELETE', col=1)
else:
# Our parent is locked, and no If header was passed in.
# When a parent is locked, members cannot be removed
raise PreconditionFailed(
'Resource is locked, and no condition was passed in.')
# Either we're not locked, or a succesful lock token was submitted
# so we can delete the lock now.
# ajung: Fix for Collector # 2196
if parent.manage_delObjects([name], REQUEST=None) is None:
RESPONSE.setStatus(204)
else:
RESPONSE.setStatus(403)
return RESPONSE
security.declareProtected(webdav_access, 'PROPFIND')
def PROPFIND(self, REQUEST, RESPONSE):
"""Retrieve properties defined on the resource."""
from webdav.davcmds import PropFind
self.dav__init(REQUEST, RESPONSE)
cmd = PropFind(REQUEST)
result = cmd.apply(self)
# work around MSIE DAV bug for creation and modified date
if (REQUEST.get_header('User-Agent') ==
'Microsoft Data Access Internet Publishing Provider DAV 1.1'):
result = result.replace('<n:getlastmodified xmlns:n="DAV:">',
'<n:getlastmodified xmlns:n="DAV:" xmlns:b="urn:uuid:c2f41010-65b3-11d1-a29f-00aa00c14882/" b:dt="dateTime.rfc1123">') # NOQA
result = result.replace('<n:creationdate xmlns:n="DAV:">',
'<n:creationdate xmlns:n="DAV:" xmlns:b="urn:uuid:c2f41010-65b3-11d1-a29f-00aa00c14882/" b:dt="dateTime.tz">') # NOQA
RESPONSE.setStatus(207)
RESPONSE.setHeader('Content-Type', 'text/xml; charset="utf-8"')
RESPONSE.setBody(result)
return RESPONSE
security.declareProtected(manage_properties, 'PROPPATCH')
def PROPPATCH(self, REQUEST, RESPONSE):
"""Set and/or remove properties defined on the resource."""
from webdav.davcmds import PropPatch
self.dav__init(REQUEST, RESPONSE)
if not hasattr(aq_base(self), 'propertysheets'):
raise MethodNotAllowed(
'Method not supported for this resource.')
# Lock checking
ifhdr = REQUEST.get_header('If', '')
if wl_isLocked(self):
if ifhdr:
self.dav__simpleifhandler(REQUEST, RESPONSE, 'PROPPATCH')
else:
raise Locked('Resource is locked.')
cmd = PropPatch(REQUEST)
result = cmd.apply(self)
RESPONSE.setStatus(207)
RESPONSE.setHeader('Content-Type', 'text/xml; charset="utf-8"')
RESPONSE.setBody(result)
return RESPONSE
def MKCOL(self, REQUEST, RESPONSE):
"""Create a new collection resource. If called on an existing
resource, MKCOL must fail with 405 (Method Not Allowed)."""
self.dav__init(REQUEST, RESPONSE)
raise MethodNotAllowed('The resource already exists.')
security.declarePublic('COPY')
def COPY(self, REQUEST, RESPONSE):
"""Create a duplicate of the source resource whose state
and behavior match that of the source resource as closely
as possible. Though we may later try to make a copy appear
seamless across namespaces (e.g. from Zope to Apache), COPY
is currently only supported within the Zope namespace."""
self.dav__init(REQUEST, RESPONSE)
if not hasattr(aq_base(self), 'cb_isCopyable') or \
not self.cb_isCopyable():
raise MethodNotAllowed('This object may not be copied.')
depth = REQUEST.get_header('Depth', 'infinity')
if depth not in ('0', 'infinity'):
raise BadRequest('Invalid Depth header.')
dest = REQUEST.get_header('Destination', '')
while dest and dest[-1] == '/':
dest = dest[:-1]
if not dest:
raise BadRequest('Invalid Destination header.')
try:
path = REQUEST.physicalPathFromURL(dest)
except ValueError:
raise BadRequest('Invalid Destination header')
name = path.pop()
oflag = REQUEST.get_header('Overwrite', 'F').upper()
if oflag not in ('T', 'F'):
raise BadRequest('Invalid Overwrite header.')
try:
parent = self.restrictedTraverse(path)
except ValueError:
raise Conflict('Attempt to copy to an unknown namespace.')
except NotFound:
raise Conflict('Object ancestors must already exist.')
except Exception:
raise
if hasattr(parent, '__null_resource__'):
raise Conflict('Object ancestors must already exist.')
existing = hasattr(aq_base(parent), name)
if existing and oflag == 'F':
raise PreconditionFailed('Destination resource exists.')
try:
parent._checkId(name, allow_dup=1)
except Exception:
raise Forbidden(sys.exc_info()[1])
try:
parent._verifyObjectPaste(self)
except Unauthorized:
raise
except Exception:
raise Forbidden(sys.exc_info()[1])
# Now check locks. The If header on a copy only cares about the
# lock on the destination, so we need to check out the destinations
# lock status.
ifhdr = REQUEST.get_header('If', '')
if existing:
# The destination itself exists, so we need to check its locks
destob = aq_base(parent)._getOb(name)
if IWriteLock.providedBy(destob) and destob.wl_isLocked():
if ifhdr:
itrue = destob.dav__simpleifhandler(
REQUEST, RESPONSE, 'COPY', refresh=1)
if not itrue:
raise PreconditionFailed()
else:
raise Locked('Destination is locked.')
elif IWriteLock.providedBy(parent) and parent.wl_isLocked():
if ifhdr:
parent.dav__simpleifhandler(REQUEST, RESPONSE, 'COPY',
refresh=1)
else:
raise Locked('Destination is locked.')
self._notifyOfCopyTo(parent, op=0)
ob = self._getCopy(parent)
ob._setId(name)
if depth == '0' and isDavCollection(ob):
for id in ob.objectIds():
ob._delObject(id)
notify(ObjectCopiedEvent(ob, self))
if existing:
object = getattr(parent, name)
self.dav__validate(object, 'DELETE', REQUEST)
parent._delObject(name)
parent._setObject(name, ob)
ob = parent._getOb(name)
ob._postCopy(parent, op=0)
compatibilityCall('manage_afterClone', ob, ob)
notify(ObjectClonedEvent(ob))
# We remove any locks from the copied object because webdav clients
# don't track the lock status and the lock token for copied resources
ob.wl_clearLocks()
RESPONSE.setStatus(existing and 204 or 201)
if not existing:
RESPONSE.setHeader('Location', dest)
RESPONSE.setBody('')
return RESPONSE
security.declarePublic('MOVE')
def MOVE(self, REQUEST, RESPONSE):
"""Move a resource to a new location. Though we may later try to
make a move appear seamless across namespaces (e.g. from Zope
to Apache), MOVE is currently only supported within the Zope
namespace."""
self.dav__init(REQUEST, RESPONSE)
self.dav__validate(self, 'DELETE', REQUEST)
if not hasattr(aq_base(self), 'cb_isMoveable') or \
not self.cb_isMoveable():
raise MethodNotAllowed('This object may not be moved.')
dest = REQUEST.get_header('Destination', '')
try:
path = REQUEST.physicalPathFromURL(dest)
except ValueError:
raise BadRequest('No destination given')
flag = REQUEST.get_header('Overwrite', 'F')
flag = flag.upper()
name = path.pop()
parent_path = '/'.join(path)
try:
parent = self.restrictedTraverse(path)
except ValueError:
raise Conflict('Attempt to move to an unknown namespace.')
        except NotFound:
raise Conflict('The resource %s must exist.' % parent_path)
except Exception:
raise
if hasattr(parent, '__null_resource__'):
raise Conflict('The resource %s must exist.' % parent_path)
existing = hasattr(aq_base(parent), name)
if existing and flag == 'F':
raise PreconditionFailed('Resource %s exists.' % dest)
try:
parent._checkId(name, allow_dup=1)
except Exception:
raise Forbidden(sys.exc_info()[1])
try:
parent._verifyObjectPaste(self)
except Unauthorized:
raise
except Exception:
raise Forbidden(sys.exc_info()[1])
# Now check locks. Since we're affecting the resource that we're
# moving as well as the destination, we have to check both.
ifhdr = REQUEST.get_header('If', '')
if existing:
# The destination itself exists, so we need to check its locks
destob = aq_base(parent)._getOb(name)
if IWriteLock.providedBy(destob) and destob.wl_isLocked():
if ifhdr:
itrue = destob.dav__simpleifhandler(
REQUEST, RESPONSE, 'MOVE', url=dest, refresh=1)
if not itrue:
raise PreconditionFailed
else:
raise Locked('Destination is locked.')
elif IWriteLock.providedBy(parent) and parent.wl_isLocked():
# There's no existing object in the destination folder, so
# we need to check the folders locks since we're changing its
# member list
if ifhdr:
itrue = parent.dav__simpleifhandler(REQUEST, RESPONSE, 'MOVE',
col=1, url=dest, refresh=1)
if not itrue:
raise PreconditionFailed('Condition failed.')
else:
raise Locked('Destination is locked.')
if wl_isLocked(self):
# Lastly, we check ourselves
if ifhdr:
itrue = self.dav__simpleifhandler(REQUEST, RESPONSE, 'MOVE',
refresh=1)
if not itrue:
raise PreconditionFailed('Condition failed.')
else:
raise Locked('Source is locked and no condition was passed in')
orig_container = aq_parent(aq_inner(self))
orig_id = self.getId()
self._notifyOfCopyTo(parent, op=1)
notify(ObjectWillBeMovedEvent(self, orig_container, orig_id,
parent, name))
# try to make ownership explicit so that it gets carried
# along to the new location if needed.
self.manage_changeOwnershipType(explicit=1)
ob = self._getCopy(parent)
ob._setId(name)
orig_container._delObject(orig_id, suppress_events=True)
if existing:
object = getattr(parent, name)
self.dav__validate(object, 'DELETE', REQUEST)
parent._delObject(name)
parent._setObject(name, ob, set_owner=0, suppress_events=True)
ob = parent._getOb(name)
notify(ObjectMovedEvent(ob, orig_container, orig_id, parent, name))
notifyContainerModified(orig_container)
if aq_base(orig_container) is not aq_base(parent):
notifyContainerModified(parent)
ob._postCopy(parent, op=1)
# try to make ownership implicit if possible
ob.manage_changeOwnershipType(explicit=0)
RESPONSE.setStatus(existing and 204 or 201)
if not existing:
RESPONSE.setHeader('Location', dest)
RESPONSE.setBody('')
return RESPONSE
# WebDAV Class 2, Lock and Unlock
security.declareProtected(webdav_lock_items, 'LOCK')
def LOCK(self, REQUEST, RESPONSE):
"""Lock a resource"""
from webdav.davcmds import Lock
self.dav__init(REQUEST, RESPONSE)
security = getSecurityManager()
creator = security.getUser()
body = REQUEST.get('BODY', '')
ifhdr = REQUEST.get_header('If', None)
depth = REQUEST.get_header('Depth', 'infinity')
alreadylocked = wl_isLocked(self)
if body and alreadylocked:
# This is a full LOCK request, and the Resource is
# already locked, so we need to raise the alreadylocked
# exception.
RESPONSE.setStatus(423)
elif body:
# This is a normal lock request with an XML payload
cmd = Lock(REQUEST)
token, result = cmd.apply(self, creator, depth=depth)
if result:
# Return the multistatus result (there were multiple
# errors. Note that davcmds.Lock.apply aborted the
# transaction already.
RESPONSE.setStatus(207)
RESPONSE.setHeader('Content-Type', 'text/xml; charset="utf-8"')
RESPONSE.setBody(result)
else:
# Success
lock = self.wl_getLock(token)
RESPONSE.setStatus(200)
RESPONSE.setHeader('Content-Type', 'text/xml; charset="utf-8"')
RESPONSE.setHeader('Lock-Token', 'opaquelocktoken:' + token)
RESPONSE.setBody(lock.asXML())
else:
# There's no body, so this likely to be a refresh request
if not ifhdr:
raise PreconditionFailed('If Header Missing')
taglist = IfParser(ifhdr)
found = 0
for tag in taglist:
for listitem in tag.list:
token = tokenFinder(listitem)
if token and self.wl_hasLock(token):
lock = self.wl_getLock(token)
timeout = REQUEST.get_header('Timeout', 'Infinite')
lock.setTimeout(timeout) # automatically refreshes
found = 1
RESPONSE.setStatus(200)
RESPONSE.setHeader('Content-Type',
'text/xml; charset="utf-8"')
RESPONSE.setBody(lock.asXML())
break
if found:
break
if not found:
RESPONSE.setStatus(412) # Precondition failed
return RESPONSE
security.declareProtected(webdav_unlock_items, 'UNLOCK')
def UNLOCK(self, REQUEST, RESPONSE):
"""Remove an existing lock on a resource."""
from webdav.davcmds import Unlock
self.dav__init(REQUEST, RESPONSE)
token = REQUEST.get_header('Lock-Token', '')
url = REQUEST['URL']
token = tokenFinder(token)
cmd = Unlock()
result = cmd.apply(self, token, url)
if result:
RESPONSE.setStatus(207)
RESPONSE.setHeader('Content-Type', 'text/xml; charset="utf-8"')
RESPONSE.setBody(result)
else:
RESPONSE.setStatus(204) # No Content response code
return RESPONSE
security.declareProtected(webdav_access, 'manage_DAVget')
def manage_DAVget(self):
"""Gets the document source"""
# The default implementation calls manage_FTPget
return self.manage_FTPget()
security.declareProtected(webdav_access, 'listDAVObjects')
def listDAVObjects(self):
return []
InitializeClass(Resource)
| [
"[email protected]"
] | |
49f7dbbdfffd887a721bcc1a2ee1ced7e8de18d3 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/netvisor/pn_cpu_class.py | fadbed03e41b7d154a3530d1d8ce9f13d78ed446 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 5,894 | py | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pn_cpu_class
author: "Pluribus Networks (@rajaspachipulusu17)"
short_description: CLI command to create/modify/delete cpu-class
description:
- This module can be used to create, modify and delete CPU class information.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(present) to create cpu-class and
C(absent) to delete cpu-class C(update) to modify the cpu-class.
required: True
type: str
choices: ['present', 'absent', 'update']
pn_scope:
description:
- scope for CPU class.
required: false
choices: ['local', 'fabric']
pn_hog_protect:
description:
- enable host-based hog protection.
required: False
type: str
choices: ['disable', 'enable', 'enable-and-drop']
pn_rate_limit:
description:
- rate-limit for CPU class.
required: False
type: str
pn_name:
description:
- name for the CPU class.
required: False
type: str
'''
EXAMPLES = """
- name: create cpu class
pn_cpu_class:
pn_cliswitch: 'sw01'
state: 'present'
pn_name: 'icmp'
pn_rate_limit: '1000'
pn_scope: 'local'
- name: delete cpu class
pn_cpu_class:
pn_cliswitch: 'sw01'
state: 'absent'
pn_name: 'icmp'
- name: modify cpu class
pn_cpu_class:
pn_cliswitch: 'sw01'
state: 'update'
pn_name: 'icmp'
pn_rate_limit: '2000'
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the cpu-class command.
returned: always
type: list
stderr:
description: set of error responses from the cpu-class command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the cpu-class-show command.
If a user with given name exists, return True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_name']
clicopy = cli
cli += ' system-settings-show format cpu-class-enable no-show-headers'
out = run_commands(module, cli)[1]
out = out.split()
if 'on' not in out:
module.fail_json(
failed=True,
msg='Enable CPU class before creating or deleting'
)
cli = clicopy
cli += ' cpu-class-show format name no-show-headers'
out = run_commands(module, cli)[1]
if out:
out = out.split()
return True if name in out else False
def main():
""" This section is for arguments parsing """
state_map = dict(
present='cpu-class-create',
absent='cpu-class-delete',
update='cpu-class-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_scope=dict(required=False, type='str',
choices=['local', 'fabric']),
pn_hog_protect=dict(required=False, type='str',
choices=['disable', 'enable',
'enable-and-drop']),
pn_rate_limit=dict(required=False, type='str'),
pn_name=dict(required=False, type='str'),
),
required_if=(
['state', 'present', ['pn_name', 'pn_scope', 'pn_rate_limit']],
['state', 'absent', ['pn_name']],
['state', 'update', ['pn_name']],
)
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
scope = module.params['pn_scope']
hog_protect = module.params['pn_hog_protect']
rate_limit = module.params['pn_rate_limit']
name = module.params['pn_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
NAME_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
if command == 'cpu-class-modify':
if NAME_EXISTS is False:
module.fail_json(
failed=True,
msg='cpu class with name %s does not exist' % name
)
if command == 'cpu-class-delete':
if NAME_EXISTS is False:
module.exit_json(
skipped=True,
msg='cpu class with name %s does not exist' % name
)
if command == 'cpu-class-create':
if NAME_EXISTS is True:
module.exit_json(
skipped=True,
msg='cpu class with name %s already exists' % name
)
if scope:
cli += ' scope %s ' % scope
if command != 'cpu-class-delete':
if hog_protect:
cli += ' hog-protect %s ' % hog_protect
if rate_limit:
cli += ' rate-limit %s ' % rate_limit
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
da3610c3294ce12848a64eefb746c8bed8a6b374 | 5b9fe71c2efe0139205020b038f7d31b6a5ede87 | /lux/utils/debug_utils.py | 14ffa3a4d570fe5723c5243d4bc0143a0f6285ba | [
"Apache-2.0"
] | permissive | lux-org/lux | 7a7c8534eec5d2b2114b1a95e64497cf9b52871a | 972e5ec24991483370dda67de6bb1e354bcf8ca6 | refs/heads/master | 2023-08-21T04:05:51.279103 | 2023-07-04T23:34:35 | 2023-07-04T23:34:35 | 232,480,726 | 4,811 | 377 | Apache-2.0 | 2023-07-12T17:45:37 | 2020-01-08T04:53:29 | Python | UTF-8 | Python | false | false | 6,852 | py | import json
from pathlib import Path
import re
import subprocess
import typing as tp
from typing import Optional
def show_versions(return_string: bool = False) -> Optional[str]:
"""
Prints the versions of the principal packages used by Lux for debugging purposes.
Parameters
----------
return_string: Whether to return the versions as a string or print them.
Returns
-------
If return_string is True, returns a string with the versions else the versions
are printed and None is returned.
"""
import platform
import altair
import lux
import luxwidget
import matplotlib
import pandas as pd
header = "Package Versions\n----------------\n"
jupyter_versions_str = subprocess.check_output(["jupyter", "--version"])
jupyter_versions = re.findall(r"(\S+)\s+: (.+)\S*", jupyter_versions_str.decode("utf-8"))
str_lux_error = ""
str_lux_error += "lux-api library is not installed. You may need to run the following code in your command line:\n"
str_lux_error += " pip install lux-api"
# Check if correct lux library is installed
try:
import lux
except ModuleNotFoundError:
print(str_lux_error)
lux_version = lux.__version__
    str_upgrade = f"The current version of lux is {lux_version}. We recommend upgrading lux to version 0.3 or above.\n"
    str_upgrade += "To upgrade, please run the following code in your command line:\n"
str_upgrade += " pip install --upgrade lux-api"
# Check if lux needs to be upgraded
if str(lux_version) < "0.3":
print(str_upgrade)
df = pd.DataFrame(
[
("python", platform.python_version()),
("lux", lux.__version__),
("pandas", pd.__version__),
("luxwidget", luxwidget.__version__),
("matplotlib", matplotlib.__version__),
("altair", altair.__version__),
]
+ jupyter_versions,
columns=["", "Version"],
)
str_rep = header + df.to_string(index=False, justify="left")
if return_string:
return str_rep
else:
print(str_rep)
def debug_info(return_string: bool = False) -> Optional[str]:
"""
Prints all the informatation that could be useful for debugging purposes.
Currently, this includes:
* The versions of the packages used by Lux
* Info about the current state of luxwidget
Parameters
----------
return_string: Whether to return the versions as a string or print them.
Returns
-------
If return_string is True, returns a string with the debug info else the
string will be printed and None is returned.
"""
str_rep = show_versions(return_string=True)
luxwidget_msg = check_luxwidget_enabled(return_string=True)
assert str_rep is not None
assert luxwidget_msg is not None
header = "Widget Setup\n-------------\n"
str_rep += "\n\n" + header + luxwidget_msg + "\n"
if return_string:
return str_rep
else:
print(str_rep)
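# Usage sketch (assumes lux and luxwidget are installed; the exact output depends
# on the local environment):
#   from lux.utils.debug_utils import show_versions, debug_info
#   show_versions()                          # prints the "Package Versions" table
#   report = debug_info(return_string=True)  # versions plus widget setup, as a str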
def notebook_enabled() -> tp.Tuple[bool, str]:
status, nbextension_list = subprocess.getstatusoutput("jupyter nbextension list")
if status != 0:
        return False, "❌ Failed to run 'jupyter nbextension list'\n"
match = re.findall(r"config dir:(.*)\n", nbextension_list)
if match:
config_dir = match[0].strip()
else:
        return False, "❌ No 'config dir' found in 'jupyter nbextension list'\n"
notebook_json = Path(config_dir) / "notebook.json"
if not notebook_json.exists():
return False, f"'{notebook_json}' does not exist\n"
extensions = json.loads(notebook_json.read_text())
if "load_extensions" not in extensions:
        return False, "❌ 'load_extensions' not in 'notebook.json'\n"
elif "luxwidget/extension" not in extensions["load_extensions"]:
        return False, "❌ 'luxwidget/extension' not in 'load_extensions'\n"
extension_enabled = extensions["load_extensions"]["luxwidget/extension"]
if not extension_enabled:
        return False, "❌ luxwidget is installed but not enabled\n"
return True, ""
def lab_enabled() -> tp.Tuple[bool, str]:
status_str, lab_list = subprocess.getstatusoutput("jupyter labextension list")
if status_str != 0:
return (
False,
"❌ Failed to run 'jupyter labextension list'. Do you have Jupyter Lab installed in this environment?",
)
match = re.findall(r"luxwidget (\S+) (\S+) (\S+)", lab_list)
if match:
version_str, enabled_str, status_str = (_strip_ansi(s) for s in match[0])
else:
        return False, "❌ 'luxwidget' not found in 'jupyter labextension list'\n"
if enabled_str != "enabled":
enabled_str = re.sub(r"\033\[(\d|;)+?m", "", enabled_str)
        return False, f"❌ luxwidget is installed but currently '{enabled_str}'\n"
if status_str != "OK":
        return False, f"❌ luxwidget is installed but currently '{status_str}'\n"
return True, ""
def is_lab_notebook():
import re
import psutil
cmd = psutil.Process().parent().cmdline()
return any(re.search("jupyter-lab", x) for x in cmd)
def check_luxwidget_enabled(return_string: bool = False) -> Optional[str]:
# get the ipython shell
import IPython
ip = IPython.get_ipython()
# return if the shell is not available
if ip is None:
        return "❌ IPython shell not available.\nPlease note that Lux must be used within a notebook interface (e.g., Jupyter notebook, Jupyter Lab, JupyterHub, or VSCode)\n"
is_lab = is_lab_notebook()
if is_lab:
        msg = "✅ Jupyter Lab Running\n"
enabled, emsg = lab_enabled()
msg = msg + emsg
if not enabled:
            msg += f"❌ WARNING: luxwidget is not enabled in Jupyter Lab.\n"
            msg += "You may need to run the following code in your command line:\n"
            msg += " jupyter labextension install @jupyter-widgets/jupyterlab-manager\n"
            msg += " jupyter labextension install luxwidget"
        else:
            msg += "✅ luxwidget is enabled"
else:
        msg = "✅ Jupyter Notebook Running\n"
enabled, emsg = notebook_enabled()
msg = msg + emsg
if not enabled:
            msg += "❌ WARNING: luxwidget is not enabled in Jupyter Notebook.\n"
            msg += "You may need to run the following code in your command line:\n"
            msg += " jupyter nbextension install --py luxwidget\n"
            msg += " jupyter nbextension enable --py luxwidget"
        else:
            msg += "✅ luxwidget is enabled"
if return_string:
return msg
def _strip_ansi(source):
return re.sub(r"\033\[(\d|;)+?m", "", source)
if __name__ == "__main__":
check_luxwidget_enabled()
| [
"[email protected]"
] | |
cbd6add2f6254216315796e6424530a64d91520a | b1599f517e62c6651c930c28c430ac3ff7d52bb9 | /src/apps/competitions/migrations/0011_competition_competition_type.py | f2df235246d0498858215cdf50ce59fc6d625f57 | [
"Apache-2.0"
] | permissive | HunDeMingMingBaiBai/competitions-v2 | 4d9ef93f14a3bc43c582c67b62904c6bcf0c19fb | 745b56274ada40b78cda6e91dd762f2d547cd841 | refs/heads/develop | 2023-08-11T06:02:49.780503 | 2021-09-09T04:39:42 | 2021-09-09T04:39:42 | 377,348,778 | 0 | 0 | Apache-2.0 | 2021-06-16T02:32:30 | 2021-06-16T02:32:29 | null | UTF-8 | Python | false | false | 500 | py | # Generated by Django 2.2.10 on 2020-07-03 03:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('competitions', '0010_merge_20200217_2316'),
]
operations = [
migrations.AddField(
model_name='competition',
name='competition_type',
field=models.CharField(choices=[('competition', 'competition'), ('benchmark', 'benchmark')], default='competition', max_length=128),
),
]
| [
"[email protected]"
] | |
30b09a780f82aae722e4f721ba42f220306bb21e | 3a95e89ce8ecb7434b1f00233ac468cd6d1d07e4 | /simulator.py | 2bfb65f001a423007fd06d60a48ebe91e4b25163 | [] | no_license | yinghuaman/simulator | cd24d5a38f1c096001a7aba4f51f4334a7776611 | 0cf0d7bf316f4bd99a29d3c0070b5a85428d0bae | refs/heads/master | 2020-03-27T01:31:01.870653 | 2018-08-22T14:12:55 | 2018-08-22T14:12:55 | 145,718,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,229 | py | import io
from io import StringIO
from tkinter import *
import time
import datetime
import pandas as pd
from tkinter import messagebox
import psycopg2
from sqlalchemy import create_engine
class my_GUI():
def __init__(self,master):
self.master = master
    # GUI layout design
def set_master(self):
self.master.title("数据上传模拟器")
self.master.geometry("800x400")
self.master.resizable(0,0)
self.var_IP = StringVar()
self.var_IP.set("")
Entry(self.master,textvariable = self.var_IP,width=20,font = ("Verdana",15) ).place(x=130,y=30)
Label(self.master,text = "IP:".encode("utf-8"),width = 10,font = ("Arial",15)).place(x=15,y=30)
Label(self.master,text = "*".encode("utf-8"),fg="red",font=10).place(x=87,y=30)
self.var_port = StringVar()
self.var_port.set("")
Entry(self.master, textvariable=self.var_port, width=20, font=("Verdana", 15)).place(x=525, y=30)
Label(self.master, text="port:".encode("utf-8"), width=10, font=("Arial", 15)).place(x=415, y=30)
Label(self.master, text="*".encode("utf-8"), fg="red", font=10).place(x=493, y=30)
self.var_db = StringVar()
self.var_db.set("")
Entry(self.master, textvariable=self.var_db, width=20, font=("Verdana", 15)).place(x=130, y=130)
Label(self.master, text="database:".encode("utf-8"), width=10, font=("Arial", 15)).place(x=15, y=130)
Label(self.master, text="*".encode("utf-8"), fg="red", font=10).place(x=117, y=130)
self.var_user = StringVar()
self.var_user.set("")
Entry(self.master, textvariable=self.var_user, width=20, font=("Verdana", 15)).place(x=525, y=130)
Label(self.master, text="user:".encode("utf-8"), width=10, font=("Arial", 15)).place(x=415, y=130)
Label(self.master, text="*".encode("utf-8"), fg="red", font=10).place(x=493, y=130)
self.var_password = StringVar()
self.var_password.set("")
Entry(self.master, textvariable=self.var_password, width=20, font=("Verdana", 15)).place(x=130, y=230)
Label(self.master, text="password:".encode("utf-8"), width=10, font=("Arial", 15)).place(x=15, y=230)
Label(self.master, text="*".encode("utf-8"), fg="red", font=10).place(x=117, y=230)
self.var_time = StringVar()
self.var_time.set("")
Entry(self.master, textvariable=self.var_time, width=20, font=("Verdana", 15)).place(x=525, y=230)
Label(self.master, text="time:".encode("utf-8"), width=10, font=("Arial", 15)).place(x=415, y=230)
b1 = Button(self.master,text="取消",width=10,font = ("宋体",10),command = self.cancel)
b1.bind("<Return>",self.cancel)
b1.bind("<Button-1>",self.cancel)
b1.place(x=270,y=350)
b2 = Button(self.master, text="上传", width=10, font=("宋体", 10), command=self.upload)
b2.bind("<Return>", self.upload)
b2.bind("<Button-1>", self.upload)
b2.place(x=420, y=350)
Label(self.master,text="*为必填项",width=20,fg="red",font=("Arial", 10)).place(x=10,y=270)
    # Read a local file
def Loaddata(self,filename):
data = pd.read_csv(filename,sep="\t")
return data
    # Check whether the database connection succeeds
def is_connected(self):
user = self.var_user.get()
ip = self.var_IP.get()
password = self.var_password.get()
database = self.var_db.get()
port = self.var_port.get()
flag = 1
try:
messagebox.showinfo("开始链接数据库")
conn = psycopg2.connect(database = database,user=user,password=password,host=ip,port=port)
return flag
except:
flag=0
messagebox.showinfo("链接数据库失败")
return flag
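    # write_to_sql: for each table name, read "data/<name>.txt" (tab-separated),
    # stream it through an in-memory StringIO buffer, and bulk-load it into the
    # matching Postgres table with psycopg2's COPY support (cursor.copy_from),
    # which is much faster than inserting rows one at a time.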
def write_to_sql(self,flag,tablename):
if flag == 1:
messagebox.showinfo("数据库连接成功")
user = self.var_user.get()
ip = self.var_IP.get()
password = self.var_password.get()
db = self.var_db.get()
port = self.var_port.get()
engine = create_engine("postgresql+psycopg2://"+user+":"+password+"@"+ip+":"+str(port)+"/"+db)
for name in tablename:
df = self.Loaddata("data/%s.txt"%name)
output = StringIO()
df.to_csv(output,sep="\t",index=False,header=False)
output.getvalue()
output.seek(0)
conn = engine.raw_connection()
cur = conn.cursor()
cur.copy_from(output,name,null='')
conn.commit()
cur.close()
    # Define the upload function
    def upload(self, event=None):
        flag = self.is_connected()
        # The list of table names to upload must be supplied here (one
        # "data/<name>.txt" file per table); an empty list uploads nothing.
        self.write_to_sql(flag, [])
    def cancel(self, event=None):
self.var_port.set("")
self.var_db.set("")
self.var_password.set("")
self.var_IP.set("")
self.var_user.set("")
def gui_start():
root = Tk()
myApp = my_GUI(root)
myApp.set_master()
root.mainloop()
gui_start()
| [
"[email protected]"
] | |
8b61d31c3cec5c1f20d98b0a2442ff8c95374f96 | 5b90a078ec29a836555050835d40249272654085 | /Hazard/Fishing/Fisherman.py | d1a14f6543608ec8a9bd498f8394bfa47e1cc27b | [
"Unlicense"
] | permissive | ygtripps/Archives | a165224daca9e06471d9b6e6943260d0677484d7 | 8ca460eff5d60c2b3e61ee4c434c0bfcd6d53673 | refs/heads/master | 2021-06-08T13:46:15.544730 | 2016-12-05T21:16:31 | 2016-12-05T21:16:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Python3
def main(args):
global sendersFile
global receiversFile
sendersFile = "senders" # username:password type file
receiversFile = "receivers"
mailServer = "mail.company.com"
logfiles = "./logs/massfisher.log"
maxtime = 3 #In hours
sf = open(sendersFile, "r")
rf = open(receiversFile, "r")
sendersList = {}
sendersAuth = {}
receiversList = {}
with rf as fin:
for line in fin:
receiversList[len(receiversList)+1] = str(line)[0:len(str(line))-1]
with sf as fin:
for line in fin:
sendersList[len(sendersList)+1] = str(line)[0:len(str(line))-1].split(":")[0]
sendersAuth[len(sendersAuth)+1] = str(line)[0:len(str(line))-1].split(":")[1]
maxsleep = (maxtime * 60 * 60) / len(receiversList)
minsleep = int((75 * maxtime) / 100)
messages = os.listdir("Templates")
for i in receiversList:
tmp = messages[random.randint(0, len(messages)-1)]
while not os.path.isfile("./Templates/"+tmp):
tmp = messages[random.randint(0, len(messages)-1)]
rc = random.sample(list(sendersList),1)
time.sleep(random.randint(minsleep, maxsleep))
os.system(str("sendemail -f "+sendersList[rc[0]]+" -t "+receiversList[i]+" -xu "+sendersList[i]+" -xp "+sendersAuth[i]+" -s "+mailServer+" -l "+logfiles+"."+str(i)+" -v -o message-content-type=html -o message-file=" + "\"./Templates/"+ tmp + "\" -u \"" + tmp + "\""))
print("Time to go home and eat those fishes!")
if __name__ == '__main__':
import sys
import random
import time
import os
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
ce5e898f4e48bb7cd55a5dc57f8a86be77727e90 | 42a833f190b3352eaa89604381d3db500c80f538 | /pentestui/pentest_api/attacks/kerberos/modules/crypto.py | 30dd11fb3dd40ca8b292d740d0939747b96f97b5 | [
"Apache-2.0"
] | permissive | mustgundogdu/PentestUI | 95c037022c5aad25cf2e1a4b7ad58eedd6df6ed8 | 92263ea73bd2eaa2081fb277c76aa229103a1d54 | refs/heads/main | 2023-08-29T23:12:28.027452 | 2021-11-18T17:53:03 | 2021-11-18T17:53:03 | 389,436,912 | 31 | 4 | null | null | null | null | UTF-8 | Python | false | false | 12,836 | py | from __future__ import division
from __future__ import print_function
from Cryptodome.Cipher import DES, AES
from struct import pack, unpack
from pentestui.pentest_api.attacks.kerberos.modules.structure import Structure
import hmac, hashlib
from six import b
def Generate_Subkey(K):
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + Algorithm Generate_Subkey +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + +
# + Input : K (128-bit key) +
# + Output : K1 (128-bit first subkey) +
# + K2 (128-bit second subkey) +
# +-------------------------------------------------------------------+
# + +
# + Constants: const_Zero is 0x00000000000000000000000000000000 +
# + const_Rb is 0x00000000000000000000000000000087 +
# + Variables: L for output of AES-128 applied to 0^128 +
# + +
# + Step 1. L := AES-128(K, const_Zero); +
# + Step 2. if MSB(L) is equal to 0 +
# + then K1 := L << 1; +
# + else K1 := (L << 1) XOR const_Rb; +
# + Step 3. if MSB(K1) is equal to 0 +
# + then K2 := K1 << 1; +
# + else K2 := (K1 << 1) XOR const_Rb; +
# + Step 4. return K1, K2; +
# + +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AES_128 = AES.new(K, AES.MODE_ECB)
L = AES_128.encrypt(bytes(bytearray(16)))
LHigh = unpack('>Q',L[:8])[0]
LLow = unpack('>Q',L[8:])[0]
K1High = ((LHigh << 1) | ( LLow >> 63 )) & 0xFFFFFFFFFFFFFFFF
K1Low = (LLow << 1) & 0xFFFFFFFFFFFFFFFF
if (LHigh >> 63):
K1Low ^= 0x87
K2High = ((K1High << 1) | (K1Low >> 63)) & 0xFFFFFFFFFFFFFFFF
K2Low = ((K1Low << 1)) & 0xFFFFFFFFFFFFFFFF
if (K1High >> 63):
K2Low ^= 0x87
K1 = bytearray(pack('>QQ', K1High, K1Low))
K2 = bytearray(pack('>QQ', K2High, K2Low))
return K1, K2
def XOR_128(N1,N2):
J = bytearray()
for i in range(len(N1)):
#J.append(indexbytes(N1,i) ^ indexbytes(N2,i))
J.append(N1[i] ^ N2[i])
return J
def PAD(N):
padLen = 16-len(N)
return N + b'\x80' + b'\x00'*(padLen-1)
def AES_CMAC(K, M, length):
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + Algorithm AES-CMAC +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + +
# + Input : K ( 128-bit key ) +
# + : M ( message to be authenticated ) +
# + : len ( length of the message in octets ) +
# + Output : T ( message authentication code ) +
# + +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + Constants: const_Zero is 0x00000000000000000000000000000000 +
# + const_Bsize is 16 +
# + +
# + Variables: K1, K2 for 128-bit subkeys +
# + M_i is the i-th block (i=1..ceil(len/const_Bsize)) +
# + M_last is the last block xor-ed with K1 or K2 +
# + n for number of blocks to be processed +
# + r for number of octets of last block +
# + flag for denoting if last block is complete or not +
# + +
# + Step 1. (K1,K2) := Generate_Subkey(K); +
# + Step 2. n := ceil(len/const_Bsize); +
# + Step 3. if n = 0 +
# + then +
# + n := 1; +
# + flag := false; +
# + else +
# + if len mod const_Bsize is 0 +
# + then flag := true; +
# + else flag := false; +
# + +
# + Step 4. if flag is true +
# + then M_last := M_n XOR K1; +
# + else M_last := padding(M_n) XOR K2; +
# + Step 5. X := const_Zero; +
# + Step 6. for i := 1 to n-1 do +
# + begin +
# + Y := X XOR M_i; +
# + X := AES-128(K,Y); +
# + end +
# + Y := M_last XOR X; +
# + T := AES-128(K,Y); +
# + Step 7. return T; +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
const_Bsize = 16
const_Zero = bytearray(16)
AES_128= AES.new(K, AES.MODE_ECB)
M = bytearray(M[:length])
K1, K2 = Generate_Subkey(K)
n = len(M)//const_Bsize
if n == 0:
n = 1
flag = False
else:
if (length % const_Bsize) == 0:
flag = True
else:
n += 1
flag = False
M_n = M[(n-1)*const_Bsize:]
if flag is True:
M_last = XOR_128(M_n,K1)
else:
M_last = XOR_128(PAD(M_n),K2)
X = const_Zero
for i in range(n-1):
M_i = M[(i)*const_Bsize:][:16]
Y = XOR_128(X, M_i)
X = bytearray(AES_128.encrypt(bytes(Y)))
Y = XOR_128(M_last, X)
T = AES_128.encrypt(bytes(Y))
return T
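# Usage sketch (illustrative only): exercising AES_CMAC against RFC 4493
# Example 1. The key and expected tag below come from that RFC and are not
# used anywhere else in this module.
def _aes_cmac_rfc4493_example():
    from binascii import unhexlify, hexlify
    key = unhexlify('2b7e151628aed2a6abf7158809cf4f3c')
    tag = AES_CMAC(key, b'', 0)
    # RFC 4493 Example 1 lists bb1d6929e95937287fa37d129b756746 for the empty message.
    return hexlify(tag)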
def AES_CMAC_PRF_128(VK, M, VKlen, Mlen):
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + AES-CMAC-PRF-128 +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + +
# + Input : VK (Variable-length key) +
# + : M (Message, i.e., the input data of the PRF) +
# + : VKlen (length of VK in octets) +
# + : len (length of M in octets) +
# + Output : PRV (128-bit Pseudo-Random Variable) +
# + +
# +-------------------------------------------------------------------+
# + Variable: K (128-bit key for AES-CMAC) +
# + +
# + Step 1. If VKlen is equal to 16 +
# + Step 1a. then +
# + K := VK; +
# + Step 1b. else +
# + K := AES-CMAC(0^128, VK, VKlen); +
# + Step 2. PRV := AES-CMAC(K, M, len); +
# + return PRV; +
# + +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if VKlen == 16:
K = VK
else:
K = AES_CMAC(bytes(bytearray(16)), VK, VKlen)
PRV = AES_CMAC(K, M, Mlen)
return PRV
def KDF_CounterMode(KI, Label, Context, L):
# Implements NIST SP 800-108 Section 5.1, with PRF HMAC-SHA256
# https://tools.ietf.org/html/draft-irtf-cfrg-kdf-uses-00#ref-SP800-108
# Fixed values:
# 1. h - The length of the output of the PRF in bits, and
# 2. r - The length of the binary representation of the counter i.
# Input: KI, Label, Context, and L.
# Process:
# 1. n := [L/h]
# 2. If n > 2r-1, then indicate an error and stop.
# 3. result(0):= empty .
# 4. For i = 1 to n, do
# a. K(i) := PRF (KI, [i]2 || Label || 0x00 || Context || [L]2)
# b. result(i) := result(i-1) || K(i).
# 5. Return: KO := the leftmost L bits of result(n).
h = 256
r = 32
n = L // h
if n == 0:
n = 1
if n > (pow(2,r)-1):
raise Exception("Error computing KDF_CounterMode")
result = b''
K = b''
for i in range(1,n+1):
input = pack('>L', i) + Label + b'\x00' + Context + pack('>L',L)
K = hmac.new(KI, input, hashlib.sha256).digest()
result = result + K
return result[:(L//8)]
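# Usage sketch (illustrative only): derive 256 bits (32 bytes) of key material
# with KDF_CounterMode. The base key, label and context below are made-up
# placeholder values, not anything this module actually derives.
def _kdf_counter_mode_example():
    base_key = b'\x00' * 32
    label = 'EXAMPLE-LABEL\x00'.encode('utf-16le')
    context = b'example-context'
    return KDF_CounterMode(base_key, label, context, 256)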
# [MS-LSAD] Section 5.1.2 / 5.1.3
class LSA_SECRET_XP(Structure):
structure = (
('Length','<L=0'),
('Version','<L=0'),
('_Secret','_-Secret', 'self["Length"]'),
('Secret', ':'),
)
def transformKey(InputKey):
# Section 5.1.3
OutputKey = []
OutputKey.append( chr(ord(InputKey[0:1]) >> 0x01) )
OutputKey.append( chr(((ord(InputKey[0:1])&0x01)<<6) | (ord(InputKey[1:2])>>2)) )
OutputKey.append( chr(((ord(InputKey[1:2])&0x03)<<5) | (ord(InputKey[2:3])>>3)) )
OutputKey.append( chr(((ord(InputKey[2:3])&0x07)<<4) | (ord(InputKey[3:4])>>4)) )
OutputKey.append( chr(((ord(InputKey[3:4])&0x0F)<<3) | (ord(InputKey[4:5])>>5)) )
OutputKey.append( chr(((ord(InputKey[4:5])&0x1F)<<2) | (ord(InputKey[5:6])>>6)) )
OutputKey.append( chr(((ord(InputKey[5:6])&0x3F)<<1) | (ord(InputKey[6:7])>>7)) )
OutputKey.append( chr(ord(InputKey[6:7]) & 0x7F) )
for i in range(8):
OutputKey[i] = chr((ord(OutputKey[i]) << 1) & 0xfe)
return b("".join(OutputKey))
def decryptSecret(key, value):
# [MS-LSAD] Section 5.1.2
plainText = b''
key0 = key
for i in range(0, len(value), 8):
cipherText = value[:8]
tmpStrKey = key0[:7]
tmpKey = transformKey(tmpStrKey)
Crypt1 = DES.new(tmpKey, DES.MODE_ECB)
plainText += Crypt1.decrypt(cipherText)
key0 = key0[7:]
value = value[8:]
# AdvanceKey
if len(key0) < 7:
key0 = key[len(key0):]
secret = LSA_SECRET_XP(plainText)
return (secret['Secret'])
def encryptSecret(key, value):
# [MS-LSAD] Section 5.1.2
cipherText = b''
key0 = key
value0 = pack('<LL', len(value), 1) + value
for i in range(0, len(value0), 8):
if len(value0) < 8:
value0 = value0 + b'\x00'*(8-len(value0))
plainText = value0[:8]
tmpStrKey = key0[:7]
print(type(tmpStrKey))
print(tmpStrKey)
tmpKey = transformKey(tmpStrKey)
Crypt1 = DES.new(tmpKey, DES.MODE_ECB)
cipherText += Crypt1.encrypt(plainText)
key0 = key0[7:]
value0 = value0[8:]
# AdvanceKey
if len(key0) < 7:
key0 = key[len(key0):]
return cipherText
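# Usage sketch (illustrative only): a round-trip check for the [MS-LSAD]
# helpers above — encrypting and then decrypting with the same session key
# should return the original bytes. Key and secret are arbitrary placeholders.
def _lsa_secret_roundtrip_example():
    session_key = b'0123456789abcdef'   # placeholder 16-byte key
    secret = b'example secret'
    blob = encryptSecret(session_key, secret)
    return decryptSecret(session_key, blob) == secret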
def SamDecryptNTLMHash(encryptedHash, key):
# [MS-SAMR] Section 2.2.11.1.1
Block1 = encryptedHash[:8]
Block2 = encryptedHash[8:]
Key1 = key[:7]
Key1 = transformKey(Key1)
Key2 = key[7:14]
Key2 = transformKey(Key2)
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
plain1 = Crypt1.decrypt(Block1)
plain2 = Crypt2.decrypt(Block2)
return plain1 + plain2
def SamEncryptNTLMHash(encryptedHash, key):
# [MS-SAMR] Section 2.2.11.1.1
Block1 = encryptedHash[:8]
Block2 = encryptedHash[8:]
Key1 = key[:7]
Key1 = transformKey(Key1)
Key2 = key[7:14]
Key2 = transformKey(Key2)
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
plain1 = Crypt1.encrypt(Block1)
plain2 = Crypt2.encrypt(Block2)
return plain1 + plain2
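# Usage sketch (illustrative only): SamEncryptNTLMHash and SamDecryptNTLMHash
# apply the same pair of DES-ECB keys, so a 16-byte hash round-trips under a
# 16-byte key. Both values below are placeholders.
def _sam_hash_roundtrip_example():
    nt_hash = bytes(range(16))
    key = b'abcdefghijklmnop'
    return SamDecryptNTLMHash(SamEncryptNTLMHash(nt_hash, key), key) == nt_hash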
| [
"[email protected]"
] | |
f4e6efba2acb098ba59cc3d1248893b426802823 | 1ae1bc24fa902e1c293af85eac37e777e4805eb9 | /server/tests/test_get_secret.py | b6d50871b1e24317e0854e960c6b8b9a10402a2d | [
"MIT"
] | permissive | Excloudx6/simple-one-time-secret | 1745abde5c694707f6136483b5773cc04554e999 | c61c242ce41a7ef0c74e915b312e94d4ee37158c | refs/heads/main | 2023-08-03T07:45:14.826847 | 2021-09-15T17:33:35 | 2021-09-15T17:33:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from datetime import datetime
from uuid import UUID
from fastapi import Response
from server.endpoints import get_secret
def test__get_secret__valid_data__secret_returned():
response = Response()
ret = get_secret(UUID("11111111-1111-4111-a111-111111111111"), response)
assert ret["_id"] == "11111111-1111-4111-a111-111111111111"
assert isinstance(ret["expiration"], datetime)
assert ret["secret"] == "some_encrypted_secret"
def test__get_secret__expired_secret__404():
response = Response()
ret: Response = get_secret(UUID("22222222-2222-4222-a222-222222222222"), response)
assert ret == {"message": "Not Found"}
def test__get_secret__non_existent_secret__404():
response = Response()
ret: Response = get_secret(UUID("33333333-3333-4333-a333-333333333333"), response)
assert ret == {"message": "Not Found"}
| [
"[email protected]"
] | |
f8bfb04de2d5971b0475709c0e401cf21b704a3a | e531a27eaad5a6bde184a84205a9da06b4e8e12a | /text/korean.py | fb639e5c55050a925743162b8a7c902387017967 | [
"MIT"
] | permissive | zldzmfoq12/voice-synthesizer | 5751693cc35ca457eb29e0fe719a2832b861a127 | 2c347de9d64fa42035b21c317112f6b625aa0c25 | refs/heads/master | 2022-12-13T00:01:22.360153 | 2021-04-28T12:14:50 | 2021-04-28T12:14:50 | 200,749,138 | 8 | 1 | NOASSERTION | 2022-12-08T00:48:02 | 2019-08-06T00:45:36 | Python | UTF-8 | Python | false | false | 8,631 | py | # Code based on
import re
import os
import ast
import json
from jamo import hangul_to_jamo, h2j, j2h
from .ko_dictionary import english_dictionary, etc_dictionary
PAD = '_'
EOS = '~'
PUNC = '!\'(),-.:;?'
SPACE = ' '
JAMO_LEADS = "".join([chr(_) for _ in range(0x1100, 0x1113)])
JAMO_VOWELS = "".join([chr(_) for _ in range(0x1161, 0x1176)])
JAMO_TAILS = "".join([chr(_) for _ in range(0x11A8, 0x11C3)])
VALID_CHARS = JAMO_LEADS + JAMO_VOWELS + JAMO_TAILS + PUNC + SPACE
ALL_SYMBOLS = PAD + EOS + VALID_CHARS
char_to_id = {c: i for i, c in enumerate(ALL_SYMBOLS)}
id_to_char = {i: c for i, c in enumerate(ALL_SYMBOLS)}
quote_checker = """([`"'"“‘])(.+?)([`"'"”’])"""
def is_lead(char):
return char in JAMO_LEADS
def is_vowel(char):
return char in JAMO_VOWELS
def is_tail(char):
return char in JAMO_TAILS
def get_mode(char):
if is_lead(char):
return 0
elif is_vowel(char):
return 1
elif is_tail(char):
return 2
else:
return -1
def _get_text_from_candidates(candidates):
if len(candidates) == 0:
return ""
elif len(candidates) == 1:
return _jamo_char_to_hcj(candidates[0])
else:
return j2h(**dict(zip(["lead", "vowel", "tail"], candidates)))
def jamo_to_korean(text):
text = h2j(text)
idx = 0
new_text = ""
candidates = []
while True:
if idx >= len(text):
new_text += _get_text_from_candidates(candidates)
break
char = text[idx]
mode = get_mode(char)
if mode == 0:
new_text += _get_text_from_candidates(candidates)
candidates = [char]
elif mode == -1:
new_text += _get_text_from_candidates(candidates)
new_text += char
candidates = []
else:
candidates.append(char)
idx += 1
return new_text
num_to_kor = {
'0': '영',
'1': '일',
'2': '이',
'3': '삼',
'4': '사',
'5': '오',
'6': '육',
'7': '칠',
'8': '팔',
'9': '구',
}
unit_to_kor1 = {
'%': '퍼센트',
'cm': '센치미터',
'mm': '밀리미터',
'km': '킬로미터',
'kg': '킬로그람',
}
unit_to_kor2 = {
'm': '미터',
}
upper_to_kor = {
'A': '에이',
'B': '비',
'C': '씨',
'D': '디',
'E': '이',
'F': '에프',
'G': '지',
'H': '에이치',
'I': '아이',
'J': '제이',
'K': '케이',
'L': '엘',
'M': '엠',
'N': '엔',
'O': '오',
'P': '피',
'Q': '큐',
'R': '알',
'S': '에스',
'T': '티',
'U': '유',
'V': '브이',
'W': '더블유',
'X': '엑스',
'Y': '와이',
'Z': '지',
}
def compare_sentence_with_jamo(text1, text2):
    return h2j(text1) != h2j(text2)
def tokenize(text, as_id=False):
text = normalize(text)
tokens = list(hangul_to_jamo(text))
if as_id:
return [char_to_id[token] for token in tokens] + [char_to_id[EOS]]
else:
return [token for token in tokens] + [EOS]
def tokenizer_fn(iterator):
return (token for x in iterator for token in tokenize(x, as_id=False))
def normalize(text):
text = text.strip()
text = re.sub('\(\d+일\)', '', text)
text = re.sub('\([⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+\)', '', text)
text = normalize_with_dictionary(text, etc_dictionary)
text = normalize_english(text)
text = re.sub('[a-zA-Z]+', normalize_upper, text)
text = normalize_quote(text)
text = normalize_number(text)
return text
def normalize_with_dictionary(text, dic):
if any(key in text for key in dic.keys()):
pattern = re.compile('|'.join(re.escape(key) for key in dic.keys()))
return pattern.sub(lambda x: dic[x.group()], text)
else:
return text
def normalize_english(text):
def fn(m):
word = m.group()
if word in english_dictionary:
return english_dictionary.get(word)
else:
return word
text = re.sub("([A-Za-z]+)", fn, text)
return text
def normalize_upper(text):
text = text.group(0)
if all([char.isupper() for char in text]):
return "".join(upper_to_kor[char] for char in text)
else:
return text
def normalize_quote(text):
def fn(found_text):
from nltk import sent_tokenize # NLTK doesn't along with multiprocessing
found_text = found_text.group()
unquoted_text = found_text[1:-1]
sentences = sent_tokenize(unquoted_text)
return " ".join(["'{}'".format(sent) for sent in sentences])
return re.sub(quote_checker, fn, text)
number_checker = "([+-]?\d[\d,]*)[\.]?\d*"
count_checker = "(시|명|가지|살|마리|포기|송이|수|톨|통|점|개|벌|척|채|다발|그루|자루|줄|켤레|그릇|잔|마디|상자|사람|곡|병|판)"
def normalize_number(text):
text = normalize_with_dictionary(text, unit_to_kor1)
text = normalize_with_dictionary(text, unit_to_kor2)
text = re.sub(number_checker + count_checker,
lambda x: number_to_korean(x, True), text)
text = re.sub(number_checker,
lambda x: number_to_korean(x, False), text)
return text
num_to_kor1 = [""] + list("일이삼사오육칠팔구")
num_to_kor2 = [""] + list("만억조경해")
num_to_kor3 = [""] + list("십백천")
#count_to_kor1 = [""] + ["하나","둘","셋","넷","다섯","여섯","일곱","여덟","아홉"]
count_to_kor1 = [""] + ["한","두","세","네","다섯","여섯","일곱","여덟","아홉"]
count_tenth_dict = {
"십": "열",
"두십": "스물",
"세십": "서른",
"네십": "마흔",
"다섯십": "쉰",
"여섯십": "예순",
"일곱십": "일흔",
"여덟십": "여든",
"아홉십": "아흔",
}
def number_to_korean(num_str, is_count=False):
if is_count:
num_str, unit_str = num_str.group(1), num_str.group(2)
else:
num_str, unit_str = num_str.group(), ""
num_str = num_str.replace(',', '')
num = ast.literal_eval(num_str)
if num == 0:
return "영"
check_float = num_str.split('.')
if len(check_float) == 2:
digit_str, float_str = check_float
elif len(check_float) >= 3:
raise Exception(" [!] Wrong number format")
else:
digit_str, float_str = check_float[0], None
if is_count and float_str is not None:
raise Exception(" [!] `is_count` and float number does not fit each other")
digit = int(digit_str)
if digit_str.startswith("-"):
digit, digit_str = abs(digit), str(abs(digit))
kor = ""
size = len(str(digit))
tmp = []
for i, v in enumerate(digit_str, start=1):
v = int(v)
if v != 0:
if is_count:
tmp += count_to_kor1[v]
else:
tmp += num_to_kor1[v]
tmp += num_to_kor3[(size - i) % 4]
if (size - i) % 4 == 0 and len(tmp) != 0:
kor += "".join(tmp)
tmp = []
kor += num_to_kor2[int((size - i) / 4)]
if is_count:
if kor.startswith("한") and len(kor) > 1:
kor = kor[1:]
if any(word in kor for word in count_tenth_dict):
kor = re.sub(
'|'.join(count_tenth_dict.keys()),
lambda x: count_tenth_dict[x.group()], kor)
if not is_count and kor.startswith("일") and len(kor) > 1:
kor = kor[1:]
if float_str is not None:
kor += "쩜 "
kor += re.sub('\d', lambda x: num_to_kor[x.group()], float_str)
if num_str.startswith("+"):
kor = "플러스 " + kor
elif num_str.startswith("-"):
kor = "마이너스 " + kor
return kor + unit_str
if __name__ == "__main__":
def test_normalize(text):
print(text)
print(normalize(text))
print("="*30)
test_normalize("JTBC는 JTBCs를 DY는 A가 Absolute")
test_normalize("오늘(13일) 101마리 강아지가")
test_normalize('"저돌"(猪突) 입니다.')
test_normalize('비대위원장이 지난 1월 이런 말을 했습니다. “난 그냥 산돼지처럼 돌파하는 스타일이다”')
test_normalize("지금은 -12.35%였고 종류는 5가지와 19가지, 그리고 55가지였다")
test_normalize("JTBC는 TH와 K 양이 2017년 9월 12일 오후 12시에 24살이 된다")
| [
"[email protected]"
] | |
7accaa8ad9e3c45b158dd9537e55e683338dea29 | 70e1159856750f04e58c0ffc3f54d094a4602c07 | /booktest/views.py | 84958fd19d5631e83ebfd2b20bac16190adc186f | [] | no_license | wxp19940506/django_test | 032e78a4eb45eb0c54dbafd43dfd0e463d455bb5 | c586cb62d1bb1a21f3430155b3d82ab7b2a65da6 | refs/heads/master | 2021-05-10T11:52:54.186422 | 2018-01-22T07:55:11 | 2018-01-22T07:55:11 | 118,424,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | from django.shortcuts import render
from django.http import *
from django.template import RequestContext,loader
from .models import *
# Create your views here.
def index(request):
# temp = loader.get_template("booktest/index.html")
#
# return HttpResponse(temp.render())
booklist = BookInfo.objects.all()
context = {'lists':booklist}
return render(request,'booktest/index.html',context)
def show(request,id):
book = BookInfo.objects.get(pk=id)
herolist = book.heroinfo_set.all()
context = {'list':herolist}
return render(request,'booktest/show.html',context)
| [
"[email protected]"
] | |
ae27520913674390e809620c54463d13c4e88d63 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/TOM-Lambda/CS35_IntroPython_GP/day3/intro/11_args.py | 2ec2eca832f454921138650bfb137e422a0c4711 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,852 | py | # Experiment with positional arguments, arbitrary arguments, and keyword
# arguments.
# Write a function f1 that takes two integer positional arguments and returns
# the sum. This is what you'd consider to be a regular, normal function.
def f1(a, b):
    return a + b
print(f1(1, 2))
# Write a function f2 that takes any number of integer arguments and prints the
# sum. Google for "python arbitrary arguments" and look for "*args"
def f2(*args):
    sum = 0
    for i in args:
        sum += i
    return sum
print(f2(1)) # Should print 1
print(f2(1, 3)) # Should print 4
print(f2(1, 4, -12)) # Should print -7
print(f2(7, 9, 1, 3, 4, 9, 0)) # Should print 33
a = [7, 6, 5, 4]
# What thing do you have to add to make this work?
print(f2(*a)) # Should print 22
# Write a function f3 that accepts either one or two arguments. If one argument,
# it returns that value plus 1. If two arguments, it returns the sum of the
# arguments. Google "python default arguments" for a hint.
def f3(a, b=1):
    return a + b
print(f3(1, 2)) # Should print 3
print(f3(8)) # Should print 9
# Write a function f4 that accepts an arbitrary number of keyword arguments and
# prints out the keys and values like so:
#
# key: foo, value: bar
# key: baz, value: 12
#
# Google "python keyword arguments".
def f4(**kwargs):
    for k, v in kwargs.items():
        print(f"key: {k}, value: {v}")
    # Alternate:
    # for k in kwargs:
    #     print(f'key: {k}, value: {kwargs[k]}')
# Should print
# key: a, value: 12
# key: b, value: 30
f4(a=12, b=30)
# Should print
# key: city, value: Berkeley
# key: population, value: 121240
# key: founded, value: "March 23, 1868"
f4(city="Berkeley", population=121240, founded="March 23, 1868")
d = {"monster": "goblin", "hp": 3}
# What thing do you have to add to make this work?
f4(**d)
| [
"[email protected]"
] | |
3c6dc99ca36a539efb2e696f6b57cbd205a83f8b | ae7ba9c83692cfcb39e95483d84610715930fe9e | /baidu/Paddle/paddle/trainer/tests/config_parser_test.py | 5ca874cec7914a20f79c2c7b1873c5bd04f60dca | [
"Apache-2.0"
] | permissive | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import parse_config_and_serialize
if __name__ == '__main__':
parse_config_and_serialize('trainer/tests/test_config.conf', '')
parse_config_and_serialize(
'trainer/tests/sample_trainer_config.conf',
'extension_module_name=paddle.trainer.config_parser_extension')
parse_config_and_serialize('gserver/tests/pyDataProvider/trainer.conf', '')
| [
"[email protected]"
] | |
7d8115df6fa61bc6f721bc8db8bd47858dc75982 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/primu.py | 74ed5f7f4b48b1b61044808885c34bd9dce48229 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 255 | py | ii = [('WilbRLW.py', 1), ('WilkJMC3.py', 3), ('ClarGE2.py', 2), ('GellWPT2.py', 1), ('WilkJMC2.py', 1), ('LyelCPG.py', 1), ('SoutRD.py', 3), ('WilkJMC.py', 3), ('WestJIT.py', 1), ('FitzRNS.py', 1), ('DibdTRL.py', 1), ('EvarJSP.py', 1), ('SadlMLP2.py', 1)] | [
"[email protected]"
] | |
c8705454f5b80ca5aca9c2228cd462665605112d | f8e03a0724516b7cc2299f6c7a8cef544fa32484 | /source/pic2card/mystique/group_design_objects.py | e59231d840bf9b34a839f234137c4999867a8772 | [
"MIT"
] | permissive | isabella232/AdaptiveCards | cc3904f0782bd94087ae0a0df0ee2db954facdde | 766750517196d05f4466941647e07a8a298257b2 | refs/heads/main | 2023-03-07T22:13:55.327587 | 2020-11-17T02:02:15 | 2020-11-17T02:02:15 | 313,699,024 | 0 | 0 | MIT | 2021-02-23T16:14:48 | 2020-11-17T17:51:17 | null | UTF-8 | Python | false | false | 21,437 | py | """Module for grouping deisgn objects into different containers"""
from operator import itemgetter
from typing import List, Dict, Callable, Tuple, Optional
from mystique import config
from mystique.extract_properties import CollectProperties
class GroupObjects:
"""
    Handles the grouping of the given list of objects for any condition
    that is passed.
"""
def object_grouping(self, design_objects: List[Dict],
condition: Callable[[Dict, Dict],
bool]) -> List[List[Dict]]:
"""
        Groups the given list of design objects for any given condition.
        @param design_objects: list of design objects
@param condition: Grouping condition function
@return: Grouped list of design objects.
"""
groups = []
grouped_positions = []
for ctr1, design_object1 in enumerate(design_objects):
temp_list = []
for ctr2, design_object2 in enumerate(design_objects):
if condition(design_object1, design_object2):
present = False
present_position = -1
append_object = False
append_position = -1
for ctr, gr in enumerate(groups):
if design_object2 in gr:
present = True
present_position = ctr
if design_object1 in gr:
append_object = True
append_position = ctr
if not present and not append_object:
temp_list.append(design_object2)
grouped_positions.append(ctr2)
elif not present and append_object:
groups[append_position].append(design_object2)
grouped_positions.append(ctr2)
elif present and not append_object:
groups[present_position].append(design_object1)
grouped_positions.append(ctr1)
elif (present and append_object and
present_position != append_position):
groups[present_position] += groups[append_position]
del groups[append_position]
if temp_list:
groups.append(temp_list)
for ctr, design_object in enumerate(design_objects):
if ctr not in grouped_positions:
groups.append([design_object])
return groups
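# Usage sketch (illustrative only): group minimal made-up design objects whose
# ymin values lie within 10px of each other. Real callers pass the full
# design-object dictionaries produced by the detection pipeline.
def _object_grouping_example():
    objects = [
        {"object": "textbox", "ymin": 10.0},
        {"object": "textbox", "ymin": 12.0},
        {"object": "textbox", "ymin": 80.0},
    ]

    def same_row(obj1, obj2):
        return obj1 is not obj2 and abs(obj1["ymin"] - obj2["ymin"]) <= 10.0

    # Expected: the first two objects fall into one group, the third into its own.
    return GroupObjects().object_grouping(objects, same_row)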
class ImageGrouping(GroupObjects):
"""
    Groups the image objects of the adaptive card objects into imagesets or
individual image objects.
"""
# Image objects within the 10px ymin range and 100px range difference are
# grouped into imagesets.
IMAGE_SET_YMIN_RANGE = 10.0
IMAGE_SET_X_RANGE = 100.0
def __init__(self, card_arrange):
self.card_arrange = card_arrange
def imageset_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
Returns a condition boolean value for grouping image objects into
imagesets
@param design_object1: image object
@param design_object2: image object
@return: boolean value
"""
if design_object1.get("xmin") < design_object2.get("xmin"):
xmax = design_object1.get("xmax")
xmin = design_object2.get("xmin")
else:
xmax = design_object2.get("xmax")
xmin = design_object1.get("xmin")
ymin_diff = abs(
design_object1.get("ymin") - design_object2.get("ymin")
)
x_diff = abs(xmax - xmin)
return (ymin_diff <= self.IMAGE_SET_YMIN_RANGE
and x_diff <= self.IMAGE_SET_X_RANGE)
def group_image_objects(self, image_objects, body, objects, ymins=None,
is_column=None) -> [List, Optional[Tuple]]:
"""
Groups the image objects into imagesets which are in
closer ymin range.
@param image_objects: list of image objects
        @param body: list of card design elements.
@param ymins: list of ymins of card design
elements
@param objects: list of all design objects
@param is_column: boolean value to check if an object is inside a
columnset or not
@return: List of remaining image objects after the grouping if the
grouping is done outside the columnset container
else returned list of remaining image objects along
with its coordinate values.
"""
# group the image objects based on ymin
groups = self.object_grouping(image_objects, self.imageset_condition)
delete_positions = []
design_object_coords = []
for group in groups:
group = [dict(t) for t in {tuple(d.items()) for d in group}]
# group = self.remove_duplicates(group)
if len(group) > 1:
group = sorted(group, key=lambda i: i["xmin"])
image_set = {
"type": "ImageSet",
"imageSize": "Auto",
"images": []
}
sizes = []
alignment = []
image_xmins = []
for ctr, design_object in enumerate(group):
index = objects.index(design_object)
if index not in delete_positions:
delete_positions.append(index)
sizes.append(design_object.get("size", "Auto"))
alignment.append(design_object.get(
"horizontal_alignment", "Left"))
image_xmins.append(design_object.get("xmin"))
self.card_arrange.append_objects(design_object,
image_set["images"])
image_set["images"] = [x for _, x in sorted(
zip(image_xmins,
image_set["images"]),
key=lambda x: x[0])]
# Assign the imageset's size and alignment property based on
# each image's alignment and size properties inside the imgaeset
image_set["imageSize"] = max(set(sizes), key=sizes.count)
preference_order = ["Left", "Center", "Right"]
if len(alignment) == len(list(set(alignment))):
alignment.sort(key=(preference_order + alignment).index)
image_set["horizontalAlignment"] = alignment[0]
else:
image_set["horizontalAlignment"] = max(set(alignment),
key=alignment.count)
image_set["coords"] = str(group[0].get("coords"))
body.append(image_set)
if ymins:
ymins.append(design_object.get("ymin"))
if is_column:
design_object_coords.append(group[0].get("xmin"))
design_object_coords.append(group[0].get("ymin"))
design_object_coords.append(group[0].get("xmax"))
design_object_coords.append(group[0].get("ymax"))
objects = [design_objects for ctr, design_objects in enumerate(objects)
if ctr not in delete_positions]
if is_column:
return objects, design_object_coords
else:
return objects
class ColumnsGrouping(GroupObjects):
"""
Groups the design objects into different columns of a columnset
"""
def __init__(self, card_arrange):
self.card_arrange = card_arrange
def horizontal_inclusive(self, object_one: Dict, object_two: Dict) -> bool:
"""
        Returns the horizontal inclusive condition
@param object_one: design object one
@param object_two: design object two
@return: the boolean value of the inclusive condition
"""
return (((object_one and object_two) and (
(object_one.get("xmin") <= object_two.get(
"xmin") <= object_one.get(
"xmax") and object_one.get(
"xmin") <= object_two.get(
"xmax") <= object_one.get(
"xmax"))
or (object_two.get("xmin") <= object_one.get(
"xmin") <= object_two.get(
"xmax") <= object_one.get("xmax") and
object_two.get(
"xmax") <= object_one.get(
"xmax")
) or (object_one.get(
"xmin") <= object_two.get(
"xmin") <= object_one.get(
"xmax") <= object_two.get(
"xmax") and object_two.get(
"xmax") >= object_one.get("xmin")
))
) or ((object_two and object_one) and
((object_two.get("xmin")
<= object_one.get("xmin")
<= object_two.get("xmax")
and object_two.get("xmin")
<= object_one.get("xmax")
<= object_two.get("xmax"))
or (object_one.get("xmin")
<= object_one.get("xmin")
and object_one.get("xmax")
<= object_two.get("xmax")
and object_two.get("xmin")
<= object_one.get("xmax")
<= object_two.get("xmax"))
or (object_two.get("xmin")
<= object_one.get("xmin")
<= object_two.get("xmax")
<= object_one.get("xmax")
and object_one.get("xmax")
>= object_two.get("xmin"))))
)
def vertical_inclusive(self, object_one: Dict, object_two: Dict) -> bool:
"""
Returns the vertical inclusive condition
@param object_one: design object one
@param object_two: design object two
@return: the boolean value of the inclusive condition
"""
return (
((object_one and object_two) and
((object_one.get("ymin")
<= object_two.get("ymin") <= object_one.get("ymax")
and object_one.get("ymin") <= object_two.get("ymax")
<= object_one.get("ymax"))
or (object_two.get("ymin") <= object_one.get(
"ymin") <= object_two.get(
"ymax") <= object_one.get("ymax")
and object_two.get("ymax") <= object_one.get("ymax"))
or (object_one.get("ymin") <= object_two.get("ymin")
<= object_one.get("ymax") <= object_two.get("ymax")
and object_two.get("ymax") >= object_one.get("ymin"))
))
or ((object_two and object_one)
and ((object_two.get("ymin") <= object_one.get("ymin")
<= object_two.get("ymax") and object_two.get("ymin")
<= object_one.get("ymax") <= object_two.get("ymax"))
or (object_one.get("ymin") <= object_one.get("ymin")
and object_one.get("ymax")
<= object_two.get("ymax")
and object_two.get("ymin")
<= object_one.get("ymax")
<= object_two.get("ymax"))
or (object_two.get("ymin") <= object_one.get("ymin")
<= object_two.get("ymax")
<= object_one.get("ymax")
and object_one.get("ymax")
>= object_two.get("ymin"))
))
)
def max_min_difference(self, design_object1: Dict,
design_object2: Dict, way: str) -> float:
"""
        Returns the max-min difference of the 2 design objects
        @param design_object1: design object one
        @param design_object2: design object two
        @param way: "x" for the xmax-xmin difference, else ymax-ymin
        @return: rounded max-min difference
"""
max = "ymax"
min = "ymin"
if way == "x":
max = "xmax"
min = "xmin"
if design_object1.get(min) < design_object2.get(min):
return round(abs(design_object2.get(min) - design_object1.get(max)))
else:
return round(abs(design_object1.get(min) - design_object2.get(max)))
def columns_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
Returns a condition boolean value for grouping objects into
columnsets
@param design_object1: design object
@param design_object2: design object
@return: boolean value
"""
y_diff = self.max_min_difference(design_object1, design_object2,
way="y")
object_one = None
object_two = None
if (design_object1.get("object") == "image"
and design_object2.get("object") != "image"):
object_one = design_object1
object_two = design_object2
elif (design_object2.get("object") == "image"
and design_object1.get("object") != "image"):
object_one = design_object2
object_two = design_object1
elif (design_object2.get("object") == "image"
and design_object1.get("object") == "image"):
object_one = design_object1
object_two = design_object2
return (design_object1 != design_object2 and (
(abs(design_object1.get("ymin", 0)
- design_object2.get("ymin", 0))
<= config.COLUMNSET_GROUPING.get("ymin_difference", ""))
or self.vertical_inclusive(object_one, object_two)
or (y_diff <
config.COLUMNSET_GROUPING.get("ymax-ymin_difference", "")
and self.horizontal_inclusive(object_one, object_two)
)))
def columns_row_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
Returns a condition boolean value for grouping columnset grouped
objects into different columns.
@param design_object1: design object
@param design_object2: design object
@return: boolean value
"""
extract_properites = CollectProperties()
x_diff = self.max_min_difference(design_object1, design_object2,
way="x")
point1 = (design_object1.get("xmin"), design_object1.get("ymin"),
design_object1.get("xmax"), design_object1.get("ymax"))
point2 = (design_object2.get("xmin"), design_object2.get("ymin"),
design_object2.get("xmax"), design_object2.get("ymax"))
if design_object1.get("ymin") < design_object2.get("ymin"):
object_one = design_object1
object_two = design_object2
else:
object_one = design_object2
object_two = design_object1
condition = (design_object1 != design_object2
and ((design_object1.get("object") == "image"
and design_object2.get("object") == "image"
and abs(design_object1.get("ymin")
- design_object2.get("ymin"))
<= config.COLUMNSET_GROUPING.get("ymin_difference")
and x_diff <= config.COLUMNSET_GROUPING.get(
"xmax-xmin_difference", ""))
or self.horizontal_inclusive(object_one, object_two)
)
)
intersection = extract_properites.find_iou(point1, point2,
columns_group=True)[0]
if intersection and point1 != point2:
condition = condition and (
intersection
and (
(object_one.get("xmin") <=
object_two.get("xmin") <= object_one.get("xmax")
and object_one.get("xmin") <=
object_two.get("xmax") <= object_one.get("xmax")
)
or (object_two.get("xmin") <=
object_one.get("xmin") <= object_two.get("xmax")
and object_two.get("xmin") <=
object_one.get("xmax") <= object_two.get("xmax")
)
)
)
return condition
class ChoicesetGrouping(GroupObjects):
"""
Groups the radiobutton objects of the adaptive card objects into a
    choiceset or individual radiobutton objects.
"""
# The design objects are grouped in choicesets based on 2 conditions:
# If the radiobuttons are within the range of 10px of ymax - ymin
    # If the radiobuttons are within the range of 30px of ymins.
CHOICESET_Y_RANGE = 10
CHOICESET_YMIN_RANGE = 30
def __init__(self, card_arrange):
self.card_arrange = card_arrange
def choiceset_condition(self, design_object1: Dict,
design_object2: Dict) -> bool:
"""
        Returns a condition boolean value for grouping radio button objects
        into a choiceset
        @param design_object1: radio button design object
        @param design_object2: radio button design object
        @return: boolean value
"""
design_object1_ymin = float(design_object1.get("ymin"))
design_object2_ymin = float(design_object2.get("ymin"))
difference_in_ymin = abs(design_object1_ymin - design_object2_ymin)
if design_object1_ymin > design_object2_ymin:
difference_in_y = float(
design_object2.get("ymax")) - design_object1_ymin
else:
difference_in_y = float(
design_object1.get("ymax")) - design_object2_ymin
return (abs(difference_in_y) <= self.CHOICESET_Y_RANGE
and difference_in_ymin <= self.CHOICESET_YMIN_RANGE)
def group_choicesets(self, radiobutton_objects: Dict, body: List[Dict],
ymins=None) -> None:
"""
Groups the choice elements into choicesets based on
the closer ymin range
@param radiobutton_objects: list of individual choice
elements
        @param body: list of card design elements
        @param ymins: list of ymins of design elements
"""
groups = []
radio_buttons = []
if isinstance(radiobutton_objects, dict):
for key, values in radiobutton_objects.items():
radio_buttons.append(values)
radiobutton_objects = radio_buttons
if len(radiobutton_objects) == 1:
# radiobutton_objects = [radiobutton_objects]
groups = [radiobutton_objects]
if not groups:
groups = self.object_grouping(radiobutton_objects,
self.choiceset_condition)
for group in groups:
group = sorted(group, key=itemgetter("ymin"))
choice_set = {
"type": "Input.ChoiceSet",
"choices": [],
"style": "expanded"
}
alignment = []
for design_object in group:
self.card_arrange.append_objects(design_object,
choice_set["choices"])
alignment.append(design_object.get("horizontal_alignment",
"Left"))
preference_order = ["Left", "Center", "Right"]
if len(alignment) == len(list(set(alignment))):
alignment.sort(key=(preference_order + alignment).index)
choice_set["horizontalAlignment"] = alignment[0]
else:
choice_set["horizontalAlignment"] = max(set(alignment),
key=alignment.count)
body.append(choice_set)
if ymins is not None and len(group) > 0:
ymins.append(design_object.get("ymin"))
| [
"[email protected]"
] | |
8ef6b58674a55f6236df4da9f882ab9310c12fb8 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GstGL/GLMemoryAllocatorClass.py | 09d94c14d0970b096b8877bf3ada06ef684d53ce | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,176 | py | # encoding: utf-8
# module gi.repository.GstGL
# from /usr/lib64/girepository-1.0/GstGL-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class GLMemoryAllocatorClass(__gi.Struct):
"""
:Constructors:
::
GLMemoryAllocatorClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
copy = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
map = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
unmap = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(GLMemoryAllocatorClass), '__module__': 'gi.repository.GstGL', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'GLMemoryAllocatorClass' objects>, '__weakref__': <attribute '__weakref__' of 'GLMemoryAllocatorClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f56a4000bd0>, 'map': <property object at 0x7f56a4000cc0>, 'copy': <property object at 0x7f56a4000db0>, 'unmap': <property object at 0x7f56a4000ea0>, '_padding': <property object at 0x7f56a4000f90>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(GLMemoryAllocatorClass)
| [
"[email protected]"
] | |
9f3670c4d707a3e54c70d0a55f2059c21cb3d607 | bc39bf7466f06503807bb39366a99ecdd5cab81e | /rdfttl_to_csv.py | 3a92fb8f6c76f113afdd3fd6e763951eeabad5c7 | [] | no_license | SreeSingamsetty/Master-Thesis | cd68e32d243c81865bc2cb4f8c55f1d3f5d43d63 | b6a27acbe1919f07f04194249df22d3d8e5a6f88 | refs/heads/master | 2020-03-18T06:46:24.399820 | 2018-05-22T13:04:22 | 2018-05-22T13:04:22 | 134,414,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from rdflib import Graph
g = Graph()
g.parse("short_abstracts_copy.ttl", format="ttl")
g.serialize("short_abstracts_copy.csv", format="ttl", base="http://dbpedia.org/resource/")
| [
"[email protected]"
] | |
e3a69ab66c5f9cb1d085346a0716780128beced1 | 7334d7669807d3bf9fe165fe916ca7b1a06f8b7c | /app.py | 4ed8e62dd07a1e26c268baf57efc01886f061091 | [] | no_license | Sreehari-BGK/Tinkerhub_Practicial_AI_Bootcamp_Project | 3a75a414d33328f31a592a273788d9df89b02b57 | 651c7d5bcf3009603c678e10bef21e98fb4f80aa | refs/heads/main | 2023-07-23T13:02:54.599057 | 2021-09-10T07:54:03 | 2021-09-10T07:54:03 | 404,999,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | from __future__ import division, print_function
import sys
import os
import glob
import re
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
from keras.preprocessing import image
from keras import backend as K
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
from scipy.misc import imread, imresize
import tensorflow as tf
import skimage.transform as st
from skimage.transform import resize
app = Flask(__name__)
MODEL_PATH = 'model.h5'
config = tf.ConfigProto(
device_count={'GPU': 1},
intra_op_parallelism_threads=1,
allow_soft_placement=True
)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
session = tf.Session(config=config)
K.set_session(session)
# Load your trained model
model = load_model(MODEL_PATH)
model._make_predict_function() # Necessary
print('Model loaded. Start serving...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
# from keras.applications.resnet50 import ResNet50
# model = ResNet50(weights='imagenet')
graph = tf.get_default_graph() # Change
print('Model loaded. Check http://127.0.0.1:5000/')
# def classify(image, model):
# class_names = ['airplane','automobile','bird','cat','deer',
# 'dog','frog','horse','ship','truck']
# preds = model.predict(image)
# classification = np.argmax(preds)
# final = pd.DataFrame({'name' : np.array(class_names),'probability' :preds[0]})
# return final.sort_values(by = 'probability',ascending=False),class_names[classification]
def model_predict(img_path, model):
try:
with session.as_default():
with session.graph.as_default():
img = image.load_img(img_path, target_size=(32, 32,3))
# Preprocessing the image
# x = image.img_to_array(img)
# x = np.true_divide(x, 255)
x = np.expand_dims(img, axis=0)
# x = preprocess_input(x, mode='caffe')
preds = model.predict(np.array(x))
return preds
except Exception as ex:
        print('Prediction Error', ex, ex.__traceback__.tb_lineno)
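# Note: the explicit session/graph context managers above are the usual
# workaround for serving a Keras/TF1 model from Flask — the model is loaded in
# one thread and requests are handled in others, so predictions must run inside
# the same graph and session that loaded the weights.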
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['file']
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
# # image_url = request.form['image_url']
# # image = io.imread(image_url)
# image_small = st.resize(file_path, (32,32,3))
# x = np.expand_dims(image_small.transpose(2, 0, 1), axis=0)
# final,pred_class = classify(x, model)
# print(pred_class)
# print(final)
#Store model prediction results to pass to the web page
# message = "Model prediction: {}".format(pred_class)
# Make prediction
preds = model_predict(file_path, model)
print(preds)
number_to_class = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
index = np.argsort(preds[0,:])
# for x in range(len(number_to_class)):
# if number_to_class[x] == 1:
# print(preds[0][i])
# Process your result for human
pred_class = preds.argmax(axis=-1) # Simple argmax
# pred_class = decode_predictions(preds, top=1) # ImageNet Decode
# result = str(pred_class[0][1]) # Convert to string
return str(number_to_class[index[9]])+str(" index : ")+str(pred_class)
return None
if __name__ == '__main__':
app.run() | [
"[email protected]"
] | |
aa36fc5578e1ff9d3e2ca3774590d9e2ac4b034b | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/hoework01/gettop10frommaoyam01_20200626091702.py | d2722f72cb3c318b6baafd5cd7fd7285bc7c6d98 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 2,407 | py | # 使用requests,bs4库,爬取猫眼电影top10的电影名称、电影类型、上映时间,并以utf-8的字符集保存到csv文件中
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4";
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593100662316.1593100664951.15; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-session-id={"id":"435818e6a726415f46defffa27f7abc6","time":1593100221937}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100665; mojo-trace-id=17; _lxsdk_s=172ec2bff67-0c2-e9f-c64%7C%7C24__mta=251934006.1593072991075.1593100690175.1593100868002.17; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100868; _lxsdk_s=172ee2f4a3e-1c2-3a1-5a4%7C%7C1',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
response = requests.get(maoyanUrl,headers=header)
response.encoding = 'utf-8'
bs_info = bs(response.text,"html.parser")
# print(response.text)
for tag in bs_info.find_all('div',attrs={'class' : 'movie-item-content'}):
print(tag)
| [
"[email protected]"
] | |
291422589918cff1a01a7a361b4c182bc37e09c5 | 65dba58b620e89db5113a60cf184cd6b26129e05 | /terms.py | 4b022f5ca152be0c0bbc7c9cb89af0bf9d827ca0 | [] | no_license | PriyankVIT/laughing-octo-journey | de3b32d69a170b97e71e4124dee0210e88d66a7b | a6849f4b42527ef4ccc67de6225954447e5a653a | refs/heads/master | 2020-08-23T10:59:00.633921 | 2019-10-21T15:09:15 | 2019-10-21T15:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | import pandas as pd
import numpy as np
from nltk import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from sklearn.metrics.pairwise import cosine_similarity
import networkx as nx
stop = set(stopwords.words("english"))
path="terms.txt"
parsedata=[]
count=0
with open(path) as fp:
while True:
messages=[]
line=fp.readline()
line=line.lower()
if not line:
print("False")
break
else:
sent=sent_tokenize(line)
for y in sent:
count+=1
print(y)
messages=[count,y]
parsedata.append(messages)
print(messages)
data= pd.DataFrame(parsedata,columns=['index','article'])
data.to_csv("terms.csv")
print(count)
terms=pd.read_csv("terms.csv")
terms=terms[['index','article']]
def stopwords_removal(line):
line=" ".join(x for x in line.split() if x not in stop)
return line
porter = PorterStemmer()
lancaster=LancasterStemmer()
def stemSentence(sentence):
token_words=word_tokenize(sentence)
token_words
stem_sentence=[]
for word in token_words:
stem_sentence.append(lancaster.stem(word))
stem_sentence.append(" ")
return "".join(stem_sentence)
terms['article']=terms['article'].apply(stopwords_removal)
sentences = []
for s in terms['article']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x] # flatten list
word_embeddings = {}
f = open('./glove/glove.6B.100d.txt', encoding='utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
word_embeddings[word] = coefs
f.close()
sentence_vectors = []
for i in sentences:
if len(i) != 0:
v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()])/(len(i.split())+0.001)
else:
v = np.zeros((100,))
sentence_vectors.append(v)
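# The block below is a TextRank-style summarizer: build a cosine-similarity
# matrix over the sentence vectors, run PageRank on the resulting graph, and
# print the top-ranked sentences as the summary.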
sim_mat = np.zeros([len(sentences), len(sentences)])
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,100), sentence_vectors[j].reshape(1,100))[0,0]
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph)
ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
for i in range(10):
print(ranked_sentences[i][1]) | [
"[email protected]"
] | |
c4675dce2279ea3da44cfb77ff37f04d65a568de | b40c523eb48e899763cefbc5cbac1a9538b7524c | /test.py | a55de7e64221ef48a92e6455c286761886ce54cd | [] | no_license | kalexrt/Image-Colorization-using-CNN | b5ad355fa286280a61535bf245015d25d3108b16 | f69f4e7b6e550f22c289e44d977af0602b8309d9 | refs/heads/master | 2023-03-16T08:31:15.299794 | 2018-10-11T08:23:17 | 2018-10-11T08:23:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | #dependencies
import numpy as np
import cv2
import os
def read_images(path):
images = []
all_paths = os.listdir(path)
mini_set = all_paths[:400]
for i in mini_set:
file = path+"/"+i
image = cv2.imread(file)
image = cv2.resize(image,(128,128))
images.append(image)
return images
x = read_images("C:/Users/Arghyadeep/Desktop/image colorization/new process/val2017")
#cv2.imshow('image',x[1])
def extract_channels(lab_images):
l_channels = []
a_channels = []
b_channels = []
for i in lab_images:
l,a,b = cv2.split(i)
l_channels.append(l)
a_channels.append(a)
b_channels.append(b)
return np.array(l_channels), np.array(a_channels), np.array(b_channels)
l,a,b = cv2.split(x[1])
l = np.array(l)
l = l.reshape(128,128)
l = np.array(l)
print(l)
cv2.imshow('img',l)
| [
"[email protected]"
] | |
5fdc08551c6d8e928a2cdfe94f16b447b3157fe9 | 90d7e077d4b5aac29d9aac2352d7d56da35bdd65 | /spell_checker.py | b6785324e2771cf2b86fc9705823d40153543c5c | [] | no_license | Apoorv7092/Ori | 2d0fb807b50dfb3f4ac64d6a33992ac2cb4db3ee | 46af2ee06d7427d36697bf1f3c1a1d6ad39d0224 | refs/heads/main | 2023-06-24T07:49:59.339665 | 2021-07-23T05:23:01 | 2021-07-23T05:23:01 | 388,685,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,381 | py |
import pandas as pd
import parameters
import re
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
fdf = pd.read_excel(parameters.training_data)
#message_col=list(fdf['message'])
tadaa = " ".join(list(fdf["message"]))
#tadaa = open('/home/rajput/Documents/Fasttext_final/testting/fastText-0.9.1/fastText-0.9.1/saddam70M').read()
tadaa1 = open(parameters.spell_checker_file).read()
tadaa+=tadaa1
word_list=tadaa.split()
words_dict={}
for i in range(len(word_list)):
words_dict[word_list[i]]=i
# print(type(tadaa))
# print(tadaa)
WORDS = Counter(words(tadaa))
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def correction1(word):
return max(candidates1(word), key=P)
def candidates1(word):
"Generate possible spelling corrections for word."
#return (known([word]) or known(edits1(word)) or known(edits2(word)) or known(edit3(word)) or [word])
return (known([word]) or known(edits1(word)) or [word])
def candidates(word):
"Generate possible spelling corrections for word."
#return (known([word]) or known(edits1(word)) or known(edits2(word)) or known(edit3(word)) or [word])
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
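# Note: for a word of length n, edits1 builds n deletions, n-1 transpositions,
# 26n replacements and 26(n+1) insertions (54n + 25 candidate strings before
# set() removes duplicates). A small illustration:
def _edits1_example():
    # "cat" (n=3) yields at most 54*3 + 25 = 187 candidates; the returned set is
    # smaller because some edits collide (e.g. replacing a letter with itself).
    return len(edits1("cat"))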
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
# def edit3(word):
# return (e3 for e2 in edits2(word) for e3 in edits2(e2))
def spell_checker(text):
#print('enter text')
#text1=input()
text=text.split()
modified_text=[]
for word in text:
if len(word)<=3:
modified_text.append(word)
elif len(word)==4:
if word not in words_dict:
modified_text.append(correction1(word))
else:
modified_text.append(word)
elif len(word)>4:
if word not in words_dict:
modified_text.append(correction(word))
else:
modified_text.append(word)
return " ".join(modified_text)
#print(correction('recharg'))
# while True:
# text=input()
# print(spell_checker(text))
# while True:
# print('enter text')
# text1=input()
# text=text1.split()
# modified_text=[]
# for word in text:
# if len(word)<=3:
# modified_text.append(word)
# else:
# modified_text.append(correction(word))
# print(" ".join(modified_text))
# print(text1)
# #print(correction('recharg'))
| [
"[email protected]"
] | |
d3bd8c51d6239f19186109f0ca17cf57933c4503 | 1d26fcc1673c78a03b2474102dddd63234863657 | /440 Final Project/neuralDigits.py | b692034f3653646f7b23a8fecc18f034a5786ea0 | [] | no_license | taotao-mars/AI-final-project | b47622927f87c83e863d28e59fb7a59d6afdc7f1 | b3e5892afad3dce64843b4c5efaab42917af42ff | refs/heads/master | 2020-03-16T17:38:34.619149 | 2018-05-10T03:18:27 | 2018-05-10T03:18:27 | 132,841,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | import samples
import numpy as np
from neuralNetwork import NeuralNetworkClassifier
def testing(num):
trainData = np.load("traindigitbasic.npy")
trainLabels = samples.loadLabelsFile("data/digitdata/traininglabels", num)
testData = np.load("testdigitbasic.npy")
testLabels = samples.loadLabelsFile("data/digitdata/testlabels", 1000)
validData = np.load("validationdigitbasic.npy")
validLabels = samples.loadLabelsFile("data/digitdata/validationlabels", 1000)
neural = NeuralNetworkClassifier(28 * 28, 50, 10, num, 3.5)
neural.train(trainData[:,0:num], trainLabels, 100)
print "*************Test Data*************"
guess = neural.classify(testData)
samples.verify(neural, guess, testLabels)
print "***********************************"
print "************Valid Data*************"
guess = neural.classify(validData)
samples.verify(neural, guess, validLabels)
if __name__ == "__main__":
sampleDigit=[500,1000,1500,2000,2500,3000,3500,4000,4500,5000]
sampleFace=[45,90,135,180,225,270,315,300,405,450]
sample=sampleDigit
for i in range(len(sample)):
print str(10*(i+1))+"%% training data, %d" % sample[i]
testing(sample[i])
print "***********************************"
| [
"[email protected]"
] | |
5504d599f5231dfb970d783217327010a3757c72 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/netapp/v20201201/snapshot.py | 5dc6e64c2d7c5f24cdb196fcb956b80495f2cc6e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,959 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['SnapshotArgs', 'Snapshot']
@pulumi.input_type
class SnapshotArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
pool_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
volume_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Snapshot resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] volume_name: The name of the volume
:param pulumi.Input[str] location: Resource location
        :param pulumi.Input[str] snapshot_name: The name of the snapshot
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "pool_name", pool_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "volume_name", volume_name)
if location is not None:
pulumi.set(__self__, "location", location)
if snapshot_name is not None:
pulumi.set(__self__, "snapshot_name", snapshot_name)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the NetApp account
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="poolName")
def pool_name(self) -> pulumi.Input[str]:
"""
The name of the capacity pool
"""
return pulumi.get(self, "pool_name")
@pool_name.setter
def pool_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pool_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="volumeName")
def volume_name(self) -> pulumi.Input[str]:
"""
The name of the volume
"""
return pulumi.get(self, "volume_name")
@volume_name.setter
def volume_name(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> Optional[pulumi.Input[str]]:
"""
        The name of the snapshot
"""
return pulumi.get(self, "snapshot_name")
@snapshot_name.setter
def snapshot_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_name", value)
class Snapshot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Snapshot of a Volume
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] snapshot_name: The name of the snapshot
:param pulumi.Input[str] volume_name: The name of the volume
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SnapshotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Snapshot of a Volume
:param str resource_name: The name of the resource.
:param SnapshotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SnapshotArgs.__new__(SnapshotArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["location"] = location
if pool_name is None and not opts.urn:
raise TypeError("Missing required property 'pool_name'")
__props__.__dict__["pool_name"] = pool_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["snapshot_name"] = snapshot_name
if volume_name is None and not opts.urn:
raise TypeError("Missing required property 'volume_name'")
__props__.__dict__["volume_name"] = volume_name
__props__.__dict__["created"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["snapshot_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20201201:Snapshot"), pulumi.Alias(type_="azure-native:netapp:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20170815:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190501:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190601:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190701:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20190801:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20191001:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20191101:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200201:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200301:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200501:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200601:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200701:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200801:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20200901:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20201101:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20210201:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20210201:Snapshot"), pulumi.Alias(type_="azure-native:netapp/v20210401preview:Snapshot"), pulumi.Alias(type_="azure-nextgen:netapp/v20210401preview:Snapshot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Snapshot, __self__).__init__(
'azure-native:netapp/v20201201:Snapshot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Snapshot':
"""
Get an existing Snapshot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SnapshotArgs.__new__(SnapshotArgs)
__props__.__dict__["created"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["snapshot_id"] = None
__props__.__dict__["type"] = None
return Snapshot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def created(self) -> pulumi.Output[str]:
"""
The creation date of the snapshot
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> pulumi.Output[str]:
"""
UUID v4 used to identify the Snapshot
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
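# Illustrative usage sketch: creating a Snapshot from inside a Pulumi program. Every
# name below ("example-snapshot", "myResourceGroup", "myNetAppAccount", "myPool",
# "myVolume") is a placeholder, not a value defined in this module:
#
#     snapshot = Snapshot("example-snapshot",
#         resource_group_name="myResourceGroup",
#         account_name="myNetAppAccount",
#         pool_name="myPool",
#         volume_name="myVolume",
#         location="westus2")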
| [
"[email protected]"
] | |
a9bb0f690afa0a9da4041ea70f4527cbe0ef2c3a | 761beb2a465800f992c8590767d8cd84e1480a4c | /pySON.py | 1175a5b0a060e8d6f1118ad7b2cf3bb8819ff340 | [] | no_license | willh99/PRESS-RPi | 5109aed872ef1f65249f683a3f68d141d4e995bb | 5b0587158890c42f01538f36db91124cf507abe5 | refs/heads/master | 2021-04-28T08:05:17.638496 | 2018-04-11T17:46:10 | 2018-04-11T17:46:10 | 122,240,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | import json
import time
import random
import datetime
def read_json(filename):
if '.json' not in filename:
return -1
try:
with open(filename, 'r') as f:
print("File \"", filename, "\" found", sep='')
data = json.load(f)
return data
except FileNotFoundError:
print("File Not Found")
return -1
def append_json(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=2)
# print("wrote to file")
def create_status(buy, sell, isprice):
now = datetime.datetime.now()
now = now.strftime('%d-%m-%Y %X')
data = {"Sell": sell, "Buy": buy, "Timestamp": now}
if isprice:
filename = 'price_status.json'
else:
filename = 'status.json'
with open(filename, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
def profit_file(mode, profit):
try:
if mode == 'Read':
with open('profit.txt', 'r') as f:
return f.readline().split()[0]
except FileNotFoundError:
print("File Not Found")
return 0
with open('profit.txt', 'w') as f:
f.write(str(profit))
if __name__ == "__main__":
json_list = []
for x in range(0, 100):
i = random.random()*12.8
dictionary = {"Timestamp": time.asctime(time.localtime()),
"Voltage": round(i, 6)}
if len(json_list) >= 50:
json_list.pop(0)
json_list.append(dictionary)
# time.sleep(.2)
    append_json(json_list, 'status.json')  # append_json requires a filename; 'status.json' is an illustrative choice that matches the read_json call below
something = read_json('status.json')
    if something != -1:
print(json.dumps(something, indent=2))
profit_file('Write', 1129.124)
print(profit_file('Read', 0))
| [
"[email protected]"
] | |
e0887b70f4b7024270a588e59d6a5d81ec0959c3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/signalrservice/v20210601preview/get_signal_r.py | e126d745e3c8f5f3fc1a5876c117c9fc8754627f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,073 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSignalRResult',
'AwaitableGetSignalRResult',
'get_signal_r',
'get_signal_r_output',
]
@pulumi.output_type
class GetSignalRResult:
"""
A class represent a resource.
"""
def __init__(__self__, cors=None, disable_aad_auth=None, disable_local_auth=None, external_ip=None, features=None, host_name=None, id=None, identity=None, kind=None, location=None, name=None, network_acls=None, private_endpoint_connections=None, provisioning_state=None, public_network_access=None, public_port=None, server_port=None, shared_private_link_resources=None, sku=None, system_data=None, tags=None, tls=None, type=None, upstream=None, version=None):
if cors and not isinstance(cors, dict):
raise TypeError("Expected argument 'cors' to be a dict")
pulumi.set(__self__, "cors", cors)
if disable_aad_auth and not isinstance(disable_aad_auth, bool):
raise TypeError("Expected argument 'disable_aad_auth' to be a bool")
pulumi.set(__self__, "disable_aad_auth", disable_aad_auth)
if disable_local_auth and not isinstance(disable_local_auth, bool):
raise TypeError("Expected argument 'disable_local_auth' to be a bool")
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if external_ip and not isinstance(external_ip, str):
raise TypeError("Expected argument 'external_ip' to be a str")
pulumi.set(__self__, "external_ip", external_ip)
if features and not isinstance(features, list):
raise TypeError("Expected argument 'features' to be a list")
pulumi.set(__self__, "features", features)
if host_name and not isinstance(host_name, str):
raise TypeError("Expected argument 'host_name' to be a str")
pulumi.set(__self__, "host_name", host_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_acls and not isinstance(network_acls, dict):
raise TypeError("Expected argument 'network_acls' to be a dict")
pulumi.set(__self__, "network_acls", network_acls)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_network_access and not isinstance(public_network_access, str):
raise TypeError("Expected argument 'public_network_access' to be a str")
pulumi.set(__self__, "public_network_access", public_network_access)
if public_port and not isinstance(public_port, int):
raise TypeError("Expected argument 'public_port' to be a int")
pulumi.set(__self__, "public_port", public_port)
if server_port and not isinstance(server_port, int):
raise TypeError("Expected argument 'server_port' to be a int")
pulumi.set(__self__, "server_port", server_port)
if shared_private_link_resources and not isinstance(shared_private_link_resources, list):
raise TypeError("Expected argument 'shared_private_link_resources' to be a list")
pulumi.set(__self__, "shared_private_link_resources", shared_private_link_resources)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tls and not isinstance(tls, dict):
raise TypeError("Expected argument 'tls' to be a dict")
pulumi.set(__self__, "tls", tls)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if upstream and not isinstance(upstream, dict):
raise TypeError("Expected argument 'upstream' to be a dict")
pulumi.set(__self__, "upstream", upstream)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def cors(self) -> Optional['outputs.SignalRCorsSettingsResponse']:
"""
Cross-Origin Resource Sharing (CORS) settings.
"""
return pulumi.get(self, "cors")
@property
@pulumi.getter(name="disableAadAuth")
def disable_aad_auth(self) -> Optional[bool]:
"""
DisableLocalAuth
Enable or disable aad auth
When set as true, connection with AuthType=aad won't work.
"""
return pulumi.get(self, "disable_aad_auth")
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[bool]:
"""
DisableLocalAuth
Enable or disable local auth with AccessKey
When set as true, connection with AccessKey=xxx won't work.
"""
return pulumi.get(self, "disable_local_auth")
@property
@pulumi.getter(name="externalIP")
def external_ip(self) -> str:
"""
The publicly accessible IP of the resource.
"""
return pulumi.get(self, "external_ip")
@property
@pulumi.getter
def features(self) -> Optional[Sequence['outputs.SignalRFeatureResponse']]:
"""
List of the featureFlags.
FeatureFlags that are not included in the parameters for the update operation will not be modified.
And the response will only include featureFlags that are explicitly set.
When a featureFlag is not explicitly set, its globally default value will be used
But keep in mind, the default value doesn't mean "false". It varies in terms of different FeatureFlags.
"""
return pulumi.get(self, "features")
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
FQDN of the service instance.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
The managed identity response
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
The kind of the service - e.g. "SignalR" for "Microsoft.SignalRService/SignalR"
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The GEO location of the resource. e.g. West US | East US | North Central US | South Central US.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkACLs")
def network_acls(self) -> Optional['outputs.SignalRNetworkACLsResponse']:
"""
Network ACLs
"""
return pulumi.get(self, "network_acls")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:
"""
Private endpoint connections to the resource.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Enable or disable public network access. Default to "Enabled".
When it's Enabled, network ACLs still apply.
When it's Disabled, public network access is always disabled no matter what you set in network ACLs.
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter(name="publicPort")
def public_port(self) -> int:
"""
The publicly accessible port of the resource which is designed for browser/client side usage.
"""
return pulumi.get(self, "public_port")
@property
@pulumi.getter(name="serverPort")
def server_port(self) -> int:
"""
The publicly accessible port of the resource which is designed for customer server side usage.
"""
return pulumi.get(self, "server_port")
@property
@pulumi.getter(name="sharedPrivateLinkResources")
def shared_private_link_resources(self) -> Sequence['outputs.SharedPrivateLinkResourceResponse']:
"""
The list of shared private link resources.
"""
return pulumi.get(self, "shared_private_link_resources")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ResourceSkuResponse']:
"""
The billing information of the resource.(e.g. Free, Standard)
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags of the service which is a list of key value pairs that describe the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def tls(self) -> Optional['outputs.SignalRTlsSettingsResponse']:
"""
TLS settings.
"""
return pulumi.get(self, "tls")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource - e.g. "Microsoft.SignalRService/SignalR"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def upstream(self) -> Optional['outputs.ServerlessUpstreamSettingsResponse']:
"""
Upstream settings when the service is in server-less mode.
"""
return pulumi.get(self, "upstream")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the resource. Probably you need the same or higher version of client SDKs.
"""
return pulumi.get(self, "version")
class AwaitableGetSignalRResult(GetSignalRResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSignalRResult(
cors=self.cors,
disable_aad_auth=self.disable_aad_auth,
disable_local_auth=self.disable_local_auth,
external_ip=self.external_ip,
features=self.features,
host_name=self.host_name,
id=self.id,
identity=self.identity,
kind=self.kind,
location=self.location,
name=self.name,
network_acls=self.network_acls,
private_endpoint_connections=self.private_endpoint_connections,
provisioning_state=self.provisioning_state,
public_network_access=self.public_network_access,
public_port=self.public_port,
server_port=self.server_port,
shared_private_link_resources=self.shared_private_link_resources,
sku=self.sku,
system_data=self.system_data,
tags=self.tags,
tls=self.tls,
type=self.type,
upstream=self.upstream,
version=self.version)
def get_signal_r(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSignalRResult:
"""
    A class that represents a resource.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str resource_name: The name of the resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:signalrservice/v20210601preview:getSignalR', __args__, opts=opts, typ=GetSignalRResult).value
return AwaitableGetSignalRResult(
cors=__ret__.cors,
disable_aad_auth=__ret__.disable_aad_auth,
disable_local_auth=__ret__.disable_local_auth,
external_ip=__ret__.external_ip,
features=__ret__.features,
host_name=__ret__.host_name,
id=__ret__.id,
identity=__ret__.identity,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
network_acls=__ret__.network_acls,
private_endpoint_connections=__ret__.private_endpoint_connections,
provisioning_state=__ret__.provisioning_state,
public_network_access=__ret__.public_network_access,
public_port=__ret__.public_port,
server_port=__ret__.server_port,
shared_private_link_resources=__ret__.shared_private_link_resources,
sku=__ret__.sku,
system_data=__ret__.system_data,
tags=__ret__.tags,
tls=__ret__.tls,
type=__ret__.type,
upstream=__ret__.upstream,
version=__ret__.version)
@_utilities.lift_output_func(get_signal_r)
def get_signal_r_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSignalRResult]:
"""
    A class that represents a resource.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str resource_name: The name of the resource.
"""
...
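# Illustrative usage sketch from a Pulumi program; the resource group and resource
# names below are placeholders, not values defined in this module:
#
#     signalr = get_signal_r(resource_group_name="myResourceGroup",
#                            resource_name="mySignalRService")
#     pulumi.export("hostName", signalr.host_name)
#     pulumi.export("publicPort", signalr.public_port)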
| [
"[email protected]"
] | |
9dca95f0eadc9b7576cb73579313ffa2ab36aaa3 | 444670e6d73ae9d95c0bb0459c8e02423876d2fb | /pycharm/LoginSite/mylogin/migrations/0001_initial.py | 08c4cb3c5cfd13d3c86c5e92dc2a59b4d175f342 | [] | no_license | zhangxingxing12138/web-pycharm | c8b6822be95bfb904f81f772185fe9e17fc77fc3 | 5f212e6805b0734aa3c791830526a95b24a930f4 | refs/heads/master | 2020-04-04T18:03:45.458309 | 2018-11-08T12:03:51 | 2018-11-08T12:03:51 | 156,148,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-06 00:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('password', models.CharField(max_length=256)),
('email', models.EmailField(max_length=254, unique=True)),
('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
('c_time', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-c_time'],
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
),
]
| [
"[email protected]"
] | |
fd7d21d67afccb60b4404408de84c65409f99ebc | cc66ac8f147a698cc8e4e435dd45e5129591a6ef | /improvedKNN.py | 5440c68edbfc4a9169d9585daffd34e6c735942e | [] | no_license | PiyushNarsikar27/Improved-KNN | b07352edfac5db2d2afc3bae80c68833b670fe51 | 7ed876077bacfd5b816e0bbe706e67c150a09dd1 | refs/heads/main | 2023-03-23T16:02:23.751966 | 2021-03-14T07:27:38 | 2021-03-14T07:27:38 | 347,569,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | def takeFirst(elem):
return elem[0]
import numpy as np  # required for np.linalg.norm below
def improved_knn_predictor(xtrain,xtest,ytrain,k,num_classes):
y_pred=[]
x=0 # x indicates how many elements are processed so far
for test_point in xtest:
i=0
distances=[]
dist=[]
for train_point in xtrain: # Creating a list of distances from each of the other test points. Each element of the list "distances" includes the distance and the class of the respective test point.
dist=[np.linalg.norm(test_point-train_point),ytrain[i]]
distances.append(dist)
i += 1
distances.sort(key=takeFirst) # Sorting the list based on the distance from the test point currently being processed in ascending order
sumlist=[]
for f in range(num_classes): # Creating a list of average distance of k nearest neighbors belonging to each class from the test point currently being processed
sum=0
count=0
toAdd=[]
for g in range(len(distances)):
if count==k:
break
if distances[g][1]==f:
sum += distances[g][0]
count += 1
sum=sum/k
toAdd=[sum,f]
sumlist.append(toAdd)
sumlist.sort(key=takeFirst) # Sorting the averages in ascending order
        y_pred.append(sumlist[0][1]) # Predicting the class of the current test point as the one with lowest average distance
print(x,end=" ")
x += 1 # Incrementing the progress indicator variable x
return y_pred
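# Minimal usage sketch with synthetic data. The array shapes and the k / num_classes
# values below are illustrative only and are not taken from the original project.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    xtrain = rng.rand(60, 4)                # 60 training points with 4 features each
    ytrain = rng.randint(0, 3, size=60)     # labels drawn from 3 classes
    xtest = rng.rand(5, 4)                  # 5 unlabelled points to classify
    predictions = improved_knn_predictor(xtrain, xtest, ytrain, k=5, num_classes=3)
    print()
    print(predictions)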
| [
"[email protected]"
] | |
5126cfeafdad3a6bee680a4dfae4380b7bea389c | 381ba62f113dc74c4592bf4d2718cb3d1379bee1 | /Mosh/Python/Variables/constructor_exercise.py | e30232d11f3d300087b054a2e7c2bf773b14c05a | [] | no_license | MaSanTM/Mosh | 2926cfddb9cf7f0faef0ed80e55d29a9227b9a1e | 129e2f0618c2026556396734220b6d32f69acdf3 | refs/heads/main | 2023-07-22T05:31:55.159348 | 2021-09-07T21:17:52 | 2021-09-07T21:17:52 | 404,125,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | class Person:
def __init__(self, name):
self.name = name
def talk(self):
print(f"Hi, i'm {self.name}")
john = Person('SMITH John')
print(john.name)
john.talk() | [
"[email protected]"
] | |
ff82dba0faaadec9068bbc9b3ccc625a721573a6 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/file/formats/gzip/GZipFileSystemFactory.pyi | 65e1831d93c9e7d26029af1620bb52a08cc18eb9 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | pyi | from typing import List
import ghidra.formats.gfilesystem
import ghidra.formats.gfilesystem.factory
import ghidra.util.task
import java.io
import java.lang
class GZipFileSystemFactory(object, ghidra.formats.gfilesystem.factory.GFileSystemFactoryWithFile, ghidra.formats.gfilesystem.factory.GFileSystemProbeBytesOnly):
MAX_BYTESREQUIRED: int = 65536
PROBE_BYTES_REQUIRED: int = 2
def __init__(self): ...
def create(self, __a0: ghidra.formats.gfilesystem.FSRL, __a1: ghidra.formats.gfilesystem.FSRLRoot, __a2: java.io.File, __a3: ghidra.formats.gfilesystem.FileSystemService, __a4: ghidra.util.task.TaskMonitor) -> ghidra.formats.gfilesystem.GFileSystem: ...
def equals(self, __a0: object) -> bool: ...
def getBytesRequired(self) -> int: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def probeStartBytes(self, __a0: ghidra.formats.gfilesystem.FSRL, __a1: List[int]) -> bool: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def bytesRequired(self) -> int: ...
| [
"[email protected]"
] | |
708cfe325109af42808f25f39d043b2a0676c301 | 1dbd4d92637c80d01a1f56c62b4871c7fe22a9f0 | /analysis_scripts/1c-Plotting_patch_clamp_results.py | 34d817a659c819363eab778d74d25c11498d5c61 | [] | no_license | nmarzano/peakfinding_patchclamping_calcium-imaging | 4ecdb1b944ac7ac6b6d2d405a09a94683578ae49 | 31194e2a338f14f221e1c12917213e94261a65e5 | refs/heads/master | 2023-04-12T13:07:27.949766 | 2022-12-20T05:44:06 | 2022-12-20T05:44:06 | 580,261,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
output_dir = 'directory_to_save'
filename = 'directory/cleaned_data.csv'
major_peaks = pd.read_csv(filename, header="infer")
def plot_cells(dfs, xmin, xmax):
sns.set(style = 'ticks', font_scale = 1)
for cell, df in dfs.groupby('cell'):
fig, ax = plt.subplots(2, 1)
sns.lineplot(data = df, x = "Time (s)", y = "Background corrected", color = "black", ax = ax[0])
sns.lineplot(data = df, x = "Time (s)", y = "Background corrected", color = "black", ax = ax[1])
ax[1].set_xlim(xmin, xmax)
fig.savefig(f'{output_dir}/raw_plot_cell{cell}.eps', dpi = 600)
plt.show()
plot_cells(major_peaks, 70.5, 71)
| [
"[email protected]"
] | |
c8aa8df708fa14ee7771650c5ffd7b543d0c78ca | c66e9277898da27d9d56fab1ac5fcdc772f57f4a | /tests/test_modeling_flax_common.py | f6737d864930434b8f8d929c30a957f20f9aae28 | [
"Apache-2.0"
] | permissive | vumichien/transformers | 47901c895cd3ce8a7c30f691dcb40bdfe7fc4030 | 75a208ef66c0176fc12a4c98922728ced5befbf9 | refs/heads/main | 2023-02-26T03:57:52.930111 | 2023-02-10T22:28:24 | 2023-02-10T22:28:24 | 238,600,337 | 1 | 0 | Apache-2.0 | 2020-02-06T03:34:11 | 2020-02-06T03:34:10 | null | UTF-8 | Python | false | false | 58,666 | py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import json
import random
import tempfile
import unittest
from typing import List, Tuple
import numpy as np
from huggingface_hub import HfFolder, delete_repo, set_access_token
from requests.exceptions import HTTPError
import transformers
from transformers import BertConfig, is_flax_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
TOKEN,
USER,
CaptureLogger,
is_pt_flax_cross_test,
is_staging_test,
require_flax,
torch_device,
)
from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging
from transformers.utils.generic import ModelOutput
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
from transformers import (
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModel,
FlaxAutoModelForSequenceClassification,
FlaxBertModel,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
def ids_tensor(shape, vocab_size, rng=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return np.array(values, dtype=jnp.float32).reshape(shape)
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class FlaxModelTesterMixin:
model_tester = None
all_model_classes = ()
test_mismatched_shapes = True
is_encoder_decoder = False
test_head_masking = False
has_attentions = True
def _prepare_for_class(self, inputs_dict, model_class):
inputs_dict = copy.deepcopy(inputs_dict)
# hack for now until we have AutoModel classes
if "ForMultipleChoice" in model_class.__name__:
inputs_dict = {
k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1]))
if isinstance(v, (jnp.ndarray, np.ndarray))
else v
for k, v in inputs_dict.items()
}
return inputs_dict
def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
diff = np.abs((a - b)).max()
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
# (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs)
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
"""
Args:
            model_class: The class of the model that is currently being tested. For example, ..., etc.
Currently unused, but it could make debugging easier and faster.
names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs.
Currently unused, but in the future, we could use this information to make the error message clearer
by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax.
"""
self.assertEqual(type(name), str)
if attributes is not None:
self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
# Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
if isinstance(fx_outputs, ModelOutput):
self.assertTrue(
isinstance(pt_outputs, ModelOutput),
f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is",
)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch")
# convert to the case of `tuple`
# appending each key to the current (string) `name`
attributes = tuple([f"{name}.{k}" for k in fx_keys])
self.check_pt_flax_outputs(
fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
)
# Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
elif type(fx_outputs) in [tuple, list]:
self.assertEqual(
type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch"
)
self.assertEqual(
len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch"
)
if attributes is not None:
# case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
self.assertEqual(
len(attributes),
len(fx_outputs),
f"{name}: The tuple `attributes` should have the same length as `fx_outputs`",
)
else:
# case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name`
attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))])
for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes):
self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr)
elif isinstance(fx_outputs, jnp.ndarray):
self.assertTrue(
isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is"
)
# Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`.
fx_outputs = np.array(fx_outputs)
pt_outputs = pt_outputs.detach().to("cpu").numpy()
self.assertEqual(
fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch"
)
# deal with NumPy's scalars to make replacing nan values by 0 work.
if np.isscalar(fx_outputs):
fx_outputs = np.array([fx_outputs])
pt_outputs = np.array([pt_outputs])
fx_nans = np.isnan(fx_outputs)
pt_nans = np.isnan(pt_outputs)
pt_outputs[fx_nans] = 0
fx_outputs[fx_nans] = 0
pt_outputs[pt_nans] = 0
fx_outputs[pt_nans] = 0
max_diff = np.amax(np.abs(fx_outputs - pt_outputs))
self.assertLessEqual(
max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})."
)
else:
raise ValueError(
"`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got"
f" {type(fx_outputs)} instead."
)
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
# It might be better to put this inside the for loop below (because we modify the config there).
# But logically, it is fine.
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
# send pytorch model to the correct device
pt_model.to(torch_device)
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs)
fx_outputs = fx_model(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class)
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
# send pytorch model to the correct device
pt_model.to(torch_device)
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs)
fx_outputs = fx_model(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
# send pytorch model to the correct device
pt_model_loaded.to(torch_device)
pt_model_loaded.eval()
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class)
def test_from_pretrained_save_pretrained(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs_dict).to_tuple()
# verify that normal save_pretrained works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# the config file (and the generation config file, if it can generate) should be saved
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
self.assertEqual(
model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
# verify that save_pretrained for distributed training
# with `params=params` works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=model.params)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
def test_save_load_from_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_save_load_to_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_from_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
# save pt model
pt_model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname, from_pt=True)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_bf16_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
model.params = model.to_bf16(model.params)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_ids, attention_mask=None, **kwargs):
return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_naming_convention(self):
for model_class in self.all_model_classes:
model_class_name = model_class.__name__
module_class_name = (
model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module"
)
bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name])
module_cls = getattr(bert_modeling_flax_module, module_class_name)
self.assertIsNotNone(module_cls)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model does not output attentions")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_length = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# Question Answering model returns start_logits and end_logits
if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_load_with_mismatched_shapes(self):
if not self.test_mismatched_shapes:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
with self.assertRaises(ValueError):
new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10)
logger = logging.get_logger("transformers.modeling_flax_utils")
with CaptureLogger(logger) as cl:
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs_dict)["logits"]
self.assertEqual(logits.shape[1], 42)
with CaptureLogger(logger) as cl:
new_model_without_prefix = FlaxAutoModel.from_pretrained(
tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
input_ids = ids_tensor((2, 8), 10)
if self.is_encoder_decoder:
new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
else:
new_model_without_prefix(input_ids)
def test_default_params_dtype(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# check if all params are still in float32 when dtype of computation is half-precision
model = model_class(config, dtype=jnp.float16)
types = jax.tree_util.tree_map(lambda x: x.dtype, model.params)
types = flatten_dict(types)
for name, type_ in types.items():
                self.assertEqual(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.")
def test_to_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to bf16
params = model.to_bf16(model.params)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in bf16
for name, type_ in types.items():
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
params = model.to_bf16(model.params, mask)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in bf16 except key
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.")
else:
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
def test_to_fp16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to fp16
params = model.to_fp16(model.params)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in fp16
for name, type_ in types.items():
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
params = model.to_fp16(model.params, mask)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in fp16 except key
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.")
else:
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
def test_to_fp32(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to fp16 and back to fp32
params = model.to_fp16(model.params)
params = model.to_fp32(params)
# test if all params are in fp32
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
# cast to fp16 and back to fp32 with mask
params = model.to_fp16(model.params)
params = model.to_fp32(params, mask)
# test if all params are in fp32 except key
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.")
else:
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.")
def test_save_load_in_fp16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# convert weights to fp16 and save
params = model.to_fp16(model.params)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
# load the weights again and check if they are still in fp16
model = model_class.from_pretrained(tmpdirname)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
def test_save_load_in_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# convert weights to bf16 and save
params = model.to_bf16(model.params)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
                # load the weights again and check if they are still in bf16
model = model_class.from_pretrained(tmpdirname)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
def test_model_main_input_name(self):
for model_class in self.all_model_classes:
model_signature = inspect.signature(getattr(model_class, "__call__"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(model_class.main_input_name, observed_main_input_name)
def test_headmasking(self):
if not self.test_head_masking:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
if i == 0:
return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)])
if i == num_hidden_layers - 1:
return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)])
return np.ones(attention_heads, dtype=jnp.int32)
for model_class in self.all_model_classes:
model = model_class(config)
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
# Prepare head mask
inputs["head_mask"] = np.stack(
[
_prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
for i in range(config.num_hidden_layers)
]
)
outputs = model(**inputs)
def _check_attentions_validity(attentions):
# Remove NaN
for t in attentions:
# Check we don't have more than 25% nans (arbitrary)
self.assertLess(np.isnan(t).sum(), t.size / 4)
attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions]
self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0)
self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0)
            if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0)
self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0)
self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0)
if model.config.is_encoder_decoder:
raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.")
else:
_check_attentions_validity(outputs.attentions)
def test_no_automatic_init(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
model = model_class(config, _do_init=False)
            # Check that accessing params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
params = model.params
            # Check if params can be properly initialized when calling init_weights
params = model.init_weights(model.key, model.input_shape)
self.assertIsInstance(params, FrozenDict)
            # Check if all required params are initialized
keys = set(flatten_dict(unfreeze(params)).keys())
self.assertTrue(all(k in keys for k in model.required_params))
# Check if the shapes match
flat_params = flatten_dict(unfreeze(params))
for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items():
self.assertEqual(
v.shape,
flat_params[k].shape,
"Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape),
)
            # Check that setting params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
model.params = params
# Check if we can do a forward pass
inputs_dict["output_hidden_states"] = True
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
model(**inputs, params=params)
def test_from_pretrained_with_no_automatic_init(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
def _assert_all_params_initialised(model, params):
            # Check if all required params are loaded
keys = set(flatten_dict(unfreeze(params)).keys())
self.assertTrue(all(k in keys for k in model.required_params))
# Check if the shapes match
flat_params = flatten_dict(unfreeze(params))
for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items():
self.assertEqual(
v.shape,
flat_params[k].shape,
"Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape),
)
for model_class in self.all_model_classes:
# init the model
model = model_class(config)
# save the model in the temporary directory
# load the saved model with _do_init=False
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model, params = model_class.from_pretrained(tmpdirname, _do_init=False)
                # Check that accessing params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
params = model.params
                # Check if all required params are loaded
_assert_all_params_initialised(model, params)
                # Check that setting params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
model.params = params
# Check if init_weights initializes missing keys from from_pretrained
flat_params = flatten_dict(unfreeze(params))
random_key = random.choice(list(flat_params.keys()))
flat_params.pop(random_key)
params = freeze(unflatten_dict(flat_params))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
model, params = model_class.from_pretrained(tmpdirname, _do_init=False)
params = model.init_weights(model.key, model.input_shape, params=params)
                # Check if all required params are loaded
_assert_all_params_initialised(model, params)
def test_checkpoint_sharding_from_hub(self):
model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded")
# the model above is the same as the model below, just a sharded version.
ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()):
assert np.allclose(np.array(p1), np.array(p2))
def test_checkpoint_sharding_local(self):
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
with tempfile.TemporaryDirectory() as tmp_dir:
# We use the same folder for various sizes to make sure a new save erases the old checkpoint.
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]:
model.save_pretrained(tmp_dir, max_shard_size=max_size)
# Get each shard file and its size
shard_to_size = {}
for shard in os.listdir(tmp_dir):
if shard.endswith(".msgpack"):
shard_file = os.path.join(tmp_dir, shard)
shard_to_size[shard_file] = os.path.getsize(shard_file)
index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME)
# Check there is an index but no regular weight file
self.assertTrue(os.path.isfile(index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME)))
# Check a file is bigger than max_size only when it has a single weight
for shard_file, size in shard_to_size.items():
if max_size.endswith("kiB"):
max_size_int = int(max_size[:-3]) * 2**10
else:
max_size_int = int(max_size[:-2]) * 10**3
# Note: pickle adds some junk so the weight of the file can end up being slightly bigger than
# the size asked for (since we count parameters)
if size >= max_size_int + 50000:
with open(shard_file, "rb") as state_f:
state_file = from_bytes(FlaxBertModel, state_f.read())
self.assertEqual(len(state_file), 1)
# Check the index and the shard files found match
with open(index_file, "r", encoding="utf-8") as f:
index = json.loads(f.read())
all_shards = set(index["weight_map"].values())
shards_found = set(f for f in os.listdir(tmp_dir) if f.endswith(".msgpack"))
self.assertSetEqual(all_shards, shards_found)
# Finally, check the model can be reloaded
new_model = FlaxBertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()):
self.assertTrue(np.allclose(np.array(p1), np.array(p2)))
@is_pt_flax_cross_test
def test_from_sharded_pt(self):
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True)
ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only")
for key, ref_val in flatten_dict(ref_model.params).items():
val = flatten_dict(model.params)[key]
assert np.allclose(np.array(val), np.array(ref_val))
def test_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
remat_model = model_class(config)
try:
remat_model.enable_gradient_checkpointing()
except NotImplementedError:
continue
outputs = model(**prepared_inputs_dict)
remat_outputs = remat_model(**prepared_inputs_dict)
# ensure that the dicts of outputs contain the same keys
self.assertEqual(outputs.keys(), remat_outputs.keys())
outputs = outputs.to_tuple()
remat_outputs = remat_outputs.to_tuple()
# ensure that the outputs remain precisely equal
for output, remat_output in zip(outputs, remat_outputs):
self.assertTrue((output == remat_output).all())
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
set_access_token(TOKEN)
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-model-flax")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
except HTTPError:
pass
def test_push_to_hub(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
model.push_to_hub("test-model-flax", use_auth_token=self._token)
new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
# Reset repo
delete_repo(token=self._token, repo_id="test-model-flax")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)
new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_push_to_hub_in_organization(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)
new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
)
new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
models_are_equal = True
flat_params_1 = flatten_dict(model1.params)
flat_params_2 = flatten_dict(model2.params)
for key in flat_params_1.keys():
if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
models_are_equal = False
return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
def test_model_from_pretrained_subfolder(self):
config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
model = FlaxBertModel(config)
subfolder = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir, subfolder))
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(tmp_dir)
model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
self.assertTrue(check_models_equal(model, model_loaded))
def test_model_from_pretrained_subfolder_sharded(self):
config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
model = FlaxBertModel(config)
subfolder = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(tmp_dir)
model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
self.assertTrue(check_models_equal(model, model_loaded))
def test_model_from_pretrained_hub_subfolder(self):
subfolder = "bert"
model_id = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(model_id)
model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
self.assertIsNotNone(model)
def test_model_from_pretrained_hub_subfolder_sharded(self):
subfolder = "bert"
model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(OSError):
_ = FlaxBertModel.from_pretrained(model_id)
model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
self.assertIsNotNone(model)
| [
"[email protected]"
] | |
3bb45f868041fdd2eef7e1579e0956513f0ae960 | 7f622971d347057bdfea90d84f6c64e4fdbee418 | /news_crawl/guardian/middlewares.py | 825f870d12cf8394663bd0ca28a00ea7c57d941d | [] | no_license | beharasatya/News_Crawler_Guardian | a29730c812562572328a40a2266bc584db9946b3 | bbd1d96989f03e49a6befdd5ac9589c0f92da648 | refs/heads/master | 2021-09-04T18:51:51.480775 | 2018-01-21T09:38:48 | 2018-01-21T09:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class GuardianSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
90a6b0bf6ba220f42d8a29c89dc93492396116ff | 350f0a5e56c83b4db157fe06137e929ab0b07f75 | /models/tf_Cifar_OC_NN_Models.py | ad5660b15bfd3541fd0a08df12d27286fc2d736d | [] | no_license | LiTangqing/Cleaned-OC-NN | 4869e2db22fae4ce9f53e296b020ac945904a617 | 4c814626f69225215d27f11e3e316a7e7b299199 | refs/heads/master | 2020-04-05T03:33:38.447519 | 2018-11-07T09:56:51 | 2018-11-07T09:56:51 | 156,519,328 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,546 | py | import time
import csv
from itertools import zip_longest
import matplotlib as plt
import tensorflow as tf
import numpy as np
import os
RANDOM_SEED = 42
g = lambda x : 1/(1 + tf.exp(-x))
def nnScore(X, w, V, g):
return tf.matmul(g((tf.matmul(X, w))), V)
def relu(x):
y = x
y[y < 0] = 0
return y
def write_decisionScores2Csv(path, filename, positiveScores, negativeScores):
newfilePath = path+filename
print ("Writing file to ", path+filename)
poslist = positiveScores
neglist = negativeScores
# rows = zip(poslist, neglist)
d = [poslist, neglist]
export_data = zip_longest(*d, fillvalue='')
with open(newfilePath, 'w') as myfile:
wr = csv.writer(myfile)
wr.writerow(("Normal", "Anomaly"))
wr.writerows(export_data)
myfile.close()
return
def tf_OneClass_NN_linear(data_train,data_test,nu, verbose=True):
tf.reset_default_graph()
tf.set_random_seed(RANDOM_SEED)
train_X = data_train
x_size = train_X.shape[1]
print ("Input Shape:",x_size)
h_size = 16
y_size = 1
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=1)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = (tf.matmul(X, w_1)) #
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : x
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def relu1(x):
y = x
y = tf.nn.relu(x)
return y
def relu(x):
with sess.as_default():
x = x.eval()
y = x
y[y < 0] = 0
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(tf.nn.relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.AdamOptimizer(0.05).minimize(cost)
# Run optimization routine after initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
start_time = time.time()
for epoch in range(100):
# Train with each example
sess.run(updates, feed_dict={X: train_X,r:rvalue})
rvalue = nnScore(train_X, w_1, w_2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue, q=100*nu)
if verbose:
print("Epoch = %d, r = %f" % (epoch + 1,rvalue))
trainTime = time.time() - start_time
### Get the optimized weights here
start_time = time.time()
train = nnScore(train_X, w_1, w_2, g)
test = nnScore(test_X, w_1, w_2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print ("====== Session Completed ======")
pos_decisionScore = arrayTrain-rstar
#pos_decisionScore[pos_decisionScore < 0] = 0 # why this?
neg_decisionScore = arrayTest-rstar
pos_decisionScore = pos_decisionScore.reshape(-1)
neg_decisionScore = neg_decisionScore.reshape(-1)
write_decisionScores2Csv(os.getcwd()+'/Decision_Scores/', 'oc_nn_linear_cifar.csv',
pos_decisionScore, neg_decisionScore)
# write_decisionScores2Csv(decision_scorePath, "OneClass_NN_linear.csv", pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore,trainTime,testTime]
def tf_OneClass_NN_sigmoid(data_train,data_test,nu, verbose=True):
tf.reset_default_graph()
sess = tf.Session()
train_X = data_train
tf.set_random_seed(RANDOM_SEED)
# Layer's sizes
    x_size = train_X.shape[1]  # Number of input nodes
print ("Input Shape:", x_size)
h_size = 16 # Number of hidden nodes
    y_size = 1  # Number of output nodes
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
# def getActivations(layer, stimuli):
# units = sess.run(layer, feed_dict={x: np.reshape(stimuli, [1, 784], order='F'), keep_prob: 1.0})
# plotNNFilter(units)
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=0.00001)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : 1/(1 + tf.exp(-x))
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def data_rep(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
return g((tf.matmul(X, w)))
def relu(x):
y = tf.nn.relu(x)
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
test_X = data_test
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
start_time = time.time()
for epoch in range(100):
# Train with each example
units = sess.run(updates, feed_dict={X: train_X,r:rvalue})
# plotNNFilter(units)
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
rvalue = nnScore(train_X, w1, w2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
if verbose:
print("Epoch = %d, r = %f" % (epoch + 1,rvalue))
trainTime = time.time() - start_time
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
start_time = time.time()
train = nnScore(train_X, w1, w2, g)
test = nnScore(test_X, w1, w2, g)
train_rep = data_rep(train_X, w1, w2, g)
test_rep = data_rep(test_X, w1, w2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
arraytrain_rep =train_rep.eval()
arraytest_rep= test_rep.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print ("====== Session Completed ======")
pos_decisionScore = arrayTrain-rstar
# pos_decisionScore[pos_decisionScore< 0] = 0 ## Clip all the negative values to zero
neg_decisionScore = arrayTest-rstar
pos_decisionScore = pos_decisionScore.reshape(-1)
neg_decisionScore = neg_decisionScore.reshape(-1)
write_decisionScores2Csv(os.getcwd()+'/Decision_Scores/', 'oc_nn_sigmoid_cifar.csv',
pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore,trainTime,testTime]
def tf_OneClass_NN_relu(data_train,data_test,nu, verbose=True):
tf.reset_default_graph()
sess = tf.Session()
tf.set_random_seed(RANDOM_SEED)
train_X = data_train
    x_size = train_X.shape[1]  # Number of input nodes
print ("Input Shape:", x_size)
h_size = 16 # Number of hidden nodes
    y_size = 1  # Number of output nodes
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=0.00001)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : relu(x)
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def relu(x):
y = tf.nn.relu(x)
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
start_time = time.time()
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
for epoch in range(100):
# Train with each example
sess.run(updates, feed_dict={X: train_X,r:rvalue})
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
rvalue = nnScore(train_X, w1, w2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
if verbose:
print("Epoch = %d, r = %f" % (epoch + 1,rvalue))
trainTime = time.time() - start_time
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
start_time = time.time()
train = nnScore(train_X, w1, w2, g)
test = nnScore(test_X, w1, w2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
rstar =rvalue
sess.close()
print ("====== Session Completed ======")
pos_decisionScore = arrayTrain-rstar
# pos_decisionScore[pos_decisionScore< 0] = 0 ## Clip all the negative values to zero
neg_decisionScore = arrayTest-rstar
pos_decisionScore = pos_decisionScore.reshape(-1)
neg_decisionScore = neg_decisionScore.reshape(-1)
write_decisionScores2Csv(os.getcwd()+'/Decision_Scores/', 'oc_nn_sigmoid_relu.csv',
pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore, trainTime, testTime]
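# A minimal usage sketch (not part of the original module): the feature
# matrices, sizes, and nu value below are stand-ins rather than real CIFAR
# encodings, and the output directory is created so the CSV writes succeed.
if __name__ == "__main__":
    os.makedirs(os.getcwd() + '/Decision_Scores/', exist_ok=True)
    rng = np.random.RandomState(RANDOM_SEED)
    X_train = rng.normal(size=(256, 32)).astype("float32")
    X_test = rng.normal(loc=3.0, size=(64, 32)).astype("float32")
    pos, neg, train_time, test_time = tf_OneClass_NN_linear(X_train, X_test, nu=0.04, verbose=False)
    print(len(pos), len(neg), train_time, test_time)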
| [
"[email protected]"
] | |
d0684e191884794bcca60c9a003d3a736017998e | f8ece22d9e9e12e2cbca56d72a6b2728ba9a275a | /polyaxon/experiments/utils.py | 50329e5e6fe312b3cb5120c878e85833117c63a9 | [
"MIT"
] | permissive | pparan/polyaxon | 8c8912f9ba724e007357efcaefeab86fec2d5630 | 423199721e90431209b00c0f76caa6b4f9aa4b24 | refs/heads/master | 2021-04-15T07:15:19.701268 | 2018-03-21T11:59:12 | 2018-03-21T11:59:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from experiments.models import Experiment
def is_experiment_still_running(experiment_id=None, experiment_uuid=None):
if not any([experiment_id, experiment_uuid]) or all([experiment_id, experiment_uuid]):
raise ValueError('`is_experiment_still_running` function expects an experiment id or uuid.')
try:
if experiment_uuid:
experiment = Experiment.objects.get(uuid=experiment_uuid)
else:
experiment = Experiment.objects.get(id=experiment_id)
except Experiment.DoesNotExist:
return False
if not experiment.is_running:
return False
return True
| [
"[email protected]"
] | |
9812d4b96bf0a572295fd6c0ddb188d7e8343f0e | ae3e23956d8d831a6934570fc8ff3ec1218a8934 | /hwk10_leapfrog.py | 5f7d38024a53fa21522608e51b08a552391f59f9 | [] | no_license | Kay-Towner/Homework10 | 04cecd1686ff15543d858cb5e3f5a6a5336b2e94 | 2451c2b15ad9acaef9521e8da7ab330aa2ad1449 | refs/heads/main | 2023-04-10T12:15:51.209962 | 2021-04-15T02:02:53 | 2021-04-15T02:02:53 | 357,039,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | #By Kay Towner
import math
import numpy as np
import matplotlib.pyplot as plt
def dif(x=None, dxdt=None):
    """Differential equation to solve, rearranged for the second
    derivative (reading the original residual d2 - dxdt**2 + x + 5 = 0):
    d2x/dt2 = dxdt**2 - x - 5."""
    return dxdt**2 - x - 5
def frog(t=None, h=None, x=None, dxdt=None, f=None):
    """Leapfrog method to run on dif: x advances on whole steps and the
    velocity v = dx/dt on half steps:
        x(t+h)    = x(t) + h*v(t+h/2)
        v(t+3h/2) = v(t+h/2) + h*f(x(t+h), v(t+h/2))"""
    xs = []
    v_half = dxdt + 0.5*h*f(x, dxdt)  # initial half-step for the velocity
    for _ in t:
        xs.append(x)
        x = x + h*v_half
        v_half = v_half + h*f(x, v_half)
    return np.array(xs), t
if __name__ == "__main__":
    #VARIABLES:
    h = 0.001 #step size
    t = np.arange(0, 50, h) #time
    x = 1 #initial condition (position)
    dxdt = 0 #initial condition (velocity)
    xs, t = frog(t=t, h=h, x=x, dxdt=dxdt, f=dif)
    print(xs)
    plt.plot(t, xs)
    plt.show()
| [
"[email protected]"
] | |
0f86bc2bb49aeaa1ea80641c646f9bb2d8c08163 | be9d900c1c4971d7fb3647ba39802ea1c63a0e7d | /baithicuoiki.1.1.py | 122965e30eb08c1b3858e871858dc09d61b3e170 | [] | no_license | leduykhanh2001/KTPMUD_DV18CLC_18H1040047 | 1c7e2c8e52cb70b463f3d2c4c3298c4f6ca6a04b | 4e6981791abca202b32061234b1894ed94c84cbb | refs/heads/main | 2023-05-12T21:43:59.783862 | 2021-05-25T01:41:14 | 2021-05-25T01:41:14 | 370,515,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | class complex:
    # A simple container for a complex number: real part, imaginary part and
    # modulus (the original methods never stored anything, so this is a
    # reconstruction of the apparent intent).
    def __init__(self, complex_real, complex_image, complex_module=None):
        self.complex_real = complex_real
        self.complex_image = complex_image
        if complex_module is None:
            complex_module = (complex_real**2 + complex_image**2) ** 0.5
        self.complex_module = complex_module
class person:
    def __init__(self, person_name, person_my_complex):
        self.person_name = person_name
        self.person_my_complex = person_my_complex
# Reconstructed usage: literals such as float(7-j2) in the original are read
# as the complex numbers 7-2j, 5 and 2+3j (a best guess at the intent).
person_com1 = person('le duy khanh', complex(7, -2))
person_com2 = person('huynh pham que lam', complex(5, 0))
person_com3 = person('le duy khanh', complex(2, 3))
print(person_com1.person_name, person_com1.person_my_complex.complex_module)
print(person_com2.person_name, person_com2.person_my_complex.complex_module)
print(person_com3.person_name, person_com3.person_my_complex.complex_module)
| [
"[email protected]"
] | |
3b37848f68add020cd5d254cdc317cb60dc17c29 | ba6105cbef80245d6a19215d343b2a7890a30271 | /Unit 4- Data Structures/Ch 4.4- File Input and Output/Coding Problem 4.4.3.py | de5b01a50ecb947c881ef7f789133b04c345edd6 | [] | no_license | pyl135/Introduction-to-Computing-using-Python | a9a3674cd9088d50c9eef0f46ac6da5f084f9a2e | 92e94ce2d7a23a45fa00a2907f69682e25e6ed48 | refs/heads/master | 2021-04-12T10:43:33.757692 | 2018-03-30T07:07:08 | 2018-03-30T07:07:08 | 126,672,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | #Write a function called "reader" that reads in a ".cs1301"
#file described in the previous problem. The function should
#return a list of tuples representing the lines in the file like so:
#
#[(line_1_number, line_1_assignment_name, line_1_grade, line_1_total, line_1_weight),
#(line_2_number, line_2_assignment_name, line_2_grade, line_2_total, line_2_weight)]
#
#All items should be of type int except for the name (string)
#and the weight (float). You can assume the file will be in the
#proper format.
#
#Hint: Although you could use readlines() to read in all
#the lines at once, they would all be strings, not a list.
#You still need to go line-by-line and convert each string
#to a list.
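#
#For reference, one line of the ".cs1301" file is assumed here to hold five
#whitespace-separated fields in the order (number, assignment name, grade,
#total, weight), for example:
#
#   1 HW1 85 100 0.1
#
#which reader() should turn into the tuple (1, 'HW1', 85, 100, 0.1). These
#example values are an assumption; the actual sample.cs1301 is not shown here.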
#Write your function here!
def reader(filename):
output = open(filename, "r")
array= ()
sum = []
for line in output:
each = line.split()
one = int(each[0])
two = each[1]
three = int(each[2])
four = int(each[3])
five = float(each[4])
array = (one,two,three,four,five)
sum.append(array)
    output.close()
    return sum
#We have supplied the same sample.cs1301 from the previous
#exercise. Feel free to test your code with it to see if it
#works:
print(reader("sample.cs1301"))
| [
"[email protected]"
] | |
c24bd142a33242ce550b621b5f054a0ae066ddc7 | f8faa223d8ba64caab5a732bc6d1d9a944b62aa7 | /tests/integration/loss/test_multi_op.py | 01ec80e9fdb4dfb7ee583f90ff5518ad7e4e45ab | [
"BSD-3-Clause"
] | permissive | pystiche/pystiche | 2f53e26f38b7fe96ec29259a084ba8ab2c2a9d36 | 71217c24557dfba05da5795547bf6f3034e7c66f | refs/heads/main | 2023-04-13T04:01:40.275142 | 2022-03-18T21:59:12 | 2022-03-18T21:59:12 | 208,798,287 | 138 | 36 | BSD-3-Clause | 2023-04-11T12:31:29 | 2019-09-16T12:49:12 | Python | UTF-8 | Python | false | false | 3,016 | py | import pytorch_testing_utils as ptu
import torch
from torch import nn
import pystiche
from pystiche import enc, loss, ops
from tests.asserts import assert_named_modules_identical
from tests.utils import suppress_deprecation_warning
@suppress_deprecation_warning
def test_MultiOperatorLoss():
class TestOperator(ops.Operator):
def process_input_image(self, image):
pass
named_ops = [(str(idx), TestOperator()) for idx in range(3)]
multi_op_loss = loss.MultiOperatorLoss(named_ops)
actuals = multi_op_loss.named_children()
desireds = named_ops
assert_named_modules_identical(actuals, desireds)
@suppress_deprecation_warning
def test_MultiOperatorLoss_trim():
class TestOperator(ops.EncodingOperator):
def __init__(self, encoder, **kwargs):
super().__init__(**kwargs)
self._encoder = encoder
@property
def encoder(self):
return self._encoder
def forward(self, image):
pass
layers = [str(idx) for idx in range(3)]
modules = [(layer, nn.Module()) for layer in layers]
multi_layer_encoder = enc.MultiLayerEncoder(modules)
ops_ = (("op", TestOperator(multi_layer_encoder.extract_encoder(layers[0])),),)
loss.MultiOperatorLoss(ops_, trim=True)
assert layers[0] in multi_layer_encoder
assert all(layer not in multi_layer_encoder for layer in layers[1:])
@suppress_deprecation_warning
def test_MultiOperatorLoss_call():
class TestOperator(ops.Operator):
def __init__(self, bias):
super().__init__()
self.bias = bias
def process_input_image(self, image):
return image + self.bias
input = torch.tensor(0.0)
named_ops = [(str(idx), TestOperator(idx + 1.0)) for idx in range(3)]
multi_op_loss = loss.MultiOperatorLoss(named_ops)
actual = multi_op_loss(input)
desired = pystiche.LossDict([(name, input + op.bias) for name, op in named_ops])
ptu.assert_allclose(actual, desired)
@suppress_deprecation_warning
def test_MultiOperatorLoss_call_encode(forward_pass_counter):
class TestOperator(ops.EncodingOperator):
def __init__(self, encoder, **kwargs):
super().__init__(**kwargs)
self._encoder = encoder
@property
def encoder(self):
return self._encoder
def forward(self, image):
return torch.sum(self.encoder(image))
modules = (("count", forward_pass_counter),)
multi_layer_encoder = enc.MultiLayerEncoder(modules)
ops_ = [
(str(idx), TestOperator(multi_layer_encoder.extract_encoder("count")),)
for idx in range(3)
]
multi_op_loss = loss.MultiOperatorLoss(ops_)
torch.manual_seed(0)
input = torch.rand(1, 3, 128, 128)
multi_op_loss(input)
actual = forward_pass_counter.count
desired = 1
assert actual == desired
multi_op_loss(input)
actual = forward_pass_counter.count
desired = 2
assert actual == desired
| [
"[email protected]"
] | |
2591cc81fd5627fc8a9f64a4682768c4fd98f5ce | 90c4326a1adc57476aea35ec18ba35f303765065 | /Stack.py | 4141878319d12f05cd69321beed2ddce74bb1a08 | [] | no_license | shyamsundar7897/Automata | d09b4695fc9292a867d6eaece89a4e28268e4632 | 32d47484b108cd04434b77ab395e26c68c19e591 | refs/heads/master | 2020-03-17T16:23:28.773729 | 2018-05-17T02:40:46 | 2018-05-17T02:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class Stack:
# Stack item for expression conversion
def __init__(self):
# Constructor for Stack
self.stack = []
self.top = -1
def push(self, val):
# Push item into stack
self.top += 1
self.stack.append(val)
def pop(self):
# Return item from stack
if self.top < 0:
raise Exception('Stack Empty => Enter a correct expression')
else:
self.top -= 1
return self.stack.pop()
def isEmpty(self):
# Check if stack is empty
if self.top == -1:
return True
return False | [
"[email protected]"
] | |
3ae5a44b48257791e208650dc401ec8f6fbc5c64 | 6842e3fe3b21215859df6a61fddfd7a9b65b1ce3 | /Simple Server-Client/client.py | cb265c912446b8dfaac2dc18b9add077cbd6a891 | [] | no_license | SiriK404/Python-Socket-Programming | bc47c90ddb480787e6f9b35c7ccd393c27f93016 | 0c70ce0e8eae29a69ad8e4d6db972fdc4e56a4a2 | refs/heads/master | 2020-11-26T04:45:00.503136 | 2019-12-26T07:08:30 | 2019-12-26T07:08:30 | 228,966,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #!/usr/bin/python3
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((socket.gethostname(),4444))
s.send(bytes("I am CLIENt",'utf-8'))
msg=s.recv(1024)
s.close()
print(msg.decode('utf-8')) | [
"[email protected]"
] | |
b0cd844306784feeb3f0c8b18593d18f729e49f3 | 45b54b5063a548861a7971635679776dc13e5299 | /bidnet.py | 5164ee1ace7090f524a774070290513201425161 | [] | no_license | samsmusa/python-scraping | eb39274628f798c62e8099a40f5c1783b48d8bb4 | b5788aac42652e59302ebf3dc6276f7ddcfa2bc9 | refs/heads/main | 2023-06-11T06:30:16.944977 | 2021-07-01T16:43:33 | 2021-07-01T16:43:33 | 382,097,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | import requests
from tqdm import tqdm
from bs4 import BeautifulSoup
import pandas as pd
headers1 = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"}
#state list
# sate = ["texas","alabama" ]
#data variable list
name_bid = []
region_bid = []
published = []
end = []
#progressbar
pbar = tqdm(total = 100, desc= "Collecting...", unit= "num")
#url
base_url = "https://www.bidnetdirect.com"
# url = "https://www.bidnetdirect.com/alabama/solicitations/open-bids/page1"
url = "https://www.bidnetdirect.com/solicitations/open-bids?selectedContent=AGGREGATE"
#get source of page
def get_data(url):
html = requests.get(url, headers= headers1)
soup = BeautifulSoup(html.text, "lxml")
return soup
#collect data from page
def parse(soup, c):
content = soup.find('table', class_='mets-table')
for te in tqdm(content.find_all('tbody'), desc= f'site {c}'):
rows = te.find_all('tr')
for row in rows:
name = row.find('a', class_="solicitation-link mets-command-link")
region = row.find('td', class_='region')
s_date = row.find('td', class_='dates publication-date')
end_date = row.find('td', class_='dates closing-date')
try:
name_bid.append(name.text.strip())
region_bid.append(region.text.strip())
published.append(s_date.text.strip())
end.append(end_date.text.strip())
except:
pass
#go next page
def next_page(soup, base_url):
next = soup.find("a", class_= "next mets-pagination-page-icon")
if next:
url = base_url + next["href"]
return url
else:
return False
c = 1
#main loop
while True:
soup = get_data(url)
parse(soup, c)
url = next_page(soup, base_url)
# print(url)
pbar.update(1)
c += 1
if not url:
break
#save data
bid = {
"name" : name_bid,
"region": region_bid,
"Published": published,
"End": end,
}
df = pd.DataFrame(bid)
# df.to_html(open('googl11e.html', 'w'),escape=False)
df.to_csv("bid_us.csv")
| [
"[email protected]"
] | |
6cc0b40552a7b84b67654c5343748b10becaba83 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /qbittorrent_examples/common.py | ddc95e8e8fe8667135cad88bfda306fb07fca849 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
from typing import List, Dict
from pathlib import Path
# pip install tabulate
from tabulate import tabulate
# pip install python-qbittorrent
from qbittorrent import Client
from config import IP_HOST, USER, PASSWORD
sys.path.append(str(Path(__file__).resolve().parent.parent))
from human_byte_size import sizeof_fmt
def print_table(rows: List[List[str]], headers: List[str], show_index=True):
if show_index:
show_index = range(1, len(rows) + 1)
text = tabulate(rows, headers=headers, tablefmt="grid", showindex=show_index)
print(text)
def print_files_table(files: List[Dict]):
rows = [(file['name'], sizeof_fmt(file['size'])) for file in sorted(files, key=lambda x: x['name'])]
headers = ['#', 'File Name', 'Size']
print_table(rows, headers)
def print_torrents(torrents: List[Dict]):
total_size = 0
for i, torrent in enumerate(torrents, 1):
torrent_size = torrent['total_size']
total_size += torrent_size
print(f"{i:3}. {torrent['name']} ({sizeof_fmt(torrent_size)})")
print()
print(f'Total torrents: {len(torrents)}, total size: {sizeof_fmt(total_size)} ({total_size} bytes)')
def get_client() -> Client:
client = Client(IP_HOST)
client.login(USER, PASSWORD)
return client
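if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original module):
    # list every torrent known to the configured qBittorrent Web UI.
    client = get_client()
    print_torrents(client.torrents())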
| [
"[email protected]"
] | |
c589a73bdb953c385df8a3734ad9b98afacc6e90 | 72839718a4b47b1babd4ad895ecd503a0a0e14d2 | /stembot/executor/ticket.py | fff953e6803693a1039328f71b3155b6a80807ac | [
"MIT"
] | permissive | phnomcobra/stembot-python | 6fb0d9a2874fc1bb8b8e5cf69e9f4d39c38dba5a | 497dd782556d62eeb9e9301f9de37332d93207d7 | refs/heads/master | 2021-06-17T10:56:33.148454 | 2021-02-23T00:58:00 | 2021-02-23T00:58:00 | 174,921,135 | 0 | 0 | MIT | 2021-02-07T03:48:14 | 2019-03-11T03:44:21 | Python | UTF-8 | Python | false | false | 13,314 | py | #!/usr/bin/python3
ASYNC_TICKET_TIMEOUT = 3600
SYNC_TICKET_TIMEOUT = 15
import traceback
from base64 import b64encode, b64decode
from time import time, sleep
from threading import Thread, Timer
from stembot.dao.ramdocument import Collection as RAMCollection
from stembot.dao.document import Collection as SQLCollection
from stembot.adapter.agent import MPIClient
from stembot.model.peer import create_peer
from stembot.model.peer import delete_peer
from stembot.model.peer import delete_peers
from stembot.model.peer import get_peers
from stembot.model.peer import get_routes
from stembot.model import kvstore
from stembot.adapter.python import interpret
from stembot.adapter.file import create_file_handle
from stembot.adapter.file import close_file_handle
from stembot.adapter.file import file_handle_read
from stembot.adapter.file import file_handle_write
from stembot.adapter.file import file_handle_seek
from stembot.adapter.file import file_handle_tell
from stembot.adapter.file import file_handle_truncate
from stembot.adapter.process import create_process_handle
from stembot.adapter.process import process_handle_status
from stembot.adapter.process import process_handle_kill
from stembot.adapter.process import process_handle_terminate
from stembot.adapter.process import process_handle_wait
from stembot.adapter.process import process_handle_recv
from stembot.adapter.process import process_handle_send
from stembot.adapter.process import close_process_handle
from stembot.executor.cascade import create_cascade_request
from stembot.executor.cascade import create_anonymous_cascade_request
from stembot.executor.cascade import get_cascade_responses
from stembot.executor.cascade import pop_cascade_responses
from stembot.executor.cascade import wait_on_cascade_responses
from stembot.executor.counters import increment as ctr_increment
from stembot.executor.counters import get_all as ctr_get_all
from stembot.executor.timers import register_timer
def create_ticket(request):
ctr_increment('tickets created')
tickets = RAMCollection('tickets')
ticket = tickets.get_object()
ticket.object['src'] = kvstore.get(name='agtuuid')
if 'dest' in request:
ticket.object['dest'] = request['dest']
else:
ticket.object['dest'] = kvstore.get(name='agtuuid')
ticket.object['timestamp'] = time()
ticket.object['request'] = request
ticket.object['response'] = None
ticket.set()
message = {}
message['type'] = 'ticket request'
message['src'] = ticket.object['src']
message['request'] = ticket.object['request']
message['dest'] = ticket.object['dest']
message['tckuuid'] = ticket.object['objuuid']
return message
def process_ticket(message):
ctr_increment('tickets processed')
message['type'] = 'ticket response'
message['src'], message['dest'] = message['dest'], message['src']
request = message['request']
response = {}
try:
if request['type'] == 'discover peer':
if 'ttl' in request:
ttl = request['ttl']
else:
ttl = None
if 'polling' in request:
polling = request['polling']
else:
                polling = False
create_peer(
MPIClient(
request['url'],
kvstore.get(name='secret_digest')
).send_json({'type': 'create info event'})['dest'],
url=request['url'],
ttl=ttl,
polling=polling
)
response = request
elif request['type'] == 'create peer':
if 'url' in request:
url = request['url']
else:
url = None
if 'ttl' in request:
ttl = request['ttl']
else:
ttl = None
if 'polling' in request:
polling = request['polling']
else:
polling = False
create_peer(
request['agtuuid'],
url=url,
ttl=ttl,
polling=polling
)
response = request
elif request['type'] == 'delete peers':
delete_peers()
response = request
elif request['type'] == 'delete peer':
delete_peer(request['agtuuid'])
response = request
elif request['type'] == 'get peers':
response = get_peers()
elif request['type'] == 'get routes':
response = get_routes()
elif request['type'] == 'get counters':
response = ctr_get_all()
elif request['type'] == 'file handle open':
response['fhduuid'] = create_file_handle(
request['filename'],
request['mode']
)
response['type'] = request['type']
elif request['type'] == 'file handle close':
close_file_handle(request['fhduuid'])
response = request
elif request['type'] == 'file handle read':
if 'size' in request:
response['b64data'] = b64encode(
file_handle_read(
request['fhduuid'],
request['size']
)
).decode()
else:
response['b64data'] = b64encode(
file_handle_read(
request['fhduuid']
)
).decode()
response['type'] = request['type']
elif request['type'] == 'file handle write':
file_handle_write(
request['fhduuid'],
b64decode(request['b64data'])
)
response = request
elif request['type'] == 'file handle truncate':
file_handle_truncate(request['fhduuid'], request['size'])
response = request
elif request['type'] == 'file handle seek':
file_handle_seek(request['fhduuid'], request['position'])
response = request
elif request['type'] == 'file handle tell':
response['position'] = file_handle_tell(request['fhduuid'])
response['type'] = request['type']
elif request['type'] == 'process handle create':
response['phduuid'] = create_process_handle(request['command'])
response['type'] = request['type']
elif request['type'] == 'process handle status':
response['status'] = process_handle_status(request['phduuid'])
elif request['type'] == 'process handle kill':
process_handle_kill(request['phduuid'])
response = request
elif request['type'] == 'process handle terminate':
process_handle_terminate(request['phduuid'])
response = request
elif request['type'] == 'process handle wait':
process_handle_wait(request['phduuid'])
response = request
elif request['type'] == 'process handle close':
close_process_handle(request['phduuid'])
response = request
elif request['type'] == 'process handle send':
process_handle_send(request['phduuid'], b64decode(request['b64data']))
response = request
elif request['type'] == 'process handle recv':
stdout, stderr = process_handle_recv(request['phduuid'])
response['stdout b64data'] = b64encode(stdout).decode()
response['stderr b64data'] = b64encode(stderr).decode()
response['type'] = request['type']
elif request['type'] == 'create cascade async':
response = create_cascade_request(request)
elif request['type'] == 'create cascade anon':
create_anonymous_cascade_request(request)
response = request
elif request['type'] == 'create cascade sync':
if 'timeout' in request:
response = wait_on_cascade_responses(
create_cascade_request(request)['cscuuid'],
request['timeout']
)
else:
response = wait_on_cascade_responses(
create_cascade_request(request)['cscuuid']
)
elif request['type'] == 'get cascade responses':
response = get_cascade_responses(request['cscuuid'])
elif request['type'] == 'pull cascade responses':
response = pop_cascade_responses(request['cscuuid'])
elif request['type'] == 'delete collection':
SQLCollection(request['name']).destroy()
response = request
elif request['type'] == 'rename collection':
SQLCollection(request['name']).rename(request['new name'])
response = request
elif request['type'] == 'create collection attribute':
SQLCollection(request['name']).create_attribute(
request['attribute'],
request['path']
)
response = request
elif request['type'] == 'delete collection attribute':
SQLCollection(request['name']).delete_attribute(request['attribute'])
response = request
elif request['type'] == 'find collection objects':
response = []
for temp in SQLCollection(request['name']).find(**request['query']):
response.append(temp.object)
elif request['type'] == 'find collection object uuids':
response = SQLCollection(request['name']).find_objuuids(**request['query'])
elif request['type'] == 'get collection object':
if 'objuuid' in request:
response = SQLCollection(request['name']).get_object(request['objuuid']).object
else:
response = SQLCollection(request['name']).get_object().object
elif request['type'] == 'set collection object':
response = request
c = SQLCollection(request['name'])
o = c.get_object(request['object']['objuuid'])
o.object = request['object']
o.set()
elif request['type'] == 'delete collection object':
response = request
SQLCollection(request['name']).get_object(request['objuuid']).destroy()
elif request['type'] == 'list collection object uuids':
response = SQLCollection(request['name']).list_objuuids()
elif request['type'] == 'ping':
response = request
elif request['type'] == 'execute python':
response['status'], response['stdout'], response['stderr'] = interpret(request['body'])
else:
raise Exception('Unknown request type!')
except:
response['exception'] = traceback.format_exc()
message['response'] = response
return message
def service_ticket(message):
ctr_increment('tickets serviced')
tickets = RAMCollection('tickets')
ticket = tickets.get_object(message['tckuuid'])
ticket.object['response'] = message['response']
ticket.set()
def wait_on_ticket_response(tckuuid, timeout=None):
tickets = RAMCollection('tickets')
    if timeout is None:
timeout = SYNC_TICKET_TIMEOUT
while True:
ticket = tickets.get_object(tckuuid)
if time() - ticket.object['timestamp'] > timeout:
ticket.destroy()
raise Exception('Ticket timeout period reached!')
        if ticket.object['response'] is not None:
response = ticket.object['response']
ticket.destroy()
break
sleep(1.0)
return response
def get_ticket_response(tckuuid):
tickets = RAMCollection('tickets')
ticket = tickets.get_object(tckuuid)
response = ticket.object['response']
return response
def delete_ticket(tckuuid):
RAMCollection('tickets').get_object(tckuuid).destroy()
def worker():
tickets = RAMCollection('tickets')
for objuuid in tickets.list_objuuids():
ticket = tickets.get_object(objuuid)
try:
if time() - ticket.object['timestamp'] > ASYNC_TICKET_TIMEOUT:
ticket.destroy()
ctr_increment('tickets expired')
except:
ticket.destroy()
register_timer(
name='ticket_worker',
target=worker,
timeout=ASYNC_TICKET_TIMEOUT
).start()
Thread(target=worker).start()
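# Taken together, create_ticket, process_ticket, service_ticket and
# wait_on_ticket_response form a small request/response ticket lifecycle.
# Illustrative walk-through (a sketch only, kept commented out because it
# assumes the agent's key-value store and RAM collections are already
# initialised elsewhere in the agent):
#
#   msg = create_ticket({'type': 'ping'})           # open a ticket for a local 'ping' request
#   reply = process_ticket(msg)                     # what the destination agent would do
#   service_ticket(reply)                           # attach the response to the stored ticket
#   print(wait_on_ticket_response(msg['tckuuid']))  # -> {'type': 'ping'}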
| [
"[email protected]"
] | |
b85e6af344facb6e0df6e9ed8dff20da26f7144a | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/merge-strings-alternately.py | 107572aa3949742adfc4813ca836790e9dbcd7cc | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 471 | py | # Time: O(m + n)
# Space: O(1)
class Solution(object):
def mergeAlternately(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: str
"""
result = []
i = 0
while i < len(word1) or i < len(word2):
if i < len(word1):
result.append(word1[i])
if i < len(word2):
result.append(word2[i])
i += 1
return "".join(result)
| [
"[email protected]"
] | |
d3d2478915380b6f8d4f5778c5babd647003d786 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/show_instance_result_response.py | 89a066b6d19712691fb0599b6d0fc736ad86c3d5 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,168 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowInstanceResultResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int',
'resources': 'list[SubInstanceResult]'
}
attribute_map = {
'count': 'count',
'resources': 'resources'
}
def __init__(self, count=None, resources=None):
"""ShowInstanceResultResponse
The model defined in huaweicloud sdk
        :param count: Total count
:type count: int
:param resources: resources
:type resources: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
super(ShowInstanceResultResponse, self).__init__()
self._count = None
self._resources = None
self.discriminator = None
if count is not None:
self.count = count
if resources is not None:
self.resources = resources
@property
def count(self):
"""Gets the count of this ShowInstanceResultResponse.
        Total count
:return: The count of this ShowInstanceResultResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ShowInstanceResultResponse.
        Total count
:param count: The count of this ShowInstanceResultResponse.
:type count: int
"""
self._count = count
@property
def resources(self):
"""Gets the resources of this ShowInstanceResultResponse.
resources
:return: The resources of this ShowInstanceResultResponse.
:rtype: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ShowInstanceResultResponse.
resources
:param resources: The resources of this ShowInstanceResultResponse.
:type resources: list[:class:`huaweicloudsdkdataartsstudio.v1.SubInstanceResult`]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowInstanceResultResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
10d2b852e2f224d780d3d3948914efb1963d11e2 | 4c05c92b6020d0e61a6a110b7562b86a256fc962 | /Turtle Magic.py | 08bbe144a0e10b500e4a1d59011b5ce512e606ad | [] | no_license | Aaron250907/PythonCourse1 | 9bd93696973720c262d49e26be453d3b54e240fd | 43fa7de48333ce953f7c3436cc77b9930e2142de | refs/heads/main | 2023-03-14T03:27:06.844190 | 2021-02-20T09:15:53 | 2021-02-20T09:15:53 | 340,611,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import turtle
colors = ['red', 'blue','green', 'yellow', 'purple', 'orange']
t = turtle.Pen()
turtle.bgcolor('black')
for x in range(360):
t.pencolor(colors[x%6])
t.width(x/100 + 1)
t.forward(x)
t.left(59) | [
"[email protected]"
] | |
ca17fee06b16873c1bf01a9602a2b6e6347d8b01 | f675a690b62250847b514ace399c2bb7860528f9 | /ZIFS.py | b0e5818588dee37abcd7d781d37fcfa637c0c83b | [] | no_license | adkingston/final-project-programs | a30b5bb5abcfbb4e95d19030c1e4ab2ec05c5034 | dd7db1a4484194162f756ae702743a05f7c7cd53 | refs/heads/master | 2021-01-13T10:14:31.507196 | 2017-08-18T16:56:21 | 2017-08-18T16:56:21 | 69,599,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | import numpy as np
import matplotlib.pyplot as plt
import pylab as pyl
D, R = np.arange(0.0, 1.0+1e-7, 0.1), np.arange(0.0, 2.0+1.e-7, 0.11)
A = [[a, b] for a in D for b in R]
def z1(s):
return [[0.25*x[0], 0.5*x[1]] for x in s]
def z2(s):
return [[-0.25*x[0]+0.5, -0.5*x[1]+2] for x in s]
def z3(s):
return [[-0.25*x[0] + 0.75, 0.5*x[1] + 1] for x in s]
def z4(s):
return [[0.25*x[0] + 0.75, 0.5*x[1] + 1] for x in s]
def iterations(ifs, seed, steps):
assert isinstance(ifs, list)
if steps < 1:
return seed
else:
next_step = []
for func in ifs:
next_step += func(seed)
next_step = iterations(ifs, next_step, steps-1)
return next_step
a = [[2., 3.]]
A1 = iterations([z1, z2, z3, z4], a, 7)
X1 = [z[0] for z in A1]
Y1 = [z[1] for z in A1]
# # # fig = plt.figure()
plt.plot(X1, Y1, 'bo', markersize=1, markeredgewidth=0.1)
pyl.show()
# fig.savefig("C:\\Users\\Alexander\\OneDrive\\Documents\\School
# \\University of St. Andrews\\Year 4\\MT4599
# Dissertation\\Main Document\\images\\A6.png")
# def hausdorff_dist(A, B):
# dists = []
# temp = []
# for a in A:
# for b in B:
# d = math.sqrt(abs(a[0] - b[0])**2 + abs(a[1] - b[1])**2)
# temp.append(d)
# dists.append(min(temp))
# temp = []
# return max(dists)
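# The commented-out sketch above is incomplete as written (it would also need
# `math` to be imported). A runnable version of the same idea, added here as a
# separate hedged example rather than a change to the original lines:
import math

def hausdorff_dist_example(A, B):
    # Directed Hausdorff distance from point set A to point set B: for each
    # point of A take the distance to its nearest point of B, then return the
    # largest of those nearest-neighbour distances.
    dists = []
    for a in A:
        nearest = min(math.hypot(a[0] - b[0], a[1] - b[1]) for b in B)
        dists.append(nearest)
    return max(dists)

# Hypothetical check: the two single-point sets below are 3-4-5 apart, so 5.0.
# print(hausdorff_dist_example([[0.0, 0.0]], [[3.0, 4.0]]))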
| [
"[email protected]"
] | |
9edb6fb910255cf29713ca49bd8f2e57d1186ea7 | a5aa3e80fe2e97cc9de3d42be873fdf468a68968 | /a10_openstack_lib/resources/a10_scaling_group.py | 312cd817ddd0ea9bc95f2784cacf72012e30ba03 | [
"Apache-2.0"
] | permissive | Cedev/a10-openstack-lib | 60911420f781db99f9d7456be5c4c707985c3c2d | 23c6a5ae2cfaeb5bb950e96be3a79c3b0e014247 | refs/heads/master | 2020-04-05T22:53:54.765410 | 2016-06-07T23:02:01 | 2016-06-07T23:02:01 | 61,076,970 | 0 | 0 | null | 2016-06-13T23:41:50 | 2016-06-13T23:41:49 | Python | UTF-8 | Python | false | false | 12,633 | py | # Copyright (C) 2016 A10 Networks Inc. All rights reserved.
EXTENSION = 'a10-scaling-group'
SERVICE = "A10_SCALING_GROUP"
SCALING_GROUPS = 'a10_scaling_groups'
SCALING_GROUP = 'a10_scaling_group'
SCALING_GROUP_WORKERS = 'a10_scaling_group_workers'
SCALING_GROUP_WORKER = 'a10_scaling_group_worker'
SCALING_POLICIES = 'a10_scaling_policies'
SCALING_POLICY = 'a10_scaling_policy'
SCALING_ALARMS = 'a10_scaling_alarms'
SCALING_ALARM = 'a10_scaling_alarm'
SCALING_ACTIONS = 'a10_scaling_actions'
SCALING_ACTION = 'a10_scaling_action'
ALARM_UNITS = ['count', 'percentage', 'bytes']
ALARM_AGGREGATIONS = ['avg', 'min', 'max', 'sum']
ALARM_MEASUREMENTS = ['connections', 'memory', 'cpu', 'interface']
ALARM_OPERATORS = ['>=', '>', '<=', '<']
ALARM_PERIOD_UNITS = ['minute', 'hour', 'day']
ACTIONS = ['scale-in', 'scale-out']
RESOURCE_ATTRIBUTE_MAP = {
SCALING_GROUPS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'scaling_policy_id': {
'allow_post': True,
'allow_put': True,
'validate': {
'a10_type:nullable': {
'type:uuid': None,
'a10_type:reference': SCALING_POLICY
}
},
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
}
},
SCALING_GROUP_WORKERS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'scaling_group_id': {
'allow_post': True,
'allow_put': False,
'validate': {
'type:uuid': None,
'a10_type:reference': SCALING_GROUP
},
'is_visible': True
},
'host': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True
},
'username': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True
},
'password': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': False
},
'api_version': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:values': ['2.1', '3.0']
},
'is_visible': True
},
'protocol': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:values': ['http', 'https']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
},
'port': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:range': [0, 65535]
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
},
'nova_instance_id': {
'allow_post': False,
'allow_put': False,
'validate': {
'type:uuid': None
},
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
}
},
SCALING_POLICIES: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'cooldown': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
'default': 300,
},
'min_instances': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
'default': 1,
},
'max_instances': {
'allow_post': True,
'allow_put': True,
'validate': {
'a10_type:nullable': {
'type:non_negative': None
}
},
'convert_to': lambda attr: convert_nullable(attr.convert_to_int),
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
},
'reactions': {
'allow_post': True,
'allow_put': True,
'convert_list_to': lambda attr: attr.convert_kvp_list_to_dict,
'is_visible': True,
'default': lambda attr: attr.ATTR_NOT_SPECIFIED
}
},
SCALING_ALARMS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'aggregation': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['avg', 'min', 'max', 'sum']
},
'is_visible': True,
'convert_to': lambda attr: convert_to_lower,
'default': 'avg'
},
'measurement': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['connections', 'memory', 'cpu', 'interface']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
},
'operator': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['>=', '>', '<=', '<']
},
'is_visible': True
},
'threshold': {
'allow_post': True,
'allow_put': True,
'validate': {
'a10_type:float': None
},
'convert_to': lambda attr: convert_to_float,
'is_visible': True
},
'unit': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['count', 'percentage', 'bytes']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
},
'period': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
},
'period_unit': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['minute', 'hour', 'day']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
}
},
SCALING_ACTIONS: {
'id': {
'allow_post': False,
'allow_put': True,
'validate': {
'type:uuid': None
},
'is_visible': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True,
'allow_put': False,
'required_by_policy': True,
'is_visible': True
},
'name': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': ''
},
'description': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:string': None
},
'is_visible': True,
'default': '',
},
'action': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:values': ['scale-in', 'scale-out']
},
'convert_to': lambda attr: convert_to_lower,
'is_visible': True
},
'amount': {
'allow_post': True,
'allow_put': True,
'validate': {
'type:non_negative': None
},
'convert_to': lambda attr: attr.convert_to_int,
'is_visible': True,
},
}
}
def convert_to_lower(input):
try:
return input.lower()
except AttributeError:
return input
def convert_to_float(input):
try:
return float(input)
except ValueError:
return input
def convert_nullable(convert_value):
def f(input):
if input is not None:
return convert_value(input)
return None
return f
def validate_float(data, options):
if not isinstance(data, float):
return "'%s' is not a number" % input
def validate_reference(data, options):
"""Referential integrity is enforced by the data model"""
return None
def validate_nullable(validators):
def f(data, options):
if data is not None:
for rule in options:
value_validator = validators[rule]
reason = value_validator(data, options[rule])
if reason:
return reason
return f
VALIDATORS = {
'a10_type:float': lambda validators: validate_float,
'a10_type:reference': lambda validators: validate_reference,
'a10_type:nullable': validate_nullable
}
| [
"[email protected]"
] | |
ec6fcf93cc3d0262a6b7b598ec122b19591057aa | f217883abc9daffecdafec5794068a5ca6adb905 | /MathOperationsP5.py | 00acbad3dd8e167bf09f55a7b3f12b6cf98a3bff | [] | no_license | DamianVega/P1.HelloWorld | 693592cd4175118afcf34790958d3751f156ce21 | 9a636ac31e54481e6fcefe5e3ab7c2d0799d8003 | refs/heads/master | 2020-04-05T01:22:45.045323 | 2019-05-13T18:47:50 | 2019-05-13T18:47:50 | 156,433,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | a = 25
b = 10
c = 7
# Adding
print("Display the sum of:",a,"+",c,"=",a+c)
# Subtracting
print("Display the difference of:",a,"-",c,"=",a-c)
# Multiplying
print("Display the multiplication of:",a,"*",c,"=",a*c)
# Division
print("Display the division of:",a,"/",c,"=",a/c)
# Integer Division
print("Display the integer division of:",a,"//",c,"=",a//c)
# The remainder of Integer Division, %
print("Display the remainder of integer division of:",a,"%",c,"=",a%c)
print(a, "Modulus",c,"=",a%c)
# Power of a Number
print("2 to the 5th power =",2**5) | [
"[email protected]"
] | |
34e6d9bd427d80013aeb40dfba6f4734f2d186e4 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Datasets/py-if-else/Correct/076.py | 2c07f238adcfd70b429c52cda3509dc1a5eb15ba | [] | no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #!/bin/python3
import sys
N = int(input().strip())
if(N%2==0) :
if (N<5 or N>20):
print('Not Weird')
else :
print ("Weird")
else :
print ("Weird") | [
"[email protected]"
] | |
54073a0a96169761ca6e309c1f572aa135b71df0 | 682319f56c17e949bab0d6e418838d33977dd760 | /RP/search_element.py | 6bddc659f268253cf4d1a9296c7704a8a0a4f81b | [] | no_license | DilipBDabahde/PythonExample | 8eb70773a783b1f4b6cf6d7fbd2dc1302af8aa1b | 669762a8d9ee81ce79416d74a4b6af1e2fb63865 | refs/heads/master | 2020-08-23T01:05:44.788080 | 2020-07-25T21:59:52 | 2020-07-25T21:59:52 | 216,511,985 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | '''
Write a program which accepts N numbers from the user and stores them in a list.
Accept one more number from the user and return the frequency of that number in the list.
input: Num of elements: 12
input Elements: 5 8 6 8 5 9 3 7 2 21 1 5
Element to search = 5
output: Freq of search element is: 3
'''
def search_Element(arr, iNo):
	if len(arr) == 0: # empty list guard
return -1;
icnt = 0; # icnt is counter variable which is used to increament it's value by One when we get our Element
for i in range(0, len(arr)):
if arr[i] == iNo:
icnt = icnt + 1;
return icnt;
def main():
arr_list = list(); # arr_list is object of list class , this object is used to add elements in it
size = input("Enter list size: ");
size = int(size); # type conversion of size variable str to int
print("Enter elements for list");
for i in range(0, size):
no = input("Enter element: ");
no = int(no); # type conversion
arr_list.append(no); # appending element to list class object
#now our list is created using loop iteration
print("Created list is: ",arr_list);
search_var = input("Enter number to search its freq:");
search_var = int(search_var);
result =search_Element(arr_list, search_var);
if result > 0 :
print("FReq of given variable in list is: ",result);
elif result == 0:
print("There is no element in list ");
else:
print("Invalid input");
if __name__ == "__main__":
main();
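# For comparison, the same frequency query can be answered with the standard
# library. This is an alternative sketch, not part of the original exercise:
from collections import Counter

def search_element_counter(arr, target):
    # Counter builds the whole frequency table in one pass; indexing a
    # missing key returns 0 instead of raising.
    return Counter(arr)[target]

# Example from the docstring above:
# search_element_counter([5, 8, 6, 8, 5, 9, 3, 7, 2, 21, 1, 5], 5)  ->  3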
| [
"[email protected]"
] | |
e24d433767d920ff680b986ff07f6b0f6fe496bf | 61863803f0e010020f0a7ff0210e86809b94e965 | /day4/raspberrypi-codes/button-led/button-interfacing.py | f80e1efdafda5babfe46df5d4ca91093836b7735 | [] | no_license | maddydevgits/bitspilani-hyd-iot-bootcamp | 1aa23d4584bddec188996da60ab58675c30b1f3a | 44c64caaf247360252f6d9d9ccf868b1bc5a218e | refs/heads/main | 2023-06-23T23:41:00.534483 | 2021-07-30T16:24:10 | 2021-07-30T16:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # sudo python3 rpi-button-led.py
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.IN) # BUTTON (OC - 1, CC - 0)
GPIO.setup(21, GPIO.OUT) # LED (0 - ON, 1 - OFF)
while True: # Infinite Loop
if GPIO.input(2): # reading the data from GPIO2
GPIO.output(21,1) # OFF
else:
GPIO.output(21,0) # ON
| [
"[email protected]"
] | |
3ca0f0f9c4cc6b4dfa80991dd9bbeb1c6b3a23a9 | bb5ddd4543e790a78764af3b775ee23a1842cde2 | /scripts/valhalla_build_extract | 259634bf2522a16d08dd45b38f14db5bb6c57ed0 | [
"MIT"
] | permissive | molind/valhalla | ee8fcd88eb2b546af8381ab015e25a06063c0847 | 52f869ea2cc192ab31e9d5e75170cab29694059c | refs/heads/master | 2022-09-20T20:38:17.346015 | 2022-09-14T17:48:39 | 2022-09-14T17:48:39 | 84,201,422 | 0 | 0 | null | 2017-03-07T13:20:31 | 2017-03-07T13:20:31 | null | UTF-8 | Python | false | false | 7,488 | #!/usr/bin/env python3
import argparse
import ctypes
from io import BytesIO
import json
import logging
import os
from pathlib import Path
import struct
import sys
import tarfile
from tarfile import BLOCKSIZE
from time import time
from typing import List, Tuple
# "<" prefix means little-endian and no alignment
# order is important! if uint64_t is not first, c++ will use padding bytes to unpack
INDEX_BIN_FORMAT = '<QLL'
INDEX_BIN_SIZE = struct.calcsize(INDEX_BIN_FORMAT)
INDEX_FILE = "index.bin"
# skip the first 40 bytes of the tile header
GRAPHTILE_SKIP_BYTES = struct.calcsize('<Q2f16cQ')
TRAFFIC_HEADER_SIZE = struct.calcsize('<2Q4I')
TRAFFIC_SPEED_SIZE = struct.calcsize('<Q')
class TileHeader(ctypes.Structure):
"""
Resembles the uint64_t bit field at bytes 40 - 48 of the
graphtileheader to get the directededgecount_.
"""
_fields_ = [
("nodecount_", ctypes.c_ulonglong, 21),
("directededgecount_", ctypes.c_ulonglong, 21),
("predictedspeeds_count_", ctypes.c_ulonglong, 21),
("spare1_", ctypes.c_ulonglong, 1),
]
description = "Builds a tar extract from the tiles in mjolnir.tile_dir to the path specified in mjolnir.tile_extract."
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-c", "--config", help="Absolute or relative path to the Valhalla config JSON.", type=Path
)
parser.add_argument(
"-i",
"--inline-config",
help="Inline JSON config, will override --config JSON if present",
type=str,
default='{}',
)
parser.add_argument(
"-t", "--with-traffic", help="Flag to add a traffic.tar skeleton", action="store_true", default=False
)
parser.add_argument(
"-v",
"--verbosity",
help="Accumulative verbosity flags; -v: INFO, -vv: DEBUG",
action='count',
default=0,
)
# set up the logger basics
LOGGER = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)5s: %(message)s"))
LOGGER.addHandler(handler)
def get_tile_count(in_path: Path) -> int:
"""Iterates over the full tree and returns the count of all tiles it found."""
count = 0
for _, _, files in os.walk(in_path):
count += len(list(filter(lambda f: f.endswith('.gph'), files)))
return count
def get_tile_id(path: str) -> int:
"""Turns a tile path into a numeric GraphId"""
level, idx = path[:-4].split('/', 1)
return int(level) | (int(idx.replace('/', '')) << 3)
def get_tar_info(name: str, size: int) -> tarfile.TarInfo:
"""Creates and returns a tarinfo object"""
tarinfo = tarfile.TarInfo(name)
tarinfo.size = size
tarinfo.mtime = int(time())
tarinfo.type = tarfile.REGTYPE
return tarinfo
def write_index_to_tar(tar_fp_: Path):
"""Loop through all tiles and write the correct index.bin file to the tar"""
# get the offset and size from the tarred tile members
index: List[Tuple[int, int, int]] = list()
with tarfile.open(tar_fp_, 'r|') as tar:
for member in tar.getmembers():
if member.name.endswith('.gph'):
LOGGER.debug(
f"Tile {member.name} with offset: {member.offset_data}, size: {member.size}"
)
index.append((member.offset_data, get_tile_id(member.name), member.size))
# write back the actual index info
with open(tar_fp_, 'r+b') as tar:
# jump to the data block, index.bin is the first file
tar.seek(BLOCKSIZE)
for entry in index:
tar.write(struct.pack(INDEX_BIN_FORMAT, *entry))
def create_extracts(config_: dict, do_traffic: bool):
"""Actually creates the tar ball. Break out of main function for testability."""
tiles_fp: Path = Path(config_["mjolnir"].get("tile_dir", '/dev/null'))
extract_fp: Path = Path(
config_["mjolnir"].get("tile_extract") or tiles_fp.parent.joinpath('tiles.tar')
)
traffic_fp: Path = Path(
config_["mjolnir"].get("traffic_extract") or tiles_fp.parent.joinpath('traffic.tar')
)
if not tiles_fp.is_dir():
LOGGER.critical(
f"Directory 'mjolnir.tile_dir': {tiles_fp.resolve()} was not found on the filesystem."
)
sys.exit(1)
tiles_count = get_tile_count(tiles_fp)
if not tiles_count:
LOGGER.critical(f"Directory {tiles_fp} does not contain any usable graph tiles.")
sys.exit(1)
# write the in-memory index file
index_size = INDEX_BIN_SIZE * tiles_count
index_fd = BytesIO(b'0' * index_size)
index_fd.seek(0)
# first add the index file, then the sorted tiles to the tarfile
# TODO: come up with a smarter strategy to cluster the tiles in the tar
with tarfile.open(extract_fp, 'w') as tar:
tar.addfile(get_tar_info(INDEX_FILE, index_size), index_fd)
for t in sorted(tiles_fp.rglob('*.gph')):
tar.add(str(t.resolve()), arcname=str(t.relative_to(tiles_fp)))
write_index_to_tar(extract_fp)
LOGGER.info(f"Finished tarring {tiles_count} tiles to {extract_fp}")
# exit if no traffic extract wanted
if not do_traffic:
index_fd.close()
sys.exit(0)
LOGGER.info("Start creating traffic extract...")
# we already have the right size of the index file, simply reset it
index_fd.seek(0)
with tarfile.open(extract_fp) as tar_in, tarfile.open(traffic_fp, 'w') as tar_traffic:
# this will let us do seeks
in_fileobj = tar_in.fileobj
# add the index file as first data
tar_traffic.addfile(get_tar_info(INDEX_FILE, index_size), index_fd)
index_fd.close()
# loop over all routing tiles and create fixed-size traffic tiles
# based on the directed edge count
for tile_in in tar_in.getmembers():
if not tile_in.name.endswith('.gph'):
continue
# jump to the data's offset and skip the uninteresting bytes
in_fileobj.seek(tile_in.offset_data + GRAPHTILE_SKIP_BYTES)
# read the appropriate size of bytes from the tar into the TileHeader struct
tile_header = TileHeader()
b = BytesIO(in_fileobj.read(ctypes.sizeof(TileHeader)))
b.readinto(tile_header)
b.close()
# create the traffic tile
traffic_size = TRAFFIC_HEADER_SIZE + TRAFFIC_SPEED_SIZE * tile_header.directededgecount_
tar_traffic.addfile(get_tar_info(tile_in.name, traffic_size), BytesIO(b'\0' * traffic_size))
LOGGER.debug(f"Tile {tile_in.name} has {tile_header.directededgecount_} directed edges")
write_index_to_tar(traffic_fp)
LOGGER.info(f"Finished creating the traffic extract at {traffic_fp}")
if __name__ == '__main__':
args = parser.parse_args()
if not args.config and not args.inline_config:
LOGGER.critical("No valid config file or inline config used.")
sys.exit(1)
config = dict()
try:
with open(args.config) as f:
config = json.load(f)
except TypeError:
LOGGER.warning("Only inline-config will be used.")
# override with inline-config
config.update(**json.loads(args.inline_config))
# set the right logger level
if args.verbosity == 0:
LOGGER.setLevel(logging.CRITICAL)
elif args.verbosity == 1:
LOGGER.setLevel(logging.INFO)
elif args.verbosity >= 2:
LOGGER.setLevel(logging.DEBUG)
create_extracts(config, args.with_traffic)
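# For completeness, the index records written by write_index_to_tar() can be
# read back using the same '<QLL' layout. The reader below is an illustrative
# sketch, not part of the shipped tool (tarfile, struct, INDEX_FILE and
# INDEX_BIN_FORMAT are the ones defined at the top of this script):
#
#   def read_index(tar_path):
#       # each record: data offset into the tar, numeric graph id, tile size
#       with tarfile.open(tar_path, 'r') as tar:
#           data = tar.extractfile(INDEX_FILE).read()
#       return list(struct.iter_unpack(INDEX_BIN_FORMAT, data))
#
#   for offset, graph_id, size in read_index('tiles.tar'):
#       print(offset, graph_id, size)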
| [
"[email protected]"
] | ||
5694f828530a430b4aca5569f67e50d0baf88575 | aff694b019806db8f8cd66fd205f9049351bb10c | /bin/wheel | e54d9f83eb92ea97085a22f82f854bd08e745464 | [] | no_license | mikilabarda/my-first-blog | 3885d08f87e9c3f05da7000b9e60d29f3895efd3 | 7e1476fa75e6db95bfe8685ad43a233777166071 | refs/heads/master | 2021-05-30T19:25:38.022284 | 2016-03-20T05:31:16 | 2016-03-20T05:31:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | #!/Users/Miki/Desktop/env/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
c02c67a431312e3b0710cf872c3477faeee8222e | f21ea752700b3afa0729bfa6520ab1c6702e6189 | /tools/graph_bag/scripts/poses.py | c92f7140d19b516cf9465625ffc36620dbebdbad | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"MIT",
"MPL-2.0",
"MPL-1.0",
"LGPL-2.1-or-later",
"Apache-2.0",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"LGPL-3.0-only",
"CC-BY-NC-4.0",
"GPL-3.0-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | SemaSandbox/astrobee | 01594a46c80d8730be6f5ef753b44ab67a178f1d | e09fba15f241ce044e9b66d19a4767b547c2b720 | refs/heads/master | 2023-07-17T00:21:03.560819 | 2021-07-22T18:46:14 | 2021-07-22T18:46:14 | 391,620,249 | 1 | 0 | Apache-2.0 | 2021-08-01T17:12:56 | 2021-08-01T12:24:35 | null | UTF-8 | Python | false | false | 1,806 | py | #!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import vector3ds
import orientations
import scipy.spatial.transform
class Poses(object):
def __init__(self, pose_type, topic):
self.positions = vector3ds.Vector3ds()
self.orientations = orientations.Orientations()
self.times = []
self.pose_type = pose_type
self.topic = topic
def add_pose(self, pose_msg, timestamp, bag_start_time=0):
self.positions.add(pose_msg.position.x, pose_msg.position.y, pose_msg.position.z)
euler_angles = scipy.spatial.transform.Rotation.from_quat(
[pose_msg.orientation.x, pose_msg.orientation.y, pose_msg.orientation.z,
pose_msg.orientation.w]).as_euler('ZYX', degrees=True)
self.orientations.add(euler_angles[0], euler_angles[1], euler_angles[2])
self.times.append(timestamp.secs + 1e-9 * timestamp.nsecs - bag_start_time)
def add_msg(self, msg, timestamp, bag_start_time=0):
self.add_pose(msg.pose, timestamp, bag_start_time)
def position_vector(self, index):
return [self.positions.xs[index], self.positions.ys[index], self.positions.zs[index]]
| [
"[email protected]"
] | |
7acf5941940c678da4795277f2ddd08749ad98a3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03192/s975847643.py | 0b87008f474274d7ec53b07ee4ec58d374c6d871 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | n = int(input())
li = []
while n > 0:
li.append(n%10)
n //= 10
li.reverse()
ans = 0
for i in range(len(li)):
if li[i] == 2:
ans += 1
print(ans) | [
"[email protected]"
] |