blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-283) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-41) | license_type (stringclasses 2 values) | repo_name (stringlengths 7-96) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 12.7k-662M ⌀) | star_events_count (int64 0-35.5k) | fork_events_count (int64 0-20.6k) | gha_license_id (stringclasses 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 43 values) | src_encoding (stringclasses 9 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 7-5.88M) | extension (stringclasses 30 values) | content (stringlengths 7-5.88M) | authors (sequencelengths 1-1) | author (stringlengths 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ee5154e4a3e0cd71941ed12982005cc98c973533 | 992fb973ec03068346c725bbb2e9b9f54cb6d558 | /revision.py | 91eb4bbcd700596da82b0bfd6f690601df7c86f2 | [] | no_license | vojoup/pokusy_python | 3bc684faf7c692f5e27bf3b4b13dd4b3f71c4b87 | 78b37ddb4e53d28c6064e429ebd0a5c8dd30241c | refs/heads/master | 2021-01-19T00:23:07.029598 | 2017-04-04T08:32:52 | 2017-04-04T08:32:52 | 87,163,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | #written by usingpython.com
#allows us to access a random 'key' in the dictionary
import random
#the questions/answer dictionary
my_dict = {
#    "Do you love Zuzanka?" : "Of course",
    "Number system that uses the characters 0-F" : "hexadecimal",
"7-bit text encoding standard" : "ascii",
"16-bit text encoding standard" : "unicode",
"A number that is bigger than the maximum number that can be stored" : "overflow",
"8 bits" : "byte",
"1024 bytes" : "kilobyte",
"Picture Element. The smallest component of a bitmapped image" : "pixel",
"A continuously changing wave, such as natural sound" : "analogue",
"the number of times per second that a wave is measured" : "sample rate",
"A bunary representation of a program" : "machine code"
}
#welcome message
print("Computing Revision Quiz")
print("=======================")
#the quiz will end when this variable becomes 'False'
playing = True
#While the game is running
while playing == True:
#set the score to 0
score = 0
#gets the number of questions the player wants to answer
num = int(input("\nHow many questions would you like: "))
#loop the correct number of times
for i in range(num):
#the question is one of the dictionary keys, picked at random
question = (random.choice( list(my_dict.keys())))
#the answer is the string mapped to the question key
answer = my_dict[question]
#print the question, along with the question number
print("\nQuestion " + str(i+1) )
print(question + "?")
#get the user's answer attempt
guess = input("> ")
#if their guess is the same as the answer
if guess.lower() == answer.lower():
#add 1 to the score and print a message
print("Correct!")
score += 1
else:
print("Nope!")
#after the quiz, print their final score
print("\nYour final score was " + str(score))
#store the user's input...
again = input("Enter any key to play again, or 'q' to quit.")
    #... and quit if they type 'q'
if again.lower() == 'q':
playing = False
| [
"[email protected]"
] | |
578ecb92d16d1ca2da11a71e6aeb893f168da275 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-4295.py | b26269dd917ef0108851f9fca82d438ee34c6c2e | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,289 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
        elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
273a2e0109802764b4323222fe539993df97fdd1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/335/usersdata/293/100631/submittedfiles/matriz1.py | d3968016a50b896aa83760e4097b6abf19b5ee9e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # -*- coding: utf-8 -*-
import numpy as np
### CREATE THE MATRIX ###
matriz=[]
m=int(input("Enter the number of rows: "))
n=int(input("Enter the number of columns: "))
for i in range(0,m,1):
    matriz_linha=[]
    for j in range(0,n,1):
        matriz_linha.append(int(input("Enter the value of element (%d,%d): "%(i+1,j+1))))
    matriz.append(matriz_linha)
print(matriz)
### CHECK FOR NULL (ALL-ZERO) ROWS ###
for i in range(len(matriz)-1,-1,-1):
    cont_linhas=0
    for j in range(0,n,1):
        if matriz[i][j]==0:
            cont_linhas= cont_linhas + 1
    if cont_linhas==n:  # remove the row only when every entry is zero
        matriz.pop(i)
print(matriz)
### NULL (ALL-ZERO) COLUMNS ###
#for j in range(n-1,-1,-1):
#    cont_colunas=0
#    for i in range(len(matriz)-1,-1,-1):
#        if matriz[i][j]==0:
#            cont_colunas= cont_colunas+ 1
#    if cont_colunas==len(matriz):
#        for k in range(len(matriz)-1,-1,-1):
#            matriz[k].pop(j)
#print(matriz) | [
"[email protected]"
] | |
06da86f0dc92577533174c9d475266b8cea806e0 | 8a202074cee11cb3e6f58aa097be776154fe9886 | /app.py | 4aa7badcb21957a7d3b17121696565ecd6e013e1 | [] | no_license | abhinith1234/insurance- | c80453cd2c8a4b9995728997836597f5951c2042 | 62c1266b2a9b3ea4d68e12174f9298f3957e0558 | refs/heads/master | 2023-08-07T09:49:22.177085 | 2021-10-09T08:34:05 | 2021-10-09T08:34:05 | 415,249,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | from flask import Flask, request, url_for, redirect, render_template, jsonify
from pycaret.regression import *
import pandas as pd
import pickle
import numpy as np
app = Flask(__name__)
model = load_model('deployment_28042020')
cols = ['age', 'sex', 'bmi', 'children', 'smoker', 'region']
@app.route('/')
def home():
return render_template("home.html")
@app.route('/predict',methods=['POST'])
def predict():
int_features = [x for x in request.form.values()]
final = np.array(int_features)
data_unseen = pd.DataFrame([final], columns = cols)
prediction = predict_model(model, data=data_unseen, round = 0)
prediction = int(prediction.Label[0])
return render_template('home.html',pred='Expected Bill will be {}'.format(prediction))
@app.route('/predict_api',methods=['POST'])
def predict_api():
data = request.get_json(force=True)
data_unseen = pd.DataFrame([data])
prediction = predict_model(model, data=data_unseen)
output = prediction.Label[0]
return jsonify(output)
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=5000, debug=True)
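
# Example client for the /predict_api endpoint above (a sketch; the field names
# are assumed to match the training columns in `cols`):
def example_api_call():
    import requests
    sample = {'age': 35, 'sex': 'male', 'bmi': 27.5, 'children': 2,
              'smoker': 'no', 'region': 'southwest'}
    r = requests.post('http://localhost:5000/predict_api', json=sample)
    return r.json()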
| [
"[email protected]"
] | |
180079399426d78f9f9573051dd66b3ebc738ca6 | db4cab37fadb26ed370affb72db5434e0f8ec790 | /characters.py | c02055696076a1aa87134fc455c64c2586c16835 | [] | no_license | ocastudios/bobots | 2292888996f77a300421603906a360956c579614 | 0b6e844e1aa3d1d8826177fc31e44b085f24c19c | refs/heads/master | 2021-10-11T05:43:35.433620 | 2015-09-21T17:26:45 | 2015-09-21T17:26:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | Lilly = {
'relative': 'Ms_Honey',
'town': 'SquirrelTown',
'area': 'School',
'win_phrase': "And isn't he pretty, too?",
'loss_phrase': "Mine is still the cutest",
'bet_base': 20,
'reward':10,
'bobots': [
{'type':'rodent1',
'attacks': ['push1'],
'fuel':['piston30'],
'coat':'yellow'
},
]
}
Parker = {
'relative': 'Ms_Red',
'town': 'SquirrelTown',
'area': 'School',
'win_phrase': "Yeah!",
'loss_phrase': "You know, there's a lesson to be learned from all this...",
'bet_base': 40,
'reward':20,
'bobots': [
{'type':'rodent1',
'attacks': ['double_strike1', 'reflection_shield1'],
'fuel':['piston20', 'battery10'],
'coat':'red'
},
]
}
Ella = {
'relative': 'Elleanor',
'town': 'SquirrelTown',
'area': 'Bookstore',
'win_phrase': "Opa! I can't believe I won!",
'loss_phrase': "You're so great!",
'bet_base': 20,
'reward':10,
'bobots': [
{'type':'rodent1',
'attacks': ['shot1', 'rajada1'],
'fuel':['ammunition30'],
'coat':None
},
]
}
Interchange_Girl = {
'relative': 'Ms_Foreigner',
'town': 'SquirrelTown',
'area': 'School',
'win_phrase': "J'ame le trioumphe!",
'loss_phrase': "C'est la vie...",
'bet_base': 100,
'reward': 50,
'bobots': [
{'type':'reptile1',
'attacks': ['firethrower1','shot2'],
'fuel':['diesel10','ammunition10'],
'coat':None
},
]
}
Bully = {
'relative': 'Punk',
'town': 'SquirrelTown',
'area': 'Street',
'win_phrase': "If you're smaller than me, your lunch money's mine!",
'loss_phrase': "I'll get you for this",
'bet_base': 20,
'reward': 10,
'bobots': [
{'type':'rodent2',
'attacks': ['napalm1','electro_shock1'],
'fuel':['diesel20','battery30'],
'coat':'black'
},
]
}
Weird_Girl = {
'relative': 'Ms_Weird',
'town': 'SquirrelTown',
'area': 'Park',
'win_phrase': "Wow! How weird is that?",
'loss_phrase': "I don't mind losing.",
'bet_base': 20,
'reward': 10,
'bobots': [
{'type':'bird1',
'attacks': ['sniper1','blunderbuss1'],
'fuel':['ammunition20'],
'coat':None
},
]
}
Yoshi = {
'relative': 'Mr_Do',
'town': 'SquirrelTown',
'area': 'Academy',
'win_phrase': "Practice leads to perfection.",
'loss_phrase': "I'll just train harder.",
'bet_base': 20,
'reward': 10,
'bobots': [
{'type':'bird1',
'attacks': ['sniper1','blunderbuss1'],
'fuel':['ammunition20'],
'coat':None
},
]
}
| [
"[email protected]"
] | |
d0cd69c9a0f5a51db088a412feea67c1613a0780 | 5a67b2e196d972b9f728a507d8a7aa7722fc06e8 | /FLAMES.py | 8989b8edf9710f37e7555e8012d42ce2a63866e5 | [] | no_license | BG-sword/gitlearn | c24617161830c7e2677de35db2ab29e25ce654ca | c9a992942c5a1f6f36ae072bddea81186f6050a2 | refs/heads/master | 2023-04-28T03:14:00.714535 | 2021-04-27T10:58:26 | 2021-04-27T10:58:26 | 362,011,051 | 0 | 0 | null | 2021-04-27T09:42:27 | 2021-04-27T06:54:20 | Python | UTF-8 | Python | false | false | 1,976 | py | #function
def remove_match_char(list1, list2):
    # remove matching letters from the two lists, then concatenate them with a special character between
for i in range(len(list1)) :
for j in range(len(list2)) :
if list1[i] == list2[j] :
c = list1[i]
list1.remove(c)
list2.remove(c)
list3 = list1 + ["/"] + list2
return [list3, True]
list3 = list1 + ["/"] + list2
return [list3, False]
# main code
if __name__ == "__main__" :
#name-1 with no spaces and convert to list
name1 = input("First name: ")
name1 = name1.lower()
    name1 = name1.replace(" ", "")  # str.replace returns a new string, so it must be reassigned
list1 = list(name1)
#name-2 with no spaces and convert to list
name2 = input("Second name : ")
name2 = name2.lower()
    name2 = name2.replace(" ", "")
list2 = list(name2)
go = True
while go:
retrived_list = remove_match_char(list1, list2)
#concated list
result_list = retrived_list[0]
#true of false tag
go = retrived_list[1]
#split the list seperated by special characted into two lists
temp = result_list.index("/")
#left side characters of special character
list1 = result_list[ : temp]
#right side characters of special character
list2 = result_list[temp + 1 : ]
#sum of lengths of both lists
count = len(list1) + len(list2)
#flames words in an array
result = ["F-Friends", "L-Love", "A-Affection", "M-Marriage", "E-Enemy", "S-Siblings"]
#flames array to elemenate each one by one iteration until one left
while len(result) > 1 :
temp_index = (count % len(result) - 1)
if temp_index >= 0 :
right = result[temp_index + 1 : ]
left = result[ : temp_index]
result = right + left
else :
result = result[ : len(result) - 1]
#return the last one that remains in array
print("Relationship status :", result[0]) | [
"[email protected]"
] | |
ccf78adf8b93f1db9dfb7c33c83611af8c2b1bff | 27277393356207c942ce3fc846de6fa8e2014a0a | /326. Power of Three.py | 6526f958aea164355e17b038d036e1688cc60029 | [] | no_license | TRKSasank/LeetCode | 5d2f463a92c5ee9124939c7313fef39d6b9f9d2b | 0c419ca138ca281e5defe28fe689d75ae5256011 | refs/heads/master | 2022-11-21T13:48:11.602162 | 2020-07-24T01:04:05 | 2020-07-24T01:04:05 | 266,245,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Solution:
def isPowerOfThree(self, n: int) -> bool:
import math
if(n<0):
val = math.log(n*-1,3)
val_c = math.ceil(val)
val_f = math.floor(val)
elif(n>0):
val = math.log(n,3)
val_c = math.ceil(val)
val_f = math.floor(val)
else:
return False
if(n>=1 and (n == 3**val_c or n == 3**val_f) ):
return True
#elif(n<0 and (n == (-3)**val_c or n == (-3)**val_f) ):
# return True
else:
return False
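
# A simpler loop-based alternative (a sketch, not part of the original
# submission): repeatedly divide by 3 and check that nothing is left over.
# This avoids the floating-point log() precision issues worked around above.
def is_power_of_three_loop(n: int) -> bool:
    if n < 1:
        return False
    while n % 3 == 0:
        n //= 3
    return n == 1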
| [
"[email protected]"
] | |
3d70ab69d24576b88fa980a4f0ecfdbca3d8bb61 | b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339 | /入门学习/threading-eg-01.py | 4fdf372ac23e074b5104d82e001b51bacae3aca8 | [] | no_license | python-yc/pycharm_script | ae0e72898ef44a9de47e7548170a030c0a752eb5 | c8947849090c71e131df5dc32173ebe9754df951 | refs/heads/master | 2023-01-05T06:16:33.857668 | 2020-10-31T08:09:53 | 2020-10-31T08:09:53 | 296,778,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | import time
# _thread is the low-level threading package and is not recommended
import threading
# this example mainly shows how to pass arguments to a thread
# using multiple threads
def loop1(in1):
    # ctime gives the current time
    print('Start loop 1 at :',time.ctime())
    # print the argument
    print("My argument:",in1)
    # how long to sleep, in seconds
    time.sleep(4)
    print('End loop 1 at :',time.ctime())
def loop2(in1,in2):
    # ctime gives the current time
    print('Start loop 2 at :',time.ctime())
    # print the arguments
    print("My arguments:",in1,"and:",in2)
    # how long to sleep, in seconds
    time.sleep(2)
    print('End loop 2 at :',time.ctime())
# the real difference from _thread is inside main()
# main() is what actually does the work
def main():
    print("Starting at:",time.ctime())
    # starting a thread means running a function in that thread
    # two arguments: the function to run, and its arguments as a tuple (an empty tuple if none)
    # note: if the function takes a single argument, a trailing comma is needed so the value is a tuple
    t1 = threading.Thread(target=loop1,args=("xiaoming",))
    t1.start()
    t2 = threading.Thread(target=loop2,args=("王老大","hong"))
    t2.start()
    # even without join(), threading waits for all non-daemon threads to finish
    # t1.join()
    # t2.join()
    # with join(), this print would run only after t1 and t2 complete, so no busy-wait loop is needed
    print("All done at:",time.ctime())
if __name__ == '__main__':
    main()
    # there are actually three threads running: a main thread that dispatches work, plus two worker threads
    # this would wait for all worker threads to finish
    #    while True:
    #        time.sleep(1)
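
# A modern alternative (a sketch, not part of the original script): run the
# same two tasks with concurrent.futures, which waits for both to finish
# when the `with` block exits.
def main_with_pool():
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=2) as pool:
        pool.submit(loop1, "xiaoming")
        pool.submit(loop2, "王老大", "hong")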
| [
"15655982512.com"
] | 15655982512.com |
01be6d192d87648dd9c071a5da64462964f37bc8 | b753db5f746d14529b70f86795e7e373e4dac61d | /ДЗ 1/3.py | 58c2a409d4029e2e8903d5c0aa6deda44c544646 | [] | no_license | kozik87/General_python_DZ | d3b08a03cf0132fa0ebced7816dad5878b127aa2 | 7e28a4fe3a1871a1168e84bb5bfab09e99fb7642 | refs/heads/master | 2023-08-17T21:59:40.852979 | 2021-10-19T20:03:45 | 2021-10-19T20:03:45 | 407,166,323 | 0 | 0 | null | 2021-10-19T20:03:47 | 2021-09-16T13:00:51 | Python | UTF-8 | Python | false | false | 706 | py | # 3.Склонение слова
# Implement the declension of the word "процент" in the phrase "N процентов". Print this phrase on a separate line for each number in the range from 1 to 100:
# 1 процент
# 2 процента
# 3 процента
# 4 процента
# 5 процентов
# 6 процентов
# ...
# 100 процентов
n = int(input('Enter a number: '))
if n % 10 == 1 and n != 11:
    print(f"{n} процент")
elif n % 10 >= 2 and n % 10 <= 4 and n // 10 != 1:
    print(f"{n} процента")
else:
print(f"{n} процентов")
| [
"[email protected]"
] | |
3a2963a69495501d8338a91a6c65527ed390b24b | 8a03b8459902d1bf0806f8d3387fb962bb57cf58 | /Validations/SAR_click.py | 44f05768023ecd4dc58d526a4c8da62232cd4494 | [] | no_license | chetandg123/cQube | f95a0e86b1e98cb418de209ad26ae2ba463cfcbc | a862a1cdf46faaaff5cad49d78c4e5f0454a6407 | refs/heads/master | 2022-07-18T12:43:06.839896 | 2020-05-22T13:23:52 | 2020-05-22T13:23:52 | 258,089,042 | 0 | 0 | null | 2020-05-08T16:28:26 | 2020-04-23T03:55:52 | HTML | UTF-8 | Python | false | false | 1,852 | py | import time
import unittest
from Data.Paramters import Data
from selenium import webdriver
class SAROption(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(Data.Path)
self.driver.maximize_window()
self.driver.implicitly_wait(10)
self.driver.get(Data.URL)
self.driver.find_element_by_xpath(Data.email).send_keys(Data.username)
self.driver.find_element_by_xpath(Data.pwd).send_keys(Data.password)
self.driver.find_element_by_xpath(Data.loginbtn).click()
time.sleep(10)
def test_SAR(self):
self.driver.find_element_by_xpath(Data.Dashboard).click()
menu_name=self.driver.find_element_by_xpath("//td[contains(text(),'Student Attendance Reports ')]").text
print(menu_name)
# self.assertEqual(" Student Attendance Reports",menu_name,"Not matching!")
self.driver.find_element_by_xpath(Data.SAR).click()
time.sleep(5)
self.driver.find_element_by_xpath(Data.Blocks).click()
time.sleep(15)
print("Block details..")
infob = self.driver.find_elements_by_xpath(Data.details)
for i in range(len(infob)):
print(infob[i].text)
self.driver.find_element_by_xpath(Data.Clusters).click()
time.sleep(15)
print("Cluster details..")
infoc = self.driver.find_elements_by_xpath(Data.details)
for i in range(len(infoc)):
print(infoc[i].text)
self.driver.find_element_by_xpath(Data.Schools).click()
print("for schools details...")
time.sleep(15)
infos = self.driver.find_elements_by_xpath(Data.details)
for i in range(len(infos)):
print(infos[i].text)
def tearDown(self):
time.sleep(5)
self.driver.close()
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
6a7120426c27f2b414d8d164f7d3394028e271ec | 2f85a2d3917a92d0d990620c9cbd7ad50956c876 | /session17/quiz.py | 6f0fbc5638ec15c1a5d729d4376b8a7c085af8c5 | [] | no_license | KyleLawson16/mis3640 | 14e57203a495c6f40188e6f5c8bf3ec82605e924 | 647a86838ff77544970973196941cd433851b150 | refs/heads/master | 2020-03-27T16:41:37.798702 | 2018-11-01T19:40:03 | 2018-11-01T19:40:03 | 146,800,315 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | ELECTORS = {'CA': 55, 'TX': 38, 'FL': 29, 'MA': 11}
class Candidate:
"""The presidential candidate"""
def __init__(self, name, winning_states=None, votes=0):
"""Initialize candidate.
name: string
winning_states: a list of strings representing initial winning state(s).
votes: integer, representing number of votes
"""
self.name = name
        self.winning_states = winning_states or []
        # also count electors for any states won before construction
        self.votes = votes + sum(ELECTORS[s] for s in self.winning_states)
def __str__(self):
"""Return a string representaion of this candidate,
including name and winning state(s).
"""
states_string = ''
if len(self.winning_states) == 0:
states_string = 'no states'
else:
for i, state in enumerate(self.winning_states):
states_string += state
if i != len(self.winning_states) - 1:
states_string += ', '
return f'{self.name} wins {states_string}'
def __gt__(self, other):
return self.votes > other.votes
def win_state(self, state):
"""Adds a tate to winning_states and updates votes.
state: a string of state abbreviation
"""
self.winning_states.append(state)
self.votes += ELECTORS[state]
def main():
trump = Candidate('Donald Trump')
clinton = Candidate('Hillary Clinton', winning_states=['CA'])
print(trump)
print(clinton)
print()
print('After election:')
trump.win_state('FL')
trump.win_state('TX')
clinton.win_state('MA')
print(trump)
print(clinton)
print('Does Trump win?')
print(trump > clinton)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e03e7e0b0f300b27cdb37a4f807759aa86873fbf | 0e3df318ad818f7538ec5031dc4578fce3433c5c | /utils/project-pdf-generator/icGenerator.py | a1cea23ce07f1d117a8ca36efabb2038c05a244f | [] | no_license | amartya2969/Instruction-Division-2.0 | 49415ffb5260d2bc7c4e6b98f6f96463ff5e9a9e | eb08b1933ecb4e4422cec86f96235fae149bbaed | refs/heads/master | 2020-03-20T02:08:30.082196 | 2018-06-09T13:45:25 | 2018-06-09T13:45:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,586 | py | # This generates pdf for projects under each IC.
# Place the csv file in the same directory with the name `ic_list.csv` and later run the script.
# Format of ic_list.csv Course Code,Course Name,IC Name,Email ID
# Format of allotment_list.csv ID NO , STUDENT NAME, FACULTY NAME, PROJECT CODE,EMAIL, ELE TYPE, PROJECT TITLE
import csv
from reportlab.platypus import BaseDocTemplate, SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet,ParagraphStyle
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.lib import colors
#from WebKit.Page import Page
from time import strftime
from cStringIO import StringIO
import datetime
styles = getSampleStyleSheet()
# csv file name
filename = "ic_list.csv"
# initializing the titles and rows list
fields = []
rowsIC = []
# reading csv file
with open(filename, 'r') as csvfile:
# creating a csv reader object
csvreader = csv.reader(csvfile)
# extracting field names through first row
fields = csvreader.next()
# extracting each data row one by one
for row in csvreader:
rowsIC.append(row)
# get total number of rows
print("Total no. of rows: %d"%(csvreader.line_num))
# printing the field names
print('Field names are:' + ', '.join(field for field in fields))
# printing first 5 rows
print('\nFirst 5 rows are:\n')
for row in rowsIC[:6]:
# parsing each column of a row
for col in row:
print("%10s , "%col),
print('\n')
instructors = [] # -- IC --
courselist = []
instructoremails = []
data = []
for row in rowsIC :
if row[2] in instructors:
courselist[instructors.index(row[2])].append(row[0])
else:
instructors.append(row[2])
courselist.append([row[0]])
instructoremails.append(row[3])
#for row in courselist :
#print row
with open('allotment_list.csv', 'r') as csvfile:
# creating a csv reader object
csvreader = csv.reader(csvfile)
# extracting field names through first row
fields = csvreader.next()
rows = []
# extracting each data row one by one
for row in csvreader:
rows.append(row)
i = 0
for row in courselist:
data.append([])
for col in row:
for row1 in rows:
if col.strip() == row1[3].strip():
data[i].append(row1)
i = i+1
# data contains list of lists . data[i] corresponds to ith instructor in instructors.
# data[i][j] corresponds to jth project under ith instructor.
#making pdf starts
i = 0
#print data[1]
#deleting unncecessary column
for row in data:
for col in row:
del col[5]
del col[4]
for instructor in instructors :
instructoremail = instructoremails[instructors.index(instructor)]
print instructoremail
print instructor
title = "Instruction Division - BITS Pilani Hyderabad Campus"
heading1 = "FIRST SEMESTER 2018-2019"
name = "Dear "+ instructor+ ","
heading2 = "LIST OF ALLOTED PROJECT STUDENTS"
para = "The following is the allotted list of project students under your guidance during First Semester 2018-19. There is a possibility that some of the allotted project students may not register for the same. The final list of registered students will be sent to the IC of the respective project type course. In case of any discrepancy, please contact Dr. Balaji Gopalan, In-charge, Project Allotment (Extn: 575) or email at [email protected]. "
datetoday = datetime.datetime.today().strftime('%d-%m-%Y')
elements = []
footer1 = "Associate Dean <br/> A Vasan <br/> Instruction Division"
title = '<para align = "centre"><font size = 18><strong>%s</strong></font></para>' % title
ptext = '<font size=12>%s</font>' % name
head1text = '<para align = "centre">"<font size = 18><strong>%s</strong></font></para>' % heading1
head2text = '<para align = "centre"><font size = 18><strong>%s</strong></font></para>' % heading2
paratext = '<font size=12>%s</font>' % para
date = '<para align="right"><font>%s</font></para>' % datetoday
footer = '<para align = "left"><font size = "12">%s</font></para>' % footer1
j = 0
elements.append(Paragraph(title, styles["Normal"]))
while j < 5 :
elements.append(Spacer(1, 12))
j = j+1
elements.append(Paragraph(head1text, styles["Normal"]))
elements.append(Spacer(1, 12))
elements.append(Spacer(1, 12))
elements.append(Paragraph(head2text, styles["Normal"]))
elements.append(Spacer(1, 12))
elements.append(Spacer(1, 12))
elements.append(Paragraph(date, styles["Normal"]))
elements.append(Spacer(1, 12))
elements.append(Paragraph(ptext, styles["Normal"]))
elements.append(Spacer(1, 12))
elements.append(Paragraph(paratext, styles["Normal"]))
elements.append(Spacer(1, 12))
elements.append(Spacer(1, 12))
doc = SimpleDocTemplate(("./IC-PDF/"+ instructoremail + ".pdf"), pagesize=letter)
GRID_STYLE = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (1,1), (-1,-1), 'LEFT')]
)
# container for the 'Flowable' objects
# if data[i] != []:
if(data[i]==[]):
print instructor
# print courselist[instructors.index(instructor)]
data[i].insert(0,['S. No','ID No.','Student Name','Name of the guide','Course Code','Title'])
for j in range(1,len(data[i])):
data[i][j].insert(0,str(j))
s = getSampleStyleSheet()
s = s["BodyText"]
s.wordWrap = 'CJK'
data2 = [[Paragraph(cell, s) for cell in row] for row in data[i]]
t=Table(data2,colWidths=[None,None,None,1*inch,1*inch,2.5*inch])
t.setStyle(GRID_STYLE)
i = i+1
elements.append(t)
elements.append(Spacer(1, 12))
elements.append(Spacer(1, 12))
elements.append(Paragraph(footer, styles["Normal"]))
doc.build(elements)
| [
"[email protected]"
] | |
083acf35d6ef201f3dcf6af52969995105021d24 | 2321eee8ae8b7ec0ceebe8005ec44d59c84166b9 | /goal.py | 5fd9ac3841f3fe84cc0323deba72c440a15e32fc | [] | no_license | the-lightstack/Raspberry_Game | ff2c1b3b5148b7e817d98f8d6ace7fd49c022500 | 3c44edf7c74168b96d5c626bfe130b06404e2da5 | refs/heads/master | 2022-12-15T01:21:07.737013 | 2020-09-13T16:08:18 | 2020-09-13T16:08:18 | 294,993,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import pygame
pygame.init()
class Goal:
def __init__(self,x,y,r,var):
self.rect=pygame.Rect(x,y,r,r)
self.var=var
def show(self):
pygame.draw.circle(self.var.screen,(200,40,40),(int(self.rect.x-self.var.camera_scrolling.x),int(self.rect.y-self.var.camera_scrolling.y)),self.rect.w)
def check_hit(self):
if self.var.player.rect.colliderect(self.rect):
self.var.won_level=True
def update(self):
self.check_hit()
self.show() | [
"[email protected]"
] | |
6ef08e75be9eacb96de8274efee6509baa1de57b | cad7663ccdbdeb86dc08db498f4df038b311bc3e | /BOJ/Q2501-Q5000/Q2675.py | b67f1f1648d5e72770d3c8401b43dc99b6628074 | [] | no_license | sbyeol3/Algorithm-Study | 1a8d8a95707a2c9c8fe2095612b0908bf10b55b9 | a21ac6ea1c32a22826f004c2eadbc586298489e0 | refs/heads/master | 2021-06-29T18:16:01.248410 | 2021-01-16T16:32:42 | 2021-01-16T16:32:42 | 212,065,340 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | N = int(input())
s = []
for _ in range(N) :
a, b = input().split()
string = ''
for i in range(len(b)):
string += b[i] * int(a)
s.append(string)
for i in range(N) :
print(s[i]) | [
"[email protected]"
] | |
bb1de112084f04f13bb84c2e7d9eed6667fb993d | a05a0b0764d464515b3db8be0c1cd3a766e4c62c | /preprocessing/take.py | a4a4ab7a4c9996733b36048a0d3d63a7131dfcbf | [] | no_license | pei-hsin-chiu/ML-against-supraspinatus-calcific-tendinopathy | 29e198549d3903a78744696c0383f7c5886beedb | d98379cdeea2eab3823cad6f5278f39c4219f31e | refs/heads/main | 2023-04-21T20:45:01.744686 | 2021-05-13T03:24:53 | 2021-05-13T03:24:53 | 310,478,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,608 | py | import os
import random
import shutil
###short valid###
short_calcification_path='C:/Users/user/Desktop/0622/valid/short calcification/'
short_calcification_files=os.listdir(short_calcification_path)
short_no_calcification_path='C:/Users/user/Desktop/0622/valid/short no calcification/'
short_no_calcification_files=os.listdir(short_no_calcification_path)
##calcification##
##test#
test_target_path='C:/Users/user/Desktop/0622test/'
test_amount=10
test_choice = random.sample(short_calcification_files, test_amount)
for i in range(test_amount):
oldname=short_calcification_path+test_choice[i]
#print('oldname=',oldname)
newplacename=test_target_path+test_choice[i]
shutil.copyfile(oldname,newplacename)
os.remove(oldname)
for index in range(test_amount):
short_calcification_files.remove(test_choice[index])
##no calcification##
##test#
test_target_path='C:/Users/user/Desktop/0622test/'
test_amount=10
test_choice = random.sample(short_no_calcification_files, test_amount)
for i in range(test_amount):
oldname=short_no_calcification_path+test_choice[i]
#print('oldname=',oldname)
newplacename=test_target_path+test_choice[i]
shutil.copyfile(oldname,newplacename)
os.remove(oldname)
for index in range(test_amount):
short_no_calcification_files.remove(test_choice[index])
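
# The copy-then-remove pattern above repeats for every class and split; a small
# helper (a sketch, not in the original script) can factor it out:
def sample_and_move(src_dir, dst_dir, amount, files):
    """Randomly pick `amount` names from `files`, move those files from
    src_dir to dst_dir, and drop them from the candidate list."""
    chosen = random.sample(files, amount)
    for name in chosen:
        shutil.copyfile(src_dir + name, dst_dir + name)
        os.remove(src_dir + name)
        files.remove(name)
    return chosen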
"""
###long valid###
long_calcification_path='C:/Users/user/Desktop/長軸 780-3541/calcification/'
long_calcification_files=os.listdir(long_calcification_path)
long_no_calcification_path='C:/Users/user/Desktop/長軸 780-3541/no calcification/'
long_no_calcification_files=os.listdir(long_no_calcification_path)
##calcification##
##train##
train_target_path='C:/Users/user/Desktop/0622-long/train/calcification/'
train_amount=600
train_choice = random.sample(long_calcification_files, train_amount)
for i in range(train_amount):
oldname=long_calcification_path+train_choice[i]
#print('oldname=',oldname)
newplacename=train_target_path+train_choice[i]
shutil.copyfile(oldname,newplacename)
for index in range(train_amount):
long_calcification_files.remove(train_choice[index])
##valid##
valid_target_path='C:/Users/user/Desktop/0622-long/valid/calcification/'
valid_amount=50
valid_choice = random.sample(long_calcification_files, valid_amount)
for i in range(valid_amount):
oldname=long_calcification_path+valid_choice[i]
#print('oldname=',oldname)
newplacename=valid_target_path+valid_choice[i]
shutil.copyfile(oldname,newplacename)
for index in range(valid_amount):
long_calcification_files.remove(valid_choice[index])
##test#
test_target_path='C:/Users/user/Desktop/0622-long-test/'
test_amount=80
test_choice = random.sample(long_calcification_files, test_amount)
for i in range(test_amount):
oldname=long_calcification_path+test_choice[i]
#print('oldname=',oldname)
newplacename=test_target_path+test_choice[i]
shutil.copyfile(oldname,newplacename)
for index in range(test_amount):
long_calcification_files.remove(test_choice[index])
##no calcification##
##train##
train_target_path='C:/Users/user/Desktop/0622-long/train/no calcification/'
train_amount=600
train_choice = random.sample(long_no_calcification_files, train_amount)
for i in range(train_amount):
oldname=long_no_calcification_path+train_choice[i]
#print('oldname=',oldname)
newplacename=train_target_path+train_choice[i]
shutil.copyfile(oldname,newplacename)
for index in range(train_amount):
long_no_calcification_files.remove(train_choice[index])
##valid##
valid_target_path='C:/Users/user/Desktop/0622-long/valid/no calcification/'
valid_amount=50
valid_choice = random.sample(long_no_calcification_files, valid_amount)
for i in range(valid_amount):
oldname=long_no_calcification_path+valid_choice[i]
#print('oldname=',oldname)
newplacename=valid_target_path+valid_choice[i]
shutil.copyfile(oldname,newplacename)
for index in range(valid_amount):
long_no_calcification_files.remove(valid_choice[index])
##test#
test_target_path='C:/Users/user/Desktop/0622-long-test/'
test_amount=80
test_choice = random.sample(long_no_calcification_files, test_amount)
for i in range(test_amount):
oldname=long_no_calcification_path+test_choice[i]
#print('oldname=',oldname)
newplacename=test_target_path+test_choice[i]
shutil.copyfile(oldname,newplacename)
for index in range(test_amount):
long_no_calcification_files.remove(test_choice[index])
""" | [
"[email protected]"
] | |
e0a0b17520b4642b2b17c798f2328f86e1da532b | 6137cf4dbfae8b0908090a82ba7a17bed410a31e | /python3/multiprocess_demo/multiprocess_task_worker.py | 61960320fd4f6298bac43f0b234c663cbcbba702 | [] | no_license | linwiker/learpython | 5ba16283be032f6b3819be5888dd84088abd4c3b | 1afdc62125f154c957a739e38cced3006cd52907 | refs/heads/master | 2020-05-21T12:27:52.821229 | 2017-02-03T07:42:46 | 2017-02-03T07:42:46 | 47,006,529 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import sys
import queue
from multiprocessing.managers import BaseManager
# create a QueueManager subclass like the one on the master
class QueueManager(BaseManager):
    pass
# this QueueManager only fetches Queues over the network, so register names only
QueueManager.register('get_task_queue')
QueueManager.register('get_result_queue')
# connect to the server, i.e. the machine running the master:
server_addr = '10.99.56.124'
print('Connect to server %s...'%server_addr)
# the port and authkey must match the master's settings exactly
m = QueueManager(address=(server_addr,5000),authkey=b'abc')
# connect over the network
m.connect()
# get the Queue objects
task = m.get_task_queue()
result = m.get_result_queue()
# take tasks from the task queue and write results into the result queue:
for i in range(10):
    try:
        n = task.get(timeout=1)
        print('run task %d * %d...'%(n,n))
        r = '%d * %d = %d'%(n,n,n*n)
        time.sleep(1)
        result.put(r)
    except queue.Empty:
        print('task queue is empty.')
# processing finished
print('worker exit.') | [
"[email protected]"
] | |
6fbf91a5b1debbe4ab175fbfbdfeb60a2c27d21e | 2f12d0342a1d725f3639a7b2772be0ea5fb50ba4 | /personal_library/urls.py | ac8d7ee161bde64f074a4bb3ed082d57caf68ccb | [] | no_license | divyansh-zunroof/personal_library_backend | f5012b8f2de6dbfb91d38f65124e52a22c51a55c | f8d7dcd31b96696f4cd918e50536d32c6024f896 | refs/heads/master | 2023-05-04T03:17:30.653911 | 2021-05-16T13:51:23 | 2021-05-16T13:51:23 | 367,886,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.urls import path
from . import views
urlpatterns = [
path('login', views.loginUser, name='Login User'),
path('create', views.createUser, name='Create User'),
path('get_books', views.getBooks, name='Get Books'),
path('add_books', views.addBooks, name='Add Books'),
] | [
"[email protected]"
] | |
522a5c695089f792c0ee5a34b2e6eb98c2e75160 | 6604c21116d4b90c86740a99765b20129dc70728 | /Python-Milestone-Project-1/1-Displaying_Information.py | 23891dd1c3bc87639f0b89a57fa4f32fc05550c0 | [] | no_license | oliver-mashari/Learning-Python | 0951349f0d438a7d4f28d57cb47103e57337d70e | a7b25efd60fbbe044a5d0e1b8dd496704a2cc6bf | refs/heads/main | 2023-06-14T05:47:13.925133 | 2021-06-24T18:16:56 | 2021-06-24T18:16:56 | 325,369,723 | 0 | 1 | null | 2021-01-03T19:48:38 | 2020-12-29T19:25:15 | Python | UTF-8 | Python | false | false | 172 | py | # Displaying information
def display(row1,row2,row3):
print(row1)
print(row2)
print(row3)
example_row = [1,2,3]
display(example_row, example_row, example_row) | [
"[email protected]"
] | |
5579477131eae8fee930193ef855ff00c05dcd6a | 285fba7666d0365b89093b1c6ca80826012a1ba6 | /safe_eats_app/admin.py | ea6ff22bd97242ab1354e9e6c5d7a34e832c03f0 | [] | no_license | ron-s/safe_eats | 66bb63a81713cb1beb744b697e2f007499ea79f2 | efa5c7e60acbc6693f2933ffd5ae2e1cac1771d4 | refs/heads/master | 2021-01-15T09:14:47.967505 | 2017-08-17T22:25:27 | 2017-08-17T22:25:27 | 54,150,076 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.contrib import admin
from .models import RestaurantInfo, InspectionReport, InspectionResult
# Register your models here.
admin.site.register(RestaurantInfo)
admin.site.register(InspectionReport)
admin.site.register(InspectionResult)
| [
"[email protected]"
] | |
5bede63ddee814a5b13e391ff4f2efd3c891ae52 | aa8ea5446aa147eb6643cc2ad98c1d03fd3ee6f2 | /main.py | 2f8cc2e51b5f25fb7fc96e0bbee76f84cc483537 | [] | no_license | Rowanzhang/spider | 925aade22d956aa44734513fb0d62a9d7708f468 | 937e47a5571debc557b029bbda785e08ba4fbe35 | refs/heads/master | 2021-01-14T16:39:25.676754 | 2020-02-24T08:28:09 | 2020-02-24T08:28:09 | 242,683,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | from package1.run import Run
htmls = input()
run = Run()
run.go_all(htmls)
| [
"[email protected]"
] | |
d381bed274f6bf34f749eff562c360753ab95206 | e4cab1d00a90cfe4a52be637d070ffc82f8f215f | /Multitasking_programming/day10/group_chat/desc_group_chat.py | d16a52136971f50d16ed47175f936e5366eed3a5 | [] | no_license | Ewardqifan/Net_Pro | dc75c9e1948d28e4b69fee9f74ae715cabd13ce3 | 7fdc45b77f52bd29f90f494901e0adba4294efe9 | refs/heads/master | 2020-05-17T13:46:34.554369 | 2019-04-27T08:01:03 | 2019-04-27T08:01:03 | 183,744,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | '''
Group chat service:
    Features: a group chat similar to a QQ chat group
    [1] Anyone entering the chat room must enter a name (password); names must be unique
    [2] When someone enters the chat room, the others are notified ("xx entered the chat room")
    [3] When one person sends a message, the others receive it ("xx: msg")
    [4] When someone leaves, the others are notified ("xx left the chat room")
    [5] Extension: the server can send announcements to all group users ("admin message: msg")
Concrete implementation:
    1. Build the network connection (UDP)
    2. Enter the chat room
        1) client (input name\
                   send name\
                   receive msg\
                   if OK enter the room else re-enter name\
                   print name)
        2) server (receive name\
                   judge permission\
                   if the name is taken send a refusal msg, else save the name to a dict and notify\
                   send the name to the others)
    3. Implement the chat feature
        1) client:
        2) server:
    4. Leave the chat room
    5. Admin announcements
'''
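
# A minimal sketch of the server side described above (not the original
# implementation; the port and the "COMMAND name text" wire format are
# assumptions made for illustration):
from socket import socket, AF_INET, SOCK_DGRAM

HOST, PORT = '0.0.0.0', 8888  # assumed listening address

def broadcast(sock, users, text, skip=None):
    # forward a line of text to every registered user except `skip`
    for name, addr in users.items():
        if name != skip:
            sock.sendto(text.encode(), addr)

def run_server():
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.bind((HOST, PORT))
    users = {}  # name -> client address
    while True:
        data, addr = sock.recvfrom(1024)
        cmd, _, rest = data.decode().partition(' ')
        name, _, text = rest.partition(' ')
        if cmd == 'LOGIN':
            if name in users:
                sock.sendto(b'REFUSE name already in use', addr)
            else:
                sock.sendto(b'OK', addr)
                broadcast(sock, users, '%s entered the chat room' % name)
                users[name] = addr
        elif cmd == 'CHAT':
            broadcast(sock, users, '%s: %s' % (name, text), skip=name)
        elif cmd == 'QUIT':
            users.pop(name, None)
            broadcast(sock, users, '%s left the chat room' % name)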
| [
"[email protected]"
] | |
0bdb3b7ea639e486477738c5aba33786f1d1aa57 | 6ff3103158da07a40b501e746c0ecc6dc07c6c4f | /openks/models/pytorch/gen_learn.py | a92f563148f6e9251da4d0511211c1cfe588fa5c | [
"Apache-2.0"
] | permissive | jamesyifan/OpenKS | 22f1550e5f0773d3d431106e61a3fe6babf90a3d | e8125e272d3f901f20c4e6c3a51603a0905b0808 | refs/heads/master | 2023-02-27T21:21:35.086454 | 2021-01-29T12:50:08 | 2021-01-29T12:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,387 | py | # Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University.
# All Rights Reserved.
import logging
import argparse
import torch
import torch.nn as nn
import numpy as np
from torch.utils import data
from sklearn.model_selection import train_test_split
from ..model import GeneralModel
@GeneralModel.register("general", "PyTorch")
class GeneralTorch(GeneralModel):
def __init__(self, name='pytorch-default', dataset=None, model=None, args=None):
self.name = name
self.dataset = dataset
self.args = self.parse_args(args)
self.model = model
def parse_args(self, args):
""" parameter settings """
parser = argparse.ArgumentParser(
description='Training and Testing Knowledge Graph Embedding Models',
usage='train.py [<args>] [-h | --help]'
)
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--batch_size', default=1024, type=int)
parser.add_argument('--hidden_dim', default=50, type=int)
parser.add_argument('--lr', default=0.01, type=float)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--valid_freq', default=10, type=int)
parser.add_argument('--model_path', default='./model.tar', type=str)
parser.add_argument('--opt', default='sgd', type=str)
parser.add_argument('--words_dim', type=int, default=300)
parser.add_argument('--num_layer', type=int, default=2)
parser.add_argument('--dropout', type=float, default=0.3)
parser.add_argument('--weight_decay',type=float, default=0)
return parser.parse_args(args)
def data_reader(self, ratio=0.01):
train_set, test_set = train_test_split(self.dataset, test_size=ratio)
return train_set, test_set, test_set
	def get_span(self, label, index2tag, type):
		# placeholder: concrete span extraction is expected to be supplied elsewhere
		return NotImplemented
def evaluation(self, gold, pred, index2tag, type):
right = 0
predicted = 0
total_en = 0
for i in range(len(gold)):
gold_batch = gold[i]
pred_batch = pred[i]
for j in range(len(gold_batch)):
gold_label = gold_batch[j]
pred_label = pred_batch[j]
				gold_span = self.get_span(gold_label, index2tag, type)
				pred_span = self.get_span(pred_label, index2tag, type)
total_en += len(gold_span)
predicted += len(pred_span)
for item in pred_span:
if item in gold_span:
right += 1
if predicted == 0:
precision = 0
else:
precision = right / predicted
if total_en == 0:
recall = 0
else:
recall = right / total_en
if precision + recall == 0:
f1 = 0
else:
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
def save_model(self, model, model_path):
torch.save(model, model_path)
def run(self):
device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
		train_set, valid_set, test_set = self.data_reader(ratio=0.01)
		train_set = DataSet(train_set)  # DataSet: dataset wrapper assumed to be defined elsewhere in the package
train_generator = data.DataLoader(train_set, batch_size=self.args.batch_size)
valid_set = DataSet(valid_set)
valid_generator = data.DataLoader(valid_set, batch_size=1)
test_set = DataSet(test_set)
test_generator = data.DataLoader(test_set, batch_size=1)
model = self.model(
words_dim=self.args.words_dim,
num_layer=self.args.num_layer,
dropout=self.args.dropout,
hidden_dim=self.args.hidden_dim
)
model = model.to(device)
		optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
index2tag = None
start_epoch = 1
best_score = 0.0
# train iteratively
for epoch in range(start_epoch, self.args.epochs + 1):
print("Starting epoch: ", epoch)
model.train()
			for batch_idx, batch in enumerate(train_generator):
optimizer.zero_grad()
loss, scores = model(batch)
loss.backward()
optimizer.step()
if epoch % self.args.valid_freq == 0:
print("Starting validation...")
model.eval()
gold_list = []
pred_list = []
				for dev_batch_idx, dev_batch in enumerate(valid_generator):
answer = model(dev_batch)
index_tag = np.transpose(torch.max(answer, 1)[1].view(dev_batch.ed.size()).cpu().data.numpy())
gold_list.append(np.transpose(dev_batch.ed.cpu().data.numpy()))
pred_list.append(index_tag)
                P, R, F = self.evaluation(gold_list, pred_list, index2tag, type=False)
print("{} Recall: {:10.6f}% Precision: {:10.6f}% F1 Score: {:10.6f}%".format("Dev", 100. * R, 100. * P, 100. * F))
                if R > best_score:
                    best_score = R
                    self.save_model(model, self.args.model_path)
| [
"[email protected]"
] | |
a46ac684734baa3f8ca8c5bbe81b463fcb7c751b | 5ae8a8bf314f780381a1e9c4b6f49cf9abfff26e | /study/algorithm/bug_fixing_reader.py | a50464f1c77cb4ee9752ac0e910d59acdc90c187 | [] | no_license | jsong00505/Python-Studies | d3227388ac99c55c0e3c79766000593dec40eceb | 2618809e52d05bf41018f6f40f467709a8e236d5 | refs/heads/master | 2021-06-15T17:28:47.429926 | 2017-02-23T12:18:24 | 2017-02-23T12:18:24 | 69,872,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | def solution(A):
    n = len(A)
    L = [-1] + A
    L.sort()
    count = 0
    pos = (n + 1) // 2
    # the dominator of a sorted array, if one exists, must occupy the middle position
    candidate = L[pos]
    for i in range(1, n + 1):
        if L[i] == candidate:
            count = count + 1
        if 2 * count > n:
            return candidate
    return -1
A = [1,1,1,50,1]
print(solution(A)) | [
"[email protected]"
] | |
d9627bd2f10378d4ec95ae689ab346b05b1957f6 | 347de3ae38f260979bba8d87c6bd7b51fe716f29 | /Bubble_Sort.py | e07e1680917db30f40b06c2974c3e494951125f8 | [] | no_license | implse/Simple_Algorithms | 522ba6f288ddb1b11c104dab313b6604a4a8204a | 832cee90d7b0e64be91cd39a6068bdc94d19dbcc | refs/heads/master | 2023-02-19T21:23:46.640904 | 2021-01-18T18:53:43 | 2021-01-18T18:53:43 | 119,513,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # Bubble Sort : Time Complexity O(n^2)
def bubble_sort(data):
for i in range(len(data) - 1):
for j in range(len(data) - 1 - i):
if data[j] > data[j + 1]:
data[j], data[j + 1] = data[j + 1], data[j]
return data
# Bubble Sort, recursive version (note: this second definition shadows the iterative one above)
def bubble_sort(data):
n = len(data)
def bubble_sort_recursive(data, n):
# Base Case
if n == 1:
return
for i in range(n - 1):
if data[i] > data[i + 1]:
data[i], data[i + 1] = data[i + 1], data[i]
# Recursive Case
bubble_sort_recursive(data, n - 1)
# Function call
bubble_sort_recursive(data, n)
return data
# Test
b1 = [13, 19, 24, 37, 8, 1, 3, 4, 71, 14, 40, 21, 6, 10, 7, 45, 18]
print(bubble_sort(b1))
| [
"[email protected]"
] | |
41e29613679d045243c72743450cd0e2bbf693c0 | 9d95d4445f0a6f38653f74054fcbf4f3a2d64875 | /web-app/model.py | a9c94ae49ae82b138964bd5eacbb0e10e4bb4708 | [
"Apache-2.0"
] | permissive | Acveah/Analysis-of-DDoS-attack-from-Botnet-devices-using-Machine-Learning-Classifiers | acb3d078f1c1191866aa09b3744a3a62b293a514 | b0ec05db61a630936633f4478f9b3b6718231cef | refs/heads/master | 2023-03-26T09:36:37.299357 | 2020-06-20T18:23:09 | 2020-06-20T18:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import neighbors
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv('dataset_ddos_six.csv')
# input features
X = df[['average_dur','stddev_dur','min_dur','max_dur','srate','drate']]
# output target (LabelEncoder is instantiated but unused here; the raw
# 'attack' labels are passed straight to the classifier)
encoder = LabelEncoder()
y = df[['attack']]
#train-test-split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.4)
#model
model=RandomForestClassifier(n_estimators=5)
model.fit(X_train, y_train)
# Save the model as a pickle in a file
joblib.dump(model, 'model.pkl')
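# The persisted model can be restored later without retraining; a quick
# round-trip sketch (hypothetical consumer code):
loaded_model = joblib.load('model.pkl')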
#prediction
predictions=model.predict(X_test)
print(predictions)
print(accuracy_score(y_test,predictions)*100) | [
"[email protected]"
] | |
131cb39c816851db1179d81b3c2cff819d12db30 | 23d4b01214eae328637c22e5c6b5ceefecab6b7a | /lesson2-2/task6.py | 4024d477543bfd09e078257170eafe6c0595e485 | [] | no_license | TomAndy/python_qa | 31aad52052b05907e89be2db75035ad61b14f4cf | 0ecc712ef3022b81efbcb77afa7f7ca1c314355b | refs/heads/master | 2021-06-25T12:04:03.710570 | 2017-04-12T19:15:51 | 2017-04-12T19:15:51 | 56,247,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py |
def create_dict(n=100):
dd={}
for i in range(2,n+1,2):
dd[i]=i**2
return dd
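# Equivalent one-liner using a dict comprehension (same keys and values):
# {i: i ** 2 for i in range(2, n + 1, 2)}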
if __name__ == '__main__':
    n = 2
    print(create_dict())
    print(create_dict(n)) | [
"[email protected]"
] | |
3f279d93291390085602cb925c163d1dfda17c69 | 4e89a680fda4fc7a48b41f287fff2ab7b7faafcc | /core/views.py | b7f53e1ea836e39407459e77393f30c19221bb85 | [] | no_license | FeniixDev/AnimatFlix | 42391d78bdafa685a0f96d28954201d022d4b9ee | 99ab8d688cb43d6240b43db41aeb630f8fa85e2e | refs/heads/master | 2022-11-04T13:15:51.227427 | 2020-06-16T14:35:39 | 2020-06-16T14:35:39 | 272,728,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth import views, authenticate
from django.views import generic
from .forms import SignUpForm, LoginForm
from ..animes.models import Anime
class IndexView(LoginRequiredMixin, generic.ListView):
template_name = "pages/index.html"
redirect_field_name = ''
model = Anime
def get_queryset(self):
"""Return 5 last anime"""
return Anime.objects.order_by('-released_date')[:5]
def register(req):
if req.user.is_authenticated:
return HttpResponseRedirect(reverse('home'))
if req.method == 'POST':
form = SignUpForm(req.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('login'))
else:
form = SignUpForm()
return render(req, 'registration/register.html', {
'form': form
})
class LoginView(views.LoginView):
form_class = LoginForm
template_name = 'registration/login.html'
class Meta:
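        # NOTE: an inner Meta class has no effect on a Django class-based view;
        # field selection belongs on LoginForm itself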
fields = ['username'] | [
"[email protected]"
] | |
5cfe770dc79080be825b5284d0244ccdc045ac0e | b359f3059cac0b6e9b27db7df531c043f1ce3d4c | /test_data2es.py | 78fe1c27703b207b154a124368fc3a81c84d7aa7 | [] | no_license | rotblade/csv2es | 454844311b4b76fddb2c59e78f883c9e85273c22 | 1743226c2b08118161265ddc426c1c351b1116f1 | refs/heads/master | 2021-04-30T07:46:32.684595 | 2018-02-13T08:09:36 | 2018-02-13T08:09:36 | 121,355,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | import csv
import json
import unittest
from utils import str_to_esfield, t2i, get_fieldnames, \
isperiod, time_interval, index_op, index_body
class TestOneGeneralFunc(unittest.TestCase):
def test_1_strconvert(self):
self.assertEqual(str_to_esfield('This is a--testString.!'),
'this_is_a_test_string')
def test_2_time2integer(self):
self.assertEqual(t2i('03:30:18'), 210)
def test_3_istimeintev(self):
self.assertTrue(isperiod('03:30:18'))
self.assertFalse(isperiod('03:30:18x'))
self.assertFalse(isperiod(''))
self.assertFalse(isperiod('160'))
self.assertFalse(isperiod(':'))
def test_4_timeinterval(self):
self.assertEqual(time_interval('1/17/2016 03:30:15 AM',
'1/17/2016 03:31:45 AM',
'%m/%d/%Y %I:%M:%S %p'), 1.5)
self.assertEqual(time_interval('1/17/2016 03:30:15 AM',
'1/17/2016 03:31:45 PM',
'%m/%d/%Y %I:%M:%S %p'), 721.5)
self.assertEqual(time_interval('1/aa/2016 03:30:15 AM',
'',
'%m/%d/%Y %I:%M:%S %p'), 0.0)
class TestTwoCsvFile(unittest.TestCase):
def setUp(self):
self.doc_file = open('test.csv', newline='')
def tearDown(self):
self.doc_file.close()
def test_1_fieldnames(self):
fieldnames = ['ticket_no', 'name', 'work_title', 'job_desc']
fields = get_fieldnames(self.doc_file)
self.assertEqual(len(fields), 4)
self.assertEqual(fields, fieldnames)
def test_2_row(self):
dict_row = {
'ticket_no': '10002',
'name': 'Zhang Gavin',
'job_desc': 'aaaa_',
'work_title': 'supervisor'
}
fields = get_fieldnames(self.doc_file)
dict_reader = csv.DictReader(self.doc_file, fields)
self.assertEqual(next(dict_reader), dict_row)
def test_3_op(self):
dict_action = {
'_index': 'qd',
'_type': 'ticket',
'_id': '10002',
'_source': {
'ticket_no': '10002',
'name': 'Zhang Gavin',
'job_desc': 'aaaa_',
'work_title': 'supervisor'
}
}
fields = get_fieldnames(self.doc_file)
dict_reader = csv.DictReader(self.doc_file, fields)
row = next(dict_reader)
meta = {
'index': 'qd',
'type': 'ticket',
'id': row[fields[0]]
}
self.assertEqual(index_op(row, meta), dict_action)
def test_4_body(self):
d = {
'properties': {
'ticket_no': {'type': 'integer'},
'name': {'type': 'string'},
'work_title': {'type': 'string'},
'job_desc': {'type': 'string'}
}
}
body = {
'mappings': {
'ticket': d
}
}
with open('test.json') as f:
mapping = json.loads(f.read())
self.assertEqual(index_body('ticket', mapping), body)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
056343cacdaddd53d8a581d0497978dd0f1f256f | 1d455c7173a692368d50e8f2bb4cc17a4e1907a0 | /Pdjango/AWS/UploadImage/serializer.py | 8c30f4914d6b1ab8f01051de28e9cc1df8ddaa94 | [
"Apache-2.0"
] | permissive | martin11106/amazonec2 | ec4652c976e11b901639ae3a28af2533aa8b7ac7 | 708399bfbbd27dd755c2b2371713fcd4ba497976 | refs/heads/master | 2020-08-27T11:50:43.080337 | 2019-10-25T03:29:01 | 2019-10-25T03:29:01 | 217,354,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from rest_framework import routers, serializers, viewsets
from UploadImage.models import UploadImage
class UploadImageSerializers(serializers.ModelSerializer):
class Meta:
model=UploadImage
fields=('__all__')#visulisar
| [
"[email protected]"
] | |
2cc7f466d91e7d724b471e72dd34875273300bf3 | 5311021d5a80dc073cb761778e1ae85cd25d6afe | /22-Algorithms/ChapterNotes.py | a2c23863d49db045955bf2dde9f8576cba44ff03 | [] | no_license | tomgonzo/Learning-PY | 83a3f61160acc65a2dcd0307ae336d479fdf3577 | 9a6580e8ab0f66be6999b3eb6cf56ea7edfaae4b | refs/heads/master | 2021-05-05T23:05:15.554585 | 2018-01-25T02:57:19 | 2018-01-25T02:57:19 | 116,344,019 | 1 | 0 | null | 2019-10-18T15:32:06 | 2018-01-05T05:00:49 | Python | UTF-8 | Python | false | false | 3,558 | py | #Chapter 22 - Algorithms - ChapterNotes.py
#An algorithm is a series of steps that solve a problem.
#Fizzbuzz!!!
#Fizzbuzz is a popular interview question designed to eliminate candidates.
#Write a program that prints the numbers 1-100
#For multiples of 3 print "Fizz" instead of the number
#For multiples of 5 print "Buzz" instead of the number
#For multiples of both 3 and 5 print "FizzBuzz"
def fizz_buzz():
for i in range(1,101):
if i % 3 == 0 and i % 5 == 0:
print("FizzBuzz")
elif i % 3 == 0:
print("Fizz")
elif i % 5 == 0:
print("Buzz")
else:
print(i)
### Sequential Search! ###
#A search algorithm finds information ina. data structure like a list.
#A sequential search is a simple search algorithm that checks each item
#in a data structure to see if the item matches what it's looking for.
#Example of Sequential Search:
def ss(number_list, n):
found = False
for i in number_list:
if i == n:
found = True
break
return found
numbers = range(0,100)
s1 = ss(numbers, 2)
print(s1)
s2 = ss(numbers, 202)
print(s2)
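#Python's built-in "in" operator performs the same membership test as the
#sequential search above:
print(2 in numbers)
print(202 in numbers)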
### Palindrome ###
#This algorithm checks if a word is a palindrome
def palindrome(word):
word = word.lower()
return word[::-1] == word
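#For example, palindrome("Racecar") returns True and palindrome("Python")
#returns False.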
while True:
word = input("Enter a word: ")
print(palindrome(word))
### Anagrams! ###
#Anagrams are recreated by rearranging the letters of another word.
#Determine if two words are anagrams by rearranging the letters of both
#in alphabetical order.
def anagram(w1, w2):
w1 = w1.lower()
w2 = w2.lower()
return sorted(w1) == sorted(w2)
in1 = input("Enter the first word: ")
in2 = input("Enter the second word: ")
print(anagram(in1, in2))
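#For example, anagram("Listen", "Silent") returns True.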
### Count Character Occurrences! ###
#This algorithm returns the number of times each character occurs
#It iterates one by one through each character, and keeps track
#of occurences in a dictionary
def count_characters(string):
count_dict = {}
for c in string:
if c in count_dict:
count_dict[c] += 1
else:
count_dict[c] = 1
print(count_dict)
word = input("Please enter a word: ")
count_characters(word)
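#The standard library can do this counting for us; a brief sketch (not from
#the original notes) using collections.Counter:
from collections import Counter
print(dict(Counter("mississippi")))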
### Recursion! ###
#Recursion is a method of solving problems by breaking the problem up
#into smaller pieces until it can be easily solved.
#Up to now we built iterative algorithms, they solve problems by
#repeating steps over and over, typically using a loop.
#Recursive Algorithms rely on functions that call themselves. Any
#problem that can be solved iteratively can be solved recursively.
#Recursive algorithms tend to be more elegant solutions.
#Recursive algorithms are written inside functions. The function must
#have a base case: a condition to end the algorithm so it doesn't run
#forever. Inside the function, the function calls itself. Each time it
#does, it gets closer to the base case.
#Eventually, the base case condition is satisfied, the problem is
#solved, and the function stops calling itself. An algorithm that
#follows these rules satisfies the three laws of recursion:
#1. A recursive algorithm must have a base case.
#2. A recursive algorithm must change its state and move toward
#the base case.
#3. A recursive algorithm must call itself, recursively.
#Example! Printing the lyrics to 99 bottles of beer on the wall.
def bottles_of_beer(bob):
if bob < 1:
print("No more bottles of beer on the wall. :(")
return
tmp = bob
bob -= 1
print(f"""{tmp} bottles of beer on the wall.
{tmp} bottles of beer. Take one down, pass it around,
{bob} bottles of beer on the wall.""")
bottles_of_beer(bob)
bottles_of_beer(99)
| [
"[email protected]"
] | |
fba9be84fa69074b96c2f2a81d06f46b69b1b01d | 81d289b7dd9e8038b5dc37312b1ac7929d1f5bd0 | /organization/models.py | 970853fb420a9952596308a219fc58ad3f85ae25 | [
"MIT"
] | permissive | paulmand3l/elevendance | 8630b59f31fca62a4a8834c15ffac285aa6e587a | feca80cdd35cec63faecd972eb7225c0a1269e08 | refs/heads/master | 2020-05-03T04:20:56.931659 | 2014-03-03T06:58:29 | 2014-03-03T06:58:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | import recurrence.fields
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
from django_extensions.db.fields import AutoSlugField
from localflavor.us.models import USStateField
from payroll.models import Formula
class Organization(models.Model):
name = models.CharField(max_length=200)
auto_slug = AutoSlugField(populate_from='name')
founded = models.DateField('date organization was founded', blank=True, null=True)
members = models.ManyToManyField(User, through='Membership')
    def isMember(self, person):
        # `members` is a queryset of Users, so match on the user's primary key
        return self.members.filter(pk=person.pk).exists()
def __str__(self):
return self.name
class Venue(models.Model):
organization = models.ForeignKey(Organization, related_name="venues")
name = models.CharField(max_length=200)
auto_slug = AutoSlugField(populate_from='name')
street1 = models.CharField(max_length=200, blank=True)
street2 = models.CharField(max_length=200, blank=True)
city = models.CharField(max_length=100, blank=True)
state = USStateField(blank=True)
zip_code = models.IntegerField(blank=True, null=True)
# Used when automatically generating Dance objects
default_price = models.IntegerField(blank=True, null=True)
default_price_low = models.IntegerField(blank=True, null=True)
default_price_high = models.IntegerField(blank=True, null=True)
default_start = models.TimeField(blank=True, null=True)
default_end = models.TimeField(blank=True, null=True)
members = models.ManyToManyField(User, through='Membership')
    def isMember(self, person):
        # match on the user's primary key; `person` is not a field on User
        return (self.members.filter(pk=person.pk).exists() or
                self.organization.members.filter(pk=person.pk).exists())
def __str__(self):
return self.name
class Role(models.Model):
organization = models.ForeignKey(Organization, blank=True, null=True)
venue = models.ForeignKey(Venue, blank=True, null=True)
name = models.CharField(max_length=100)
admin = models.BooleanField(default=False)
formula = models.OneToOneField(Formula)
def __str__(self):
return '%s at %s' % (self.name,
(self.venue and self.venue.name)
or self.organization.name)
class Membership(models.Model):
organization = models.ForeignKey(Organization, blank=True, null=True)
venue = models.ForeignKey(Venue, blank=True, null=True)
person = models.ForeignKey(User, related_name="memberships")
role = models.ForeignKey(Role, blank=True, null=True)
formula = models.OneToOneField(Formula, blank=True, null=True)
notes = models.TextField(blank=True)
def __str__(self):
if self.role:
return '%s is a %s' % (self.person.get_full_name(), self.role)
else:
return '%s is staff at %s' % (self.person.get_full_name(),
(self.venue and self.venue.name)
or (self.organization and self.organization.name))
admin.site.register(Organization)
admin.site.register(Venue)
admin.site.register(Membership)
| [
"[email protected]"
] | |
0063fe58651525ee438dc4988247a7e448145146 | b9a68a2135a92419203f78fbf98f67340ed04095 | /marathon_waypoint/scripts/base_waypoints.py | e0baa376a25de62a479a14d418764507f7f4f8ee | [] | no_license | szenergy/szenergy-utility-programs | 487002758f2edfe877b88fa0853931f862a3a8e1 | 2a3d5e0213a4d68afe1ea22f5f38d399791e7ec8 | refs/heads/master | 2023-05-27T16:14:17.140427 | 2023-05-18T14:58:31 | 2023-05-18T14:58:31 | 207,478,962 | 2 | 3 | null | 2022-06-03T10:34:47 | 2019-09-10T06:12:33 | Jupyter Notebook | UTF-8 | Python | false | false | 2,826 | py | #!/usr/bin/env python
from matplotlib import transforms
import rospy
from std_msgs.msg import String
from autoware_msgs.msg import Lane
from geometry_msgs.msg import PoseStamped, Point
from visualization_msgs.msg import Marker
import math
import tf2_ros
import tf2_geometry_msgs
current_pose = None
pub = None
marker_pub = None
tf_buffer = None
def current_pose_callback(msg):
global current_pose
current_pose = msg
def waypoints_callback(waypoints_msg):
global current_pose,pub,marker_pub,tf_buffer
euc_dist = 1000000
euc_index = 0
    # latest available transform from the map frame into the vehicle frame
    transform = tf_buffer.lookup_transform("base_link", "map", rospy.Time(0))
for i in range(0,len(waypoints_msg.waypoints)):
current_dist = math.sqrt(pow(current_pose.pose.position.x - waypoints_msg.waypoints[i].pose.pose.position.x,2) + pow(current_pose.pose.position.y - waypoints_msg.waypoints[i].pose.pose.position.y,2))
if current_dist < euc_dist:
euc_dist = current_dist
euc_index = i
    lane_sender = Lane()
    line_strip = Marker()
    # keep the next (up to) 99 waypoints starting from the closest one
    lane_sender.waypoints = waypoints_msg.waypoints[euc_index:euc_index+99]
    lane_sender.header = waypoints_msg.header
    # transform each remaining waypoint pose into the base_link frame
    for waypoint in lane_sender.waypoints:
        waypoint.pose = tf2_geometry_msgs.do_transform_pose(waypoint.pose, transform)
for waypoint in lane_sender.waypoints:
point = Point()
point.x = waypoint.pose.pose.position.x
point.y = waypoint.pose.pose.position.y
point.z = waypoint.pose.pose.position.z
line_strip.points.append(point)
line_strip.header = lane_sender.header
line_strip.type = Marker.SPHERE_LIST
line_strip.action = line_strip.ADD
line_strip.color.r = 0.0
line_strip.color.g = 0.0
line_strip.color.a = 1.0
line_strip.color.b = 1.0
line_strip.scale.x = 1.1
line_strip.scale.y = 1.1
line_strip.scale.z = 1.1
line_strip.pose.orientation.x = 0.0
line_strip.pose.orientation.y = 0.0
line_strip.pose.orientation.z = 0.0
line_strip.pose.orientation.w = 1.0
pub.publish(lane_sender)
marker_pub.publish(line_strip)
def talker():
    global pub, marker_pub, tf_buffer
rospy.init_node('waypoint_shortener', anonymous=True)
pub = rospy.Publisher('/base_waypoints', Lane, queue_size=1)
sub = rospy.Subscriber("/base_waypoints_raw",Lane,waypoints_callback,queue_size=1)
sub_current_pose = rospy.Subscriber("/current_pose",PoseStamped,current_pose_callback,queue_size=1)
marker_pub = rospy.Publisher("/original_waypoints",Marker,queue_size=1)
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0))
tf_listener = tf2_ros.TransformListener(tf_buffer)
rospy.spin()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass | [
"[email protected]"
] | |
e49e87cf1585357ecbb82fbfe88f473bd809164c | 204292cd802bf4e67a048db8687ced229c3af86b | /I0320008_Exercise9.10.py | f3ec31399de91ea4a8a658dc82b6223f5217e0b0 | [] | no_license | Alifianars03/I0320008_Alifiana-Rahma_Abyan_Tugas-9 | d5ff548be6c70eb8d6b621bd79bd2ac90a869f2e | c004c0d405c9fb68e3cbfdc8735a002db45811ea | refs/heads/main | 2023-04-12T04:22:20.818892 | 2021-04-30T15:19:21 | 2021-04-30T15:19:21 | 363,179,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import array
# define a function that sorts the array elements in place (bubble sort)
def sort(A):
i = 0
while i < len(A) - 1:
j = len(A) - 1
while j >= i + 1:
if A[j] < A[j - 1]:
temp = A[j]
A[j] = A[j - 1]
A[j - 1] = temp
j -= 1
i += 1
def main():
A = array.array('i', [50, 10, 30, 40, 20])
print("Sebelum diurutkan: ")
for nilai in A:
print("%d " % nilai, end=" ")
print("\n")
    # sort the array
sort(A)
print("Setelah diurutkan: ")
for nilai in A:
print("%d " % nilai, end=" ")
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
2568443a44d2f2073712d8d9fc8688892d4f3b12 | 4be879c3328cd2a645f1b332687e3c3272b08bd0 | /collegeSpider/spiders/ustc.py | 482c1bfc249eb1aac7dc39172c66e93fa06837d3 | [] | no_license | px5xp/spider | c0fb4ca90557f18a61e0d63d703edca74cdd0c60 | d4b9397a6e5c200ee06b9c1286e3191d0328b421 | refs/heads/master | 2020-05-07T14:00:24.271328 | 2019-04-10T12:05:06 | 2019-04-10T12:05:06 | 180,572,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | # -*- coding: utf-8 -*-
import scrapy
from collegeSpider.items import CollegespiderItem
class UstcSpider(scrapy.Spider):
name = 'ustc'
allowed_domains = ['news.ustc.edu.cn']
start_urls = ['http://news.ustc.edu.cn/xwbl/list.htm']
def parse(self, response):
        # each <li> in the news list is one item
li_list = response.xpath("//div[@id='wp_news_w3']/li")
for li in li_list:
item = CollegespiderItem()
item["title"] = li.xpath("./a/@title").extract_first()
item["publish_date"] = li.xpath("./span/text()").extract_first()
yield item
print(item)
        # pagination: locate the "next page" link
next_url = response.xpath("//li[@class='page_nav']/a[@class='next']/@href").extract_first()
print(next_url)
# if next_url is not None:
# next_url = "http://news.hfut.edu.cn/"+next_url
# yield scrapy.Request(
# next_url,
# callback=self.parse,
# dont_filter=True
# )
| [
"[email protected]"
] | |
bdf728bea198defc7f289c24ef9e72a6d289f754 | c579fc781ebfcb6706e99c8820137dc395a44984 | /proyecto/codigo/benchmarking/benchmark.py | 0eaa40b827fa5597281cccc170ba7ce7570a2709 | [] | no_license | mjaramillu/ST0245-032 | 62bc550e7ee5f00b8f1069c9b1fb5e74bce87c70 | babf3fd18628e989fb8bc680a20be827232515cf | refs/heads/master | 2020-06-20T19:06:05.041449 | 2019-10-29T01:37:20 | 2019-10-29T01:37:20 | 197,216,969 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | import matplotlib.pyplot as plt
import numpy as np
data = {}
with open("../benchmark", "r") as f:
    for l in f:
        cols = l.split(" ")
        if cols[0] not in data:
            data[cols[0]] = 0
        data[cols[0]] += float(cols[5]) / 100
names = []
averages = []
count = 0
for x,y in data.items():
print(x, y)
names.append(x)
averages.append(y)
plt.bar(count, [y], 0.4, label=x)
count+=1
plt.rcParams.update({'font.size': 18})
ax = plt.gca()
plt.yticks(fontsize=18)
ax.set_xticks(np.arange(len(names)))
ax.set_xticklabels(names, fontsize=18)
ax.set_title("Time taken to find best split")
ax.set_ylabel("Time (sec)", fontsize=18)
ax.set_xlabel("Dataset",fontsize=18)
plt.show()
| [
"[email protected]"
] | |
ce34e0f3ab218380d54095a7ccde3bd02b64147c | 5bff028053e815f0d760cab30aa63cf9d8363080 | /test/functional/wallet_bumpfee.py | cfaafe498fb561e7b2b59c942cfe7268ffcee3f2 | [
"MIT"
] | permissive | nyerium-core/nyerium | 8080dd349f5f5bcc69a46c93a01992b03830c117 | 0bc3b4da2c8cf1c96ab3910cff4c743ff09fcd1e | refs/heads/master | 2021-06-24T21:09:52.542446 | 2020-10-07T19:55:24 | 2020-10-07T19:55:24 | 128,843,519 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 13,549 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from test_framework.blocktools import send_to_witness
from test_framework.test_framework import NyeriumTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(NyeriumTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes(self.nodes[0], 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
# fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
blocktools.add_witness_commitment(block)
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
| [
"[email protected]"
] | |
ef44599a73be31089f46960667f7965618dd1508 | 664169ae450742292321751b4709395454333276 | /git_commands.py | 22b77e2f24ca10b03287a4d46311f4f04ee96503 | [] | no_license | KGODTAO/git_practice | fb9c715c4a3f6032a63f9ea8eeafe4843cd6ba2d | d626897a602f54c2357af365869645526e18859b | refs/heads/master | 2023-04-21T21:01:20.843181 | 2021-05-21T08:51:40 | 2021-05-21T08:51:40 | 369,470,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | git init - .git
git remote add <name> <url>
git pull origin master
git status
git add <file>
git commit -m "message"
git branch                    # list branches
git branch <branch-name>      # create a new branch
git checkout <branch-name>    # switch to a branch
git push origin master
git reset <filename>          # unstage a file
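# A typical first-push sequence built from the commands above (the remote
# URL below is a placeholder):
# git init
# git remote add origin https://github.com/<user>/<repo>.git
# git add .
# git commit -m "initial commit"
# git push origin master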
| [
"[email protected]"
] | |
997d0a19c362a6e5b2bc49d9edb9f33476a12db2 | deeca7b37d73f4dfe8c16df9c87e4565a2a5361c | /mod_home/home.py | 89330f80faaa8a1975634824f7f5d55f16981769 | [] | no_license | thiagosartor/Integrador_Equipe07 | f0c5fcd372e7177dac6049c43f3df86d6619c9d3 | ab14657b22047960993e4fa3747fc0ad4e5a727f | refs/heads/master | 2023-01-22T20:52:09.854970 | 2020-12-01T02:10:00 | 2020-12-01T02:10:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from flask import Blueprint, render_template
from mod_login.login import validaSessao
bp_home = Blueprint('home', __name__, url_prefix='/home', template_folder='templates')
@bp_home.route("/")
@validaSessao
def home():
return render_template("home.html") | [
"[email protected]"
] | |
edca8aa2ab4f9942f92cfd0ea06ce08206030690 | 5880e8c9b696a7021da7c7f1742349a3c93748d9 | /Proyecto 1/pruebas.py | dae7e03203d65bcbac701b4966cde89cebbd96ca | [] | no_license | antoniolj07/201900810_TareasLFP | 068bfdaab5e04e06586824d81ba674184c15452a | daa53c2cf34e2083aeb71ecb3fa37a9701df6528 | refs/heads/master | 2023-01-09T18:34:36.350331 | 2020-11-12T03:11:11 | 2020-11-12T03:11:11 | 286,088,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | objeto = {
'nombre_1': 'jorge',
'edad': '20',
'promedio-clase': '71',
'vivo': 'True'
}
keys = []
for key in objeto.keys():
keys.append(key)
print(keys)
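# The idiomatic shortcut produces the same list in one step:
keys_idiomatic = list(objeto.keys())
print(keys_idiomatic)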
| [
"[email protected]"
] | |
016de3502da414d0ad037a7d1341396097e20d27 | 112dbaab5ab30e91e3a86e844787dae1c25ecaae | /courses/algo_analysis_II/hw5_tsp/tsp.large.py | f3ae3ec3644275da4c0b464f11c23068e9c00672 | [] | no_license | moose-pop/learning | 9c88be2510664f3a219d374928b42ac615c3efb3 | 3ca8114f53aba86e0d03151ea22e1758c5398db1 | refs/heads/master | 2021-01-21T00:36:50.816766 | 2014-07-21T14:55:58 | 2014-07-21T14:55:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | import os, re, sys
import numpy as np
from math import sqrt
from itertools import combinations
import operator
import gc
finder = re.compile(r"-?\d+\.?\d*")
def gen_name(lst):
return reduce(lambda i, j: i+str(j), sorted(lst), '')
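# e.g. gen_name([2, 0, 1]) -> '012': a canonical string key for a vertex subset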
def post_processing (half_way, old, graph, dict):
bucket = {}
for i in half_way:
this = [0]
match = gen_name(list( (set(range(len(graph))) - set(half_way[i].keys())) | set(this)))
if len(graph)%2 == 1:
distance = min(half_way[i][m] + half_way[match][n] + \
dict[m, n] for m in half_way[i] for n in half_way[match])
else:
distance = min(half_way[i][m] + old[match][n] + \
dict[m, n] for m in half_way[i] for n in old[match])
bucket[distance] = [i, match]
    # return the minimum total length over all pairs of complementary half-routes
return min(bucket)
def tsp(graph):
# precompute the distance between nodes
dict = np.zeros((len(graph), len(graph)))
for i in range(len(graph)):
for j in range(i+1):
dict[i][j] = dict[j][i] = \
sqrt((graph[i][0]-graph[j][0])**2 + \
(graph[i][1]-graph[j][1])**2)
maximum = np.max(dict)*len(graph)*0.5
# initialize the starting array
old = {}
old['0'] = {0:0}
m = 2
while True:
sys.stdout.write("\rNow computing length: %s"%(m))
sys.stdout.flush()
gc.collect()
#current = filter(lambda x: 0 in x, combinations(range(len(graph)), m))
current_dict = {}
for s in combinations(range(len(graph)), m):
if 0 not in s: continue
cur_name = gen_name(s)
current_dict[cur_name] = {}
for j in s:
if j == 0: continue
temp = list(s)
temp.remove(j)
old_name = gen_name(temp)
current_dict[cur_name][j] = min(old[old_name][k]+dict[k,j] for k in temp if k!=j and k in old[old_name])
m += 1
if m >= len(graph)/2+2: break
old = {}
for i in current_dict:
old[i] = {}
temp = np.array(sorted(current_dict[i].iteritems(), key=operator.itemgetter(1))[:4])
for j in temp[:,0]:
old[i][j] = current_dict[i][j]
    # Dynamic programming over subsets only needs to reach size N/2: every
    # subset's complement (over the remaining vertices) is also in the table,
    # so the optimal tour is recovered by joining a half-route with its
    # complement in post_processing above.
return post_processing(current_dict, old, graph, dict)
def main():
import sys
assert len(sys.argv)==2, "The proper input format is: ~$ python SCRIPT.py data_file start_node"
filename = sys.argv[1]
data = []
with open(os.path.join(os.path.dirname(__file__), filename)) as datafile:
num = int(datafile.readline())
for row in datafile:
data.append([float(k) for k in finder.findall(row)])
print tsp(np.array(data))
if __name__ == "__main__":
main()
#26442.730309
| [
"[email protected]"
] | |
9478cdf288dc2dc7ba08e7f766c208f02106ad85 | fe108bfc4ac5206949d8ae9f787acf2e46b78bed | /code/最长回文子串.py | e6e311d90f53da16281673ee87530a07aeb39dab | [] | no_license | Lmyxxn/JZoffer | 88dc8ea71a72e4f1b6806de4e7af3323d6e12f7e | 12b8889eda79a0af72b9326982c597e807d1ccea | refs/heads/master | 2022-04-18T06:05:19.209089 | 2020-04-13T16:00:53 | 2020-04-13T16:00:53 | 255,370,848 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | ### 中心扩散法,核心是双指针,遍历一次字符串数组,时间复杂度o(n*n),空间o(1),labuladong的算法小抄里面的,讲得很好
class Solution:
def longestPalindrome(self, s: str) -> str:
s = list(s)
n = len(s)
res = []
for i in range(n):
s1 = self.Palindrome(i, i, s)
s2 = self.Palindrome(i, i + 1, s)
if len(res) < len(s1): res = s1
if len(res) < len(s2): res = s2
return ''.join(res)
def Palindrome(self, left, right, s):
while left >= 0 and right < len(s) and s[left] == s[right]:
left -= 1
right += 1
return s[left + 1:right]
| [
"[email protected]"
] | |
7081540fb1459cd7828c1989303485a8c6803b4d | c8a3e0308789c4558019949be10269b1ef6f012e | /test/unit/test_hpe3par_hostset.py | bf8befb1b2a211b2be541eddd5261d1e2a8e3cf3 | [] | no_license | HewlettPackard/hpe3par_ansible_module | 4b828d47eef24ee365032bac9f09160394483c78 | d346db61d60c19771e9be9e190a8d408093d971c | refs/heads/master | 2023-09-04T09:18:55.518339 | 2021-08-17T10:18:56 | 2021-08-17T10:18:56 | 132,178,404 | 18 | 37 | null | 2022-03-25T11:34:00 | 2018-05-04T19:00:43 | Python | UTF-8 | Python | false | false | 22,367 | py | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation. Alternatively, at your
# choice, you may also redistribute it and/or modify it under the terms
# of the Apache License, version 2.0, available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>
import mock
from Modules import hpe3par_hostset as hostset
import unittest
class TestHpe3parhostset(unittest.TestCase):
PARAMS_FOR_PRESENT = {'state': 'present', 'storage_system_username': 'USER',
'storage_system_ip': '192.168.0.1', 'storage_system_password': 'PASS',
'hostset_name': 'hostset', 'domain': 'domain', 'setmembers': 'new'}
fields = {
"state": {
"required": True,
"choices": ['present', 'absent', 'add_hosts', 'remove_hosts'],
"type": 'str'
},
"storage_system_ip": {
"required": True,
"type": "str"
},
"storage_system_username": {
"required": True,
"type": "str",
"no_log": True
},
"storage_system_password": {
"required": True,
"type": "str",
"no_log": True
},
"hostset_name": {
"required": True,
"type": "str"
},
"domain": {
"type": "str"
},
"setmembers": {
"type": "list"
}
}
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
def test_module_args(self, mock_module, mock_client):
"""
hpe3par host set - test module arguments
"""
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.return_value = mock_module
hostset.main()
mock_module.assert_called_with(
argument_spec=self.fields)
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.create_hostset')
def test_main_exit_functionality_success_without_issue_attr_dict(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - success check
"""
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
True, True, "Created hostset host successfully.", {})
hostset.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Created hostset host successfully.")
# AnsibleModule.fail_json should not be called
self.assertEqual(instance.fail_json.call_count, 0)
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.create_hostset')
def test_main_exit_functionality_success_with_issue_attr_dict(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - success check
"""
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
True, True, "Created hostset host successfully.", {"dummy": "dummy"})
hostset.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Created hostset host successfully.", issue={"dummy": "dummy"})
# AnsibleModule.fail_json should not be called
self.assertEqual(instance.fail_json.call_count, 0)
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.create_hostset')
def test_main_exit_functionality_fail(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - exit fail check
"""
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
False, False, "hostset creation failed.", {"dummy": "dummy"})
hostset.main()
# AnsibleModule.exit_json should not be activated
self.assertEqual(instance.exit_json.call_count, 0)
# AnsibleModule.fail_json should be called
instance.fail_json.assert_called_with(msg='hostset creation failed.')
# Create hostset
@mock.patch('Modules.hpe3par_hostset.client')
def test_create_hostset_username_empty(self, mock_client):
"""
hpe3par hostset - create a hostset
"""
result = hostset.create_hostset(
mock_client, None, None, None, None, None)
self.assertEqual(result, (
False,
False,
"Hostset create failed. Storage system username or password is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_create_hostset_hostname_empty(self, mock_client):
"""
hpe3par hostset - create a hostset
"""
result = hostset.create_hostset(
mock_client, "user", "pass", None, None, None)
self.assertEqual(result, (
False,
False,
"Hostset create failed. Hostset name is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_create_hostset_create_already_present(self, mock_client):
"""
hpe3par hostset - create a hostset
"""
result = hostset.create_hostset(
mock_client, "user", "pass", "host", None, None)
self.assertEqual(result, (True, False, "Hostset already present", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_create_hostset_create_exception_in_login(self, mock_client):
"""
hpe3par hostset - create a hostset
"""
mock_client.HPE3ParClient.login.side_effect = Exception(
"Failed to login!")
result = hostset.create_hostset(
mock_client.HPE3ParClient, "user", "password", 'hostset_name', None, None)
self.assertEqual(
result, (False, False, "Hostset creation failed | Failed to login!", {}))
@mock.patch('Modules.hpe3par_hostset.client')
    def test_create_hostset_create_success_login(self, mock_client):
        """
        hpe3par hostset - create a hostset (success path)
"""
mock_client.HPE3ParClient.hostSetExists.return_value = False
result = hostset.create_hostset(
mock_client.HPE3ParClient, "user", "password", "hostname", "domain", ["member1"])
self.assertEqual(
result, (True, True, "Created Hostset hostname successfully.", {}))
# Delete hostset
@mock.patch('Modules.hpe3par_hostset.client')
def test_delete_hostset_username_empty(self, mock_client):
"""
hpe3par hostset - delete a hostset
"""
result = hostset.delete_hostset(mock_client, None, None, None)
self.assertEqual(result, (
False,
False,
"Hostset delete failed. Storage system username or password is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_delete_hostset_hostname_empty(self, mock_client):
"""
hpe3par hostset - delete a hostset
"""
result = hostset.delete_hostset(mock_client, "user", "pass", None)
self.assertEqual(result, (
False,
False,
"Hostset delete failed. Hostset name is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_delete_hostset_create_already_present(self, mock_client):
"""
hpe3par hostset - delete a hostset
"""
mock_client.hostSetExists.return_value = False
mock_client.return_value = mock_client
result = hostset.delete_hostset(mock_client, "user", "pass", "host")
self.assertEqual(result, (True, False, "Hostset does not exist", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_delete_hostset_create_exception_in_login(self, mock_client):
"""
hpe3par hostset - delete a hostset
"""
mock_client.HPE3ParClient.login.side_effect = Exception(
"Failed to login!")
result = hostset.delete_hostset(
mock_client.HPE3ParClient, "user", "password", "hostname")
self.assertEqual(
result, (False, False, "Hostset delete failed | Failed to login!", {}))
@mock.patch('Modules.hpe3par_hostset.client')
    def test_delete_hostset_create_success_login(self, mock_client):
        """
        hpe3par hostset - delete a hostset (success path)
"""
mock_client.HPE3ParClient.hostSetExists.return_value = True
result = hostset.delete_hostset(
mock_client.HPE3ParClient, "user", "password", "hostname")
self.assertEqual(
result, (True, True, "Deleted Hostset hostname successfully.", {}))
# Add hosts to hostset.
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_hostset_username_empty(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
result = hostset.add_hosts(mock_client, None, None, None, None)
self.assertEqual(result, (
False,
False,
"Add host to hostset failed. Storage system username or password is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_hostset_hostname_empty(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
result = hostset.add_hosts(mock_client, "user", "pass", None, None)
self.assertEqual(result, (
False,
False,
"Add host to hostset failed. Hostset name is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_hostset_setmembers_empty(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
result = hostset.add_hosts(
mock_client, "user", "pass", "hostset", None)
self.assertEqual(result, (
False,
False,
"setmembers delete failed. Setmembers is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
    def test_add_host_to_hostset_hostset_create_success_login(self, mock_client):
        """
        hpe3par hostset - add hosts to a hostset (success path)
"""
result = hostset.add_hosts(
mock_client, "user", "pass", "host", ["members"])
self.assertEqual(result, (True, True, 'Added hosts successfully.', {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_hostset_create_exception_in_login(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
mock_client.HPE3ParClient.login.side_effect = Exception(
"Failed to login!")
result = hostset.add_hosts(
mock_client.HPE3ParClient, "user", "password", "host", ["members"])
self.assertEqual(
result, (False, False, "Add hosts to hostset failed | Failed to login!", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_hostset_doesnt_exists(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
mock_client.HPE3ParClient.hostSetExists.return_value = False
result = hostset.add_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(result, (False, False, "Hostset does not exist", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_No_new_members_to_add_to_the_Host_set(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
mock_client.HPE3ParClient.getHostSet.return_value.setmembers = [
"member1"]
result = hostset.add_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(
result, (True, False, "No new members to add to the Host set hostname. Nothing to do.", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_No_new_members_to_add_to_the_Host_set_login(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
mock_client.HPE3ParClient.getHostSet.return_value.setmembers = []
result = hostset.add_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(result, (True, True, 'Added hosts successfully.', {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_add_host_to_hostset_No_new_members_to_add_to_the_Host_set_login_setmembers_none(self, mock_client):
"""
        hpe3par hostset - add hosts to a hostset
"""
mock_client.HPE3ParClient.getHostSet.return_value.setmembers = None
result = hostset.add_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(result, (True, True, 'Added hosts successfully.', {}))
# Remove hosts from hostset.
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_hostset_username_empty(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
result = hostset.remove_hosts(mock_client, None, None, None, None)
self.assertEqual(result, (
False,
False,
"Remove host from hostset failed. Storage system username or password is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_hostset_hostname_empty(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
result = hostset.remove_hosts(mock_client, "user", "pass", None, None)
self.assertEqual(result, (
False,
False,
"Remove host from hostset failed. Hostset name is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_hostset_setmembers_empty(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
result = hostset.remove_hosts(
mock_client, "user", "pass", "hostset", None)
self.assertEqual(result, (
False,
False,
"setmembers delete failed. Setmembers is null",
{}))
@mock.patch('Modules.hpe3par_hostset.client')
    def test_remove_host_from_hostset_hostset_create_success_login(self, mock_client):
        """
        hpe3par hostset - remove hosts from a hostset (success path)
"""
mock_client.hostSetExists.return_value = True
mock_client.getHostSet.return_value.setmembers = ["members"]
mock_client.return_value = mock_client
result = hostset.remove_hosts(
mock_client, "user", "pass", "host", ["members"])
self.assertEqual(
result, (True, True, 'Removed hosts successfully.', {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_hostset_create_exception_in_login(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
mock_client.HPE3ParClient.login.side_effect = Exception(
"Failed to login!")
result = hostset.remove_hosts(
mock_client.HPE3ParClient, "user", "password", "host", ["members"])
self.assertEqual(
result, (False, False, "Remove hosts from hostset failed | Failed to login!", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_hostset_doesnt_exists(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
mock_client.HPE3ParClient.hostSetExists.return_value = False
result = hostset.remove_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(result, (True, False, "Hostset does not exist", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_No_new_members_to_remove_from_the_Host_set(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
mock_client.HPE3ParClient.getHostSet.return_value.setmembers = []
result = hostset.remove_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(
result, (True, False, "No members to remove from the Host set hostname. Nothing to do.", {}))
@mock.patch('Modules.hpe3par_hostset.client')
def test_remove_host_from_hostset_No_new_members_to_remove_from_the_Host_set_setmembers_none(self, mock_client):
"""
        hpe3par hostset - remove hosts from a hostset
"""
mock_client.HPE3ParClient.getHostSet.return_value.setmembers = None
result = hostset.remove_hosts(
mock_client.HPE3ParClient, "user", "password", "hostname", ["member1"])
self.assertEqual(
result, (True, True, 'Removed hosts successfully.', {}))
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.create_hostset')
def test_main_exit_functionality_success_without_issue_attr_dict_present(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - success check
"""
        # This creates an instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "present"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
True, True, "Created hostset host successfully.", {})
hostset.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Created hostset host successfully.")
# AnsibleModule.fail_json should not be called
self.assertEqual(instance.fail_json.call_count, 0)
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.delete_hostset')
    def test_main_exit_functionality_success_without_issue_attr_dict_absent(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - success check
"""
        # This creates an instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "absent"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
True, True, "Deleted hostset host successfully.", {})
hostset.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Deleted hostset host successfully.")
# AnsibleModule.fail_json should not be called
self.assertEqual(instance.fail_json.call_count, 0)
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.add_hosts')
def test_main_exit_functionality_success_without_issue_attr_dict_add_hosts(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - success check
"""
        # This creates an instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "add_hosts"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
True, True, "add_hosts hostset host successfully.", {})
hostset.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="add_hosts hostset host successfully.")
# AnsibleModule.fail_json should not be called
self.assertEqual(instance.fail_json.call_count, 0)
@mock.patch('Modules.hpe3par_hostset.client')
@mock.patch('Modules.hpe3par_hostset.AnsibleModule')
@mock.patch('Modules.hpe3par_hostset.remove_hosts')
def test_main_exit_functionality_success_without_issue_attr_dict_remove_hosts(self, mock_hostset, mock_module, mock_client):
"""
hpe3par hostset - success check
"""
        # This creates an instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "remove_hosts"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_hostset.return_value = (
True, True, "remove_hosts hostset host successfully.", {})
hostset.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="remove_hosts hostset host successfully.")
# AnsibleModule.fail_json should not be called
self.assertEqual(instance.fail_json.call_count, 0)
if __name__ == '__main__':
unittest.main(exit=False)
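# A minimal sketch (not part of the suite above) of driving these helpers
# directly; the fake client mirrors the mock patterns used in the tests,
# and the names/credentials are placeholders:
#
#   with mock.patch('Modules.hpe3par_hostset.client') as fake_client:
#       fake_client.hostSetExists.return_value = True
#       fake_client.getHostSet.return_value.setmembers = ["member1"]
#       ok, changed, msg, extra = hostset.remove_hosts(
#           fake_client, "user", "password", "hostname", ["member1"])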
| [
"[email protected]"
] | |
91df59fc8d8534dc32f43a9d23f76c9ed78d2e4d | da3ab1d78408ca1d5de37fb7d230b5c507d853b5 | /kevin/PointsInTumor.py | 258315205c48103ccdbcc7c1fdc8be095ce832fc | [] | no_license | kz26/petpeeve | 7f953324edbbffdb046118f295a4b35cc03be0db | 04357f526ec362a2ea04fe4ec8d209998a856018 | refs/heads/master | 2021-01-10T08:05:30.429735 | 2011-08-16T21:59:32 | 2011-08-16T21:59:32 | 48,551,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | #!/usr/bin/python
# Kevin Zhang
# 07/12/2011
# Reads in a text file of coordinate locations and calculates how many are true positives using a mask
# Arguments: coordinatesFile dcm_mask_dir
import os, sys, dicom, re
if len(sys.argv) != 3:
print "Usage: %s coordinatesFile dcm_mask_dir" % (sys.argv[0])
sys.exit(-1)
hits = 0
lines = 0
linepat = re.compile("[0-9]+")
f = open(sys.argv[1], 'r')
for line in f:
if not linepat.match(line.rstrip()): continue
l = line.rstrip().split(" ")
x = int(round(float(l[0])))
y = int(round(float(l[1])))
z = "%04d" % (int(round(float(l[2]))))
mfp = os.path.join(sys.argv[2], "Img001_%s" % (z))
if not os.path.exists(mfp):
print "WARNING: %s not found, skipping" % (mfp)
continue
ds = dicom.read_file(mfp)
if ds.pixel_array[x][y] != 0:
print line,
hits += 1
lines += 1
print "\n# of TPs: %s" % (hits)
print "# of FPs: %s" % (lines - hits)
print "Total # of points: %s" % (lines)
| [
"[email protected]"
] | |
853be31b8f040ea8e1a3ee8140e4a31ba13b66ab | 7a42d40a351824464a3c78dc0c3e78bbd8e0a92f | /derek_blog/django_comments/views/comments.py | 385d97dbeca6f2d9f35dacb2707ad16c96e412d4 | [] | no_license | AhMay/DerekBlogLearn | 6595063eafbc237b932e187b5cb3ad8ff32637fc | fdd5ea2fc5732cdc82ad006f7be0a2a1f30d0ba9 | refs/heads/master | 2020-07-09T05:20:33.283672 | 2019-09-29T10:10:23 | 2019-09-29T10:10:23 | 203,891,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | from __future__ import absolute_import
from django import http
from django.apps import apps
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.html import escape
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
import django_comments
from django_comments import signals
from django_comments.views.utils import next_redirect, confirmation_view
class CommentPostBadRequest(http.HttpResponseBadRequest):
"""
Response returned when a comment post is invalid. If ``DEBUG`` is on a
nice-ish error message will be displayed (for debugging purposes), but in
production mode a simple opaque 400 page will be displayed.
"""
def __init__(self, why):
super(CommentPostBadRequest, self).__init__()
if settings.DEBUG:
self.content = render_to_string("comments/400-debug.html", {"why": why})
@csrf_protect
@require_POST
def post_comment(request, next=None, using=None):
"""
Post a comment.
HTTP POST is required. If ``POST['submit'] == "preview"`` or if there are
errors a preview template, ``comments/preview.html``, will be rendered.
"""
# Fill out some initial data fields from an authenticated user, if present
data = request.POST.copy()
if request.user.is_authenticated:
if not data.get('name', ''):
data["name"] = request.user.get_full_name() or request.user.get_username()
if not data.get('email', ''):
data["email"] = request.user.email
#May 添加
if not request.session.get('login',None) and not request.user.is_authenticated:
return redirect("/")
# Look up the object we're trying to comment about
ctype = data.get("content_type")
object_pk = data.get("object_pk")
if ctype is None or object_pk is None:
return CommentPostBadRequest("Missing content_type or object_pk field.")
try:
model = apps.get_model(*ctype.split(".", 1))
target = model._default_manager.using(using).get(pk=object_pk)
except TypeError:
return CommentPostBadRequest(
"Invalid content_type value: %r" % escape(ctype))
except AttributeError:
return CommentPostBadRequest(
"The given content-type %r does not resolve to a valid model." % escape(ctype))
except ObjectDoesNotExist:
return CommentPostBadRequest(
"No object matching content-type %r and object PK %r exists." % (
escape(ctype), escape(object_pk)))
except (ValueError, ValidationError) as e:
return CommentPostBadRequest(
"Attempting go get content-type %r and object PK %r exists raised %s" % (
escape(ctype), escape(object_pk), e.__class__.__name__))
# Do we want to preview the comment?
preview = "preview" in data
# Construct the comment form
form = django_comments.get_form()(target, data=data)
# Check security information
if form.security_errors():
return CommentPostBadRequest(
"The comment form failed security verification: %s" % escape(str(form.security_errors())))
# If there are errors or if we requested a preview show the comment
if form.errors or preview:
template_list = [
# These first two exist for purely historical reasons.
# Django v1.0 and v1.1 allowed the underscore format for
# preview templates, so we have to preserve that format.
"comments/%s_%s_preview.html" % (model._meta.app_label, model._meta.model_name),
"comments/%s_preview.html" % model._meta.app_label,
# Now the usual directory based template hierarchy.
"comments/%s/%s/preview.html" % (model._meta.app_label, model._meta.model_name),
"comments/%s/preview.html" % model._meta.app_label,
"comments/preview.html",
]
return render(request, template_list, {
"comment": form.data.get("comment", ""),
"form": form,
"next": data.get("next", next),
},
)
# Otherwise create the comment
comment = form.get_comment_object(site_id=get_current_site(request).id)
comment.ip_address = request.META.get("REMOTE_ADDR", None) or None
# if request.user.is_authenticated:
# comment.user = request.user
#May 添加
if request.session.get('login',None):
comment.user_name = request.session['screen_name']
comment.user_img = request.session['profile_image_url']
# Signal that the comment is about to be saved
responses = signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
for (receiver, response) in responses:
if response is False:
return CommentPostBadRequest(
"comment_will_be_posted receiver %r killed the comment" % receiver.__name__)
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
return next_redirect(request, fallback=next or 'comments-comment-done',
c=comment._get_pk_val())
comment_done = confirmation_view(
template="comments/posted.html",
doc="""Display a "comment was posted" success page."""
)
| [
"[email protected]"
] | |
93e49c0e51acf7546169651dc2c3db6f974afae5 | f9b5afbbdeb62b06ad40a6b7497172adfc8e5377 | /LockingSimultaneousAccess/library/get_epoch.py | f975bb2119c9d277f9d1b3f16842e1e4c49ee31c | [
"MIT"
] | permissive | david-vaczi/DeveloperRecipes | 1987fed58aec350270c8d23afb53f47260d285e9 | b334214fd1e66478c1be26d881fb6a4247a3c35f | refs/heads/master | 2023-09-04T03:38:49.996511 | 2021-10-26T12:52:33 | 2021-10-26T12:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/python3
DOCUMENTATION = '''
---
module: get_epoch
short_description: Get the epoch
'''
RETURN = '''
epoch:
description: Epoch
type: int
returned: always
'''
import time
from ansible.module_utils.basic import AnsibleModule
def main():
fields = {
}
module = AnsibleModule(argument_spec=fields)
result=int(time.time())
module.exit_json(changed=True, epoch=result)
if __name__ == '__main__':
main()
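# Example playbook usage (a sketch; the task names are placeholders,
# and the module itself takes no arguments):
#
#   - name: Read the current epoch
#     get_epoch:
#     register: epoch_result
#
#   - name: Show it
#     debug:
#       msg: "Seconds since 1970: {{ epoch_result.epoch }}"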
| [
"[email protected]"
] | |
ad474fb9e014b5f5901e785c5b93ded8bc42856f | 68ccd5a88879d6b1d276a1150361d08b00863bf1 | /lab01/task.py | 654cbb2686d400f0656b83477a706a36af3cffbb | [] | no_license | nikitagolnyak/ML | 2ea2803b9092e9638d554e306d957970d363c622 | 5009c0d1da480c2ce460c3816d79d3530940c344 | refs/heads/master | 2022-04-19T23:34:18.722212 | 2020-04-10T16:22:32 | 2020-04-10T16:22:32 | 254,682,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | import pandas as pd
from sklearn.model_selection import LeaveOneOut
from lab01.normal import *
from lab01.one_hot import *
kernels = ["uniform", "gaussian", "triangular",
"epanechnikov", "quartic", "triweight",
"tricube", "cosine", "sigmoid", "logistic"]
distances = ["manhattan", "euclidean", "chebyshev"]
class Combination:
def __init__(self, distance, kernel, measure, h):
self.distance = distance
self.kernel = kernel
self.measure = measure
self.k = h
def __str__(self):
return self.distance + ' ' + self.kernel + ' ' \
+ str(self.measure) + " h " + str(self.k)
def a_h(X, Y, q, kernel, distance, h):
numerator = 0
denominator = 0
if h == 0:
h = 0.0001
size = len(X)
# Y = Y.flatten()
q = q.flatten()
for i in range(size):
numerator = numerator + Y[i] * kernel(distance(q, X[i].flatten()) / h)
denominator = denominator + kernel(distance(q, X[i].flatten()) / h)
if denominator == 0:
alpha = 0
return alpha
else:
alpha = numerator / denominator
return alpha
def a_k(X, Y, q, kernel, distance, neighbours):
numerator = 0
denominator = 0
for i in range(len(X)):
numerator = numerator + Y[i] * kernel(distance(q, X[i]) / neighbours)
denominator = denominator + kernel(distance(q, X[i]) / neighbours)
if denominator == 0:
alpha = 0
return alpha
else:
alpha = numerator / denominator
return alpha
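# Both a_h and a_k implement Nadaraya-Watson kernel regression: the answer
# for a query q is a kernel-weighted average of the training targets, with
# the window given either as a fixed bandwidth h or a neighbour distance.
# A toy sketch (numbers are illustrative, not from the lab data):
#
#   q_gauss = lambda r: np.exp(-0.5 * r * r)
#   q_manh = lambda a, b: np.abs(a - b).sum()
#   X_toy = np.array([[0.0], [1.0], [2.0]])
#   Y_toy = np.array([0.0, 1.0, 4.0])
#   print(a_h(X_toy, Y_toy, np.array([1.5]), q_gauss, q_manh, 1.0))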
def normalize(df):
result = df.copy()
for feature_name in df.columns:
max_value = df[feature_name].max()
min_value = df[feature_name].min()
result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)
return result
def f_measure(matrix):
n_class = len(matrix)
t = []
c = [0] * n_class
p = []
for i in range(n_class):
arr = matrix[i]
cur_sum = 0
for j in range(n_class):
cur = int(arr[j])
cur_sum += cur
if i == j:
t.append(cur)
if j == n_class - 1:
p.append(cur_sum)
c[j] += cur
precs = 0
    total = sum(p)
micro = 0
for i in range(n_class):
if p[i] == 0:
recall = 0
else:
recall = t[i] / p[i]
precs += (t[i] * c[i]) / p[i]
if c[i] == 0:
precision = 0
else:
precision = t[i] / c[i]
if recall + precision == 0:
fc = 0
else:
fc = (2.0 * recall * precision) / (recall + precision)
        micro += (c[i] * fc) / total
    w_recall = sum(t) / total
    w_prec = precs / total
return 2.0 * (w_prec * w_recall) / (w_prec + w_recall)
if __name__ == '__main__':
print("Use one hot?")
ans = str(input())
df = pd.read_csv('data.csv', sep=',')
loo = LeaveOneOut()
if ans == "y":
loo.get_n_splits(df)
one_hot = pd.get_dummies(df['class'])
df = df.drop('class', axis=1)
df = df.join(one_hot)
df = normalize(df).to_numpy()
X, y = np.split(df, [-3], axis=1)
use_a_k_one_hot(loo, df, X, y)
# use_a_h_one_hot(loo, df, X, y)
else:
# df['class'] = pd.Categorical(df['class']).codes
df = normalize(df).to_numpy()
X, y = np.split(df, [-1], axis=1)
print(y)
labels = np.unique(y, axis=1)
use_a_k_normal(loo, df, X, y, labels)
# use_a_h_normal(loo, df, X, y, labels)
| [
"[email protected]"
] | |
05b6dfaf90a2830649b9dfb154d8db9de0010111 | e2a5912d65e65a1d1e30eb5163ec65d6da34e28d | /src/tf_nn/tf_utils_fr.py | e3c9c450e53079ef92ddad4c8a3ac57d2ab2bb53 | [] | no_license | zach-dev/face_classification | 57ec9414493d1ac2fe4910dd78ae6e67ac60c0de | 4f4e8575fcaee589a88240d350c1fb053e495fbe | refs/heads/master | 2020-03-08T23:51:32.435393 | 2018-04-09T01:57:01 | 2018-04-09T01:57:01 | 128,474,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,098 | py | import h5py
import numpy as np
import tensorflow as tf
import math
def load_dataset():
train_dataset = h5py.File('/Users/zwilson/school/coursera/deep_learning/code/tensorflow/data/train_signs.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('/Users/zwilson/school/coursera/deep_learning/code/tensorflow/data/test_signs.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def create_placeholders(n_x, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
n_y -- scalar, number of classes (from 0 to 5, so -> 6)
Returns:
X -- placeholder for the data input, of shape [n_x, None] and dtype float
Y -- placeholder for the input labels, of shape [n_y, None] and dtype float
notes:
- use None because it allows flexibility on the number of examples used for the placeholders
for example, the number of examples during test/train is different
"""
X = tf.placeholder(dtype="float", shape=[n_x, None], name='X')
Y = tf.placeholder(dtype="float", shape=[n_y, None], name='Y')
return X, Y
def initialize_parameters():
"""
initializes parameters to build a neural network with tensorflow - the shapes are:
W1 : [64, 128]
b1 : [64, 1]
W2 : [32, 64]
b2 : [32, 1]
W3 : [16, 32]
b3 : [16, 1]
W4 : [12, 16]
b4 : [12, 1]
Returns:
parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
"""
tf.set_random_seed(1)
W1 = tf.get_variable("W1", [64, 128], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b1 = tf.get_variable("b1", [64, 1], initializer=tf.zeros_initializer())
W2 = tf.get_variable("W2", [32, 64], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b2 = tf.get_variable("b2", [32, 1], initializer=tf.zeros_initializer())
W3 = tf.get_variable("W3", [16, 32], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b3 = tf.get_variable("b3", [16, 1], initializer=tf.zeros_initializer())
W4 = tf.get_variable("W4", [12, 16], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b4 = tf.get_variable("b4", [12, 1], initializer=tf.zeros_initializer())
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3,
"W4": W4,
"b4": b4}
return parameters
def forward_propagation(X, parameters):
"""
implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2, b2, W3, b3
the shapes are given in initialize_parameters
returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
W4 = parameters['W4']
b4 = parameters['b4']
Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2
# A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
A2_pre_do = tf.nn.relu(Z2) # A2 = relu(Z2)
# Dropout on hidden layer: RELU layer
A2 = tf.nn.dropout(A2_pre_do, keep_prob=0.73, seed=42)
Z3 = tf.add(tf.matmul(W3, A2), b3) # Z2 = np.dot(W2, a1) + b2
A3 = tf.nn.relu(Z3) # A2 = relu(Z2)
# A3_pre_do = tf.nn.relu(Z3) # A2 = relu(Z2)
# A3 = tf.nn.dropout(A3_pre_do, keep_prob=0.95, seed=43)
Z4 = tf.add(tf.matmul(W4, A3), b4) # Z3 = np.dot(W3,Z2) + b3
return Z4
def compute_cost(Z4, Y):
"""
computes the cost
arguments:
Z4 -- output of forward propagation (output of the last LINEAR unit), of shape (12, number of examples)
Y -- "true" labels vector placeholder, same shape as Z4
returns:
cost - Tensor of the cost function
"""
# to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)
logits = tf.transpose(Z4)
labels = tf.transpose(Y)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z4, labels=Y))
return cost
def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
mini_batch_size - size of the mini-batches, integer
seed -- this is only for the purpose of grading, so that you're "random minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
m = X.shape[1] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(
m / mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size: k * mini_batch_size + mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size: k * mini_batch_size + mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: m]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: m]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def convert_to_one_hot(Y, C):
Y = np.eye(C)[Y.reshape(-1)].T
return Y
def predict(X, parameters):
W1 = tf.convert_to_tensor(parameters["W1"])
b1 = tf.convert_to_tensor(parameters["b1"])
W2 = tf.convert_to_tensor(parameters["W2"])
b2 = tf.convert_to_tensor(parameters["b2"])
W3 = tf.convert_to_tensor(parameters["W3"])
b3 = tf.convert_to_tensor(parameters["b3"])
W4 = tf.convert_to_tensor(parameters["W4"])
b4 = tf.convert_to_tensor(parameters["b4"])
params = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3,
"W4": W4,
"b4": b4}
x = tf.placeholder("float", [128, 1])
z4 = forward_propagation_for_predict(x, params)
p = tf.argmax(z4)
sess = tf.Session()
prediction = sess.run(p, feed_dict={x: X})
return prediction
def forward_propagation_for_predict(X, parameters):
"""
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
W4 = parameters['W4']
b4 = parameters['b4']
# Numpy Equivalents:
Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3) # Z2 = np.dot(W2, a1) + b2
A3 = tf.nn.relu(Z3) # A3 = relu(Z3)
Z4 = tf.add(tf.matmul(W4, A3), b4) # Z3 = np.dot(W4,Z3) + b4
return Z4
| [
"[email protected]"
] | |
287169d13572d17d23c6c809bbbffa7cb35f30a5 | fca4d4398490b596158092407fc754de6bb1d836 | /sim.py | 7654ba9ea4e9c18777bebed68b9ba8f0934d1d1d | [] | no_license | Arash67/SimPyInt | 8b021bc210c6eefc8e54c947f5d2e489633859f4 | c05d1a9580edb83dd3b006115acdb8a146f14fb1 | refs/heads/main | 2023-08-18T21:19:14.813904 | 2021-10-08T02:49:18 | 2021-10-08T02:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,826 | py | #!/usr/bin/env python3
# this code is developed to tune BC parameters to match target in the output using SimVascular 1D solver
import os
from pathlib import Path
import sv
import sys
import vtk
# A: ================================================ CODE INPUTS ============================================
# A1: python script directory
script_dir = os.path.realpath('/Desktop/SimVasSim')
# A2: input files directory
# input_dir = os.path.join(script_dir,'input')
input_dir = script_dir
print(input_dir)
# A3: output file directory
# output_dir = os.path.join(script_dir,'output')
output_dir = script_dir
print(output_dir)
# A4: inlet/outlet face names
inlet_face_name_AA = 'cap_S01_AA'
outlet_face_name_RS = 'cap_S02_RS'
outlet_face_name_RC = 'cap_S03_RC'
outlet_face_name_LC = 'cap_S04_LC'
outlet_face_name_LS = 'cap_S05_LS'
outlet_face_name_DA = 'cap_S01_AA'
# A5: other names
mdlname = "Control"
flowwf_file_name = 'inflow.flow'
# B: ========================================== CREATE ROM SIMULATION =========================================
rom_simulation = sv.simulation.ROM()
# C: ===================================== CREATE ROM SIMULATION PARAMETERS ===================================
params = sv.simulation.ROMParameters()
# D: ============================================= MESH PROPERTIES ===========================================
mesh_params = params.MeshParameters()
# E ======================================== CREATE MODEL PARAMETERS =========================================
model_params = params.ModelParameters()
model_params.name = mdlname
model_params.inlet_face_names = [inlet_face_name_AA ]
model_params.outlet_face_names = [outlet_face_name_RS, outlet_face_name_RC, outlet_face_name_LC, outlet_face_name_LS, outlet_face_name_DA]
model_params.centerlines_file_name = os.path.join(input_dir, 'centerlines.vtp')
# F: ============================================ FLUID PROPERTIES ============================================
fluid_props = params.FluidProperties()
# G: ============================================= WALL PROPERTIES ============================================
print("Set wall properties ...")
material = params.WallProperties.OlufsenMaterial()
print("Material model: {0:s}".format(str(material)))
# H: ========================================== SET BOUNDARY CONDITION ========================================
bcs = params.BoundaryConditions()
# H1: inlet flow wave form
bcs.add_velocities(face_name=inlet_face_name_AA, file_name=os.path.join(input_dir,flowwf_file_name))
# H2: outlet RCRs
# H2a: arch branches
bcs.add_rcr(face_name=outlet_face_name_RS, Rp=4498.42, C=2.66239e-06, Rd=85470)
bcs.add_rcr(face_name=outlet_face_name_RC, Rp=4398.69, C=2.72275e-06, Rd=83575.2)
bcs.add_rcr(face_name=outlet_face_name_LC, Rp=4373.94, C=2.73816e-06, Rd=83104.8)
bcs.add_rcr(face_name=outlet_face_name_LS, Rp=4299.51, C=2.78556e-06, Rd=81690.7)
# H2b: descending aorta
bcs.add_rcr(face_name=outlet_face_name_DA, Rp=1517.72, C=7.89114e-06, Rd=28836.7)
# I: ========================================== SET SOLUTION PARAMETERS ======================================
solution_params = params.Solution()
solution_params.time_step = 0.0005
solution_params.num_time_steps = 660
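# Note: the simulated interval is time_step * num_time_steps
# = 0.0005 s * 660 = 0.33 s.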
# J: ======================================== WRITE SOLVER INPUT FILES =======================================
#script_dir = '/Desktop/SimVascular/GeneralizedCoAmodel/PyInterface/Example01/simulation.py'
#script_path = Path(os.path.realpath(script_dir)).parent
#output_dir = str(script_path / 'output')
rom_simulation.write_input_file(model_order=1, model=model_params, mesh=mesh_params, fluid=fluid_props, material=material, boundary_conditions=bcs, solution=solution_params, directory=output_dir)
| [
"[email protected]"
] | |
58bad32110dab6b2ff793befbea5142a61977784 | 70a50249cc740ecd7f2df0852f9fa42e1695d3ed | /src/app/views/login.py | fb68f173c5ff7986f4abc6cec5589535fe30680f | [] | no_license | Everaert-K/SecureApplication | b93283560ddad0e3708348ae27263ce4501462ca | 976427adff72f481e18eb2bba68b27eb3ea7ce17 | refs/heads/master | 2022-12-18T04:50:20.269215 | 2020-09-18T12:13:18 | 2020-09-18T12:13:18 | 296,605,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | import web
from views.forms import login_form
import models.user, models.lockout
from views.forms import changepassword_form
from views.utils import get_nav_bar
import os, hmac, base64 #, pickle
import json
import hashlib
import logging
import bcrypt
import struct, time
from views.login_utils import *
import views.login_state as ls
# Get html templates
render = web.template.render('templates/')
# Log config
logging.basicConfig(filename='beelance.log',level=logging.INFO, format='%(asctime)s %(message)s')
class Login():
# Get the server secret to perform signatures
secret = web.config.get('session_parameters')['secret_key']
def GET(self):
"""
Show the login page
:return: The login page showing other users if logged in
"""
session = web.ctx.session
nav = get_nav_bar(session)
# Log the user in if the rememberme cookie is set and valid
self.check_rememberme()
return render.login(nav, login_form, "")
def POST(self):
"""
Log in to the web application and register the session
:return: The login page showing other users if logged in
"""
session = web.ctx.session
nav = get_nav_bar(session)
data = web.input(username="", password="", remember=False, twoFactor="")
app = ls.AppData(
session,
nav,
data,
render
)
if ls.tryLogin(app):
self.login(
data.username,
models.user.get_user_id_by_name(data.username),
data.remember
)
raise web.seeother("/")
else:
return app.return_value
def login(self, username, userid, remember):
"""
Log in to the application
"""
session = web.ctx.session
session.username = username
session.userid = userid
logging.info("LOGIN success: %s | True | %s" % (session.username, session.ip))
if remember:
rememberme = self.rememberme()
# web.setcookie('remember', rememberme , 600, None, True)
web.setcookie('remember', rememberme, 600)
def check_rememberme(self):
"""
Validate the rememberme cookie and log in
"""
username = ""
sign = ""
# If the user selected 'remember me' they log in automatically
try:
# Fetch the users cookies if it exists
cookies = web.cookies()
# Fetch the remember cookie and convert from string to bytes
remember_hash = bytes(cookies.remember[2:][:-1], 'ascii')
# Decode the hash
decode = base64.b64decode(remember_hash)
# Load the decoded hash to receive the host signature and the username
# username, sign = pickle.loads(decode)
            username, sign = json.loads(decode)
except AttributeError as e:
# The user did not have the stored remember me cookie
pass
# If the users signed cookie matches the host signature then log in
if self.sign_username(username) == sign:
userid = models.user.get_user_id_by_name(username)
self.login(username, userid, False)
def rememberme(self):
"""
Encode a base64 object consisting of the username signed with the
host secret key and the username. Can be reassembled with the
hosts secret key to validate user.
:return: base64 object consisting of signed username and username
"""
session = web.ctx.session
creds = [ session.username, self.sign_username(session.username) ]
# return base64.b64encode(pickle.dumps(creds))
        return base64.b64encode(json.dumps(creds).encode('ascii'))
@classmethod
def sign_username(self, username):
"""
Sign the current users name with the hosts secret key
:return: The users signed name
"""
secret = base64.b64decode(self.secret)
return hmac.HMAC(secret, username.encode('ascii')).hexdigest()
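    # A quick illustration of the signing round-trip (usernames are
    # placeholders): the cookie is accepted only if the HMAC recomputed
    # from the stored username matches the signature it carries.
    #
    #   token = Login.sign_username('alice')
    #   Login.sign_username('alice') == token    # True (same secret key)
    #   Login.sign_username('mallory') == token  # False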
| [
"[email protected]"
] | |
893322f8686b41ec344a6a46eeb4d4594104f94d | 3989a0e658f79aef096fb2ccdaf546104a09b45b | /post/forms.py | a4261b4156e172b6e1c49de7dfef785ef64fad4b | [] | no_license | chanathip2323/project1 | 8f5bb7c541741ee7989f179740ea1fb1c15dad6f | 9808aecee1b6e60aa8ca3fc03e444183c9a880af | refs/heads/main | 2023-05-13T21:54:05.839045 | 2021-06-04T09:06:06 | 2021-06-04T09:06:06 | 373,782,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
        fields = ("title", "description")
"[email protected]"
] | |
973db55e5433a85caa05a78824d960ca8e1087df | 29ad25d1f7181eb3e321783b441e84fce877e79f | /others/recipt.py | f1072c0d72d3f8f0d767fa2596e57d37714a91f7 | [] | no_license | sacrrie/reviewPractices | 376a5f7106d1319b36f663127808dd19bd2bc486 | 946599119f5d9e0b00bc49b81767536766d5c46c | refs/heads/master | 2021-07-15T23:05:33.727771 | 2020-06-21T04:29:50 | 2020-06-21T04:29:50 | 159,460,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,569 | py | import sys
def main(lines):
    # This is sample code that uses stdin and stdout.
    # Edit or remove it as you like.
for i, v in enumerate(lines):
print("line[{0}]: {1}".format(i, v))
#function to process serial numbers before discounting
class register:
def __init__(self):
self.serial=[]
self.prices={}
self.point_card=set()
self.points={}
self.bucket=[]
self.card=False
self.user=None
def init(self,info):
for i in info:
if len(i)>1:
self.serial.append(i[0])
self.prices[i[0]]=i[1]
else:
self.serial.append(i[0])
def checksum(self,barcode):
if int(barcode)%10 != 0:
return False
return True
def check_inventory(self,barcode):
if barcode[:2]=='02':
if barcode[2:7] not in self.serial:
return False
return True
else:
if barcode[0:12] not in self.serial:
return False
return True
def original_price(self, barcode):
if barcode[:2]=='02':
price=int(barcode[7:12])
else:
            price=int(self.prices[barcode[:12]])
return price
def discount_price(self,barcode):
if len(barcode)==15:
ratio=100-int(barcode[12:14])
if ratio<0:
return False
else:
return((ratio/100.0)*self.original_price(barcode))
else:
original=self.original_price(barcode)
discount=int(barcode[12:17])
if discount>original:
return False
else:
return(original-discount)
def error_check(self,barcode):
res=[]
if (len(barcode) in (15,18)) and (self.check_inventory(barcode)) and (self.checksum(barcode)):
if (not self.discount_price(barcode)):
res.append('1')
if (len(barcode) not in (13,15,18)) or (not self.checksum(barcode)) or (not self.check_inventory(barcode)):
res.append('2')
return res
def scan(self,barcode):
err=self.error_check(barcode)
if len(err)>0:
print("staff call: "," ".join(err))
return False
if len(barcode)==13:
self.bucket.append(self.original_price(barcode))
else:
self.bucket.append(self.discount_price(barcode))
    # reads a membership (point) card barcode and registers the user;
    # named scan_card so it does not collide with the self.card flag
    def scan_card(self,barcode):
if len(barcode)>1:
self.card=True
self.user=barcode[1]
if barcode[1] not in self.point_card:
self.point_card.add(barcode[1])
self.points[barcode[1]]=0
    def final(self):
        payments = sum(self.bucket)
        if self.card:
            pts = self.points[self.user]
            payments = payments - pts
            if payments < 0:
                self.points[self.user] = abs(payments)
                payments = 0
            else:
                self.points[self.user] = 0
            self.points[self.user] += payments // 100
        self.bucket = []
        self.card = False
        self.user = None
        return payments
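# A minimal walk-through (the barcode below is illustrative, not a real EAN):
#
#   r = register()
#   r.init([["490123456789", "120"]])   # 12-digit serial plus its price
#   r.scan("4901234567890")             # serial + trailing check digit
#   print(r.final())                    # -> 120 (no point card scanned)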
if __name__ == '__main__':
lines = []
for l in sys.stdin:
t=l.rstrip('\r\n')
t=t.split()
#t=[int(x) for x in t]
lines.append(t)
main(lines)
| [
"[email protected]"
] | |
c58e66749d82650ff0a65697ea82f73272d3f245 | 09242cfafa604c96dfd2626bae84ec2b408c6bb8 | /programming/accounts/views.py | df1aa891ccdf2a7052ccad66ef253887e4fc237b | [] | no_license | yooncobra/programming_commits | aa6b49add508c9a3207612d1bb1b139b10a49ba9 | b34ac7349986f8d2aef505cf024a21f4902db4f7 | refs/heads/master | 2021-01-10T01:08:58.114177 | 2016-03-11T08:08:48 | 2016-03-11T08:08:48 | 52,246,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model, get_backends, authenticate, login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tokens import default_token_generator as token_generator
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from accounts.forms import SignupForm, SignupForm2
from accounts.forms import send_signup_confirm_email
def signup(request):
if request.method == 'POST':
form = SignupForm2(request.POST)
if form.is_valid():
user = form.save()
# backend_cls = get_backends()[0].__class__
# backend_path = backend_cls.__module__ + '.' + backend_cls.__name__
            # https://github.com/django/django/blob/1.9/django/contrib/auth/__init__.py#L81
# user.backend = backend_path
authenticated_user = authenticate(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
auth_login(request, authenticated_user)
            messages.info(request, 'Welcome.')
return redirect(settings.LOGIN_REDIRECT_URL)
            # Email confirmation at signup:
# user = form.save(commit=False)
# user.is_active = False
# user.save()
# send_signup_confirm_email(request, user)
# return redirect(settings.LOGIN_URL)
else:
form = SignupForm2()
return render(request, 'accounts/signup.html', {
'form': form,
})
def signup_confirm(request, uidb64, token):
User = get_user_model()
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user and token_generator.check_token(user, token):
user.is_active = True
user.save()
        messages.info(request, 'Your email has been verified. Please log in.')
return redirect(settings.LOGIN_URL)
else:
        messages.error(request, 'You reached this page through an invalid path.')
        return redirect(settings.LOGIN_URL)
@login_required
def profile(request):
return render(request, 'accounts/profile.html')
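# A sketch of wiring these views into a URLconf (patterns and names are
# placeholders; uidb64/token must match what send_signup_confirm_email
# puts in the confirmation link):
#
#   url(r'^signup/$', views.signup, name='signup'),
#   url(r'^signup/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
#       views.signup_confirm, name='signup_confirm'),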
| [
"[email protected]"
] | |
1de5122b10d1b9578093b53c9039e4eba2eb920b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/825.py | 62b52bb263ab2108fe53731c38ca67585cafd57e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # roller coaster
import psyco
psyco.full()
# open files
fIn = open("input.txt")
fOut = open("output.txt", "w")
# get count
count = int(fIn.readline())
# do each line
for i in range(count):
(rollerRuns, Kapacity, NumberOfPeople) = [int(x) for x in fIn.readline().split(' ')]
peopleQueue = [int(x) for x in fIn.readline().split(' ')]
currentPos = 0
runCount = 0
moneyMade = 0
print rollerRuns, Kapacity, NumberOfPeople
while runCount < rollerRuns:
qcount = 0
seated = 0
runCount = runCount + 1
while qcount < len(peopleQueue) and seated + peopleQueue[currentPos] <= Kapacity:
seated = seated + peopleQueue[currentPos]
currentPos = (currentPos+1) % len(peopleQueue)
qcount = qcount + 1
moneyMade = moneyMade + seated
print seated, "!"
fOut.write("Case #{0}: {1}\n".format(i+1, moneyMade))
fOut.flush()
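# Note: this is the straightforward O(R * N) simulation. Because the queue
# state is fully described by currentPos, each ride's (seated, nextPos) pair
# could be memoized to detect a cycle and jump ahead, a standard speed-up
# when the number of runs R is large.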
| [
"[email protected]"
] | |
c99a21b5f57c4169802b35840986dcb3c904c7a6 | 358588a49140d630089c6e9d3f6cc5edcbbb5377 | /chapter11/q5.py | 6c80dab0b17909f77c424d6dc9cd372d81709fc4 | [] | no_license | gzdshn/ctci-python | 8c2583c50cb3987ecef6c9509fb67d3b78914d94 | 69bc48e8c8db21d36d3ea0c8c88a094f268e367b | refs/heads/master | 2020-03-19T06:44:17.238168 | 2018-08-09T18:04:14 | 2018-08-09T18:04:14 | 136,050,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | ## CTCI - Chapter 11 - Sorting and Searching Question 5
## Find element in array of strings with empty strings
def search(A,val,start,end):
if start>end:
return -1
## Find a midpoint that is not empty
mid = (start+end)/2
if A[mid] == "":
midleft = mid - 1
midright = mid + 1
        while midleft >= start or midright <= end:
            if midright <= end and A[midright] != "":
                mid = midright
                break
            if midleft >= start and A[midleft] != "":
                mid = midleft
                break
            midright += 1
            midleft -= 1
        if A[mid] == "":
            return -1
## Binary Search with new midpoint
if A[mid]==val:
return mid
elif val<A[mid]:
return search(A,val,start,mid-1)
else:
return search(A,val,mid+1,end)
'''
A = ["at","","","","ball","","","car","","","dad","",""]
print search(A,"ball",0,len(A)-1)
''' | [
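# Worst case is still O(n): with a single non-empty entry the expansion
# above can touch every slot before the binary search recurses.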
"[email protected]"
] | |
543ac6eec777a19d95246b4da21290dee7a577ac | 3555deb5ac4a709a5f12bc3a8d0d92d7cb795431 | /linear_program.py | d9072fe10c7119cff7efad96226a786224c8caa8 | [] | no_license | miffyrcee/py | b5e941cfb1b3b1dc94436fe7b11e43c29de86993 | 57d7d6ead1394c5e80dedc73f2dc021a62c5b24c | refs/heads/master | 2020-05-21T10:40:03.416893 | 2019-12-02T00:57:20 | 2019-12-02T00:57:20 | 186,017,721 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | import numpy as np
a = np.array((
[1, -1, -1, -1, -1],
[0, -2, 8, 0, 10],
[0, 5, 2, 0, 0],
[0, 3, -5, 10, 2],
[0, 1, 1, 1, 1],
))
b = np.array(([1000, 50, 100, 25, 0]))
print(np.linalg.solve(a, b))
| [
"[email protected]"
] | |
54b5f5b70b4a6bc0573707d8953d1b52cdebaa06 | 0d5e8c05aca9e829425ef005c158c4c86f1c854b | /codes/Log_Kmeans.py | 605b1a58efb9d41e0240090f7e34b99be7e0b889 | [
"Apache-2.0"
] | permissive | ldwen/LogMerge | cc6398701494bf4ee259b20138d6f618f94e6be4 | 5ca497c5da19bbcfa137afb22c4fb5b897b1ff2e | refs/heads/master | 2022-09-15T18:04:21.536993 | 2019-12-14T17:08:27 | 2019-12-14T17:08:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,233 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import numpy as np
from sklearn.cluster import KMeans
from sklearn.externals import joblib
import matplotlib.pyplot as plt
class My_Kmeans(object):
def __init__(self):
object.__init__(self)
def find_all_template(self):
for i in self.log_list:
files = os.listdir(i[0])
for file in files:
if file.endswith('Template_order'):
template_file = os.path.join(i[0],file)
with open(template_file,'r') as r:
for l in r:
self.template_set.add(l[:-1].lower())
def match_template_vec(self):
tem_list = list()
with open(self.template_vec,'r') as r:
for l in r :
l = l.split('//')
l[1] = list(map(float,l[1][1:-2].split()))
if l[0] in self.template_set:
self.template_list.append(l[0])
self.vec_list.append(l[1])
class TRAIN_Kmeans(My_Kmeans):
def __init__(self,train_a_list,train_b_list,train_count,classes_num,model_a_template_vec,model_b_template_vec,knn_model_save,class_vec_save):
self.log_a_list = train_a_list
self.log_b_list = train_b_list
self.train_count = train_count
self.classes_num = classes_num
self.model_a_template_vec = model_a_template_vec
self.model_b_template_vec = model_b_template_vec
self.knn_model_save = knn_model_save
self.class_vec_save = class_vec_save
self.model_a_template_set = set()
self.model_b_template_set = set()
self.template_list = list()
self.vec_list = list()
self.class_list = list()
def find_a_template(self):
for i in self.log_a_list:
files = os.listdir(i[0])
for file in files:
if file.endswith('Template_order'):
template_file = os.path.join(i[0],file)
with open(template_file,'r') as r:
for l in r:
self.model_a_template_set.add(l[:-1].lower())
def find_b_template(self):
all_count = 0
stop = 0
for i in self.log_b_list:
if stop == 1:
return
seq_set = set()
files = os.listdir(i[0])
label_file = i[1]
for file in files:
y = []
if file.endswith('seq_with_time'):
seq_with_time_file = os.path.join(i[0],file)
with open(seq_with_time_file,'r') as seq,open(label_file,'r') as label:
content = seq.readlines()
label_error = []
for tem_label in label:
label_error.append(tem_label.split()[0])
for j in range(0,len(label_error)-10,3):
# print(len(label_error))
# print(j)
                            y.append(int(label_error[j + 10]))
flag = 0
for index,y_tem in enumerate(y):
if y_tem == 1:
all_count += 1
if all_count >= self.train_count:
flag = 1
break
if flag == 1:
logs = content[0:index+2]
else:
logs = content
for line in logs:
seq_set.add(int(line.split()[1])-1)
for file in files:
if file.endswith('Template_order'):
template_file = os.path.join(i[0],file)
with open(template_file,'r') as r:
templates = r.readlines()
for index,l in enumerate(templates):
if index in seq_set:
self.model_b_template_set.add(l[:-1].lower())
def find_all_template(self):
self.find_a_template()
self.find_b_template()
def match_a_template_vec(self):
tem_list = list()
with open(self.model_a_template_vec,'r') as r:
for l in r :
l = l.split('//')
l[1] = list(map(float,l[1][1:-2].split()))
if l[0] in self.model_a_template_set:
self.template_list.append(l[0])
self.vec_list.append(l[1])
def match_b_template_vec(self):
tem_list = list()
with open(self.model_b_template_vec,'r') as r:
for l in r :
l = l.split('//')
l[1] = list(map(float,l[1][1:-2].split()))
if l[0] in self.model_b_template_set:
self.template_list.append(l[0])
self.vec_list.append(l[1])
def match_template_vec(self):
self.match_a_template_vec()
self.match_b_template_vec()
def save_template_class(self):
seq_class = {}
for i in self.log_a_list:
files = os.listdir(i[0])
for file in files:
if file.endswith('Template_order'):
template_file = os.path.join(i[0],file)
with open(template_file,'r') as r,open(os.path.join(i[0],'template_to_class'),'w') as w:
contents = r.readlines()
for index,l in enumerate(contents):
seq_class[index+1] = str(self.class_list[self.template_list.index(l[:-1].lower())])
w.write(l[:-1] + '//' + str(index + 1) + '//' + seq_class[index+1] + '\n')
for file in files:
if file.endswith('seq_with_time'):
seq_file = os.path.join(i[0],file)
with open(seq_file,'r') as r_seq,open(os.path.join(i[0],'seq_class'),'w') as w:
contents = r_seq.readlines()
for l in contents:
time = l.split()[0]
l = int(l.split()[1])
if l!= -1:
w.write(time + ' ' + seq_class[l] + '\n')
else:
w.write(time + ' ' + str(self.classes_num) + '\n')
def train_kmeans(self):
self.find_all_template()
self.match_template_vec()
km = KMeans(n_clusters=self.classes_num)#para
self.class_list = km.fit_predict(self.vec_list)
joblib.dump(km,self.knn_model_save)
self.save_template_class()
centroids = km.cluster_centers_
with open(self.class_vec_save,'w') as w:
for index,c in enumerate(centroids):
w.write(str(index) + '//' + str(c.tolist()) + '\n')
w.write(str(self.classes_num) + '//' + str([0.0 for t in range(150)]) + '\n')
class PREDICT_Kmeans(My_Kmeans):
def __init__(self,test_list,classes_num,template_vec,knn_model_load,class_vec):
self.log_list = test_list
self.classes_num = classes_num
self.template_vec = template_vec
self.knn_model_load = knn_model_load
self.class_vec = class_vec
self.template_set = set()
self.template_list = list()
self.vec_list = list()
self.class_list = list()
def save_template_class(self):
seq_class = {}
for i in self.log_list:
files = os.listdir(i[0])
for file in files:
if file.endswith('Template_order'):
template_file = os.path.join(i[0],file)
with open(template_file,'r') as r,open(os.path.join(i[0],'template_to_class'),'w') as w:
contents = r.readlines()
for index,l in enumerate(contents):
seq_class[index+1] = str(self.class_list[self.template_list.index(l[:-1].lower())])
w.write(l[:-1] + '//' + str(index + 1) + '//' + seq_class[index+1]+'\n')
for file in files:
if file.endswith('seq_with_time'):
seq_file = os.path.join(i[0],file)
with open(seq_file,'r') as r_seq,open(os.path.join(i[0],'seq_class'),'w') as w:
contents = r_seq.readlines()
for l in contents:
time = l.split()[0]
l = int(l.split()[1])
if l!= -1:
w.write(time + ' ' + seq_class[l] + '\n')
else:
w.write(time + ' ' + str(self.classes_num) + '\n')
def predict_kmeans(self):
self.find_all_template()
self.match_template_vec()
km = joblib.load(self.knn_model_load)
self.class_list = km.predict(self.vec_list)
self.save_template_class()
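# A sketch of driving the two phases end to end (paths and the class count
# are placeholders, not taken from this repo):
#
#   trainer = TRAIN_Kmeans(train_a, train_b, train_count=100, classes_num=20,
#                          model_a_template_vec='a_vec.txt',
#                          model_b_template_vec='b_vec.txt',
#                          knn_model_save='km.pkl',
#                          class_vec_save='class_vec.txt')
#   trainer.train_kmeans()
#   predictor = PREDICT_Kmeans(test_list, 20, 'vec.txt', 'km.pkl', 'class_vec.txt')
#   predictor.predict_kmeans()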
| [
"[email protected]"
] | |
35f0222445493e05c7b929adffc6c0aa8daf0d0a | 9dba8607dce414f9905700d7a4ac44668de5e1f1 | /cracking_trials/cracking/fiber_section/fiber_section_test_macros.py | d731b9888a48909c41c978543ab1a75fa3e260fe | [] | no_license | anaiortega/XCmodels | c0463ffe38531578aee281456e88528882255cd7 | e9b8c2f996a21b8aa3314242f3cc12b0e391b5df | refs/heads/master | 2023-08-16T22:44:01.168775 | 2023-08-14T18:15:10 | 2023-08-14T18:15:10 | 141,140,177 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | # -*- coding: utf-8 -*-
import math
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
def extractFiberSectionProperties(fiberSec,scc):
fibers= fiberSec.getFibers()
global nFibers; nFibers= fibers.getNumFibers()
global sumAreas; sumAreas= fibers.getSumaAreas(1)
global Iz; Iz= fibers.getIz
global Iy; Iy= fibers.getIy
global Pyz; Pyz= fibers.getPyz
global zCenterOfMass; zCenterOfMass= fibers.getCenterOfMassZ()
global yCenterOfMass; yCenterOfMass= fibers.getCenterOfMassY()
global I1; I1= fibers.getI1(1,0,0)
global I2; I2= fibers.getI2(1,0,0)
global i1; i1= math.sqrt(I1/sumAreas) # Radius of gyration major principal axis
global i2; i2= math.sqrt(I2/sumAreas); # Radius of gyration minor principal axis
# th1= th1;
global Me1; Me1= 2*fy/scc.h*I1; # Elastic moment of the section around its principal major axis.
global Me2; Me2= 2*fy/scc.b*I2; # Elastic moment of the section around its principal minor axis.
global SzPosG; SzPosG= fibers.getSzPos(0,0,1)
global SyPosG; SyPosG= fibers.getSyPos(0,0,1)
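# Typical flow (a sketch): build a fiber section, call
# extractFiberSectionProperties(fiberSec, scc) to populate the module-level
# globals, compute the ratioN comparisons against the analytic values, then
# call printRatios(scc) to report them.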
def printRatios(scc):
print "areaTeor= ",(scc.area())
print "sumAreas= ",(sumAreas)
print "ratio1= ",(ratio1)
print "yCenterOfMass= ",(yCenterOfMass)
print "yCenterOfMassTeor= ",(yCenterOfMassTeor)
print "ratio2= ",(ratio2)
print "zCenterOfMass= ",(zCenterOfMass)
print "zCenterOfMassTeor= ",(zCenterOfMassTeor)
print "ratio3= ",(ratio3)
print "I1= ",(I1)
print "I1Teor= ",(scc.I1())
print "ratio4= ",(ratio4)
print "I2= ",(I2)
print "scc.I2()= ",(scc.I2())
print "ratio5= ",(ratio5)
# print "th1= ",(th1)}
print "i1= ",(i1)
print "i1Teor= ",(scc.i1())
print "ratio6= ",(ratio6)
print "i2= ",(i2)
print "i2Teor= ",(scc.i2())
print "ratio7= ",(ratio7)
print "Me1= ",(Me1)
print "Me1Teor= ",(scc.Me1(fy))
print "ratio8= ",(ratio8)
print "Me2= ",(Me2)
print "Me2Teor= ",(scc.Me2(fy))
print "ratio9= ",(ratio9)
print "SzPosG= ",(SzPosG)
print "SzPosGTeor= ",(scc.S1PosG())
print "ratio10= ",(ratio10)
print "SyPosG= ",(SyPosG)
print "SyPosGTeor= ",(scc.S2PosG())
print "ratio11= ",(ratio11)
# print "Mp1= ",(Mp1/100/1000)
print "Mp1Teor= ",(scc.Mp1(fy)/100/1000)
print "ratio12= ",(ratio12)
# print "Mp2= ",(Mp2/100/1000)
print "Mp2Teor= ",(scc.Mp2(fy)/100/1000)
print "ratio13= ",(ratio13)
| [
"[email protected]"
] | |
e94f2e785dc3b1e0d4a94fbeedca7a535b5501a8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2660/47920/321822.py | b1654b4ed61959703e197f2e508f4e4b307c0d7f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | print("b")
print("c") | [
"[email protected]"
] | |
85f5893f99da40c720585f284236bc9083117e57 | ed872a0efb1db283f48176474e22f4c4ad31db79 | /src/forum/migrations/0001_initial.py | 59300a9cb2960e366054264f9ebbdfd7657895c9 | [] | no_license | barontxu/djbookru | 34c2bf90e5d3542e4cbd2f3e600e1c0a12795d35 | 388bff0491e961f8efdf3cabd6c47d9fa2988547 | refs/heads/master | 2021-01-16T20:39:33.949315 | 2014-06-20T12:22:56 | 2014-06-20T12:22:56 | 23,031,683 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,793 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('forum_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
('position', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('forum', ['Category'])
# Adding M2M table for field groups on 'Category'
m2m_table_name = db.shorten_name('forum_category_groups')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('category', models.ForeignKey(orm['forum.category'], null=False)),
('group', models.ForeignKey(orm['auth.group'], null=False))
))
db.create_unique(m2m_table_name, ['category_id', 'group_id'])
# Adding model 'Forum'
db.create_table('forum_forum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(related_name='forums', to=orm['forum.Category'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
('position', self.gf('django.db.models.fields.IntegerField')(default=0)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('forum', ['Forum'])
# Adding model 'Topic'
db.create_table('forum_topic', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('forum', self.gf('django.db.models.fields.related.ForeignKey')(related_name='topics', to=orm['forum.Forum'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='forum_topics', to=orm['accounts.User'])),
('views', self.gf('django.db.models.fields.IntegerField')(default=0)),
('sticky', self.gf('django.db.models.fields.BooleanField')(default=False)),
('closed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('heresy', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('forum', ['Topic'])
# Adding model 'Post'
db.create_table('forum_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('topic', self.gf('django.db.models.fields.related.ForeignKey')(related_name='posts', to=orm['forum.Topic'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='forum_posts', to=orm['accounts.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='forum_updated_posts', null=True, to=orm['accounts.User'])),
('body', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('forum', ['Post'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('forum_category')
# Removing M2M table for field groups on 'Category'
db.delete_table(db.shorten_name('forum_category_groups'))
# Deleting model 'Forum'
db.delete_table('forum_forum')
# Deleting model 'Topic'
db.delete_table('forum_topic')
# Deleting model 'Post'
db.delete_table('forum_post')
models = {
'accounts.achievement': {
'Meta': {'object_name': 'Achievement'},
'active_icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inactive_icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'accounts.user': {
'Meta': {'object_name': 'User', '_ormbases': ['auth.User']},
'achievements': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Achievement']", 'through': "orm['accounts.UserAchievement']", 'symmetrical': 'False'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_comments_read': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_doc_comments_read': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'accounts.userachievement': {
'Meta': {'unique_together': "(('user', 'achievement'),)", 'object_name': 'UserAchievement'},
'achievement': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Achievement']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.category': {
'Meta': {'ordering': "['position']", 'object_name': 'Category'},
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'forum_categories'", 'blank': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'forum.forum': {
'Meta': {'ordering': "['position']", 'object_name': 'Forum'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'forums'", 'to': "orm['forum.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'forum.post': {
'Meta': {'ordering': "['created']", 'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['forum.Topic']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'forum_updated_posts'", 'null': 'True', 'to': "orm['accounts.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'forum_posts'", 'to': "orm['accounts.User']"})
},
'forum.topic': {
'Meta': {'ordering': "['-updated']", 'object_name': 'Topic'},
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': "orm['forum.Forum']"}),
'heresy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'forum_topics'", 'to': "orm['accounts.User']"}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['forum'] | [
"[email protected]"
] | |
7183beb77f4dffe7f57f3cf725186dbf699e9697 | d0a66aba48b8c3fb20d62a2444d05aaf86c1faf8 | /numpy_array_1.py | 9f796ebc2f95932b3c28ed766ac8153739f458ed | [] | no_license | zoro6908/test1 | a273e1bee8e8db58759b1df98b657e35ba69df17 | e49952334cc0ec4725c89ac199b1fd0c8127a73b | refs/heads/master | 2022-07-11T00:20:13.560271 | 2020-05-11T07:48:04 | 2020-05-11T07:48:04 | 262,949,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | import numpy as np
# a = np.array([1,2,3])
# a= np.arange(10,100)
# a=np.zeros((5,5))
# a = np.linspace(0,20,5)
# a = np.zeros(8)
# print(a)
# c=a.reshape((2,2,2))
# print(c)
# arr=np.arange(2,20)
# print(arr)
# ele=arr[6]
# print(type(ele))
#
# arr=np.arange(20)
# print(arr)
# sli=slice(1,10,2)
# print(arr[sli])
arr=np.array([[1,2,3],[4,5,6],[7,8,9]])  # last row padded to 3 items; the original [7,8] made a ragged list, which recent numpy rejects
# print(arr[0:1,0:1])
print(arr.shape)
print(arr.ndim)
print(arr.itemsize)
| [
"[email protected]"
] | |
41691c156d4f99f8adb3d61df01035aa45c3fe7b | 10ed27dfc0853704bdd6a39c9a18082783195e76 | /configs.py | 40797b8601bd6e1943bb31710fa7c5a6e1ef5754 | [] | no_license | TanmDL/PGMA_tensorflow | d6d21b8614940b72065230f3ba5c2b88aec2e785 | a25240eb59ed695ecc41abd40eebb25986c6f14c | refs/heads/master | 2020-07-29T09:18:57.049428 | 2019-05-07T11:35:27 | 2019-05-07T11:35:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py |
"""
MNIST configuratons.
"""
config_mnist = {}
config_mnist['dataset'] = 'mnist'
config_mnist['datashape'] = [28, 28, 1]
config_mnist['verbose'] = True
config_mnist['save_every_epoch'] = 20
##print every
config_mnist['print_every'] = 100
config_mnist['work_dir'] = 'results_mnist'
config_mnist['plot_num_pics'] = 400
config_mnist['plot_num_cols'] = 20
config_mnist['input_normalize_sym'] = False
config_mnist['data_dir'] = 'mnist'
config_mnist['optimizer'] = 'adam' # adam, sgd
config_mnist['adam_beta1'] = 0.5
config_mnist['lr'] = 1e-3
config_mnist['lr_adv'] = 1e-4
config_mnist['lr_schedule'] = 'plateau'
##batch size
config_mnist['batch_size'] = 100
##epoch number
config_mnist['task_num'] = 5
config_mnist['epoch_num_per'] = 5
#Training the last task for more epochs shows the stabilization of the method.
config_mnist['epoch_num'] = [config_mnist['epoch_num_per']] * (config_mnist['task_num'] - 1) + [config_mnist['epoch_num_per']*4]
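# e.g. with task_num = 5 and epoch_num_per = 5 the line above evaluates to [5, 5, 5, 5, 20]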
config_mnist['init_std'] = 0.01
config_mnist['init_bias'] = 0.0
config_mnist['batch_norm'] = False
config_mnist['batch_norm_eps'] = 1e-5
config_mnist['batch_norm_decay'] = 0.9
config_mnist['conv_filters_dim'] = 4
config_mnist['e_pretrain'] = True
config_mnist['e_pretrain_sample_size'] = 1000
config_mnist['e_noise'] = 'add_noise'
config_mnist['e_num_filters'] = 256
config_mnist['e_num_layers'] = 3
config_mnist['e_arch'] = 'dcgan' # mlp, dcgan, ali
config_mnist['g_num_filters'] = 256
config_mnist['g_num_layers'] = 3
config_mnist['g_arch'] = 'dcgan_mod' # mlp, dcgan, dcgan_mod, ali
config_mnist['gan_p_trick'] = False
config_mnist['d_num_filters'] = 512
config_mnist['d_num_layers'] = 4
config_mnist['pz'] = 'normal' # uniform, normal, sphere
config_mnist['cost'] = 'l2sq' #l2, l2sq, l1
config_mnist['pz_scale'] = 1.
config_mnist['z_test'] = 'mmd'
config_mnist['mmd_kernel'] = 'IMQ' # RBF, IMQ
config_mnist['lambda_schedule'] = 'constant'
# main network
if config_mnist['task_num'] > 3:
    #Reducing zdim improves performance when addressing more tasks.
config_mnist['zdim'] = 32
#More parameters are needed for a lot of tasks.
config_mnist['main_info'] = [784, 600, 600, 10]
else:
config_mnist['zdim'] = 64
config_mnist['main_info'] = [784, 100, 100, 10]
#
config_mnist['auxi_info'] = [0, 0, 80]
config_mnist['t_info'] = [1000, 1000]
# sample size after one task
config_mnist['z_size'] = 40000
# sample size during t
config_mnist['sample_size'] = 400
config_mnist['t_keep_prob'] = 0.7
config_mnist['main_keep_prob'] = 0.8
config_mnist['seed'] = [101, 202, 303, 70]
| [
"[email protected]"
] | |
ccd8674ff8ba9109ee2f8c297508b854a91acbc9 | 75f63c03ea5d7d9718fdf84fb2c9a2f428ce83e3 | /experiments/07_learning_rate_selection/run_fmnist_mlp.py | a1f44076dc338a1f6e0362cbdc96e736871980aa | [] | no_license | h1j9d3q/cockpit | b7e662b8679cb1d8726e88644defbc8857b88419 | b484f5728c793a9f17df09553e36943f86b5d5a4 | refs/heads/master | 2023-06-29T05:55:46.929033 | 2021-08-05T13:44:54 | 2021-08-05T13:44:54 | 370,411,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | """Run SGD on Fashion MNIST using different learning rates."""
from gridsearch import run
from cockpit.utils import schedules
lrs = [1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2, 1e-1, 5e-1]
track_schedule = schedules.linear(1)
run("fmnist_mlp", lrs, track_schedule)
| [
"Anonymous"
] | Anonymous |
94d0275ae55e0a157355c6e20b146de6d46c9659 | 87b60b88251f11367f3563557223a25716c008c3 | /ascii.py | 854e2adc9021f5d6b6c39b52f60e880a8725eadc | [] | no_license | shanepatel/Structure_Utils | b2931070045dce5d51968151210025135c1f8b7d | 2fe4759598d7229de0ae9763a84a7ce6fb6214d2 | refs/heads/master | 2022-06-26T07:39:25.007254 | 2022-06-13T00:12:27 | 2022-06-13T00:32:01 | 135,334,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | import sys
import atoms
from atoms import *
#*****************************************************************************************
def ascii_write(atoms,filename):
if filename=="screen":
print "%d" % atoms.nat
#print "%s" % atoms.boundcond
print "%24.15E%24.15E%24.15E" % (atoms.cellvec[0][0],atoms.cellvec[1][0],atoms.cellvec[1][1])
print "%24.15E%24.15E%24.15E" % (atoms.cellvec[2][0],atoms.cellvec[2][1],atoms.cellvec[2][2])
for i in range(atoms.nat):
x=atoms.rat[i][0]
y=atoms.rat[i][1]
z=atoms.rat[i][2]
print "%24.15E%24.15E%24.15E%5s" % (x,y,z,atoms.sat[i])
else:
f= open(filename,"w")
f.write("%d\n" % atoms.nat)
f.write("%24.15E%24.15E%24.15E\n" % (atoms.cellvec[0][0],atoms.cellvec[1][0],atoms.cellvec[1][1]))
f.write("%24.15E%24.15E%24.15E\n" % (atoms.cellvec[2][0],atoms.cellvec[2][1],atoms.cellvec[2][2]))
for i in range(atoms.nat):
x=atoms.rat[i][0]
y=atoms.rat[i][1]
z=atoms.rat[i][2]
f.write("%24.15E%24.15E%24.15E%5s\n" % (x,y,z,atoms.sat[i]))
#f.write(" %12.8f%12.8f%12.8f\n" % (x,y,z))
f.close()
#*****************************************************************************************
def ascii_read(filename):
f=open(filename,"r")
atoms=Atoms()
atoms.boundcond="bulk"
iline=0
iline_tot=0
nconf=0
atoms.nat=0
for line in f.readlines():
iline_tot+=1
tt=str(line).strip()
if tt[0]=='#': continue
#print tt
iline+=1
if iline==1:
atoms.epot=float(line.split()[1])*27.211385
#print atoms.epot
pass
elif iline==2:
atoms.cellvec[0][0]=float(line.split()[0])
atoms.cellvec[1][0]=float(line.split()[1])
atoms.cellvec[1][1]=float(line.split()[2])
elif iline==3:
atoms.cellvec[2][0]=float(line.split()[0])
atoms.cellvec[2][1]=float(line.split()[1])
atoms.cellvec[2][2]=float(line.split()[2])
else:
atoms.nat+=1
atoms.bemoved.append("TTT")
atoms.rat.append([])
icol=0
#loop over the elemets, split by whitespace
for i in line.split():
#print i
icol+=1
if icol<4:
atoms.rat[-1].append(float(i))
elif icol<5:
atoms.sat.append(i)
    f.close()
return atoms
#*****************************************************************************************
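#*****************************************************************************************
# Minimal round-trip sketch (not part of the original module). It assumes an
# existing .ascii file; the file names below are made up for illustration.
#
# atoms = ascii_read('conf.ascii')     # parse positions, cell and symbols
# ascii_write(atoms, 'copy.ascii')     # write them back out
# ascii_write(atoms, 'screen')         # or dump to stdout instead
#*****************************************************************************************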
| [
"[email protected]"
] | |
495e4800bad42a827e0b18c374ed2fd5b6049cc1 | abd26cfcf75f8bc3841907bee363fc98d6294b53 | /DRIMS/DRIMS/main/admin.py | 2c4d0fb46fa2c440ef389d04e2c05460d1489368 | [
"MIT"
] | permissive | EsraelDawit-a/Drims-Fse-Project | a6e5e5fc0ebd0982ed8925fb78f9404f6c7d8e15 | a68d842e947450bc27711a6acab89ec8e3ba3b03 | refs/heads/master | 2023-06-06T05:44:58.719308 | 2021-06-27T11:27:53 | 2021-06-27T11:27:53 | 356,553,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | from django.contrib import admin
from .models import *
from django.contrib.auth.admin import UserAdmin as A
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm
# Register your models here.
class CustomUserAdmin(admin.ModelAdmin):
list_display = ['username','phone','Adress','optional_adress','first_name','last_name','Email_Adress','role' ]
# add_form = A.add_form
# add_form_template = A.add_form_template
# add_fieldsets = A.add_fieldsets
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user','pic','bio']
admin.site.register(CustomUser, CustomUserAdmin)
admin.site.register(Profile,ProfileAdmin)
# admin.site.register(Buyer, UA)
# admin.site.register(Seller, UA)
# admin.site.register(Transporter)
# admin.site.register(BranchAdminRegistration) | [
"[email protected]"
] | |
aea8383bf677353e270af874f52abb779e9b0bdc | 132ef72cbb2d404d4520a2cb5e1b604835f70d8b | /src/poetry/utils/constants.py | 0f799b16d7dd185bb05a8e5e7131aead1010958a | [
"MIT"
] | permissive | tianhm/poetry | b5e6d70a9fedebde3529915f990560ccaa9ceda6 | 2b15ce10f02b0c6347fe2f12ae902488edeaaf7c | refs/heads/master | 2023-08-31T04:00:39.337388 | 2023-03-27T15:20:47 | 2023-03-27T15:20:47 | 240,429,186 | 0 | 0 | MIT | 2023-03-31T18:52:04 | 2020-02-14T04:39:43 | Python | UTF-8 | Python | false | false | 115 | py | from __future__ import annotations
# Timeout for HTTP requests using the requests library.
REQUESTS_TIMEOUT = 15
| [
"[email protected]"
] | |
93f4a7b2d5f88879fe31f02794e2c14d975aed7a | b74f8a4e0e60a51a1d88b3f0ba85c000fc904708 | /wgpu/_parsers.py | cd39b09ba8fc09b9c353d7ec3968d507ac3b2679 | [
"BSD-2-Clause"
] | permissive | Korijn/wgpu-py | 37d2afb4b263135f207837002d2489e83cb7c5dd | 72f89be121ea6cd819a145ee3b037004211b3245 | refs/heads/master | 2020-11-27T19:11:07.080730 | 2019-12-22T09:50:36 | 2019-12-22T09:50:36 | 229,572,629 | 0 | 0 | BSD-2-Clause | 2019-12-22T13:33:46 | 2019-12-22T13:33:46 | null | UTF-8 | Python | false | false | 17,180 | py | """
This module provides classes to parse the .idl and .h files defining
the WebGPU and wgpu API. These help us to generate code (definitions
of flags, enums and stucts) and to provide help for developers.
"""
class BaseParser:
""" An object that can be used to walk over a str in an easy way.
Our parsers have the following attributes:
* flags: a dict mapping the (Pythonic) flag name to a dict of field-value pairs.
* enums: a dict mapping the (Pythonic) enum name to a dict of field-value pairs.
* structs: a dict mapping the (Pythonic) struct name to a dict of StructField objects.
* functions: a dict mapping the (normalized) func name to the line defining the function.
"""
def __init__(self, text):
self._text = text
self._length = len(text)
self._pos = 0
def end_reached(self):
return self._pos >= self._length
def read_until(self, char):
start = self._pos
while self._pos < self._length:
c = self._text[self._pos]
self._pos += 1
if c == char:
return self._text[start : self._pos]
return ""
def readline(self):
return self.read_until("\n")
def parse(self, verbose=False):
self._pos = 0
self.flags = {}
self.enums = {}
self.structs = {}
self.functions = {}
self.callbacks = {}
self.types = {}
self.unknown_lines = unknown_lines = []
if verbose:
print(f"##### Parsing with {self.__class__.__name__} ...")
self._parse()
self._normalize()
# Summarize
if verbose:
if self.unknown_lines:
print(f"Could not parse {len(self.unknown_lines)} lines")
else:
print(f"All lines where parsed")
print(f"Found {len(self.flags)} flags")
print(f"Found {len(self.enums)} enums")
print(f"Found {len(self.structs)} structs")
print(f"Found {len(self.functions)} functions")
print(f"Found {len(self.callbacks)} callbacks")
    def _parse(self):  # implemented by subclasses; called from parse()
raise NotImplementedError()
def _normalize(self):
raise NotImplementedError()
class StructField:
""" A little object to specify the field of a struct.
"""
def __init__(self, line, name, typename, default=None):
self.line = line
self.name = name
self.typename = typename
self.default = default
def __repr__(self):
return f"<StructField '{self.typename} {self.name}'>"
def to_str(self):
return self.line
def py_arg(self):
t = self.typename
d = self.default
if t not in ("bool", "int", "float", "str"):
t = f"'{t}'"
if d is not None:
d = {"false": "False", "true": "True"}.get(d, d)
return f"{self.name}: {t}={d}"
else:
return f"{self.name}: {t}"
# %% IDL
class IdlParser(BaseParser):
""" Parse (part of) IDL files to obtain info about flags, enums and structs.
"""
def _normalize(self):
# Remove GPU prefix for flags, enums and structs
for d in (self.flags, self.enums, self.structs):
for name in list(d.keys()):
assert name.startswith("GPU")
new_name = name[3:]
if new_name.endswith("Dict"):
new_name = new_name[:-4]
d[new_name] = d.pop(name)
# Remove (abstract) base structs
for name in list(self.structs):
if name.endswith("Base"):
self.structs.pop(name)
        # Normalize function name to be a flat lowercase name without underscores
for name in list(self.functions.keys()):
assert name.startswith("GPU") and "." in name
new_name = name[3:].replace(".", "").lower()
self.functions[new_name] = self.functions.pop(name)
def _parse(self):
while not self.end_reached():
line = self.readline()
if not line.strip():
pass
elif line.startswith("//"):
pass
elif line.startswith("/*"):
if "*/" in line:
pass
else:
raise RuntimeError("Cannot handle multiline comments yet.")
elif line.startswith("interface "):
lines = [line]
while not line.startswith("};"):
line = self.readline()
lines.append(line)
classname = lines[0].split("{")[0].split(":")[0].split()[-1]
line_index = 0
while line_index < len(lines) - 1:
line_index += 1
line = lines[line_index].strip()
if not line or line.startswith("//"):
continue
elif line.startswith("const ") and "Flags" in line:
parts = line.strip(";").split()
assert parts[-2] == "="
assert parts[1].endswith("Flags")
basename = parts[1][:-5]
name = parts[2]
val = int(parts[-1], 16)
self.flags.setdefault(basename, {})[name] = val
elif "(" in line:
line = lines[line_index]
while line.count("(") > line.count(")"):
line_index += 1
line += lines[line_index]
assert line.count("(") == line.count(")")
line = line.strip()
line.replace("\n", " ")
for c in (" ", " ", " "):
line = line.replace(c, " ")
assert line.endswith(";")
funcname = line.split("(")[0].split()[-1]
line = (
line.replace("\n", " ")
.replace(" ", " ")
.replace(" ", " ")
)
self.functions[classname + "." + funcname] = line
elif line.startswith("enum "):
line += self.read_until("}") + self.readline()
lines = line.strip().split("\n")
name = lines[0].split(" ", 1)[1].strip("{ \t\r\n")
d = {}
for i, line in enumerate(lines[1:-1]):
line = line.strip()
if not line or line.startswith("//"):
continue
key = val = line.strip('", \t')
for i1, i2 in [
("-", "_"),
("1d", "d1"),
("2d", "d2"),
("3d", "d3"),
]:
key = key.replace(i1, i2)
d[key] = val
self.enums[name] = d
elif line.startswith("dictionary "):
assert line.count("{") == 1 and line.count("}") == 0
lines = [line]
while not line.startswith("};"):
line = self.readline()
lines.append(line)
name = lines[0].split(" ", 1)[1].strip("{ \t\r\n")
if "GPUDeviceDescriptor" in name:
a = 323
if ":" in name:
name, _, base = name.partition(":")
name, base = name.strip(), base.strip()
if base not in self.structs:
# print(f"dict {name} has unknown base dict {base}")
d = {}
else:
d = self.structs[base].copy()
else:
d = {}
for line in lines[1:-1]:
line = line.split("//")[0].strip()
if not line:
continue
assert line.endswith(";")
arg = line.strip().strip(",;").strip()
is_required = False
default = None
if "=" in arg:
arg, default = arg.rsplit("=", 1)
arg, default = arg.strip(), default.strip()
arg_type, arg_name = arg.strip().rsplit(" ", 1)
if arg_type.startswith("required "):
is_required = True
arg_type = arg_type[9:]
if arg_type in ["double", "float"]:
t = "float"
elif arg_type in ["long", "unsigned long", "unsigned long long"]:
t = "int"
elif arg_type in ["boolean"]:
t = "bool"
elif arg_type in ["DOMString", "DOMString?"]:
t = "str"
elif arg_type.startswith("GPU"):
t = arg_type
# todo: can in some cases resolve this to int/float via typedefs
elif arg_type.startswith("sequence<GPU"):
t = arg_type[9:-1] + "-list"
elif arg_type == "ImageBitmap":
t = "array"
elif arg_type in [
"(GPULoadOp or GPUColor)",
"(GPULoadOp or float)",
"(GPULoadOp or unsigned long)",
]:
# GPURenderPassColorAttachmentDescriptor
# GPURenderPassDepthStencilAttachmentDescriptor
t = (
arg_type[1:-1]
.replace(" ", "-")
.replace("unsigned-long", "int")
)
else:
assert False
d[arg_name] = StructField(line, arg_name, t, default)
self.structs[name] = d
else:
self.unknown_lines.append(line)
# %% C-header
class HParser(BaseParser):
""" Parse (part of) .h files to obtain info about flags, enums and structs.
"""
# def pythonise_type(self, t):
# t = self.types.get(t, t)
# t = self.types.get(t, t) # because can be XX -> XXDummy -> uint32_t
# if t in ("float", "double"):
# return "float"
# elif t in ("int32_t", "int64_t", "uint32_t", "uint64_t"):
# return "int"
# elif t.endswith("_t"):
# return t[:-2]
# elif t.startswith("WGPU"):
# return t[4:]
# else:
# return t
#
# def type_annotation(self, t):
# t = self.pythonise_type(t)
# if t in ("int", "float"):
# return f": {t}"
# elif t == "void":
# return ""
# else:
# return f": {t!r}"
#
# def type_to_ctype(self, t):
# while self.types.get(t, t) is not t:
# t = self.types.get(t, t)
# if t == "void":
# return "ctypes.c_void_p"
# elif t in ("bool", "float", "double"):
# return "ctypes.c_" + t
# elif t in ("uint8_t", "int32_t", "int64_t", "uint32_t", "uint64_t"):
# return "ctypes.c_" + t[:-2]
# elif t in ("uintptr_t", ):
# return "ctypes.POINTER(ctypes.c_uint64)" # todo: probably
# elif t == "WGPURawString":
# return "ctypes.c_char_p"
# elif t in ("WGPUBufferMapReadCallback", "WGPUBufferMapWriteCallback", "WGPURequestAdapterCallback"):
# return "ctypes.c_void_p" # todo: function pointer
# elif t in self.structs:
# return t
# elif t in self.enums:
# return "ctypes.c_int64" # todo: --->>>> uint32 causes access violation, ??? but with cffi it seems enums are 4 bytes ...
# # elif t == "WGPUBindingResource":
# # return "dunno"
# else:
# raise NotImplementedError()
def _normalize(self):
# Remove WGPU prefix for flags, enums and structs
for d in (self.flags, self.enums, self.structs):
for name in list(d.keys()):
assert name.startswith("WGPU")
new_name = name[4:]
d[new_name] = d.pop(name)
        # Normalize function name to be a flat lowercase name without underscores
for name in list(self.functions.keys()):
assert name.startswith("wgpu") and "." not in name
new_name = name[4:].replace("_", "").lower()
self.functions[new_name] = self.functions.pop(name)
def _parse(self):
while not self.end_reached():
line = self.readline()
if not line.strip():
pass
elif line.startswith("//"):
pass
elif line.startswith("/*"):
if "*/" in line:
pass
else:
raise RuntimeError("Cannot handle multiline comments yet.")
elif line.startswith("#include "):
pass
elif line.startswith("#if !defined(WGPU_REMOTE)") or line.startswith(
"#if defined(WGPU_LOCAL)"
):
pass
elif line.startswith("#endif"):
pass
elif line.startswith("#define "):
parts = line.split()
if len(parts) == 3:
basename, _, name = parts[1].partition("_")
val = int(parts[2].strip())
self.flags.setdefault(basename, {})[name] = val
elif "WGPU_LOCAL" in line:
pass
else:
self.unknown_lines.append(line)
elif line.startswith("typedef enum {"):
line += self.read_until("}") + self.readline()
lines = line.strip().split("\n")
name = lines[-1].split("}", 1)[1].strip("; ")
d = {}
for i, line in enumerate(lines[1:-1]):
key, _, val = line.strip().strip(",;").partition("=")
val = val.strip()
if not val:
val = i
key = key[len(name) + 1 :]
d[key.strip()] = int(val)
self.enums[name] = d
elif line.startswith("typedef struct"):
assert line.count("{") == 1 and line.count("}") == 0
nesting_level = 1
while nesting_level > 0:
more_line = self.read_until("}") + self.readline()
line += more_line
nesting_level += more_line.count("{") - more_line.count("}")
lines = line.strip().split("\n")
name = lines[-1].split("}", 1)[1].strip("; ")
assert name
d = {}
union = False
for line in lines[1:-1]:
line = line.strip()
if not union:
if line.startswith("union {"):
union = True
continue
else: # in a union
if line == "};":
union = False
continue
assert line.endswith(";")
arg = line.strip(",;")
if arg.startswith("const "):
arg = arg[6:]
arg_type, arg_name = arg.strip().split()
arg_name = arg_name.strip(" *")
if union:
line += " (in union)"
d[arg_name] = StructField(line, arg_name, arg_type)
self.structs[name] = d
elif line.startswith("typedef void (*") and "Callback" in line:
name = line.split("(*", 1)[1].split(")")[0].strip()
self.callbacks[name] = line.strip()
elif line.startswith("typedef "):
parts = line.strip().strip(";").split()
if len(parts) == 3:
self.types[parts[2]] = parts[1]
else:
self.unknown_lines.append(line)
elif (
line.startswith("void ") or line.startswith("WGPU")
) and "wgpu_" in line:
if ")" not in line:
line += self.read_until(")") + self.readline()
name = line.split("(")[0].strip().split()[-1].strip()
self.functions[name] = line
else:
self.unknown_lines.append(line)
if __name__ == "__main__":
idl_parser = IdlParser(open("./resources/webgpu.idl", "rb").read().decode())
idl_parser.parse()
h_parser = HParser(open("./resources/wgpu.h", "rb").read().decode())
h_parser.parse()
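    # Illustrative follow-up (assumes both parse() calls succeeded): the parsers
    # expose plain dicts, so the results can be inspected directly.
    print(len(idl_parser.structs), "IDL structs,", len(h_parser.functions), "C functions")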
| [
"[email protected]"
] | |
3ce005e8fcf85778005f67433c18694532ca1231 | bcb161c349ef0dd32d6a1e349ecd409103cd7fd4 | /app/__init__.py | 83bc0bde530254cd5ccf01a1f465af4d765c2ceb | [] | no_license | apengok/jujulong_flask | 8f2218cbe67903009eaacba794f8020ea7c90c28 | 9c8319d99521b6b6dfb4a9e901425a0369c36fed | refs/heads/master | 2021-06-07T18:06:20.500540 | 2016-12-14T08:40:54 | 2016-12-14T08:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | import os
from flask import Flask,render_template
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from flask_admin import Admin
#from flask.ext.openid import OpenID
from config import config
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
admin = Admin(name='Jujulong',template_mode='bootstrap3')
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
admin.init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask_sslify import SSLify
sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint,url_prefix='/auth')
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(api_1_0_blueprint,url_prefix='/api/v1.0')
from .kivie import kivie as kivie_blueprint
app.register_blueprint(kivie_blueprint,url_prefix='/kivie')
return app
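

# Typical factory usage (sketch only; the 'default' config key is an assumption
# about the config dict imported above, not something this file defines):
#
#   from app import create_app
#   app = create_app(os.getenv('FLASK_CONFIG') or 'default')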
| [
"[email protected]"
] | |
fc407b3fedab2b8a564cbb560adedffef15ee8e3 | c208954de92470c0144fad2e07a92ed1822edd59 | /Facetool/data/banner_decompiled.py | 55da4a85c78ac4b238e55f7d2dfd0f957023f249 | [
"MIT"
] | permissive | rendy026/reverse-enginnering | 4217f3b723569fb792bac0f22a56a305199db1dc | f04cec0bf518a2617fc4fd7155f755fafc2af799 | refs/heads/master | 2023-01-07T15:49:15.791052 | 2020-10-13T09:22:02 | 2020-10-13T09:22:02 | 303,575,571 | 0 | 0 | MIT | 2020-10-13T09:41:59 | 2020-10-13T03:17:42 | Python | UTF-8 | Python | false | false | 9,313 | py | # -*- coding: utf-8 -*-
# coding=utf-8
import time,os,sys, requests
import auto
import v2
import v4
import v4b
import v5
import v6
import v7
import v8
import vp
import vip
import get
import new1, new2
W = '\033[1;37m' #Putih
N = '\033[0m' # Tutup
R = '\033[1;37m\033[31m' #Merah
G = '\033[1;32m' #Ijo
B = '\033[1;37m\033[34m' # biru
O = '\033[33m' # Kuning
C = '\033[36m' #Biru laut
K = '\x1b[1;93m' #Kuning
gr = "\x1b[00m═══════════════════════════════════"
def user():
import requests as r
try:
open('/data/data/com.termux/files/usr/lib/.1.txt')
tambah = False
except:
open('/data/data/com.termux/files/usr/lib/.1.txt', 'w').write('')
tambah = True
if tambah:
requests.get('http://Sereware56.000webhostapp.com/hitung.php?command=t')
def slowprint(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.1 / 100)
def exit():
slowprint('\x1b[91m[!]\x1b[00m Exiting Program !')
os.system('xdg-open https://youtube.com/saydog-official')
os.system('exit')
logo = """
\x1b[1;32m
·▄▄▄ ▄▄▄· ▄▄· ▄▄▄ .▄▄▄▄▄ ▄▄▌
▐▄▄·▐█ ▀█ ▐█ ▌▪▀▄.▀·•██ ▪ ▪ ██•
██▪ ▄█▀▀█ ██ ▄▄▐▀▀▪▄ ▐█.▪ ▄█▀▄ ▄█▀▄ ██▪
██▌.▐█ ▪▐▌▐███▌▐█▄▄▌ ▐█▌·▐█▌.▐▌▐█▌.▐▌▐█▌▐▌
▀▀▀ ▀ ▀ ·▀▀▀ ▀▀▀ ▀▀▀ ▀█▄▀▪ ▀█▄▀▪.▀▀
\x1b[91mFACEBOOK HACKING TOOLS\x1b[00m \033[041m PRO \033[00m
"""
def header():
os.system('clear')
user()
os.system('xdg-open https://www.youtube.com/channel/UCLU9H65QrIC6u2UetU6476w')
slowprint(logo)
print('\x1b[1;32m + ----=[\x1b[00m Coded By \x1b[33mIqbalmh18\x1b[1;32m ]=---- +')
print('\x1b[1;32m + ----=[\x1b[00m Author By \x1b[33mMr.XsZ\x1b[1;32m ]=---- +')
print('\x1b[00m')
slowprint('Welcome to Facebook Hacking Tools')
slowprint('For support the Author please subscribe YouTube Channel')
slowprint('YouTube : \x1b[1;32m\033[041m SAYDOG \033[00m')
slowprint('YouTube : \x1b[91m\033[042m Mr.XsZ \033[00m')
print('\x1b[00m')
slowprint('USAGE THIS TOOLS')
slowprint('Use command : \x1b[33mhelp')
print('')
main()
def main():
bal = raw_input('\x1b[00mFace®tool \x1b[91m> \x1b[1;32m')
if bal == '' in bal:
main()
elif bal == 'exit' in bal:
exit()
elif bal == 'token' in bal:
tok = raw_input('\x1b[00mPaste Your Token Here : \x1b[1;32m')
os.system('print "'+tok+'" > login.txt;cat login.txt > data/login.txt')
os.system('sleep 2')
slowprint('\x1b[00mSaved as \x1b[33mlogin.txt')
slowprint('\x1b[00mBack to menu, please wait ...')
os.system('sleep 3')
header()
elif bal == 'logout' in bal:
os.system('rm login.txt;rm data/login.txt')
slowprint('\x1b[00mLogout Session Token : \x1b[33m Success\x1b[00m')
slowprint('\x1b[00mBack to menu, please wait ...')
os.system('sleep 3')
header()
elif bal == 'update' in bal:
print '[%s#%s] Updating ...' % (G, N)
os.system('git pull')
print '%s[%s**%s]%s was updated. \xc2\xaf\\_(\xe3\x83\x84)_/\xc2\xaf' % (G, R, G, N)
sys.exit()
elif bal == 'help' in bal:
print('\x1b[00m')
print('\x1b[1;32m HELP MENU\x1b[00m')
print'%s' % gr
print(' %s[%s+%s] Jumlah User :%s ' + requests.get('http://Sereware56.000webhostapp.com/hitung.php').text) % (W,O,W,G)
print('\x1b[00m╔═════════════════════════════════╗')
print('\x1b[00m║ Command Description ║')
print('\x1b[00m╠═════════════════════════════════╣')
print('\x1b[00m║ show \x1b[91mShow all tools\x1b[00m ║')
print('\x1b[00m║ token \x1b[91mLogin token fb\x1b[00m ║')
print('\x1b[00m║ logout \x1b[91mLogout token fb\x1b[00m ║')
print('\x1b[00m║ get \x1b[91mGet Token\x1b[00m ║')
print('\x1b[00m║ update \x1b[91mUpdate Script\x1b[00m ║')
print('\x1b[00m║ exit \x1b[91mExit program\x1b[00m ║')
print('\x1b[00m╚═════════════════════════════════╝')
print('')
main()
elif bal == 'get' in bal:
get.tokenz()
elif bal == 'show' in bal:
print('\x1b[00m')
os.system('xdg-open https://www.instagram.com/4N9GA')
slowprint('Welcome to Tools Menu')
slowprint('Choose number for running the tools')
print'%s' % gr
print('\x1b[1;32m 0.\x1b[1;91m Back')
print'%s' % gr
print('%s 1%s DarkFb %sV 1.2' % (G,W,K))
print'%s' % gr
print('%s 2%s DarkFb %sV 1.4' % (G,W,K))
print'%s' % gr
print('%s 3%s DarkFb %sV 1.4 %s (Beta)' % (G,W,K,W))
print'%s' % gr
print('%s 4%s DarkFb %sV 1.5' % (G,W,K))
print'%s' % gr
print('%s 5%s DarkFb %sV 1.6' % (G,W,K))
print'%s' % gr
print('%s 6%s DarkFb %sV 1.7' % (G,W,K))
print'%s' % gr
print('%s 7%s DarkFb %sV 1.8' % (G,W,K))
print'%s' % gr
print('%s 8%s DarkFb %sV 1.7 %s Premium' % (G,W,K,W))
print'%s' % gr
print('%s 9%s DarkFb %sV 1.7 %s Vip' % (G,W,K,W))
print'%s' % gr
print('%s 10%s Auto Cark ' % (G,W))
print'%s' % gr
print('%s 11%s Auto Brute Force %sV 0.1 %s New' % (G,W,K,W))
print'%s' % gr
print('%s 12%s Auto Brute Force %sV 0.2 %s New' % (G,W,K,W))
print'%s' % gr
print('%s 13%s Auto Hack ' % (G,W))
print'%s' % gr
print('\x1b[00m')
a = raw_input('\x1b[91m[\x1b[00mCHOOSE\x1b[91m] Number : \x1b[1;32m')
if a == '' in a:
header()
if a == '0' in a:
header()
elif a == '1' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/27J0GqAL78w')
v2.login()
header()
elif a == '2' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('xdg-open https://youtu.be/vfbRPJn1fPE')
os.system('sleep 2')
v4.login()
header()
elif a == '3' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('xdg-open https://youtu.be/vfbRPJn1fPE')
os.system('sleep 2')
v4b.login()
header()
elif a == '4' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/sfmLO1e6MUA')
v5.login()
header()
elif a == '5' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/sfmLO1e6MUA')
v6.login()
header()
elif a == '6' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/27J0GqAL78w')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
elif a == '7' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/vfbRPJn1fPE')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
elif a == '8' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/27J0GqAL78w')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
elif a == '9' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/27J0GqAL78w')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
elif a == '10' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
os.system('xdg-open https://youtu.be/ntb_JmvtXzo')
auto.autoBrute()
header()
elif a == '11' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
os.system('xdg-open https://youtu.be/ntb_JmvtXzo')
elif a == '12' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
os.system('xdg-open https://youtu.be/ntb_JmvtXzo')
elif a == '13' in a:
print('\x1b[00m')
slowprint('\033[041m RUNNING THE TOOLS \033[00m')
slowprint('\x1b[00m Please Wait ... ')
os.system('sleep 2')
print "%s [%s!%s] Akun Anda Tidak Premium " % (W,R,W)
sys.exit()
os.system('xdg-open https://youtu.be/ntb_JmvtXzo')
else:
header()
else:
print('\x1b[91m[!]\x1b[00m Unknown Command !')
main()
#header() | [
"[email protected]"
] | |
8ec36347697a024be3727ec528439673b06f7c31 | 54b6a91727b6275e77bf20a9c013888e5d81c7b5 | /Inception-DenseNet/inference.py | af64a1ada3cd00675d1d2374236e5c71d14fd338 | [] | no_license | AI-repo/Regularizations-DNN | 1c2a02c6a88408b446079da4962426110f3a80c5 | 6ac08e62354385136b83f10c221da0cc1250b803 | refs/heads/master | 2022-03-25T06:32:22.469465 | 2019-12-23T09:10:01 | 2019-12-23T09:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | """Sample PyTorch Inference script
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import logging
import numpy as np
import torch
from timm.models import create_model, apply_test_time_pool
from timm.data import Dataset, create_loader, resolve_data_config
from timm.utils import AverageMeter, setup_default_logging
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--output_dir', metavar='DIR', default='./', help='path to output files')
parser.add_argument('--model', '-m', metavar='MODEL', default='dpn92', help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=64, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=224, type=int, metavar='N', help='Input image dimension')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=3055, help='Number classes in dataset')
parser.add_argument('--log-freq', default=10, type=int, metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1, help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true', help='disable test time pool')
parser.add_argument('--topk', default=5, type=int, metavar='N', help='Top-k to output to CSV')
def main():
setup_default_logging()
args = parser.parse_args()
# might as well try to do something useful...
args.pretrained = args.pretrained or not args.checkpoint
# create model
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=3,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint)
logging.info('Model %s created, param count: %d' %
(args.model, sum([m.numel() for m in model.parameters()])))
config = resolve_data_config(vars(args), model=model)
model, test_time_pool = apply_test_time_pool(model, config, args)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
else:
model = model.cuda()
loader = create_loader(
Dataset(args.data),
input_size=config['input_size'],
batch_size=args.batch_size,
use_prefetcher=True,
interpolation=config['interpolation'],
mean=config['mean'],
std=config['std'],
num_workers=args.workers,
crop_pct=1.0 if test_time_pool else config['crop_pct'])
model.eval()
k = min(args.topk, args.num_classes)
batch_time = AverageMeter()
end = time.time()
topk_ids = []
with torch.no_grad():
for batch_idx, (input, _) in enumerate(loader):
input = input.cuda()
labels = model(input)
topk = labels.topk(k)[1]
topk_ids.append(topk.cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
logging.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, len(loader), batch_time=batch_time))
topk_ids = np.concatenate(topk_ids, axis=0).squeeze()
with open(os.path.join(args.output_dir, './topk_ids.csv'), 'w') as out_file:
filenames = loader.dataset.filenames()
for filename, label in zip(filenames, topk_ids):
filename = os.path.basename(filename)
out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(
filename, label[0], label[1], label[2], label[3], label[4]))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2a489f15271680f2e83a5c08d9434cf1ff97b209 | 6dde75c27bdae3e7be8470a555c90968be1169b6 | /discoutils/prebyblo_filter.py | a8c29ac95ef0f1bf36530bb0730b4370f6e8d70f | [] | no_license | tttthomasssss/DiscoUtils | 81936fed2fb504f3809d92913e4eab16707f2028 | e1746533b7bd3d3bcbc7318128b925cdfb4282d7 | refs/heads/master | 2020-05-21T05:46:52.690587 | 2017-03-30T21:35:27 | 2017-03-30T21:35:27 | 36,299,367 | 0 | 0 | null | 2015-05-26T13:56:02 | 2015-05-26T13:56:02 | null | UTF-8 | Python | false | false | 4,358 | py | import argparse
import logging
import re
__author__ = 'Julie'
def count(filename, pos_patterns):
logging.info('Counting entry patterns: %s', [x.pattern for x in pos_patterns])
counts = {} #dictionary to hold counts of items
with open(filename, 'r') as instream:
logging.info("Reading %s", filename)
linesread = 0
for line in instream:
initial = line.split('\t')[0]
if any(posPATT.match(initial) for posPATT in pos_patterns):
current = counts.get(initial, 0)
counts[initial] = current + 1
linesread += 1
if linesread % 10000 == 0:
logging.info("Read %d lines", linesread)
return counts, linesread
def filterline(fields, pattern):
#filter out features in exclusion_list
if pattern:
fields = [f for f in fields if pattern.match(f)]
return '%s\n' % ('\t'.join(fields)) if fields else ''
else:
return '%s\n' % ('\t'.join(fields))
def do_filtering(filename, outstream, threshold, pos_patterns, feature_pattern, counts, total_lines):
logging.info("Rereading %s", filename)
with open(filename, 'r') as instream:
linesprocessed = 0
for line in instream:
line = line.rstrip()
fields = line.split('\t')
initial = fields.pop(0)
if any(p.match(initial) for p in pos_patterns):
if counts[initial] > threshold:
fields = filterline(fields, feature_pattern)
if fields:
outstream.write('%s\t%s' % (initial, fields))
linesprocessed += 1
if linesprocessed % 10000 == 0:
percent = linesprocessed * 100. / total_lines
logging.info("Processed %d lines (%2.1f percent)", linesprocessed, percent)
def read_configuration():
#first 2 args must be filename and frequency threshold
pos_patterns = {'N': re.compile('.*/N'),
'V': re.compile('.*/V'),
'J': re.compile('.*/J'),
'R': re.compile('.*/RB')}
feature_patterns = {'wins': re.compile('T:.*'),
'deps': re.compile('.+-(HEAD|DEP):.+'),
'all': re.compile('.+'),
}
def pos_pattern_validator(v):
try:
return pos_patterns[v]
        except KeyError:
raise argparse.ArgumentTypeError("String '%s' does not match required format" % v)
def feature_pattern_validator(v):
try:
return feature_patterns[v]
        except KeyError:
raise argparse.ArgumentTypeError("String '%s' does not match required format" % v)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('filename', help='Input file')
parser.add_argument('threshold', help='Entry frequency threshold', type=int, default=1)
parser.add_argument('-pos', required=False, type=pos_pattern_validator, nargs='+',
default={pos_patterns['N']},
help='Entry type to accept. Valid choices are N, V, J, R')
parser.add_argument('-feats', required=False, default='deps', type=feature_pattern_validator,
help='Feature type to accept. Valid choices are deps, wins or all')
parser.add_argument('-o', '--output', required=False, default=None,
                        help="Name of output file. Defaults to '<input>.pbfiltered'")
return parser.parse_args()
def main():
parameters = read_configuration()
output = parameters.output if parameters.output else '%s.pbfiltered' % parameters.filename
logging.info("Writing %s", output)
counts, total_lines = count(parameters.filename, parameters.pos) # make count dictionary
logging.info("Number of counted words is %d", len(counts))
with open(output, 'w') as outfile:
logging.info('Writing to %s', output)
do_filtering(parameters.filename, outfile, parameters.threshold,
parameters.pos, parameters.feats, counts, total_lines)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format="%(asctime)s\t%(module)s.%(funcName)s """
"(line %(lineno)d)\t%(levelname)s : %(""message)s")
main()
| [
"[email protected]"
] | |
d400e7e6970141f565c6fade749a505fffce6f5f | 3a194cb61957235b9be6631161a50101d74de271 | /1149_rgb_street.py | a4a46674a817626e8e57d2090711fd1c560dee79 | [] | no_license | jada-gwon/MRA | 0b7edf0a75c2de434cb563a68640c91baff34047 | b4fbf17a45bdb95c9e0bd2bf9b0716e1d67087da | refs/heads/main | 2023-08-21T18:42:39.671862 | 2021-09-15T08:58:18 | 2021-09-15T08:58:18 | 378,040,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | N = int(input())
H = [list(map(int, input().split())) for i in range(N)]
DP = [[None for j in range(3)] for i in range(N)]
for i in range(1, N):
DP[i][0] = min(DP[i - 1][1], DP[i - 1][2]) + H[i][0];
DP[i][1] = min(DP[i - 1][0], DP[i - 1][2]) + H[i][1];
DP[i][2] = min(DP[i - 1][0], DP[i - 1][1]) + H[i][2];
print(min(DP[-1][0], DP[-1][1], DP[-1][2])) | [
"[email protected]"
] | |
9f73dff52e84a197a2e2dd03db06765b6f79140e | 4f510470b3093ab2c60f929221af82c79b121ca7 | /ML/新建文件夹/Day04all/svm_bal.py | e008eb573c5604fe57aa655a601d1bf163158790 | [] | no_license | q737645224/python3 | ce98926c701214f0fc7da964af45ba0baf8edacf | 4bfabe3f4bf5ba4133a16102c51bf079d500e4eb | refs/heads/master | 2020-03-30T07:11:17.202996 | 2018-10-30T06:14:51 | 2018-10-30T06:14:51 | 150,921,088 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp
x, y = [], []
with open('../../data/imbalance.txt', 'r') as f:
for line in f.readlines():
data = [float(substr) for substr
in line.split(',')]
x.append(data[:-1])
y.append(data[-1])
x = np.array(x)
y = np.array(y, dtype=int)
train_x, test_x, train_y, test_y = ms.train_test_split(
x, y, test_size=0.25, random_state=7)
model = svm.SVC(kernel='linear',
class_weight='balanced')
model.fit(train_x, train_y)
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(np.arange(l, r, h),
np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))
mp.figure('SVM Linear Classification',
facecolor='lightgray')
mp.title('SVM Linear Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
C0, C1 = y == 0, y == 1
mp.scatter(x[C0][:, 0], x[C0][:, 1], c='orangered', s=60)
mp.scatter(x[C1][:, 0], x[C1][:, 1], c='limegreen', s=60)
mp.show()
| [
"[email protected]"
] | |
26e1b0a5223319c690a95aeec824d5f3299ce85a | 4a2e98d2d744816ccd86baa61a1d8c30e47bb7ae | /pyhap/tlv.py | 3308ee9ecd24b773cbf28f291eacabfb5fb639d4 | [
"Apache-2.0"
] | permissive | partylover/HAP-python | 7fce3dacc22a924244ad718d686cb130442fd7f6 | 7e630f60acfebb7f0f019267ca44b9579a93eb22 | refs/heads/master | 2021-05-06T17:48:38.086427 | 2017-10-27T09:13:49 | 2017-10-27T09:13:49 | 111,837,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # Encodes and decodes Tag-Length-Value data.
import struct
def encode(*args):
assert len(args) % 2 == 0
pieces = []
for x in range(0, len(args), 2):
tag = args[x]
data = args[x + 1]
total_length = len(data)
if len(data) <= 255:
encoded = tag + struct.pack("B", total_length) + data
else:
encoded = b""
for x in range(0, total_length // 255):
encoded = encoded + tag + b'\xFF' + data[x * 255: (x + 1) * 255]
            remaining = total_length % 255
            if remaining:  # if len(data) is an exact multiple of 255, data[-0:] would wrongly re-append everything
                encoded = encoded + tag + struct.pack("B", remaining) \
                    + data[-remaining:]
pieces.append(encoded)
return b"".join(pieces)
def decode(data):
objects = {}
remaining = len(data)
current = 0
while current < len(data):
# The following hack is because bytes[x] is an int
# and we want to keep the tag as a byte.
tag = data[current:current+1]
length = data[current+1]
value = data[current + 2 : current + 2 + length]
if tag in objects:
objects[tag] = objects[tag] + value
else:
objects[tag] = value
current = current + 2 + length
return objects
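

# Round-trip sketch (the tags below are made-up one-byte values, not HAP
# constants; this only exercises encode/decode as defined above):
if __name__ == "__main__":
    packed = encode(b"\x01", b"hello", b"\x02", b"\x00" * 300)
    parsed = decode(packed)
    assert parsed[b"\x01"] == b"hello"
    assert len(parsed[b"\x02"]) == 300  # >255-byte values are split and rejoined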
| [
"[email protected]"
] | |
e4b20dafd64ea2e3484414033db3f270a5e8067d | 0a3bbce6b2ef1b0faf894d02a829cbc6e9c126f5 | /request.py | 1a975ed2297c2bd20adc9ea4be4cf32a2cbca0d4 | [] | no_license | mznzgt/k8s_simulator | 97f89756b22f1627d77ad0e986ffcce3ad56db7b | b04042f5937bdc0c75da25e1533882cb6ab0c085 | refs/heads/master | 2023-08-22T09:12:48.253508 | 2021-10-25T06:42:38 | 2021-10-25T06:42:38 | 420,079,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py |
import threading
#Requests stress Pod resources for a given period of time to simulate load
#deploymentLabel is the Deployment that the request is beings sent to
#cpuCost is the number of threads that the request will use on a pod
#execTime is how long the request will use those resource for before completing
class Request:
def __init__(self, INFOLIST):
self.label = INFOLIST[0]
self.deploymentLabel = INFOLIST[1]
self.fail = threading.Event()
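

# Usage sketch (the INFOLIST layout ["<request label>", "<deployment label>"]
# is inferred from __init__ above; the values themselves are illustrative):
#
# req = Request(["req-1", "frontend"])
# req.fail.set()                      # a worker signals failure via the event
# assert req.fail.is_set()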
| [
"[email protected]"
] | |
fd719b8953b0c6fb6f22e6a3c8d510fe769cc54f | 51a5b120d26dbfc6066e070bfdbf92680484cf53 | /buggy/__init__.py | af09a898b379805986afb12363ab040bc27d2bee | [
"BSD-3-Clause"
] | permissive | fusionbox/buggy | c02d902fa1c0744541acdbf07cdc3765f6fb7d4e | fb6f4a34f6896b65c843ebe711f5bf3279d33049 | refs/heads/master | 2021-01-19T17:07:26.489733 | 2019-01-31T21:01:43 | 2019-01-31T21:01:43 | 88,305,496 | 2 | 2 | BSD-3-Clause | 2019-07-16T14:44:13 | 2017-04-14T21:43:37 | Python | UTF-8 | Python | false | false | 46 | py | default_app_config = 'buggy.apps.BuggyConfig'
| [
"[email protected]"
] | |
0a600e2420bb8e38685b76c17c2afe383d5ec98c | c3f6562533f6924435b22c72e2a4e28f4ca0f89f | /scratch/machine_4/root.py | 8da2240fa05e145e1b567d0a50415a471621ccd3 | [
"MIT"
] | permissive | Strangemother/python-state-machine | cd19708749ad56c5fd59719c417254aafbdfeb84 | c18e8cbbad9ebf1766c742c33e7fb7c92cd88328 | refs/heads/master | 2021-01-02T08:47:51.897217 | 2017-10-02T17:43:33 | 2017-10-02T17:43:38 | 34,356,886 | 1 | 0 | null | 2015-11-22T17:02:02 | 2015-04-21T23:07:37 | Python | UTF-8 | Python | false | false | 132 | py | from machine import Machine
from conditions import Condition
import node as nodes
from node import Node
import examples as examples
| [
"[email protected]"
] | |
16f124addb862201b70610ede3335c4467feef06 | 174a10a913d2a91cadd5e8bacd6232c07d7addf9 | /startcamp/20181218/write_file.py | 1c07fe6ddda0450935c90b6e0b143ad8e79bcd77 | [] | no_license | pjh6315/TIL | 7cf161ecd208ac75768bffe158963a297bdd9e64 | 6a7d45778c57aca7225485a1894bb93fb8ba6c23 | refs/heads/master | 2020-04-12T00:41:04.761991 | 2019-03-18T08:46:20 | 2019-03-18T08:46:20 | 162,208,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | # f = open('ssafy.txt','w') # w: write , r: read , a: append
# f.write('This is SSAFY!')
# f.close()
# \t : tab
# \\ : use when you want to print a literal backslash '\'
# \' \" : likewise for quotes
with open('ssafy.txt','w',encoding='utf8') as f:
f.writelines(['1\n','2\n','3\n'])
"""
for i in range(10):
f.write(f'This is \"SSAFY\"!!!!!! {i}\n')
""" | [
"[email protected]"
] | |
139eb8b63126d23b949ba149133245ef3c622943 | 2260302d1c8847159ad5d9f6af11829e81495d4d | /venv/bin/django-admin.py | f2ce3960601e8ca2930c05fd69702a7c09ca2272 | [] | no_license | ShaharAsraf/BBM | 53b8b2588e640664aa31a7f0e680a1421df98737 | 03f6fee83154f1a0989f51c7afdb470d29695f49 | refs/heads/master | 2023-06-01T21:34:01.384383 | 2021-06-19T16:42:51 | 2021-06-19T16:42:51 | 378,416,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #!/home/shahar/PycharmProjects/bloodbankmanagement/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
6472574692488b25c0b61559769acc571a0679f3 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/BIANCA-BRICK-MIBMODEM-MIB.py | 144003d311bdf77a5486fc1f282c845a1507b3a9 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 22,995 | py | #
# PySNMP MIB module BIANCA-BRICK-MIBMODEM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BIANCA-BRICK-MIBMODEM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:38:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter32, TimeTicks, Unsigned32, ObjectIdentity, Integer32, IpAddress, Counter64, Gauge32, ModuleIdentity, iso, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter32", "TimeTicks", "Unsigned32", "ObjectIdentity", "Integer32", "IpAddress", "Counter64", "Gauge32", "ModuleIdentity", "iso", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
org = MibIdentifier((1, 3))
dod = MibIdentifier((1, 3, 6))
internet = MibIdentifier((1, 3, 6, 1))
private = MibIdentifier((1, 3, 6, 1, 4))
enterprises = MibIdentifier((1, 3, 6, 1, 4, 1))
bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272))
bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4))
mdm = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 18))
mdmProfileTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 18, 1), )
if mibBuilder.loadTexts: mdmProfileTable.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileTable.setDescription('This Table contains entries for the modem profiles. For PPP connections, modem profiles are assigned to incoming connections via the isdnDispatchtable. For outgoing connections, the profile is assigned by the biboPppTable. Profile 1 is used as default profile for ppp and is the only profile available for isdnlogin connections.')
mdmProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1), ).setIndexNames((0, "BIANCA-BRICK-MIBMODEM-MIB", "mdmProfileName"))
if mibBuilder.loadTexts: mdmProfileEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileEntry.setDescription('')
mdmProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("profile-1", 1), ("profile-2", 2), ("profile-3", 3), ("profile-4", 4), ("profile-5", 5), ("profile-6", 6), ("profile-7", 7), ("profile-8", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmProfileName.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileName.setDescription('The name of the profile. Eight profiles are available.')
mdmProfileDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileDescr.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileDescr.setDescription('Profile Description. Info only.')
mdmProfileModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("bell103", 1), ("bell212", 2), ("v21", 3), ("v22", 4), ("v22bis", 5), ("v23", 6), ("v32", 7), ("v32bis", 8), ("v34", 9), ("k56flex", 10), ("vfc", 11), ("v90", 12)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileModulation.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileModulation.setDescription('This object specifies the preferred modulation (automode on) or the modulation (automode disabled) to be used in originating or answering a connection. bell103 (1) : 300 bell212 (2) : 1200 v21 (3) : 300 v22 (4) : 1200 v22bis (5) : 2400 or 1200 v23 (6) : 75/1200 or 1200/75 v32 (7) : 9600, 4800 v32bis (8) : 14000, 12000, 9600, 7200, 4800 v34 (9) : 33600, 31200, 28800, 26400, 24000, 21600, 19200, 16800, 14400, 12000, 9600, 7200, 4800, 2400 k56flex (10) : 56000, 54000, 52000, 50000, 48000, 46000, 44000, 42000, 40000, 38000, 36000, 34000, 32000 vfc (11) : 28800, 26400, 24000, 21600, 19200, 16800, 14400, 12000, 9600 v90 (12) : 56000, 54667, 53333, 52000, 50667, 49333, 48000, 46667, 45333, 42667, 41333, 40000, 38667, 37333, 36000, 34667, 33333, 32000, 30667, 29333, 28000 ')
mdmProfileMinBps = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(300, 1200, 2400, 4800, 7200, 9600, 12000, 14400, 16800, 19200, 21600, 24000, 26400, 28800, 31200, 33600, 32000, 34000, 36000, 38000, 40000, 42000, 44000, 46000, 48000, 50000, 52000, 54000, 56000, 75))).clone(namedValues=NamedValues(("b300", 300), ("b1200", 1200), ("b2400", 2400), ("b4800", 4800), ("b7200", 7200), ("b9600", 9600), ("b12000", 12000), ("b14400", 14400), ("b16800", 16800), ("b19200", 19200), ("b21600", 21600), ("b24000", 24000), ("b26400", 26400), ("b28800", 28800), ("b31200", 31200), ("b33600", 33600), ("b32000", 32000), ("b34000", 34000), ("b36000", 36000), ("b38000", 38000), ("b40000", 40000), ("b42000", 42000), ("b44000", 44000), ("b46000", 46000), ("b48000", 48000), ("b50000", 50000), ("b52000", 52000), ("b54000", 54000), ("b56000", 56000), ("b75", 75)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileMinBps.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileMinBps.setDescription('This object specifies the lowest rate, at which the modem may establish a connection.')
mdmProfileMaxRecvBps = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(300, 1200, 2400, 4800, 7200, 9600, 12000, 14400, 16800, 19200, 21600, 24000, 26400, 28800, 31200, 33600, 32000, 34000, 36000, 38000, 40000, 42000, 44000, 46000, 48000, 50000, 52000, 54000, 56000))).clone(namedValues=NamedValues(("b300", 300), ("b1200", 1200), ("b2400", 2400), ("b4800", 4800), ("b7200", 7200), ("b9600", 9600), ("b12000", 12000), ("b14400", 14400), ("b16800", 16800), ("b19200", 19200), ("b21600", 21600), ("b24000", 24000), ("b26400", 26400), ("b28800", 28800), ("b31200", 31200), ("b33600", 33600), ("b32000", 32000), ("b34000", 34000), ("b36000", 36000), ("b38000", 38000), ("b40000", 40000), ("b42000", 42000), ("b44000", 44000), ("b46000", 46000), ("b48000", 48000), ("b50000", 50000), ("b52000", 52000), ("b54000", 54000), ("b56000", 56000)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileMaxRecvBps.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileMaxRecvBps.setDescription('This object specifies the highest receive rate, at which the modem may establish a connection.')
mdmProfileMaxXmitBps = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(300, 1200, 2400, 4800, 7200, 9600, 12000, 14400, 16800, 19200, 21600, 24000, 26400, 28800, 31200, 33600, 32000, 34000, 36000, 38000, 40000, 42000, 44000, 46000, 48000, 50000, 52000, 54000, 56000))).clone(namedValues=NamedValues(("b300", 300), ("b1200", 1200), ("b2400", 2400), ("b4800", 4800), ("b7200", 7200), ("b9600", 9600), ("b12000", 12000), ("b14400", 14400), ("b16800", 16800), ("b19200", 19200), ("b21600", 21600), ("b24000", 24000), ("b26400", 26400), ("b28800", 28800), ("b31200", 31200), ("b33600", 33600), ("b32000", 32000), ("b34000", 34000), ("b36000", 36000), ("b38000", 38000), ("b40000", 40000), ("b42000", 42000), ("b44000", 44000), ("b46000", 46000), ("b48000", 48000), ("b50000", 50000), ("b52000", 52000), ("b54000", 54000), ("b56000", 56000)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileMaxXmitBps.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileMaxXmitBps.setDescription('This object specifies the highest transmit rate, at which the modem may establsh a connection.')
mdmProfileAutoMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileAutoMode.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileAutoMode.setDescription('When this object is set to on, the modem will automatically negotiate the best rates beginning with the selected Modulation and MaxBps values. Otherwise, it will use only the selected modulation in the range between MinBps and MaxBps.')
mdmProfileComprV42bis = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("auto", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileComprV42bis.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileComprV42bis.setDescription('This object specifies, whether V.42bis compression shall be used for modem connections. When set to off, V.42bis compression will never be used. When set to auto, compression may be negotiated with the partner modem.')
mdmProfileComprMNP5 = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("auto", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileComprMNP5.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileComprMNP5.setDescription('This object specifies, whether MNP Class 5 compression shall be used for modem connections. When set to off, MNP Class 5 compression will never be used. When set to auto, compression may be negotiated with the partner modem.')
mdmProfileErrorCorr = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none", 1), ("required", 2), ("auto", 3), ("lapm", 4), ("mnp", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileErrorCorr.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileErrorCorr.setDescription('This object specifies the kind of error correction being used for a connection: none no error correction will be used. required either LAPM or MNP will be used. If the partner is not able to do error correction, the connection cannot be established. auto negotiation takes place to use either LAPM, MNP or no error correction. lapm Enforce LAPM error correction. If the partner is not able to do LAPM, the connection cannot be established. mnp use MNP error correction. If the partner is not able to do MNP, the connection cannot be established. ')
mdmProfileXmitLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-15, 0))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileXmitLevel.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileXmitLevel.setDescription('This object specifies the transmit attenuation in dB.')
mdmProfileCDWaitTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1000, 255000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileCDWaitTime.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileCDWaitTime.setDescription('This object specifies the amount of time in milliseconds, the modem will wait for the appearance of the carrier. If the carrier will not appear in this time period, the connection will be disconnected.')
mdmProfileCDRespTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 25500))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileCDRespTime.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileCDRespTime.setDescription('This object specifies the period of time in milliseconds, a carrier has to be present, before it will be recognized as a carrier.')
mdmProfileCDDiscTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 25500))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileCDDiscTime.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileCDDiscTime.setDescription('This object specifies the amount of time, the carrier has to drop, before the modem will assume the carrier to be lost.')
mdmProfileRetrain = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("retrain", 2), ("fallbf", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileRetrain.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileRetrain.setDescription('The modem will : off (1) : disable line quality monitor and auto-retrain. retrain (2) : enable line quality monitor and auto-retrain. fallbf (3) : enable line quality monitor and fallback/fall forward.')
mdmProfileIdleTimerMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("static", 1), ("dynamic", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileIdleTimerMode.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileIdleTimerMode.setDescription('The idle timer is used to specify the duration of an interval between successive characters received from the modem which, when exceeded, will cause the modem driver to terminate the assembly of a data packet and to forward it to the higher-layer protocols (this is in analogy to the ITU X.3 parameter #4). This object specifies the mode how the idle timer is set: static (1): the idle time is taken from the object mdmProfileIdleTimerFixedDelay (default) dynamic (2): the idle time is set to the duration that a number of mdmProfileIdleTimerCharDelay characters (octets) would take for transmission corresponding to the actually used receive bit-rate of the modem. If the bit-rate can not be found out, the static mode is used instead.')
mdmProfileIdleTimerFixedDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileIdleTimerFixedDelay.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileIdleTimerFixedDelay.setDescription('This object specifies the duration of the idle timer in milliseconds (see the object mdmProfileIdleTimerMode for further description). Default: 5')
mdmProfileIdleTimerCharDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmProfileIdleTimerCharDelay.setStatus('mandatory')
if mibBuilder.loadTexts: mdmProfileIdleTimerCharDelay.setDescription('This object specifies the number of characters (octets) which is used to calculate the idle timer corresponding to the actually used receive bit-rate of the modem (see the object mdmProfileIdleTimerMode for further description). Default: 3')
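# Worked example (an added note, not part of the generated MIB; it assumes the
# usual 10 bits per asynchronous character): in dynamic idle-timer mode with
# the default mdmProfileIdleTimerCharDelay of 3 characters at a 33600 bps
# receive rate, the idle delay is 3 * 10 / 33600 s, roughly 0.9 ms; when the
# receive bit-rate cannot be determined, mdmProfileIdleTimerFixedDelay
# (default 5 ms) applies instead.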
mdmTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 18, 2), )
if mibBuilder.loadTexts: mdmTable.setStatus('mandatory')
if mibBuilder.loadTexts: mdmTable.setDescription('The mdmTable contains information for each modem. The entries contain various statistical data for modem connections. Only the system can add entries to this table. The user is able to reboot, enable or disable a modem. The system updates infos about the current connection.')
mdmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1), ).setIndexNames((0, "BIANCA-BRICK-MIBMODEM-MIB", "mdmIndex"))
if mibBuilder.loadTexts: mdmEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mdmEntry.setDescription('')
mdmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mdmIndex.setDescription('Index of this modem (Slot/Cpu/Modem)')
mdmAction = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("reboot", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmAction.setStatus('mandatory')
if mibBuilder.loadTexts: mdmAction.setDescription('Writing to this variable reboot (1) : initiates a reboot. disabled (2) : remove modem resource from allocation pool. enabled (3) : put modem to allocation resource pool. ')
mdmType = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("csm56K", 1), ("csm336", 2), ("mdm144", 3), ("mdm336", 4), ("telindus", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmType.setStatus('mandatory')
if mibBuilder.loadTexts: mdmType.setDescription('Type of this modem csm56K (1) : CentralSiteModem K56Flex csm336 (2) : CentralSiteModem 33600 mdm144 (3) : ClientSiteModem 14400 mdm336 (4) : ClientSiteModem 33600 telindus (5) : ADSP with Telindus firmware ')
mdmState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("booting", 1), ("idle", 2), ("calling", 3), ("called", 4), ("connected", 5), ("hangup", 6), ("stopped", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmState.setStatus('mandatory')
if mibBuilder.loadTexts: mdmState.setDescription('Current status of the modem. booting (1) : init phase idle (2) : this modem resource is available calling (3) : outgoing call initiated called (4) : incoming call initiated connected (5) : data transfer phase established hangup (6) : going to terminate a connection stopped (7) : out of order, modem is not usable ')
mdmMode = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 7))).clone(namedValues=NamedValues(("modem", 1), ("ppp", 2), ("fax", 3), ("dtmf", 4), ("none", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmMode.setStatus('mandatory')
if mibBuilder.loadTexts: mdmMode.setDescription('Type of mode modem (1) : modem native ppp (2) : modem native + async hdlc fax (3) : fax mode dtmf (4) : receive and signal DTMF touchtones none (7) : modem curently not used ')
mdmModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 31))).clone(namedValues=NamedValues(("bell103", 1), ("bell212", 2), ("v21", 3), ("v22", 4), ("v22bis", 5), ("v23", 6), ("v32", 7), ("v32bis", 8), ("v34", 9), ("k56flex", 10), ("vfc", 11), ("v90", 12), ("unknown", 31)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmModulation.setStatus('mandatory')
if mibBuilder.loadTexts: mdmModulation.setDescription('Negotiated modulation. same as mdmProfileModulation.')
mdmErrorCorr = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("alt", 2), ("lapm", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmErrorCorr.setStatus('mandatory')
if mibBuilder.loadTexts: mdmErrorCorr.setDescription('Negotiated error correction protocol. none (1) : no error correction alt (2) : MNP error correction established lapm (3) : LAPM error correction established ')
mdmCompression = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("class5", 2), ("v42bis", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmCompression.setStatus('mandatory')
if mibBuilder.loadTexts: mdmCompression.setDescription('Negotiated compression correction protocol. none (1) : no compression class5 (2) : MNP5 compression v42bis (3) : V.42bis compression ')
mdmXmitSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmXmitSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: mdmXmitSpeed.setDescription('Negotiated transmit speed.')
mdmRcvSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmRcvSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: mdmRcvSpeed.setDescription('Negotiated receive speed.')
mdmIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mdmIfIndex.setDescription("the (ISDN) interface's slot this modem is connected to")
mdmIfBchannel = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 18, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmIfBchannel.setStatus('mandatory')
if mibBuilder.loadTexts: mdmIfBchannel.setDescription("the (ISDN) interface's B channel this modem is connected to")
mibBuilder.exportSymbols("BIANCA-BRICK-MIBMODEM-MIB", mdmIfIndex=mdmIfIndex, mdmProfileComprV42bis=mdmProfileComprV42bis, mdmRcvSpeed=mdmRcvSpeed, mdmProfileMaxXmitBps=mdmProfileMaxXmitBps, mdmProfileCDWaitTime=mdmProfileCDWaitTime, mdmProfileName=mdmProfileName, dod=dod, bintec=bintec, mdmProfileDescr=mdmProfileDescr, mdmModulation=mdmModulation, enterprises=enterprises, mdmIndex=mdmIndex, mdmCompression=mdmCompression, mdmProfileTable=mdmProfileTable, internet=internet, mdmEntry=mdmEntry, mdmState=mdmState, mdmProfileMinBps=mdmProfileMinBps, mdmProfileXmitLevel=mdmProfileXmitLevel, mdmType=mdmType, mdmProfileCDRespTime=mdmProfileCDRespTime, mdmProfileIdleTimerFixedDelay=mdmProfileIdleTimerFixedDelay, mdmProfileModulation=mdmProfileModulation, org=org, mdmAction=mdmAction, mdmProfileComprMNP5=mdmProfileComprMNP5, mdmProfileEntry=mdmProfileEntry, mdmMode=mdmMode, mdmXmitSpeed=mdmXmitSpeed, mdmErrorCorr=mdmErrorCorr, mdmIfBchannel=mdmIfBchannel, private=private, mdmProfileIdleTimerCharDelay=mdmProfileIdleTimerCharDelay, mdmTable=mdmTable, bibo=bibo, mdmProfileCDDiscTime=mdmProfileCDDiscTime, mdm=mdm, mdmProfileIdleTimerMode=mdmProfileIdleTimerMode, mdmProfileMaxRecvBps=mdmProfileMaxRecvBps, mdmProfileAutoMode=mdmProfileAutoMode, mdmProfileErrorCorr=mdmProfileErrorCorr, mdmProfileRetrain=mdmProfileRetrain)
| [
"[email protected]"
] | |
e2863988a5be5d69a26c738e86a813522864128e | 9fae57f33c3fae1607a5b0036c17195ed7dcd682 | /urban_clap/urban_clap/urls.py | bbee1774f1774b913d9820399cec7e4b12acda38 | [] | no_license | Nainagerwani07/urban_clap-clone | 7baa605dfa7c29b3552d6f509513f3e1b2139fd3 | 60384c1acaa9692f472623fd4d8c3f0ac147437d | refs/heads/master | 2020-06-27T03:04:41.171230 | 2019-07-31T09:51:02 | 2019-07-31T09:51:02 | 199,827,744 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | """urban_clap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include
from django.conf.urls import url
from django.urls import path
from .router import router
from rest_framework.authtoken import views
# from django.contrib.auth import views as auth_views
urlpatterns = [
path('', include('urban.urls')),
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('api-token-auth/', views.obtain_auth_token),
# path('custom-url/', include('django_expiring_token.urls')),
# url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
# url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
# url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
# auth_views.password_reset_confirm, name='password_reset_confirm'),
# url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
]
| [
"[email protected]"
] | |
cd6c9b787d9d968dda1a8d7cc9a4f7be1a6fbbe3 | 4e97cacd839adf7667e9c5de882213fd2c6be1f8 | /hello2.py | 4144e174d9932b9c4f951296ff8b65a91d697dac | [] | no_license | Marlar22/flask_mega_tutorial | 0770a1ee47343625e6c713c9f9489cfb8f18c081 | c583b138b0c32a992da6a01f9d52a7c18612a0e8 | refs/heads/master | 2020-03-20T04:28:34.866700 | 2018-06-18T03:45:35 | 2018-06-18T03:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return '''
<html>
<head><title>This is my flask app</title></head>
<body></body>
</html>
'''
if __name__ == "__main__":
app.run(debug=True)
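# Usage note: `python hello2.py` starts the Flask development server, by
# default on http://127.0.0.1:5000/; debug=True enables the interactive
# debugger and auto-reload and should not be left on in production.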
| [
""
] | |
0b6c8e608458e1e42d180c5b59bc259f38398666 | 546f70077efb407d86d399dad1a3aeae250c1454 | /phonescrubber/config.py | 8f1020a9be2c1fb2c55a0ff03d1f7bff150486bc | [
"MIT"
] | permissive | hummans/phone-number-validator | 3e942ab29ddb67c71dfeaf5ceb66ed6695446b03 | 762122efa03c3e6057204c8b5a7e3bdc468c94e4 | refs/heads/master | 2022-02-26T11:54:53.913787 | 2019-08-28T15:32:01 | 2019-08-28T15:32:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import os
class Config(object):
def init_app(self):
return
class DevelopmentConfig(Config):
DEBUG = True
class TestingConfig(Config):
TESTING = True
class StagingConfig(Config):
DEBUG = False
class ProductionConfig(Config):
DEBUG = False
TESTING = False
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'staging': StagingConfig,
'production': ProductionConfig,
'demo': ProductionConfig,
'default': DevelopmentConfig
}
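# Hypothetical usage sketch (the environment-variable name and `app` object
# are illustrative, not defined in this module): an application factory would
# pick a class from the mapping above, e.g.
#
#     cfg = config[os.environ.get('APP_ENV', 'default')]
#     app.config.from_object(cfg)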
| [
"[email protected]"
] | |
249fe015b9c47bf50605b1299fc814a0741aef70 | ec4ed516b15d96599f39c21c211830884a8561e2 | /16020332.py | 6f50c142067ead2d3458a1ec7fa4a8dce8f5d5ac | [] | no_license | Dhirasha/Server | 4edac6b92bb14f83792965d0a925ec22e401fe14 | ace77647c5fb08125e06cbceb0947709e619b458 | refs/heads/master | 2020-05-16T15:59:46.938791 | 2019-04-24T04:34:02 | 2019-04-24T04:34:02 | 161,175,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py | import socket
HOST_NAME, PORT_NUMBER = '127.0.0.1', 8080
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def server_start():
try:
        print("Starting server on " + str(HOST_NAME) + ":" + str(PORT_NUMBER))
        listen_socket.bind((HOST_NAME, PORT_NUMBER))
        print("Server started")
        server_conn()
    except OSError:
        print("Port " + str(PORT_NUMBER) + " is already in use")
        server_shutdown()
def server_shutdown():
try:
print("Server shutting down.....")
listen_socket.shutdown(socket.SHUT_RDWR)
except Exception:
pass
def define_headers(file_code, file_type):
print("Requested File Type : " + file_type)
header = ''
if file_code == 500:
header += 'HTTP/1.1 500 Unexpected error\n'
elif file_code == 200:
header += 'HTTP/1.1 200 OK\n'
elif file_code == 403:
header += 'HTTP/1.1 403 Access denied\n'
elif file_code == 400:
header += 'HTTP/1.1 400 Bad Request\n'
if file_type == 'jpg' or file_type == 'jpeg':
header += 'Content-Type: image/jpeg\n'
elif file_type == 'html' or file_type == 'php':
header += 'Content-Type: text/html\n'
elif file_type == 'png':
header += 'Content-Type: image/png\n'
elif file_type == 'css':
header += 'Content-Type: text/css\n'
header += 'Connection: close\n\n'
return header
def server_conn():
listen_socket.listen()
while True:
(client_connection,client_address) = listen_socket.accept()
client_connection.settimeout(120)
while True:
            request = client_connection.recv(1024).decode()
            if not request:
                client_connection.close()
                break
            print(request)
            # the request line looks like "GET /path?query HTTP/1.1";
            # keep only the path and strip any query string
            file_requested = request.split(' ')[1]
            file_requested = file_requested.split('?')[0]
if file_requested == "/":
file_requested = "/index.html"
file_path = 'server' + file_requested
try:
                response_header = "HTTP/1.1 200 OK\n".encode()
f = open(file_path, 'rb')
response_data = f.read()
f.close()
print("Requested File : " + file_path)
            except FileNotFoundError:
                response_header = b"HTTP/1.1 404 Not Found\n"
                response_data = b"<h1>Error 404 - File Not Found</h1>"
            except UnboundLocalError:
                response_header = b"HTTP/1.1 500 Internal Server Error\n"
                response_data = b"<h1>Unbound local error</h1>"
            except IndexError:
                response_header = b"HTTP/1.1 500 Internal Server Error\n"
                response_data = b"<h1>Index error</h1>"
            except SystemError:
                response_header = b"HTTP/1.1 500 Internal Server Error\n"
                response_data = b"<h1>System error</h1>"
            response_header += b'Connection: close\n\n'
            print(response_header.decode())
            response = response_header + response_data
            client_connection.sendall(response)
            client_connection.close()
break
server_start()
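# Usage note: this single-threaded server maps requests onto a local ./server
# directory (file_path = 'server' + requested path), so with a
# server/index.html present, `curl http://127.0.0.1:8080/` returns that file.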
| [
"[email protected]"
] | |
d78cba31e3a21e3b472b19e2b5f49e86497b5b29 | 2838c48dd51994fcc1f3d461877ca7189e5703bd | /api/handlers/base_handler.py | eb051462321caa737338d46a7a2929dab64db2d4 | [] | no_license | maykonlf/TornadoAPI-Curso | 06737c7ef8c47ed16d490a2cb7fe1b757530b9b9 | c10e90ec9347789b682d8c3758f40f50d27f259e | refs/heads/master | 2020-04-02T01:24:00.037407 | 2018-10-17T21:27:50 | 2018-10-17T21:27:50 | 153,853,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | from http import HTTPStatus
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
def data_received(self, chunk):
pass
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "Content-Type")
self.set_header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE")
self.set_header("Content-Type", "application/json")
def options(self):
self.set_status(HTTPStatus.NO_CONTENT)
self.finish()
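# Hypothetical usage sketch (PingHandler is illustrative, not part of this
# module): subclasses inherit the CORS and JSON defaults above and implement
# only the verb methods they need, e.g.
#
#     class PingHandler(BaseHandler):
#         def get(self):
#             self.write({"ping": "pong"})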
| [
"[email protected]"
] | |
d6147d6a9881a3793111bda1a6b618c434cc8aa3 | 8d42729aa769243c9bd908d95a963fc1711b1cbd | /psono/administration/serializers/delete_user.py | 51103b8a6ddbc1e2f8b4eb143e6b2fdcc58ca07c | [
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | it-notify/psono-server | 9450eb88b2ddd357a6e5298b35b662c85e07b030 | 4a5434a005e05db8e87e871559ed6c1ec5dd84a8 | refs/heads/master | 2020-08-08T20:48:33.896231 | 2019-09-28T14:21:02 | 2019-09-28T14:21:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from restapi.fields import UUIDField
from restapi.models import User
class DeleteUserSerializer(serializers.Serializer):
user_id = UUIDField(required=True)
def validate(self, attrs: dict) -> dict:
user_id = attrs.get('user_id')
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
msg = _("NO_PERMISSION_OR_NOT_EXIST")
raise exceptions.ValidationError(msg)
attrs['user'] = user
return attrs
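# Usage sketch (illustrative): serializer = DeleteUserSerializer(data={'user_id': uid})
# followed by serializer.is_valid(raise_exception=True) exposes the resolved
# user object as serializer.validated_data['user'].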
| [
"[email protected]"
] | |
ecf0b967ad97d8010746961a1bf736916b7dcb1d | dbc17aeffac05409cc7c9c24d0c48cfd1539a5e9 | /TopCoder/python/WordBreak.py | d78ff5a2c64c169e6b853113f2fa1e4a4f41296a | [] | no_license | sumanshil/TopCoder | 031ea2003b8c906cc3ecb146c065db9b566ff0a4 | a1fc7389d6f76fae2eb882c6ec751d668334fa42 | refs/heads/master | 2022-12-22T05:48:07.242153 | 2020-02-27T16:28:47 | 2020-02-27T16:28:47 | 29,289,890 | 0 | 3 | null | 2022-12-10T03:18:05 | 2015-01-15T09:20:15 | Java | UTF-8 | Python | false | false | 1,579 | py | from typing import List
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
#res = self.recursive(0, s, wordDict)
dp = [False] * (len(s) + 1)
dp[0] = True
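        # Bottom-up DP: dp[i] is True when the prefix s[:i] can be segmented
        # into dictionary words, i.e. when some word w ends at position i and
        # dp[i - len(w)] already holds.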
        words_by_len = {}
        for word in wordDict:
            words_by_len.setdefault(len(word), []).append(word)
        for index in range(1, len(s) + 1):
            for length, words in words_by_len.items():
                if length <= index and dp[index - length] and s[index - length:index] in words:
                    dp[index] = True
                    break
        return dp[len(s)]
def recursive(self, index: int, s: str, wordDict: List[str]) -> bool:
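        # Top-down variant kept for reference; without memoization it is
        # exponential in the worst case, which is why wordBreak uses the
        # bottom-up table instead.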
if index >= len(s):
return True
for word in wordDict:
sub_str = s[index: index + len(word)]
if sub_str == word:
if self.recursive(index+len(word), s, wordDict):
return True
return False
def isPresent(self, s: str, wordDict: List[str]) -> bool:
try:
if wordDict.index(s, 0, len(wordDict)) >= 0:
return True
except ValueError:
return False
if __name__ == '__main__':
sol = Solution()
res = sol.wordBreak("catsandog",["cats", "dog", "sand", "and", "cat"])
#res = sol.wordBreak("leetcode", ["leet", "code"])
print(res) | [
"[email protected]"
] | |
a559985e89ae89ef42c159ad5519e064cfc3ff87 | 6cc9b71006983db63bf4e2639861d9691ab9c8db | /src/crud.py | d42ffb7c1359d8c055fd01368f3d8d2576a03ea6 | [] | no_license | get-aakash/fastapi_crud | 905ba14ee4b8733831d6b7b87826aad2b54210ca | dfef435cdf74e16c3534c5149d66e18acbbfea85 | refs/heads/master | 2023-09-01T14:01:44.532620 | 2021-09-03T08:50:15 | 2021-09-03T08:50:15 | 388,077,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,136 | py | from datetime import datetime, timedelta
from fastapi.exceptions import HTTPException
from fastapi.param_functions import File
from sqlalchemy.orm import Session, session
from passlib.context import CryptContext
from . import models, schemas
import jwt
from dotenv import dotenv_values
from fastapi import status
from fastapi.encoders import jsonable_encoder
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
config_credentials = dotenv_values(".env")
def get_password_hash(password):
return pwd_context.hash(password)
def check_password(password, hash_password) -> bool:
return pwd_context.verify(password, hash_password)
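# Note: pwd_context.hash() generates a fresh salt on each call, so equal
# passwords yield different bcrypt hashes; verification must therefore go
# through check_password() rather than comparing hash strings directly.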
async def verify_token(id: str, db: Session):
user = db.query(models.Token).filter(models.Token.token_data == id).first()
return user
def get_user_by_email(db: Session, email: str):
return db.query(models.User).filter(models.User.email == email).first()
def get_email_by_reset_code(db: Session, reset_code: str):
return (
db.query(models.ResetCode)
.filter(models.ResetCode.reset_code == reset_code)
.first()
)
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.User).offset(skip).limit(limit).all()
def get_user(db: Session, user_id: int):
return db.query(models.User).filter(models.User.id == user_id).first()
def get_user_by_username(db, username: str):
return db.query(models.User).filter(models.User.full_name == username).first()
def create_super_user(db: Session, user: schemas.UserCreate):
hashed_password = get_password_hash(user.password)
db_user = models.User(
email=user.email,
hashed_password=hashed_password,
full_name=user.full_name,
is_admin=True,
is_staff=False,
)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def create_user(db: Session, user: schemas.UserCreate):
hashed_password = get_password_hash(user.password)
db_user = models.User(
email=user.email,
hashed_password=hashed_password,
full_name=user.full_name,
)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def delete_user(db: Session, user_id: int):
delete_user = db.query(models.User).filter(models.User.id == user_id).first()
db.delete(delete_user)
db.commit()
return delete_user
def update_user(
db: Session, user_id: int, new_password: str, new_email: str, new_full_name: str
):
update_user = db.query(models.User).filter(models.User.id == user_id).first()
    update_user.hashed_password = get_password_hash(new_password)  # store a hash, as create_user does
update_user.email = new_email
update_user.full_name = new_full_name
db.add(update_user)
db.commit()
db.refresh(update_user)
return update_user
def create_item(
item: schemas.ItemCreate,
db: Session,
user_id: int,
category_id: int,
):
db_item = models.Item(
**item.dict(),
owner_id=user_id,
category_id=category_id,
)
db.add(db_item)
db.commit()
db.refresh(db_item)
return db_item
def get_items(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Item).offset(skip).limit(limit).all()
def get_item(db: Session, item_id: int):
db_item = (
db.query(
models.Category.category_title,
models.Item.id,
models.Category.id,
models.Item.item_title,
models.Item.item_description,
models.Item.item_price,
)
.join(models.Category)
.filter(models.Item.id == item_id)
.first()
)
return db_item
def delete_item(db: Session, item_id: int):
delete_item = db.query(models.Item).filter(models.Item.id == item_id).first()
db.delete(delete_item)
db.commit()
return delete_item
def update_item(db: Session, item_id: int, item: schemas.ItemCreate):
update_items = db.query(models.Item).filter(models.Item.id == item_id).first()
update_items.description = item.description
update_items.title = item.title
update_items.price = item.price
db.commit()
db.refresh(update_items)
return update_items
def pass_user(db: Session, username):
user_value = db.query(models.User).filter(models.User.full_name == username).first()
user_dict = jsonable_encoder(user_value)
current_user = schemas.User(**user_dict)
return current_user
def check_reset_password(new_password: str, id: int, db: Session):
hashed_password = get_password_hash(new_password)
db_user_to_update = db.query(models.User).filter(models.User.id == id).first()
db_user_to_update.hashed_password = hashed_password
db.add(db_user_to_update)
db.commit()
db.refresh(db_user_to_update)
return db_user_to_update
async def forgot_password(email: str, reset_code: str, db: Session):
data = db.query(models.ResetCode).filter(models.ResetCode.email == email).first()
if data:
        return [{"message": "the reset code already exists"}]
db_code = models.ResetCode(
email=email,
reset_code=reset_code,
expired_in=datetime.now() + timedelta(minutes=15),
)
db.add(db_code)
db.commit()
db.refresh(db_code)
return db_code
def update_reset_code(db: Session, email: str, reset_code: str):
update_code = (
db.query(models.ResetCode).filter(models.ResetCode.email == email).first()
)
update_code.email = email
update_code.reset_code = reset_code
update_code.expired_in = datetime.now() + timedelta(minutes=15)
db.add(update_code)
db.commit()
db.refresh(update_code)
return update_code
def get_reset_code(db: Session, email: str):
return db.query(models.ResetCode).filter(models.ResetCode.email == email).first()
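# Password-reset flow sketch (inferred from the helpers above, for
# illustration only): forgot_password() stores a code that expires after 15
# minutes, get_email_by_reset_code() resolves a submitted code back to its
# stored row, and check_reset_password() hashes and saves the new password
# for the matching user.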
def create_category(category: schemas.CategoryCreate, db: Session, user_id: int):
db_category = models.Category(**category.dict(), owner_id=user_id)
db.add(db_category)
db.commit()
db.refresh(db_category)
return db_category
def get_categorys(db: Session, skip: int = 0, limit: int = 100):
data = db.query(models.Category).offset(skip).limit(limit).all()
return data
def get_category(db: Session, category_id: int):
data = db.query(models.Category).filter(models.Category.id == category_id).first()
return data
def delete_category(db: Session, category_id: int):
delete_category = (
db.query(models.Category).filter(models.Category.id == category_id).first()
)
db.delete(delete_category)
db.commit()
return delete_category
def update_category(db: Session, category_id: int, category: schemas.CategoryCreate):
update_category = (
db.query(models.Category).filter(models.Category.id == category_id).first()
)
update_category.description = category.description
update_category.title = category.title
db.commit()
db.refresh(update_category)
return update_category
def create_cart(db: Session, user_id: int, category_id: int, item_id: int):
db_cart = models.Cart(owner_id=user_id, category_id=category_id, item_id=item_id)
db.add(db_cart)
db.commit()
db.refresh(db_cart)
return db_cart
def get_carts(db: Session, user_id: int):
return (
db.query(
models.Cart.id,
models.Item.item_title,
models.Item.item_description,
models.Item.item_price,
)
.select_from(models.Cart)
.join(models.Item)
.filter(models.Cart.owner_id == user_id)
.all()
)
def get_cart(db: Session, item_id: int, user_id: int):
return (
db.query(models.Cart)
.filter(models.Cart.item_id == item_id, models.Cart.owner_id == user_id)
.first()
)
def get_cart_by_id(db: Session, cart_id: int):
return db.query(models.Cart).filter(models.Cart.id == cart_id).first()
def get_order_by_id(db: Session, order_id: int):
return db.query(models.Order).filter(models.Order.id == order_id).first()
def get_cart_by_item(db: Session, item_id: int):
return db.query(models.Cart).filter(models.Cart.item_id == item_id).first()
def delete_cart(db: Session, item_id: int):
delete_cart = db.query(models.Cart).filter(models.Cart.item_id == item_id).first()
db.delete(delete_cart)
db.commit()
return delete_cart
def order(
db: Session,
user_id: int,
cart_id: int,
category_id: int,
item_id: int,
order: schemas.OrderCreate,
):
db_order = models.Order(
**order.dict(),
cart_id=cart_id,
owner_id=user_id,
category_id=category_id,
item_id=item_id,
)
db.add(db_order)
db.commit()
db.refresh(db_order)
return db_order
def get_order(db: Session, user_id: int):
data = db.query(models.Order).filter(models.Order.owner_id == user_id).all()
sample = []
for dbs in data:
value = (
db.query(
models.Order.id,
models.Category.category_title,
models.Item.item_title,
models.Item.item_description,
models.Item.item_price,
models.Order.quantity,
models.Order.address,
)
.select_from(models.Order)
.join(models.Category)
.filter(models.Order.id == dbs.id)
.filter(models.Item.id == dbs.item_id)
.all()
)
sample.append(value)
return {"Items ordered": sample}
def get_orders(db: Session, user_id: int):
return db.query(models.Order).filter(models.Order.owner_id == user_id).all()
def bill(
db: Session,
owner_id: int,
total: float,
category_id: int,
item_id: int,
order_id: int,
):
db_bill = models.Billing(
total=total,
owner_id=owner_id,
category_id=category_id,
item_id=item_id,
order_id=order_id,
)
db.add(db_bill)
db.commit()
db.refresh(db_bill)
return db_bill
def get_bills(db: Session, user_id: int):
    bills = 0
    sample1 = []
bill = db.query(models.Billing).filter(models.Billing.owner_id == user_id).all()
for b in bill:
value = (
db.query(
models.Order.id,
models.Category.category_title,
models.Item.item_title,
models.Item.item_description,
models.Item.item_price,
models.Order.quantity,
models.Order.address,
)
.select_from(models.Order)
.join(models.Category)
.filter(models.Order.id == b.id)
.filter(models.Item.id == b.item_id)
.all()
)
        bills = bills + b.total
        sample1.append(value)
    return {"Items Purchased": sample1, "Total": f"Rs.{bills}"}
def get_bill(db: Session, user_id: int, order_id: int):
return (
db.query(models.Billing)
.filter(models.Billing.owner_id == user_id)
.filter(models.Billing.order_id == order_id)
.first()
)
def get_bill_By_order_id(db: Session, user_id: int, order_id: int):
    return (
        db.query(models.Billing)
        .filter(models.Billing.owner_id == user_id)
        .filter(models.Billing.order_id == order_id)
        .first()
    )
def create_profile(
db: Session,
img_name: str,
img_url: str,
first_name: str,
last_name: str,
address: str,
user_id: str,
):
db_img = models.UserProfile(
img_name=img_name,
img_url=img_url,
first_name=first_name,
last_name=last_name,
address=address,
user_id=user_id,
)
db.add(db_img)
db.commit()
db.refresh(db_img)
return db_img
def profiles(db: Session, skip: int = 0, limit: int = 100):
data = db.query(models.UserProfile).offset(skip).limit(limit).all()
return data
def get_user_profile(db: Session, user_id: int):
db_user = (
db.query(
models.UserProfile.first_name,
models.UserProfile.last_name,
models.UserProfile.img_name,
models.UserProfile.img_url,
)
.filter(models.UserProfile.user_id == user_id)
.all()
)
for userprofile in db_user:
print(userprofile)
return db_user
def delete_profile(db: Session, profile_id: int):
delete_profile = (
db.query(models.UserProfile).filter(models.UserProfile.id == profile_id).first()
)
db.delete(delete_profile)
db.commit()
return delete_profile
def update_profile(
db: Session,
img_name: str,
img_url: str,
first_name: str,
last_name: str,
address: str,
user_id: str,
profile_id: int,
):
update_category = (
db.query(models.UserProfile).filter(models.UserProfile.id == profile_id).first()
)
update_category.img_name = img_name
update_category.img_url = img_url
update_category.first_name = first_name
update_category.last_name = last_name
update_category.address = address
update_category.user_id = user_id
db.commit()
db.refresh(update_category)
return update_category
| [
"[email protected]"
] | |
83845dddd9826d631ab6a2747c16c991a8de1421 | 03f40e1e96f78240904ee90ae3257611e0d2b981 | /venv/lib/python3.8/site-packages/sqlalchemy/orm/session.py | 95a25fd5dd58932867e34779df93df262568a416 | [] | no_license | otr0624/StoreApp | bd584a37af668a4055969cdf03fa2e688f51e395 | 76ae4040ccfe1f415c8c2acf88550690cb537290 | refs/heads/master | 2022-04-22T19:35:03.231742 | 2020-04-14T23:43:19 | 2020-04-14T23:43:19 | 255,651,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129,485 | py | # orm/session.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import itertools
import sys
import weakref
from . import attributes
from . import exc
from . import identity
from . import loading
from . import persistence
from . import query
from . import state as statelib
from .base import _class_to_mapper
from .base import _none_set
from .base import _state_mapper
from .base import instance_str
from .base import object_mapper
from .base import object_state
from .base import state_str
from .deprecated_interfaces import SessionExtension
from .unitofwork import UOWTransaction
from .. import engine
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import util as sql_util
__all__ = ["Session", "SessionTransaction", "SessionExtension", "sessionmaker"]
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
@util.deprecated(
"1.3",
"The :meth:`.Session.close_all` method is deprecated and will be "
"removed in a future release. Please refer to "
":func:`.session.close_all_sessions`.",
)
def close_all(cls):
"""Close *all* sessions in memory."""
close_all_sessions()
@classmethod
@util.dependencies("sqlalchemy.orm.util")
def identity_key(cls, orm_util, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol("ACTIVE")
PREPARED = util.symbol("PREPARED")
COMMITTED = util.symbol("COMMITTED")
DEACTIVE = util.symbol("DEACTIVE")
CLOSED = util.symbol("CLOSED")
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
The :attr:`.SessionTransaction.parent` attribute refers to the parent
:class:`.SessionTransaction` in the stack of :class:`.SessionTransaction`
objects. If this attribute is ``None``, then this is the top of the stack.
If non-``None``, then this :class:`.SessionTransaction` refers either
to a so-called "subtransaction" or a "nested" transaction. A
"subtransaction" is a scoping concept that demarcates an inner portion
of the outermost "real" transaction. A nested transaction, which
is indicated when the :attr:`.SessionTransaction.nested`
attribute is also True, indicates that this :class:`.SessionTransaction`
corresponds to a SAVEPOINT.
**Life Cycle**
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection`
objects, a corresponding :class:`.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`. The start of a :class:`.SessionTransaction`
can be tracked using the :meth:`.SessionEvents.after_transaction_create`
event.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called. The end of a
:class:`.SessionTransaction` can be tracked using the
:meth:`.SessionEvents.after_transaction_end` event.
**Nesting and Subtransactions**
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute, and it additionally will assign
the previous :class:`.SessionTransaction` to its :attr:`.Session.parent`
attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack, and the :attr:`.SessionTransaction.parent` attribute allows
traversal up the stack until :attr:`.SessionTransaction.parent` is
``None``, indicating the top of the stack.
When the scope of :class:`.SessionTransaction` is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
within in a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
Note that the flush process that occurs within the "autoflush" feature
as well as when the :meth:`.Session.flush` method is used **always**
creates a :class:`.SessionTransaction` object. This object is normally
a subtransaction, unless the :class:`.Session` is in autocommit mode
and no transaction exists at all, in which case it's the outermost
transaction. Any event-handling logic or other inspection logic
needs to take into account whether a :class:`.SessionTransaction`
is the outermost transaction, a subtransaction, or a "nested" / SAVEPOINT
transaction.
.. seealso::
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_transaction_create`
:meth:`.SessionEvents.after_transaction_end`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
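    # Illustrative sketch (an added note, not part of the original source;
    # ``some_engine`` is a placeholder): with the default autocommit=False
    # Session, the stack described above can be observed directly:
    #
    #     session = Session(bind=some_engine)
    #     outer = session.transaction            # root SessionTransaction
    #     session.begin(subtransactions=True)    # pushes a subtransaction
    #     assert session.transaction.parent is outer
    #     session.commit()                       # closes only the subtransaction
    #     assert session.transaction is outer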
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress"
)
if self.session._enable_transaction_accounting:
self._take_snapshot()
self.session.dispatch.after_transaction_create(self.session, self)
@property
def parent(self):
"""The parent :class:`.SessionTransaction` of this
:class:`.SessionTransaction`.
If this attribute is ``None``, indicates this
:class:`.SessionTransaction` is at the top of the stack, and
corresponds to a real "COMMIT"/"ROLLBACK"
block. If non-``None``, then this is either a "subtransaction"
or a "nested" / SAVEPOINT transaction. If the
:attr:`.SessionTransaction.nested` attribute is ``True``, then
this is a SAVEPOINT, and if ``False``, indicates this a subtransaction.
.. versionadded:: 1.0.16 - use ._parent for previous versions
"""
return self._parent
nested = False
"""Indicates if this is a nested, or SAVEPOINT, transaction.
When :attr:`.SessionTransaction.nested` is True, it is expected
    that :attr:`.SessionTransaction.parent` will be non-None as well.
"""
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(
self,
prepared_ok=False,
rollback_ok=False,
deactive_ok=False,
closed_msg="This transaction is closed",
):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not deactive_ok and not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s" % self._rollback_exception,
code="7s2a",
)
elif not deactive_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'inactive' state, due to the "
"SQL transaction being rolled back; no further "
"SQL can be emitted within this transaction."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, execution_options=None, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind, execution_options)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(self.session, self, nested=nested)
def _iterate_self_and_parents(self, upto=None):
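        # Walk from this transaction up the parent chain; when ``upto`` is
        # given, stop just before it, raising if it is not actually an
        # ancestor of this transaction.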
current = self
result = ()
while current:
result += (current,)
if current._parent is upto:
break
elif current._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list" % (upto)
)
else:
current = current._parent
return result
def _take_snapshot(self):
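        # Record identity-map state (new/deleted/dirty objects and key
        # switches) at a real or SAVEPOINT transaction boundary so a rollback
        # can restore it; subtransactions share their parent's snapshot.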
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
"""Restore the restoration state taken before a transaction began.
Corresponds to a rollback.
"""
assert self._is_transaction_boundary
to_expunge = set(self._new).union(self.session._new)
self.session._expunge_states(to_expunge, to_transient=True)
for s, (oldkey, newkey) in self._key_switches.items():
# we probably can do this conditionally based on
# if we expunged or not, but safe_discard does that anyway
self.session.identity_map.safe_discard(s)
# restore the old key
s.key = oldkey
# now restore the object, but only if we didn't expunge
if s not in to_expunge:
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
self.session._update_impl(s, revert_deletion=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
"""Remove the restoration state taken before a transaction began.
Corresponds to a commit.
"""
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
statelib.InstanceState._detach_states(list(self._deleted), self.session)
self._deleted.clear()
elif self.nested:
self._parent._new.update(self._new)
self._parent._dirty.update(self._dirty)
self._parent._deleted.update(self._deleted)
self._parent._key_switches.update(self._key_switches)
def _connection_for_bind(self, bind, execution_options):
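        # Return a (cached, if already established) Connection for the given
        # bind, beginning a real, two-phase, or SAVEPOINT transaction on it
        # as appropriate.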
self._assert_active()
if bind in self._connections:
if execution_options:
util.warn(
"Connection is already established for the "
"given bind; execution_options ignored"
)
return self._connections[bind][0]
local_connect = False
if self._parent:
conn = self._parent._connection_for_bind(bind, execution_options)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine"
)
else:
conn = bind._contextual_connect()
local_connect = True
try:
if execution_options:
conn = conn.execution_options(**execution_options)
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
except:
# connection will not be associated with this Session;
# close it immediately so that it isn't closed under GC
if local_connect:
conn.close()
raise
else:
self._connections[conn] = self._connections[conn.engine] = (
conn,
transaction,
conn is not bind,
)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare."
)
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in range(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?"
)
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
with util.safe_reraise():
self.rollback()
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.close()
boundary = self
rollback_err = None
if self._state in (ACTIVE, PREPARED):
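# walk upwards from this transaction; only a "transaction
# boundary" (the root transaction or a SAVEPOINT-based nested
# transaction) holds real DBAPI transactions to roll back;
# intermediate subtransactions are merely deactivated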
for transaction in self._iterate_self_and_parents():
if transaction._parent is None or transaction.nested:
try:
for t in set(transaction._connections.values()):
t[1].rollback()
transaction._state = DEACTIVE
self.session.dispatch.after_rollback(self.session)
except:
rollback_err = sys.exc_info()
finally:
transaction._state = DEACTIVE
if self.session._enable_transaction_accounting:
transaction._restore_snapshot(dirty_only=transaction.nested)
boundary = transaction
break
else:
transaction._state = DEACTIVE
sess = self.session
if (
not rollback_err
and sess._enable_transaction_accounting
and not sess._is_clean()
):
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded."
)
boundary._restore_snapshot(dirty_only=boundary.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
if rollback_err:
util.raise_(rollback_err[1], with_traceback=rollback_err[2])
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def close(self, invalidate=False):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in set(self._connections.values()):
if invalidate:
connection.invalidate()
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self._assert_active(deactive_ok=True, prepared_ok=True)
if self.session.transaction is None:
return
if type_ is None:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
"__contains__",
"__iter__",
"add",
"add_all",
"begin",
"begin_nested",
"close",
"commit",
"connection",
"delete",
"execute",
"expire",
"expire_all",
"expunge",
"expunge_all",
"flush",
"get_bind",
"is_modified",
"bulk_save_objects",
"bulk_insert_mappings",
"bulk_update_mappings",
"merge",
"query",
"refresh",
"rollback",
"scalar",
)
@util.deprecated_params(
weak_identity_map=(
"1.0",
"The :paramref:`.Session.weak_identity_map` parameter as well as "
"the strong-referencing identity map are deprecated, and will be "
"removed in a future release. For the use case where objects "
"present in a :class:`.Session` need to be automatically strong "
"referenced, see the recipe at "
":ref:`session_referencing_behavior` for an event-based approach "
"to maintaining strong identity references. ",
),
_enable_transaction_accounting=(
"0.7",
"The :paramref:`.Session._enable_transaction_accounting` "
"parameter is deprecated and will be removed in a future release.",
),
extension=(
"0.7",
":class:`.SessionExtension` is deprecated in favor of the "
":class:`.SessionEvents` listener interface. The "
":paramref:`.Session.extension` parameter will be "
"removed in a future release.",
),
)
def __init__(
self,
bind=None,
autoflush=True,
expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False,
twophase=False,
weak_identity_map=None,
binds=None,
extension=None,
enable_baked_queries=True,
info=None,
query_cls=None,
):
r"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is
used, queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running,
and will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly rollback) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
:meth:`~.Session.flush` call to this ``Session`` before proceeding.
This is a convenience feature so that :meth:`~.Session.flush` need
not be called repeatedly in order for database queries to retrieve
results. It's typical that ``autoflush`` is used in conjunction
with ``autocommit=False``. In this scenario, explicit calls to
:meth:`~.Session.flush` are rarely needed; you usually only need to
call :meth:`~.Session.commit` (which flushes) to finalize changes.
:param bind: An optional :class:`.Engine` or :class:`.Connection` to
which this ``Session`` should be bound. When specified, all SQL
operations performed by this session will execute via this
connectable.
:param binds: A dictionary which may specify any number of
:class:`.Engine` or :class:`.Connection` objects as the source of
connectivity for SQL operations on a per-entity basis. The keys
of the dictionary consist of any series of mapped classes,
arbitrary Python classes that are bases for mapped classes,
:class:`.Table` objects and :class:`.Mapper` objects. The
values of the dictionary are then instances of :class:`.Engine`
or less commonly :class:`.Connection` objects. Operations which
proceed relative to a particular mapped class will consult this
dictionary for the closest matching entity in order to determine
which :class:`.Engine` should be used for a particular SQL
operation. The complete heuristics for resolution are
described at :meth:`.Session.get_bind`. Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
SomeDeclarativeBase: create_engine('postgresql://engine2'),
some_mapper: create_engine('postgresql://engine3'),
some_table: create_engine('postgresql://engine4'),
})
.. seealso::
:ref:`session_partitioning`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
:meth:`.Session.get_bind`
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
:class:`.sessionmaker` function, and is not sent directly to the
constructor for ``Session``.
:param enable_baked_queries: defaults to ``True``. A flag consumed
by the :mod:`sqlalchemy.ext.baked` extension to determine if
"baked queries" should be cached, as is the normal operation
of this extension. When set to ``False``, all caching is disabled,
including baked queries defined by the calling application as
well as those used internally. Setting this flag to ``False``
can significantly reduce memory use, however will also degrade
performance for those areas that make use of baked queries
(such as relationship loaders). Additionally, baked query
logic in the calling application or potentially within the ORM
that may be malfunctioning due to cache key collisions or similar
can be flagged by observing if this flag resolves the issue.
.. versionadded:: 1.2
:param _enable_transaction_accounting: A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each :meth:`~.commit`,
so that all attribute/object access subsequent to a completed
transaction will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event.
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the
:attr:`.Session.info` attribute. Note the dictionary is copied at
construction time so that modifications to the per-
:class:`.Session` dictionary will be local to that
:class:`.Session`.
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
objects, as returned by the :meth:`~.Session.query` method.
Defaults to :class:`.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a
:meth:`~.commit`, after :meth:`~.flush` has been issued for all
attached databases, the :meth:`~.TwoPhaseTransaction.prepare`
method on each database's :class:`.TwoPhaseTransaction` will be
called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed.
"""
if weak_identity_map in (True, None):
self._identity_cls = identity.WeakInstanceDict
else:
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self.enable_baked_queries = enable_baked_queries
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls if query_cls else query.Query
if info:
self.info.update(info)
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for key, bind in binds.items():
self._add_bind(key, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
@util.memoized_property
def info(self):
"""A user-modifiable dictionary.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
.. versionadded:: 0.9.0
"""
return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this :class:`.Session`.
.. warning::
The :meth:`.Session.begin` method is part of a larger pattern
of use with the :class:`.Session` known as **autocommit mode**.
This is essentially a **legacy mode of use** and is
not necessary for new applications. The :class:`.Session`
normally handles the work of "begin" transparently, which in
turn relies upon the Python DBAPI to transparently "begin"
transactions; there is **no need to explicitly begin transactions**
when using modern :class:`.Session` programming patterns.
In its default mode of ``autocommit=False``, the
:class:`.Session` does all of its work within
the context of a transaction, so as soon as you call
:meth:`.Session.commit`, the next transaction is implicitly
started when the next database operation is invoked. See
:ref:`session_autocommit` for further background.
The method will raise an error if this :class:`.Session` is already
inside of a transaction, unless
:paramref:`~.Session.begin.subtransactions` or
:paramref:`~.Session.begin.nested` are specified. A "subtransaction"
is essentially a code embedding pattern that does not affect the
transactional state of the database connection unless a rollback is
emitted, in which case the whole transaction is rolled back. For
documentation on subtransactions, please see
:ref:`session_subtransactions`.
:param subtransactions: if True, indicates that this
:meth:`~.Session.begin` can create a "subtransaction".
:param nested: if True, begins a SAVEPOINT transaction and is
equivalent to calling :meth:`~.Session.begin_nested`. For
documentation on SAVEPOINT transactions, please see
:ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction`
acts as a Python context manager, allowing :meth:`.Session.begin`
to be used in a "with" block. See :ref:`session_autocommit` for
an example.
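A minimal sketch of the context-manager form under autocommit
mode; ``engine`` and ``some_object`` are illustrative names
assumed to be defined elsewhere::
    session = Session(bind=engine, autocommit=True)
    with session.begin():
        session.add(some_object)
    # commits on __exit__, or rolls back if an exception is raised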
.. seealso::
:ref:`session_autocommit`
:meth:`.Session.begin_nested`
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions."
)
else:
self.transaction = SessionTransaction(self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
The target database(s) and associated drivers must support SQL
SAVEPOINT for this method to function correctly.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction` acts as a context manager, allowing
:meth:`.Session.begin_nested` to be used in a "with" block.
See :ref:`session_begin_nested` for a usage example.
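A brief sketch, assuming an ordinary ``session`` and a mapped
``User`` class::
    session.add(User(name="u1"))
    try:
        with session.begin_nested():  # establishes a SAVEPOINT
            session.add(User(name="u2"))
            raise Exception("rolls back to the SAVEPOINT only")
    except Exception:
        pass
    session.commit()  # "u1" is persisted; "u2" is not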
.. seealso::
:ref:`session_begin_nested`
:ref:`pysqlite_serializable` - special workarounds required
with the SQLite driver in order for SAVEPOINT to work
correctly.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
:meth:`.begin` is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
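The typical pattern is simply the following, where ``obj`` is an
illustrative pending object::
    session.add(obj)
    session.commit()  # flush + COMMIT; loaded state is then expired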
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(
self,
mapper=None,
clause=None,
bind=None,
close_with_result=False,
execution_options=None,
**kw
):
r"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`.Connection` returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`.Connection` is returned
using :meth:`.Engine.connect` on the underlying
:class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
resolution ultimately makes use of the :meth:`.get_bind` method.
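For example, to emit driver-level SQL on the Session's current
transactional connection (the statement text here is illustrative
only)::
    connection = session.connection()
    connection.execute("update users set name='ed'")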
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`.Engine.connect`,
indicating the :class:`.Connection` should be considered
"single use", automatically closing when the first result set is
closed. This flag only has an effect if this :class:`.Session` is
configured with ``autocommit=True`` and does not already have a
transaction in progress.
:param execution_options: a dictionary of execution options that will
be passed to :meth:`.Connection.execution_options`, **when the
connection is first procured only**. If the connection is already
present within the :class:`.Session`, a warning is emitted and
the arguments are ignored.
.. versionadded:: 0.9.9
.. seealso::
:ref:`session_transaction_isolation`
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(
bind,
close_with_result=close_with_result,
execution_options=execution_options,
)
def _connection_for_bind(self, engine, execution_options=None, **kw):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine, execution_options)
else:
conn = engine._contextual_connect(**kw)
if execution_options:
conn = conn.execution_options(**execution_options)
return conn
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
r"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct,
such as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`~.expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`.Connection.execute`, whether this is passed as a single
dictionary, or a sequence of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(
users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`.Connection` which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`.Connection`, which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`.Table` objects passed to the method; see the
documentation for :meth:`.Session.get_bind` for a full description of
this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`.ResultProxy` returned by the :meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`.Connection` available, a temporary :class:`.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`.ResultProxy` has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`.expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`.Connection.execute` - core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(clause, allow_coercion_to_text=True)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {}
)
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw
).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self._close_impl(invalidate=False)
def invalidate(self):
"""Close this Session, using connection invalidation.
This is a variant of :meth:`.Session.close` that will additionally
ensure that the :meth:`.Connection.invalidate` method will be called
on all :class:`.Connection` objects. This can be called when
the database is known to be in a state where the connections are
no longer safe to be used.
E.g.::
try:
sess = Session()
sess.add(User())
sess.commit()
except gevent.Timeout:
sess.invalidate()
raise
except:
sess.rollback()
raise
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
.. versionadded:: 0.9.9
"""
self._close_impl(invalidate=True)
def _close_impl(self, invalidate):
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_self_and_parents():
transaction.close(invalidate)
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
all_states = self.identity_map.all_states() + list(self._new)
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
statelib.InstanceState._detach_states(all_states, self)
def _add_bind(self, key, bind):
try:
insp = inspect(key)
except sa_exc.NoInspectionAvailable as err:
if not isinstance(key, type):
util.raise_(
sa_exc.ArgumentError("Not an acceptable bind target: %s" % key),
replace_context=err,
)
else:
self.__binds[key] = bind
else:
if insp.is_selectable:
self.__binds[insp] = bind
elif insp.is_mapper:
self.__binds[insp.class_] = bind
for selectable in insp._all_tables:
self.__binds[selectable] = bind
else:
raise sa_exc.ArgumentError("Not an acceptable bind target: %s" % key)
def bind_mapper(self, mapper, bind):
"""Associate a :class:`.Mapper` or arbitrary Python class with a
"bind", e.g. an :class:`.Engine` or :class:`.Connection`.
The given entity is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param mapper: a :class:`.Mapper` object, or an instance of a mapped
class, or any Python class that is the base of a set of mapped
classes.
:param bind: an :class:`.Engine` or :class:`.Connection` object.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_table`
"""
self._add_bind(mapper, bind)
def bind_table(self, table, bind):
"""Associate a :class:`.Table` with a "bind", e.g. an :class:`.Engine`
or :class:`.Connection`.
The given :class:`.Table` is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param table: a :class:`.Table` object, which is typically the target
of an ORM mapping, or is present within a selectable that is
mapped.
:param bind: an :class:`.Engine` or :class:`.Connection` object.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
"""
self._add_bind(table, bind)
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based first on the mapper in use, then
on the mapped class in use, then on any base classes that are
present in the ``__mro__`` of the mapped class, from more specific
superclasses to more general.
2. if clause given and session.binds is present,
locate a bind based on :class:`.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the :class:`.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
Note that the :meth:`.Session.get_bind` method can be overridden on
a user-defined subclass of :class:`.Session` to provide any kind
of bind resolution scheme. See the example at
:ref:`session_custom_partitioning`.
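A minimal sketch of such an override, where ``leader`` and
``follower`` are assumed, pre-configured :class:`.Engine`
objects::
    class RoutingSession(Session):
        def get_bind(self, mapper=None, clause=None):
            if self._flushing:
                # writes go to the leader
                return leader
            else:
                # reads may go to a follower
                return follower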
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the :class:`.MetaData`
associated with the :class:`.Table` to which the :class:`.Mapper`
is mapped for a bind.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`.Table` associated with
bound :class:`.MetaData`.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding."
)
if mapper is not None:
try:
mapper = inspect(mapper)
except sa_exc.NoInspectionAvailable as err:
if isinstance(mapper, type):
util.raise_(
exc.UnmappedClassError(mapper), replace_context=err,
)
else:
raise
if self.__binds:
if mapper:
for cls in mapper.class_.__mro__:
if cls in self.__binds:
return self.__binds[cls]
if clause is None:
clause = mapper.persist_selectable
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if mapper and mapper.persist_selectable.bind:
return mapper.persist_selectable.bind
context = []
if mapper is not None:
context.append("mapper %s" % mapper)
if clause is not None:
context.append("SQL expression")
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session"
% (", ".join(context))
)
def query(self, *entities, **kwargs):
"""Return a new :class:`.Query` object corresponding to this
:class:`.Session`."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
"""
autoflush = self.autoflush
self.autoflush = False
try:
yield self
finally:
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
try:
self.flush()
except sa_exc.StatementError as e:
# note we are reraising StatementError as opposed to
# raising FlushError with "chaining" to remain compatible
# with code that catches StatementError, IntegrityError,
# etc.
e.add_detail(
"raised as a result of Query-invoked autoflush; "
"consider using a session.no_autoflush block if this "
"flush is occurring prematurely"
)
util.raise_(e, with_traceback=sys.exc_info()[2])
def refresh(
self, instance, attribute_names=None, with_for_update=None, lockmode=None,
):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
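E.g., to reload a subset of attributes, optionally under
``SELECT .. FOR UPDATE`` (``user`` is an assumed persistent
instance)::
    session.refresh(user, attribute_names=['name'],
                    with_for_update=True)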
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param with_for_update: optional boolean ``True`` indicating FOR UPDATE
should be used, or may be a dictionary containing flags to
indicate a more specific set of FOR UPDATE flags for the SELECT;
flags should match the parameters of :meth:`.Query.with_for_update`.
Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
.. versionadded:: 1.2
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
Superseded by :paramref:`.Session.refresh.with_for_update`.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
if with_for_update == {}:
raise sa_exc.ArgumentError(
"with_for_update should be the boolean value "
"True, or a dictionary with options. "
"A blank dictionary is ambiguous."
)
if lockmode:
with_for_update = query.LockmodeArg.parse_legacy_query(lockmode)
elif with_for_update is not None:
if with_for_update is True:
with_for_update = query.LockmodeArg()
elif with_for_update:
with_for_update = query.LockmodeArg(**with_for_update)
else:
with_for_update = None
if (
loading.load_on_ident(
self.query(object_mapper(instance)),
state.key,
refresh_state=state,
with_for_update=with_for_update,
only_load_props=attribute_names,
)
is None
):
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" % instance_str(instance)
)
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attribute on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
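E.g., after emitting non-ORM SQL in the same transaction
(``user_table`` and ``obj`` are illustrative names)::
    session.execute(user_table.update().values(name='ed'))
    session.expire(obj, ['name'])
    obj.name  # re-loaded from the current transaction on access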
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(
state.manager.mapper.cascade_iterator("refresh-expire", state)
)
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach(self)
@util.deprecated(
"0.7",
"The :meth:`.Session.prune` method is deprecated along with "
":paramref:`.Session.weak_identity_map`. This method will be "
"removed in a future release.",
)
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" % state_str(state)
)
cascaded = list(state.manager.mapper.cascade_iterator("expunge", state))
self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded])
def _expunge_states(self, states, to_transient=False):
for state in states:
if state in self._new:
self._new.pop(state)
elif self.identity_map.contains_state(state):
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
elif self.transaction:
# state is "detached" from being deleted, but still present
# in the transaction snapshot
self.transaction._deleted.pop(state, None)
statelib.InstanceState._detach_states(states, self, to_transient=to_transient)
def _register_persistent(self, states):
"""Register all persistent objects from a flush.
This is used both for pending objects moving to the persistent
state as well as already persistent objects.
"""
pending_to_persistent = self.dispatch.pending_to_persistent or None
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if (
_none_set.intersection(instance_key[1])
and not mapper.allow_partial_pks
or _none_set.issuperset(instance_key[1])
):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such as within a load() event." % state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use safe_discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.safe_discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key,
instance_key,
)
state.key = instance_key
# there can be an existing state in the identity map
# that is replaced when the primary keys of two instances
# are swapped; see test/orm/test_naturalpks.py -> test_reverse
old = self.identity_map.replace(state)
if (
old is not None
and mapper._identity_key_from_state(old) == instance_key
and old.obj() is not None
):
util.warn(
"Identity map already had an identity for %s, "
"replacing it with newly flushed object. Are there "
"load operations occurring inside of an event handler "
"within the flush?" % (instance_key,)
)
state._orphaned_outside_of_session = False
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states), self.identity_map
)
self._register_altered(states)
if pending_to_persistent is not None:
for state in states.intersection(self._new):
pending_to_persistent(self, state)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
persistent_to_deleted = self.dispatch.persistent_to_deleted or None
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
if persistent_to_deleted is not None:
# get a strong reference before we pop out of
# self._deleted
obj = state.obj() # noqa
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
state._deleted = True
# can't call state._detach() here, because this state
# is still in the transaction snapshot and needs to be
# tracked as part of that
if persistent_to_deleted is not None:
persistent_to_deleted(self, state)
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
state._orphaned_outside_of_session = False
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
"save-update", state, halt_on=self._contains_state
):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._delete_impl(state, instance, head=True)
def _delete_impl(self, state, obj, head):
if state.key is None:
if head:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
else:
return
to_attach = self._before_attach(state, obj)
if state in self._deleted:
return
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
if head:
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(
state.manager.mapper.cascade_iterator("delete", state)
)
self._deleted[state] = obj
if head:
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_, o, False)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target
instance. The resulting target instance is then returned by the
method; the original source instance is left unmodified, and
un-associated with the :class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
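A minimal sketch; ``User`` is an assumed mapped class and a row
with primary key 5 may or may not already exist::
    detached = User(id=5, name="updated name")
    persistent = session.merge(detached)
    # 'persistent' is in the session; 'detached' is left as-is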
.. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
pending objects with overlapping primary keys in the same way
as persistent. See :ref:`change_3601` for discussion.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the
method.
.. seealso::
:func:`.make_transient_to_detached` - provides for an alternative
means of "merging" a single object into the :class:`.Session`
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
_resolve_conflict_map = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
finally:
self.autoflush = autoflush
def _merge(
self, state, state_dict, load=True, _recursive=None, _resolve_conflict_map=None,
):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if state in self._new:
util.warn(
"Instance %s is already pending in this Session yet is "
"being merged again; this is probably not what you want "
"to do" % state_str(state)
)
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False."
)
key = mapper._identity_key_from_state(state)
key_is_persistent = attributes.NEVER_SET not in key[1] and (
not _none_set.intersection(key[1])
or (mapper.allow_partial_pks and not _none_set.issuperset(key[1]))
)
else:
key_is_persistent = True
if key in self.identity_map:
try:
merged = self.identity_map[key]
except KeyError:
# object was GC'ed right as we checked for it
merged = None
else:
merged = None
if merged is None:
if key_is_persistent and key in _resolve_conflict_map:
merged = _resolve_conflict_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False."
)
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif key_is_persistent:
merged = self.query(mapper.class_).get(key[1])
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
_resolve_conflict_map[key] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if (
existing_version is not attributes.PASSIVE_NO_RESULT
and merged_version is not attributes.PASSIVE_NO_RESULT
and existing_version != merged_version
):
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (existing_version, state_str(merged_state), merged_version,)
)
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
# since we are copying load_options, we need to copy
# the callables_ that would have been generated by those
# load_options.
# assumes that the callables we put in state.callables_
# are not instance-specific (which they should not be)
merged_state._copy_callables(state)
for prop in mapper.iterate_properties:
prop.merge(
self,
state,
state_dict,
merged_state,
merged_dict,
load,
_recursive,
_resolve_conflict_map,
)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session" % state_str(state)
)
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - "
"it can't be registered as pending" % state_str(state)
)
obj = state.obj()
to_attach = self._before_attach(state, obj)
if state not in self._new:
self._new[state] = obj
state.insert_order = len(self._new)
if to_attach:
self._after_attach(state, obj)
def _update_impl(self, state, revert_deletion=False):
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
if state._deleted:
if revert_deletion:
if not state._attached:
return
del state._deleted
else:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. "
"Use the make_transient() "
"function to send this object back "
"to the transient state." % state_str(state)
)
obj = state.obj()
# check for late gc
if obj is None:
return
to_attach = self._before_attach(state, obj)
self._deleted.pop(state, None)
if revert_deletion:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
elif revert_deletion:
self.dispatch.deleted_to_persistent(self, state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key and primary key values
present on this object - if not present, then those relationships
will be unavailable.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method is
similar to the ``load_on_pending`` flag on :func:`.relationship`.
Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
If the object instead represents an existing identity in the database,
it should be merged using :meth:`.Session.merge`.
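An illustrative sketch, assuming an ``Order`` class mapped with a
many-to-one ``customer`` relationship::
    order = Order()
    order.customer_id = 5  # set the foreign key value directly
    session.enable_relationship_loading(order)
    order.customer  # lazy loads via this Session; object stays transient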
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. seealso::
``load_on_pending`` at :func:`.relationship` - this flag
allows per-relationship loading of many-to-ones on items that
are pending.
:func:`.make_transient_to_detached` - allows for an object to
be added to a :class:`.Session` without SQL emitted, which then
will unexpire attributes on access.
"""
state = attributes.instance_state(obj)
to_attach = self._before_attach(state, obj)
state._load_pending = True
if to_attach:
self._after_attach(state, obj)
def _before_attach(self, state, obj):
if state.session_id == self.hash_key:
return False
if state.session_id and state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (state_str(state), state.session_id, self.hash_key)
)
self.dispatch.before_attach(self, state)
return True
def _after_attach(self, state, obj):
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = obj
self.dispatch.after_attach(self, state)
if state.key:
self.dispatch.detached_to_persistent(self, state)
else:
self.dispatch.transient_to_pending(self, state)
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(list(self._new.values()) + list(self.identity_map.values()))
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
operations into the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead." % method
)
def _is_clean(self):
return (
not self.identity_map.check_modified()
and not self._deleted
and not self._new
)
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(o), replace_context=err,
)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = _state_mapper(state)._is_orphan(state)
is_persistent_orphan = is_orphan and state.has_identity
if (
is_orphan
and not is_persistent_orphan
and state._orphaned_outside_of_session
):
self._expunge_states([state])
else:
_reg = flush_context.register_object(
state, isdelete=is_persistent_orphan
)
assert _reg, "Failed to add object to the flush context!"
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
_reg = flush_context.register_object(state, isdelete=True)
assert _reg, "Failed to add object to the flush context!"
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(subtransactions=True)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[(state, state.dict) for state in self.identity_map._modified],
instance_dict=self.identity_map,
)
util.warn(
"Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been "
"reset, and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning." % len_
)
# useful assertions:
# if not objects:
# assert not self.identity_map._modified
# else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
def bulk_save_objects(
self,
objects,
return_defaults=False,
update_changed_only=True,
preserve_order=True,
):
"""Perform a bulk save of the given list of objects.
The bulk save feature allows mapped objects to be used as the
source of simple INSERT and UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations; the extraction of data from the objects is also performed
using a lower-latency process that ignores whether or not attributes
have actually been modified in the case of UPDATEs, and also ignores
SQL expressions.
The objects as given are not added to the session and no additional
state is established on them, unless the ``return_defaults`` flag
is also set, in which case primary key attributes and server-side
default values will be populated.
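        E.g., a minimal sketch (``User`` here is an illustrative mapped
        class, not part of this module)::

            s = Session()
            s.bulk_save_objects([User(name="u1"), User(name="u2")])
            s.commit()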
.. versionadded:: 1.0.0
.. warning::
The bulk save feature allows for a lower-latency INSERT/UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT/UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param objects: a sequence of mapped object instances. The mapped
objects are persisted as is, and are **not** associated with the
:class:`.Session` afterwards.
For each object, whether the object is sent as an INSERT or an
UPDATE is dependent on the same rules used by the :class:`.Session`
in traditional operation; if the object has the
:attr:`.InstanceState.key`
attribute set, then the object is assumed to be "detached" and
will result in an UPDATE. Otherwise, an INSERT is used.
In the case of an UPDATE, statements are grouped based on which
attributes have changed, and are thus to be the subject of each
SET clause. If ``update_changed_only`` is False, then all
attributes present within each object are applied to the UPDATE
statement, which may help in allowing the statements to be grouped
together into a larger executemany(), and will also reduce the
overhead of checking history on attributes.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary key values ahead of time; however,
:paramref:`.Session.bulk_save_objects.return_defaults` **greatly
reduces the performance gains** of the method overall.
:param update_changed_only: when True, UPDATE statements are rendered
based on those attributes in each state that have logged changes.
When False, all attributes present are rendered into the SET clause
with the exception of primary key attributes.
:param preserve_order: when True, the order of inserts and updates
matches exactly the order in which the objects are given. When
False, common types of objects are grouped into inserts
and updates, to allow for more batching opportunities.
.. versionadded:: 1.3
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_update_mappings`
"""
def key(state):
return (state.mapper, state.key is not None)
obj_states = (attributes.instance_state(obj) for obj in objects)
if not preserve_order:
obj_states = sorted(obj_states, key=key)
for (mapper, isupdate), states in itertools.groupby(obj_states, key):
self._bulk_save_mappings(
mapper,
states,
isupdate,
True,
return_defaults,
update_changed_only,
False,
)
def bulk_insert_mappings(
self, mapper, mappings, return_defaults=False, render_nulls=False
):
"""Perform a bulk insert of the given list of mapping dictionaries.
The bulk insert feature allows plain Python dictionaries to be used as
the source of simple INSERT operations which can be more easily
grouped together into higher performing "executemany"
operations. Using dictionaries, there is no "history" or session
state management features in use, reducing latency when inserting
large numbers of simple rows.
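        E.g., a minimal sketch (``User`` here is an illustrative mapped
        class, not part of this module)::

            s = Session()
            s.bulk_insert_mappings(
                User,
                [{"id": 1, "name": "u1"}, {"id": 2, "name": "u2"}],
            )
            s.commit()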
The values within the dictionaries as given are typically passed
without modification into Core :meth:`.Insert` constructs, after
organizing the values within them across the tables to which
the given mapper is mapped.
.. versionadded:: 1.0.0
.. warning::
The bulk insert feature allows for a lower-latency INSERT
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`.Mapper` object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be inserted, in terms of the attribute
names on the mapped class. If the mapping refers to multiple tables,
such as a joined-inheritance mapping, each dictionary must contain all
keys to be populated into all tables.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary
key values ahead of time; however,
:paramref:`.Session.bulk_insert_mappings.return_defaults`
**greatly reduces the performance gains** of the method overall.
If the rows
to be inserted only refer to a single table, then there is no
reason this flag should be set as the returned default information
is not used.
:param render_nulls: When True, a value of ``None`` will result
in a NULL value being included in the INSERT statement, rather
than the column being omitted from the INSERT. This allows all
the rows being INSERTed to have the identical set of columns which
allows the full set of rows to be batched to the DBAPI. Normally,
each column-set that contains a different combination of NULL values
than the previous row must omit a different series of columns from
the rendered INSERT statement, which means it must be emitted as a
separate statement. By passing this flag, the full set of rows
are guaranteed to be batchable into one batch; the cost however is
that server-side defaults which are invoked by an omitted column will
be skipped, so care must be taken to ensure that these are not
necessary.
.. warning::
When this flag is set, **server side default SQL values will
not be invoked** for those columns that are inserted as NULL;
the NULL value will be sent explicitly. Care must be taken
to ensure that no server-side default functions need to be
invoked for the operation as a whole.
.. versionadded:: 1.1
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_save_objects`
:meth:`.Session.bulk_update_mappings`
"""
self._bulk_save_mappings(
mapper, mappings, False, False, return_defaults, False, render_nulls,
)
def bulk_update_mappings(self, mapper, mappings):
"""Perform a bulk update of the given list of mapping dictionaries.
The bulk update feature allows plain Python dictionaries to be used as
the source of simple UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations. Using dictionaries, there is no "history" or session
state management features in use, reducing latency when updating
large numbers of simple rows.
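        E.g., a minimal sketch (``User`` here is an illustrative mapped
        class; the primary key entries select the rows, the remaining keys
        populate the SET clause)::

            s = Session()
            s.bulk_update_mappings(User, [{"id": 1, "name": "renamed"}])
            s.commit()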
.. versionadded:: 1.0.0
.. warning::
The bulk update feature allows for a lower-latency UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`.Mapper` object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be updated, in terms of the attribute names
on the mapped class. If the mapping refers to multiple tables, such
as a joined-inheritance mapping, each dictionary may contain keys
corresponding to all tables. All those keys which are present and
are not part of the primary key are applied to the SET clause of the
UPDATE statement; the primary key values, which are required, are
applied to the WHERE clause.
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_save_objects`
"""
self._bulk_save_mappings(mapper, mappings, True, False, False, False, False)
def _bulk_save_mappings(
self,
mapper,
mappings,
isupdate,
isstates,
return_defaults,
update_changed_only,
render_nulls,
):
mapper = _class_to_mapper(mapper)
self._flushing = True
transaction = self.begin(subtransactions=True)
try:
if isupdate:
persistence._bulk_update(
mapper, mappings, transaction, isstates, update_changed_only,
)
else:
persistence._bulk_insert(
mapper,
mappings,
transaction,
isstates,
return_defaults,
render_nulls,
)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
finally:
self._flushing = False
@util.deprecated_params(
passive=(
"0.8",
"The :paramref:`.Session.is_modified.passive` flag is deprecated "
"and will be removed in a future release. The flag is no longer "
"used and is ignored.",
)
)
def is_modified(self, instance, include_collections=True, passive=None):
r"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive: not used
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if (
not include_collections and hasattr(attr.impl, "get_collection")
) or not hasattr(attr.impl, "get_history"):
continue
(added, unchanged, deleted) = attr.impl.get_history(
state, dict_, passive=attributes.NO_CHANGE
)
if added or deleted:
return True
else:
return False
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and awaits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is
replaced with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
From all the above, it follows that the only purpose to this flag is
for application frameworks that wish to detect if a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[state.obj() for state in self._dirty_states if state not in self._deleted]
)
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`.Engine` objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
    .. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(
self,
bind=None,
class_=Session,
autoflush=True,
autocommit=False,
expire_on_commit=True,
info=None,
**kw
):
r"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`.Engine` or other :class:`.Connectable` with
which newly created :class:`.Session` objects will be associated.
:param class\_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
.. versionadded:: 0.9.0
:param \**kw: all other keyword arguments are passed to the
constructor of newly created :class:`.Session` objects.
"""
kw["bind"] = bind
kw["autoflush"] = autoflush
kw["autocommit"] = autocommit
kw["expire_on_commit"] = expire_on_commit
if info is not None:
kw["info"] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == "info" and "info" in local_kw:
d = v.copy()
d.update(local_kw["info"])
local_kw["info"] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r, %s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items()),
)
def close_all_sessions():
"""Close all sessions in memory.
This function consults a global registry of all :class:`.Session` objects
and calls :meth:`.Session.close` on them, which resets them to a clean
state.
This function is not for general use but may be useful for test suites
within the teardown scheme.
.. versionadded:: 1.3
"""
for sess in _sessions.values():
sess.close()
def make_transient(instance):
"""Alter the state of the given instance so that it is :term:`transient`.
.. note::
:func:`.make_transient` is a special-case function for
advanced use cases only.
The given mapped instance is assumed to be in the :term:`persistent` or
:term:`detached` state. The function will remove its association with any
:class:`.Session` as well as its :attr:`.InstanceState.identity`. The
effect is that the object will behave as though it were newly constructed,
except retaining any attribute / collection values that were loaded at the
time of the call. The :attr:`.InstanceState.deleted` flag is also reset
if this object had been deleted as a result of using
:meth:`.Session.delete`.
.. warning::
:func:`.make_transient` does **not** "unexpire" or otherwise eagerly
load ORM-mapped attributes that are not currently loaded at the time
the function is called. This includes attributes which:
* were expired via :meth:`.Session.expire`
* were expired as the natural effect of committing a session
transaction, e.g. :meth:`.Session.commit`
* are normally :term:`lazy loaded` but are not currently loaded
* are "deferred" via :ref:`deferred` and are not yet loaded
* were not present in the query which loaded this object, such as that
which is common in joined table inheritance and other scenarios.
After :func:`.make_transient` is called, unloaded attributes such
as those above will normally resolve to the value ``None`` when
accessed, or an empty collection for a collection-oriented attribute.
As the object is transient and un-associated with any database
identity, it will no longer retrieve these values.
.. seealso::
:func:`.make_transient_to_detached`
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_states([state])
# remove expired state
state.expired_attributes.clear()
# remove deferred callables
if state.callables:
del state.callables
if state.key:
del state.key
if state._deleted:
del state._deleted
def make_transient_to_detached(instance):
"""Make the given transient instance :term:`detached`.
.. note::
:func:`.make_transient_to_detached` is a special-case function for
advanced use cases only.
All attribute history on the given instance
will be reset as though the instance were freshly loaded
from a query. Missing attributes will be marked as expired.
The primary key attributes of the object, which are required, will be made
into the "key" of the instance.
The object can then be added to a session, or merged
possibly with the load=False flag, at which point it will look
as if it were loaded that way, without emitting SQL.
This is a special use case function that differs from a normal
call to :meth:`.Session.merge` in that a given persistent state
can be manufactured without any SQL calls.
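    E.g., a sketch (``SomeClass`` is an illustrative mapped class)::

        obj = SomeClass(id=5)            # transient, primary key populated
        make_transient_to_detached(obj)  # now detached, identity key computed
        session.add(obj)                 # persistent without SQL emitted;
                                         # expired attributes load on access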
.. versionadded:: 0.9.5
.. seealso::
:func:`.make_transient`
:meth:`.Session.enable_relationship_loading`
"""
state = attributes.instance_state(instance)
if state.session_id or state.key:
raise sa_exc.InvalidRequestError("Given object must be transient")
state.key = state.mapper._identity_key_from_state(state)
if state._deleted:
del state._deleted
state._commit_all(state.dict)
state._expire_attributes(state.dict, state.unloaded_expirable)
def object_session(instance):
"""Return the :class:`.Session` to which the given instance belongs.
This is essentially the same as the :attr:`.InstanceState.session`
accessor. See that attribute for details.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
else:
return _state_session(state)
_new_sessionid = util.counter()
| [
"[email protected]"
] | |
36bddcd2bcbf06f731e1550d821f62cda442c623 | 28cb8580ec1e635d5050b800a5a71136cb91c5d7 | /calculators.py | 0fa5f2a01b884981f60d6d4bbeb5992c6be48c07 | [
"MIT"
] | permissive | lxf-gzu/effective_mass_calculator | a760c269aafa07a23249b809aeb714498a76ac45 | 7e9ef8ae6e9e7dcbc3d4ff8d3f27e7ca964ce098 | refs/heads/master | 2021-10-25T06:22:42.475637 | 2019-04-02T06:55:04 | 2019-04-02T06:55:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | import numpy as np
from numpy import linalg as la
from scipy.optimize import curve_fit
from scipy.spatial.distance import euclidean
A_m = 1e10  # converts wave vectors from 1/angstrom to 1/m (1 A^-1 = 1e10 m^-1)
h_bar=1.0545718e-34
eV=1.6e-19
me=9.10938356e-31
'''
Unit conversion relationship:
    E(eV) * 1.6e-19 = (1.0545718e-34)^2 * (1e10)^2 * k^2 / (2m)
where k is in units of 1/angstrom and the mass m is in kg.
'''
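# Worked example of the conversion factor used below (computed from the
# constants above): (h_bar * A_m)**2 / (2 * eV * me) ~= 3.81 eV*A^2, so a
# parabolic band E = 3.81 * k**2 (E in eV, k in 1/A) has an effective mass
# of approximately 1 electron mass.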
def fitting_functions(data,a,b,c,d,e,f,g,h,i,k):
x, y, z = data
return a * x ** 2 + b * y ** 2 + c * z ** 2 + d * x * y + e * x * z + f * y * z + g * x + h * y + i * z + k
def effectiveMassCalcu_CF(e_grid,kx_grid,ky_grid,kz_grid):
# energy in unit of eV
# kz in unit of 1/A
e_fit=e_grid.flatten()
x_fit=kx_grid.flatten()
y_fit=ky_grid.flatten()
z_fit=kz_grid.flatten()
data_fit=np.vstack((x_fit,y_fit,z_fit))
    result, variance = curve_fit(fitting_functions, data_fit, e_fit)
second_order=np.array([[result[0],result[3],result[4]],
[result[3],result[1],result[5]],
[result[4],result[5],result[2]]])
e_mass_tensor=1/ (second_order*(2*eV)/((h_bar*A_m)**2))
return e_mass_tensor/me
def effectiveMassCalcu_FD(e_grid,kx_grid,ky_grid,kz_grid):
second_order=np.zeros((3,3))
    # compute finite-difference step sizes from the neighboring k-points
k_m=np.array([kx_grid[1,1,1],ky_grid[1,1,1],kz_grid[1,1,1]])
k_x=np.array([kx_grid[2,1,1],ky_grid[2,1,1],kz_grid[2,1,1]])
k_y=np.array([kx_grid[1,2,1],ky_grid[1,2,1],kz_grid[1,2,1]])
k_z=np.array([kx_grid[1,1,2],ky_grid[1,1,2],kz_grid[1,1,2]])
d=[euclidean(k_m,k_x),euclidean(k_m,k_y),euclidean(k_m,k_z)]
for i in range(3):
for j in range(3):
tmp=1.0/(d[i]*d[j])
if i==j:
o = [1, 1, 1]
p1 = [1, 1, 1]
p2 = [1, 1, 1]
p1[i]+=1
p2[i]+=-1
deff=tmp*(e_grid[p1[0],p1[1],p1[2]]+e_grid[p2[0],p2[1],p2[2]]-2*e_grid[o[0],o[1],o[2]])
else:
p1 = [1, 1, 1]
p2 = [1, 1, 1]
p3 = [1, 1, 1]
p4 = [1, 1, 1]
p1[i]+=1
p1[j]+=1
p2[i] += 1
p2[j] += -1
p3[i] += -1
p3[j] += 1
p4[i] += -1
p4[j] += -1
deff=tmp*(e_grid[p1[0]][p1[1]][p1[2]]-e_grid[p2[0]][p2[1]][p2[2]]-e_grid[p3[0]][p3[1]][p3[2]]+e_grid[p4[0]][p4[1]][p4[2]])
second_order[i][j]=deff
e_mass_tensor=1/ (second_order*(2*eV)/((h_bar*A_m)**2))
return e_mass_tensor/me | [
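# Minimal usage sketch (illustrative grid, not part of this module's API):
# build a 3x3x3 k-grid around a band extremum with a matching energy grid.
#   import numpy as np
#   ks = np.linspace(-0.05, 0.05, 3)                 # 1/A
#   kx, ky, kz = np.meshgrid(ks, ks, ks, indexing='ij')
#   e = 3.81 * (kx**2 + ky**2 + kz**2)               # parabolic test band, eV
#   print(effectiveMassCalcu_CF(e, kx, ky, kz))      # diagonal ~1.0 (in me)
# Off-diagonal entries diverge for a perfectly isotropic band because the
# inversion is element-wise. effectiveMassCalcu_FD expects the same 3x3x3
# layout with the extremum at index [1, 1, 1].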
"[email protected]"
] | |
5b65135d5eb8cb14bba2ee5cebc9e6ccf75c2200 | ee3ba2af93581aaca5a1393f3eb22fa794be2a12 | /manage.py | 62b3538aa850e06f04c7eae23518adc851d8acbb | [] | no_license | wbchief/myflask | 303ed98c969c58a07953aa37c28f90ace3b9a284 | a4d82bc80df84cb7e418058de3519c29e29db7f1 | refs/heads/master | 2020-03-30T23:48:10.771252 | 2018-10-14T09:56:17 | 2018-10-14T09:56:17 | 151,713,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_script import Shell
from app import create_app, db
from app.models import User, Role, Post, Permission
app = create_app('testing')
@app.context_processor
def include_permission_class():
return {'Permission': Permission}
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role, Post=Post)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run() | [
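# Usage sketch (assuming this file is saved as manage.py):
#   python manage.py shell      # shell preloaded with app, db, User, Role, Post
#   python manage.py db upgrade # Flask-Migrate commands registered under 'db'
#   python manage.py runserver  # default Flask-Script development server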
"[email protected]"
] | |
66a43ae1a34c4909c78db34a8062796dbc43165f | 5ec89827284eaff56f800407eb111cf07835558c | /Capitulo3/Capitulo3-ListasSimples/MetodoAppend.py | 480ed96698600ebe3d311f1f22a120d94e9c8660 | [] | no_license | guilhermepuck/eadFiap20 | c95a7a39c9a11135ca246fc6def7d1d8ecf9fdf3 | e619991a050d41aed14e623402d36040e82bb5bd | refs/heads/master | 2023-01-12T17:52:45.566133 | 2020-11-16T04:13:57 | 2020-11-16T04:13:57 | 308,762,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | inverntario = []
resposta = "S"
while resposta == "S":
inverntario.append(input('Equipamento: '))
inverntario.append(float(input('Valor: ')))
inverntario.append(int(input('Número Serial: ')))
inverntario.append(int(input('Departamento: ')))
resposta = input('Digite \"S\" para continuar: ').upper()
for elemento in inverntario:
print(elemento)
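# Sketch: each record occupies four consecutive slots, so the flat list can
# also be printed record-by-record (an illustrative alternative to the loop
# above, not required by the exercise):
#   for i in range(0, len(inverntario), 4):
#       print(inverntario[i:i + 4])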
| [
"[email protected]"
] | |
0c16b3661deed643f15342b723114acc21bdc1d9 | c719641899290d19cabce13663bd75520f89e268 | /Assignment3.py | 8cd801a419837ae05524d74b46a1e03242b88c75 | [] | no_license | frason88/Python_Lab_MU | a21c2d5c7f541916b063b193227c2ae26b824d0b | c79a8046d54b60295329c812c41d07b658ebb489 | refs/heads/main | 2023-05-22T04:25:56.177066 | 2021-06-11T14:21:49 | 2021-06-11T14:21:49 | 374,122,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | # -*- coding: utf-8 -*-
"""
@author: jkfrason
"""
class Vehicle:
def __init__(self, name, max_speed, mileage, capacity):
self.name = name
self.max_speed = max_speed
self.mileage = mileage
self.capacity = capacity
def seating_capacity(self, capacity):
return f"The seating capacity of a {self.name} is {capacity} passengers"
def fare(self):
return self.capacity * 100
class Bus(Vehicle):
def seating_capacity(self, capacity=50):
return super().seating_capacity(capacity=50)
def fare(self):
amount = super().fare()
amount += amount * 10 / 100
return amount
class Taxi(Vehicle):
def seating_capacity(self, capacity=3):
return super().seating_capacity(capacity=3)
School_bus = Bus("Volvo Bus", 180, 12, 50)
print(School_bus.seating_capacity())
print("Total Bus fare is:", School_bus.fare())
print("\n--------------------------------------------------------------------\n")
taxi = Taxi("Taxi", 280, 22, 3)
print(taxi.seating_capacity())
print("Total taxi fare is:", taxi.fare())
| [
"[email protected]"
] | |
761f670b17d74baddac4ecc8fa65e6f6a51ec262 | ab75d43198c0521479136f6031891f47b3dadd52 | /api_keys.py | bbbd4a025dbce08ad8afe2c35209145f04ba828a | [] | no_license | benbukowski/Weatherpy | 91c4d076fc2ae67fea9c76441130ea67c4188056 | 1e272c92b4af6028e17e57e12319c7c43826c5ff | refs/heads/master | 2020-06-25T06:31:50.929989 | 2019-08-05T21:31:43 | 2019-08-05T21:31:43 | 199,231,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | # OpenWeatherMap API Key
api_key = "c2b27c01620c4a483416ddd20ee5c790"
| [
"[email protected]"
] | |
5eb65796119975d33b47e7c6ddab71c7e3c01492 | 9d684e5da274c561d5993ad4831e807ba7a123a5 | /sdh/curator/actions/__init__.py | 6504d43b44aba24374547d2d2e1a7e02f87d984b | [
"Apache-2.0"
] | permissive | SmartDeveloperHub/sdh-curator | fbdeb408d5c62128344cbdfe7cf85d3ad715e7a2 | ceb06ac39d378d55ab2075c9d9f010e313ad31a0 | refs/heads/master | 2016-08-12T12:30:25.337192 | 2016-02-04T13:26:20 | 2016-02-04T13:26:20 | 44,231,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,841 | py | """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import inspect
import logging
from sdh.curator.actions.core.base import Action
from sdh.curator.actions.ext import stream, query, enrichment
__author__ = 'Fernando Serena'
log = logging.getLogger('sdh.curator.actions')
action_modules = {
    'query': query,
    'stream': stream,
    'enrichment': enrichment,
}
def search_module(module, predicate, limit=1):
py_mod = action_modules[module]
if py_mod is not None:
cand_elms = filter(predicate,
inspect.getmembers(py_mod, lambda x: inspect.isclass(x) and not inspect.isabstract(x)))
if len(cand_elms) > limit:
raise ValueError('Too many elements in module {}'.format(module))
return cand_elms
return None
def get_instance(module, clz, *args):
module = action_modules[module]
class_ = getattr(module, clz)
instance = class_(*args)
return instance
def execute(*args, **kwargs):
name = args[0]
log.debug('Searching for a compliant "{}" action handler...'.format(name))
try:
_, clz = search_module(name,
lambda (_, cl): issubclass(cl, Action) and cl != Action).pop()
data = kwargs.get('data', None)
log.debug(
'Found! Requesting an instance of {} to perform a/n {} action described as:\n{}'.format(clz, name,
data))
action = clz(data)
action.submit()
log.info('A {} request was successfully submitted with id {}'.format(name, action.request_id))
except IndexError:
raise EnvironmentError('Action module found but class is missing: "{}"'.format(name))
| [
"[email protected]"
] | |
c29f9d1d594784407c648eac9132edaa55c51fca | f68cd225b050d11616ad9542dda60288f6eeccff | /testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_2.4GHZ_OperatingStdFromSupportedStds.py | 71960884a4a9b04dfe0c880a739854581df956cb | [
"Apache-2.0"
] | permissive | cablelabs/tools-tdkb | 18fb98fadcd169fa9000db8865285fbf6ff8dc9d | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | refs/heads/master | 2020-03-28T03:06:50.595160 | 2018-09-04T11:11:00 | 2018-09-05T00:24:38 | 147,621,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,310 | py | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>2</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the vresion as 1 -->
<name>TS_WIFIAGENT_2.4GHZ_OperatingStdFromSupportedStds</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id></primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>WIFIAgent_Get</primitive_test_name>
<!-- -->
<primitive_test_version>1</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>Check Device.WiFi.Radio.1.OperatingStandards is a subset of SupportedStandards list</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>10</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
<box_type>Emulator</box_type>
<!-- -->
<box_type>RPI</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIAGENT_59</test_case_id>
<test_objective>Check Device.WiFi.Radio.1.OperatingStandards is a subset of SupportedStandards list</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3, Emulator, Rpi</test_setup>
  <pre_requisite>1.Ccsp Components in DUT should be in a running state that includes the component under test (WiFi Agent)
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>WIFIAgent_Get</api_or_interface_used>
<input_parameters>Device.WiFi.Radio.1.SupportedStandards
Device.WiFi.Radio.1.OperatingStandards</input_parameters>
<automation_approch>1. Load wifiagent module
2. Using WIFIAgent_Get, get and save Device.WiFi.Radio.1.SupportedStandards
3. Using WIFIAgent_Get, get Device.WiFi.Radio.1.OperatingStandards
4. Check if operating standards are a subset of supported standards
5. Unload wifiagent module</automation_approch>
<except_output>Device.WiFi.Radio.1.OperatingStandards is a subset of SupportedStandards list</except_output>
<priority>High</priority>
<test_stub_interface>WifiAgent</test_stub_interface>
    <test_script>TS_WIFIAGENT_2.4GHZ_OperatingStdFromSupportedStds</test_script>
<skipped>No</skipped>
<release_version></release_version>
<remarks></remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifiagent","1");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIAGENT_2.4GHZ_OperatingStdFromSupportedStds');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
#Get the list of supported Standards
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.1.SupportedStandards")
expectedresult="SUCCESS";
#Execute the test case in DUT
    tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the list of supported standards"
print "EXPECTED RESULT 1: Should get the list of supported standards"
suppStd = details.split("VALUE:")[1].split(' ')[0];
print "ACTUAL RESULT 1: Supported stds are %s " %details
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#get the current Operating Standards for 2.4GHz Channel
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.1.OperatingStandards")
        tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
operStd = details.split("VALUE:")[1].split(' ')[0].split(',');
flag = 1;
for index in range(len(operStd)):
if operStd[index] not in suppStd:
flag = 0;
break;
        if expectedresult in actualresult and flag == 1:
            #Set the result status of execution
            tdkTestObj.setResultStatus("SUCCESS");
            print "TEST STEP 2: Get the current operating standard"
            print "EXPECTED RESULT 2: Current operating standard should be a subset of the supported standards list"
            print "ACTUAL RESULT 2: Operating standards are %s " %details
            #Get the result of execution
            print "[TEST EXECUTION RESULT] : SUCCESS";
        else:
            tdkTestObj.setResultStatus("FAILURE");
            print "TEST STEP 2: Get the current operating standard"
            print "EXPECTED RESULT 2: Current operating standard should be a subset of the supported standards list"
            print "ACTUAL RESULT 2: %s " %details
            #Get the result of execution
            print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the list of supported standards"
print "EXPECTED RESULT 1: Should get the list of supported standards"
print "ACTUAL RESULT 1: %s " %details
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("wifiagent");
else:
print "Failed to load wifi module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| [
"[email protected]"
] | |
329d3204dc3b2a84aab3c1ac936990725c674272 | 17ce3b1a263f7698cdb557db00dc04f419c98c30 | /plotter.py | d1b55c1b1fcfd441902099ad719d863fd33323fb | [] | no_license | gioperalto/supervised-learning-algos | 336e80c2b178d15b395117bec7266d6a1263beaf | 6de5a5dced092ba6a0a481a0087dd7dc5dd5ae19 | refs/heads/master | 2022-12-14T23:11:33.256893 | 2020-09-20T19:50:46 | 2020-09-20T19:50:46 | 297,150,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | import matplotlib.pyplot as plt
class Plotter:
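    """Thin matplotlib helper: accumulates labeled line plots, annotates the
    best point with a vertical line, and saves to images/<learner>/<name>."""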
def __init__(self, name, learner, axes):
self.name = name
self.learner = learner
self.x_axis = axes['x']
self.y_axis = axes['y']
def add_plot(self, x, y, label, marker='.'):
plt.plot(x, y, linestyle='-', marker=marker, label=label)
def find_max(self, x, y, label):
max_acc, offset = max(y), max(x)*.0125
i = y.index(max_acc)
plt.axvline(x=x[i], label='{}={:.4f} ({})'.format(self.x_axis, x[i], label), color='g')
plt.text(x=x[i]+offset, y=100, s='{:.1f}'.format(max_acc))
def find_max_int(self, x, y, label):
max_acc, offset = max(y), max(x)*.0125
i = y.index(max_acc)
plt.axvline(x=x[i], label='{}={} ({})'.format(self.x_axis, x[i], label), color='g')
plt.text(x=x[i]+offset, y=100, s='{:.1f}'.format(max_acc))
def find_min(self, x, y, label, top=True):
min_mse, x_offset = min(y), max(x)*.0125
y_offset = (max(y)/2.) + min(y) if top else (max(y)/2.) - min(y)
i = y.index(min_mse)
plt.axvline(x=x[i], label='{}={:.4f} ({})'.format(self.x_axis, x[i], label), color='g')
plt.text(x=x[i]+x_offset, y=y_offset, s='{:.5f}'.format(min_mse))
def find_min_int(self, x, y, label, top=True):
min_mse, x_offset = min(y), max(x)*.0125
y_offset = (max(y)/2.) + min(y) if top else min(y) - (max(y)/2.)
i = y.index(min_mse)
plt.axvline(x=x[i], label='{}={} ({})'.format(self.x_axis, x[i], label), color='g')
plt.text(x=x[i]+x_offset, y=y_offset, s='{:.5f}'.format(min_mse))
def save(self, loc='best', framealpha=.8, top_limit=None):
if top_limit is not None:
plt.ylim(top=top_limit)
plt.xlabel(self.x_axis)
plt.ylabel(self.y_axis)
plt.title(self.name)
plt.legend(loc=loc, framealpha=framealpha)
plt.savefig('images/{}/{}'.format(self.learner, self.name))
plt.close() | [
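# Usage sketch (illustrative values; the images/<learner>/ directory must
# already exist for save() to succeed):
#   p = Plotter('accuracy-vs-k', 'knn', {'x': 'k', 'y': 'accuracy (%)'})
#   p.add_plot([1, 3, 5], [90.0, 92.5, 91.0], label='validation')
#   p.find_max_int([1, 3, 5], [90.0, 92.5, 91.0], label='validation')
#   p.save()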
"[email protected]"
] | |
221ab89d4bc6379c32813f24b04d6a884a96d362 | 10d98fecb882d4c84595364f715f4e8b8309a66f | /smith/preprocessing_smith.py | 928937fe73d5a39a6c6dc6a5a096c56496bf1a3b | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 22,890 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to preprocess text data into SMITH dual encoder model inputs."""
import collections
import random
import nltk
import tensorflow.compat.v1 as tf
import tqdm
from smith import utils
from smith import wiki_doc_pair_pb2
from smith.bert import tokenization
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None, "Input data path.")
flags.DEFINE_string(
"output_file", None,
"Output TF examples (or comma-separated list of files) in TFRecord "
"files.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the SMITH model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool("add_masks_lm", True,
"If true, add masks for word prediction LM pre-training.")
flags.DEFINE_integer(
"max_sent_length_by_word", 32, "The maximum length of a sentence by tokens."
"A sentence will be cut off if longer than this length, and will be padded "
"if shorter than it. The sentence can also be a sentence block.")
flags.DEFINE_integer(
"max_doc_length_by_sentence", 64,
"The maximum length of a document by sentences. A "
"document will be cut off if longer than this length, and"
"will be padded if shorter than it.")
flags.DEFINE_bool(
"greedy_sentence_filling", True,
"If true, apply the greedy sentence filling trick to reduce the "
"number of padded tokens.")
flags.DEFINE_integer("max_predictions_per_seq", 5,
"Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
class TrainingInstance(object):
"""A single training instance (sentence pair as dual encoder model inputs)."""
def __init__(self,
tokens_1,
segment_ids_1,
masked_lm_positions_1,
masked_lm_labels_1,
input_mask_1,
masked_lm_weights_1,
tokens_2,
segment_ids_2,
masked_lm_positions_2,
masked_lm_labels_2,
input_mask_2,
masked_lm_weights_2,
instance_id,
documents_match_labels=-1.0):
self.tokens_1 = tokens_1
self.segment_ids_1 = segment_ids_1
self.masked_lm_positions_1 = masked_lm_positions_1
self.masked_lm_labels_1 = masked_lm_labels_1
self.input_mask_1 = input_mask_1
self.masked_lm_weights_1 = masked_lm_weights_1
self.tokens_2 = tokens_2
self.segment_ids_2 = segment_ids_2
self.masked_lm_positions_2 = masked_lm_positions_2
self.masked_lm_labels_2 = masked_lm_labels_2
self.input_mask_2 = input_mask_2
self.masked_lm_weights_2 = masked_lm_weights_2
self.instance_id = instance_id
self.documents_match_labels = documents_match_labels
def __str__(self):
s = ""
s += "instance_id: %s\n" % self.instance_id
s += "documents_match_labels: %s\n" % (str(self.documents_match_labels))
s += "tokens_1: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens_1]))
s += "segment_ids_1: %s\n" % (" ".join([str(x) for x in self.segment_ids_1
]))
s += "masked_lm_positions_1: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions_1]))
s += "masked_lm_labels_1: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels_1]))
s += "input_mask_1: %s\n" % (" ".join([str(x) for x in self.input_mask_1]))
s += "masked_lm_weights_1: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_weights_1]))
s += "tokens_2: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens_2]))
s += "segment_ids_2: %s\n" % (" ".join([str(x) for x in self.segment_ids_2
]))
s += "masked_lm_positions_2: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions_2]))
s += "masked_lm_labels_2: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels_2]))
s += "input_mask_2: %s\n" % (" ".join([str(x) for x in self.input_mask_2]))
s += "masked_lm_weights_2: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_weights_2]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def add_features_for_one_doc(features, tokens, segment_ids, input_mask,
masked_lm_positions, masked_lm_labels,
masked_lm_weights, tokenizer, doc_index):
"""Add features for one document in a WikiDocPair example."""
input_ids = tokenizer.convert_tokens_to_ids(tokens)
features["input_ids_" + doc_index] = utils.create_int_feature(input_ids)
features["input_mask_" + doc_index] = utils.create_int_feature(input_mask)
features["segment_ids_" + doc_index] = utils.create_int_feature(segment_ids)
if masked_lm_labels:
masked_lm_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)
features["masked_lm_positions_" +
doc_index] = utils.create_int_feature(masked_lm_positions)
features["masked_lm_ids_" +
doc_index] = utils.create_int_feature(masked_lm_ids)
features["masked_lm_weights_" +
doc_index] = utils.create_float_feature(masked_lm_weights)
def write_instance_to_example_files(instances, tokenizer, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
features = collections.OrderedDict()
add_features_for_one_doc(
features=features,
tokens=instance.tokens_1,
segment_ids=instance.segment_ids_1,
input_mask=instance.input_mask_1,
masked_lm_positions=instance.masked_lm_positions_1,
masked_lm_labels=instance.masked_lm_labels_1,
masked_lm_weights=instance.masked_lm_weights_1,
tokenizer=tokenizer,
doc_index="1")
add_features_for_one_doc(
features=features,
tokens=instance.tokens_2,
segment_ids=instance.segment_ids_2,
input_mask=instance.input_mask_2,
masked_lm_positions=instance.masked_lm_positions_2,
masked_lm_labels=instance.masked_lm_labels_2,
masked_lm_weights=instance.masked_lm_weights_2,
tokenizer=tokenizer,
doc_index="2")
# Adds fields on more content/id information of the current example.
features["instance_id"] = utils.create_bytes_feature(
[bytes(instance.instance_id, "utf-8")])
features["tokens_1"] = utils.create_bytes_feature(
[bytes(t, "utf-8") for t in instance.tokens_1])
features["tokens_2"] = utils.create_bytes_feature(
[bytes(t, "utf-8") for t in instance.tokens_2])
# Adds the documents matching labels.
features["documents_match_labels"] = utils.create_float_feature(
[float(instance.documents_match_labels)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
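    # Advance the writer index round-robin so the examples are spread evenly
    # across the output shards.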
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info(
"tokens_1: %s" %
" ".join([tokenization.printable_text(x) for x in instance.tokens_1]))
tf.logging.info(
"tokens_2: %s" %
" ".join([tokenization.printable_text(x) for x in instance.tokens_2]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
elif feature.bytes_list.value:
values = feature.bytes_list.value
tf.logging.info("%s: %s" %
(feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info("Wrote %d total instances", total_written)
def get_smith_model_tokens(input_text, tokenizer, sent_token_counter):
"""Generate tokens given an input text for the SMITH model."""
res_tokens = []
for sent in nltk.tokenize.sent_tokenize(input_text):
# The returned res_tokens is a 2D list to maintain the sentence boundary
# information. We removed all the empty tokens in this step.
if not sent:
continue
tokens = [w for w in tokenizer.tokenize(sent) if w]
sent_token_counter[0] += 1 # Track number of sentences.
sent_token_counter[1] += len(tokens) # Track number of tokens.
res_tokens.append(tokens)
return (res_tokens, sent_token_counter)
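# A hedged illustration (hypothetical tokenizer output, for intuition only):
# get_smith_model_tokens("Good day. Bye.", tokenizer, [0, 0]) could return
# ([["good", "day", "."], ["bye", "."]], [2, 5]) -- one inner token list per
# detected sentence, with the counter tracking 2 sentences and 5 tokens.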
def create_training_instances_wiki_doc_pair(
input_file, tokenizer, max_sent_length_by_word, max_doc_length_by_sentence,
masked_lm_prob, max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from WikiDocPair proto data."""
# The input data is in the WikiDocPair proto format in tfrecord.
wiki_doc_pair = wiki_doc_pair_pb2.WikiDocPair()
instances = []
# Add some counters to track some data statistics.
sent_token_counter = [0, 0]
for example in tqdm.tqdm(tf.python_io.tf_record_iterator(input_file)):
doc_pair = wiki_doc_pair.FromString(example)
    # If model_name = smith_dual_encoder, we first use a sentence tokenizer
    # to split doc_one/doc_two texts into sentences and use [SEN] to label
    # the sentence boundary information. Thus, in the masking and padding
    # step, we know the boundaries between sentences and can do the masking
    # and padding according to the actual length of each sentence.
doc_one_text = " \n\n\n\n\n\n ".join(
[a.text for a in doc_pair.doc_one.section_contents])
doc_two_text = " \n\n\n\n\n\n ".join(
[a.text for a in doc_pair.doc_two.section_contents])
doc_one_text = tokenization.convert_to_unicode(doc_one_text).strip()
doc_two_text = tokenization.convert_to_unicode(doc_two_text).strip()
doc_one_tokens, sent_token_counter = get_smith_model_tokens(
doc_one_text, tokenizer, sent_token_counter)
doc_two_tokens, sent_token_counter = get_smith_model_tokens(
doc_two_text, tokenizer, sent_token_counter)
# Skip the document pairs if any document is empty.
if not doc_one_tokens or not doc_two_tokens:
continue
vocab_words = list(tokenizer.vocab.keys())
instance_id = doc_pair.id
if doc_pair.human_label_for_classification:
doc_match_label = doc_pair.human_label_for_classification
else:
# Set the label as 0.0 if there are no available labels.
doc_match_label = 0.0
instances.append(
create_instance_from_wiki_doc_pair(
instance_id, doc_match_label, doc_one_tokens, doc_two_tokens,
max_sent_length_by_word, max_doc_length_by_sentence, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return (instances, sent_token_counter)
def create_instance_from_wiki_doc_pair(instance_id, doc_match_label,
doc_one_tokens, doc_two_tokens,
max_sent_length_by_word,
max_doc_length_by_sentence,
masked_lm_prob, max_predictions_per_seq,
vocab_words, rng):
"""Creates `TrainingInstance`s for a WikiDocPair input data."""
(tokens_1, segment_ids_1, masked_lm_positions_1, masked_lm_labels_1, \
input_mask_1, masked_lm_weights_1) = \
get_tokens_segment_ids_masks(max_sent_length_by_word, max_doc_length_by_sentence, doc_one_tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng)
(tokens_2, segment_ids_2, masked_lm_positions_2, masked_lm_labels_2, \
input_mask_2, masked_lm_weights_2) = \
get_tokens_segment_ids_masks(max_sent_length_by_word, max_doc_length_by_sentence, doc_two_tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens_1=tokens_1,
segment_ids_1=segment_ids_1,
masked_lm_positions_1=masked_lm_positions_1,
masked_lm_labels_1=masked_lm_labels_1,
input_mask_1=input_mask_1,
masked_lm_weights_1=masked_lm_weights_1,
tokens_2=tokens_2,
segment_ids_2=segment_ids_2,
masked_lm_positions_2=masked_lm_positions_2,
masked_lm_labels_2=masked_lm_labels_2,
input_mask_2=input_mask_2,
masked_lm_weights_2=masked_lm_weights_2,
instance_id=instance_id,
documents_match_labels=doc_match_label)
return instance
def get_tokens_segment_ids_masks(max_sent_length_by_word,
max_doc_length_by_sentence, doc_one_tokens,
masked_lm_prob, max_predictions_per_seq,
vocab_words, rng):
"""Get the tokens, segment ids and masks of an input sequence."""
# The format of tokens for SMITH dual encoder models is like:
# [CLS] block1_token1 block1_token2 block1_token3 ... [SEP] [SEP] [PAD] ...
# [CLS] block2_token1 block2_token2 block2_token3 ... [SEP] [SEP] [PAD] ...
# [CLS] block3_token1 block3_token2 block3_token3 ... [SEP] [SEP] [PAD] ...
# If max_sent_length_by_word is large, then there will be many padded
# words in the sentence. Here we added an optional "greedy sentence filling"
# trick in order to reduce the number of padded words and maintain all
# content in the document. We allow a "sentence" block to contain more than
  # one natural sentence and try to fill as many sentences as possible into
  # the "sentence" block. If a sentence would be cut off and the current
  # sentence block is not empty, we put that sentence into the next
  # "sentence" block. According to the ALBERT and RoBERTa papers, a segment
  # is usually composed of more than one natural sentence, which has been
  # shown to benefit performance. doc_one_tokens is a 2D list which contains
  # the sentence boundary information.
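  # A worked example with hypothetical sizes (not the real flag defaults):
  # with max_sent_length_by_word = 16 there are 13 usable word slots per
  # block after reserving [CLS]/[SEP]/[SEP]. Natural sentences of lengths
  # [5, 6, 9] then pack as block 1 = the 5- and 6-token sentences
  # (5 + 6 = 11 <= 13), while block 2 starts with the 9-token sentence
  # rather than splitting it across the block boundary.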
sentence_num = len(doc_one_tokens)
# sent_block_token_list is a 2D list to maintain sentence block tokens.
sent_block_token_list = []
natural_sentence_index = -1
while natural_sentence_index + 1 < sentence_num:
natural_sentence_index += 1
sent_tokens = doc_one_tokens[natural_sentence_index]
if not sent_tokens:
continue
if FLAGS.greedy_sentence_filling:
cur_sent_block_length = 0
cur_sent_block = []
      # Fill as many sentences as possible into the current sentence block in
      # a greedy way.
while natural_sentence_index < sentence_num:
cur_natural_sent_tokens = doc_one_tokens[natural_sentence_index]
if not cur_natural_sent_tokens:
natural_sentence_index += 1
continue
cur_sent_len = len(cur_natural_sent_tokens)
if ((cur_sent_block_length + cur_sent_len) <=
(max_sent_length_by_word - 3)) or cur_sent_block_length == 0:
# One exceptional case here is that if the 1st sentence of a sentence
# block is already going across the boundary, then the current
# sentence block will be empty. So when cur_sent_block_length is 0
# and we meet a natural sentence with length longer than
# (max_sent_length_by_word - 3), we still put this natural sentence
# in the current sentence block. In this case, this long natural
# sentence will be cut off with the final length up to
# (max_sent_length_by_word - 3).
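          # Continuing the hypothetical sizes above: a 20-token natural
          # sentence arriving at an empty block is still placed here and is
          # later truncated to the 13 usable slots.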
cur_sent_block.extend(cur_natural_sent_tokens)
cur_sent_block_length += cur_sent_len
natural_sentence_index += 1
else:
# If cur_sent_block_length + cur_sent_len > max_sent_length_by_word-3
# and the current sentence block is not empty, the sentence which
# goes across the boundary will be put into the next sentence block.
natural_sentence_index -= 1
break
sent_tokens = cur_sent_block
sent_block_token_list.append(sent_tokens)
if len(sent_block_token_list) >= max_doc_length_by_sentence:
break # Skip more sentence blocks if the document is too long.
# For each sentence block, generate the token sequences, masks and paddings.
tokens_doc = []
segment_ids_doc = []
masked_lm_positions_doc = []
masked_lm_labels_doc = []
input_mask_doc = []
masked_lm_weights_doc = []
for block_index in range(len(sent_block_token_list)):
tokens_block, segment_ids_block, masked_lm_positions_block, \
masked_lm_labels_block, input_mask_block, masked_lm_weights_block = \
get_token_masks_paddings(
sent_block_token_list[block_index],
max_sent_length_by_word,
masked_lm_prob,
max_predictions_per_seq,
vocab_words,
rng,
block_index)
tokens_doc.extend(tokens_block)
segment_ids_doc.extend(segment_ids_block)
masked_lm_positions_doc.extend(masked_lm_positions_block)
masked_lm_labels_doc.extend(masked_lm_labels_block)
input_mask_doc.extend(input_mask_block)
masked_lm_weights_doc.extend(masked_lm_weights_block)
# Pad sentence blocks if the actual number of sentence blocks is less than
# max_doc_length_by_sentence.
sentence_block_index = len(sent_block_token_list)
while sentence_block_index < max_doc_length_by_sentence:
for _ in range(max_sent_length_by_word):
tokens_doc.append("[PAD]")
segment_ids_doc.append(0)
input_mask_doc.append(0)
for _ in range(max_predictions_per_seq):
masked_lm_positions_doc.append(0)
masked_lm_labels_doc.append("[PAD]")
masked_lm_weights_doc.append(0.0)
sentence_block_index += 1
assert len(tokens_doc) == max_sent_length_by_word * max_doc_length_by_sentence
assert len(masked_lm_labels_doc
) == max_predictions_per_seq * max_doc_length_by_sentence
return (tokens_doc, segment_ids_doc, masked_lm_positions_doc,
masked_lm_labels_doc, input_mask_doc, masked_lm_weights_doc)
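# Each returned list is flat across sentence blocks: the tokens, segment ids
# and input mask hold max_sent_length_by_word * max_doc_length_by_sentence
# entries, and the masked LM lists hold max_predictions_per_seq *
# max_doc_length_by_sentence entries, matching the assertions above.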
def get_token_masks_paddings(block_tokens, max_sent_length_by_word,
masked_lm_prob, max_predictions_per_seq,
vocab_words, rng, block_index):
"""Generates tokens, masks and paddings for the input block tokens."""
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_sent_length_by_word - 3
# Truncates the sequence if sequence length is longer than max_num_tokens.
tokens = []
segment_ids = []
if len(block_tokens) > max_num_tokens:
block_tokens = block_tokens[0:max_num_tokens]
tokens_a = block_tokens
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
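  # At this point tokens is ["[CLS]", t_1, ..., t_k, "[SEP]", "[SEP]"] with
  # all-zero segment ids, since each sentence block holds a single segment.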
masked_lm_positions = []
masked_lm_labels = []
masked_lm_weights = []
if max_predictions_per_seq > 0:
(tokens, masked_lm_positions,
masked_lm_labels) = utils.create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
# Add [PAD] to tokens and masked LM related lists.
input_mask = [1] * len(tokens)
while len(tokens) < max_sent_length_by_word:
tokens.append("[PAD]")
input_mask.append(0)
segment_ids.append(0)
assert len(tokens) == max_sent_length_by_word
assert len(input_mask) == max_sent_length_by_word
assert len(segment_ids) == max_sent_length_by_word
if max_predictions_per_seq > 0:
# Transfer local positions in masked_lm_positions to global positions in the
# whole document to be consistent with the model training pipeline.
masked_lm_positions = [
(i + max_sent_length_by_word * block_index) for i in masked_lm_positions
]
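    # e.g. with hypothetical values max_sent_length_by_word = 32 and
    # block_index = 2, a local masked position 5 maps to global position
    # 5 + 32 * 2 = 69 in the flattened document sequence.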
masked_lm_weights = [1.0] * len(masked_lm_labels)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_labels.append("[PAD]")
masked_lm_weights.append(0.0)
return (tokens, segment_ids, masked_lm_positions, masked_lm_labels,
input_mask, masked_lm_weights)
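# A minimal commented-out usage sketch (hypothetical inputs; the tiny token
# list and vocab are illustrative, not taken from the real vocab file):
#   rng = random.Random(0)
#   (tokens, seg_ids, lm_pos, lm_labels, mask, lm_weights) = \
#       get_token_masks_paddings(["hello", "world"], 8, 0.0, 0, ["[MASK]"],
#                                rng, 0)
#   # tokens -> ["[CLS]", "hello", "world", "[SEP]", "[SEP]", "[PAD]",
#   #            "[PAD]", "[PAD]"] and mask -> [1, 1, 1, 1, 1, 0, 0, 0]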
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Reading from input files ***")
for input_file in input_files:
tf.logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
# Creates training instances.
max_predictions_per_seq = FLAGS.max_predictions_per_seq if FLAGS.add_masks_lm else 0
masked_lm_prob = FLAGS.masked_lm_prob if FLAGS.add_masks_lm else 0
instances, sent_token_counter = create_training_instances_wiki_doc_pair(
input_file=FLAGS.input_file,
tokenizer=tokenizer,
max_sent_length_by_word=FLAGS.max_sent_length_by_word,
max_doc_length_by_sentence=FLAGS.max_doc_length_by_sentence,
masked_lm_prob=masked_lm_prob,
max_predictions_per_seq=max_predictions_per_seq,
rng=rng)
output_files = FLAGS.output_file.split(",")
tf.logging.info("*** Writing to output files ***")
for output_file in output_files:
tf.logging.info(" %s", output_file)
  # Converts training instances into TensorFlow examples and writes the results.
write_instance_to_example_files(instances, tokenizer, output_files)
# Finally outputs some data statistics.
tf.logging.info("sent_count, token_count, doc_pair_count: %d %d %d",
sent_token_counter[0], sent_token_counter[1], len(instances))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
tf.app.run()
| [
"[email protected]"
] | |
5a99e21e09133b8330af575f20aed0a74083b8e9 | c522b0332ee42d01f1ee5bdd3cdd3d72eb9af24b | /venv/lib/python3.8/site-packages/lusid/models/version.py | b4a391fed53d87838174441a2083936e6230f96b | [] | no_license | Jeffkent01coder/trackphone | e5aad6f99efb0f0c11f260d1f2a0b232d5453dfe | 3570375938c7e947eb272d2cec1589202351141c | refs/heads/master | 2023-05-10T22:27:40.255686 | 2021-06-02T10:23:17 | 2021-06-02T10:23:17 | 373,125,235 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,951 | py | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2820
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Version(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'effective_from': 'datetime',
'as_at_date': 'datetime'
}
attribute_map = {
'effective_from': 'effectiveFrom',
'as_at_date': 'asAtDate'
}
required_map = {
'effective_from': 'required',
'as_at_date': 'required'
}
def __init__(self, effective_from=None, as_at_date=None): # noqa: E501
"""
Version - a model defined in OpenAPI
:param effective_from: The effective datetime at which this version became valid. Only applies when a single entity is being interacted with. (required)
:type effective_from: datetime
:param as_at_date: The asAt datetime at which the data was committed to LUSID. (required)
:type as_at_date: datetime
""" # noqa: E501
self._effective_from = None
self._as_at_date = None
self.discriminator = None
self.effective_from = effective_from
self.as_at_date = as_at_date
@property
def effective_from(self):
"""Gets the effective_from of this Version. # noqa: E501
The effective datetime at which this version became valid. Only applies when a single entity is being interacted with. # noqa: E501
:return: The effective_from of this Version. # noqa: E501
:rtype: datetime
"""
return self._effective_from
@effective_from.setter
def effective_from(self, effective_from):
"""Sets the effective_from of this Version.
The effective datetime at which this version became valid. Only applies when a single entity is being interacted with. # noqa: E501
:param effective_from: The effective_from of this Version. # noqa: E501
:type: datetime
"""
if effective_from is None:
raise ValueError("Invalid value for `effective_from`, must not be `None`") # noqa: E501
self._effective_from = effective_from
@property
def as_at_date(self):
"""Gets the as_at_date of this Version. # noqa: E501
The asAt datetime at which the data was committed to LUSID. # noqa: E501
:return: The as_at_date of this Version. # noqa: E501
:rtype: datetime
"""
return self._as_at_date
@as_at_date.setter
def as_at_date(self, as_at_date):
"""Sets the as_at_date of this Version.
The asAt datetime at which the data was committed to LUSID. # noqa: E501
:param as_at_date: The as_at_date of this Version. # noqa: E501
:type: datetime
"""
if as_at_date is None:
raise ValueError("Invalid value for `as_at_date`, must not be `None`") # noqa: E501
self._as_at_date = as_at_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Version):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
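# A short usage sketch (illustrative datetimes only):
#   import datetime
#   v = Version(effective_from=datetime.datetime(2020, 1, 1),
#               as_at_date=datetime.datetime(2020, 1, 2))
#   v.to_dict()  # -> {'effective_from': ..., 'as_at_date': ...}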
| [
"[email protected]"
] | |
dd55d969908dd3f04d0e960b578e9d237be44a6e | db2c2f07fc34f96d72587154dadf0d5c6e78bec6 | /rwalk2.py | f9f10d940bc8af2cebc90590df01d7f35b96e00c | [] | no_license | DavidVelez/Scientific-computing-hw | af744cbfc43140ea9a2b0beec4dc4856faf2dbdc | 7aaa2570e89f8bb705b33565137019436219d71f | refs/heads/master | 2021-01-10T18:04:20.422622 | 2016-05-01T01:43:29 | 2016-05-01T01:43:29 | 51,524,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #uncoding: utf-8
import random
N = 2000  # N = number of steps per walk (originally "pasos")
B = range(100)  # B = the drunkards (originally "Borrachos"), one walk each
def rwalk1d(N):
    """Run a 1D random walk of N steps and return the final position."""
    S = 0
    for i in range(N):
        # h is positive or negative with roughly equal probability, so each
        # step moves the walker one unit right or left.
        h = 1 + (N / 2 - N * random.random())
        if h < 0:
            S = S - 1
        elif h > 0:
            S = S + 1
    return S
# print('the final position is:', rwalk1d(1000))  # rwalk1d takes one argument
def rw(B, N):
    # The original test `x[i] == x[i]` was vacuously true and crashed because
    # x is an int; counting the walkers that finish back at the origin is one
    # plausible reading of the intent (this interpretation is an assumption).
    c = 0
    for i in B:
        x = rwalk1d(N)
        if x == 0:
            c = c + 1
    return c
print(rw(B, N))
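# For an unbiased walk the final position has a standard deviation of about
# sqrt(N) ~= 45 for N = 2000, so only a handful of the 100 walkers are
# expected to finish exactly at the origin in any single run.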
| [
"[email protected]"
] | |
b2db7ee4570dbb793eaa865fbfca30b6a76da4f5 | fe01647fb8912e0d5711c907a8dfe06e9bd0f428 | /date.py | 0f010e5dad3106ab82ddfd160038dad7b49b036c | [] | no_license | shashidhar1305/demopygit | 76d463b51ac515a0af699ff30b8e4f37c536a44e | ef4dac54f8dbfc277ce2ddbef37f657b17dd8385 | refs/heads/main | 2023-02-09T06:11:50.334751 | 2021-01-07T10:45:49 | 2021-01-07T10:45:49 | 327,581,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | import time
import datetime
print("current date and time: ",datetime.datetime.now())
print("current year: ",datetime.datetime.today().strftime("%Y"))
print("month of year: ",datetime.datetime.today().strftime("%B"))
print("week number of the year: ",datetime.datetime.today().strftime("%W"))
print("weekdayof the week: ",datetime.date.today().strftime("%w"))
print("Day of year: ",datetime.datetime.today().strftime("%j"))
print("Day of the month: ",datetime.datetime.today().strftime("%d"))
print("Day of week: ",datetime.datetime.today().strftime("%A"))
| [
"[email protected]"
] |