blob_id (string, 40) | directory_id (string, 40) | path (string, 3-281) | content_id (string, 40) | detected_licenses (list, 0-57) | license_type (2 classes) | repo_name (string, 6-116) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (107 classes) | src_encoding (20 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4-6.02M) | extension (78 classes) | content (string, 2-6.02M) | authors (list, 1-1) | author (string, 0-175) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a66332dbd94525a7a5039ba712550ebe7a1566a
|
bfff33b62a22e8c84a816ebbf870fc806cea31e5
|
/cloud-photos/bin/rstpep2html.py
|
6798917cbcf8f436e1713cdbe65e9c627aa3ad6b
|
[
"MIT"
] |
permissive
|
xiaolim/cloud-comp-hw2
|
93abe7b1b50c0768a9f928df45df4e4d873755c2
|
aded9c25f302e37ceb21e436c6886f5db4fb16da
|
refs/heads/master
| 2020-04-09T05:32:52.085653 | 2018-12-05T22:23:03 | 2018-12-05T22:23:03 | 160,068,898 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
#!/Users/limxiaoyu/anaconda3/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
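# Example invocation (a sketch; the Docutils publisher front end reads a reST source
# and writes HTML, taking source/destination paths on the command line):
#   ./rstpep2html.py pep-0287.txt pep-0287.html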
|
[
"[email protected]"
] | |
8dfb146210977f3e59d065241592fdfc7e6fa4ce
|
18b68d7364f4ad912561d446ab15c9eda57b7785
|
/practice/Lynda_Python/Ch2/classes_start.py
|
27d107d2377ffbe164c6f43820341b6574005289
|
[] |
no_license
|
gkalidas/Python
|
1ae8fb443486a6122727a4ca34faadc02bd8d3ba
|
7413bd9055e64973b9708e1b5c926efd0e599044
|
refs/heads/master
| 2020-04-10T05:08:27.104227 | 2019-12-15T17:02:10 | 2019-12-15T17:02:10 | 160,818,601 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 758 |
py
|
#
# Example file for working with classes
#
class myClass():
def method1(self):
print("myClass method1")
def method2(self, someString):
print("myClass method2 " + someString)
# anotherClass inherits the methods of myClass
class anotherClass(myClass):
def method1(self):
        # explicitly invoke the inherited myClass implementation
        # (self plays the role of the "this" keyword in other languages)
myClass.method1(self)
print("anotherClass method1")
    # method overriding: the inherited implementation is not called here
def method2(self, someString):
print("anotherClass method2 ")
def main():
c = myClass()
c.method1()
c.method2("This is string")
c2 = anotherClass()
c2.method1()
c2.method2("This is a string2")
if __name__ == "__main__":
main()
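# Expected output (derived from the calls above):
#   myClass method1
#   myClass method2 This is string
#   myClass method1          <- printed by the inherited call inside anotherClass.method1
#   anotherClass method1
#   anotherClass method2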
|
[
"[email protected]"
] | |
def7d86f686c9d996392625132e26379342f6314
|
70dd1a05828fcb923130c31480d7a3003c4bd3f8
|
/SE final/back/api/migrations/0002_auto_20190429_1811.py
|
ce8b87bbe0cbc9f8b0d93b673bb7335313ae006a
|
[] |
no_license
|
itagaev/webdev2019
|
69e4c5a019e2f1f1c4de80e69ce16ddcb079068f
|
e2312d8a85ae066f652975816732d692a2b8416e
|
refs/heads/master
| 2020-04-19T04:15:48.903665 | 2019-07-01T16:54:08 | 2019-07-01T16:54:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 448 |
py
|
# Generated by Django 2.2 on 2019-04-29 15:11
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='competition',
name='created_at',
field=models.DateTimeField(verbose_name=datetime.datetime(2019, 4, 29, 18, 11, 17, 180541)),
),
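        # Note (reviewer assumption): a datetime object as verbose_name is almost certainly
        # a model typo; the author likely meant default=datetime.datetime(...) or auto_now_add=True.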
]
|
[
"[email protected]"
] | |
5d63a29f73198d37de548cfa0f47b0028dc6aa21
|
01265678a724c0be60a9b0f6ead3dd74df748ef0
|
/evaluation.py
|
bddc8d3629dcc93de90b4141c0901015eea074da
|
[] |
no_license
|
sabyasachi-choudhury/Kasey
|
0b4a364977ab8600d57e181060fe6f5ca5aa2846
|
a1a6079e1c42dffa43fb1c71d5eec0b7699eba9e
|
refs/heads/main
| 2023-06-20T07:04:47.696602 | 2021-07-14T06:50:19 | 2021-07-14T06:50:19 | 385,841,440 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 734 |
py
|
import random
import main as mn
chars = 'qwertyuiopasdfghjklzxcvbnm '
def test(epochs):
probs = []
correct_detections = []
for x in range(epochs):
word = ""
for y in range(random.randint(8, 25)):
word += random.choice(chars)
prediction = mn.classify(word)
if prediction:
probs.append(prediction[0][1])
# print(word)
else:
correct_detections.append(word)
    avg = sum(probs) / len(probs) * 100 if probs else 0.0  # guard: probs may be empty if nothing was classified
print(avg)
print(len(correct_detections))
return avg
averages = []
for x in range(10):
print("Epoch:", x+1)
averages.append(test(1000))
print("Big avg:", sum(averages)/len(averages))
|
[
"[email protected]"
] | |
3b3178710bb1584d794ee1cd7a69dd56e62533e4
|
4267016f8ea7ce51d7628a9992a97820a0bd305f
|
/main.py
|
35c8974559c8077a95f8c6d07e714aa9e49a0ea0
|
[] |
no_license
|
moiss16/ASISTENTE
|
9dd345487b71fe77c3a8dd3c00e8292dc416c05b
|
83b502c0865d298f8ec436674ac15b33748233ff
|
refs/heads/main
| 2023-05-01T07:42:49.246277 | 2021-05-05T00:14:04 | 2021-05-05T00:14:04 | 364,419,415 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,286 |
py
|
import speech_recognition as sr
r= sr.Recognizer()
import time
import webbrowser
def record_audio(ask=False):
with sr.Microphone() as source:
if ask:
print(ask)
audio = r.listen(source)
voice_data =''
try:
voice_data =r.recognize_google(audio)
except sr.UnknownValueError:
print('Lo siento no te entiendo')
except sr.RequestError:
print('Lo siento, error de conexion')
return voice_data
def respond(voice_data):
if 'como te llamas' in voice_data:
print('Mi nombre es alexis')
if 'hora' in voice_data:
print(time.ctime())
if 'buscar' in voice_data:
buscar = record_audio('¿que necesitas buscar?')
url= ('https://google.com/search?q=' + buscar)
webbrowser.get().open(url)
print('esto es lo que encontre para:'+buscar)
if 'place' in voice_data:
place = record_audio("¿Que lugar?")
url=('https://google.nl/maps/place/'+place+'/&')
webbrowser.get().open(url)
print('Esto es lo que encontre para '+place)
#time.sleep(1)
print('¿Como te puedo ayudar?')
#while 1:
voice_data = record_audio()
respond(voice_data)
#print(voice_data)
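# Dependencies (assumed): the SpeechRecognition package plus PyAudio for sr.Microphone;
# recognize_google calls Google's Web Speech API, so an internet connection is required.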
|
[
"[email protected]"
] | |
fe4480acba5a812f116032631f99921ae72e907e
|
8366d4e68bcba4bad6e4cdcc0e04b0a8c90652b9
|
/MRSPPlot.py
|
3375f85222f8b4e88fe7ea0218f10c8e46db8b0e
|
[] |
no_license
|
jancervenka/EVCSim
|
dfacd84ce0e369056f36780b9a5612e6a17da84c
|
daedeedf76e86db4d6c2651ca9a7c8280c9cbd8d
|
refs/heads/master
| 2021-06-18T07:09:31.606064 | 2017-06-07T08:13:23 | 2017-06-07T08:13:23 | 91,266,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 277 |
py
|
import matplotlib.pyplot as plt
MRSP = [120 for i in range(0, 2999)]
MRSP += [60 for i in range(0, 2000)]
plt.plot(MRSP, color = 'b')
plt.xlabel('distance [m]')
plt.ylabel('MRSP value [km/h]')
plt.title('MRSP Test')
plt.xlim([0, len(MRSP) - 1])
plt.ylim([0, 150])
plt.show()
|
[
"[email protected]"
] | |
8ce1689b4605bab929cceaf30bd0e1e4bc9293a9
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/1007_Minimum_Domino_Rotations_For_Equal_Row.py
|
974ee558096c8fe9a393d9b91f507186e8e356d7
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626 | 2022-03-14T08:39:47 | 2022-03-14T08:39:47 | 69,802,909 | 862 | 438 |
MIT
| 2022-08-18T06:42:46 | 2016-10-02T14:51:31 |
Python
|
UTF-8
|
Python
| false | false | 2,177 |
py
|
# Source: https://tinyurl.com/v3zqer7
# Approach 1
class Solution(object):
def minDominoRotations(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
result = float("inf")
        for domino in range(1, 7):  # domino halves only show values 1 to 6, so try each possible target value
isPossible = True
            topRotationCount, bottomRotationCount = 0, 0
for a, b in zip(A, B):
                if domino != a and domino != b:  # this value cannot cover position i, so it is impossible
isPossible = False
break
if domino == a and domino != b:
bottomRotationCount += 1
elif domino != a and domino == b:
                    topRotationCount += 1
if isPossible:
                result = min(result, min(topRotationCount, bottomRotationCount))
return -1 if result == float("inf") else result
# Source: https://tinyurl.com/v3zqer7
# Approach 2
class Solution(object):
def minDominoRotations(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
rotations = self.checkRotationFor(A, B, A[0])
# If one could make all elements in A or B equal to A[0]
if rotations != -1 or A[0] == B[0]:
return rotations
# If one could make all elements in A or B equal to B[0]
else:
return self.checkRotationFor(A, B, B[0])
def checkRotationFor(self, A, B, num):
"""
Return minimum number of swaps,
if one could make all elements in A or B equal to 'num'.
Else return -1
"""
# How many rotations should be done
# to have all elements in A equal to 'num'
# and to have all elements in B equal to 'num'
length = len(A)
rotations_A, rotations_B = 0, 0
for i in range(length):
if A[i] != num and B[i] != num:
return -1
elif A[i] != num:
rotations_A += 1
elif B[i] != num:
rotations_B += 1
return min(rotations_A, rotations_B)
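# Sanity check (example from the problem statement):
#   A = [2, 1, 2, 4, 2, 2]; B = [5, 2, 6, 2, 3, 2]
#   Solution().minDominoRotations(A, B)  # -> 2 (rotate dominoes 2 and 4 so the top row is all 2s)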
|
[
"[email protected]"
] | |
132e2992317c4be95ec5e084b122faf57bdb6871
|
8e647469ee4851dd76fa2d1104e9e99bb102d10f
|
/autotest/Library/update_library.py
|
c0a655dbe41875ef4d9a619e978ac6a85acc99c9
|
[] |
no_license
|
sunyn95123/Rf_autoRequest
|
0e1c4b2e4bcf3844d6b8d65ad00460d503f5a351
|
5945a57464bcca0f6ed2cc357c1e108608035734
|
refs/heads/master
| 2020-07-09T11:30:40.183285 | 2019-08-28T07:47:22 | 2019-08-28T07:47:22 | 203,958,339 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 533 |
py
|
# -*- coding: utf-8 -*-
import shutil,os
from distutils.sysconfig import get_python_lib
python_library_dir=get_python_lib()
BASE_DIR = os.path.split(os.path.realpath(__file__))[0]
def update_library(name):
try:
shutil.rmtree(python_library_dir+'/'+name+'/')
except:
pass
try:
shutil.copytree(BASE_DIR+'/'+name, python_library_dir+'/'+name+'/')
        print u'更新' + name + u'成功'  # Python 2 print; the message means "updated <name> successfully"
except:
pass
update_library('TestLibrary')
update_library('RedisLibrary')
# raw_input();
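# Note (assumption): copying into get_python_lib() modifies the interpreter's site-packages,
# so this script may need elevated permissions (sudo / run as administrator) to succeed.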
|
[
"[email protected]"
] | |
9fca8ba3b59b43a17a00a17f8d571843d28eba97
|
249af4ed4bd9c97d86baf8fc04856e8ce41c2398
|
/image_processing.py
|
cdaab5733319000555d4e335d9e049be1f2435c2
|
[
"CC-BY-3.0"
] |
permissive
|
robinhad/photoscan
|
de1d527d3568bc74cb860852a3b23719e524317d
|
dc90a521baaa7fb37595a0892da97679d6a6dbd5
|
refs/heads/master
| 2023-03-02T23:54:37.882915 | 2021-02-01T19:58:52 | 2021-02-01T19:58:52 | 126,396,982 | 1 | 0 | null | 2023-02-27T21:52:08 | 2018-03-22T21:27:18 |
Python
|
UTF-8
|
Python
| false | false | 8,076 |
py
|
import cv2
import numpy as np
import math
import base64
MIN_MATCH_COUNT = 10
def rotate_image(image, angle):
"""
Rotates an OpenCV 2 / NumPy image about it's centre by the given angle
(in degrees). The returned image will be large enough to hold the entire
new image, with a black background
"""
# Get the image size
    # No, that's not an error - NumPy stores image matrices backwards
image_size = (image.shape[1], image.shape[0])
image_center = tuple(np.array(image_size) / 2)
# Convert the OpenCV 3x2 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]
)
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix([
[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)],
[0, 0, 1]
])
    # Compute the transform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
# Apply the transform
result = cv2.warpAffine(
image,
affine_mat,
(new_w, new_h),
flags=cv2.INTER_LINEAR
)
return result
def largest_rotated_rect(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
Converted to Python by Aaron Snoswell
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
    gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)  # NB: both branches are identical (atan2(x, x) is always pi/4); kept as in the widely copied original, though it reads like a transcription slip
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (
bb_w - 2 * x,
bb_h - 2 * y
)
def crop_around_center(image, width, height):
"""
Given a NumPy / OpenCV 2 image, crops it to the given width and height,
around it's centre point
"""
image_size = (image.shape[1], image.shape[0])
image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))
if(width > image_size[0]):
width = image_size[0]
if(height > image_size[1]):
height = image_size[1]
x1 = int(image_center[0] - width * 0.5)
x2 = int(image_center[0] + width * 0.5)
y1 = int(image_center[1] - height * 0.5)
y2 = int(image_center[1] + height * 0.5)
return image[y1:y2, x1:x2]
def encode_image_as_string(img):
retval, buffer = cv2.imencode('.jpg', img)
return base64.b64encode(buffer)
def decode_image_from_string(image_string):
    nparr = np.frombuffer(base64.b64decode(image_string), np.uint8)  # np.fromstring is deprecated for binary data
    return cv2.imdecode(nparr, 0)  # flag 0 decodes as grayscale; use cv2.IMREAD_COLOR for colour
def decode_grayscale_image_from_string(image_string):
    nparr = np.frombuffer(base64.b64decode(image_string), np.uint8)  # was 'jpg_as_text', an undefined name (NameError)
return cv2.imdecode(nparr, 0)
def get_components(normalised_homography):
'''((translationx, translationy), rotation, (scalex, scaley), shear)'''
a = normalised_homography[0, 0]
b = normalised_homography[0, 1]
c = normalised_homography[0, 2]
d = normalised_homography[1, 0]
e = normalised_homography[1, 1]
f = normalised_homography[1, 2]
p = math.sqrt(a*a + b*b)
r = (a*e - b*d)/(p)
q = (a*d+b*e)/(a*e - b*d)
translation = (round(c, 2), round(f, 2))
scale = (round(p, 2), round(r, 2))
shear = round(q, 2) #y axis
theta = round(math.atan2(b, a), 2) #x axis
return (translation, theta, scale, shear)
def to_degrees(radians):  # renamed from the misleading 'toRadians': it converts radians *to degrees*
    return round(math.degrees(radians), 2)
def find_image_angle_properties(img1, img2):
print("Initializing comparers")
surf = cv2.xfeatures2d.SURF_create()
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
font = cv2.FONT_HERSHEY_SIMPLEX
flann = cv2.FlannBasedMatcher(index_params, search_params)
image_height, image_width = img2.shape[0:2]
img2 = rotate_image(img2, 270)
img2 = crop_around_center(
img2,
*largest_rotated_rect(
image_width,
image_height,
math.radians(270)
)
)
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good) > MIN_MATCH_COUNT:
src_pts = np.float32(
[kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32(
[kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
matches_properties, mask = cv2.findHomography(
src_pts, dst_pts, cv2.RANSAC, 5.0)
h, w = img1.shape
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]
).reshape(-1, 1, 2)
if matches_properties is None:
pass
else:
matches_count = len(good)
matches_data = matches_properties
components = get_components(matches_data)
fontScale = 4
color = (255, 255, 255)
fontThickness = 10
lineHeight = 120
cv2.putText(img2, "Matches: " + str(matches_count), (1, 1*lineHeight),
font, fontScale, color, fontThickness, cv2.LINE_AA)
cv2.putText(img2, "Translation: " +
str(components[0]), (1, 2*lineHeight), font, fontScale, color, fontThickness, cv2.LINE_AA)
cv2.putText(
img2, "Theta: " + str(toRadians(components[1])), (1, 3*lineHeight), font, fontScale, color, fontThickness, cv2.LINE_AA)
cv2.putText(
img2, "Scale: " + str(components[2]), (1, 4*lineHeight), font, fontScale, color, fontThickness, cv2.LINE_AA)
cv2.putText(
img2, "Shear: " + str(toRadians(components[3])), (1, 5*lineHeight), font, fontScale, color, fontThickness, cv2.LINE_AA)
dst = cv2.perspectiveTransform(pts, matches_data)
img2 = cv2.polylines(img2, [np.int32(dst)],
True, (255, 255, 255), 3, cv2.LINE_AA)
return img2, components
return img2
if __name__ == '__main__':
import matplotlib.pyplot as plt
img1 = cv2.imread('test.jpg', 0)
img2 = cv2.imread('test.jpg', 0)
img = find_image_angle_properties(img1, img2)
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.show()
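# Note (assumption): SURF_create lives in the opencv-contrib package (pip install
# opencv-contrib-python) and is disabled in some builds for patent reasons;
# cv2.SIFT_create() in OpenCV >= 4.4 is a near drop-in alternative for the matching step above.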
|
[
"[email protected]"
] | |
a963765aa5a55bd302f57ef62a9abee6b95fc4a4
|
3671ab5562659dddd9deaf1ad09a576663ceaf8e
|
/pos-system_step1.py
|
034067308ba5f8cd132efb22e44ad237cc7d8b05
|
[] |
no_license
|
xxxicemanxxx/study-04-pos-system-01-master_end
|
107b230416382d8954eed27815727af4e41d73fa
|
1562c27e6162ad19f8e0e6e092f53ccd1007ea03
|
refs/heads/main
| 2023-03-20T21:33:52.432671 | 2021-02-28T09:19:29 | 2021-02-28T09:19:29 | 343,063,965 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,491 |
py
|
### Item class
class Item:
def __init__(self,item_code,item_name,price):
self.item_code=item_code
self.item_name=item_name
self.price=price
def get_price(self):
return self.price
### Order class
class Order:
def __init__(self,item_master):
self.item_order_list=[]
self.item_master=item_master
def add_item_order(self,item_code):
self.item_order_list.append(item_code)
    # Task 1: display the name and price of each ordered item
def view(self,item_code):
for menu in self.item_master:
if item_code==menu.item_code:
                # format spec {: >3}: right-align (>) to width 3, padded with spaces
print("商品コード:{},商品名:{: >3},価格:{}".format(menu.item_code,menu.item_name,menu.price))
### Main routine
def main():
    # register the item master data
item_master=[]
item_master.append(Item("001","りんご",100))
item_master.append(Item("002","なし",120))
item_master.append(Item("003","みかん",150))
print("■メニュー■")
for menu in item_master:
print("商品コード:{},商品名:{: >3},価格:{}".format(menu.item_code,menu.item_name,menu.price))
print("")
    # register the order
print("オーダーした商品はこちらです")
order=Order(item_master)
order.view("003")
order.view("001")
order.view("002")
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
90c47ae4f35e9cfdd36566a5dcfdace8cb271d97
|
1b0317f8b8de13dc071ee3d8ee9cc2c12c88aad9
|
/src/searchers/__init__.py
|
6f712186e7a19c9b9415bd146b188eb3dc7d3db1
|
[
"MIT"
] |
permissive
|
jesseward/stagehand
|
3182203c5cf49cf8671a61edbe8b971eedb18063
|
63ffd9a33f82d57039193e666e84b3b43982aa4e
|
refs/heads/master
| 2021-01-16T22:07:10.308245 | 2012-05-13T23:56:34 | 2012-05-13T23:56:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,265 |
py
|
from __future__ import absolute_import
import logging
import re
import kaa
from ..utils import load_plugins
from ..config import config
from .base import SearcherError
log = logging.getLogger('stagehand.searchers')
plugins = load_plugins('searchers', globals())
@kaa.coroutine(progress=True)
def search(progress, series, episodes, date=None, min_size=None, ideal_size=None, quality='HD', skip=[]):
tried = set()
always = [name for name in plugins if plugins[name].Searcher.ALWAYS_ENABLED]
for name in config.searchers.enabled + always:
if name not in plugins or name in skip or name in tried:
continue
tried.add(name)
searcher = plugins[name].Searcher()
try:
results = yield searcher.search(series, episodes, date, min_size, ideal_size, quality)
        except SearcherError, e:  # Python 2 'except X, e' syntax; this codebase (kaa) predates Python 3
log.warning('%s failed: %s', name, e.args[0])
except Exception:
log.exception('%s failed with unhandled error', name)
else:
# FIXME: if some episodes don't have results, need to try other searchers.
if results:
yield results
return
else:
log.debug2('%s found no results', name)
yield {}
|
[
"[email protected]"
] | |
2b6dbf579ae37711f46b26057e43ff7b642659e2
|
77c8c500d4077ad733fbfe2c6a85a1dd47bd3cb5
|
/chelseashin/ProblemSolving/2156_포도주시식.py
|
940cea71221cff29f679eb73ae27638dc45e2bad
|
[] |
no_license
|
chelseashin/AlgorithmStudy2021
|
786f03c4c17bc057518d428481e7d710d24ec98e
|
1a4744a621ed25715fc9060c5224f0b1092d9c00
|
refs/heads/master
| 2023-06-22T22:27:47.289806 | 2021-07-28T02:54:22 | 2021-07-28T02:54:22 | 326,441,667 | 1 | 5 | null | 2021-06-29T01:27:40 | 2021-01-03T15:44:16 |
Python
|
UTF-8
|
Python
| false | false | 848 |
py
|
# References: https://pacific-ocean.tistory.com/152
#             https://claude-u.tistory.com/204
# The maximum dp[i] is decided by one of three cases:
# 1) OXOO: drink two in a row (glasses i-1 and i)
# 2) OXO: skip one glass, then drink glass i
# 3) X: do not drink glass i at all
from sys import stdin
input = stdin.readline
n = int(input())
a = [0] + [int(input()) for _ in range(n)]
dp = [0, a[1]]
if n > 1:
dp.append(a[1] + a[2])
for i in range(3, n+1):
dp.append(max(dp[i-1],
dp[i-3]+a[i-1]+a[i],
dp[i-2]+a[i]))
# print(n, a, dp)
print(dp[n])
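# Worked example (BOJ 2156 sample input): n = 6, amounts 6 10 13 9 8 1
# -> dp = [0, 6, 16, 23, 28, 33, 33], so 33 is printed (drink glasses 1, 2, 4, 5)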
# Same approach as above
# wine = [0] + [int(input()) for _ in range(n)]
# dp = [0] * (n+1)
# dp[1] = wine[1]
# if n > 1:
# dp[2] = wine[1] + wine[2]
# for i in range(3, n+1):
# dp[i] = max(dp[i-3]+wine[i-1]+wine[i], dp[i-2]+wine[i], dp[i-1])
#
# print(dp[n])
|
[
"[email protected]"
] | |
49ca3426c5822ee4c4b4e67e340cb352471922c5
|
da327862940b303918d29989ba3f08f7b84ed81e
|
/search_operation/search_operation/asgi.py
|
c2d87bebe2609a82b331f6cc39393670633a8751
|
[
"Apache-2.0"
] |
permissive
|
DingLi23/COEN6311_super
|
f5044c32de9c0dba2298e348d09fb18f3a2ae1c6
|
a48c14999fe53cd2a780c98eabf605f4a2eccbba
|
refs/heads/main
| 2023-09-06T02:09:38.858760 | 2021-11-25T18:58:06 | 2021-11-25T18:58:06 | 406,982,729 | 0 | 1 |
Apache-2.0
| 2021-11-25T18:58:06 | 2021-09-16T01:55:08 |
Vue
|
UTF-8
|
Python
| false | false | 409 |
py
|
"""
ASGI config for search_operation project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'search_operation.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
ffea19ab0a9ee990515424ae3b8b5e47593e62f3
|
17b423540da45db7365e96a79b0bff5b27851b76
|
/Recsys_Challenge_Trailmix/Recsys_Challenge_Trailmix_CODEONLY/Model_QQ/preprocess.py
|
11464ec46a7690de375b19a203ef20e6a08ef7f6
|
[
"Apache-2.0"
] |
permissive
|
VickyYu7/RecSys-Challenge-2018-Trailmix
|
0174e7b373fbe73b151e80dd24c2c56991da82d7
|
27a3524c9cd397b62f7c008bd0b79a81fa16a852
|
refs/heads/master
| 2023-03-16T15:54:15.425798 | 2018-07-03T15:35:56 | 2018-07-03T15:35:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,301 |
py
|
# import json
# import numpy as np
#
# a = json.load(open('PL_TRACKS_ALL.json'))
#
# x = 0 # '864737' 376
# y = 1000 # '54' 5
# for i in range(len(a)):
# if len(a[str(i)]) > x:
# x = len(a[str(i)])
# g = i
# if len(a[str(i)]) < y:
# y = len(a[str(i)])
# k = i
#
#
# name = []
# for i in range(len(a)):
# #if i % 10000 == 0:
# print(i)
# for j in range(len(a[str(i)])):
# if a[str(i)][j] not in name:
# name.append(a[str(i)][j])
#
# for i in range(len(a)):
# if i % 10000 == 0:
# print(i)
# for j in range(len(a[str(i)])):
# name.append(a[str(i)][j])
#
# name = np.unique(name)
#
#
#
#
# b = np.asarray(name)
# # np.save('song_name.py', b)
# b = np.load('song_name.py.npy')
#
# import copy
# c = copy.deepcopy(a)
# num_of_song = len(b)
#
# # for i in range(len(a)):
# # #if i % 10000 == 0:
# # print(i)
# # for j in range(len(a[str(i)])):
# # c[str(i)][j] = int(np.where(b == a[str(i)][j])[0])
# # # for k in range(num_of_song):
# # # if b[k] == a[str(i)][j]:
# # # c[str(i)][j] = k+1
#
# map = np.zeros([8, 75, 75], dtype=np.int)
# for i in range(len(b)):
# map[ord(b[i][0])-48, ord(b[i][1]) - 48, ord(b[i][2]) - 48] += 1
#
#
# Cmap = np.reshape(np.cumsum(map), [8, 75, 75])
#
# for i in range(len(a)):
# if i % 10000 == 0:
# print(i)
# for j in range(len(a[str(i)])):
# tmp1 = ord(a[str(i)][j][0]) - 48
# tmp2 = ord(a[str(i)][j][1]) - 48
# tmp3 = ord(a[str(i)][j][2]) - 48
# s2 = Cmap[tmp1, tmp2, tmp3]
# s1 = Cmap[tmp1, tmp2, tmp3] - map[tmp1, tmp2, tmp3]
# tmpb = b[s1:s2]
# c[str(i)][j] = s1 + int(np.where(tmpb == a[str(i)][j])[0])
#
# qq = 0
# for i in range(len(a)):
# if i % 10000 == 0:
# print(i)
# for j in range(len(a[str(i)])):
# if a[str(i)][j] == b[c[str(i)][j]]:
# qq += 1
#
#
# # with open('Map.json', 'w') as fp:
# # json.dump(c, fp)
#
# c = json.load(open('Map.json'))
#
# output = open("all.txt", 'a')
# for i in range(len(a)):
# if i % 10000 == 0:
# print(i)
# for j in range(len(a[str(i)])):
# line = str(i) + '\t' + str(c[str(i)][j]) + '\n'
# output.write(line)
# output.close()
#
# with open("all.txt") as in_f:
# num_of_rating = 0
# for line in in_f:
# num_of_rating += 1
# print(num_of_rating) # 10000054
#
# i = 0
# qq = []
# for line in file("Data/all_NeuMF.txt"):
# if i % 10000 == 0:
# print(i)
# i += 1
# data = line.rstrip('\n').split('\t')
# qq.append(int(data[1]))
#
# max(qq)
# print(line)
# print(data)
#
# # i_idx = str(0)
# # mark = str(0)
# # outfile = file("netflix/u_u_5.txt", "w")
# # for line in file("netflix/u_u_4.txt"):
# # data = line.rstrip('\n').split('\t')
# # if i_idx == data[1]:
# # data.append(mark)
# # mark = data[3]
# # else:
# # data.append(str(0))
# # mark = data[3]
# # i_idx = data[1]
# # outfile.write('\t'.join(data))
# # outfile.write('\n')
# # outfile.close()
#
#
# # for i in range(len(a)):
# # #if i % 10000 == 0:
# # print(i)
# # for j in range(len(a[str(i)])):
# # c[str(i)][j] = int(np.where(b == a[str(i)][j])[0])
# # # for k in range(num_of_song):
# # # if b[k] == a[str(i)][j]:
# # # c[str(i)][j] = k+1
#
#
#
# # for i in range(len(a)):
# # if i % 10000 == 0:
# # print(i)
# # for j in range(len(a[str(i)])):
# # tmp1 = ord(a[str(i)][j][0]) - 48
# # tmp2 = ord(a[str(i)][j][1]) - 48
# # tmp3 = ord(a[str(i)][j][2]) - 48
# # if tmp1 == 0 and tmp2 == 0 and tmp3 == 0:
# # s1 = 0
# # elif tmp1 == 0 and tmp2 == 0:
# # s1 = sum(map[0, 0, 0:tmp3])
# # elif tmp2 == 0 and tmp3 == 0:
# # s1 = sum(sum(sum(map[0:tmp1, :, :])))
# # elif tmp1 == 0 and tmp3 == 0:
# # s1 = sum(sum(map[0, 0:tmp2, :]))
# # elif tmp1 == 0:
# # s1 = sum(sum(map[0, 0:tmp2, :])) + sum(map[0, tmp2, 0:tmp3])
# # elif tmp2 == 0:
# # s1 = sum(sum(sum(map[0:tmp1, :, :]))) + sum(map[tmp1, tmp2, 0:tmp3])
# # elif tmp3 == 0:
# # s1 = sum(sum(sum(map[0:tmp1, :, :]))) + sum(sum(map[tmp1, 0:tmp2, :]))
# # else:
# # s1 = sum(sum(sum(map[0:tmp1, :, :])))\
# # + sum(sum(map[tmp1, 0:tmp2, :])) + sum(map[tmp1, tmp2, 0:tmp3])
# # s2 = s1 + map[tmp1, tmp2, tmp3]
# # tmpb = b[s1:s2]
# # c[str(i)][j] = s1 + int(np.where(tmpb == a[str(i)][j])[0])
#
# # qq1 = []
# # qq2 = []
# # for line in file("Data/all_NeuMF.txt"):
# # data = line.rstrip('\n').split('\t')
# # qq1.append(int(data[0]))
# # qq2.append(int(data[1]))
#
# qq1 = []
# for line in file("Data/Task_2/PL.train.rating.txt"):
# data = line.rstrip('\n').split('\t')
# if int(data[0]) == 7:
# print(data)
# qq1.append(int(data[1]))
# print(qq1)
# import numpy as np
# b = np.load('song_name.py.npy')
#
# map = np.zeros([8, 75, 75], dtype=np.int)
# for i in range(len(b)):
# map[ord(b[i][0])-48, ord(b[i][1]) - 48, ord(b[i][2]) - 48] += 1
#
#
# Cmap = np.reshape(np.cumsum(map), [8, 75, 75])
# np.save('Cmap', Cmap)
# np.save('map', map)
# import json
# import numpy as np
#
# #tmp = '5_TEST_T9'
# #filename = 'PL_TRACKS_'+tmp+'.json'
# filename = 'PL_TRACKS_ALL.json'
# a = json.load(open(filename))
# Cmap = np.load('Cmap.npy')
# map1 = np.load('map.npy')
# b = np.load('song_name.py.npy')
#
# import copy
# c = copy.deepcopy(a)
#
# k = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# for j in range(len(a[i])):
# tmp1 = ord(a[str(i)][j][0]) - 48
# tmp2 = ord(a[str(i)][j][1]) - 48
# tmp3 = ord(a[str(i)][j][2]) - 48
# s2 = Cmap[tmp1, tmp2, tmp3]
# s1 = Cmap[tmp1, tmp2, tmp3] - map1[tmp1, tmp2, tmp3]
# tmpb = b[s1:s2]
# c[i][j] = s1 + int(np.where(tmpb == a[i][j])[0])
#
# # ff = 'Data/'+tmp+'.json'
# ff = 'PL_TRACKS_ALL_MAP.json'
# with open(ff, 'w') as fp:
# json.dump(c, fp)
# import json
# import numpy as np
#
# tmp = 'ALL'
# filename = 'PL_TRACKS_'+tmp+'.json'
# a = json.load(open(filename))
#
# qq = []
# k = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# if len(a[i]) != len(set(a[i])):
# print(i)
# qq.append(i)
#
# print len(qq)
# Cmap = np.load('Cmap.npy')
# qq=0
# for i in xrange(8):
# for j in xrange(75):
# for k in xrange(75):
# if i*j*k!=0:
# if Cmap[i,0,k]<Cmap[i-1,74,k]:
# qq+=1
#
#
#
#
# 053xKa7PdxQsJNWmBjV0sv
#
# 053xKa7PdxQsJNWmBjV0sv
# import json
# import numpy as np
#
# filename = 'Data/ALL.json'
# a = json.load(open(filename))
#
# t = np.zeros(2300000)
# np.save('track_stat.npy', t)
#
# k = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print k
# tmp = a[i]
# a[i] = list(set(a[i]))
# if len(a[i]) != 0:
# for j in xrange(len(a[i])):
# t[a[i][j]] += 1
#
# np.save('track_stat.npy', t)
# import numpy as np
# import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
#
# t = np.load('track_stat.npy')
#
#
# g = t[t<100]
#
# plt.hist(t, 10)
#
#
# a20 = np.where(t>20)[0]
# a50 = np.where(t>50)[0]
# a100 = np.where(t>100)[0]
# a500 = np.where(t>500)[0]
#
# np.save('item_idx_20_up.npy', a20)
# np.save('item_idx_50_up.npy', a50)
# np.save('item_idx_100_up.npy', a100)
# np.save('item_idx_500_up.npy', a500)
# import numpy as np
# import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
#
# t = np.load('track_stat.npy')
#
# n = 100
# qq = np.zeros(n)
# for i in range(n):
# qq[i] = len(t[(t>5*i)&(t<=5*(i+1))])/2262292.0
#
#
# qq2 = np.zeros(n)
# for i in range(n):
# qq2[i] = sum(t[(t>5*i)&(t<=5*(i+1))])/sum(t)
#
# # plt.plot(qq, qq2)
# plt.figure(3)
# plt.plot(range(0, 500, 5), qq2[0:100])
#
#
# plt.plot(range(0, 100, 5), np.cumsum(qq2))
#
# plt.figure(2)
# plt.plot(range(0, 100, 5), np.cumsum(qq[0:20]))
#
#
# plt.figure(4)
# plt.plot(range(0, 500, 5), qq2*1.0/qq)
# import json
# import numpy as np
#
# filename = 'Data/ALL.json'
# a = json.load(open(filename))
#
# t = np.zeros([2262292, 350])
# np.save('track_stat_2.npy', t)
#
# k = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print k
# tmp = a[i]
# a[i] = list(set(a[i]))
# if len(a[i]) != 0:
# for j in xrange(len(a[i])):
# t[a[i][j], j] += 1
#
# np.save('track_stat_2.npy', t)
# import numpy as np
# import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
#
# t = np.load('track_stat_2.npy')
#
# # >1
# tmp = np.sum(t[:, 1:], 1)
# a0 = np.where(tmp>0)[0]
# a5 = np.where(tmp>5)[0]
# a20 = np.where(tmp>20)[0]
# a50 = np.where(tmp>50)[0]
# a100 = np.where(tmp>100)[0]
# a500 = np.where(tmp>500)[0]
# np.save('item_idx_0_up_1.npy', a0)
# np.save('item_idx_5_up_1.npy', a5)
# np.save('item_idx_20_up_1.npy', a20)
# np.save('item_idx_50_up_1.npy', a50)
# np.save('item_idx_100_up_1.npy', a100)
# np.save('item_idx_500_up_1.npy', a500)
#
#
# # >5
# tmp = np.sum(t[:, 5:], 1)
# a0 = np.where(tmp>0)[0]
# a5 = np.where(tmp>5)[0]
# a20 = np.where(tmp>20)[0]
# a50 = np.where(tmp>50)[0]
# a100 = np.where(tmp>100)[0]
# a500 = np.where(tmp>500)[0]
# np.save('item_idx_0_up_5.npy', a0)
# np.save('item_idx_5_up_5.npy', a5)
# np.save('item_idx_20_up_5.npy', a20)
# np.save('item_idx_50_up_5.npy', a50)
# np.save('item_idx_100_up_5.npy', a100)
# np.save('item_idx_500_up_5.npy', a500)
#
# # >25
# tmp = np.sum(t[:, 25:], 1)
# a0 = np.where(tmp>0)[0]
# a5 = np.where(tmp>5)[0]
# a20 = np.where(tmp>20)[0]
# a50 = np.where(tmp>50)[0]
# a100 = np.where(tmp>100)[0]
# a500 = np.where(tmp>500)[0]
# np.save('item_idx_0_up_25.npy', a0)
# np.save('item_idx_5_up_25.npy', a5)
# np.save('item_idx_20_up_25.npy', a20)
# np.save('item_idx_50_up_25.npy', a50)
# np.save('item_idx_100_up_25.npy', a100)
# np.save('item_idx_500_up_25.npy', a500)
#
# # >100
# tmp = np.sum(t[:, 100:], 1)
# a0 = np.where(tmp>0)[0]
# a5 = np.where(tmp>5)[0]
# a20 = np.where(tmp>20)[0]
# a50 = np.where(tmp>50)[0]
# a100 = np.where(tmp>100)[0]
# a500 = np.where(tmp>500)[0]
# np.save('item_idx_0_up_100.npy', a0)
# np.save('item_idx_5_up_100.npy', a5)
# np.save('item_idx_20_up_100.npy', a20)
# np.save('item_idx_50_up_100.npy', a50)
# np.save('item_idx_100_up_100.npy', a100)
# np.save('item_idx_500_up_100.npy', a500)
# import json
# import numpy as np
# q = json.load(open('PL_NUM_HOLDOUTS_READONLY.json'))
# ori = json.load(open('PL_NUM_TRACKS_READONLY.json'))
# t = np.array(q.values())
#
# t_ori = np.array(ori.values())
#
# diff = t_ori-t
# import json
# a = json.load(open('PL_TRACKS_ALL_MAP.json'))
#
# k = 0
# t = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# t += len(list(set(a[i])))
# print(t)
#
#
#
# import json
# a = json.load(open('PL_TRACKS_FINAL_TEST.json'))
#
# k = 0
# t = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# t += len(list(set(a[i])))
# print(t)
# a = json.load(open('PL_TRACKS_FINAL_TEST.json'))
# a['1003738']
# [754961, 1203346, 1974353, 1040207, 1498381]
# a = json.load(open('WAR_PL_TRACKS_READONLY.json'))
# a['1003738']
# [u'2aibwv5hGXSgw7Yru8IYTO', u'48UPSzbZjgc449aqz8bxox', u'6nTiIhLmQ3FWhvrGafw2zj', u'3ZffCQKLFLUvYM59XKLbVm', u'59WN2psjkt1tyaxjspN8fp']
# b = np.load('song_name.py.npy')
# Traceback (most recent call last):
# File "<input>", line 1, in <module>
# NameError: name 'np' is not defined
# import numpy as np
# b = np.load('song_name.py.npy')
# import json
# import numpy as np
# #tmp = '5_TEST_T7'
# #filename = 'PL_TRACKS_'+tmp+'.json'
# #filename = 'Data/TryT5.json'
# filename = 'QQ_submit_word2vec.json'
# a = json.load(open(filename))
# Cmap = np.load('Cmap.npy')
# map1 = np.load('map.npy')
# b = np.load('song_name.py.npy')
#
# import copy
# c = copy.deepcopy(a)
#
# k = 0
# for i in a.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# for j in range(len(a[i])):
# tmp1 = ord(a[str(i)][j][0]) - 48
# tmp2 = ord(a[str(i)][j][1]) - 48
# tmp3 = ord(a[str(i)][j][2]) - 48
# s2 = Cmap[tmp1, tmp2, tmp3]
# s1 = Cmap[tmp1, tmp2, tmp3] - map1[tmp1, tmp2, tmp3]
# tmpb = b[s1:s2]
# c[i][j] = s1 + int(np.where(tmpb == a[i][j])[0])
#
# # ff = 'Data/'+tmp+'.json'
# ff = 'QQ_submit_word2vec_MAP.json'
# # ff = 'Data/Try.json'
# with open(ff, 'w') as fp:
# json.dump(c, fp)
import json
a = json.load(open('xing.json'))
b = json.load(open('QQ_submit_word2vec_MAP.json'))
c = json.load(open('PL_NUM_HOLDOUTS_READONLY.json'))
d = json.load(open('PL_NUM_TRACKS_READONLY.json'))
qq = {}
xing = {}
for i in b.keys():
qq[i] = b[i][0:c[i]]
for i in a.keys():
xing[i] = a[i][0:c[i]]
import numpy as np
num = np.zeros(10000)
k = 0
for i in qq.keys():
num[k] = len(set(qq[i]).intersection(set(xing[i])))
k += 1
total = 0
for i in qq.keys():
total += len(qq[i])
ori = np.zeros(10000)
k = 0
for i in qq.keys():
ori[k] = len(set(qq[i]))
k+=1
gg = num/ori
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
plt.plot(gg)
plt.hist(gg, bins=50, density=True)  # 'normed' was removed in newer Matplotlib; 'density' is the replacement
# Statistics below are based on 9000 users only (T1-T9; T0 excluded)
import numpy as np
idx = np.load('item_idx_100_up.npy')
qq_t = 0 # 670562
for i in qq.keys():
for j in range(len(qq[i])):
if qq[i][j] in idx:
qq_t += 1
c_t = 0 # 670562
for i in qq.keys():
for j in range(len(qq[i])):
c_t += 1
xing_t = 0 # 657957 total: 670562 diff: 12605
for i in qq.keys():
for j in range(len(qq[i])):
if xing[i][j] in idx:
xing_t += 1
xing_a_t = 0 # 4356849 total: 4500000 diff: 143151
for i in qq.keys():
for j in range(len(a[i])):
if a[i][j] in idx:
xing_a_t += 1
qq_b_t = 0 # 4500000
for i in qq.keys():
for j in range(len(b[i])):
if b[i][j] in idx:
qq_b_t += 1
# Overlap
k = 0
num = np.zeros(9000)
for i in qq.keys():
num[k] = len(set(qq[i]).intersection(set(xing[i])))
k += 1
k = 0
num2 = np.zeros(9000)
for i in qq.keys():
num2[k] = len(set(b[i]).intersection(set(a[i])))
k += 1
# First 10/20/30 Tracks Statistics
N = np.zeros(9000) # 5-225, >10: 8891 >20: 8435 >30: 7948 >40: 6971 >50: 6119
k = 0
for i in qq.keys():
N[k] = c[i]
k += 1
# qq top 10/20/30 in Xing
# top 5
N5 = np.zeros(9000)
k = 0
for i in qq.keys():
for j in range(5):
if b[i][j] in xing[i]:
N5[k] += 1
k += 1
# top 10
N10 = np.zeros(9000)
k = 0
for i in qq.keys():
for j in range(min(10, c[i])):
if b[i][j] in xing[i][0:min(10, c[i])]:
N10[k] += 1
k += 1
Ng10 = np.minimum(N10, N)
N20 = np.zeros(9000)
k = 0
for i in qq.keys():
for j in range(20):
if b[i][j] in xing[i]:
N20[k] += 1
k += 1
Ng20 = np.minimum(N20, N)
N30 = np.zeros(9000)
k = 0
for i in qq.keys():
for j in range(30):
if b[i][j] in xing[i]:
N30[k] += 1
k += 1
Ng30 = np.minimum(N30, N)
#
# # Change to orginal
# import numpy as np
# import json
# filename = 'Data/TryT5.json'
# Final = json.load(open(filename))
# b = np.load('song_name.py.npy')
# k = 0
# for p in Final.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# for q in range(len(Final[p])):
# Final[p][q] = b[Final[p][q]]
#
#
# ff = 'Data/TryT5Final.json'
# with open(ff, 'w') as fp:
# json.dump(Final, fp)
# import json
#
# a = json.load(open('PL_TRACKS_ALL_MAP.json'))
# for i in a.keys():
# a[i] = [str(x) for x in a[i]]
#
# ff = 'PL_TRACKS_ALL_MAP_STR.json'
# with open(ff, 'w') as fp:
# json.dump(a, fp)
# import json
# import numpy as np
# Final = json.load(open('Data/TryT5.json'))
# b = np.load('song_name.py.npy')
# k = 0
# for p in Final.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# for q in range(len(Final[p])):
# Final[p][q] = b[Final[p][q]]
#
# ff = 'Data/T5.json'
# with open(ff, 'w') as fp:
# json.dump(Final, fp)
#
#
#
# Final = json.load(open('Data/TryT2.json'))
# b = np.load('song_name.py.npy')
# k = 0
# for p in Final.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# for q in range(len(Final[p])):
# Final[p][q] = b[Final[p][q]]
#
# ff = 'Data/T2.json'
# with open(ff, 'w') as fp:
# json.dump(Final, fp)
#
#
# Final = json.load(open('Data/TryT6.json'))
# b = np.load('song_name.py.npy')
# k = 0
# for p in Final.keys():
# k += 1
# if k % 10000 == 0:
# print(k)
# for q in range(len(Final[p])):
# Final[p][q] = b[Final[p][q]]
#
# ff = 'Data/T6.json'
# with open(ff, 'w') as fp:
# json.dump(Final, fp)
import json
import numpy as np
a = json.load(open('PL_TRACKS_FINAL_TEST.json'))
# t = np.load('item_idx_100_up.npy')
t = np.load('item_idx_5_up.npy')
t1 = np.zeros(10000)
t2 = np.zeros(10000)
k = 0
for i in a.keys():
t1[k] = len(a[i])
tmp = 0
for j in a[i]:
if j in t:
tmp+=1
t2[k] = tmp
k += 1
|
[
"[email protected]"
] | |
df21e317ceb61c338a1fa5912cc06bf5acc616da
|
03ff7c5f41cde72b285944e454550b9a6e33200e
|
/core/models/model_abstract.py
|
74d18f7ca387daaa6b7bf992b3303eb3c4527792
|
[] |
no_license
|
hungdng/djang_skeleton
|
03604777a55f5e45dfd205ed672ce6da495d39bb
|
6dfb8b6b20add6c3c083732cbf71d53d950d3416
|
refs/heads/master
| 2022-12-10T02:54:14.301275 | 2019-03-22T04:36:14 | 2019-03-22T04:36:14 | 166,770,164 | 2 | 0 | null | 2022-12-08T01:33:14 | 2019-01-21T07:45:50 |
Python
|
UTF-8
|
Python
| false | false | 464 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class TimestampedModel(models.Model):
created_by = models.IntegerField(blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True)
updated_by = models.IntegerField(blank=True, null=True)
updated_date = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
ordering = ['-created_date', '-updated_date']
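# Example use (a sketch; concrete models inherit the shared audit fields):
# class Article(TimestampedModel):
#     title = models.CharField(max_length=255)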
|
[
"[email protected]"
] | |
64eee566526221275418f7edcba051489bb669c0
|
17463c8428ac4e34cad25a6377760e65a59bd19c
|
/chess/migrations/0001_initial.py
|
5670a552d2c6547167595554f16068a7909f64e8
|
[] |
no_license
|
khoshraftar/online-chess
|
4ab10feb1c4c60ead7dd7f1dfe784599d604ee32
|
647d66e0260bafccb9c857d098dfce8bc664d298
|
refs/heads/master
| 2021-01-11T17:45:12.501974 | 2017-02-14T15:54:26 | 2017-02-14T15:54:26 | 79,835,773 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 767 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('key', models.CharField(max_length=32)),
('email', models.CharField(max_length=48)),
('active', models.BooleanField()),
('wins', models.IntegerField()),
],
),
]
|
[
"[email protected]"
] | |
8dca8b20ef143c405cdd0d90dfba7321b13722d0
|
c3797ced6b4d0a691a219debd977bd992027292c
|
/ner_training/training_data/all/prodotti.py
|
8758486568324c5689cea7b358843a4abc6186cf
|
[] |
no_license
|
marcomoauro/twitter
|
edef7fa7d384fbe8709302c24629e6071a34b232
|
c74e03487b046c7b7191bb4a9a0fea271ac19a0f
|
refs/heads/master
| 2023-03-07T03:43:37.987280 | 2021-02-22T20:30:48 | 2021-02-22T20:30:48 | 303,747,371 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,771 |
py
|
import re
def filler_it(infobox_df, key_value):
d = {}
for index, row in infobox_df.iterrows():
if row['Property'] != key_value:
continue
values = row['Value'].split(':')[0].replace('_', ' ').replace('\\', "").replace('(mass media)', "").split('n*')
for value in values:
if value == '':
continue
value = value.replace('*', '').strip()
if '#' in value or '&' in value or re.search(r'\d', value):
continue
if ',' in value:
vals = value.split(',')
for v in vals:
vv = v.strip()
d.setdefault(row['Resource'].lower(), []).append(vv)
else:
d.setdefault(row['Resource'].lower(), []).append(value)
return d
def filler_en(infobox_df, key_value):
d = {}
for index, row in infobox_df.iterrows():
if row['Property'] != key_value:
continue
values = str(row['Value']).split(':')[0].replace('_', ' ').replace('\\', "").replace('(mass media)', "").split('n*')
for value in values:
if value == '':
continue
value = value.replace('*', '').strip()
if '#' in value or '&' in value or re.search(r'\d', value) or value == 'nan':
continue
if ' and ' in value:
vals = value.split(' and ')
for v in vals:
vv = v.strip()
d.setdefault(row['Resource'].replace('_', ' ').lower(), {'PRODUCT': []})['PRODUCT'].append(vv)
else:
d.setdefault(row['Resource'].replace('_', ' ').lower(), {'PRODUCT': []})['PRODUCT'].append(value)
return d
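# Note: filler_en mirrors filler_it, but splits multi-valued cells on " and " instead of ","
# and nests values under a 'PRODUCT' key, matching the NER training-label format.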
|
[
"[email protected]"
] | |
acefbcfd99b052440436e61cd90374c4d20f9db7
|
1311acbc58cbe0d28ff984c75a5c7d37387e2ebf
|
/project1_1/venv/bin/easy_install
|
6c77c1299e966ca7c769e30dd7bd32c5469b41cf
|
[] |
no_license
|
kasu207/cs50x
|
6718bd0ba82ebf8259654a3f182fe6084af20d15
|
d1590a29bfd3d43fd60d8d4226a14659088c8f68
|
refs/heads/master
| 2020-08-13T02:26:36.400144 | 2019-10-18T14:31:58 | 2019-10-18T14:31:58 | 214,889,718 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
#!/Users/philipprichter/Downloads/P_Projekt_Workspace/project1_1/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
fccc5e04254af51c2fc4a03cdf992b81f31a1d28
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/sql/v20190601preview/__init__.py
|
82b3a2004814746567987c5300774fdd220485e0
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,559 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .database import *
from .get_database import *
from .get_managed_database import *
from .get_server import *
from .get_server_azure_ad_administrator import *
from .get_sync_group import *
from .get_sync_member import *
from .get_workload_classifier import *
from .get_workload_group import *
from .managed_database import *
from .server import *
from .server_azure_ad_administrator import *
from .sync_group import *
from .sync_member import *
from .workload_classifier import *
from .workload_group import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:sql/v20190601preview:Database":
return Database(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:ManagedDatabase":
return ManagedDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:Server":
return Server(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:ServerAzureADAdministrator":
return ServerAzureADAdministrator(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:SyncGroup":
return SyncGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:SyncMember":
return SyncMember(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:WorkloadClassifier":
return WorkloadClassifier(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:sql/v20190601preview:WorkloadGroup":
return WorkloadGroup(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "sql/v20190601preview", _module_instance)
_register_module()
|
[
"[email protected]"
] | |
5011a21caf349d8ce94e37300ed1812a3e77ff99
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_25989.py
|
e0a1a41656b1eecde1b382880dffcadd2189e571
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,842 |
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((455.091, 548.131, 441.132), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((522.671, 548.429, 441.943), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((603.488, 547.37, 430.019), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((518.447, 575.68, 322.142), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((797.322, 526.929, 442.213), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((500.094, 537.392, 437.867), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((498.73, 536.413, 437.883), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((498.274, 508.872, 446.765), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((502.548, 483.082, 459.011), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((507.427, 454.916, 455.226), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((501.996, 437.831, 432.369), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((483.284, 448.014, 412.33), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((479.04, 548.96, 453.22), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((480.576, 351.176, 373.656), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((673.497, 390.666, 419.207), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((673.497, 390.666, 419.207), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((646.276, 396.064, 413.934), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((628.041, 411.52, 429.302), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((608.845, 430.869, 437.911), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((591.527, 453.567, 442.301), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((574.681, 476.771, 447.418), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((557.879, 500.257, 452.816), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((615.326, 292.629, 312.28), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((486.93, 713.637, 585.084), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((588.597, 521.567, 466.245), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((588.597, 521.567, 466.245), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((591.457, 516.909, 437.6), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((586.606, 526.984, 410.798), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((589.86, 554.228, 399.824), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((468.427, 583.357, 396.507), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((714.296, 539.961, 401.056), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((511.432, 560.558, 418.793), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((511.424, 560.567, 418.769), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((526.333, 544.602, 401.069), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((526.398, 522.405, 418.378), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((528.681, 499.153, 433.922), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((513.118, 484.584, 415.528), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((489.537, 476.382, 428.774), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((476.397, 455.114, 443.081), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((519.426, 501.731, 500.317), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((446.248, 399.583, 381.235), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((539.353, 543.386, 500.727), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((544.576, 549.144, 475.707), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((557.977, 563.579, 421.9), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((571.409, 578.04, 368.107), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((508.62, 629.225, 376.098), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((636.519, 560.093, 288.137), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((479.555, 565.106, 435.982), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((502.064, 579.203, 445.777), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((529.561, 585.617, 448.267), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((558.025, 587.772, 449.749), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((586.625, 589.775, 450.073), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((615.365, 589.726, 448.653), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((539.329, 564.984, 455.17), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((694.073, 613.407, 442.164), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"[email protected]"
] | |
59ab5667a34c44fdb895072c8f91f93182bc126b
|
acf314ab0fa399018764b2ebd96e33c66362994e
|
/0x0F-python-object_relational_mapping/1-filter_states.py
|
3db5e1107d420d574d6614b5ae1f741eb6da16ad
|
[] |
no_license
|
glyif/holbertonschool-higher_level_programming
|
98f9c2da0b71a4e9e2dd9f6fde755875e9015f34
|
14c02d79e2008db1b992b08f9faa55b20dbe0691
|
refs/heads/master
| 2021-01-20T06:53:16.179354 | 2017-09-28T18:14:12 | 2017-09-28T18:14:12 | 89,939,980 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 975 |
py
|
#!/usr/bin/python3
"""
mysqldb filter states
"""
import sys
import MySQLdb
def db_connection(user_name, password, db_name, host="localhost"):
"""
db_connection - connects to db
:param user_name: username
:param password: password
:param db_name: database name
:param host: host - default to localhost
:return: db
"""
db = MySQLdb.connect(host=host,
user=user_name,
passwd=password,
db=db_name)
return db
def db_query(db, query):
"""
db_query - queries database
:param db: database
:param query: query
:return: none
"""
    cur = db.cursor()
    cur.execute(query)
    data = cur.fetchall()
    for row in data:
        print(row)
    cur.close()
if __name__ == "__main__":
db = db_connection(sys.argv[1], sys.argv[2], sys.argv[3])
db_query(db, """SELECT id, name FROM states
WHERE name LIKE 'N%' ORDER BY states.id ASC""")
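# Example invocation (credentials and database name below are placeholders,
# not from the source):
#   ./1-filter_states.py root my_password hbtn_0e_0_usa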
|
[
"[email protected]"
] | |
712121c38d64a3f28e3a885b2d91a38da3391ebc
|
bc60447abf65fbe9882e9b66a8adff371cd12bd5
|
/xgb_model/scripts/my_model/cpmodule/orf_extraction.py
|
37001a4305bec079b017ce1eb1e2cac35c6a631f
|
[] |
no_license
|
fogside/lncRNA
|
03bb74af705f25947353dc3234a389be6a79f820
|
780b57db25969d1d7e17470c4a617abd6ef55de8
|
refs/heads/master
| 2020-02-26T15:55:41.575331 | 2017-07-17T00:37:37 | 2017-07-17T00:37:37 | 70,860,801 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,797 |
py
|
import numpy as np
class ORFFinder:
    """Find open reading frames (ORFs) in a nucleotide sequence across the three forward reading frames."""

    def __init__(self, seq):
self.seq = seq.upper()
def _find_start(self, seq, start=0):
for i in range(start, len(seq), 3):
if seq[i:i + 3] == 'ATG':
return i
return -1
def _find_stop(self, seq, start=0, stop_codons=['TAG', 'TAA', 'TGA']):
for i in range(start, len(seq), 3):
if seq[i:i + 3] in stop_codons:
return i
return -1
def find_longest(self):
starts = []
stops = []
for i in range(3):
ret1 = ret2 = 0
results = []
while ret2 != -1 and ret1 != -1:
ret1 = self._find_start(self.seq, start=(i if ret2 == 0 else ret2 + 3))
if ret1 == -1:
break
ret2 = self._find_stop(self.seq, ret1 + 3)
if ret2 == -1:
if ret1 != -1:
results.append((len(self.seq) - ret1, ret1, len(self.seq)))
break
results.append((ret2 - ret1, ret1, ret2))
if results != []:
max_size_idx = np.argmax([m[0] for m in results])
starts.append(results[max_size_idx][1])
stops.append(results[max_size_idx][2])
# print('max_size:', results[max_size_idx][0])
if len(starts) == 0:
return -1
long3 = [self.seq[n:k + 3] for n, k in zip(starts, stops)]
longest = long3[np.argmax([len(s) for s in long3])]
mean_orf_length = np.mean([len(seqv) for seqv in long3])
longest_size = len(longest)
orf_coverage = (longest_size * 1.0) / len(self.seq)
return starts, longest, longest_size, mean_orf_length, orf_coverage
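# Minimal usage sketch (the sequence below is made up; real callers in this
# project presumably feed transcript sequences). Note that find_longest
# returns -1 when no start codon is found.
if __name__ == "__main__":
    finder = ORFFinder("ccATGaaatttTAGgtATGcccgggtttTAA")
    result = finder.find_longest()
    if result != -1:
        starts, longest, longest_size, mean_len, coverage = result
        print(longest, longest_size, round(coverage, 3))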
|
[
"[email protected]"
] | |
ead92cae183577bac7b20e6db878d3f9acdcc363
|
721d06b9b63b647a199e0c83635d2eb036bd6a5b
|
/teams/migrations/0003_auto_20160813_1911.py
|
af168de8e01f6e57b0135f814f1bc2fd7f000b50
|
[] |
no_license
|
thekthuser/matador
|
e5f7063539a47f30383ae8e1265fbaad70781c50
|
56861b7075bbe8ef9fe087cacaab1345b4e1ff66
|
refs/heads/master
| 2020-12-07T13:25:43.153243 | 2016-08-18T22:59:00 | 2016-08-18T22:59:00 | 66,030,517 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 948 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-13 19:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teams', '0002_auto_20160813_0142'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='user',
),
migrations.AddField(
model_name='member',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
migrations.AddField(
model_name='member',
name='password',
field=models.CharField(default='asdf', max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='member',
name='phone',
field=models.CharField(max_length=255),
),
]
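# Applied with the standard Django workflow (assuming the usual manage.py
# entry point): python manage.py migrate teams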
|
[
"[email protected]"
] | |
1c53597309aa94040b5fb54133d52910d63bde90
|
c2beeb4fe6f64f4f2037644c9c80b93dd4e8abe0
|
/Burst and Quantum Time Prediction/thl.py
|
b7f8e41c921cb19241637ad60caee89e30d6c136
|
[] |
no_license
|
lkcbharath/OS-Project
|
911edff1cc51ab47b68a257d42d25b754874741b
|
b615b5c277f128a1e58c1cb818a03bc82633f871
|
refs/heads/master
| 2020-05-23T15:39:19.099395 | 2019-04-09T06:45:11 | 2019-04-09T06:45:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,579 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 13:01:19 2019
@author: arpit
"""
import numpy as np
import random
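# NOTE: findavgTime_rr and findavgTime_fcfs are called below but are not
# defined anywhere in this file. The two functions here are hedged stand-in
# sketches, assuming each returns the average waiting time over the burst
# times `bt` (all arrivals at t=0) for round-robin and FCFS scheduling
# respectively; the project's real implementations may differ.
def findavgTime_fcfs(proc, n, bt):
    # FCFS: process i waits for the sum of all earlier bursts.
    wt = 0
    total = 0
    for i in range(1, n):
        wt += bt[i - 1]
        total += wt
    return total / n


def findavgTime_rr(proc, n, bt, quantum):
    # Round-robin: give each unfinished process up to `quantum` units per pass.
    rem = list(bt)
    t = 0
    total_wait = 0
    done = 0
    while done < n:
        for i in range(n):
            if rem[i] > 0:
                if rem[i] > quantum:
                    t += quantum
                    rem[i] -= quantum
                else:
                    t += rem[i]
                    rem[i] = 0
                    total_wait += t - bt[i]  # waiting = completion - burst
                    done += 1
    return total_wait / n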
# 1000 synthetic workloads, each with 10 random burst times in [1, 100)
X_train = [[random.randrange(1, 100) for _ in range(10)] for _ in range(1000)]
Y_train = [None for i in range(1000)]
for i in range(1000):
    n = 10
    proc = [p for p in range(1, n + 1)]  # one process id per burst-time column
    a = findavgTime_rr(proc, n, X_train[i], 2)
    b = findavgTime_fcfs(proc, n, X_train[i])
    if a < b:
        Y_train[i] = "rr"
    else:
        Y_train[i] = "fcfs"
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
classifier.fit(X_train, Y_train)
# Independent test set generated the same way
X_test = [[random.randrange(1, 100) for _ in range(10)] for _ in range(1000)]
Y_test = [None for i in range(1000)]
for i in range(1000):
    n = 10
    proc = [p for p in range(1, n + 1)]
    a = findavgTime_rr(proc, n, X_test[i], 2)
    b = findavgTime_fcfs(proc, n, X_test[i])
    if a < b:
        Y_test[i] = "rr"
    else:
        Y_test[i] = "fcfs"
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)
print(cm)
from sklearn.metrics import accuracy_score
print("Accuracy: " + str(accuracy_score(Y_test, y_pred)))
|
[
"[email protected]"
] | |
713b13337285a49c5be9aad3361e48e0a44c225b
|
b4d49c9aab4942bd1041577467295cc472525d70
|
/Similar sypmtoms/Utils/get_severity.py
|
4422c2b8ab2bb5e1136aefe79f16e141d86a91a1
|
[] |
no_license
|
smritika-sadhukhan/health_chatbot
|
92842985112a93e36b492390a4a35f5fe346a9cd
|
d6052773c4246953c84e85099c65de471477b76b
|
refs/heads/main
| 2023-05-02T17:06:12.913202 | 2021-05-24T10:29:37 | 2021-05-24T10:29:37 | 370,313,278 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,491 |
py
|
import csv
class GetSeverityDict:
def getSeverityDict(self) -> dict:
        '''
        [SUMMARY]: Extract the severity score for each symptom from the file
        Argument:
        Return:
            severityDictionary : dictionary with keys as symptoms and values as scores
        '''
severityDictionary = dict()
with open("Files/Symptom_severity.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
            try:
                for row in csv_reader:
                    severityDictionary.update({row[0]: int(row[1])})
            except:
                # skip malformed rows (e.g. a header or non-integer score)
                pass
return severityDictionary
    def calc_condition(self, exp: list, days=5) -> str:
        '''
        [SUMMARY]: Estimate how serious the condition is from the symptom scores and duration
        Argument:
            exp - list of symptoms
            days - number of days the symptoms have persisted (default 5)
        Return:
            str with a suggestion
        '''
        severityDictionary = self.getSeverityDict()
        score = 0
        for item in exp:
            try:
                score = score + severityDictionary[item]
            except KeyError:
                return "It is recommended to consult with the doctor"
        if (score * days) / (len(exp) + 1) > 13:
            return "You should take the consultation from doctor. "
        else:
            return "It might not be that bad but you should take precautions."
|
[
"[email protected]"
] | |
0324dd3dc62f88495cb95ea7424deef660c43536
|
e2e39726195c7bc075b9bd56e757acd136527d5c
|
/typings/vtkmodules/vtkIOXML/__init__.pyi
|
5a9a78845d01dba6a9f4391c9438656d0f13da23
|
[
"BSD-3-Clause"
] |
permissive
|
gen4438/vtk-python-stubs
|
a652272183d2d1ee48d4639e86bcffc1ac454af0
|
c9abd76362adf387af64ce5ddbd04c5d3bebe9da
|
refs/heads/main
| 2023-04-04T02:13:15.459241 | 2021-04-15T10:47:28 | 2021-04-15T10:53:59 | 358,224,363 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,682 |
pyi
|
"""
This type stub file was generated by pyright.
"""
import vtkmodules.vtkCommonCore as __vtkmodules_vtkCommonCore
import vtkmodules.vtkCommonExecutionModel as __vtkmodules_vtkCommonExecutionModel
import vtkmodules.vtkIOXMLParser as __vtkmodules_vtkIOXMLParser
from .vtkXMLReader import vtkXMLReader
from .vtkXMLDataReader import vtkXMLDataReader
from .vtkXMLUnstructuredDataReader import vtkXMLUnstructuredDataReader
from .vtkXMLPolyDataReader import vtkXMLPolyDataReader
from .vtkRTXMLPolyDataReader import vtkRTXMLPolyDataReader
from .vtkXMLCompositeDataReader import vtkXMLCompositeDataReader
from .vtkXMLWriter import vtkXMLWriter
from .vtkXMLCompositeDataWriter import vtkXMLCompositeDataWriter
from .vtkXMLDataObjectWriter import vtkXMLDataObjectWriter
from .vtkXMLDataSetWriter import vtkXMLDataSetWriter
from .vtkXMLFileReadTester import vtkXMLFileReadTester
from .vtkXMLGenericDataObjectReader import vtkXMLGenericDataObjectReader
from .vtkXMLHierarchicalBoxDataFileConverter import vtkXMLHierarchicalBoxDataFileConverter
from .vtkXMLUniformGridAMRReader import vtkXMLUniformGridAMRReader
from .vtkXMLHierarchicalBoxDataReader import vtkXMLHierarchicalBoxDataReader
from .vtkXMLUniformGridAMRWriter import vtkXMLUniformGridAMRWriter
from .vtkXMLHierarchicalBoxDataWriter import vtkXMLHierarchicalBoxDataWriter
from .vtkXMLMultiBlockDataReader import vtkXMLMultiBlockDataReader
from .vtkXMLMultiGroupDataReader import vtkXMLMultiGroupDataReader
from .vtkXMLHierarchicalDataReader import vtkXMLHierarchicalDataReader
from .vtkXMLHyperTreeGridReader import vtkXMLHyperTreeGridReader
from .vtkXMLHyperTreeGridWriter import vtkXMLHyperTreeGridWriter
from .vtkXMLStructuredDataReader import vtkXMLStructuredDataReader
from .vtkXMLImageDataReader import vtkXMLImageDataReader
from .vtkXMLStructuredDataWriter import vtkXMLStructuredDataWriter
from .vtkXMLImageDataWriter import vtkXMLImageDataWriter
from .vtkXMLMultiBlockDataWriter import vtkXMLMultiBlockDataWriter
from .vtkXMLPartitionedDataSetCollectionReader import vtkXMLPartitionedDataSetCollectionReader
from .vtkXMLPartitionedDataSetCollectionWriter import vtkXMLPartitionedDataSetCollectionWriter
from .vtkXMLPartitionedDataSetReader import vtkXMLPartitionedDataSetReader
from .vtkXMLPartitionedDataSetWriter import vtkXMLPartitionedDataSetWriter
from .vtkXMLPDataObjectReader import vtkXMLPDataObjectReader
from .vtkXMLPDataReader import vtkXMLPDataReader
from .vtkXMLPHyperTreeGridReader import vtkXMLPHyperTreeGridReader
from .vtkXMLPStructuredDataReader import vtkXMLPStructuredDataReader
from .vtkXMLPImageDataReader import vtkXMLPImageDataReader
from .vtkXMLUnstructuredDataWriter import vtkXMLUnstructuredDataWriter
from .vtkXMLPolyDataWriter import vtkXMLPolyDataWriter
from .vtkXMLPUnstructuredDataReader import vtkXMLPUnstructuredDataReader
from .vtkXMLPPolyDataReader import vtkXMLPPolyDataReader
from .vtkXMLPRectilinearGridReader import vtkXMLPRectilinearGridReader
from .vtkXMLPStructuredGridReader import vtkXMLPStructuredGridReader
from .vtkXMLPTableReader import vtkXMLPTableReader
from .vtkXMLPUnstructuredGridReader import vtkXMLPUnstructuredGridReader
from .vtkXMLRectilinearGridReader import vtkXMLRectilinearGridReader
from .vtkXMLRectilinearGridWriter import vtkXMLRectilinearGridWriter
from .vtkXMLStructuredGridReader import vtkXMLStructuredGridReader
from .vtkXMLStructuredGridWriter import vtkXMLStructuredGridWriter
from .vtkXMLTableReader import vtkXMLTableReader
from .vtkXMLTableWriter import vtkXMLTableWriter
from .vtkXMLUnstructuredGridReader import vtkXMLUnstructuredGridReader
from .vtkXMLUnstructuredGridWriter import vtkXMLUnstructuredGridWriter
__loader__ = ...
__spec__ = ...
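# Typical consumption of this stub (illustrative): type checkers resolve the
# names re-exported above, e.g.
#   from vtkmodules.vtkIOXML import vtkXMLPolyDataReader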
|
[
"[email protected]"
] | |
3c81c3f89b94937881d22e2ef06540a9c2752999
|
63626432d54708bb895c102c564847397a7ddb87
|
/ListenerApi2Events.py
|
beb93db675eca274fd5f2613f4e8eb19766f542d
|
[] |
no_license
|
mikaukora/robotframework-listener-api-events
|
fb6f4279a4a27a9f3351e2896e17684331033f53
|
28584b8073516df05f8f921778e04b6f78e71ba7
|
refs/heads/master
| 2022-12-13T16:18:48.771586 | 2020-09-07T06:54:24 | 2020-09-07T06:54:24 | 293,446,319 | 8 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,622 |
py
|
from websocket import create_connection
import json
class ListenerApi2Events():
ROBOT_LISTENER_API_VERSION = 2
def send_message(self, event, name=None, attrs=None, message=None, path=None):
ws = create_connection("ws://localhost:5678/")
info = {}
info["event"] = event
if name:
info["name"] = name
if path:
info["path"] = path
if message:
info.update(message)
if attrs:
info.update(attrs)
ws.send(json.dumps(info))
ws.close()
def start_suite(self, name, attrs):
self.send_message("start_suite", name, attrs)
def end_suite(self, name, attrs):
self.send_message("end_suite", name, attrs)
def start_test(self, name, attrs):
self.send_message("start_test", name, attrs)
def end_test(self, name, attrs):
self.send_message("end_test", name, attrs)
def start_keyword(self, name, attrs):
self.send_message("start_keyword", name, attrs)
def end_keyword(self, name, attrs):
self.send_message("end_keyword", name, attrs)
def log_message(self, message):
self.send_message("log_message", message=message)
def message(self, message):
self.send_message("message", message=message)
def library_import(self, name, attrs):
self.send_message("library_import", name, attrs)
def resource_import(self, name, attrs):
self.send_message("resource_import", name, attrs)
def variables_import(self, name, attrs):
self.send_message("variables_import", name, attrs)
def output_file(self, path):
self.send_message("output_file", path=path)
def log_file(self, path):
self.send_message("log_file", path=path)
def report_file(self, path):
self.send_message("report_file", path=path)
def xunit_file(self, path):
self.send_message("xunit_file", path=path)
def debug_file(self, path):
self.send_message("debug_file", path=path)
def close(self):
self.send_message("close")
if __name__ == "__main__":
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
clients = []
class SimpleServ(WebSocket):
def handleMessage(self):
for client in clients:
if client != self and not client.closed:
client.sendMessage(self.data)
def handleConnected(self):
clients.append(self)
def handleClose(self):
clients.remove(self)
server = SimpleWebSocketServer('', 5678, SimpleServ)
server.serveforever()
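# Usage sketch: run this module once to start the relay server above, then run
# Robot Framework with the listener attached (the suite path is a placeholder):
#   python ListenerApi2Events.py
#   robot --listener ListenerApi2Events.py tests/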
|
[
"[email protected]"
] | |
d7e949f538a3a780aa931750ee78faf99c3e1323
|
1a166165ab8287d01cbb377a13efdb5eff5dfef0
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_ddos_protection_plans_operations.py
|
d971927ea9d9c040f54655faee5d8e8cf1f6edd5
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
manoj0806/azure-sdk-for-python
|
7a14b202ff80f528abd068bf50334e91001a9686
|
aab999792db1132232b2f297c76800590a901142
|
refs/heads/master
| 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 |
MIT
| 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null |
UTF-8
|
Python
| false | false | 30,324 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
"""DdosProtectionPlansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosProtectionPlan"]
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_04_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DdosProtectionPlan"]
"""Update a DDoS protection plan tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the update DDoS protection plan resource tags.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_04_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
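# Usage sketch (not part of the generated code): this operations class is
# reached through NetworkManagementClient rather than constructed directly.
# The credential type, subscription id, and resource names are placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.ddos_protection_plans.begin_create_or_update(
#       "<resource-group>", "<plan-name>", {"location": "westus"})
#   print(poller.result().provisioning_state)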
|
[
"[email protected]"
] | |
327135d687d1b7e2e6a791d235e80b8e8c08dc69
|
14d8821a32bc8d1bade0e2d07652d811534862e8
|
/spring1718_assignment2_v2/cs231n/optim.py
|
1ef1d38c1279e65d091993d82c5703211ed74cc0
|
[] |
no_license
|
zouhanrui/cs231nAssignment_2
|
7973af1b8d2f4e90b0fa1cb62369c8b2d5b1271b
|
afb06d12c404f53711196c9bd049278e87daa62c
|
refs/heads/master
| 2020-03-22T05:05:31.452114 | 2018-07-05T16:39:29 | 2018-07-05T16:39:29 | 139,541,591 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,671 |
py
|
import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
w -= config['learning_rate'] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a
moving average of the gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
###########################################################################
# TODO: Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
###########################################################################
mu = config['momentum']
lr = config['learning_rate']
v = mu * v - lr * dw
w += v
next_w = w
###########################################################################
# END OF YOUR CODE #
###########################################################################
config['velocity'] = v
return next_w, config
def rmsprop(w, dw, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared
gradient values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('decay_rate', 0.99)
config.setdefault('epsilon', 1e-8)
config.setdefault('cache', np.zeros_like(w))
next_w = None
###########################################################################
# TODO: Implement the RMSprop update formula, storing the next value of w #
# in the next_w variable. Don't forget to update cache value stored in #
# config['cache']. #
###########################################################################
lr = config['learning_rate']
dr = config['decay_rate']
eps = config['epsilon']
cache = config['cache']
cache = dr * cache + (1 - dr) * (dw**2)
w += - lr * dw / (np.sqrt(cache) + eps)
next_w = w
config['cache'] = cache
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_w, config
def adam(w, dw, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-3)
config.setdefault('beta1', 0.9)
config.setdefault('beta2', 0.999)
config.setdefault('epsilon', 1e-8)
config.setdefault('m', np.zeros_like(w))
config.setdefault('v', np.zeros_like(w))
config.setdefault('t', 0)
next_w = None
###########################################################################
# TODO: Implement the Adam update formula, storing the next value of w in #
# the next_w variable. Don't forget to update the m, v, and t variables #
# stored in config. #
# #
# NOTE: In order to match the reference output, please modify t _before_ #
# using it in any calculations. #
###########################################################################
lr = config['learning_rate']
beta1 = config['beta1']
beta2 = config['beta2']
eps = config['epsilon']
m = config['m']
v = config['v']
t = config['t']
t += 1
m = beta1*m + (1-beta1)*dw
mt = m / (1-beta1**t)
v = beta2*v + (1-beta2)*(dw**2)
vt = v / (1-beta2**t)
w -= lr * mt / (np.sqrt(vt) + eps)
next_w = w
    config['m'] = m
    config['v'] = v
    config['t'] = t
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_w, config
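# Toy self-check (not part of the assignment API): a few Adam steps on the
# quadratic loss 0.5 * ||w||^2, whose gradient is w itself, should move w
# toward zero. The learning rate and step count below are illustrative only.
if __name__ == "__main__":
    w = 5.0 * np.ones(3)
    config = {'learning_rate': 1e-1}
    for _ in range(100):
        dw = w.copy()  # gradient of 0.5 * ||w||^2 at the current w
        w, config = adam(w, dw, config)
    print(w)  # entries should end up near zero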
|
[
"[email protected]"
] | |
a354f004938c6bbb37e415100f48ee081223cee6
|
a682419ca9862ff33b18a30a7599ad64f892e24d
|
/modules/soupsieve/css_match.py
|
b7ec67e95b731588b2df279b52772ba0b69095f3
|
[] |
no_license
|
lasanjin/corona-swe
|
1dd3449a973fa9ece0fc39b125235ea0b31a7069
|
ad21a7e717d666fa679aa5896e24138dcddfead4
|
refs/heads/master
| 2021-05-18T17:28:09.292155 | 2020-06-08T21:42:53 | 2020-06-08T21:42:53 | 251,337,604 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 52,243 |
py
|
"""CSS matcher."""
from datetime import datetime
from . import util
import re
from . import css_types as ct
import unicodedata
# Empty tag pattern (whitespace okay)
RE_NOT_EMPTY = re.compile('[^ \t\r\n\f]')
RE_NOT_WS = re.compile('[^ \t\r\n\f]+')
# Relationships
REL_PARENT = ' '
REL_CLOSE_PARENT = '>'
REL_SIBLING = '~'
REL_CLOSE_SIBLING = '+'
# Relationships for :has() (forward looking)
REL_HAS_PARENT = ': '
REL_HAS_CLOSE_PARENT = ':>'
REL_HAS_SIBLING = ':~'
REL_HAS_CLOSE_SIBLING = ':+'
NS_XHTML = 'http://www.w3.org/1999/xhtml'
NS_XML = 'http://www.w3.org/XML/1998/namespace'
DIR_FLAGS = ct.SEL_DIR_LTR | ct.SEL_DIR_RTL
RANGES = ct.SEL_IN_RANGE | ct.SEL_OUT_OF_RANGE
DIR_MAP = {
'ltr': ct.SEL_DIR_LTR,
'rtl': ct.SEL_DIR_RTL,
'auto': 0
}
RE_NUM = re.compile(r"^(?P<value>-?(?:[0-9]{1,}(\.[0-9]+)?|\.[0-9]+))$")
RE_TIME = re.compile(r'^(?P<hour>[0-9]{2}):(?P<minutes>[0-9]{2})$')
RE_MONTH = re.compile(r'^(?P<year>[0-9]{4,})-(?P<month>[0-9]{2})$')
RE_WEEK = re.compile(r'^(?P<year>[0-9]{4,})-W(?P<week>[0-9]{2})$')
RE_DATE = re.compile(r'^(?P<year>[0-9]{4,})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})$')
RE_DATETIME = re.compile(
r'^(?P<year>[0-9]{4,})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})T(?P<hour>[0-9]{2}):(?P<minutes>[0-9]{2})$'
)
RE_WILD_STRIP = re.compile(r'(?:(?:-\*-)(?:\*(?:-|$))*|-\*$)')
MONTHS_30 = (4, 6, 9, 11) # April, June, September, and November
FEB = 2
SHORT_MONTH = 30
LONG_MONTH = 31
FEB_MONTH = 28
FEB_LEAP_MONTH = 29
DAYS_IN_WEEK = 7
class _FakeParent(object):
"""
Fake parent class.
When we have a fragment with no `BeautifulSoup` document object,
we can't evaluate `nth` selectors properly. Create a temporary
fake parent so we can traverse the root element as a child.
"""
def __init__(self, element):
"""Initialize."""
self.contents = [element]
def __len__(self):
"""Length."""
return len(self.contents)
class _DocumentNav(object):
"""Navigate a Beautiful Soup document."""
@classmethod
def assert_valid_input(cls, tag):
"""Check if valid input tag or document."""
# Fail on unexpected types.
if not cls.is_tag(tag):
raise TypeError("Expected a BeautifulSoup 'Tag', but instead recieved type {}".format(type(tag)))
@staticmethod
def is_doc(obj):
"""Is `BeautifulSoup` object."""
from modules import bs4
return isinstance(obj, bs4.BeautifulSoup)
@staticmethod
def is_tag(obj):
"""Is tag."""
from modules import bs4
return isinstance(obj, bs4.Tag)
@staticmethod
def is_declaration(obj): # pragma: no cover
"""Is declaration."""
from modules import bs4
return isinstance(obj, bs4.Declaration)
@staticmethod
def is_cdata(obj):
"""Is CDATA."""
from modules import bs4
return isinstance(obj, bs4.CData)
@staticmethod
def is_processing_instruction(obj): # pragma: no cover
"""Is processing instruction."""
from modules import bs4
return isinstance(obj, bs4.ProcessingInstruction)
@staticmethod
def is_navigable_string(obj):
"""Is navigable string."""
from modules import bs4
return isinstance(obj, bs4.NavigableString)
@staticmethod
def is_special_string(obj):
"""Is special string."""
from modules import bs4
return isinstance(obj, (bs4.Comment, bs4.Declaration, bs4.CData, bs4.ProcessingInstruction, bs4.Doctype))
@classmethod
def is_content_string(cls, obj):
"""Check if node is content string."""
return cls.is_navigable_string(obj) and not cls.is_special_string(obj)
@staticmethod
def create_fake_parent(el):
"""Create fake parent for a given element."""
return _FakeParent(el)
@staticmethod
def is_xml_tree(el):
"""Check if element (or document) is from a XML tree."""
return el._is_xml
def is_iframe(self, el):
"""Check if element is an `iframe`."""
return ((el.name if self.is_xml_tree(el) else util.lower(el.name)) == 'iframe') and self.is_html_tag(el)
def is_root(self, el):
"""
Return whether element is a root element.
We check that the element is the root of the tree (which we have already pre-calculated),
and we check if it is the root element under an `iframe`.
"""
root = self.root and self.root is el
if not root:
parent = self.get_parent(el)
root = parent is not None and self.is_html and self.is_iframe(parent)
return root
def get_contents(self, el, no_iframe=False):
"""Get contents or contents in reverse."""
if not no_iframe or not self.is_iframe(el):
for content in el.contents:
yield content
def get_children(self, el, start=None, reverse=False, tags=True, no_iframe=False):
"""Get children."""
if not no_iframe or not self.is_iframe(el):
last = len(el.contents) - 1
if start is None:
index = last if reverse else 0
else:
index = start
end = -1 if reverse else last + 1
incr = -1 if reverse else 1
if 0 <= index <= last:
while index != end:
node = el.contents[index]
index += incr
if not tags or self.is_tag(node):
yield node
def get_descendants(self, el, tags=True, no_iframe=False):
"""Get descendants."""
if not no_iframe or not self.is_iframe(el):
next_good = None
for child in el.descendants:
if next_good is not None:
if child is not next_good:
continue
next_good = None
is_tag = self.is_tag(child)
if no_iframe and is_tag and self.is_iframe(child):
if child.next_sibling is not None:
next_good = child.next_sibling
else:
last_child = child
while self.is_tag(last_child) and last_child.contents:
last_child = last_child.contents[-1]
next_good = last_child.next_element
yield child
if next_good is None:
break
# Coverage isn't seeing this even though it's executed
continue # pragma: no cover
if not tags or is_tag:
yield child
def get_parent(self, el, no_iframe=False):
"""Get parent."""
parent = el.parent
if no_iframe and parent is not None and self.is_iframe(parent):
parent = None
return parent
@staticmethod
def get_tag_name(el):
"""Get tag."""
return el.name
@staticmethod
def get_prefix_name(el):
"""Get prefix."""
return el.prefix
@staticmethod
def get_uri(el):
"""Get namespace `URI`."""
return el.namespace
@classmethod
def get_next(cls, el, tags=True):
"""Get next sibling tag."""
sibling = el.next_sibling
while tags and not cls.is_tag(sibling) and sibling is not None:
sibling = sibling.next_sibling
return sibling
@classmethod
def get_previous(cls, el, tags=True):
"""Get previous sibling tag."""
sibling = el.previous_sibling
while tags and not cls.is_tag(sibling) and sibling is not None:
sibling = sibling.previous_sibling
return sibling
@staticmethod
def has_html_ns(el):
"""
Check if element has an HTML namespace.
This is a bit different than whether a element is treated as having an HTML namespace,
like we do in the case of `is_html_tag`.
"""
ns = getattr(el, 'namespace') if el else None
return ns and ns == NS_XHTML
@staticmethod
def split_namespace(el, attr_name):
"""Return namespace and attribute name without the prefix."""
return getattr(attr_name, 'namespace', None), getattr(attr_name, 'name', None)
@staticmethod
def get_attribute_by_name(el, name, default=None):
"""Get attribute by name."""
value = default
if el._is_xml:
try:
value = el.attrs[name]
except KeyError:
pass
else:
for k, v in el.attrs.items():
if util.lower(k) == name:
value = v
break
return value
@staticmethod
def iter_attributes(el):
"""Iterate attributes."""
for k, v in el.attrs.items():
yield k, v
@classmethod
def get_classes(cls, el):
"""Get classes."""
classes = cls.get_attribute_by_name(el, 'class', [])
if isinstance(classes, str):
classes = RE_NOT_WS.findall(classes)
return classes
def get_text(self, el, no_iframe=False):
"""Get text."""
return ''.join(
[node for node in self.get_descendants(el, tags=False, no_iframe=no_iframe) if self.is_content_string(node)]
)
class Inputs(object):
"""Class for parsing and validating input items."""
@staticmethod
def validate_day(year, month, day):
"""Validate day."""
max_days = LONG_MONTH
if month == FEB:
max_days = FEB_LEAP_MONTH if ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0) else FEB_MONTH
elif month in MONTHS_30:
max_days = SHORT_MONTH
return 1 <= day <= max_days
@staticmethod
def validate_week(year, week):
"""Validate week."""
max_week = datetime.strptime("{}-{}-{}".format(12, 31, year), "%m-%d-%Y").isocalendar()[1]
if max_week == 1:
max_week = 53
return 1 <= week <= max_week
@staticmethod
def validate_month(month):
"""Validate month."""
return 1 <= month <= 12
@staticmethod
def validate_year(year):
"""Validate year."""
return 1 <= year
@staticmethod
def validate_hour(hour):
"""Validate hour."""
return 0 <= hour <= 23
@staticmethod
def validate_minutes(minutes):
"""Validate minutes."""
return 0 <= minutes <= 59
@classmethod
def parse_value(cls, itype, value):
"""Parse the input value."""
parsed = None
if itype == "date":
m = RE_DATE.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
day = int(m.group('day'), 10)
if cls.validate_year(year) and cls.validate_month(month) and cls.validate_day(year, month, day):
parsed = (year, month, day)
elif itype == "month":
m = RE_MONTH.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
if cls.validate_year(year) and cls.validate_month(month):
parsed = (year, month)
elif itype == "week":
m = RE_WEEK.match(value)
if m:
year = int(m.group('year'), 10)
week = int(m.group('week'), 10)
if cls.validate_year(year) and cls.validate_week(year, week):
parsed = (year, week)
elif itype == "time":
m = RE_TIME.match(value)
if m:
hour = int(m.group('hour'), 10)
minutes = int(m.group('minutes'), 10)
if cls.validate_hour(hour) and cls.validate_minutes(minutes):
parsed = (hour, minutes)
elif itype == "datetime-local":
m = RE_DATETIME.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
day = int(m.group('day'), 10)
hour = int(m.group('hour'), 10)
minutes = int(m.group('minutes'), 10)
if (
cls.validate_year(year) and cls.validate_month(month) and cls.validate_day(year, month, day) and
cls.validate_hour(hour) and cls.validate_minutes(minutes)
):
parsed = (year, month, day, hour, minutes)
elif itype in ("number", "range"):
m = RE_NUM.match(value)
if m:
parsed = float(m.group('value'))
return parsed
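# A small illustration of how `Inputs.parse_value` normalizes values (a sketch;
# the literal inputs are hypothetical and assume the HTML input value formats
# matched by the regular expressions referenced above):
#
#     Inputs.parse_value('date', '2004-02-29')  # -> (2004, 2, 29); 2004 is a leap year
#     Inputs.parse_value('date', '2003-02-29')  # -> None; invalid day for February 2003
#     Inputs.parse_value('time', '23:59')       # -> (23, 59)
#     Inputs.parse_value('number', '-1.5')      # -> -1.5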
class _Match(object):
"""Perform CSS matching."""
def __init__(self, selectors, scope, namespaces, flags):
"""Initialize."""
self.assert_valid_input(scope)
self.tag = scope
self.cached_meta_lang = []
self.cached_default_forms = []
self.cached_indeterminate_forms = []
self.selectors = selectors
self.namespaces = {} if namespaces is None else namespaces
self.flags = flags
self.iframe_restrict = False
# Find the root element for the whole tree
doc = scope
parent = self.get_parent(doc)
while parent:
doc = parent
parent = self.get_parent(doc)
root = None
if not self.is_doc(doc):
root = doc
else:
for child in self.get_children(doc):
root = child
break
self.root = root
self.scope = scope if scope is not doc else root
self.has_html_namespace = self.has_html_ns(root)
# A document can be both XML and HTML (XHTML)
self.is_xml = self.is_xml_tree(doc)
self.is_html = not self.is_xml or self.has_html_namespace
def supports_namespaces(self):
"""Check if namespaces are supported in the HTML type."""
return self.is_xml or self.has_html_namespace
def get_tag_ns(self, el):
"""Get tag namespace."""
if self.supports_namespaces():
namespace = ''
ns = self.get_uri(el)
if ns:
namespace = ns
else:
namespace = NS_XHTML
return namespace
def is_html_tag(self, el):
"""Check if tag is in HTML namespace."""
return self.get_tag_ns(el) == NS_XHTML
def get_tag(self, el):
"""Get tag."""
name = self.get_tag_name(el)
return util.lower(name) if name is not None and not self.is_xml else name
def get_prefix(self, el):
"""Get prefix."""
prefix = self.get_prefix_name(el)
return util.lower(prefix) if prefix is not None and not self.is_xml else prefix
def find_bidi(self, el):
"""Get directionality from element text."""
for node in self.get_children(el, tags=False):
# Analyze child text nodes
if self.is_tag(node):
# Avoid analyzing certain elements specified in the specification.
direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
if (
self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or
not self.is_html_tag(node) or
direction is not None
):
continue # pragma: no cover
# Check directionality of this node's text
value = self.find_bidi(node)
if value is not None:
return value
# Direction could not be determined
continue # pragma: no cover
# Skip `doctype` comments, etc.
if self.is_special_string(node):
continue
# Analyze text nodes for directionality.
for c in node:
bidi = unicodedata.bidirectional(c)
if bidi in ('AL', 'R', 'L'):
return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
return None
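    # Note on the `unicodedata.bidirectional` categories checked above: 'L' is a
    # strong left-to-right character (e.g. Latin letters), while 'R' and 'AL' are
    # strong right-to-left characters (e.g. Hebrew and Arabic letters).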
def extended_language_filter(self, lang_range, lang_tag):
"""Filter the language tags."""
match = True
lang_range = RE_WILD_STRIP.sub('-', lang_range).lower()
ranges = lang_range.split('-')
subtags = lang_tag.lower().split('-')
length = len(ranges)
rindex = 0
sindex = 0
r = ranges[rindex]
s = subtags[sindex]
# Primary tag needs to match
if r != '*' and r != s:
match = False
rindex += 1
sindex += 1
# Match until we run out of ranges
while match and rindex < length:
r = ranges[rindex]
try:
s = subtags[sindex]
except IndexError:
# Ran out of subtags,
# but we still have ranges
match = False
continue
# Empty range
if not r:
match = False
continue
# Matched range
elif s == r:
rindex += 1
# Implicit wildcard cannot match
# singletons
elif len(s) == 1:
match = False
continue
# Implicitly matched, so grab next subtag
sindex += 1
return match
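    # A sketch of the extended filtering above, mirroring RFC 4647's own examples
    # (the range/tag pairs are illustrative, not taken from this code base):
    #
    #     extended_language_filter('de-*-DE', 'de-Latn-DE')  # True
    #     extended_language_filter('de-*-DE', 'de-x-DE')     # False; 'x' is a singleton
    #     extended_language_filter('*', 'en-US')             # True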
def match_attribute_name(self, el, attr, prefix):
"""Match attribute name and return value if it exists."""
value = None
if self.supports_namespaces():
value = None
# If we have not defined namespaces, we can't very well find them, so don't bother trying.
if prefix:
ns = self.namespaces.get(prefix)
if ns is None and prefix != '*':
return None
else:
ns = None
for k, v in self.iter_attributes(el):
# Get attribute parts
namespace, name = self.split_namespace(el, k)
                # Can't match a prefixed attribute as we haven't specified a prefix to match.
                # Try to match it normally as a whole `p:a`, as the selector may be trying `p\:a`.
if ns is None:
if (self.is_xml and attr == k) or (not self.is_xml and util.lower(attr) == util.lower(k)):
value = v
break
# Coverage is not finding this even though it is executed.
# Adding a print statement before this (and erasing coverage) causes coverage to find the line.
# Ignore the false positive message.
continue # pragma: no cover
# We can't match our desired prefix attribute as the attribute doesn't have a prefix
if namespace is None or ns != namespace and prefix != '*':
continue
# The attribute doesn't match.
if (util.lower(attr) != util.lower(name)) if not self.is_xml else (attr != name):
continue
value = v
break
else:
for k, v in self.iter_attributes(el):
if util.lower(attr) != util.lower(k):
continue
value = v
break
return value
def match_namespace(self, el, tag):
"""Match the namespace of the element."""
match = True
namespace = self.get_tag_ns(el)
default_namespace = self.namespaces.get('')
tag_ns = '' if tag.prefix is None else self.namespaces.get(tag.prefix, None)
# We must match the default namespace if one is not provided
if tag.prefix is None and (default_namespace is not None and namespace != default_namespace):
match = False
# If we specified `|tag`, we must not have a namespace.
elif (tag.prefix is not None and tag.prefix == '' and namespace):
match = False
# Verify prefix matches
elif (
tag.prefix and
tag.prefix != '*' and (tag_ns is None or namespace != tag_ns)
):
match = False
return match
def match_attributes(self, el, attributes):
"""Match attributes."""
match = True
if attributes:
for a in attributes:
value = self.match_attribute_name(el, a.attribute, a.prefix)
pattern = a.xml_type_pattern if self.is_xml and a.xml_type_pattern else a.pattern
if isinstance(value, list):
value = ' '.join(value)
if value is None:
match = False
break
elif pattern is None:
continue
elif pattern.match(value) is None:
match = False
break
return match
def match_tagname(self, el, tag):
"""Match tag name."""
name = (util.lower(tag.name) if not self.is_xml and tag.name is not None else tag.name)
return not (
name is not None and
name not in (self.get_tag(el), '*')
)
def match_tag(self, el, tag):
"""Match the tag."""
match = True
if tag is not None:
# Verify namespace
if not self.match_namespace(el, tag):
match = False
if not self.match_tagname(el, tag):
match = False
return match
def match_past_relations(self, el, relation):
"""Match past relationship."""
found = False
if relation[0].rel_type == REL_PARENT:
parent = self.get_parent(el, no_iframe=self.iframe_restrict)
while not found and parent:
found = self.match_selectors(parent, relation)
parent = self.get_parent(parent, no_iframe=self.iframe_restrict)
elif relation[0].rel_type == REL_CLOSE_PARENT:
parent = self.get_parent(el, no_iframe=self.iframe_restrict)
if parent:
found = self.match_selectors(parent, relation)
elif relation[0].rel_type == REL_SIBLING:
sibling = self.get_previous(el)
while not found and sibling:
found = self.match_selectors(sibling, relation)
sibling = self.get_previous(sibling)
elif relation[0].rel_type == REL_CLOSE_SIBLING:
sibling = self.get_previous(el)
if sibling and self.is_tag(sibling):
found = self.match_selectors(sibling, relation)
return found
def match_future_child(self, parent, relation, recursive=False):
"""Match future child."""
match = False
children = self.get_descendants if recursive else self.get_children
for child in children(parent, no_iframe=self.iframe_restrict):
match = self.match_selectors(child, relation)
if match:
break
return match
def match_future_relations(self, el, relation):
"""Match future relationship."""
found = False
if relation[0].rel_type == REL_HAS_PARENT:
found = self.match_future_child(el, relation, True)
elif relation[0].rel_type == REL_HAS_CLOSE_PARENT:
found = self.match_future_child(el, relation)
elif relation[0].rel_type == REL_HAS_SIBLING:
sibling = self.get_next(el)
while not found and sibling:
found = self.match_selectors(sibling, relation)
sibling = self.get_next(sibling)
elif relation[0].rel_type == REL_HAS_CLOSE_SIBLING:
sibling = self.get_next(el)
if sibling and self.is_tag(sibling):
found = self.match_selectors(sibling, relation)
return found
def match_relations(self, el, relation):
"""Match relationship to other elements."""
found = False
if relation[0].rel_type.startswith(':'):
found = self.match_future_relations(el, relation)
else:
found = self.match_past_relations(el, relation)
return found
def match_id(self, el, ids):
"""Match element's ID."""
found = True
for i in ids:
if i != self.get_attribute_by_name(el, 'id', ''):
found = False
break
return found
def match_classes(self, el, classes):
"""Match element's classes."""
current_classes = self.get_classes(el)
found = True
for c in classes:
if c not in current_classes:
found = False
break
return found
def match_root(self, el):
"""Match element as root."""
is_root = self.is_root(el)
if is_root:
sibling = self.get_previous(el, tags=False)
while is_root and sibling is not None:
if (
self.is_tag(sibling) or (self.is_content_string(sibling) and sibling.strip()) or
self.is_cdata(sibling)
):
is_root = False
else:
sibling = self.get_previous(sibling, tags=False)
if is_root:
sibling = self.get_next(el, tags=False)
while is_root and sibling is not None:
if (
self.is_tag(sibling) or (self.is_content_string(sibling) and sibling.strip()) or
self.is_cdata(sibling)
):
is_root = False
else:
sibling = self.get_next(sibling, tags=False)
return is_root
def match_scope(self, el):
"""Match element as scope."""
return self.scope is el
def match_nth_tag_type(self, el, child):
"""Match tag type for `nth` matches."""
        return (
(self.get_tag(child) == self.get_tag(el)) and
(self.get_tag_ns(child) == self.get_tag_ns(el))
)
def match_nth(self, el, nth):
"""Match `nth` elements."""
matched = True
for n in nth:
matched = False
if n.selectors and not self.match_selectors(el, n.selectors):
break
parent = self.get_parent(el)
if parent is None:
parent = self.create_fake_parent(el)
last = n.last
last_index = len(parent) - 1
index = last_index if last else 0
relative_index = 0
a = n.a
b = n.b
var = n.n
count = 0
count_incr = 1
factor = -1 if last else 1
idx = last_idx = a * count + b if var else a
# We can only adjust bounds within a variable index
if var:
# Abort if our nth index is out of bounds and only getting further out of bounds as we increment.
# Otherwise, increment to try to get in bounds.
adjust = None
while idx < 1 or idx > last_index:
if idx < 0:
diff_low = 0 - idx
if adjust is not None and adjust == 1:
break
adjust = -1
count += count_incr
idx = last_idx = a * count + b if var else a
diff = 0 - idx
if diff >= diff_low:
break
else:
diff_high = idx - last_index
if adjust is not None and adjust == -1:
break
adjust = 1
count += count_incr
idx = last_idx = a * count + b if var else a
diff = idx - last_index
if diff >= diff_high:
break
diff_high = diff
# If a < 0, our count is working backwards, so floor the index by increasing the count.
# Find the count that yields the lowest, in bound value and use that.
# Lastly reverse count increment so that we'll increase our index.
lowest = count
if a < 0:
while idx >= 1:
lowest = count
count += count_incr
idx = last_idx = a * count + b if var else a
count_incr = -1
count = lowest
idx = last_idx = a * count + b if var else a
# Evaluate elements while our calculated nth index is still in range
while 1 <= idx <= last_index + 1:
child = None
# Evaluate while our child index is still in range.
for child in self.get_children(parent, start=index, reverse=factor < 0, tags=False):
index += factor
if not self.is_tag(child):
continue
# Handle `of S` in `nth-child`
if n.selectors and not self.match_selectors(child, n.selectors):
continue
# Handle `of-type`
if n.of_type and not self.match_nth_tag_type(el, child):
continue
relative_index += 1
if relative_index == idx:
if child is el:
matched = True
else:
break
if child is el:
break
if child is el:
break
last_idx = idx
count += count_incr
if count < 0:
# Count is counting down and has now ventured into invalid territory.
break
idx = a * count + b if var else a
if last_idx == idx:
break
if not matched:
break
return matched
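    # Illustration of the `an+b` bookkeeping above (hypothetical selector): for
    # `:nth-child(2n+1)` the compiled pattern gives a=2, b=1 with a variable index,
    # so `idx` walks 1, 3, 5, ... and evaluation stops once `idx` leaves the range
    # [1, last_index + 1].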
def match_empty(self, el):
"""Check if element is empty (if requested)."""
is_empty = True
for child in self.get_children(el, tags=False):
if self.is_tag(child):
is_empty = False
break
elif self.is_content_string(child) and RE_NOT_EMPTY.search(child):
is_empty = False
break
return is_empty
def match_subselectors(self, el, selectors):
"""Match selectors."""
match = True
for sel in selectors:
if not self.match_selectors(el, sel):
match = False
return match
def match_contains(self, el, contains):
"""Match element if it contains text."""
match = True
content = None
for contain_list in contains:
if content is None:
content = self.get_text(el, no_iframe=self.is_html)
found = False
for text in contain_list.text:
if text in content:
found = True
break
if not found:
match = False
return match
def match_default(self, el):
"""Match default."""
match = False
# Find this input's form
form = None
parent = self.get_parent(el, no_iframe=True)
while parent and form is None:
if self.get_tag(parent) == 'form' and self.is_html_tag(parent):
form = parent
else:
parent = self.get_parent(parent, no_iframe=True)
# Look in form cache to see if we've already located its default button
found_form = False
for f, t in self.cached_default_forms:
if f is form:
found_form = True
if t is el:
match = True
break
# We didn't have the form cached, so look for its default button
if not found_form:
for child in self.get_descendants(form, no_iframe=True):
name = self.get_tag(child)
# Can't do nested forms (haven't figured out why we never hit this)
if name == 'form': # pragma: no cover
break
if name in ('input', 'button'):
v = self.get_attribute_by_name(child, 'type', '')
if v and util.lower(v) == 'submit':
self.cached_default_forms.append([form, child])
if el is child:
match = True
break
return match
def match_indeterminate(self, el):
"""Match default."""
match = False
name = self.get_attribute_by_name(el, 'name')
def get_parent_form(el):
"""Find this input's form."""
form = None
parent = self.get_parent(el, no_iframe=True)
while form is None:
if self.get_tag(parent) == 'form' and self.is_html_tag(parent):
form = parent
break
last_parent = parent
parent = self.get_parent(parent, no_iframe=True)
if parent is None:
form = last_parent
break
return form
form = get_parent_form(el)
# Look in form cache to see if we've already evaluated that its fellow radio buttons are indeterminate
found_form = False
for f, n, i in self.cached_indeterminate_forms:
if f is form and n == name:
found_form = True
if i is True:
match = True
break
# We didn't have the form cached, so validate that the radio button is indeterminate
if not found_form:
checked = False
for child in self.get_descendants(form, no_iframe=True):
if child is el:
continue
tag_name = self.get_tag(child)
if tag_name == 'input':
is_radio = False
check = False
has_name = False
for k, v in self.iter_attributes(child):
if util.lower(k) == 'type' and util.lower(v) == 'radio':
is_radio = True
elif util.lower(k) == 'name' and v == name:
has_name = True
elif util.lower(k) == 'checked':
check = True
if is_radio and check and has_name and get_parent_form(child) is form:
checked = True
break
if checked:
break
if not checked:
match = True
self.cached_indeterminate_forms.append([form, name, match])
return match
def match_lang(self, el, langs):
"""Match languages."""
match = False
has_ns = self.supports_namespaces()
root = self.root
has_html_namespace = self.has_html_namespace
# Walk parents looking for `lang` (HTML) or `xml:lang` XML property.
parent = el
found_lang = None
last = None
while not found_lang:
has_html_ns = self.has_html_ns(parent)
for k, v in self.iter_attributes(parent):
attr_ns, attr = self.split_namespace(parent, k)
if (
((not has_ns or has_html_ns) and (util.lower(k) if not self.is_xml else k) == 'lang') or
(
has_ns and not has_html_ns and attr_ns == NS_XML and
(util.lower(attr) if not self.is_xml and attr is not None else attr) == 'lang'
)
):
found_lang = v
break
last = parent
parent = self.get_parent(parent, no_iframe=self.is_html)
if parent is None:
root = last
has_html_namespace = self.has_html_ns(root)
parent = last
break
# Use cached meta language.
if not found_lang and self.cached_meta_lang:
for cache in self.cached_meta_lang:
if root is cache[0]:
found_lang = cache[1]
# If we couldn't find a language, and the document is HTML, look to meta to determine language.
if found_lang is None and (not self.is_xml or (has_html_namespace and root.name == 'html')):
# Find head
found = False
for tag in ('html', 'head'):
found = False
for child in self.get_children(parent, no_iframe=self.is_html):
if self.get_tag(child) == tag and self.is_html_tag(child):
found = True
parent = child
break
if not found: # pragma: no cover
break
# Search meta tags
if found:
for child in parent:
if self.is_tag(child) and self.get_tag(child) == 'meta' and self.is_html_tag(parent):
c_lang = False
content = None
for k, v in self.iter_attributes(child):
if util.lower(k) == 'http-equiv' and util.lower(v) == 'content-language':
c_lang = True
if util.lower(k) == 'content':
content = v
if c_lang and content:
found_lang = content
self.cached_meta_lang.append((root, found_lang))
break
if found_lang:
break
if not found_lang:
self.cached_meta_lang.append((root, False))
# If we determined a language, compare.
if found_lang:
for patterns in langs:
match = False
for pattern in patterns:
if self.extended_language_filter(pattern, found_lang):
match = True
if not match:
break
return match
def match_dir(self, el, directionality):
"""Check directionality."""
# If we have to match both left and right, we can't match either.
if directionality & ct.SEL_DIR_LTR and directionality & ct.SEL_DIR_RTL:
return False
if el is None or not self.is_html_tag(el):
return False
# Element has defined direction of left to right or right to left
direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(el, 'dir', '')), None)
if direction not in (None, 0):
return direction == directionality
# Element is the document element (the root) and no direction assigned, assume left to right.
is_root = self.is_root(el)
if is_root and direction is None:
return ct.SEL_DIR_LTR == directionality
        # If `input[type=tel]` and no direction is assigned, assume left to right.
name = self.get_tag(el)
is_input = name == 'input'
is_textarea = name == 'textarea'
is_bdi = name == 'bdi'
itype = util.lower(self.get_attribute_by_name(el, 'type', '')) if is_input else ''
if is_input and itype == 'tel' and direction is None:
return ct.SEL_DIR_LTR == directionality
# Auto handling for text inputs
if ((is_input and itype in ('text', 'search', 'tel', 'url', 'email')) or is_textarea) and direction == 0:
if is_textarea:
value = []
for node in self.get_contents(el, no_iframe=True):
if self.is_content_string(node):
value.append(node)
value = ''.join(value)
else:
value = self.get_attribute_by_name(el, 'value', '')
if value:
for c in value:
bidi = unicodedata.bidirectional(c)
if bidi in ('AL', 'R', 'L'):
direction = ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
return direction == directionality
# Assume left to right
return ct.SEL_DIR_LTR == directionality
elif is_root:
return ct.SEL_DIR_LTR == directionality
return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
# Auto handling for `bdi` and other non text inputs.
if (is_bdi and direction is None) or direction == 0:
direction = self.find_bidi(el)
if direction is not None:
return direction == directionality
elif is_root:
return ct.SEL_DIR_LTR == directionality
return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
# Match parents direction
return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
def match_range(self, el, condition):
"""
Match range.
Behavior is modeled after what we see in browsers. Browsers seem to evaluate
if the value is out of range, and if not, it is in range. So a missing value
        will not evaluate as out of range; therefore, the value is in range. Personally, I
        feel like this should evaluate as neither in nor out of range.
"""
out_of_range = False
itype = util.lower(self.get_attribute_by_name(el, 'type'))
mn = self.get_attribute_by_name(el, 'min', None)
if mn is not None:
mn = Inputs.parse_value(itype, mn)
mx = self.get_attribute_by_name(el, 'max', None)
if mx is not None:
mx = Inputs.parse_value(itype, mx)
# There is no valid min or max, so we cannot evaluate a range
if mn is None and mx is None:
return False
value = self.get_attribute_by_name(el, 'value', None)
if value is not None:
value = Inputs.parse_value(itype, value)
if value is not None:
if itype in ("date", "datetime-local", "month", "week", "number", "range"):
if mn is not None and value < mn:
out_of_range = True
if not out_of_range and mx is not None and value > mx:
out_of_range = True
elif itype == "time":
if mn is not None and mx is not None and mn > mx:
# Time is periodic, so this is a reversed/discontinuous range
if value < mn and value > mx:
out_of_range = True
else:
if mn is not None and value < mn:
out_of_range = True
if not out_of_range and mx is not None and value > mx:
out_of_range = True
return not out_of_range if condition & ct.SEL_IN_RANGE else out_of_range
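    # Example of the reversed time range handled above (hypothetical values):
    # with min="22:00" and max="02:00" the range wraps around midnight, so a
    # value of "23:30" is in range while "12:00" is out of range.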
def match_defined(self, el):
"""
Match defined.
`:defined` is related to custom elements in a browser.
- If the document is XML (not XHTML), all tags will match.
- Tags that are not custom (don't have a hyphen) are marked defined.
        - If the tag has a prefix (with or without a namespace), it will not match.
        This of course requires the parser to provide us with the proper prefix and namespace info,
if it doesn't, there is nothing we can do.
"""
name = self.get_tag(el)
return (
name.find('-') == -1 or
name.find(':') != -1 or
self.get_prefix(el) is not None
)
def match_placeholder_shown(self, el):
"""
Match placeholder shown according to HTML spec.
        - Text areas should be checked for whether they have content. A single newline does not count as content.
"""
match = False
content = self.get_text(el)
if content in ('', '\n'):
match = True
return match
def match_selectors(self, el, selectors):
"""Check if element matches one of the selectors."""
match = False
is_not = selectors.is_not
is_html = selectors.is_html
        # Internal selector lists that use the HTML flag will automatically get the `html` namespace.
if is_html:
namespaces = self.namespaces
iframe_restrict = self.iframe_restrict
self.namespaces = {'html': NS_XHTML}
self.iframe_restrict = True
if not is_html or self.is_html:
for selector in selectors:
match = is_not
                # We have an un-matchable situation (like `:focus`, as you can't focus an element in this environment)
if isinstance(selector, ct.SelectorNull):
continue
# Verify tag matches
if not self.match_tag(el, selector.tag):
continue
# Verify tag is defined
if selector.flags & ct.SEL_DEFINED and not self.match_defined(el):
continue
# Verify element is root
if selector.flags & ct.SEL_ROOT and not self.match_root(el):
continue
# Verify element is scope
if selector.flags & ct.SEL_SCOPE and not self.match_scope(el):
continue
# Verify element has placeholder shown
if selector.flags & ct.SEL_PLACEHOLDER_SHOWN and not self.match_placeholder_shown(el):
continue
# Verify `nth` matches
if not self.match_nth(el, selector.nth):
continue
if selector.flags & ct.SEL_EMPTY and not self.match_empty(el):
continue
# Verify id matches
if selector.ids and not self.match_id(el, selector.ids):
continue
# Verify classes match
if selector.classes and not self.match_classes(el, selector.classes):
continue
# Verify attribute(s) match
if not self.match_attributes(el, selector.attributes):
continue
# Verify ranges
if selector.flags & RANGES and not self.match_range(el, selector.flags & RANGES):
continue
# Verify language patterns
if selector.lang and not self.match_lang(el, selector.lang):
continue
# Verify pseudo selector patterns
if selector.selectors and not self.match_subselectors(el, selector.selectors):
continue
# Verify relationship selectors
if selector.relation and not self.match_relations(el, selector.relation):
continue
# Validate that the current default selector match corresponds to the first submit button in the form
if selector.flags & ct.SEL_DEFAULT and not self.match_default(el):
continue
# Validate that the unset radio button is among radio buttons with the same name in a form that are
# also not set.
if selector.flags & ct.SEL_INDETERMINATE and not self.match_indeterminate(el):
continue
# Validate element directionality
if selector.flags & DIR_FLAGS and not self.match_dir(el, selector.flags & DIR_FLAGS):
continue
# Validate that the tag contains the specified text.
if not self.match_contains(el, selector.contains):
continue
match = not is_not
break
# Restore actual namespaces being used for external selector lists
if is_html:
self.namespaces = namespaces
self.iframe_restrict = iframe_restrict
return match
def select(self, limit=0):
"""Match all tags under the targeted tag."""
if limit < 1:
limit = None
for child in self.get_descendants(self.tag):
if self.match(child):
yield child
if limit is not None:
limit -= 1
if limit < 1:
break
def closest(self):
"""Match closest ancestor."""
current = self.tag
closest = None
while closest is None and current is not None:
if self.match(current):
closest = current
else:
current = self.get_parent(current)
return closest
def filter(self): # noqa A001
"""Filter tag's children."""
return [tag for tag in self.get_contents(self.tag) if not self.is_navigable_string(tag) and self.match(tag)]
def match(self, el):
"""Match."""
return not self.is_doc(el) and self.is_tag(el) and self.match_selectors(el, self.selectors)
class CSSMatch(_DocumentNav, _Match):
"""The Beautiful Soup CSS match class."""
class SoupSieve(ct.Immutable):
"""Compiled Soup Sieve selector matching object."""
__slots__ = ("pattern", "selectors", "namespaces", "custom", "flags", "_hash")
def __init__(self, pattern, selectors, namespaces, custom, flags):
"""Initialize."""
super(SoupSieve, self).__init__(
pattern=pattern,
selectors=selectors,
namespaces=namespaces,
custom=custom,
flags=flags
)
def match(self, tag):
"""Match."""
return CSSMatch(self.selectors, tag, self.namespaces, self.flags).match(tag)
def closest(self, tag):
"""Match closest ancestor."""
return CSSMatch(self.selectors, tag, self.namespaces, self.flags).closest()
def filter(self, iterable): # noqa A001
"""
Filter.
`CSSMatch` can cache certain searches for tags of the same document,
so if we are given a tag, all tags are from the same document,
and we can take advantage of the optimization.
Any other kind of iterable could have tags from different documents or detached tags,
so for those, we use a new `CSSMatch` for each item in the iterable.
"""
if CSSMatch.is_tag(iterable):
return CSSMatch(self.selectors, iterable, self.namespaces, self.flags).filter()
else:
return [node for node in iterable if not CSSMatch.is_navigable_string(node) and self.match(node)]
def select_one(self, tag):
"""Select a single tag."""
tags = self.select(tag, limit=1)
return tags[0] if tags else None
def select(self, tag, limit=0):
"""Select the specified tags."""
return list(self.iselect(tag, limit))
def iselect(self, tag, limit=0):
"""Iterate the specified tags."""
for el in CSSMatch(self.selectors, tag, self.namespaces, self.flags).select(limit):
yield el
def __repr__(self): # pragma: no cover
"""Representation."""
return "SoupSieve(pattern={!r}, namespaces={!r}, custom={!r}, flags={!r})".format(
self.pattern,
self.namespaces,
self.custom,
self.flags
)
__str__ = __repr__
ct.pickle_register(SoupSieve)
|
[
"[email protected]"
] | |
1f5c2bd6304a0c9d074d8c9541a0eb810b6bf790
|
45006b78675997765c2248ce2944aa24c9cd3787
|
/tf_agents/bandits/policies/policy_utilities.py
|
9d39be6fd7480a0e6372bfa270c35c47d4db578c
|
[
"Apache-2.0"
] |
permissive
|
ymodak/agents
|
43d2105965db763b07233139c0b87896c956547a
|
a6ab65605a6910cb3130a500614d006c9271157b
|
refs/heads/master
| 2022-09-24T03:47:05.815845 | 2020-05-18T18:02:40 | 2020-05-18T18:03:03 | 265,031,865 | 0 | 0 |
Apache-2.0
| 2020-05-18T18:50:52 | 2020-05-18T18:50:51 | null |
UTF-8
|
Python
| false | false | 8,891 |
py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for bandit policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.utils import common
class InfoFields(object):
"""Strings which can be used in the policy info fields."""
# Mean of predicted rewards (per arm).
PREDICTED_REWARDS_MEAN = 'predicted_rewards_mean'
# Samples of predicted rewards (per arm).
PREDICTED_REWARDS_SAMPLED = 'predicted_rewards_sampled'
# Type of bandit policy (see enumerations in `BanditPolicyType`).
BANDIT_POLICY_TYPE = 'bandit_policy_type'
# Used to store the chosen action for a per-arm model.
CHOSEN_ARM_FEATURES = 'chosen_arm_features'
PolicyInfo = collections.namedtuple( # pylint: disable=invalid-name
'PolicyInfo',
(policy_step.CommonFields.LOG_PROBABILITY,
InfoFields.PREDICTED_REWARDS_MEAN,
InfoFields.PREDICTED_REWARDS_SAMPLED,
InfoFields.BANDIT_POLICY_TYPE))
# Set default empty tuple for all fields.
PolicyInfo.__new__.__defaults__ = ((),) * len(PolicyInfo._fields)
PerArmPolicyInfo = collections.namedtuple( # pylint: disable=invalid-name
'PerArmPolicyInfo',
(policy_step.CommonFields.LOG_PROBABILITY,
InfoFields.PREDICTED_REWARDS_MEAN,
InfoFields.PREDICTED_REWARDS_SAMPLED,
InfoFields.BANDIT_POLICY_TYPE,
InfoFields.CHOSEN_ARM_FEATURES))
# Set default empty tuple for all fields.
PerArmPolicyInfo.__new__.__defaults__ = ((),) * len(PerArmPolicyInfo._fields)
def populate_policy_info(arm_observations, chosen_actions, rewards_for_argmax,
est_rewards, emit_policy_info,
accepts_per_arm_features):
"""Populates policy info given all needed input.
Args:
    arm_observations: In case the policy accepts per-arm features, this is a
      Tensor with the per-arm features. Otherwise its value is unused.
chosen_actions: A Tensor with the indices of the chosen actions.
rewards_for_argmax: The sampled or optimistically boosted reward estimates
based on which the policy chooses the action greedily.
est_rewards: A Tensor with the rewards estimated by the model.
    emit_policy_info: A set of policy info keys, specifying which info fields to
      populate.
accepts_per_arm_features: (bool) Whether the policy accepts per-arm
features.
Returns:
A policy info.
"""
if accepts_per_arm_features:
# Saving the features for the chosen action to the policy_info.
chosen_arm_features = tf.gather(
params=arm_observations, indices=chosen_actions, batch_dims=1)
policy_info = PerArmPolicyInfo(
predicted_rewards_sampled=(
rewards_for_argmax if
InfoFields.PREDICTED_REWARDS_SAMPLED in emit_policy_info else ()),
predicted_rewards_mean=(
est_rewards
if InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info else ()),
chosen_arm_features=chosen_arm_features)
else:
policy_info = PolicyInfo(
predicted_rewards_sampled=(
rewards_for_argmax if
InfoFields.PREDICTED_REWARDS_SAMPLED in emit_policy_info else ()),
predicted_rewards_mean=(
est_rewards
if InfoFields.PREDICTED_REWARDS_MEAN in emit_policy_info else ()))
return policy_info
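# Shape sketch for the per-arm gather in `populate_policy_info` above (an
# assumption about the typical layout; the names are illustrative):
#   arm_observations:    [batch_size, num_arms, arm_feature_dim]
#   chosen_actions:      [batch_size]
#   chosen_arm_features: [batch_size, arm_feature_dim]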
class BanditPolicyType(object):
"""Enumeration of bandit policy types."""
# No bandit policy type specified.
UNKNOWN = 0
# Greedy decision made by bandit agent.
GREEDY = 1
# Random decision for exploration made by epsilon-greedy agent sampled from
# uniform distribution over actions.
UNIFORM = 2
def create_bandit_policy_type_tensor_spec(shape):
"""Create tensor spec for bandit policy type."""
return tensor_spec.BoundedTensorSpec(
shape=shape, dtype=tf.int32,
minimum=BanditPolicyType.UNKNOWN, maximum=BanditPolicyType.UNIFORM)
@common.function
def masked_argmax(input_tensor, mask, output_type=tf.int32):
"""Computes the argmax where the allowed elements are given by a mask.
If a row of `mask` contains all zeros, then this method will return -1 for the
corresponding row of `input_tensor`.
Args:
input_tensor: Rank-2 Tensor of floats.
mask: 0-1 valued Tensor of the same shape as input.
output_type: Integer type of the output.
Returns:
A Tensor of rank 1 and type `output_type`, with the masked argmax of every
row of `input_tensor`.
"""
input_tensor.shape.assert_is_compatible_with(mask.shape)
neg_inf = tf.constant(-float('Inf'), input_tensor.dtype)
modified_input = tf.compat.v2.where(
tf.cast(mask, tf.bool), input_tensor, neg_inf)
argmax_tensor = tf.argmax(modified_input, axis=-1, output_type=output_type)
# Replace results for invalid mask rows with -1.
reduce_mask = tf.cast(tf.reduce_max(mask, axis=1), tf.bool)
neg_one = tf.constant(-1, output_type)
return tf.compat.v2.where(reduce_mask, argmax_tensor, neg_one)
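# Worked example for `masked_argmax` (a sketch with made-up values):
#
#   scores = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   mask = tf.constant([[1, 0, 1], [0, 0, 0]])
#   masked_argmax(scores, mask)  # -> [2, -1]; the second row's mask is all zeros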
def has_bandit_policy_type(info, check_for_tensor=False):
"""Check if policy info has `bandit_policy_type` field/tensor."""
if info in ((), None):
return False
fields = getattr(info, '_fields', None)
has_field = fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields
if has_field and check_for_tensor:
return isinstance(info.bandit_policy_type, tf.Tensor)
else:
return has_field
def set_bandit_policy_type(info, bandit_policy_type):
"""Sets the InfoFields.BANDIT_POLICY_TYPE on info to bandit_policy_type.
If policy `info` does not support InfoFields.BANDIT_POLICY_TYPE, this method
returns `info` as-is (without any modification).
Args:
info: Policy info on which to set bandit policy type.
bandit_policy_type: Tensor containing BanditPolicyType enums or TensorSpec
from `create_bandit_policy_type_tensor_spec()`.
Returns:
Policy info with modified field (if possible).
"""
if info in ((), None):
return PolicyInfo(bandit_policy_type=bandit_policy_type)
fields = getattr(info, '_fields', None)
if fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields:
return info._replace(bandit_policy_type=bandit_policy_type)
try:
info[InfoFields.BANDIT_POLICY_TYPE] = bandit_policy_type
except TypeError:
pass
return info
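# Usage sketch (assumed): stamping a policy info namedtuple with a policy type.
#
#   info = PolicyInfo()
#   info = set_bandit_policy_type(info, bandit_policy_type)  # returns a new namedtuple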
@common.function
def bandit_policy_uniform_mask(values, mask):
"""Set bandit policy type tensor to BanditPolicyType.UNIFORM based on mask.
Set bandit policy type `values` to BanditPolicyType.UNIFORM; returns tensor
where output[i] is BanditPolicyType.UNIFORM if mask[i] is True, otherwise it
is left as values[i].
Args:
values: Tensor containing `BanditPolicyType` enumerations.
mask: Tensor of the same shape as `values` with boolean flags indicating
values to set to `BanditPolicyType.UNIFORM`.
Returns:
Tensor containing `BanditPolicyType` enumerations with masked values.
"""
return tf.where(
mask, tf.fill(tf.shape(values), BanditPolicyType.UNIFORM), values)
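# Example with hypothetical values: values = [GREEDY, GREEDY], mask = [False, True]
# yields [GREEDY, UNIFORM]; only the masked positions are overwritten.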
def get_model_index(arm_index, accepts_per_arm_features):
"""Returns the model index for a specific arm.
  The number of models depends on the observation format: if the policy accepts
  per-arm features, a single model is shared across all arms. Otherwise there is
  a separate model for every arm.
Args:
arm_index: The index of the arm for which the model index is needed.
accepts_per_arm_features: (bool) Whether the policy works with per-arm
features.
Returns:
The index of the model for the arm requested.
"""
return 0 if accepts_per_arm_features else arm_index
def compute_feasibility_probability(observation, constraints, batch_size,
num_actions, action_mask=None):
"""Helper function to compute the action feasibility probability."""
feasibility_prob = tf.ones([batch_size, num_actions])
if action_mask is not None:
feasibility_prob = tf.cast(action_mask, tf.float32)
for c in constraints:
# We assume the constraints are independent.
action_feasibility = c.compute_action_feasibility(observation)
feasibility_prob *= action_feasibility
return feasibility_prob
|
[
"[email protected]"
] | |
63203f447c110e2a884bbe8afcb0e393270b4256
|
ed0e0b3ceb3ca5f885d056a94d8d78c0926f4d97
|
/Tests/Unit/test_service_layer/test_services.py
|
e00e07dd050e7f21ce7661e546125e8e84989e3b
|
[] |
no_license
|
JC-09/CS235FlixSkeleton-part-3
|
9bd6c0603f774fefa9f47ef7d15192b94d9b5ec8
|
cbc7e5723779825575d2fff05d5945986cb8e2fa
|
refs/heads/master
| 2023-01-01T14:14:56.947000 | 2020-10-25T07:55:50 | 2020-10-25T07:55:50 | 303,101,409 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,766 |
py
|
from datetime import date
import pytest
from CS235Flix.authentication.services import AuthenticationException
from CS235Flix.movies import services as movies_services
from CS235Flix.authentication import services as auth_services
from CS235Flix.movies.services import NonExistentMovieException, NonExistentActorException, NonExistentDirectorException, NoSearchResultsException
def test_can_add_user(in_memory_repo):
new_username = 'jz'
new_password = 'abcd1A23'
auth_services.add_user(new_username, new_password, in_memory_repo)
user_as_dict = auth_services.get_user(new_username, in_memory_repo)
assert user_as_dict['username'] == new_username
# Check that password has been encrypted.
assert user_as_dict['password'].startswith('pbkdf2:sha256:')
def test_cannot_add_user_with_existing_name(in_memory_repo):
username = 'thorke'
password = 'abcd1A23'
with pytest.raises(auth_services.NameNotUniqueException):
auth_services.add_user(username, password, in_memory_repo)
def test_authentication_with_valid_credentials(in_memory_repo):
new_username = 'pmccartney'
new_password = 'abcd1A23'
auth_services.add_user(new_username, new_password, in_memory_repo)
try:
auth_services.authenticate_user(new_username, new_password, in_memory_repo)
except AuthenticationException:
assert False
def test_authentication_with_invalid_credentials(in_memory_repo):
new_username = 'pmccartney'
new_password = 'abcd1A23'
auth_services.add_user(new_username, new_password, in_memory_repo)
with pytest.raises(auth_services.AuthenticationException):
auth_services.authenticate_user(new_username, '0987654321', in_memory_repo)
def test_can_add_review(in_memory_repo):
movie_id = 2
review_text = 'What a great movie!'
username = 'fmercury'
rating = 10
# Call the service layer to add the review
movies_services.add_review(review_text=review_text, username=username,
movie_id=movie_id, rating=rating, repo=in_memory_repo)
# Retrieve the reviews for the movie from the repository
reviews_as_dict = movies_services.get_reviews_for_movie(movie_id=movie_id, repo=in_memory_repo)
# Check that the reviews include a review with the new review text
assert next(
(dictionary['review_text'] for dictionary in reviews_as_dict if dictionary['review_text'] == review_text), None
) is not None
def test_cannot_add_review_for_non_existent_movie(in_memory_repo):
movie_id = 20
review_text = 'What a great movie!'
username = 'fmercury'
rating = 10
# Call the service layer to attempt add the review
with pytest.raises(movies_services.NonExistentMovieException):
movies_services.add_review(review_text=review_text, username=username,
movie_id=movie_id, rating=rating, repo=in_memory_repo)
def test_cannot_add_review_by_unknown_user(in_memory_repo):
movie_id = 2
review_text = 'What a great movie!'
username = 'unknownUser'
rating = 10
# Call the service layer to attempt add the review
with pytest.raises(movies_services.UnknownUserException):
movies_services.add_review(review_text=review_text, username=username,
movie_id=movie_id, rating=rating, repo=in_memory_repo)
def test_can_get_a_movie(in_memory_repo):
movie_id = 1
movie_as_dict = movies_services.get_movie(movie_id=movie_id, repo=in_memory_repo)
assert movie_as_dict['id'] == movie_id
assert movie_as_dict['title'] == 'Guardians of the Galaxy'
assert movie_as_dict['release_year'] == 2014
assert movie_as_dict['description'] == 'A group of intergalactic criminals are forced to work together to stop a fanatical warrior from taking control of the universe.'
assert movie_as_dict['director'] == 'James Gunn'
assert movie_as_dict['runtime_minutes'] == 121
actor_fullnames = [dictionary['actor_fullname'] for dictionary in movie_as_dict['actors']]
assert 'Chris Pratt' in actor_fullnames
assert 'Vin Diesel' in actor_fullnames
assert 'Bradley Cooper' in actor_fullnames
assert 'Zoe Saldana' in actor_fullnames
genres = [dictionary['genre_name'] for dictionary in movie_as_dict['genres']]
assert 'Action' in genres
assert 'Adventure' in genres
assert 'Sci-Fi' in genres
def test_cannot_get_movie_with_non_existent_id(in_memory_repo):
movie_id = 33
# Call the service layer to attempt to retrieve the Movie
with pytest.raises(movies_services.NonExistentMovieException):
movies_services.get_movie(movie_id, in_memory_repo)
def test_get_latest_movie(in_memory_repo):
movie_as_dict = movies_services.get_latest_movie(in_memory_repo)
assert movie_as_dict['id'] == 7
def test_get_oldest_movie(in_memory_repo):
movie_as_dict = movies_services.get_oldest_movie(in_memory_repo)
assert movie_as_dict['id'] == 2
def test_get_movies_by_release_year_with_one_movie_in_the_year(in_memory_repo):
target_year = 2012
movies_as_dict, prev_year, next_year = movies_services.get_movies_by_release_year(target_year, in_memory_repo)
assert len(movies_as_dict) == 1
assert movies_as_dict[0]['id'] == 2
assert prev_year is None
assert next_year == 2014
def test_get_movies_by_release_year_with_multiple_movies_in_the_year(in_memory_repo):
target_year = 2016
movies_as_dict, prev_year, next_year = movies_services.get_movies_by_release_year(target_year, in_memory_repo)
# Check that there are 8 movies released in 2016 in the repository
assert len(movies_as_dict) == 8
# Check that the movie ids for the movies returned are 3, 4, 5, 6, 7, 8, 9, 10
movie_ids = [movie['id'] for movie in movies_as_dict]
assert set([3, 4, 5, 6, 7, 8, 9, 10]).issubset(movie_ids)
    # Check that the release years surrounding the target year are 2014 and None
assert prev_year == 2014
assert next_year is None
def test_get_movies_by_release_year_with_non_existent_release_year(in_memory_repo):
target_year = 2010
movies_as_dict, prev_year, next_year = movies_services.get_movies_by_release_year(target_year, in_memory_repo)
assert len(movies_as_dict) == 0
def test_get_movies_by_id(in_memory_repo):
target_movie_ids = [3, 6, 17, 19]
movies_as_dict = movies_services.get_movies_by_id(target_movie_ids, in_memory_repo)
# Check that 2 movies were returned from the query
assert len(movies_as_dict) == 2
# Check that the movie ids returned were 3 and 6
movie_ids = [movie['id'] for movie in movies_as_dict]
assert set([3,6]).issubset(movie_ids)
def test_search_movies_by_actor_fullname(in_memory_repo):
target_actor = "Chris Pratt"
movies_as_dict = movies_services.search_movie_by_actor_fullname(target_actor, in_memory_repo)
# Check that 2 movies were returned from the query
assert len(movies_as_dict) == 2
# Check that the movie ids returned were 1 and 10
movie_ids = [movie['id'] for movie in movies_as_dict]
assert 1 in movie_ids
assert 10 in movie_ids
def test_search_movies_by_non_existent_actor(in_memory_repo):
non_existent_actor = 'Not Exist'
with pytest.raises(NonExistentActorException):
movies_as_dict = movies_services.search_movie_by_actor_fullname(non_existent_actor, in_memory_repo)
def test_search_movies_by_director_fullname(in_memory_repo):
target_director = "M. Night Shyamalan"
movies_as_dict = movies_services.search_movie_directed_by_director_fullname(target_director, in_memory_repo)
# Check that 1 movie is returned from the query
assert len(movies_as_dict) == 1
# Check that the movie id is 3
assert movies_as_dict[0]['id'] == 3
def test_search_movies_by_non_existent_director_fullname(in_memory_repo):
non_existent_director = 'Not Exist'
with pytest.raises(NonExistentDirectorException):
movies_as_dict = movies_services.search_movie_directed_by_director_fullname(non_existent_director, in_memory_repo)
def test_search_by_a_valid_actor_name_and_a_valid_director_name(in_memory_repo):
target_actor = 'Ryan Gosling'
target_director = 'Damien Chazelle'
movies_as_dict = movies_services.search_movie_by_actor_and_director(target_actor, target_director, in_memory_repo)
# Check that 1 movie is returned from the query
assert len(movies_as_dict) == 1
# Check that the movie id is 7
assert movies_as_dict[0]['id'] == 7
def test_search_an_invalid_actor_name_or_an_invalid_director_name(in_memory_repo):
correct_actor = 'Ryan Gosling'
correct_director = 'Damien Chazelle'
fake_actor = 'Fake Actor'
fake_director = 'Fake Director'
    # Each invalid combination gets its own `pytest.raises` block; statements
    # placed after a raising call inside a single block would never execute.
    # Check that the search raises for the combination of correct actor and fake director
    with pytest.raises(NoSearchResultsException):
        movies_services.search_movie_by_actor_and_director(correct_actor, fake_director, in_memory_repo)
    # Check that the search raises for the combination of fake actor and correct director
    with pytest.raises(NoSearchResultsException):
        movies_services.search_movie_by_actor_and_director(fake_actor, correct_director, in_memory_repo)
    # Check that the search raises for the combination of fake actor and fake director
    with pytest.raises(NoSearchResultsException):
        movies_services.search_movie_by_actor_and_director(fake_actor, fake_director, in_memory_repo)
def test_search_a_valid_movie_title(in_memory_repo):
movies_as_dict = movies_services.search_movie_by_title('La La Land', in_memory_repo)
assert movies_as_dict[0]['id'] == 7
def test_search_a_non_existent_movie_title(in_memory_repo):
with pytest.raises(NoSearchResultsException):
movies_as_dict = movies_services.search_movie_by_title('3sdf4as5df14as35d1few', in_memory_repo)
assert len(movies_as_dict) == 0
def test_get_reviews_for_movie(in_memory_repo):
reviews_as_dict = movies_services.get_reviews_for_movie(1, in_memory_repo)
# Check that 3 reviews were returned for movie with id 1
assert len(reviews_as_dict) == 3
# Check that the reviews relate to the movie whose id is 1
movie_ids = [review['movie_id'] for review in reviews_as_dict]
movie_ids = set(movie_ids)
assert 1 in movie_ids and len(movie_ids) == 1
def test_get_reviews_for_non_existent_movies(in_memory_repo):
with pytest.raises(NonExistentMovieException):
reviews_as_dict = movies_services.get_reviews_for_movie(30, in_memory_repo)
def test_get_reviews_for_movie_without_reviews(in_memory_repo):
reviews_as_dict = movies_services.get_reviews_for_movie(8, in_memory_repo)
assert len(reviews_as_dict) == 0
def test_get_top_6_movies_by_revenue(in_memory_repo):
movies = movies_services.get_top_6_movies_by_revenue(in_memory_repo)
assert len(movies) == 6
assert movies[0]['title'] == "Guardians of the Galaxy"
assert movies[1]['title'] == "Suicide Squad"
assert movies[2]['title'] == "Sing"
assert movies[3]['title'] == "La La Land"
assert movies[4]['title'] == "Split"
assert movies[5]['title'] == "Prometheus"
def test_get_suggestions_for_a_user(in_memory_repo):
username = "thorke"
suggestions = movies_services.get_suggestions_for_a_user(username=username, repo=in_memory_repo)
assert len(suggestions) == 1
assert suggestions[0]['title'] == "Guardians of the Galaxy"
def test_getting_earliest_year(in_memory_repo):
assert in_memory_repo.get_earliest_year() == 2012
def test_getting_latest_year(in_memory_repo):
assert in_memory_repo.get_latest_year() == 2016
|
[
"[email protected]"
] | |
3c03e2c9fad0a62cf6af453b96f9f41fd1acb4c2
|
29f3cb23d6c93c12ea1faeedc4a452e9f5b5ab19
|
/feedbackApp/migrations/0001_initial.py
|
b003421cd56b02ab5b0061917970cb85c4af2e50
|
[] |
no_license
|
Kinga-penjor/jamyang-prj
|
205581c14b718c8a004515ee9f43d2699eeaa8a6
|
6e5e7b3f452cdce5a42703ae4536b60ffc2ea895
|
refs/heads/main
| 2023-06-08T19:21:18.037525 | 2021-06-28T21:06:46 | 2021-06-28T21:06:46 | 381,152,942 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,745 |
py
|
# Generated by Django 3.1.5 on 2021-06-11 18:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('announcement', models.TextField()),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('event_name', models.CharField(max_length=100)),
('event_description', models.TextField()),
('event_image', models.ImageField(upload_to='')),
],
),
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('feedback', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('comment', models.TextField(blank=True, null=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
26f00db3c32d9d29465643d06dc02532c017e608
|
8d9318a33afc2c3b5ca8ac99fce0d8544478c94a
|
/Books/Casandra DB/opscenter-5.1.0/lib/py-debian/2.6/i386/twisted/python/_inotify.py
|
c002674f122a6bd2a9e80aa2344b47edeff20f78
|
[] |
no_license
|
tushar239/git-large-repo
|
e30aa7b1894454bf00546312a3fb595f6dad0ed6
|
9ee51112596e5fc3a7ab2ea97a86ec6adc677162
|
refs/heads/master
| 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 51 |
py
|
../../../../../py-unpure/twisted/python/_inotify.py
|
[
"[email protected]"
] | |
af41a271f525131b50329d7c85145af2a2f0fe15
|
0f07fa3dbbd91ab8c38605f7d4f7f4fe49138f25
|
/budget/test/test_url.py
|
4e39de1f03dcc53fb10038214ce6bac984f29810
|
[] |
no_license
|
akshay2424/django-testing-example
|
fe7fa4d81489840828fe82bcb66acbdc4653834b
|
77e6a27230d4034a345c300e35710996337e1276
|
refs/heads/master
| 2023-08-13T01:54:02.210965 | 2021-09-16T12:00:29 | 2021-09-16T12:00:29 | 404,025,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from budget.views import project_list, project_detail, ProjectCreateView
class TestUrls(SimpleTestCase):
def test_list_url_resolves(self):
url = reverse('list')
        self.assertEqual(resolve(url).func, project_list)
def test_add_url_resolves(self):
url = reverse('add')
        self.assertEqual(resolve(url).func.view_class, ProjectCreateView)
def test_detail_url_resolves(self):
url = reverse('detail',args=['some-slug'])
        self.assertEqual(resolve(url).func, project_detail)
|
[
"[email protected]"
] | |
073b6b152e0805dbc16dce1d402482e505bd9770
|
9d5723c09148cc353e5339a706ba582a162dceec
|
/hunkim/lab12-5.py
|
822c653469deeadaddde45a16a92e53b9bc3eaab
|
[] |
no_license
|
SilverQ/dl_study
|
424bce279c059c290a4c766e87fadb150fff82da
|
663b432abc5afd0eed278368a5fea19ece6a383c
|
refs/heads/master
| 2022-11-14T08:27:10.937535 | 2020-07-02T10:05:04 | 2020-07-02T10:05:04 | 82,505,280 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,120 |
py
|
'''
The original script shows how to predict the next day's closing stock prices using a basic RNN
https://github.com/hunkim/DeepLearningZeroToAll/blob/master/lab-12-5-rnn_stock_prediction.py
First, let's understand the original code and prior art completely.
'''
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
tf.set_random_seed(777) # reproducibility
np.set_printoptions(precision=2)
if "DISPLAY" not in os.environ:
# remove Travis CI Error
matplotlib.use('Agg')
def MinMaxScaler(data):
'''
Min Max Normalization
Parameters
----------
data : numpy.ndarray
input data to be normalized
shape: [Batch size, dimension]
Returns
----------
    data : numpy.ndarray
normalized data
shape: [Batch size, dimension]
References
----------
.. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
    Originally only the normalized data was returned; min and max are also returned so the data can be restored later.
'''
numerator = data - np.min(data, 0)
denominator = np.max(data, 0) - np.min(data, 0)
# noise term prevents the zero division
return [numerator / (denominator + 1e-7), np.min(data, 0), np.max(data, 0)]
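# A sketch of undoing the scaling with the returned min/max (useful later for
# mapping predictions back to price units; it simply inverts the formula above):
#   restored = scaled * (max - min + 1e-7) + min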
# train Parameters
seq_length = 7
data_dim = 5
hidden_dim = 10
output_dim = 1
learning_rate = 0.01
iterations = 500
# Open, High, Low, Volume, Close
xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
xy_rev = xy[::-1]  # reverse order (chronologically ordered), dates in ascending order
'''
print('xy: ', xy[-3:])
xy: [[ 566.89 567. 556.93 10800. 556.97]
[ 561.2 566.43 558.67 41200. 559.99]
[ 568. 568. 552.92 13100. 558.46]]
print('xy_rev: ', xy_rev[:3])
xy: [[ 568. 568. 552.92 13100. 558.46]
[ 561.2 566.43 558.67 41200. 559.99]
[ 566.89 567. 556.93 10800. 556.97]]
'''
# split data into train_set/test_set and scale
train_size = int(len(xy_rev) * 0.7)
train_set = xy_rev[0:train_size]
test_set = xy_rev[train_size - seq_length:] # Index from [train_size - seq_length] to utilize past sequence
[train_set, min, max] = MinMaxScaler(train_set)
[test_set, min, max] = MinMaxScaler(test_set)
'''
print('train_set: ', train_set[:3])
print('min: ', min)  # You can see the min-max operation is applied to each column separately.
train_set: [[0.25 0.25 0.23 0. 0.23]
[0.23 0.24 0.25 0. 0.24]
[0.25 0.24 0.25 0. 0.23]]
min: [ 494.65 495.98 487.56 7900. 492.55]
'''
# build datasets. Create batches from 7-day windows.
def build_dataset(time_series, seq_length):
dataX = []
dataY = []
for i in range(0, len(time_series) - seq_length):
_x = time_series[i:i + seq_length, :]
_y = time_series[i + seq_length, [-1]] # the next day's closing stock prices
# print(_x, "->", _y)
dataX.append(_x)
dataY.append(_y)
return np.array(dataX), np.array(dataY)
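# Resulting shapes (a sketch): with seq_length=7 and data_dim=5,
#   trainX: [num_windows, 7, 5]  -- sliding 7-day windows over all 5 features
#   trainY: [num_windows, 1]     -- the next day's (scaled) closing price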
trainX, trainY = build_dataset(train_set, seq_length)
testX, testY = build_dataset(test_set, seq_length)
'''
print('trainX: ', trainX[:4])
print('trainY: ', trainY[:3])
trainX: [[[2.53e-01 2.45e-01 2.34e-01 4.66e-04 2.32e-01]
[2.30e-01 2.40e-01 2.55e-01 2.98e-03 2.37e-01]
[2.49e-01 2.42e-01 2.48e-01 2.60e-04 2.27e-01]
[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]]
[[2.30e-01 2.40e-01 2.55e-01 2.98e-03 2.37e-01]
[2.49e-01 2.42e-01 2.48e-01 2.60e-04 2.27e-01]
[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]
[1.59e-01 1.79e-01 1.42e-01 3.94e-01 1.61e-01]]
[[2.49e-01 2.42e-01 2.48e-01 2.60e-04 2.27e-01]
[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]
[1.59e-01 1.79e-01 1.42e-01 3.94e-01 1.61e-01]
[1.65e-01 2.01e-01 1.93e-01 2.82e-01 2.20e-01]]
[[2.21e-01 2.47e-01 2.55e-01 0.00e+00 2.63e-01]
[3.63e-01 3.70e-01 2.67e-01 1.25e-02 2.62e-01]
[2.59e-01 3.11e-01 2.74e-01 4.56e-01 2.72e-01]
[2.76e-01 2.78e-01 1.98e-01 5.70e-01 1.78e-01]
[1.59e-01 1.79e-01 1.42e-01 3.94e-01 1.61e-01]
[1.65e-01 2.01e-01 1.93e-01 2.82e-01 2.20e-01]
[2.24e-01 2.36e-01 2.34e-01 2.98e-01 2.52e-01]]]
trainY: [[0.16]
[0.22]
[0.25]]
'''
# input place holders
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, 1])
# build a LSTM network
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
Y_pred = tf.contrib.layers.fully_connected(
outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
# cost/loss
loss = tf.reduce_sum(tf.square(Y_pred - Y)) # sum of the squares
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
# RMSE
targets = tf.placeholder(tf.float32, [None, 1])
predictions = tf.placeholder(tf.float32, [None, 1])
rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
# Training step
for i in range(iterations):
_, step_loss = sess.run([train, loss], feed_dict={
X: trainX, Y: trainY})
        if i % 100 == 0:
print("[step: {}] loss: {}".format(i, step_loss))
# Test step
test_predict = sess.run(Y_pred, feed_dict={X: testX})
rmse_val = sess.run(rmse, feed_dict={
targets: testY, predictions: test_predict})
print("RMSE: {}".format(rmse_val))
# Plot predictions
plt.plot(testY)
plt.plot(test_predict)
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
# plt.show()
plt.savefig('Stock_price.png')
|
[
"[email protected]"
] | |
90cfe023a486800c522f045b7b3674252d5bfa76
|
eaa10dda27889d451c205fee9aa8139bf68c6e4d
|
/coder.py
|
2bfd93f7d4a151d69d4fb1faff3d90339a46ecaa
|
[] |
no_license
|
difranco/DREAM5-inference
|
581c0152745f4c1fb6a5186afdae79afa91aee34
|
ee09b35f78969296121724114b3d94d7dc031ddc
|
refs/heads/master
| 2020-06-15T17:36:09.370485 | 2016-12-12T08:48:37 | 2016-12-12T08:48:37 | 75,275,013 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,148 |
py
|
from collections import defaultdict
from sys import intern
class Coder:
"""
Creates sequential ids for objects and keeps track of them
useful e.g. for filling out a matrix with info related to objects
and translating back and forth between matrix indices and the objects
"""
def next_id(self):
self.current_id += 1
return self.current_id
def total_seen(self):
return self.current_id + 1
def __init__(self):
self.current_id = -1
self.obj_to_int = defaultdict(self.next_id)
self.int_to_obj = dict()
def encode(self, obj):
if isinstance(obj, str):
obj = intern(obj)
code = self.obj_to_int[obj]
self.int_to_obj[code] = obj
return code
def decode(self, i):
return self.int_to_obj[i]
def get_code(self, obj):
"""
Gets the code for an object but won't extend the code
if the object isn't already present
"""
if isinstance(obj, str):
obj = intern(obj)
if obj in self.obj_to_int: return self.obj_to_int[obj]
else: return None
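# Minimal usage sketch (added for illustration; the names are made up):
if __name__ == "__main__":
    coder = Coder()
    a = coder.encode("gene_a")                 # -> 0 (first id issued)
    b = coder.encode("gene_b")                 # -> 1
    assert coder.decode(a) == "gene_a"
    assert coder.get_code("missing") is None   # lookup without extending
    print(coder.total_seen())                  # -> 2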
|
[
"[email protected]"
] | |
af3c4c05de8dadde3e803145348c565650a8bd18
|
14ba224c43cf3a5c9a792ecb9a6d1d8086a13a7a
|
/API/predict_assemble.py
|
7fcab493f684a2e34670b7ba5645442804847c26
|
[] |
no_license
|
adimajo/MLitw
|
74ef6951513c1604e596781ed033f0a7e725931e
|
1cdaded885efacf34a9fdcc565b7d1fd7369e662
|
refs/heads/master
| 2022-12-18T21:22:29.574052 | 2020-08-18T14:35:18 | 2020-08-18T14:35:18 | 279,025,051 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 810 |
py
|
import numpy as np
def predict_assemble(X, models, weights):
"""
Prediction from ASSEMBLE model
:param numpy.ndarray X: features of samples to predict
:param models: list of weak learners
:param weights: weights for each weak learner
:return: class prediction for each point
"""
if len(models) > 1:
predict = np.sum(np.concatenate([(weight * model.predict(X)).reshape(-1, 1) for weight, model in zip(weights,
models)],
axis=1),
axis=1).reshape(-1, 1)
else:
predict = np.array([weight * model.predict(X) for weight, model in zip(weights, models)]).reshape(-1, 1)
return predict
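if __name__ == "__main__":
    # Smoke test (added for illustration) with stub weak learners; any object
    # exposing .predict(X) works here.
    class _Stub:
        def __init__(self, value):
            self.value = value

        def predict(self, X):
            return np.full(len(X), self.value)

    X_demo = np.zeros((3, 2))
    out = predict_assemble(X_demo, [_Stub(1.0), _Stub(-1.0)], [0.7, 0.3])
    print(out.ravel())  # 0.7*1.0 + 0.3*(-1.0) = 0.4 for each of the 3 samples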
|
[
"[email protected]"
] | |
a5f074b4c3dbcf95ececa07a42d1e17cdd081b40
|
1953ad2d8cc8a36e29d3d48e5458aeb69bf17bdd
|
/[2206]벽부수고 이동하기.py
|
96b44a27cedd966ef19ce8c5d99d396647b7cf34
|
[] |
no_license
|
wookkl/backjoon-problemsolving
|
8b75ac4575ffdc15615bc5672f1d5358ac3016a4
|
fbe7e051c7513f52b2ac26472dfc34955013549d
|
refs/heads/master
| 2023-04-10T22:33:49.614340 | 2021-04-25T00:50:47 | 2021-04-25T00:50:47 | 219,535,847 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
from collections import deque
dx=[1,-1,0,0];dy=[0,0,1,-1]
def bfs():
q=deque([[0,0,False]])
cnt=0
vstd[0][0][0]=True
while q:
for _ in range(len(q)):
r,c,b=q.popleft()
if [r,c]==[n-1,m-1]:return cnt+1
for i in range(4):
x=r+dx[i];y=c+dy[i]
if 0<=x<n and 0<=y<m:
if not b and not vstd[0][x][y]:
if matrix[x][y]=='1':
q.append([x,y,True])
vstd[1][x][y]=True
else:
q.append([x,y,False])
vstd[0][x][y]=True
else:
if matrix[x][y]=='0' and not vstd[1][x][y]:
q.append([x,y,True])
vstd[1][x][y]=True
cnt+=1
return -1
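# Note (added for clarity): vstd is indexed as vstd[wall_broken][row][col], so
# each cell can be visited in two distinct states, before and after spending
# the single wall-break, which is what lets this BFS find the true shortest path.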
n,m=map(int,input().split())
matrix=list(list(input().rstrip()) for _ in range(n))
vstd=[[[False]*m for _ in range(n)] for _ in range(2)]
print(bfs())
|
[
"[email protected]"
] | |
be8eb5971f2b8e1e26457e6d656cc20a49cc56d9
|
e6be04e3683f9ea2e6acea7662871ce2891dbb72
|
/assignment1-training1.py
|
fbbf882fdfe3808421b687e78003c6e2ef6a7903
|
[] |
no_license
|
amiralimoghadamzadeh/assignments1-3
|
fb5f5b69f654cd4a38da0d067fb7b6a0886cd171
|
4cbaa9e934e12ce16d7ad3d504f95c50e2a6c214
|
refs/heads/main
| 2023-09-04T09:47:35.059973 | 2021-11-07T11:50:56 | 2021-11-07T11:50:56 | 425,489,142 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
first_name = input("enter your first name")
last_name = input('enter last name')
s1 = int(input('whats your first score?'))
s2 = int(input('whats your second score'))
s3 = int(input('what is your third score?'))
avg = int((s1 + s2 + s3) / 3)
if avg < 10:
    print(first_name,last_name,'mashroot')
elif 10 <= avg < 16:
    print(first_name,last_name,'mamooli')
elif avg >= 16:
    print(first_name,last_name,"momtaz")
|
[
"[email protected]"
] | |
ceebf67a85fa9c0b8a799faca28119ca339f27b9
|
7242e2b2e2bdb54944ac965f3f39ff69e7b806e3
|
/featureeng/Filter.py
|
e17c2bded3cb2b7bc9abb449cddc396e873040e9
|
[] |
no_license
|
roshanmadhushanka/Yahoo
|
917090ae6dcd3e87ef4af0bcf267f853b80422a2
|
47c9586781f5c929697d58cf0761fe2f0e2902aa
|
refs/heads/master
| 2020-06-12T05:54:16.279668 | 2016-12-08T01:56:56 | 2016-12-08T01:56:56 | 75,601,588 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,621 |
py
|
import numpy as np
import pandas as pd
def threeSigma(series, threshold=3):
'''
Identify anomalies according to three sigma rule
Three Sigma Rule
----------------
std = standard deviation of data
mean = mean of data
if abs(x - mean) > 3 * std then x is an outlier
:param threshold: 3 is the default value. Change at your own risk
:param series: input data array
:return: Index array of where anomalies are
'''
series = np.array(list(series))
std = np.std(series) # Standard deviation
    avg = np.average(series)  # Mean
    anomaly_indexes = []
    for i in range(series.size):
        if abs(series[i] - avg) > threshold * std:
anomaly_indexes.append(i)
return anomaly_indexes
def iqr(series, threshold=3):
'''
Identify anomalies according to Inner-Quartile Range
IQR Rule
----------------
Q25 = 25 th percentile
Q75 = 75 th percentile
IQR = Q75 - Q25 Inner quartile range
if abs(x-Q75) > 1.5 * IQR : A mild outlier
if abs(x-Q75) > 3.0 * IQR : An extreme outlier
:param series: input data array
:param threshold: 1.5 mild, 3 extreme
:return: Index array of where anomalies are
'''
series = np.array(list(series))
q25 = np.percentile(series, 25)
q75 = np.percentile(series, 75)
iqr = q75 - q25
anomaly_indexes = []
for i in range(series.size):
if (series[i] - q75) > threshold * iqr:
anomaly_indexes.append(i)
return anomaly_indexes
def percentile_based(series, lower, upper):
'''
Remove anomalies based on the percentile
:param series: Input series
:param lower: Lower percentile as a fraction
:param upper: Upper percentile as a fraction
:return: Filtered series
'''
series = np.array(list(series))
q_lower = np.percentile(series, lower*100)
q_upper = np.percentile(series, upper*100)
anomaly_indexes = []
for i in range(series.size):
if series[i] < q_lower or series[i] > q_upper:
anomaly_indexes.append(i)
return anomaly_indexes
def filterData(panda_frame, columns, removal_method, threshold):
# Anomaly index container
rm_index = []
# Select anomaly removal type
if removal_method == "iqr":
for column in columns:
series = panda_frame[column]
anomaly = iqr(series, threshold)
rm_index.extend(anomaly)
elif removal_method == "threesigma":
for column in columns:
series = panda_frame[column]
            anomaly = threeSigma(series, threshold)
rm_index.extend(anomaly)
# Sort indexes
rm_index.sort()
anomaly_series = list(set(rm_index))
# Remove anomalies
p_filtered = panda_frame.drop(panda_frame.index[anomaly_series])
return p_filtered
def filterDataPercentile(panda_frame, columns, lower_percentile, upper_percentile, column_err_threshold, order='under'):
'''
Filter anomalies based on
:param panda_frame: Input data frame
:param columns: Columns that need to apply filter
    :param lower_percentile: Values below this level are considered anomalies
    :param upper_percentile: Values beyond this level are considered anomalies
    :param column_err_threshold: Per-column threshold: the number of columns in
        which a row must be flagged before the row is treated as an anomaly
:return:
'''
# Anomaly index container
rm_index = []
for column in columns:
series = panda_frame[column]
anomaly = percentile_based(series, lower_percentile, upper_percentile)
rm_index.extend(anomaly)
    counts = {}
    for i in rm_index:
        if i in counts:
            counts[i] += 1
        else:
            counts[i] = 1
    if order == 'under':
        anomaly_index = [x for x in counts.keys() if counts[x] <= column_err_threshold]
    elif order == 'above':
        anomaly_index = [x for x in counts.keys() if counts[x] >= column_err_threshold]
    # anomaly_count = [counts[x] for x in counts.keys() if counts[x] > column_err_threshold]
#
#
# plt.stem(anomaly_index, anomaly_count)
# plt.legend(['index', 'count'], loc='upper left')
# plt.title('Anomaly count')
# plt.show()
# Remove anomalies
p_filtered = panda_frame.drop(panda_frame.index[anomaly_index])
return p_filtered
def filterDataAutoEncoder(panda_frame, reconstruction_error, threshold):
'''
:param panda_frame: Input data frame
    :param reconstruction_error: Reconstruction error from auto-encoders
:param threshold: Anomaly removal threshold
:return:
'''
rm_index = []
for i in range(len(reconstruction_error)):
if reconstruction_error[i] > threshold:
rm_index.append(i)
p_filtered = panda_frame.drop(panda_frame.index[rm_index])
return p_filtered
def indices_seperate(panda_frame=pd, column_name=None):
'''
Indices at value changing points, For one dimensional array
:param column_name: Name of the column
:param panda_frame: Pandas data frame
    :return: Indices array
'''
column = []
if not column_name:
return
try:
column = panda_frame[column_name]
except KeyError:
# Key not found exception
return
if len(column) == 0:
# There is nothing to slice
return
column = np.array(column)
# Each index where the value changes
    indices = np.where(column[:-1] != column[1:])[0]
indices = np.insert(indices, len(indices), len(column) - 1, axis=0)
return indices
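if __name__ == '__main__':
    # Smoke test (added for illustration; not part of the original module).
    frame = pd.DataFrame({'a': [1.0, 1.1, 0.9, 1.0, 50.0]})
    print(iqr(frame['a']))                     # -> [4], index of the outlier
    print(filterData(frame, ['a'], 'iqr', 3))  # frame with row 4 dropped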
|
[
"roshan alwis"
] |
roshan alwis
|
e51206efcea2772f6ee55662e1707dd87a46f65b
|
8bd8a3ed654c47415cf0aa116e6feca21ca2e0c5
|
/grace/sermons/migrations/0003_auto_20171018_1015.py
|
50df3106f3fcb02dab2f23adc5cd24137c1f7c19
|
[] |
no_license
|
VladGavrilooff1234/grace
|
833e6f3397a76ee551bdc2549f31efe0dbc580ef
|
05590324501de5889b8749ed29cfc7610ff9ea6f
|
refs/heads/master
| 2020-04-07T20:19:14.029685 | 2019-05-06T14:47:11 | 2019-05-06T14:47:11 | 158,683,472 | 0 | 0 | null | 2018-11-22T10:39:22 | 2018-11-22T10:39:22 | null |
UTF-8
|
Python
| false | false | 888 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-18 07:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sermons', '0002_auto_20171016_1508'),
]
operations = [
migrations.AddField(
model_name='biblebook',
name='abbreviation',
field=models.CharField(blank=True, max_length=70, null=True),
),
migrations.AlterField(
model_name='biblebook',
name='part_of_bible',
field=models.CharField(choices=[('ot', 'Ветхий Завет'), ('nt', 'Новый Завет')], default='nt', max_length=2),
),
migrations.AlterField(
model_name='sermon',
name='audio',
field=models.CharField(max_length=255),
),
]
|
[
"[email protected]"
] | |
e07ee181de234248ff9216f45bdb1340e32fea69
|
d6b3bc97fec0237e5a367674aeb94ee292400d81
|
/graph.py
|
d56dc59e4997b7b485ab869350f3665671c8bbaa
|
[] |
no_license
|
bilaer/Algorithm-And-Data-Structure-Practices-in-Python
|
e75c63cfcae9939f14b87aeae293c83d575e0225
|
c031d299db8e112c3ffa0c21157bd677da8c4c33
|
refs/heads/master
| 2021-01-01T06:57:04.506788 | 2017-07-19T16:54:36 | 2017-07-19T16:54:36 | 97,559,184 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,210 |
py
|
import queue
import copy
class Vertex(object):
def __init__(self, vertexId):
self.vertexId = vertexId
self.vertexList = set()
self.vertexCost = dict()
def hasAdjVertex(self):
return len(self.vertexList) != 0
def addAdjVertex(self, v, weight):
self.vertexList.add(v)
self.vertexCost[v] = weight
def getNumOfAdjVertex(self):
return len(self.vertexList)
class Graph(object):
def __init__(self, vertexes):
self.vertex = vertexes
self.adjList = [Vertex(i) for i in range(vertexes + 1)]
self.cost = [0] * (vertexes + 1)
def getVertexNum(self):
return self.vertex
def getAdjVertexList(self, u):
if u >= 0 and u <= self.vertex:
return self.adjList[u].vertexList
def printGraph(self):
for i in range(self.vertex + 1):
print("%d:" %(i))
print(self.adjList[i].vertexList)
# UndirectGraph uses Graph as its base class
class UndirectGraph(Graph):
def __init__(self, vertexes):
super().__init__(vertexes)
def addEdge(self, u, v, weight=0):
if u <= self.vertex and u >= 0 and v <= self.vertex and v >= 0 and u != v:
self.adjList[u].addAdjVertex(v, weight)
self.adjList[v].addAdjVertex(u, weight)
else:
print("arguments is illegal!")
def addVertex(self):
self.vertex = self.vertex + 1
self.adjList.append(Vertex(self.vertex))
class DirectGraph(Graph):
def __init__(self, vertexes):
super().__init__(vertexes)
self.indegree = [0] * (self.vertex + 1)
def addEdge(self, u, v, weight=0):
if u <= self.vertex and u >= 0 and v <= self.vertex and v >= 0:
self.adjList[u].addAdjVertex(v, weight)
self.indegree[v] = self.indegree[v] + 1
def topsort(self):
# topNum is the list use to store the sort result
topNum = [0] * (self.vertex + 1)
q = queue.Queue()
counter = 0
indegreeTemp = copy.deepcopy(self.indegree)
        # Put all vertices with indegree 0 (no incoming edges) into the queue
for vertex in range(self.vertex + 1):
if self.indegree[vertex] == 0:
q.put(vertex)
while (not q.empty()):
v = q.get()
counter = counter + 1
topNum[v] = counter
vertexList = self.getAdjVertexList(v)
for u in vertexList:
indegreeTemp[u] = indegreeTemp[u] - 1
if indegreeTemp[u] == 0:
q.put(u)
# Cycle exists
if counter != (self.vertex + 1):
return []
return topNum
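    # Note (added): topNum[v] is the 1-based position of vertex v in one valid
    # topological order; an empty list means the graph contains a cycle.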
def breadthFirstSearch(self):
pass
def Dijkstra(self):
pass
if __name__=="__main__":
test = DirectGraph(6)
test.addEdge(0, 1)
test.addEdge(0, 6)
test.addEdge(0, 5)
test.addEdge(1, 2)
test.addEdge(1, 6)
test.addEdge(2, 6)
test.addEdge(2, 3)
test.addEdge(3, 4)
test.addEdge(5, 4)
test.addEdge(6, 3)
test.addEdge(6, 4)
test.addEdge(6, 5)
print(test.topsort())
print([0,1,2,3,4,5,6])
|
[
"[email protected]"
] | |
7e077f1e8b1f141b4fccb4ab0ce5ff8a66be4fa0
|
b6ab4693aa077097a8b503aeacafb53c8761aeaf
|
/src/falconpy/_endpoint/_falconx_sandbox.py
|
fa14f3158ccb5c07da33b520f35d12c0559f2b68
|
[
"Unlicense"
] |
permissive
|
woodtechie1428/falconpy
|
36bd2ed85f629b43e7644c2c29d369eda3800ff7
|
fcbec209f04a8d2340c66a5bea5c27c421f550d1
|
refs/heads/main
| 2023-08-03T09:14:10.930495 | 2021-09-10T15:58:39 | 2021-09-10T15:58:39 | 406,604,183 | 0 | 0 |
Unlicense
| 2021-09-15T03:44:50 | 2021-09-15T03:44:48 | null |
UTF-8
|
Python
| false | false | 13,381 |
py
|
"""
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
_endpoint._falconx_sandbox - Internal API endpoint constant library
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_falconx_sandbox_endpoints = [
[
"GetArtifacts",
"GET",
"/falconx/entities/artifacts/v1",
"Download IOC packs, PCAP files, and other analysis artifacts.",
"falconx_sandbox",
[
{
"type": "string",
"description": "ID of an artifact, such as an IOC pack, PCAP file, or actor image. "
"Find an artifact ID in a report or summary.",
"name": "id",
"in": "query",
"required": True
},
{
"type": "string",
"description": "The name given to your downloaded file.",
"name": "name",
"in": "query"
},
{
"type": "string",
"description": "Format used to compress your downloaded file. Currently, you must "
"provide the value `gzip`, the only valid format.",
"name": "Accept-Encoding",
"in": "header"
}
]
],
[
"GetSummaryReports",
"GET",
"/falconx/entities/report-summaries/v1?ids={}",
"Get a short summary version of a sandbox report.",
"falconx_sandbox",
[
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "csv",
"description": "ID of a summary. Find a summary ID from the response when submitting a "
"malware sample or search with `/falconx/queries/reports/v1`.",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"GetReports",
"GET",
"/falconx/entities/reports/v1?ids={}",
"Get a full sandbox report.",
"falconx_sandbox",
[
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "csv",
"description": "ID of a report. Find a report ID from the response when submitting a "
"malware sample or search with `/falconx/queries/reports/v1`.",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"DeleteReport",
"DELETE",
"/falconx/entities/reports/v1?ids={}",
"Delete report based on the report ID. Operation can be checked for success by polling for the "
"report ID on the report-summaries endpoint.",
"falconx_sandbox",
[
{
"type": "string",
"description": "ID of a report.",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"GetSubmissions",
"GET",
"/falconx/entities/submissions/v1?ids={}",
"Check the status of a sandbox analysis. Time required for analysis varies but is usually less than 15 minutes.",
"falconx_sandbox",
[
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "csv",
"description": "ID of a submitted malware sample. Find a submission ID from the response when submitting "
"a malware sample or search with `/falconx/queries/submissions/v1`.",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"Submit",
"POST",
"/falconx/entities/submissions/v1",
"Submit an uploaded file or a URL for sandbox analysis. Time required for analysis varies but is "
"usually less than 15 minutes.",
"falconx_sandbox",
[
{
"description": "Submit either a URL or a sample SHA256 for sandbox analysis. "
"The sample file must have been previously uploaded through `/samples/entities/samples/v2`. "
"You must specify a JSON object that includes the `falconx.SubmissionParametersV1` key/value pairs "
"shown below.\n\n**`environment_id`**: Specifies the sandbox environment used for analysis. "
"Values:\n\n- `300`: Linux Ubuntu 16.04, 64-bit\n- `200`: Android (static analysis)\n- `160`: "
"Windows 10, 64-bit\n- `110`: Windows 7, 64-bit\n- `100`: Windows 7, 32-bit\n\n**`sha256`** "
"ID of the sample, which is a SHA256 hash value. Find a sample ID from the response when uploading "
"a malware sample or search with `/falconx/queries/submissions/v1`.The `url` parameter must be unset "
"if `sha256` is used.\n\n**`url`** A web page or file URL. It can be HTTP(S) or FTP. The `sha256` "
"parameter must be unset if `url` is used.\n\n**`action_script`** (optional): Runtime script for "
"sandbox analysis. Values:\n\n- `default`\n- `default_maxantievasion`\n- `default_randomfiles`\n- "
"`default_randomtheme`\n- `default_openie`\n\n**`command_line`** (optional): Command line script "
"passed to the submitted file at runtime. Max length: 2048 characters\n\n**`document_password`** "
"(optional): Auto-filled for Adobe or Office files that prompt for a password. Max length: 32 "
"characters\n\n**`enable_tor`** (optional): If `true`, sandbox analysis routes network traffic via "
"TOR. Default: `false`.\n\n**`submit_name`** (optional): Name of the malware sample that's used for "
"file type detection and analysis\n\n**`system_date`** (optional): Set a custom date in the format "
"`yyyy-MM-dd` for the sandbox environment\n\n**`system_time`** (optional): Set a custom time in the "
"format `HH:mm` for the sandbox environment.",
"name": "body",
"in": "body",
"required": True
}
]
],
[
"QueryReports",
"GET",
"/falconx/queries/reports/v1",
"Find sandbox reports by providing an FQL filter and paging details. "
"Returns a set of report IDs that match your criteria.",
"falconx_sandbox",
[
{
"type": "string",
"description": "Optional filter and sort criteria in the form of an FQL query. "
"For more information about FQL queries, see [our FQL documentation in Falcon]"
"(https://falcon.crowdstrike.com/support/documentation/45/falcon-query-language-feature-guide).",
"name": "filter",
"in": "query"
},
{
"type": "string",
"description": "The offset to start retrieving reports from.",
"name": "offset",
"in": "query"
},
{
"type": "integer",
"description": "Maximum number of report IDs to return. Max: 5000.",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "Sort order: `asc` or `desc`.",
"name": "sort",
"in": "query"
}
]
],
[
"QuerySubmissions",
"GET",
"/falconx/queries/submissions/v1",
"Find submission IDs for uploaded files by providing an FQL filter and paging details. "
"Returns a set of submission IDs that match your criteria.",
"falconx_sandbox",
[
{
"type": "string",
"description": "Optional filter and sort criteria in the form of an FQL query. "
"For more information about FQL queries, see [our FQL documentation in Falcon]"
"(https://falcon.crowdstrike.com/support/documentation/45/falcon-query-language-feature-guide).",
"name": "filter",
"in": "query"
},
{
"type": "string",
"description": "The offset to start retrieving submissions from.",
"name": "offset",
"in": "query"
},
{
"type": "integer",
"description": "Maximum number of submission IDs to return. Max: 5000.",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "Sort order: `asc` or `desc`.",
"name": "sort",
"in": "query"
}
]
],
[
"GetSampleV2",
"GET",
"/samples/entities/samples/v2?ids={}",
"Retrieves the file associated with the given ID (SHA256)",
"falconx_sandbox",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"type": "string",
"description": "The file SHA256.",
"name": "ids",
"in": "query",
"required": True
},
{
"type": "string",
"default": False,
"description": "Flag whether the sample should be zipped and password protected with pass='infected'",
"name": "password_protected",
"in": "query"
}
]
],
[
"UploadSampleV2",
"POST",
"/samples/entities/samples/v2",
"Upload a file for sandbox analysis. After uploading, use `/falconx/entities/submissions/v1` to start analyzing the file.",
"falconx_sandbox",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"description": "Content of the uploaded sample in binary format. "
"For example, use `--data-binary @$FILE_PATH` when using cURL. Max file size: 100 MB.\n\n"
"Accepted file formats:\n\n- Portable executables: `.exe`, `.scr`, `.pif`, `.dll`, `.com`, `.cpl`, etc.\n"
"- Office documents: `.doc`, `.docx`, `.ppt`, `.pps`, `.pptx`, `.ppsx`, `.xls`, `.xlsx`, `.rtf`, `.pub`\n"
"- PDF\n- APK\n- Executable JAR\n- Windows script component: `.sct`\n- Windows shortcut: `.lnk`\n- "
"Windows help: `.chm`\n- HTML application: `.hta`\n- Windows script file: `.wsf`\n- Javascript: `.js`\n"
"- Visual Basic: `.vbs`, `.vbe`\n- Shockwave Flash: `.swf`\n- Perl: `.pl`\n- Powershell: `.ps1`, `.psd1`, `.psm1`\n"
"- Scalable vector graphics: `.svg`\n- Python: `.py`\n- Linux ELF executables\n"
"- Email files: MIME RFC 822 `.eml`, Outlook `.msg`.",
"name": "body",
"in": "body",
"required": True
},
{
"type": "file",
"description": "The binary file.",
"name": "upfile",
"in": "formData",
"required": True
},
{
"type": "string",
"description": "Name of the file.",
"name": "file_name",
"in": "query",
"required": True
},
{
"type": "string",
"description": "A descriptive comment to identify the file for other users.",
"name": "comment",
"in": "query"
},
{
"type": "boolean",
"default": True,
"description": "Defines visibility of this file in Falcon MalQuery, either via the API or the "
"Falcon console.\n\n- `true`: File is only shown to users within your customer account\n- `false`: "
"File can be seen by other CrowdStrike customers \n\nDefault: `true`.",
"name": "is_confidential",
"in": "query"
}
]
],
[
"DeleteSampleV2",
"DELETE",
"/samples/entities/samples/v2?ids={}",
"Removes a sample, including file, meta and submissions from the collection",
"falconx_sandbox",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"type": "string",
"description": "The file SHA256.",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"QuerySampleV1",
"POST",
"/samples/queries/samples/GET/v1",
"Retrieves a list with sha256 of samples that exist and customer has rights to access them, "
"maximum number of accepted items is 200",
"falconx_sandbox",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"description": "Pass a list of sha256s to check if the exist. It will be returned the list of existing hashes.",
"name": "body",
"in": "body",
"required": True
}
]
]
]
|
[
"[email protected]"
] | |
2478a4b7c246b5b1f9e6e2b017c98a41b9d31ff9
|
6d92a12a9e4627d447deeee2b2a804191568681d
|
/day18.py
|
3a491f9880b4e64124e38b2d4b991b1eccb042ed
|
[] |
no_license
|
mayojava/adventOfCode2020
|
704093ebff5eb1045842b4d46240232752f029f0
|
efbc6450354ec7c14c8f40117c681cdef3d30e4a
|
refs/heads/main
| 2023-02-06T04:34:10.207563 | 2020-12-25T10:56:48 | 2020-12-25T10:56:48 | 324,337,879 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,900 |
py
|
def main():
f = open('day18.txt', 'r')
sm = 0
for line in f:
line = line.strip()
stack = []
for i in range(0, len(line)):
current = line[i].strip()
if len(current) == 0: continue
if current == '(':
stack.append('(')
elif is_sign(current):
stack.append(current)
elif current.isdigit():
if len(stack) == 0:
stack.append(current)
elif is_sign(stack[-1]):
expr = [current]
while len(stack) > 0 and stack[-1] != '(' and stack[-1] != ')':
expr = [stack.pop(-1)] + expr
                    stack.append(evaluate(expr))
else: stack.append(current)
elif current == ')':
expr = []
while stack[-1] != '(':
expr = [str(stack.pop(-1))] + expr
stack.pop(-1)
                stack.append(evaluate(expr))
#print(stack)
if len(stack) == 1:
#print('res', stack[0])
sm += stack[0]
else:
res = int(stack[0])
for k in range(1, len(stack)):
if is_sign(stack[k]):
res = calculate(res, stack[k], stack[k+1])
sm += res
#print('res', res)
print(sm)
def evaluate(expr):
if len(expr) == 1:
return int(expr[0])
res = int(expr[0])
for i in range(1, len(expr)):
if is_sign(expr[i]):
res = calculate(res, expr[i], expr[i+1])
return res
def is_sign(ch):
return ch == '+' or ch == '*'
def calculate(op1, opr, op2):
if opr == '+':
return int(op1) + int(op2)
else:
return int(op1) * int(op2)
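# Note (added): this puzzle gives '+' and '*' equal precedence and evaluates
# expressions strictly left to right, which is why evaluate() folds each
# expression in order instead of honoring normal operator precedence.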
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
36cc5a4f3b24b18cffd92d7046c81ced4ac397e1
|
75089d61a7f985fc23a3e29e6517a744c1b5c76b
|
/data/__init__.py
|
ce8ecdf19bf474e1e68babbf0bdf5d154c34d3d5
|
[] |
no_license
|
KUR-creative/data-warehouse
|
e4e85ffa7cd0ec416bb67c62aef624bca6323370
|
e1e4720f8ad529291f2c36b1c284a6e4b15ac637
|
refs/heads/master
| 2023-01-21T14:21:54.533736 | 2020-12-03T06:34:20 | 2020-12-03T06:34:20 | 288,881,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 27 |
py
|
from . import (
raw,
)
|
[
"[email protected]"
] | |
9a50a114a51d30f2c76516fd25e221f73c1435c6
|
2c716b9fac8d47d392b910e861bb738f47e3fdca
|
/try_tensorflow/alignment/util.py
|
6209de19a03e000b915f1ce5bb7ef05aed9ae8db
|
[] |
no_license
|
easonnie/master-tensor
|
1eb3a043fea814e44c25d4e463f17b01c9c45d07
|
8cb2583f7634d1c03838e5e1bf8e03cbece1926a
|
refs/heads/master
| 2020-02-26T14:53:49.448212 | 2016-08-10T04:42:33 | 2016-08-10T04:42:33 | 62,082,239 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,072 |
py
|
import tensorflow as tf
def get_mask(input_len, max_time, batch_size):
"""
:param input_len: A tensor [batch_size] specify the input length of each Batch
:param max_time: Max time step
:param batch_size: Batch Size
:return: A mask for 1d sequence inputs [batch, max_time]
"""
lengths_transposed = tf.expand_dims(input_len, 1)
length_tile = tf.tile(lengths_transposed, [1, max_time])
range_ = tf.range(0, max_time, 1)
range_row = tf.expand_dims(range_, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
mask = tf.to_float(tf.less(range_tiled, length_tile))
return mask
def masked(inputs, input_len, batch_size=None, max_time=None):
if max_time is None:
max_time = int(inputs.get_shape()[1])
if batch_size is None:
batch_size = tf.shape(inputs)[0]
return inputs * get_mask(input_len, max_time, batch_size)
def get_mask2d(input_len, max_time, batch_size, dimension):
"""
:param input_len:
:param max_time: A scalar
:param batch_size:
:param dimension: Dimension of each elements in the sequence
:return: A mask for 2d sequence inputs [batch, max_time, dimension]
"""
mask = get_mask(input_len, max_time, batch_size)
mask_2d = tf.tile(tf.expand_dims(mask, dim=2), [1, 1, dimension])
return mask_2d
def masked2d(inputs, input_len, batch_size=None, max_time=None, d=None):
if d is None:
d = int(inputs.get_shape()[2])
if max_time is None:
max_time = int(inputs.get_shape()[1])
if batch_size is None:
batch_size = tf.shape(inputs)[0]
return inputs * get_mask2d(input_len, max_time, batch_size, d)
def _avg2d_along_time(inputs, input_len, d=None):
"""
:param input: Input tensor [batch, max_time, dimension]
:param input_len: Max time step for each sample [batch]
:param d: dimension. If not provided, it'll be the last dimension of the input
:return: Avg along time [batch, dimension]
"""
raw_sum = tf.reduce_sum(inputs, reduction_indices=1)
# if not d:
# d = int(inputs.get_shape()[2])
# len_tile = tf.tile(tf.expand_dims(tf.to_float(input_len), 1), [1, d])
# avg = raw_sum / len_tile
avg = raw_sum / tf.expand_dims(tf.to_float(input_len), 1)
return avg
def _sum2d_along_time(inputs, input_len, d=None):
"""
:param input: Input tensor [batch, max_time, dimension]
:param input_len: Max time step for each sample [batch]
:param d: dimension. If not provided, it'll be the last dimension of the input
:return: Avg along time [batch, dimension]
"""
raw_sum = tf.reduce_sum(inputs, reduction_indices=1)
return raw_sum
def avg2d_along_time(inputs, input_len, batch_size=None):
d = int(inputs.get_shape()[2])
max_time = int(inputs.get_shape()[1])
if batch_size is None:
batch_size = tf.shape(inputs)[0]
return _avg2d_along_time(masked2d(inputs, input_len, batch_size), input_len)
def sum2d_along_time(inputs, input_len, batch_size=None):
d = int(inputs.get_shape()[2])
max_time = int(inputs.get_shape()[1])
if batch_size is None:
batch_size = tf.shape(inputs)[0]
return _sum2d_along_time(masked2d(inputs, input_len, batch_size), input_len)
def last_relevant(inputs, input_len):
batch_size = tf.shape(inputs)[0]
max_length = int(inputs.get_shape()[1])
output_size = int(inputs.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (input_len - 1)
flat = tf.reshape(inputs, [-1, output_size])
relevant = tf.gather(flat, index)
return relevant
def softmax_on_score(inputs, input_len):
"""
:param inputs: [batch, time, 1]
:param input_len: [batch]
:return: [batch, time]
"""
max_length = int(inputs.get_shape()[1])
flatten_inputs = tf.reshape(inputs, [-1, max_length])
m_softmax = masked(tf.exp(flatten_inputs), input_len)
res_softmax = m_softmax / tf.reduce_sum(m_softmax, reduction_indices=[1], keep_dims=True)
return res_softmax
def weighted_sum(inputs, weights):
"""
:param inputs: [batch, max_length, dimension]
:param weights: [batch, max_length]
:return: [batch, dimension]
"""
d = int(inputs.get_shape()[2])
max_time = int(inputs.get_shape()[1])
# flat_inputs = tf.reshape(inputs, [-1, max_time, d])
flat_weights = tf.reshape(weights, [-1, max_time, 1])
result = tf.reduce_sum(tf.reshape(inputs * flat_weights, [-1, max_time, d]), reduction_indices=[1])
return result
if __name__ == '__main__':
pass
# from model.test.seq_1d_test_case import *
# mask = masked(inputs=inputs, input_len=times, batch_size=batch_size)
#
# with tf.Session() as sess:
# print(mask.eval(feed_dict=feed_dict))
#
# from model.test.seq_2d_test_case import *
# mask2d = avg2d_along_time(inputs=inputs, input_len=times, batch_size=batch_size)
# last = last_relevant(inputs, times)
#
# with tf.Session() as sess:
# print(mask2d.eval(feed_dict=feed_dict))
# print(last.eval(feed_dict=feed_dict))
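    # Tiny demo (added for illustration; TF1-style session like the rest of
    # this file): mask two sequences of lengths 2 and 3 at max_time 4.
    demo_mask = get_mask(tf.constant([2, 3]), max_time=4, batch_size=2)
    with tf.Session() as sess:
        print(sess.run(demo_mask))  # [[1. 1. 0. 0.], [1. 1. 1. 0.]]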
|
[
"[email protected]"
] | |
ee9cee9c908ac3278c8545a66f4d96149faae702
|
7ce05272d21c903abc85ebc74544009aacd80c82
|
/Advance_Python/Python_Database_Programming/Other/add_user_in_bank.py
|
5c2f984a2d9a435280b32ffcf34ffcf45b74ed87
|
[] |
no_license
|
sachinyadav3496/PythonInternBatch2018
|
8899a866f60a39b4c7eff4f5bc79ec2586833403
|
8e2610ad80c39ea747e8a6547ebe540e7b019a79
|
refs/heads/master
| 2021-06-26T09:18:58.178457 | 2020-10-03T09:49:32 | 2020-10-03T09:49:32 | 136,880,809 | 18 | 34 | null | 2020-10-03T09:49:33 | 2018-06-11T05:56:26 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 548 |
py
|
import pymysql as sql
db = sql.connect(host='localhost',port=3306,user='bank_app',password='redhat',database='bank_app')
c = db.cursor()
f = open('bank_data.csv')
data = []
for line in f :
d = line.split(',')
d[2] = float(d[2][:-1])
data.append(d)
f.close()
for var in data :
name = var[0]
password = var[1]
bal = var[2]
cmd = "insert into bank(user,password,bal) values('{}','{}',{})".format(name,password,bal)
c.execute(cmd)
db.commit()
print("Added data to bank sucessfully")
c.close()
db.close()
|
[
"[email protected]"
] | |
908d6b9bdd11c832f27b876675752c230f0dd8e9
|
901bfc797cc369c0bea21167ac471d0311cb93ac
|
/e3/DiffieHellman.py
|
bf11a36e45a5949541a91c675a66430dd0b9b984
|
[
"MIT"
] |
permissive
|
NigrumAquila/subject_cryptographic_protection
|
022216fd1481febc3a010efdfd11ab3398c73d00
|
2b4015b3c1b6d57391e866a70d308e78e5cab719
|
refs/heads/master
| 2021-03-17T15:58:10.590822 | 2020-05-01T06:30:54 | 2020-05-01T06:30:54 | 247,001,657 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 858 |
py
|
import __main__
if __main__.__file__ != 'main.py':
exit('run main.py')
from .DHlib.DHalg import encrypt, decrypt, getSharedSecret, printAllKeys, printParams
from lib.colors import *
from lib.duty import *
key = getSharedSecret()
printAllKeys()
while True:
    printParams()
    message = typedText('Enter message for Diffie-Hellman encryption: ')
printTextAndValue('Original message: ', message)
encrypted_message = encrypt(key, message)
try:
printTextAndValue('Encrypted message: ', encrypted_message)
except UnicodeError:
warning('\rYour encoding isn\'t UTF-8')
end('Please, restart it with "PYTHONIOENCODING=UTF-8 python main.py" or by IDE with utf8 encoding')
decrypted_message = decrypt(key, encrypted_message)
printTextAndValue('Decrypted message: ', decrypted_message)
repeatProcedure()
|
[
"[email protected]"
] | |
f5b2ea2f20edbedb90a3351960045e897c52f2c3
|
db98aeb4883d2aa9969970d353b9d6212c7dbde2
|
/lectures/07-python-dictionaries/examples/dna9.py
|
f71e08b870a9f5ce84946e6c88096ad74de04bfa
|
[
"MIT"
] |
permissive
|
qianwenluo/biosys-analytics
|
cec7e84477e01f9aa17e30c1fd8286710deed617
|
f936095931fa8f237de8bdf058b960db86effa49
|
refs/heads/master
| 2020-04-15T20:19:25.669143 | 2019-05-07T17:52:17 | 2019-05-07T17:52:17 | 164,988,099 | 0 | 1 |
MIT
| 2019-01-10T04:12:20 | 2019-01-10T04:12:20 | null |
UTF-8
|
Python
| false | false | 478 |
py
|
#!/usr/bin/env python3
"""Tetra-nucleotide counter"""
import sys
import os
from collections import defaultdict
args = sys.argv[1:]
if len(args) != 1:
print('Usage: {} DNA'.format(os.path.basename(sys.argv[0])))
sys.exit(1)
arg = args[0]
dna = ''
if os.path.isfile(arg):
dna = ''.join(open(arg).read().splitlines())
else:
dna = arg
count = defaultdict(int)
for base in dna.lower():
count[base] += 1
print(' '.join(map(lambda b: str(count[b]), "acgt")))
|
[
"[email protected]"
] | |
9179bc2056bb20944f1a658cd714cd709d8fbea2
|
75467b6481708e8f1eeb62da92bb6a028aa8477c
|
/AD/apps.py
|
c70ba505f4157b6d318b5183bcae08f5e3bf2518
|
[] |
no_license
|
hossein-76/radio_javan
|
a19936f55b40592acec75329fc448306f05cf98b
|
c520ea7cc464c79f9b448904b97b6c3e27800964
|
refs/heads/master
| 2022-12-20T20:29:52.162084 | 2019-07-26T16:09:52 | 2019-07-26T16:09:52 | 199,034,705 | 1 | 0 | null | 2022-12-08T01:22:48 | 2019-07-26T14:52:00 |
Python
|
UTF-8
|
Python
| false | false | 81 |
py
|
from django.apps import AppConfig
class AdsConfig(AppConfig):
name = 'ADs'
|
[
"[email protected]"
] | |
c73480ee59b61505ed6d24fb36af2b8ebb8a8f45
|
a0cf17db6095722c88b2e2d605c6ccf4e7a364dd
|
/consensuscluster/tests/test_get_ax_size.py
|
d36cbd97940f5f9bfe2830f0d3899d901a344cfc
|
[] |
no_license
|
GolLight/Single-cell-cluster-recognition-system
|
25aae8e671a85191e80fb12968009020060fc226
|
077885baf552393395c9a43449e3a105efe5c53b
|
refs/heads/master
| 2021-05-17T14:51:01.394256 | 2020-05-25T08:32:31 | 2020-05-25T08:32:31 | 250,829,627 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,240 |
py
|
"""Contains unit tests for the function _get_ax_size.
There are a lot of moving parts involved in writing these tests, so I
felt it was best to encapsulate them all in their own file. The reason
there's so much going on here is that we want to test a whole bunch of
combinations of:
* Various Figure-sizes
* Various DPI values
* Various methods of Figure creation
* Various patterns of Axes creation on the Figure
The goal is to test that _get_ax_size returns an answer that is within
a reasonable margin of the answer you'd get by hand. Because I just
grabbed this function off of stackoverflow without any evidence that it
was actually correct, it's important to really test the bejesus out of
it.
"""
from math import floor, ceil
import pytest
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from matplotlib.testing import decorators
from consensuscluster.plotutils import _get_ax_size
# These are the maximum fractions of error we're ok with.
# There are different fractions for the lower-bound and upper-bound
# because for _get_ax_size, we would prefer to overestimate rather
# than underestimate.
MAX_ERROR_LOWER = .06
MAX_ERROR_UPPER = .1
figsizes = [
(1, 1),
(3, 3),
(4, 4),
(4, 9),
(.87, .4445),
(5.829, 1)
]
dpis = [100, 43.793]
figure_creation_funcs = [
lambda figsize, dpi: Figure(figsize=figsize, dpi=dpi),
lambda figsize, dpi: plt.figure(figsize=figsize, dpi=dpi)
]
def _check_answer(true_width_pix, true_height_pix,
approx_width_pix, approx_height_pix):
"""Helper function for testing _get_ax_size.
Asserts that the answer found by _get_ax_size is within the
acceptable margin of error of the true answer (or at least,
whatever we're considering the true answer to be).
:param true_width_pix: True width of the Axes, in pixels.
:param true_height_pix: True height of the Axes, in pixels.
:param approx_width_pix: The width approximation returned by
_get_ax_size.
:param approx_height_pix: The height approximation returned by
_get_ax_size.
:return: nothing.
"""
# Here I round the bounds down/up to the nearest pixel depending on
# whether it's a lower or upper bound. The main reason for this is
# to make the tests for really small widths/heights more lax, bc
# often the approximate will be within a pixel of the real answer
# but the test will still fail.
# -Vicram
width_lower_bound = true_width_pix - MAX_ERROR_LOWER * true_width_pix
width_lower_bound = floor(width_lower_bound)
width_upper_bound = true_width_pix + MAX_ERROR_UPPER * true_width_pix
width_upper_bound = ceil(width_upper_bound)
height_lower_bound = true_height_pix - MAX_ERROR_LOWER * true_height_pix
height_lower_bound = floor(height_lower_bound)
height_upper_bound = true_height_pix + MAX_ERROR_UPPER * true_height_pix
height_upper_bound = ceil(height_upper_bound)
assert width_lower_bound <= approx_width_pix <= width_upper_bound
assert height_lower_bound <= approx_height_pix <= height_upper_bound
def _check_answer_subplots(fig, axarr, rows, cols,
total_width_pix, total_height_pix):
"""Check _get_ax_size on every Axes in an array of Axes (subplots).
This function will compute the "correct" width/height pixels using
the number of rows/cols and then check the output of _get_ax_size
against these for EACH Axes in the axarr.
:param fig: Parent Figure containing the subplots.
:param axarr: Array of Axes containing equal-sized subplots.
:param rows: Number of rows of subplots in the full Figure.
:param cols: Number of columns of subplots in the full Figure.
:param total_width_pix: Total width (in pixels) of the full Figure.
:param total_height_pix: Total height (in pixels) of the full
Figure.
:return: nothing.
"""
correct_width_sub = total_width_pix / cols # "True" width, in pixels
correct_height_sub = total_height_pix / rows
for i in range(rows):
for j in range(cols):
ax_sub = axarr[i, j]
(approx_width_sub, approx_height_sub) = _get_ax_size(
ax_sub,
fig
)
_check_answer(correct_width_sub, correct_height_sub,
approx_width_sub, approx_height_sub)
@pytest.mark.parametrize('figsize', figsizes)
@pytest.mark.parametrize('dpi', dpis)
@pytest.mark.parametrize('figfunc', figure_creation_funcs)
@decorators.cleanup
def test_ax_and_axarr(figsize, dpi, figfunc):
"""Test creating a single Axes then an Axes array on the same fig.
:return: nothing.
"""
(width, height) = figsize # True values, in inches
# True values, in pixels
width_pix = width * dpi
height_pix = height * dpi
fig = figfunc(figsize, dpi)
ax = fig.gca()
# ax should cover the entire figure.
(approx_width, approx_height) = _get_ax_size(ax, fig)
_check_answer(width_pix, height_pix,
approx_width, approx_height)
# Second, create a subplot on that same Figure
axarr = fig.subplots(5, 3)
_check_answer_subplots(fig, axarr, 5, 3,
width_pix, height_pix)
|
[
"[email protected]"
] | |
2ae309ab7516c2e17c6d104bf77aa92bce5dbd7d
|
26e91aead18d0fad6f5ce8fc4adf7d8e05a2f07f
|
/byceps/services/board/models/topic.py
|
ddc5451016c5f326ba92595817d09bd24677a035
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
leathe/byceps
|
40c1f8a1aab3521fcac45d88eab6364d448d4e67
|
cd0c618af63fed1cd7006bb67da46eac0ddbb1c7
|
refs/heads/master
| 2020-12-02T09:02:51.087511 | 2019-12-14T17:00:22 | 2019-12-14T17:00:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,529 |
py
|
"""
byceps.services.board.models.topic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from sqlalchemy.ext.associationproxy import association_proxy
from ....blueprints.board.authorization import (
BoardPermission,
BoardTopicPermission,
)
from ....database import BaseQuery, db, generate_uuid
from ....typing import UserID
from ....util.instances import ReprBuilder
from ...authentication.session.models.current_user import CurrentUser
from ...user.models.user import User
from ..transfer.models import CategoryID
from .category import Category
class TopicQuery(BaseQuery):
def for_category(self, category_id: CategoryID) -> BaseQuery:
return self.filter_by(category_id=category_id)
def only_visible_for_user(self, user: CurrentUser) -> BaseQuery:
"""Only return topics the user may see."""
if not user.has_permission(BoardPermission.view_hidden):
return self.without_hidden()
return self
def without_hidden(self) -> BaseQuery:
"""Only return topics every user may see."""
return self.filter(Topic.hidden == False)
class Topic(db.Model):
"""A topic."""
__tablename__ = 'board_topics'
query_class = TopicQuery
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
category_id = db.Column(db.Uuid, db.ForeignKey('board_categories.id'), index=True, nullable=False)
category = db.relationship(Category)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
creator_id = db.Column(db.Uuid, db.ForeignKey('users.id'), nullable=False)
title = db.Column(db.UnicodeText, nullable=False)
posting_count = db.Column(db.Integer, default=0, nullable=False)
last_updated_at = db.Column(db.DateTime, default=datetime.utcnow)
last_updated_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
last_updated_by = db.relationship(User, foreign_keys=[last_updated_by_id])
hidden = db.Column(db.Boolean, default=False, nullable=False)
hidden_at = db.Column(db.DateTime)
hidden_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
hidden_by = db.relationship(User, foreign_keys=[hidden_by_id])
locked = db.Column(db.Boolean, default=False, nullable=False)
locked_at = db.Column(db.DateTime)
locked_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
locked_by = db.relationship(User, foreign_keys=[locked_by_id])
pinned = db.Column(db.Boolean, default=False, nullable=False)
pinned_at = db.Column(db.DateTime)
pinned_by_id = db.Column(db.Uuid, db.ForeignKey('users.id'))
pinned_by = db.relationship(User, foreign_keys=[pinned_by_id])
initial_posting = association_proxy('initial_topic_posting_association', 'posting')
posting_limited_to_moderators = db.Column(db.Boolean, default=False, nullable=False)
def __init__(
self, category_id: CategoryID, creator_id: UserID, title: str
) -> None:
self.category_id = category_id
self.creator_id = creator_id
self.title = title
def may_be_updated_by_user(self, user: CurrentUser) -> bool:
return (
(
not self.locked
and user.id == self.creator_id
and user.has_permission(BoardTopicPermission.update)
)
or user.has_permission(BoardPermission.update_of_others)
)
@property
def reply_count(self) -> int:
return self.posting_count - 1
def count_pages(self, postings_per_page: int) -> int:
"""Return the number of pages this topic spans."""
full_page_count, remaining_postings = divmod(
self.posting_count, postings_per_page
)
if remaining_postings > 0:
return full_page_count + 1
else:
return full_page_count
def __eq__(self, other) -> bool:
return self.id == other.id
def __repr__(self) -> str:
builder = ReprBuilder(self) \
.add_with_lookup('id') \
.add('category', self.category.title) \
.add_with_lookup('title')
if self.hidden:
builder.add_custom(f'hidden by {self.hidden_by.screen_name}')
if self.locked:
builder.add_custom(f'locked by {self.locked_by.screen_name}')
if self.pinned:
builder.add_custom(f'pinned by {self.pinned_by.screen_name}')
return builder.build()
|
[
"[email protected]"
] | |
6f991dee711262f4d305164906f281370d56dcc8
|
4122761aa8fa29a57248d5cc6efa09c3aec22012
|
/utility/textpredictor.py
|
76724dd12ea355c163e292831b6614803c1cc42d
|
[] |
no_license
|
DeltaSierra4/SLFYCare
|
004b08cfee6b2a8a416e1dcf5f45e2c4f27c8bb4
|
625f83ff7830d29a9f0123e62c2bce0295757b1a
|
refs/heads/master
| 2020-12-27T06:07:55.625759 | 2020-02-02T15:22:26 | 2020-02-02T15:22:26 | 237,789,461 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,411 |
py
|
#!/usr/bin/env python
# coding: utf8
"""Train a convolutional neural network text classifier on the
IMDB dataset, using the TextCategorizer component. The dataset will be loaded
automatically via Thinc's built-in dataset loader. The model is added to
spacy.pipeline, and predictions are available via `doc.cats`. For more details,
see the documentation:
* Training: https://spacy.io/usage/training
Compatible with: spaCy v2.0.0+
"""
from __future__ import unicode_literals, print_function
import plac
import random
from pathlib import Path
import thinc.extra.datasets
import json
import spacy
from spacy.util import minibatch, compounding
@plac.annotations(
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
input_dir=("Optional input directory", "option", "o", Path),
n_texts=("Number of texts to train from", "option", "t", int),
n_iter=("Number of training iterations", "option", "n", int),
init_tok2vec=("Pretrained tok2vec weights", "option", "t2v", Path),
)
def main(model=None, input_dir="messagelist.json", n_iter=8, n_texts=7000, init_tok2vec=None):
model = "samplemodel"
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# add the text classifier to the pipeline if it doesn't exist
# nlp.create_pipe works for built-ins that are registered with spaCy
if "textcat" not in nlp.pipe_names:
textcat = nlp.create_pipe(
"textcat", config={"exclusive_classes": True, "architecture": "simple_cnn"}
)
nlp.add_pipe(textcat, last=True)
# otherwise, get it, so we can add labels to it
else:
textcat = nlp.get_pipe("textcat")
# add label to text classifier
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
# predict everything!
predictions = []
with open(input_dir, 'r') as f1:
datastore = json.load(f1)
for item in datastore:
currmess = item["results"]
doc = nlp(currmess)
if doc.cats["POSITIVE"] >= doc.cats["NEGATIVE"]:
predictions.append(2)
else:
predictions.append(-2)
#print(predictions)
return(predictions)
if __name__ == "__main__":
plac.call(main)
|
[
"[email protected]"
] | |
2ed5006395d6e55cc012484b9d82f09f074e11cf
|
8fc2ab3d29a30e603e19b30bb9517928de529167
|
/hackerank_whatsnext.py
|
2d44363c21597514612dc972cc035e6441f66752
|
[] |
no_license
|
rushilchugh/Practise
|
35a9861bec6786580dc0a440eb25d78e43cb7bc9
|
98fd593b95dad641bef1d519c6c6ed1daaae630f
|
refs/heads/master
| 2020-03-13T21:14:14.013604 | 2018-04-27T12:23:50 | 2018-04-27T12:23:50 | 131,291,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,765 |
py
|
__author__ = 'Rushil'
#SetCount(x) - Number of ones in an binary number x
#Johnny wants to find a binary number, D, that is the smallest binary number >B where setCount(B) = setCount(D)
#He then wants to compress D into an array of integers,C (in the same way that integer array A contains the compressed form of binary string B).
#Values in even represents consecutive 1
#Values in odd represents consecutive 0
from itertools import groupby
import re
#Given input 4 1 3 2 4
def get_bin_rep(num):
inp_text = num.replace(' ','')
f_str = ''
for index,char in enumerate(inp_text):
if index % 2 == 0:
f_str += '1'*int(char)
else:
f_str += '0'*int(char)
return f_str
def get_other_bin(bin_num):
occ_0 = 0
bin_num = list(bin_num)
if bin_num[-1] == '0':
f1_index = ''.join(bin_num).rfind('1')
bin_num[-1] = '1'
bin_num[f1_index] = '0'
return ''.join(bin_num)
for index,i in enumerate(bin_num):
if i == '0':
occ_0 = index
bin_num[occ_0] = '1'
bin_num[occ_0 + 1] = '0'
return ''.join(bin_num)
def make_rep(bin_num):
#11110111010111
f_str = ''
for i,j in groupby(bin_num):
f_str += str(len(list(j)))
f_str += ' '
return f_str
#
#print(get_other_bin('11110111001111'))
#print(make_rep('11110111001111'))
#print(make_rep(get_other_bin(get_bin_rep('4 1 3 2 4'))))
n = int(input().strip())
m_list = []
for i in range(n):
w_len = input().strip()
m_word = input().strip()
m_list.append(m_word)
for i in m_list:
f_sol = make_rep(get_other_bin(get_bin_rep(i)))
print(len(f_sol))
print(f_sol)
|
[
"[email protected]"
] | |
c25ffc67fbe5a254079333e36cdc5e64b0ab3001
|
af019c9443958d2f2497468e38e3e3a5a63c05bc
|
/main.py
|
e5ac0e580a4fe39d5e56022860fa55fb966cae82
|
[] |
no_license
|
jackieallam/day11-blackjack
|
c3679609e58b56e19de8efef242969b3da1879f6
|
14f12dbf632d5b7d18f90513b85c8b30fe8ae430
|
refs/heads/master
| 2023-06-15T15:06:40.781296 | 2021-06-29T15:02:31 | 2021-06-29T15:02:31 | 328,802,266 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,797 |
py
|
# ############## Blackjack Project #####################
import random
from art import logo
from os import system # system("clear")
play_again = True
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
end_message = {
"win_bj": "Win with a Blackjack 😎",
"lose_bj": "Lose, opponent has Blackjack 😱",
"win_over": "Opponent went over. You win 😁",
"lose_over": "You went over. You lose 😭",
"win": "You win 😃",
"lose": "You lose 😤",
"draw": "Draw 🙃",
}
def display_current_score():
print(f"Your cards: {player_cards}, current score: {sum(player_cards)}")
print(f"Computer's first card: {comp_cards[0]}")
def display_final_score():
print(f"Your final hand: {player_cards}, final score: {sum(player_cards)}")
print(f"Computer's final hand: {comp_cards}, final score: {sum(comp_cards)}")
def hit():
yes = input("Type 'y' to get another card, type 'n' to pass: ")
if yes == "y" or yes == "Y":
player_cards.append(random.choice(cards))
if (sum(player_cards) > 21) and (11 in player_cards):
player_cards[player_cards.index(11)] = 1 # Replace 11 with 1
display_current_score()
if sum(player_cards) <= 21:
hit()
def blackjack():
# For testing:
# player_cards.extend([11,2]) # For testing with specific cards
# comp_cards.extend([11,5])
player_cards.extend(random.sample(cards, 2))
comp_cards.extend(random.sample(cards, 2))
player_score = sum(player_cards)
comp_score = sum(comp_cards)
display_current_score()
if player_score <= 21:
hit()
while comp_score < 17:
comp_cards.append(random.choice(cards))
if 11 in comp_cards and sum(comp_cards) > 21:
comp_cards[comp_cards.index(11)] = 1 # Replace 11 with 1
comp_score = sum(comp_cards)
player_score = sum(player_cards)
display_final_score()
if player_score > 21:
end = end_message["lose_over"]
elif comp_score > 21:
end = end_message["win_over"]
elif comp_score == 21:
end = end_message["lose_bj"]
elif player_score > comp_score:
end = end_message["win"]
elif comp_score > player_score:
end = end_message["lose"]
elif comp_score == player_score:
end = end_message["draw"]
if player_score == 21:
if comp_score == 21:
end = end_message["draw"]
else:
end = end_message["win_bj"]
print(end)
while play_again:
ask = input("Do you want to play a game of Blackjack? Type 'y' or 'n': ")
if ask == "n" or ask == "N":
play_again = False
elif ask == "y" or ask == "Y":
system("clear")
player_cards = []
comp_cards = []
print(logo)
blackjack()
|
[
"[email protected]"
] | |
8c29871ccc383ff4fb417ccd8d0b558894d55db8
|
26ec8ef7f5ebba126d59ac88d0f026b0a936c539
|
/4_hard_findMedianSortedArrays.py
|
800a06f39fbdee5f11c913a170287a91a4137929
|
[] |
no_license
|
JohnnySu-SJTUer/LeetCodeLearning
|
d5187819e1ee7982f349bf901d43d80088c86f2d
|
bedf04fc1d1bdf6f775725151e1c2a98705ce737
|
refs/heads/master
| 2020-04-16T07:50:40.443027 | 2019-03-28T15:15:10 | 2019-03-28T15:15:10 | 165,401,418 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
py
|
class Solution:
    def findMedianSortedArrays(self, nums1: 'List[int]', nums2: 'List[int]') -> 'float':
        l = len(nums1) + len(nums2)
        if l % 2 == 1:
            return self.kth(nums1, nums2, l // 2)
        else:
            return (self.kth(nums1, nums2, l // 2) + self.kth(nums1, nums2, l // 2 - 1)) / 2

    def kth(self, a, b, k):
        # Find the k-th (0-indexed) element of the merged sorted arrays by
        # discarding half of one array at each step.
        if not a:
            return b[k]
        if not b:
            return a[k]
        i, j = len(a) // 2, len(b) // 2
        if i + j < k:
            if a[i] > b[j]:
                return self.kth(a, b[j + 1:], k - j - 1)
            else:
                return self.kth(a[i + 1:], b, k - i - 1)
        else:
            if a[i] > b[j]:
                return self.kth(a[:i], b, k)
            else:
                return self.kth(a, b[:j], k)


if __name__ == '__main__':
    nums1 = [1, 3]
    nums2 = [2]
    s = Solution()
    print(s.findMedianSortedArrays(nums1, nums2))
|
[
"[email protected]"
] | |
eaa3899de8a8f9ae7d1a85242e4f65dffb9305e0
|
ce00a7dcaa15b510aa9d2f639d7e4a94ee05a8a7
|
/virtualenviron/lib/python3.6/sre_constants.py
|
f0d376fda8e6cb44cd7d970470308e766ebeb872
|
[] |
no_license
|
karthikgr23/my-first-blog
|
102e8eb3827f49b1c7bfae41c9a5d2ef99619e72
|
8a5016338f7650d231f0b0418814dee2b4b427f2
|
refs/heads/master
| 2021-04-26T22:16:07.005499 | 2018-03-06T12:50:13 | 2018-03-06T12:50:13 | 123,902,613 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 56 |
py
|
/home/karthikgr/anaconda3/lib/python3.6/sre_constants.py
|
[
"[email protected]"
] | |
8d0e3b733f372a26073bb0aadd788b781e5a8715
|
d59db8f77bb046e298cd56a8ca4be8530937d19e
|
/LinkedList.py
|
a1c377353e5d041c5e020a1345ad485aa58aafbf
|
[] |
no_license
|
Kobyadu1/Class-Projects
|
de3006ae40e9e407adff754df00829f60dc4f97a
|
31afb26f50a3ba8de861d2e176b4a4807525aaf9
|
refs/heads/master
| 2022-12-11T23:32:55.278585 | 2020-09-05T18:26:05 | 2020-09-05T18:26:05 | 293,130,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,646 |
py
|
class ListNode:
prev = None
next = None
data = None
def __init__(self, prev, next, data):
self.prev = prev
self.next = next
self.data = data
class LList:
    def __init__(self, startdata):
        # Sentinel head/tail nodes, created per instance so lists don't share state.
        self.head = ListNode(None, None, None)
        self.tail = ListNode(None, None, None)
        startnode = ListNode(self.head, self.tail, startdata)
        self.head.next = startnode
        self.tail.prev = startnode
def addFirst(self, data):
node = ListNode(self.head, self.head.next, data)
self.head.next.prev = node
self.head.next = node
def add(self, data):
node = ListNode(self.tail.prev, self.tail, data)
self.tail.prev.next = node
self.tail.prev = node
def addIn(self, index, data):
count = 0
current = self.head.next
while count != index:
current = current.next
count += 1
node = ListNode(current.prev, current, data)
current.prev.next = node
current.prev = node
def addAll(self, collection):
for data in collection:
self.add(data)
    def addAllIn(self, index, collection):
        for data in collection:
            self.addIn(index, data)
            index += 1
def contains(self,data):
current = self.head.next
while current.next != self.tail:
if current.data == data:
return True
current = current.next
return False
def get(self, index):
count = 0
current = self.head.next
while count != index:
current = current.next
count += 1
return current
def getFirst(self):
return self.head.next
def getLast(self):
return self.tail.prev
def indexOf(self, data):
index = 0
current = self.head.next
while current != self.tail:
if current.data == data:
return index
current = current.next
index += 1
return -1
def print(self):
current = self.head.next
while current != self.tail:
print(current.data)
current = current.next
def lastindexOf(self, data):
index = -1
count = 0
current = self.head.next
while current != self.tail:
if data == current.data:
index = count
current = current.next
count += 1
return index
linked = LList(1)
linked.addAll([2,3,4,5,6,7,8,9,10])
linked.print()
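# Expected output (added note): the integers 1 through 10, one per line.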
|
[
"[email protected]"
] | |
6967ee97c749445740d9a111355532bd09a8007b
|
80dca48156a080f8f0e25c79f86b78c6dff95fc4
|
/apps/core/views.py
|
d9cb2954d5c83540d9bfc8328ce1b412681cd87d
|
[] |
no_license
|
gaguilarmi/react-firebase-reflux
|
7b18ff60a91b7db8440d6f1d21a12990900779dc
|
9e625dd9d316172f0bc2edbfe95c2bdc02150ccf
|
refs/heads/master
| 2020-12-24T16:14:53.540732 | 2015-07-08T14:58:54 | 2015-07-08T14:58:54 | 38,760,356 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
class IndexView(TemplateView):
template_name = 'core/index.html'
|
[
"[email protected]"
] | |
fbcf2765ede98d3822c8da95a849902cd2e9b39e
|
21f34c61ce7f3ca154b37aeb5dfd32d9ac3236e5
|
/leetcode/1295_even_digits.py
|
2c89087622b25c5c3070ce4b92b1df0c2ba97b2b
|
[] |
no_license
|
codingbbq/python-katas
|
b3b5fa15419766b37390e40e864ce90be2664f55
|
44241dae4394939f963787aab08d5e7c290dbb9a
|
refs/heads/master
| 2021-06-27T15:28:57.775778 | 2021-01-05T10:17:53 | 2021-01-05T10:17:53 | 184,697,423 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 584 |
py
|
# 1295. Find Numbers with Even Number of Digits
# https://leetcode.com/problems/find-numbers-with-even-number-of-digits/
from typing import List
class Solution:
    def findNumbers(self, nums: List[int]) -> int:
        count = 0
        for x in nums:
            if self.countDigit(x) % 2 == 0:
                count += 1
        return count
# Find the count of digits in each number
def countDigit(self, x:int) -> int:
digit = 0
while x!=0:
x//=10
digit+=1
return digit
print(Solution().findNumbers(nums=[555, 901, 482, 1771]))  # -> 1 (only 1771 has an even digit count)
|
[
"[email protected]"
] | |
d60d8e14e831c0191ef2510644b7bc3cce6a9b07
|
b11c263432483e2bea42f14c35e71e6a75e0f038
|
/jasper/client/modules/Weather.py
|
1797ca07ab340c27f671ca34a04864e157ae5622
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
pyroesque/jasper_home_automation
|
69f02e142d92e790f0e9b4dba0920b1c9730eb28
|
36abb590a876f986d0cebe5c1281ca9616a6d5d2
|
refs/heads/master
| 2020-05-31T14:25:15.735664 | 2015-09-02T19:50:22 | 2015-09-02T19:50:22 | 41,823,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,390 |
py
|
import re
import datetime
import feedparser
from app_utils import getTimezone
from semantic.dates import DateService
WORDS = ["WEATHER", "TODAY", "TOMORROW"]
def replaceAcronyms(text):
"""Replaces some commonly-used acronyms for an improved verbal weather report."""
def parseDirections(text):
words = {
'N': 'north',
'S': 'south',
'E': 'east',
'W': 'west',
}
output = [words[w] for w in list(text)]
return ' '.join(output)
acronyms = re.findall(r'\b([NESW]+)\b', text)
for w in acronyms:
text = text.replace(w, parseDirections(w))
text = re.sub(r'(\b\d+)F(\b)', '\g<1> Fahrenheit\g<2>', text)
text = re.sub(r'(\b)mph(\b)', '\g<1>miles per hour\g<2>', text)
text = re.sub(r'(\b)in\.', '\g<1>inches', text)
return text
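
# For example (illustrative note, not in the original module):
#   replaceAcronyms("High 72F. Winds NNE at 10 mph.")
#   -> "High 72 Fahrenheit. Winds north north east at 10 miles per hour."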
def getForecast(profile):
return feedparser.parse("http://rss.wunderground.com/auto/rss_full/"
+ str(profile['location']))['entries']
def handle(text, mic, profile, house):
"""
Responds to user-input, typically speech text, with a summary of
the relevant weather for the requested date (typically, weather
information will not be available for days beyond tomorrow).
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone number)
"""
if not profile['location']:
mic.say(
"I'm sorry, I can't seem to access that information. Please make sure that you've set your location on the dashboard.")
return
tz = getTimezone(profile)
service = DateService(tz=tz)
date = service.extractDay(text)
if not date:
date = datetime.datetime.now(tz=tz)
weekday = service.__daysOfWeek__[date.weekday()]
if date.weekday() == datetime.datetime.now(tz=tz).weekday():
date_keyword = "Today"
elif date.weekday() == (
datetime.datetime.now(tz=tz).weekday() + 1) % 7:
date_keyword = "Tomorrow"
else:
date_keyword = "On " + weekday
forecast = getForecast(profile)
output = None
for entry in forecast:
try:
date_desc = entry['title'].split()[0].strip().lower()
if date_desc == 'forecast': #For global forecasts
date_desc = entry['title'].split()[2].strip().lower()
weather_desc = entry['summary']
elif date_desc == 'current': #For first item of global forecasts
continue
else:
weather_desc = entry['summary'].split('-')[1] #US forecasts
if weekday == date_desc:
output = date_keyword + \
", the weather will be " + weather_desc + "."
break
except:
continue
if output:
output = replaceAcronyms(output)
mic.say(output)
else:
mic.say(
"I'm sorry. I can't see that far ahead.")
def isValid(text):
"""
Returns True if the text is related to the weather.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\b(weathers?|temperature|forecast|outside|hot|cold|jacket|coat|rain)\b', text, re.IGNORECASE))
|
[
"[email protected]"
] | |
1c932818631a21d3047779106c52d81d12e86a87
|
799510a1bf8a271e4dbdc7bf1548881e67b3eb84
|
/p-99/p09.py
|
267494664c368af52196b375b3dde44ae398e4db
|
[] |
no_license
|
foodhype/practice
|
a5b434cd64604f0d6b1cf013a9c49866b61b4207
|
0cf77cfd26b3d5cc71f80afdcfbf8046923c84fa
|
refs/heads/master
| 2021-01-10T19:44:33.185046 | 2015-03-05T23:48:26 | 2015-03-05T23:48:26 | 15,242,836 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
def pack(li):
packed = []
running_elem = None
current_run = None
for element in li:
if element == running_elem:
current_run.append(element)
else:
running_elem = element
current_run = [running_elem]
packed.append(current_run)
return packed
def main():
li = ['a', 'a', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'd', 'e', 'e', 'e', 'e']
assert pack(li) == [['a', 'a', 'a', 'a'], ['b'], ['c', 'c'], ['a', 'a'], ['d'], ['e', 'e', 'e', 'e']]
if __name__ == "__main__":
main()
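
# For comparison (an added sketch, not part of the original exercise):
# itertools.groupby produces the same packing:
#   from itertools import groupby
#   [list(g) for _, g in groupby(li)] == pack(li)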
|
[
"[email protected]"
] | |
fcf8a3b00da21365dc2045c95a6867034dcee9d5
|
f3d63f6138d389e1309d0ab979df2fd12722ced6
|
/sound2h.py
|
e37868951682c889610b931639715e61ecc93481
|
[
"MIT"
] |
permissive
|
TrashRobotics/HalloweenPumpkin
|
43b2199cc2876294f8f9e4259be6c9f75f997592
|
4ee6217512a527123efe528d72fc2c5feda60e7c
|
refs/heads/main
| 2023-08-23T22:00:04.137899 | 2021-10-22T08:42:50 | 2021-10-22T08:42:50 | 417,945,007 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
import re
fileName = 'test.raw'
name = "sound" #re.split('\.|/', fileName)[-2]
fin = open(fileName, 'rb')
fout = open(name + ".h", "wt")
audioLen = 0
audioData = '\t'
count = 0
byte = fin.read(1)
while byte:
audioData += hex(ord(byte)) + ', '
audioLen += 1
count += 1
if count >= 8:
audioData += "\n\t"
count = 0
byte = fin.read(1)
header = """
#define {nameUpper}_LENGTH {audioLen}
const uint8_t {name}_data[] PROGMEM = {{
{audioData}
}};
""".format(nameUpper=name.upper(), name=name, audioLen=audioLen, audioData=audioData)
print(header)
fout.write(header)
fin.close()
fout.close()
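
# Added note: the emitted header targets Arduino-style sketches (PROGMEM keeps
# the sample data in flash memory), e.g. `#include "sound.h"` from the sketch.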
|
[
"[email protected]"
] | |
3ccac70f2fe05cd2ec090c926330b1774bbfe340
|
47f24b482656bc9c3224a8f4b37ac0a1af5149f7
|
/Assignment_4/lab4-rollno/kmeans.py
|
1669d8cdf141b0c0747bb53a805ef03f9aad0ce3
|
[] |
no_license
|
anshuln/CS_335_Assignments_2020
|
55bc38cfae011c56c3566ff385149709626d8a0e
|
2fdab9e8da03d3ede3ea1d220269e614be7a4ba4
|
refs/heads/master
| 2023-01-30T10:37:56.591146 | 2020-12-18T09:53:30 | 2020-12-18T09:53:30 | 285,848,441 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,512 |
py
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(5927)
class KMeans():
def __init__(self, D, n_clusters):
self.n_clusters = n_clusters
self.cluster_centers = np.zeros((n_clusters, D))
    # NOTE: the bodies below are a minimal fill-in for the assignment's TODO
    # sections (one possible implementation, added so the file runs end to end).
    def init_clusters(self, data):
        ### TODO
        ### Initialize cluster_centers using n_clusters points sampled from data
        idx = np.random.choice(len(data), self.n_clusters, replace=False)
        self.cluster_centers = data[idx].astype(float)
        ### END TODO

    def pred(self, x):
        ### TODO: Given a sample x, return id of closest cluster center
        return int(np.argmin(np.linalg.norm(self.cluster_centers - x, axis=1)))
        ### END TODO

    def train(self, data, max_iter=10000, epsilon=1e-4):
        for it in range(max_iter):
            ### TODO
            ### Declare and initialize required variables
            ### Update labels for each point
            labels = np.array([self.pred(x) for x in data])
            ### Update cluster centers
            ### Note: If some cluster is empty, do not update the cluster center
            new_centers = self.cluster_centers.copy()
            for k in range(self.n_clusters):
                members = data[labels == k]
                if len(members) > 0:
                    new_centers[k] = members.mean(axis=0)
            ### Check for convergence
            ### Stop if distance between each of the old and new cluster centers is less than epsilon
            converged = np.all(np.linalg.norm(new_centers - self.cluster_centers, axis=1) < epsilon)
            self.cluster_centers = new_centers
            if converged:
                break
            ### END TODO
        return it
def replace_by_center(self, data):
out = np.zeros_like(data)
for i, x in enumerate(data):
out[i] = self.cluster_centers[self.pred(x)]
return out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image', default='1', choices=['1', '2', '3'])
parser.add_argument('--k', default=5, type=int)
args = parser.parse_args()
image = plt.imread(f'data/{args.image}.png')
x = image.reshape(-1, 3)
kmeans = KMeans(D=3, n_clusters=args.k)
kmeans.init_clusters(x)
kmeans.train(x)
out = kmeans.replace_by_center(x)
plt.imshow(out.reshape(image.shape))
plt.show()
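
# Typical invocation (added note, based on the argparse flags above):
#   python kmeans.py --image 1 --k 5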
|
[
"[email protected]"
] | |
3e348edb55816858db38924bb78bde6527da1d9b
|
867abdb2cd546e9ecfd92c8cf02bc928714603b6
|
/src/segment.py
|
eec347cbf81b5c84b662773a38fdc136a5824dec
|
[] |
no_license
|
sonatamint/pydevTest
|
985d414b14aaa321c54e561134a91e6b12408465
|
c517d7c8225fa15698b3398b18458a8fe57d716a
|
refs/heads/master
| 2020-04-22T00:29:41.625973 | 2019-02-10T13:31:24 | 2019-02-10T13:31:24 | 169,981,378 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 852 |
py
|
#encoding=utf-8
import os
import jieba
import re
inglist = open('C:/facts/ingList.txt', 'r', encoding="utf-8")
alllines = re.sub("\s+|\r\n", " ", inglist.read())
for wd in alllines.split(' '):
if len(wd)==1:
print(wd)
'''
jieba.add_word(wd)
outfile = open('C:/facts/ingredients.txt', 'w')
for root, dirs, files in os.walk("C:/facts/label_cut/result/"):
for f in files:
infile = open(root+f, 'r', encoding="utf-8")
instring = infile.read()
instring = re.sub("\W+", "", instring)
segs = jieba.cut(instring)
outstring = " ".join(segs)
outstring = re.sub(" [_丿] | [_丿]$"," ", outstring)
outstring = re.sub(" \d{1,2} | \d{1,2}$| [a-zA-Z]{1,2} | [a-zA-Z]{1,2}$"," ", outstring)
outfile.write(f+" "+outstring+"\n")
infile.close()
outfile.close()
'''
|
[
"[email protected]"
] | |
a4315be2234838908f42b3d2d0d042647f384c92
|
a80884040ce1c178274a3068d216f440dd541844
|
/tests/regression/test_tee_map_completion.py
|
07635a9b627669bb5320d9242fc4ef3be123bf53
|
[
"MIT"
] |
permissive
|
maki-nage/rxsci
|
a4aae51edc1ef684b55df22e34c11aa1d54ef740
|
915e59ebf593c4b313265bb87cf0e1209ec2ee0f
|
refs/heads/master
| 2023-01-19T14:32:11.638497 | 2023-01-17T08:06:35 | 2023-01-17T08:06:35 | 242,592,973 | 9 | 2 |
MIT
| 2022-11-08T21:54:16 | 2020-02-23T21:23:56 |
Python
|
UTF-8
|
Python
| false | false | 464 |
py
|
import rx
import rx.operators as ops
import rxsci as rs
def test_completion():
data = [1, 2, 3]
actual_data = []
actual_completed = []
rx.from_(data).pipe(
rs.ops.tee_map(
ops.count(),
rs.math.sum(reduce=True),
)
).subscribe(
on_next=actual_data.append,
on_completed=lambda: actual_completed.append(True)
)
assert actual_completed == [True]
assert actual_data == [(3, 6)]
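
# Reading the assertions above (added note): tee_map feeds the same source
# through both operator pipelines (count and reduced sum) and emits the zipped
# results, so [1, 2, 3] yields the single tuple (3, 6).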
|
[
"[email protected]"
] | |
a185967613d1e7eee7bce678e95b4793e1843f84
|
17792e118f537be0b02fd3eb515f77363ecdefed
|
/vod_webapp/vod_webapp/settings.py
|
9bf41884743379ad61ce04fceed2c1c2ffc9d153
|
[] |
no_license
|
hizni/dj-vod
|
380c5d174bda6d99f59c81ba1d11f9a352987b0a
|
ba77a5cb5331d6f170313dd48be71ba0e5aa7c9f
|
refs/heads/master
| 2021-01-01T16:38:51.755601 | 2017-07-21T14:22:36 | 2017-07-21T14:22:36 | 97,883,692 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,983 |
py
|
"""
Django settings for vod_webapp project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '92s=613h*8&5mx+f9^^3ou+h!*iw3195-t(a@p!rj1j5lv^s-8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'csv_upload',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'vod_webapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vod_webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'bower_components'),
]
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
[
"[email protected]"
] | |
28d7d37773b000b74a0651e75715b1992064c925
|
5d5f6ba3bdcb52b4750a5f28afa8a1a1019bfc9e
|
/python_basics/python_fundamentals/functionsIntermediate/functionsIntermediate1.py
|
d4c0b59e7de7fe8d6ba8eb493267361224b8c5de
|
[] |
no_license
|
eDiazGtz/pythonLearning
|
06e96f2f5a6e48ac314cb815cf9fbf65d0b7c2c8
|
57d7b2292cf5d9769cce9adf765962c3c0930d6c
|
refs/heads/master
| 2023-06-18T02:16:09.293375 | 2021-05-03T18:09:52 | 2021-05-03T18:09:52 | 335,090,531 | 0 | 0 | null | 2021-05-03T18:09:53 | 2021-02-01T21:35:24 |
Python
|
UTF-8
|
Python
| false | false | 1,200 |
py
|
import random
# random.random() returns a random floating number between 0.000 and 1.000
# random.random() * 50 returns a random floating number between 0.000 and 50.000
# random.random() * 25 + 10 returns a random floating number between 10.000 and 35.000
# round(num) returns num rounded to the nearest integer (note: Python 3 rounds .5 ties to the nearest even integer)
#print(randInt()) # should print a random integer between 0 to 100
#print(randInt(max=50)) # should print a random integer between 0 to 50
#print(randInt(min=50)) # should print a random integer between 50 to 100
#print(randInt(min=50, max=500)) # should print a random integer between 50 and 500
def randInt(min=0, max=100):
    span = max - min  # renamed from `range` to avoid shadowing the builtin
    if span < 0:
        return "Min must be less than Max; Max must be greater than 0"
    num = round(random.random() * span + min)
    return num
print(randInt()) # should print a random integer between 0 to 100
print(randInt(max=50)) # should print a random integer between 0 to 50
print(randInt(min=50)) # should print a random integer between 50 to 100
print(randInt(min=50, max=500)) # should print a random integer between 50 and 500
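
# Quick sanity check (added; not part of the original assignment): every
# sample should stay inside the inclusive [min, max] bounds.
assert all(0 <= randInt() <= 100 for _ in range(1000))
assert all(50 <= randInt(min=50, max=500) <= 500 for _ in range(1000))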
|
[
"[email protected]"
] | |
63d70140049c304b134777030f178f040cf88238
|
d0ecb327aa4f51b78a26434b5ac321aa1d4b7adf
|
/small.py
|
adbd1219a9a2d7b1450b377772853a715b168196
|
[
"MIT"
] |
permissive
|
CrownCrafter/School
|
853cd0297739c9e6b0e1fecb9d2820a625136ec3
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
refs/heads/main
| 2023-03-06T22:50:52.462224 | 2021-02-16T13:45:54 | 2021-02-16T13:45:54 | 327,005,394 | 0 | 1 |
MIT
| 2021-02-06T10:03:43 | 2021-01-05T13:21:32 |
Python
|
UTF-8
|
Python
| false | false | 191 |
py
|
#!/usr/bin/env python3
lis = []
while(True):
x = int(input("Enter number "))
lis.append(x)
i = input("Finished? ")
if(i == 'y'):
break
print(min(lis))
print(max(lis))
|
[
"[email protected]"
] | |
f7435178ccc683de1d2885f05319ad9cdb3527a9
|
64f6fc569ccace5a8fd3fa837d8d3815936b1cca
|
/len.py
|
4d5a492ed9ff17b1ffafb876684ca4415dedf00f
|
[] |
no_license
|
odairpedrosojunior001/Udacity_IPND
|
80cbf8031de74661202bd9420a33b3b16af8b08b
|
1306eb1417288fac09b45782597e018e7a3e8046
|
refs/heads/master
| 2020-03-29T07:27:21.948928 | 2019-02-05T00:24:02 | 2019-02-05T00:24:02 | 149,665,780 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 96 |
py
|
test_string_1 = "This is a good NOUN to use when you VERB your food"
print(len(test_string_1))
|
[
"[email protected]"
] | |
6075bf7ba52d2c689ce7e8d799b2bdfa2bb43e1b
|
4ad06bae18751fd71df145d126e3624ea90e05e6
|
/flat_sharp/interpolation.py
|
b8f5c77d79fd72a53d69363fa13955e41a1408be
|
[] |
no_license
|
daniellengyel/flat_sharp
|
04d82399e44d178e52c56acf1ba2ff3a75e4c27f
|
4a1e3f4abbebc7a5342aaa63080493b77aff5677
|
refs/heads/master
| 2021-04-22T16:14:26.238625 | 2020-07-09T13:03:44 | 2020-07-09T13:03:44 | 249,861,447 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,933 |
py
|
import numpy as np
from utils import *
from data_getters import *
from postprocessing import *
import copy
import torch
def interpolate_models(model1, model2, beta):
params1 = model1.named_parameters()
params2 = model2.named_parameters()
new_model = copy.deepcopy(model2)
new_params = new_model.named_parameters()
dict_new_params = dict(new_params)
for name1, param1 in params1:
if name1 in dict_new_params:
dict_new_params[name1].data.copy_((1. - beta) * param1.data + beta * dict_new_params[name1].data)
return new_model
def scale_output_model(model1, alpha):
if isinstance(model1, LeNet):
last_layer_names = ["fc3.weight", "fc3.bias"]
else:
last_layer_names = ["fc2.weight", "fc2.bias"]
params1 = model1.named_parameters()
new_model = copy.deepcopy(model1)
new_params = new_model.named_parameters()
dict_new_params = dict(new_params)
for name1, param1 in params1:
if name1 in last_layer_names:
dict_new_params[name1].data.copy_(alpha * param1.data)
return new_model
def T_alpha_models(model, num_inter_models, alpha_range):
    # Scale each copy's parameters by alpha (assumed intent, by analogy with
    # scale_output_model above; the original body referenced undefined names).
    inter_models_arr = []
    alphas = np.linspace(alpha_range[0], alpha_range[1], num_inter_models)
    for alpha in alphas:
        new_model = copy.deepcopy(model)
        dict_new_params = dict(new_model.named_parameters())
        for name1, param1 in model.named_parameters():
            dict_new_params[name1].data.copy_(alpha * param1.data)
        inter_models_arr.append(new_model)
    return inter_models_arr
def get_loss_grad(net, criterion, data):
inputs, labels = data
# Compute gradients for input.
inputs.requires_grad = True
net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs.float(), labels)
loss.backward(retain_graph=True)
param_grads = get_grad_params_vec(net)
return loss, torch.norm(param_grads)
def get_model_interpolate_arr(model_a, model_b, num_inter_models, beta_bound=None):
inter_models_arr = []
if beta_bound is None:
beta_bound = [0, 1]
betas = np.linspace(beta_bound[0], beta_bound[1], num_inter_models)
for beta in betas:
curr_model = interpolate_models(model_a, model_b, beta)
inter_models_arr.append(curr_model)
return inter_models_arr
def get_model_interpolate_2d(offset, v1, v2, num_inter_models, alpha_bound, beta_bound, func):
X = np.linspace(alpha_bound[0], alpha_bound[1], num_inter_models)
Y = np.linspace(beta_bound[0], beta_bound[1], num_inter_models)
v1_net = vec_to_net(v1, offset)
v2_net = vec_to_net(v2, offset)
v1_dict = dict(v1_net.named_parameters())
v2_dict = dict(v2_net.named_parameters())
val_arr = []
for x in X:
curr_arr = []
for y in Y:
curr_model = copy.deepcopy(offset)
dict_curr_model = dict(curr_model.named_parameters())
for name1, param1 in offset.named_parameters():
dict_curr_model[name1].data.copy_(dict_curr_model[name1].data + x * v1_dict[name1].data + y * v2_dict[name1].data)
to_append = func(curr_model)
curr_arr.append(to_append)
val_arr.append(curr_arr)
return val_arr
def project_onto(net, v1, v2, offset):
v1_norm = v1 / torch.norm(v1)
v2_norm = v2 / torch.norm(v2)
net_vect = get_params_vec(net) - get_params_vec(offset)
alpha = torch.matmul(v1_norm, net_vect)
beta = torch.matmul(v2_norm, net_vect)
return alpha, beta
def take_n_gd_steps(net, optimizer, criterion, data, n=1, get_grad=True, v1=None, v2=None, offset=None):
grads_arr = []
projections = []
if (v1 is not None) and (v2 is not None):
projections.append(project_onto(net, v1, v2, offset))
for _ in range(n):
inputs, labels = data
# Compute gradients for input.
inputs.requires_grad = True
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs.float(), labels)
loss.backward(retain_graph=True)
optimizer.step()
if (_ % 100) == 0:
print(_)
print(loss)
print()
if get_grad:
grads_arr.append(get_grad_params_vec(net))
if (v1 is not None) and (v2 is not None):
projections.append(project_onto(net, v1, v2, offset))
return net, grads_arr, projections
def do_the_do(model, optimizer, criterion, data_loader, num_inter_models, num_steps=1, beta_bound=None):
    data = next(iter(data_loader))
    model_a = copy.deepcopy(model)
    # take_n_gd_steps returns (net, grads, projections); keep only the net
    model_b = take_n_gd_steps(model, optimizer, criterion, data, n=num_steps)[0]
    inter_models = get_model_interpolate_arr(model_a, model_b, num_inter_models, beta_bound=beta_bound)
    return inter_models
exp_id = "1589992134.56161"
if __name__ == "__main__":
# get data
train_data, test_data = get_postprocessing_data(experiment_folder, vectorized=True)
train_loader = DataLoader(train_data, batch_size=10000, shuffle=True) # fix the batch size
test_loader = DataLoader(test_data, batch_size=len(test_data))
criterion = torch.nn.CrossEntropyLoss()
cfs_dict = exp_dict["stuff"]["configs"].loc[exp_id].to_dict()
nets = get_nets(cfs_dict)
optimizers = get_optimizers(cfs_dict)(nets)
inter_nets = []
for nn_idx in range(len(nets)):
inter_nets.append(do_the_do(nets[nn_idx], optimizers[nn_idx], criterion, train_loader, 20))
for nn_index in range(len(nets)):
y_val = inter_nets[nn_index][1][:, 1]
plt.plot(list(range(len(y_val))), y_val)
plt.show()
|
[
"[email protected]"
] | |
79544c774dc44900e7a3b50b31cd598892ed8018
|
ad9261738cd35560b3bcad7ef90edf6f4763c7a7
|
/Flags.py
|
3d9d8bf7f9250354cb24f696e64c36a53c6e897e
|
[
"MIT"
] |
permissive
|
hackerYM/codility-sample-code
|
a0c33f68b22c933abce448c3ab01f07c4c8126ef
|
de08519118a079eac1ed4cdfc566fe1f0fc0fe9b
|
refs/heads/master
| 2023-03-20T09:50:20.735641 | 2021-03-14T10:19:53 | 2021-03-14T10:19:53 | 175,593,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 617 |
py
|
def solution(A):
    # Codility "Flags": maximize the number of flags K that can be placed on
    # peaks such that any two flags are at least K apart.
    peaks = [idx for idx in range(1, len(A) - 1) if A[idx - 1] < A[idx] > A[idx + 1]]
    if len(peaks) < 3:
        return len(peaks)
    # nexts[i] = index of the first peak at position >= i.
    nexts = []
    for peak in peaks:
        nexts += [peak] * (peak - len(nexts) + 1)
    # K flags need (K-1) gaps of length >= K, so K*(K-1) <= span of the peaks.
    max_flag = [i for i in range(len(A)) if i * (i - 1) <= peaks[-1] - peaks[0]][-1]
    for flag in range(max_flag, 2, -1):
        distances, cur_peak = 0, peaks[0]
        while cur_peak < peaks[-1] and cur_peak + flag < len(nexts):
            distances += 1
            cur_peak = nexts[cur_peak + flag]
        if distances + 1 >= flag:
            return flag
    return 2
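
# Example (added for illustration -- the classic Codility test case):
#   solution([1, 5, 3, 4, 3, 4, 1, 2, 3, 4, 6, 2]) -> 3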
|
[
"[email protected]"
] | |
8863ba9a0134655bdf9da3eb44538128cf32f2d2
|
62e1ca03e2afa0fcaa386b075ad514e3dcaa8b6f
|
/I0320039_soal2_tugas3.py
|
e89f72e7590aff9bec249cdc069ff918f6511d54
|
[] |
no_license
|
Fahruddinari/Fahruddin_Ari_Wicaksono_I0320039_Andhika_Tugas3
|
852230856180d578ddc7dbb64471d594b7e6b4d2
|
6cc707c2f4eb62e7e893954881d36b19439717a1
|
refs/heads/main
| 2023-03-22T23:07:59.090902 | 2021-03-19T10:53:24 | 2021-03-19T10:53:24 | 349,378,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,019 |
py
|
# Build a dictionary program
biodata = {'nama': 'Fahruddin Ari',
'hobi1':'ngegame',
'hobi2':'lari',
'hobi3':'bermain bulu tangkis',
'sosmed1':'uddinaja_',
'sosmed2':'uddinsukamamba',
'sosmed3':'lambe_moba',
'lagu1':'balonku ada 5',
'lagu2':'topi saya bundar',
'lagu3':'pok ame ame',
'makanan1':'mie ayam',
'makanan2':'bubur ayam',
'makanan3':'sate katak'}
# Change one hobby and one social media handle
biodata['hobi2'] = 'futsal'
print("biodata['hobi2']: ",biodata['hobi2'])
biodata['sosmed2'] = 'arie_untunk'
print("biodata['sosmed2']: ",biodata['sosmed2'])
print("++++++++++++++++++")
# Delete two favorite foods
del biodata['makanan1']
del biodata['makanan2']
for x,y in biodata.items():
print(x,y)
print(biodata)
print("++++++++++++++++++++")
# Add one hobby
biodata['hobi4'] = 'catur'
for a,b in biodata.items():
print(a,b)
print(biodata)
print("++++++++++++++++++++")
|
[
"[email protected]"
] | |
2276a5e908cbc0767016fa9235b0889ad1a2b7b8
|
504a44be2a37170137139596c4ef34c27b6242e4
|
/app/admin/Customer/playground/config/wsgi.py
|
d2399f7a8c05118c77c0f1403ee1adc31d672abc
|
[] |
no_license
|
princessjacob/Finals-Webtek-Lab
|
7ad915eaa4a6d9c1ec9905c7aa45af49f6e59443
|
979f558070ad7d1a5483501828d9ceb7bfeeb228
|
refs/heads/master
| 2021-01-19T20:11:50.353926 | 2017-05-19T13:56:23 | 2017-05-19T13:56:23 | 88,492,569 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
"""
WSGI config for petkovetmo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
55bdca8cf8a2c528a2289905984cbaea270cd012
|
6c6d0666de6087f173530023e2f2fb622023e1c1
|
/gothonweb/helpblock.py
|
4f9c4bf9440f3d2ae455ad07ff2332b7c376d907
|
[] |
no_license
|
BoroGor/gothonweb
|
40b49176c7c72225528a51a2f522fa9ce67aca39
|
e67f6c13d60953576b2e20a8f629655795c3f07c
|
refs/heads/master
| 2023-08-25T13:12:44.831612 | 2021-09-14T05:45:34 | 2021-09-14T05:45:34 | 401,238,863 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 274 |
py
|
tips = {'Central Corridor': ['shoot', 'dodge', 'tell a joke'],
'Laser Weapon Armory': ['ZERO ONE TREE TWO'],
'The Bridge': ['throw the bomb', 'slowly place the bomb'],
'Escape Pod': ['2'],
}
def help(roomname):
return tips.get(roomname)
|
[
"[email protected]"
] | |
a6da7c02ce3322c5d0e085019208ee3f4c9c7d5a
|
5f8b1beafc1a293f21d45a30247348cfc6f99299
|
/gigasecond/gigasecond.py
|
d863071267eb095e9f1a10ee1756c62cc8487719
|
[] |
no_license
|
vineel96/PythonTrack-Exercism
|
aca3d16b0ca352060a7ab16c2627f44695f6b855
|
c756cf38e39aa6347199cf3143f8b401345ad832
|
refs/heads/master
| 2020-06-20T17:43:48.972591 | 2019-07-30T06:54:51 | 2019-07-30T06:54:51 | 197,196,771 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 110 |
py
|
from datetime import datetime , timedelta
def add(moment):
return moment + timedelta(seconds=pow(10,9))
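
# Illustrative check (added; matches the canonical exercism test case):
#   add(datetime(2011, 4, 25)) -> datetime(2043, 1, 1, 1, 46, 40)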
|
[
"[email protected]"
] | |
d588e6478802c54e8e6dc92004a337bcbaa10169
|
2f8de36f19a8269aacf696b88e71e662e97eb3d8
|
/auctions/models.py
|
e02f3e3950c5b4e83b6a04d90e14af3afc8abd51
|
[] |
no_license
|
Tomnowell/cs50w-commerce
|
30906ba6fadcdf94a8820173b99b70e325fd925c
|
41631f98e24715389c04ce2a951b8d2bcd77cfac
|
refs/heads/main
| 2023-02-24T14:09:40.593241 | 2021-01-29T12:38:45 | 2021-01-29T12:38:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,607 |
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.core.validators import MinValueValidator
from datetime import datetime, timedelta
from django.utils import timezone
class User(AbstractUser):
def __str__(self):
return f"{self.username}"
class Category(models.Model):
name = models.CharField(max_length=32)
class Meta:
ordering = ('name',)
def __str__(self):
return f"{self.name}"
@property
def count_active_auctions(self):
return Auction.objects.filter(category=self).count()
class Auction(models.Model):
item_name = models.CharField(max_length=64)
item_description = models.TextField(max_length=800)
image = models.ImageField(
blank = True,
null = True,
upload_to =''
)
category = models.ForeignKey(
Category,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="auctions"
)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
DURATIONS = (
(3, "Three Days"),
(7, "One Week"),
(14, "Two Weeks"),
(28, "Four Weeks")
)
duration = models.IntegerField(choices=DURATIONS)
ended_manually = models.BooleanField(default=False)
start_bid = models.DecimalField(
max_digits=7,
decimal_places=2,
validators=[MinValueValidator(0.01)]
)
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name="auctions"
)
watchers = models.ManyToManyField(
User,
blank=True,
related_name="watchlist"
)
class Meta:
ordering = ('-end_time',)
def __str__(self):
return f"Auction #{self.id}: {self.item_name} ({self.user.username})"
def save(self, *args, **kwargs):
self.start_time = datetime.now()
self.end_time = self.start_time + timedelta(days=self.duration)
super().save(*args, **kwargs) # call existing save() method
def is_finshed(self):
if self.ended_manually or self.end_time < timezone.now():
return True
else:
return False
class Bid(models.Model):
amount = models.DecimalField(max_digits=7, decimal_places=2)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="bids")
auction = models.ForeignKey(Auction, on_delete=models.CASCADE, related_name="bids")
class Meta:
ordering = ('-amount',)
def __str__(self):
return f"Bid #{self.id}: {self.amount} on {self.auction.item_name} by {self.user.username}"
class Comment(models.Model):
message = models.TextField(max_length=255)
time = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="comments")
auction = models.ForeignKey(Auction, on_delete=models.CASCADE, related_name="comments")
class Meta:
ordering = ('-time',)
def __str__(self):
return f"Comment #{self.id}: {self.user.username} on {self.auction.item_name}: {self.message}"
|
[
"[email protected]"
] | |
e8cb6b230d208935d065fedcf70f0c591e8ba666
|
8bdd86dd0ae6b6f7aae17ff0ef2d887afd06d2fa
|
/examples/sharecuts.py
|
e78cac870e69efa96b9030c63a0ef69e72d5fb6a
|
[
"MIT"
] |
permissive
|
shuxiaokai/looter
|
b0504600e4d5730eff2aab27fbe19d2fd5fb1f18
|
2be094576e31fd13123719ca94e42cb31475dffa
|
refs/heads/master
| 2023-04-18T01:19:51.827004 | 2020-05-17T08:11:28 | 2020-05-17T08:11:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 849 |
py
|
"""
捷径社区的捷径排行榜
"""
from pprint import pprint
import requests
import looter as lt
domain = 'https://sharecuts.cn'
total = []
def crawl(url):
items = requests.get(url, headers=lt.DEFAULT_HEADERS).json()
for item in items:
data = {}
data['name'] = item['name']
data['category'] = item['Category']['name']
data['note'] = item['note']
data['author'] = item['User']['nickname']
data['url'] = item['url']
data['downloads'] = item['downloads_count']
data['votes'] = item['votes_count']
data['comments'] = item['comments_count']
pprint(data)
total.append(data)
if __name__ == '__main__':
task = f'{domain}/api/shortcuts/hot?offset=0&limit=1025'
crawl(task)
lt.save(total, name='sharecuts.csv', sort_by='votes', order='desc')
|
[
"[email protected]"
] | |
4af6502a710b7d7100d9f5e384a09128caf93cb0
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/Cameo/filter.py
|
90ff87c95582c437a4e823c03071524feb09e16e
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 46 |
py
|
import cv2
import numpy as np
import dateutil
|
[
"[email protected]"
] | |
c8770ff0014c49e8aef32a4df572380d038204df
|
23f3349e8b50f0cb3e461bbd65c1ea8dec792d0b
|
/2_semestr/lec_05.py
|
683d1a2c6fabbc138893e0adaa2d19cb1db944a8
|
[] |
no_license
|
JacobLutin/Python_bmstu
|
d17866dbab0e74f0f9d600c4dbd9d53eb5c5b7be
|
66fd8679de7556978b9cd1e9fd8646a8d7d6daa8
|
refs/heads/master
| 2020-05-29T14:40:09.310602 | 2017-03-27T05:18:58 | 2017-03-27T05:18:58 | 64,742,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
import numpy as np

a = np.arange(12)
a1 = np.copy(a)
print("Original matrix")
a2 = np.reshape(a1, (3, 4))
print(a2, '\n')
a2 = a2.T
print("Transposed matrix")
print(a2, '\n')

# min, max, sum, sorting
b = np.array([[2, 8, 0], [6, 1, 3], [4, 7, 5]])
print("New source matrix\n", b, '\n')
dsum = b.sum()
dmin = b.min()
dmax = b.max()
print('Some values over the whole matrix')
print('sum=', dsum, ' min=', dmin, ' max=', dmax, '\n')
mincol = b.min(axis=0)
maxrow = b.max(axis=1)
print('min and max values for columns and rows')
print('min per column = ', mincol, ' max per row = ', maxrow, '\n')

# Description of the sort function:
# sort(axis=-1, kind='quicksort', order=None)
# axis - the axis along which to sort.
# kind - the sorting algorithm. Possible values: 'quicksort', 'mergesort', 'heapsort'
c = b.copy()
c.sort(axis=0, kind='mergesort')
print('Sorted columns\n', c)
print()
c = b.copy()
c.sort(axis=1, kind='mergesort')
print('Sorted rows\n', c)
print()
|
[
"[email protected]"
] | |
1735148bff67a24d8a32629ee813a3ceab208dcd
|
fc66ee8b51974e23d8931ed0bef895d42c5a806d
|
/Reversi.py
|
9e639ee60964aff6a0197f481f1de32399c4dec8
|
[] |
no_license
|
sirigowda/Reversi-Game
|
d87434f11420935b235acd93f8d88d24e2c46611
|
d44a6f83a42a27149f9cf20fa5e9eb822339d24b
|
refs/heads/master
| 2021-01-21T22:54:52.437683 | 2017-09-20T01:03:09 | 2017-09-20T01:03:09 | 102,177,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,876 |
py
|
import copy
import constants
import Pawn
class Reversi:
moves_made = []
state = [[]]
moves = ""
moves_list = []
# start game
def __init__(self, intialstate, startplayer, maxdepth):
self.state = intialstate
self.maxdepth = maxdepth
self.startplayer = startplayer
if self.startplayer == 'X':
self.opponent = 'O'
else:
self.opponent = 'X'
#evaluates state of board
def evaluate(self, state_of_board):
blackWeight = 0
whiteWeight = 0
for row in range(0, 8, 1):
for column in range(0, 8, 1):
if state_of_board[row][column] == self.startplayer:
blackWeight += constants.UTILITY_VALUES[row][column]
elif state_of_board[row][column] == self.opponent:
whiteWeight += constants.UTILITY_VALUES[row][column]
return blackWeight - whiteWeight
# check if row, column within range
def outofrange(self, row, column):
rowColumnRange = range(0, 8)
if row in rowColumnRange and column in rowColumnRange:
return 1
return -1
# returns result obtained from executing action on current state
def result(self, state, action, current):
resultstate = copy.deepcopy(state)
if current == 'X':
opponentpawn = 'O'
else:
opponentpawn = 'X'
neighbourRow = action.row
neighbourColumn = action.column
resultstate[neighbourRow][neighbourColumn] = current
for rowDirection, columnDirection in constants.ROW_COL_NEIGHBOURS:
# check in the direction of every neighbouring node
neighbourRow = action.row
neighbourColumn = action.column
neighbourRow += rowDirection
neighbourColumn += columnDirection
if self.outofrange(neighbourRow, neighbourColumn) == -1 or resultstate[neighbourRow][
neighbourColumn] == current:
continue
            # move in this direction as long as you keep encountering the opponent's pawn
            # and stop when you encounter anything else or go out of range
while self.outofrange(neighbourRow, neighbourColumn) == 1 and resultstate[neighbourRow][
neighbourColumn] == opponentpawn:
neighbourRow += rowDirection
neighbourColumn += columnDirection
if self.outofrange(neighbourRow, neighbourColumn) == 1 and resultstate[neighbourRow][
neighbourColumn] == current:
neighbourRow -= rowDirection
neighbourColumn -= columnDirection
while resultstate[neighbourRow][neighbourColumn] == opponentpawn:
# update state
resultstate[neighbourRow][neighbourColumn] = current
# update pawns
# iterate
neighbourRow -= rowDirection
neighbourColumn -= columnDirection
return resultstate
# returns all possible valid actions on current state
# state does not get altered by this method
def getPossibleActions(self, state, currentplayer):
actions = []
if currentplayer == 'X':
opponentpawn = 'O'
else:
opponentpawn = 'X'
for row in range(0, 8, 1):
for column in range(0, 8, 1):
if state[row][column] == currentplayer:
for rowDirection, columnDirection in constants.ROW_COL_NEIGHBOURS:
neighbourRow = copy.copy(row)
neighbourColumn = copy.copy(column)
neighbourRow += rowDirection
neighbourColumn += columnDirection
if (self.outofrange(neighbourRow, neighbourColumn) == -1 or state[neighbourRow][
neighbourColumn] != opponentpawn):
continue
if state[neighbourRow][neighbourColumn] == opponentpawn:
self.no_opponant = False
                        # move in this direction as long as you keep encountering the opponent's pawn
                        # and stop when you encounter anything else or go out of range
while self.outofrange(neighbourRow, neighbourColumn) == 1 and state[neighbourRow][
neighbourColumn] == opponentpawn:
neighbourRow += rowDirection
neighbourColumn += columnDirection
duplicate = False
if self.outofrange(neighbourRow, neighbourColumn) == 1 and state[neighbourRow][
neighbourColumn] == '*':
for action in actions:
if action.row == neighbourRow and action.column == neighbourColumn:
duplicate = True
if duplicate != True:
actions.append(Pawn.Pawn(neighbourRow, neighbourColumn))
# if it is on board all pawns encountered till now must be changed
return actions
def cutoff_test(self, state, depth, currentplayer):
if (depth == self.maxdepth):
return -1
return self.getPossibleActions(state, currentplayer)
def node(self, nodevalue):
if nodevalue == "root":
return "root"
elif nodevalue == "pass":
return "pass"
else:
start = ord('a') - 1
return chr(start + nodevalue.column + 1) + str(nodevalue.row + 1)
def max_value(self, max_state, max_alpha, max_beta, max_depth, max_current_player, pass_turn):
if max_current_player == 'X':
min_opponent_player = 'O'
else:
min_opponent_player = 'X'
validActions = self.cutoff_test(max_state, max_depth, max_current_player)
terminal_node = self.moves_made[-1]
v = float('-inf')
# When depth==maxdepth
if validActions == -1:
self.print_move(self.node(terminal_node), max_depth, str(self.evaluate(max_state)),
max_alpha, max_beta)
return self.evaluate(max_state)
# When there are no possible actions in current state
elif len(validActions) == 0:
# When the previous turn was a pass too, terminate the game
if pass_turn == True:
self.print_move(self.node(terminal_node), max_depth, v, max_alpha, max_beta)
v = self.evaluate(max_state)
terminal_node = self.moves_made[-1]
self.print_move(self.node(terminal_node), max_depth + 1, v, max_alpha, max_beta)
if (v >= max_beta):
self.print_move(self.node(terminal_node), max_depth, v, max_alpha, max_beta)
return v
max_alpha = max(max_alpha, v)
self.print_move(self.node(terminal_node), max_depth, v, max_alpha, max_beta)
return v
# PASS MOVE
self.print_move(self.node(terminal_node), max_depth, v, max_alpha, max_beta)
self.moves_made.append("pass")
v = self.min_value(max_state, max_alpha, max_beta, max_depth + 1, min_opponent_player, True)
self.moves_made.pop()
if (v >= max_beta):
self.print_move(self.node(self.moves_made[-1]), max_depth, v, max_alpha, max_beta)
return v
max_alpha = max(max_alpha, v)
self.print_move(self.node(self.moves_made[-1]), max_depth, v, max_alpha, max_beta)
return v
self.print_move(self.node(terminal_node), max_depth, v, max_alpha, max_beta)
validActions = sorted(validActions, key=lambda a: (a.row, a.column))
# When there are valid actions
for action in validActions:
self.moves_made.append(action)
v = max(v, self.min_value(self.result(max_state, action, max_current_player),
max_alpha, max_beta, max_depth + 1, min_opponent_player, False))
self.moves_made.pop()
if (v >= max_beta):
self.print_move(self.node(self.moves_made[-1]), max_depth, v, max_alpha, max_beta)
return v
max_alpha = max(max_alpha, v)
self.print_move(self.node(self.moves_made[-1]), max_depth, v, max_alpha, max_beta)
return v
def min_value(self, min_state, min_alpha, min_beta, min_depth, min_current_player, pass_turn):
if min_current_player == 'X':
max_opponent_player = 'O'
else:
max_opponent_player = 'X'
self.no_opponant = False
validActions = self.cutoff_test(min_state, min_depth, min_current_player)
v = float('inf')
terminal_node = self.moves_made[-1]
# When depth == maxdepth
if validActions == -1:
self.print_move(self.node(terminal_node), min_depth, str(self.evaluate(min_state)), min_alpha,
min_beta)
return self.evaluate(min_state)
# When there are no valid moves
elif len(validActions) == 0:
# When the previous turn was a pass too, terminate the game
if pass_turn == True:
self.print_move(self.node(terminal_node), min_depth, v, min_alpha, min_beta)
v = self.evaluate(min_state)
terminal_node = self.moves_made[-1]
self.print_move(self.node(terminal_node), min_depth + 1, v, min_alpha, min_beta)
if (v <= min_alpha):
self.print_move(self.node(terminal_node), min_depth, v, min_alpha, min_beta)
return v
min_beta = min(min_beta, v)
self.print_move(self.node(terminal_node), min_depth, v, min_alpha, min_beta)
return v
# PASS MOVE
self.print_move(self.node(terminal_node), min_depth, v, min_alpha, min_beta)
self.moves_made.append("pass")
v = self.max_value(min_state, min_alpha, min_beta, min_depth + 1, max_opponent_player, True)
self.moves_made.pop()
if (v <= min_alpha):
self.print_move(self.node(self.moves_made[-1]), min_depth, v, min_alpha, min_beta)
return v
min_beta = min(min_beta, v)
self.print_move(self.node(self.moves_made[-1]), min_depth, v, min_alpha, min_beta)
return v
# When there are valid actions
self.print_move(self.node(terminal_node), min_depth, v, min_alpha, min_beta)
validActions = sorted(validActions, key=lambda a: (a.row, a.column))
for action in validActions:
self.moves_made.append(action)
v = min(v, self.max_value(self.result(min_state, action, min_current_player), min_alpha, min_beta,
min_depth + 1, max_opponent_player, False))
self.moves_made.pop()
if (v <= min_alpha):
self.print_move(self.node(self.moves_made[-1]), min_depth, v, min_alpha, min_beta)
return v
min_beta = min(min_beta, v)
self.print_move(self.node(self.moves_made[-1]), min_depth, v, min_alpha, min_beta)
return v
def alpha_beta_search_next_move(self, state, max_alpha, max_beta, currentplayer):
        # Initialize start states
depth = 0
if currentplayer == 'X':
min_opponent_player = 'O'
else:
min_opponent_player = 'X'
self.moves_made.append("root")
nextmove = None
validActions = self.cutoff_test(state, depth, currentplayer)
terminal_node = self.moves_made[-1]
v = float('-inf')
if validActions == -1:
self.print_move(self.node(terminal_node), depth, str(self.evaluate(state)), max_alpha,
max_beta)
return
# When there are no valid moves
if len(validActions) == 0:
self.print_move(self.node(terminal_node), depth, v, max_alpha, max_beta)
self.moves_made.append("pass")
v = self.min_value(state, max_alpha, max_beta, depth + 1, min_opponent_player, True)
self.moves_made.pop()
if (v >= max_beta):
self.print_move(self.node(self.moves_made[-1]), depth, v, max_alpha, max_beta)
return
max_alpha = max(max_alpha, v)
self.print_move(self.node(self.moves_made[-1]), depth, v, max_alpha, max_beta)
return
self.print_move(self.node(terminal_node), depth, v, max_alpha, max_beta)
validActions = sorted(validActions, key=lambda a: (a.row, a.column))
possibleActions = self.getPossibleActions(state, currentplayer)
v = float('-inf')
possibleActions = sorted(possibleActions, key=lambda a: (a.row, a.column))
for action in possibleActions:
self.moves_made.append(action)
value = self.min_value(self.result(state, action, currentplayer),
max_alpha, max_beta, depth + 1, min_opponent_player, False)
self.moves_made.pop()
if v < value:
v = value
nextmove = action
# Returns move in order of preference when there are multiple valid moves of same value
elif v == value:
if nextmove.row > action.row:
nextmove = action
elif nextmove.row == action.row:
if nextmove.column > action.column:
nextmove = action
max_alpha = max(max_alpha, v)
self.print_move(self.node(self.moves_made[-1]), depth, v, max_alpha, max_beta)
return nextmove
def output_results(self, resultant_state):
result = ""
for row in range(0, 8):
for column in range(0, 8):
result += resultant_state[row][column]
result += "\n"
return result
def print_move(self, move, depth, v, alpha, beta):
printmove = ""
printmove += move + "," + str(depth) + "," + self.replace_inf_string(v) + "," + self.replace_inf_string(
alpha) + "," + self.replace_inf_string(beta)
self.moves_list.append(printmove)
def add_moves(self, str):
self.moves += "\n"
self.moves += str
def replace_inf_string(self, val):
if val == float('-inf'):
return "-Infinity"
elif val == float('inf'):
return "Infinity"
else:
return str(val)
|
[
"[email protected]"
] | |
8efd667bb6de1041e779d62e066a81e2e8baf1fc
|
092ba02a9ec08c0ab6d115a30f7204d791ad1c20
|
/paddlenlp/ops/optimizer/adamw.py
|
fd343172dd68178adba08b311579c675f5510ba3
|
[] |
no_license
|
kevinng77/blenderbot_paddle
|
ade96551bd4b92531a9b82442b4df9a767c9f506
|
af8aa66703915aa5be3e820f2016bf02bea1fa2e
|
refs/heads/master
| 2023-08-27T03:03:30.050700 | 2021-11-01T10:23:19 | 2021-11-01T10:23:19 | 393,221,853 | 12 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,329 |
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import os
import paddle
from paddle.optimizer.optimizer import Optimizer
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.framework import Variable
from paddle.fluid import layers
from paddle.fluid import unique_name
from paddle.fluid.framework import in_dygraph_mode, _dygraph_tracer
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph import base as imperative_base
__all__ = ["AdamW", ]
class AdamW(Optimizer):
r"""
The AdamW optimizer is implemented based on the AdamW Optimization
in paper `DECOUPLED WEIGHT DECAY REGULARIZATION <https://arxiv.org/pdf/1711.05101.pdf>`_.
    It resolves the problem of L2 regularization failure in the Adam optimizer.
.. math::
t & = t + 1
moment\_1\_out & = {\\beta}_1 * moment\_1 + (1 - {\\beta}_1) * grad
        moment\_2\_out & = {\\beta}_2 * moment\_2 + (1 - {\\beta}_2) * grad * grad
learning\_rate & = learning\_rate * \\
            \\frac{\\sqrt{1 - {\\beta}_2^t}}{1 - {\\beta}_1^t}
param\_out & = param - learning\_rate * (\\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + \lambda * param)
Args:
learning_rate (float|LRScheduler, optional): The learning rate used to update ``Parameter``.
It can be a float value or a LRScheduler. The default value is 0.001.
beta1 (float, optional): The exponential decay rate for the 1st moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.9.
beta2 (float, optional): The exponential decay rate for the 2nd moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.999.
epsilon (float, optional): A small float value for numerical stability.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 1e-08.
parameters (list|tuple, optional): List/Tuple of ``Tensor`` to update to minimize ``loss``. \
This parameter is required in dygraph mode. \
The default value is None in static mode, at this time all parameters will be updated.
weight_decay (float, optional): The weight decay coefficient, it can be float or Tensor. The default value is 0.01.
        apply_decay_param_fun (function|None, optional): If it is not None,
            only the tensors for which apply_decay_param_fun(Tensor.name)==True
            will have weight decay applied. It only works when we want to specify tensors.
            Default: None.
        grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
            The accumulators are updated at every step. Every element of the two moving-averages
            is updated in both dense mode and sparse mode. If the size of the parameter is very large,
            then the update may be very slow. The lazy mode only updates the elements that have
            gradients in the current mini-batch, so it will be much faster. But this mode has
            different semantics from the original Adam algorithm and may lead to different results.
The default value is False.
multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
Examples:
.. code-block:: python
import paddle
import paddlenlp
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10,10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
            adamw = paddlenlp.ops.optimizer.AdamW(learning_rate=0.1,
parameters=linear.parameters())
out.backward()
adamw.step()
adamw.clear_grad()
"""
_moment1_acc_str = "moment1"
_moment2_acc_str = "moment2"
_beta1_pow_acc_str = "beta1_pow_acc"
_beta2_pow_acc_str = "beta2_pow_acc"
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
parameters=None,
weight_decay=0.0,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
apply_decay_param_fun=None,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
if not isinstance(beta1, Variable):
if not 0 <= beta1 < 1:
raise ValueError(
"Invaild value of beta1, expect beta1 in [0,1).")
if not isinstance(beta2, Variable):
if not 0 <= beta2 < 1:
raise ValueError(
"Invaild value of beta2, expect beta2 in [0,1).")
if not isinstance(epsilon, Variable):
if not 0 <= epsilon:
raise ValueError(
"Invaild value of epsilon, expect epsilon >= 0.")
super(AdamW, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
weight_decay=None,
grad_clip=grad_clip,
name=name)
self.type = "adamw"
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._lazy_mode = lazy_mode
self._multi_precision = multi_precision
self._weight_decay = weight_decay
self._apply_decay_param_fun = apply_decay_param_fun
self._master_weights = {}
def _create_master_weight(self, param):
assert isinstance(self.helper, LayerHelper)
var_name = param.name + "_fp32_master"
var_name = unique_name.generate(var_name)
var = layers.create_global_var(
name=var_name,
shape=param.shape,
value=0,
dtype='float32',
persistable=True)
block = self.helper.startup_program.global_block()
block.append_op(
type="cast",
inputs={"X": [param]},
outputs={"Out": [var]},
attrs={
"in_dtype": param.dtype,
"out_dtype": core.VarDesc.VarType.FP32
})
self._master_weights[param.name] = var
return var
def _get_accumulator(self, name, param):
"""Utility function to fetch an accumulator for a parameter
Args:
name: name of the accumulator
param: parameter variable for which accumulator is to be fetched
Returns:
accumulator variable for the parameter
"""
if self._name is not None:
name = self._name + "_" + name
find_master = self._multi_precision and param.dtype == core.VarDesc.VarType.FP16
target_param = self._master_weights[
param.name] if find_master else param
target_name = target_param.name
if (name not in self._accumulators or
target_name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, target_name))
return self._accumulators[name][target_name]
def _add_moments_pows(self, p):
acc_dtype = p.dtype
if acc_dtype == core.VarDesc.VarType.FP16:
acc_dtype = core.VarDesc.VarType.FP32
self._add_accumulator(self._moment1_acc_str, p, dtype=acc_dtype)
self._add_accumulator(self._moment2_acc_str, p, dtype=acc_dtype)
self._add_accumulator(
name=self._beta1_pow_acc_str,
param=p,
dtype=acc_dtype,
fill_value=0.9 if isinstance(self._beta1, Variable) \
else self._beta1,
shape=[1],
type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
self._add_accumulator(
name=self._beta2_pow_acc_str,
param=p,
dtype=acc_dtype,
fill_value=0.999 if isinstance(self._beta2, Variable) \
else self._beta2,
shape=[1],
type=core.VarDesc.VarType.LOD_TENSOR, device='cpu')
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
# Create accumulator tensors for first and second moments
for p in parameters:
if self._multi_precision and p.dtype == core.VarDesc.VarType.FP16:
master_p = self._create_master_weight(p)
self._add_moments_pows(master_p)
continue
if p.dtype == core.VarDesc.VarType.FP16 and not self._multi_precision:
warnings.warn(
"Accumulating with FP16 in optimizer can lead to poor accuracy or slow convergence."
"Consider using multi_precision=True option of the Adam optimizer."
)
self._add_moments_pows(p)
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
find_master = self._multi_precision and param_and_grad[
0].dtype == core.VarDesc.VarType.FP16
master_weight = (self._master_weights[param_and_grad[0].name]
if find_master else None)
lr = self._create_param_lr(param_and_grad)
# create the adam optimize op
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param_and_grad[0].name):
weight_decay = 0.0
else:
weight_decay = self._weight_decay
if framework.in_dygraph_mode():
_beta1 = self._beta1 if not isinstance(
self._beta1, Variable) else self._beta1.numpy().item(0)
_beta2 = self._beta2 if not isinstance(
self._beta2, Variable) else self._beta2.numpy().item(0)
ins = {
'Param': param_and_grad[0],
'Grad': param_and_grad[1],
'LearningRate': lr,
'Moment1': moment1,
'Moment2': moment2,
'Beta1Pow': beta1_pow_acc,
'Beta2Pow': beta2_pow_acc,
}
attrs = {
'beta1': _beta1,
'beta2': _beta2,
'epsilon': self._epsilon,
'lazy_mode': self._lazy_mode,
'min_row_size_to_use_multithread': 1000,
'multi_precision': False,
'weight_decay': weight_decay,
'lr_ratio': 1.0
}
outs = {
'ParamOut': param_and_grad[0],
'Moment1Out': moment1,
'Moment2Out': moment2,
'Beta1PowOut': beta1_pow_acc,
'Beta2PowOut': beta2_pow_acc,
}
framework._dygraph_tracer().trace_op(
type="adamw", inputs=ins, outputs=outs, attrs=attrs)
return None
inputs = {
"Param": [param_and_grad[0]],
"Grad": [param_and_grad[1]],
"LearningRate": [lr],
"Moment1": [moment1],
"Moment2": [moment2],
"Beta1Pow": [beta1_pow_acc],
"Beta2Pow": [beta2_pow_acc]
}
outputs = {
"ParamOut": [param_and_grad[0]],
"Moment1Out": [moment1],
"Moment2Out": [moment2],
"Beta1PowOut": [beta1_pow_acc],
"Beta2PowOut": [beta2_pow_acc],
}
attrs = {
"lazy_mode": self._lazy_mode,
"min_row_size_to_use_multithread": 1000,
"multi_precision": find_master,
'weight_decay': weight_decay,
'lr_ratio': 1.0
}
if isinstance(self._beta1, Variable):
inputs['Beta1Tensor'] = self._beta1
else:
attrs['beta1'] = self._beta1
if isinstance(self._beta2, Variable):
inputs['Beta2Tensor'] = self._beta2
else:
attrs['beta2'] = self._beta2
if isinstance(self._epsilon, Variable):
inputs['EpsilonTensor'] = self._epsilon
else:
attrs['epsilon'] = self._epsilon
if find_master:
inputs["MasterParam"] = master_weight
outputs["MasterParamOut"] = master_weight
for name in ["Beta1Tensor", "Beta2Tensor", "MasterParam"]:
if name in inputs:
raise ValueError(
"Custom AdamW should NOT have input: {}".format(name))
adam_op = block.append_op(
type=self.type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=True)
return adam_op
@imperative_base.no_grad
@framework.dygraph_only
def step(self):
params_grads = []
for param in self._parameter_list:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
if hasattr(grad_var, "_is_sparse") and grad_var._is_sparse(
) and self.regularization is not None:
raise RuntimeError(
"AdamW don't support weight_decay with sparse parameters, please set it to None."
)
params_grads.append((param, grad_var))
optimize_ops = self._apply_optimize(
loss=None, startup_program=None, params_grads=params_grads)
|
[
"[email protected]"
] | |
dcf32321584fe37884e0f4817db5a71e31b2c2c1
|
b0c2f67b2878a312c6e6ffa5fe4158bd55dad69c
|
/chap4/exp4.1.py
|
55fe517c4d14c71d71fb13cf69c53d6a324056ee
|
[] |
no_license
|
loukey/pythonDemo
|
efda51be07beede0be2a8cdaae9b7e153bc790bc
|
9e6f64908ccba64d32ffc58edbb8d6f8ab6bf68d
|
refs/heads/master
| 2021-01-20T04:39:24.054749 | 2017-05-19T05:41:00 | 2017-05-19T05:41:00 | 89,711,134 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
py
|
# -*- coding: utf-8 -*-
#Example 4.1: simply printing numbers
#Interestingly, Python has no do..while or while..until loops,
#but we can emulate them anyway
while True:
print 'Please enter a number:'
number = input()
print number
if number==0:
break
print 'List Ended'
|
[
"[email protected]"
] | |
bac598ef7573c63aeb9e39fc13f67f09c8edb748
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/training_doc/training_doc.py
|
e31f2e4105271968edebb810aead63d83aef037f
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 66 |
py
|
/home/openerp/production/extra-addons/training_doc/training_doc.py
|
[
"[email protected]"
] | |
d6bf727ab1080c6cce9f88f40e0ccb9579e448d1
|
5822e3c41edeee70b77965dc5da5ddf07a0b4c86
|
/0x13-bit_manipulation/practice/7-rectangle.py
|
abe39c86bb8f644f3c1ea353d06b7eaaecafba5e
|
[] |
no_license
|
alishabelle/holbertonschool-low_level_programming
|
4b7afee35344136213cee5bab0f6350112bb0d31
|
9f3a5bff185488410736464c8abec63f26835fc2
|
refs/heads/master
| 2020-04-21T09:23:56.284061 | 2019-09-27T06:25:12 | 2019-09-27T06:25:12 | 169,447,181 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 820 |
py
|
class Rectangle:
    @property
    def width(self):
        return self.__width
    @width.setter
    def width(self, value):
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value
    @property
    def height(self):
        return self.__height
    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value
    def __init__(self, width=0, height=0):
        self.width = width
        self.height = height
    def area(self):
        return self.__width * self.__height
    def perimeter(self):
        return (self.__width + self.__height) * 2
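# Quick usage sketch (hypothetical values):
# r = Rectangle(3, 4)
# r.area()       # 12
# r.perimeter()  # 14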
|
[
"[email protected]"
] | |
e7d61fbec6bfa61be24a55df9f6df6c2562cbe86
|
74234aa26532edad7ace5a6513ae02ab8ef00593
|
/the prototype code from part a/part_a_OLS_of_fifth_order_with_ND_bootstrap.py
|
cb859ae7d50888c82695dc1419a57fe3e39ed51f
|
[] |
no_license
|
sheputis/Machine_learning_project_1
|
9b4ae563a0b837256ec4fe3b826c1019cfc6b2a1
|
d5f28c3d90245599300fdb9e441787e569556336
|
refs/heads/master
| 2020-03-31T02:29:45.255061 | 2018-10-06T09:51:25 | 2018-10-06T09:51:25 | 151,826,445 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,239 |
py
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from random import random, seed
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
# here we add a normally distributed random variable scaled by zigma and compute the mean squared error and r2_score
def FrankeFunction(x,y,noise=0):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4 + noise
print("____________________________________________BOOTSTRAP________________________________________________________")
class bootstrap:
def __init__(self,n): #n the length of the array that is put in
self.n = n
self.n_test = None;
self.array_of_indices_training = self.generate_training_indices()
self.array_of_indices_test = self.generate_test_indices()
def generate_training_indices(self):
        return np.random.randint(1, self.n + 1, size=self.n) - 1  # indices from 0 to n-1 (random_integers is deprecated)
def generate_test_indices(self):
test_indices =[]
for i in range(self.n):
if sum(self.array_of_indices_training==i)==0:
test_indices.append(i)
test_indices = np.array(test_indices)
return test_indices
def generate_training_data(self,input_array):
temp = input_array.copy()
for i in range(self.n):
temp[i] = input_array[self.array_of_indices_training[i]]
return temp
def generate_test_data(self,input_array):
self.n_test = len(self.array_of_indices_test)
temp = input_array[:self.n_test].copy()
for i in range(self.n_test):
temp[i] = input_array[self.array_of_indices_test[i]]
return temp
"""
a=np.array([[1],[2],[3],[4],[5]])
print("c.original_array_x")
print(a)
print('_________')
c = bootstrap(a)
print(c.training_data)
print('_________')
print(c.test_data)
print('_________')
"""
class OLS_main:
def __init__(self):
delta=0.05
x = np.arange(0, 1, delta)
y = np.arange(0, 1, delta) #0.05
n = len(x)
self.x, self.y = np.meshgrid(x,y) #the x's and y's are the matrices that will be plotted
self.n = self.y.size
        self.zigma = 0.1 # std dev of the noise added below (so var(y) = zigma**2)
self.noise = self.zigma*np.random.randn(n,n)
self.x_, self.y_,self.noise_ = self.x.reshape(-1, 1), self.y.reshape(-1,1), self.noise.reshape(-1,1) #the x_'s and y_'s will be the vectors used for calculation
self.X_ = self.generate_X_of_degree(5)
self.z = FrankeFunction(self.x, self.y,self.noise)
self.z_ = FrankeFunction(self.x_, self.y_,self.noise_)
self.beta_lin_reg = (np.linalg.inv(self.X_.T.dot(self.X_)).dot(self.X_.T)).dot(self.z_)
self.z_fit_ = self.X_.dot(self.beta_lin_reg)
self.z_fit=self.z_fit_.reshape((n,n))
def generate_X_of_degree(self,n):
X_ = np.c_[self.x_,self.y_]
poly = PolynomialFeatures(n)
return poly.fit_transform(X_) #generating the sample matrix with two variables that are both polynomials of 5th order
def plot_everything(self):
print("________________________________________plotting_________________________________________________________")
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(self.x, self.y, self.z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
surf2 = ax.plot_surface(self.x,self.y,self.z_fit, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
def variance_in_beta(self):#this needs to be edited, the number 21 has to be changed to the amount of columns in X (polyfit)
print("_____________________________calculating variance in beta variables________________________________________")
var_Beta = (np.linalg.inv(self.X_.T.dot(self.X_)))*self.zigma
for i in range(21): #writing out variances
print(var_Beta[i][i])
def errors(self):
print("____________________________________________errors_________________________________________________________")
mse_ = mean_squared_error(self.z_,self.z_fit_)
r2_score_ = r2_score(self.z_,self.z_fit_)
print("Mean squared error: %.5f" % mse_)
print("R2r2_score: %.5f" % r2_score_)
return mse_ , r2_score_
class OLS_:
def __init__(self,x,y,z): #x,y are the coordinates and z is the corresponding precalculated(with noise) function output
self.x = x
self.y = y
self.z = z
self.X = self.generate_X_of_degree(5)
self.beta = self.find_beta()
self.z_fit = self.X.dot(self.beta)
def generate_X_of_degree(self,n):
X = np.c_[self.x,self.y]
poly = PolynomialFeatures(n)
return poly.fit_transform(X) #generating the sample matrix with two variables that are both polynomials of 5th order
def find_beta(self):
return (np.linalg.inv(self.X.T.dot(self.X)).dot(self.X.T)).dot(self.z)
def errors(self):
print("____________________________________________errors_________________________________________________________")
mse_ = mean_squared_error(self.z,self.z_fit)
r2_score_ = r2_score(self.z,self.z_fit)
print("Mean squared error bootstrap: %.5f" % mse_)
print("R2r2_score bootstrap: %.5f" % r2_score_)
return mse_ , r2_score_
print("))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))")
def errors(z,z_fit):
print("____________________________________________errors_________________________________________________________")
mse_ = mean_squared_error(z,z_fit)
r2_score_ = r2_score(z,z_fit)
print("Mean squared error: %.5f" % mse_)
print("R2r2_score: %.5f" % r2_score_)
return mse_ , r2_score_
print("___________________________calculating many bootstrap mse's ____________________________________________________")
A = OLS_main() #here we prepare all the variables we need
class run_the_bootstraps:
def __init__(self,x,y,z): #x,y and z have to be the column vector where each element corresponds
        self.x, self.y, self.z = x, y, z
        self.boot_error_list_training = []
        self.boot_error_list_test = []  # used by run_bootstrap_on_test_data, was never initialized
        self.nr_bootstraps = 10
        self.run_bootstrap_on_training_data()
def run_bootstrap_on_training_data(self):
for k in range(self.nr_bootstraps):
BOOT = bootstrap(len(self.x))
x_train = BOOT.generate_training_data(self.x)
y_train = BOOT.generate_training_data(self.y)
z_train = BOOT.generate_training_data(self.z)
B = OLS_(x_train,y_train,z_train)
self.boot_error_list_training.append(B.errors())
self.boot_error_list_training = np.array(self.boot_error_list_training)
# hist = np.histogram(self.boot_error_list_training)
plt.hist(self.boot_error_list_training[:,0])
plt.show()
def run_bootstrap_on_test_data(self):
for k in range(self.nr_bootstraps):
BOOT = bootstrap(len(self.x))
x_test = BOOT.generate_test_data(self.x)
y_test = BOOT.generate_test_data(self.y)
z_test = BOOT.generate_test_data(self.z)
B = OLS_(x_test,y_test,z_test)
self.boot_error_list_test.append(B.errors())
self.boot_error_list_test = np.array(self.boot_error_list_test)
# hist = np.histogram(self.boot_error_list_training)
plt.hist(self.boot_error_list_test[:,0])
plt.show()
Dd = run_the_bootstraps(A.x_,A.y_,A.z_) #here we feed the variables from A instance to the bootstrap class
|
[
"[email protected]"
] | |
7539ce848cc1fae5940a0471597cdd1ae47d939a
|
fd49fb2d491691a542d5586f7ff913d23666ae13
|
/caspar_desk/settings_base.py
|
55b7beb624167aa228f3fc534421d629e353ab5f
|
[] |
no_license
|
kiloreven/caspar-desk
|
0c4d74c0c8a70c87b081ab2948a48ac6e3f2d966
|
180b89c3848fa857004f89bde7595b27b2612e9f
|
refs/heads/master
| 2021-01-10T08:26:46.685275 | 2015-10-12T19:47:04 | 2015-10-12T19:47:04 | 43,375,025 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,780 |
py
|
"""
Django settings for django_admin project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'caspar_desk',
'ws4redis',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'caspar_desk.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
'ws4redis.context_processors.default',
],
},
},
]
WSGI_APPLICATION = 'caspar_desk.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
# '/var/www/static/',
)
WEBSOCKET_URL = '/ws/'
#WS4REDIS_SUBSCRIBER = 'myapp.redis_store.RedisSubscriber'
WSGI_APPLICATION = 'ws4redis.django_runserver.application'
# Always use IPython for shell_plus
SHELL_PLUS = "ipython"
|
[
"[email protected]"
] | |
4dc51f42988c513bc9bc2fb8d13879f74bde8ad0
|
e1aead4168d0e09ac86b45988137408aa0fa8be6
|
/src/main.py
|
b769316f7e634571af5aa308028e8819c06d50a9
|
[] |
no_license
|
BreenC/Projet2
|
1b08f1e4c207bf7ede3cb609293a2286815c9384
|
b3ebc7bca5896e620bccdc2745bc2711d2a03eb9
|
refs/heads/master
| 2020-04-07T14:19:16.773410 | 2018-12-07T18:09:50 | 2018-12-07T18:09:50 | 158,442,423 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,230 |
py
|
"""
Projet2 - Solving industrial problems
"""
import argparse
from src.Centrale import Centrale
from src.Parser import Parser
class Main:
def __init__(self):
pathqtot = "C:/Users/camil/Documents/Polytech/UQAC/trimestre 1/Optimisation/Projet 2 code/Projet2/data/qtot.txt"
#pathqtot = "C:/Users/mdaadaa/Desktop/Projet2/data/qtot.txt"
pathelamont = "C:/Users/camil/Documents/Polytech/UQAC/trimestre 1/Optimisation/Projet 2 code/Projet2/data/elamont.txt"
#pathelamont = "C:/Users/mdaadaa/Desktop/Projet2/data/elamont.txt"
centrale = Centrale()
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="Path to the file containing data")
args = parser.parse_args()
# print("Entrer le chemin du fichier du débit total :")
# pathqtot = input()
Parser.parse_qtot(pathqtot, centrale)
#print("Entrer le chemin du fichier de l'élevation en amont :")
#pathelamont = input()
Parser.parse_elamont(pathelamont, centrale)
#Parser.parse_Pi(pathpuiss)
Parser.create_turbines(self,centrale)
param = 1
while param != 0:
print("Pour changer un paramètre d'une turbine entrer son numéro (de 1 à 5) sinon entrer 0 :")
param = int(input())
if param != 0 :
print("Pour rendre la turbine indisponible entrer 0 sinon taper 1 :")
dispo = input()
if dispo == 0 :
for turbine in centrale.turbines:
if turbine.numero == param :
turbine.rendre_indisponible(turbine)
print("Pour changer la borne supérieure (initialement de 160) entré la valeur a enlever ( exemple : 30 pour une borne de 130) pour ne rien changer entrer 0 :")
borne = int(input())
if borne <= 160 and borne >0 :
centrale.get_turbine_i(param).change_borne_sup(borne)
ind = 100
# while ind < len(centrale.qtot):
centrale.run(ind)
#ind = ind + 1
# print(centrale.puissance_totale)
if __name__ == '__main__':
Main()
|
[
"[email protected]"
] | |
6ddfdae3b363584b1b4840702b657f759e0c0da3
|
697e341ee6c0641cb8699ab88129ad343dcc9fde
|
/docx_fanyi.py
|
0f0ed65d183bcf0e04f5e62c4aca2a3b59182bd0
|
[] |
no_license
|
PANBOHE/googleTranspy3.6
|
944aad7d3f351b8e65e6ba72bb856b49a6687e13
|
783b1fa7fc846e5ee45809f596f9fe1b523e9747
|
refs/heads/master
| 2020-04-24T01:55:53.243035 | 2019-02-20T06:57:51 | 2019-02-20T06:57:51 | 171,617,894 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,993 |
py
|
# -*- coding: utf-8 -*-
# @File : docx_fanyi.py
# @Author: Panbo
# @Date : 2019/2/20
# @Desc :
import time
import os
import traceback
import docx
from docx import Document
from googletrans import Translator
g_log = None
g_trans = Translator(service_urls=[
'translate.google.cn',
])
def translate_buff(buff_para, buff_text, src, dest):
joiner = '|||'
tt = joiner.join(buff_text)
    msg = '\tTranslating: {} characters in total'.format(len(tt))
print(msg)
if g_log:
g_log.show.emit(msg)
try:
tr = g_trans.translate(tt, dest=dest, src=src)
except:
traceback.print_exc()
        msg = '\t<b>Google Translate error, please retry later</b>'
print(msg)
if g_log:
g_log.show.emit(msg)
return
tr = tr.text.split(joiner)
for i, para in enumerate(buff_para):
para.text += '\n' + tr[i]
def translate_docx(fn, src, dest):
doc = Document(fn)
buff_para = []
buff_text = []
buff_len = 0
max_len = 4900
for para in doc.paragraphs:
text = para.text.strip()
if not text: continue
text_len = len(text.encode('utf8'))
if buff_len + text_len < max_len:
buff_para.append(para)
buff_text.append(text)
buff_len += text_len
continue
translate_buff(buff_para, buff_text, src, dest)
            msg = 'Sleeping 10 seconds before the next translation'
print(msg)
if g_log:
g_log.show.emit(msg)
time.sleep(10)
buff_para = [para]
buff_text = [text]
buff_len = text_len
if buff_para:
translate_buff(buff_para, buff_text, src, dest)
# save
n_dir = os.path.dirname(fn)
n_file = os.path.basename(fn)
to_save = os.path.join(n_dir, 'fanyi-'+n_file)
doc.save(to_save)
return to_save
if __name__ == '__main__':
from sys import argv
fn = argv[1]
src = 'en'
dest = 'zh-cn'
translate_docx(fn, src, dest)
|
[
"[email protected]"
] | |
3c2914aeeb137940e884af34f7f4ae1b9a1cb124
|
306d2a92fb331aec6ddf0794b538d6e3385a0df9
|
/app/api/news/views.py
|
4821c74644481851dbbc7b49363e6c122d7dddf6
|
[] |
no_license
|
Zarinabonu/ForceApp
|
f343d3a52aee08890230c5425c9e238df99c5a7f
|
13f8e8613999c4850fc6f0bfcec66f897eecbe4a
|
refs/heads/master
| 2020-12-10T08:00:25.072289 | 2020-01-20T13:14:07 | 2020-01-20T13:14:07 | 233,540,795 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 491 |
py
|
from rest_framework.generics import ListAPIView
from app.api.news.serializers import NewsSerializer
from app.model import News
class NewsListAPIView(ListAPIView):
serializer_class = NewsSerializer
def get_queryset(self):
qs = News.objects.all()
v = self.request.GET.get('-views')
l_seen = self.request.GET.get('last_seen')
if v:
qs = qs.order_by('views')
if l_seen:
qs = qs.order_by('-created')
return qs
|
[
"[email protected]"
] | |
d57f83bffdfb4d3b6a1515261b2d121eadb2561f
|
8ed86b8e9c451abcb2ce0ddf2f2067c11f3993d8
|
/osmnx/simplify.py
|
9a7f930496b164cd592daeb1d16d9989edec354f
|
[
"MIT"
] |
permissive
|
surfcao/osmnx
|
65830096c21b8353a536f776dfedba7de20eac4c
|
51c9addb42425657fa6b11c7442f79f10b9e3e22
|
refs/heads/master
| 2021-01-19T23:32:40.068378 | 2017-04-19T20:22:01 | 2017-04-19T20:22:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,568 |
py
|
###################################################################################################
# Module: simplify.py
# Description: Simplify and correct network topology
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/gboeing/osmnx
###################################################################################################
import time
import logging as lg
from shapely.geometry import Point, LineString
from .utils import log
def is_endpoint(G, node, strict=True):
"""
Return True if the node is a "real" endpoint of an edge in the network, otherwise False.
OSM data includes lots of nodes that exist only as points to help streets bend around curves.
An end point is a node that either:
1) is its own neighbor, ie, it self-loops.
2) or, has no incoming edges or no outgoing edges, ie, all its incident edges point inward or all its incident edges point outward.
3) or, it does not have exactly two neighbors and degree of 2 or 4.
4) or, if strict mode is false, if its edges have different OSM IDs.
Parameters
----------
G : networkx multidigraph
node : int
the node to examine
strict : bool
if False, allow nodes to be end points even if they fail all other rules but have edges with different OSM IDs
Returns
-------
bool
"""
neighbors = set(list(G.predecessors(node)) + list(G.successors(node)))
n = len(neighbors)
d = G.degree(node)
if node in neighbors:
# if the node appears in its list of neighbors, it self-loops. this is always an endpoint.
return True
# if node has no incoming edges or no outgoing edges, it must be an end point
elif G.out_degree(node)==0 or G.in_degree(node)==0:
return True
elif not (n==2 and (d==2 or d==4)):
# else, if it does NOT have 2 neighbors AND either 2 or 4 directed edges, it is an endpoint
# either it has 1 or 3+ neighbors, in which case it is a dead-end or an intersection of multiple streets
# or it has 2 neighbors but 3 degree (indicating a change from oneway to twoway)
# or more than 4 degree (indicating a parallel edge) and thus is an endpoint
return True
elif not strict:
# non-strict mode
osmids = []
# add all the edge OSM IDs for incoming edges
for u in G.predecessors(node):
for key in G.edge[u][node]:
osmids.append(G.edge[u][node][key]['osmid'])
# add all the edge OSM IDs for outgoing edges
for v in G.successors(node):
for key in G.edge[node][v]:
osmids.append(G.edge[node][v][key]['osmid'])
# if there is more than 1 OSM ID in the list of edge OSM IDs then it is an endpoint, if not, it isn't
return len(set(osmids)) > 1
else:
# if none of the preceding rules returned true, then it is not an endpoint
return False
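# Illustrative sketch (hypothetical toy graph, not part of this module): on a
# two-way street A<->B<->C, B is an interstitial curve node while C is a
# dead-end, so only A and C qualify as endpoints under the rules above.
#   import networkx as nx
#   G = nx.MultiDiGraph()
#   G.add_edges_from([('A', 'B'), ('B', 'A'), ('B', 'C'), ('C', 'B')])
#   is_endpoint(G, 'B')  # False: 2 neighbors, degree 4
#   is_endpoint(G, 'C')  # True: single neighbor, so rule 3 applies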
def build_path(G, node, endpoints, path):
"""
Recursively build a path of nodes until you hit an endpoint node.
Parameters
----------
G : networkx multidigraph
node : int
the current node to start from
endpoints : set
the set of all nodes in the graph that are endpoints
path : list
the list of nodes in order in the path so far
Returns
-------
paths_to_simplify : list
"""
    # for each successor of the passed-in node
for successor in G.successors(node):
if not successor in path:
# if this successor is already in the path, ignore it, otherwise add it to the path
path.append(successor)
if not successor in endpoints:
# if this successor is not an endpoint, recursively call build_path until you find an endpoint
path = build_path(G, successor, endpoints, path)
else:
# if this successor is an endpoint, we've completed the path, so return it
return path
if (not path[-1] in endpoints) and (path[0] in G.successors(path[-1])):
# if the end of the path is not actually an endpoint and the path's first node is a successor of the
# path's final node, then this is actually a self loop, so add path's first node to end of path to close it
path.append(path[0])
return path
def get_paths_to_simplify(G, strict=True):
"""
Create a list of all the paths to be simplified between endpoint nodes.
The path is ordered from the first endpoint, through the interstitial nodes,
to the second endpoint.
Parameters
----------
G : networkx multidigraph
strict : bool
if False, allow nodes to be end points even if they fail all other rules but have edges with different OSM IDs
Returns
-------
paths_to_simplify : list
"""
# first identify all the nodes that are endpoints
start_time = time.time()
endpoints = set([node for node in G.nodes() if is_endpoint(G, node, strict=strict)])
log('Identified {:,} edge endpoints in {:,.2f} seconds'.format(len(endpoints), time.time()-start_time))
start_time = time.time()
paths_to_simplify = []
# for each endpoint node, look at each of its successor nodes
for node in endpoints:
for successor in G.successors(node):
if not successor in endpoints:
# if the successor is not an endpoint, build a path from the endpoint node to the next endpoint node
try:
path = build_path(G, successor, endpoints, path=[node, successor])
paths_to_simplify.append(path)
except RuntimeError:
log('Recursion error: exceeded max depth, moving on to next endpoint successor', level=lg.WARNING)
# recursion errors occur if some connected component is a self-contained ring in which all nodes are not end points
# handle it by just ignoring that component and letting its topology remain intact (this should be a rare occurrence)
# RuntimeError is what Python <3.5 will throw, Py3.5+ throws RecursionError but it is a subtype of RuntimeError so it still gets handled
log('Constructed all paths to simplify in {:,.2f} seconds'.format(time.time()-start_time))
return paths_to_simplify
def is_simplified(G):
"""
Determine if a graph has already had its topology simplified.
If any of its edges have a geometry attribute, we know that it has previously been simplified.
Parameters
----------
G : networkx multidigraph
Returns
-------
bool
"""
edges_with_geometry = [d for u, v, k, d in G.edges(data=True, keys=True) if 'geometry' in d]
return len(edges_with_geometry) > 0
def simplify_graph(G_, strict=True):
"""
Simplify a graph's topology by removing all nodes that are not intersections or dead-ends.
Create an edge directly between the end points that encapsulate them,
but retain the geometry of the original edges, saved as attribute in new edge
Parameters
----------
G_ : graph
strict : bool
if False, allow nodes to be end points even if they fail all other rules but have edges with different OSM IDs
Returns
-------
networkx multidigraph
"""
if is_simplified(G_):
raise Exception('This graph has already been simplified, cannot simplify it again.')
log('Begin topologically simplifying the graph...')
G = G_.copy()
initial_node_count = len(list(G.nodes()))
initial_edge_count = len(list(G.edges()))
all_nodes_to_remove = []
all_edges_to_add = []
# construct a list of all the paths that need to be simplified
paths = get_paths_to_simplify(G, strict=strict)
start_time = time.time()
for path in paths:
# add the interstitial edges we're removing to a list so we can retain their spatial geometry
edge_attributes = {}
for u, v in zip(path[:-1], path[1:]):
# there shouldn't be multiple edges between interstitial nodes
edges = G.edge[u][v]
if not len(edges) == 1:
log('Multiple edges between "{}" and "{}" found when simplifying'.format(u, v), level=lg.WARNING)
# the only element in this list as long as above assertion is True (MultiGraphs use keys (the 0 here), indexed with ints from 0 and up)
edge = edges[0]
for key in edge:
if key in edge_attributes:
# if this key already exists in the dict, append it to the value list
edge_attributes[key].append(edge[key])
else:
# if this key doesn't already exist, set the value to a list containing the one value
edge_attributes[key] = [edge[key]]
for key in edge_attributes:
# don't touch the length attribute, we'll sum it at the end
if len(set(edge_attributes[key])) == 1 and not key == 'length':
# if there's only 1 unique value in this attribute list, consolidate it to the single value (the zero-th)
edge_attributes[key] = edge_attributes[key][0]
elif not key == 'length':
# otherwise, if there are multiple values, keep one of each value
edge_attributes[key] = list(set(edge_attributes[key]))
# construct the geometry and sum the lengths of the segments
edge_attributes['geometry'] = LineString([Point((G.node[node]['x'], G.node[node]['y'])) for node in path])
edge_attributes['length'] = sum(edge_attributes['length'])
# add the nodes and edges to their lists for processing at the end
all_nodes_to_remove.extend(path[1:-1])
all_edges_to_add.append({'origin':path[0],
'destination':path[-1],
'attr_dict':edge_attributes})
# for each edge to add in the list we assembled, create a new edge between the origin and destination
for edge in all_edges_to_add:
G.add_edge(edge['origin'], edge['destination'], **edge['attr_dict'])
# finally remove all the interstitial nodes between the new edges
G.remove_nodes_from(set(all_nodes_to_remove))
msg = 'Simplified graph (from {:,} to {:,} nodes and from {:,} to {:,} edges) in {:,.2f} seconds'
log(msg.format(initial_node_count, len(list(G.nodes())), initial_edge_count, len(list(G.edges())), time.time()-start_time))
return G
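# Minimal usage sketch (assumption: G carries node 'x'/'y' coordinates and
# edge 'length'/'osmid' attributes, which the code above relies on):
#   G_simple = simplify_graph(G)
#   assert is_simplified(G_simple)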
|
[
"[email protected]"
] | |
780debb8110d8c3798abbe8dcd8dd86a68ae8057
|
ee379267ca10990edcb7eaf6322219018dd60dc7
|
/test.py
|
aa99e5e2bf03e996cdcf21686ee5499b9556b3bf
|
[] |
no_license
|
tiago939/target
|
33105b66f598851a05a95a20b49490ba0ed3741b
|
ea11719a9ce7ceb095e4e7a57a5908f68ed1d266
|
refs/heads/master
| 2020-04-01T18:17:48.387317 | 2018-12-04T21:26:33 | 2018-12-04T21:26:33 | 153,483,519 | 3 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,654 |
py
|
import numpy as np
import main, pickle
#load the data from the main script
norm = main.data()[0]
test_data = main.data()[3]
test_labels = main.data()[4]
total = main.data()[6]
hot_label = main.data()[7]
#load the parameters from the main script
net = main.network()[0]
#activation function
def f(z):
if z>100.0:
return 1.0
elif z<-100.0:
return 0.0
else:
return 1.0/(1.0+np.exp(-z))
x=[] #input to each neuron
y=[] #activation of each neuron
for layer in range(len(net)):
x.append(np.array([0.0]*net[layer]))
y.append(np.array([0.0]*net[layer]))
def getScore():
score_test = 0.0
weights = pickle.load( open( "weights", "rb" ) )
loss_test = 0.0
for example in range(total):
#update activation of each neuron
y[0] = test_data[example]/norm
for layer in range(1,len(net)):
x[layer]=np.dot(weights[layer-1],y[layer-1])
            y[layer] = np.array(list(map(f, x[layer])))  # wrap map() so this also works on Python 3
#guess the class from classifcation problem
guess = np.argmax(y[len(net)-1])
if hot_label==True:
target = np.array([0.0]*net[len(net)-1])
target[test_labels[example]] = 1.0
else:
target = test_labels[example]
#classification performance on the test data
if hot_label == True: #for classification problems
if guess == test_labels[example]:
score_test += 1.0
loss_test += (0.5/total)*(1.0/net[len(net)-1])*sum( (target - y[len(net)-1])**2.0)
if hot_label == True:
return score_test/total, loss_test
else:
return loss_test
|
[
"[email protected]"
] | |
08f86a84c521a7399eb73771bf38c269b6da22fc
|
ce6318753653fe34c62e5d28930b7313a68542a9
|
/addressbook/__init__.py
|
700e8c8e4a6bd50694abc90396c553349d0aac14
|
[
"MIT"
] |
permissive
|
kctzstyle/python-gui-addressbook
|
8b7f8d91440a7a455155fc647b799cfce4835ed4
|
0bd07a3e4c8181959d249a8272d890d19e44ceb6
|
refs/heads/main
| 2023-02-22T06:40:19.812963 | 2021-01-30T09:05:19 | 2021-01-30T09:05:19 | 332,575,633 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 57 |
py
|
__version__ = '1.0.0'
__all__ = ['models', 'db', 'app']
|
[
"[email protected]"
] | |
8cfc1028ad38075dcb2db3428de7837cfef3caa5
|
5cc2e06c01b621959c89cfe0f7b6b818c4d8ac60
|
/Source/Player.py
|
98925b907d90905647aacc6b8e108df925c2dcff
|
[] |
no_license
|
JimmyLord/PythonFramework
|
96a6dbe145168193fd3e39fa654d40b3fb2be159
|
2015de06b1a2f6a6f352af314cbd8394383cf89e
|
refs/heads/master
| 2021-06-19T08:00:42.450789 | 2021-06-11T16:00:51 | 2021-06-11T16:00:51 | 135,480,826 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,509 |
py
|
import pygame
import imgui
import GameObject
class Player(GameObject.GameObject):
def __init__(self, position, sprite, texture):
super().__init__( position, sprite, texture )
def onEvent(self, event):
super().onEvent( event )
# Inputs will set the direction the player is moving.
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
self.direction.y += 1
if event.key == pygame.K_s:
self.direction.y += -1
if event.key == pygame.K_a:
self.direction.x += -1
if event.key == pygame.K_d:
self.direction.x += 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
self.direction.y -= 1
if event.key == pygame.K_s:
self.direction.y -= -1
if event.key == pygame.K_a:
self.direction.x -= -1
if event.key == pygame.K_d:
self.direction.x -= 1
def update(self, deltaTime):
# GameObject update will do the actual player movement.
super().update( deltaTime )
# Debug info displayed using imgui.
imgui.begin( "Player", True )
changed, newvalue = imgui.slider_float2( "Position", self.position.x, self.position.y, 0, 20 )
self.position.x = newvalue[0]
self.position.y = newvalue[1]
imgui.end()
def draw(self):
super().draw()
|
[
"[email protected]"
] | |
5a50f463abe0ed27c652cf9283ca5fbb489593a5
|
ebefbc9fab052700e6b913056ae3d45e0902ed78
|
/WEBSITE/get_tweets.py
|
4daa8fd7f1cc315fba1ce7a61158d39ee9ec028a
|
[] |
no_license
|
tanyagarg2509/Virago_MLProject
|
36e69f27c59090348c93006c8d144bad798a4f47
|
05520a998f7e2c501afc56bba99558678b974442
|
refs/heads/master
| 2022-12-06T19:54:05.367965 | 2020-08-10T09:08:02 | 2020-08-10T09:08:02 | 279,497,946 | 2 | 2 | null | 2020-08-01T09:09:16 | 2020-07-14T06:08:12 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,041 |
py
|
import tweepy
import time
import pandas as pd
pd.set_option('display.max_colwidth', 1000)
# api key
consumer_key = 'Tan0zgQRjQxjyubOPCbERip1p'
consumer_secret = 'Rcjd0sID7brrcH682RVDFNjc8yxBuEHI6Kq5PQlhVPiskVi6ri'
access_token = '261123310-nhpBB26oASCDw2HdQA7Hoxui7f0jpMaPOr068b9W'
access_token_secret = 'gWQ41E4bfc4EalWnnWv147NeoCDkwo2cadKJFtIbTVy0z'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
def get_related_tweets(text_query):
# list to store tweets
tweets_list = []
# no of tweets
count = 30
try:
# Pulling individual tweets from query
for tweet in api.search(q=text_query, count=count):
print(tweet.text)
# Adding to list that contains all tweets
tweets_list.append({'tweet_text': tweet.text})
return pd.DataFrame.from_dict(tweets_list)
except BaseException as e:
print('failed on_status,', str(e))
time.sleep(3)
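# Hedged usage sketch (assumes the credentials above are still valid):
#   df = get_related_tweets("#python")
#   # df has a single 'tweet_text' column with up to 30 rows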
|
[
"[email protected]"
] | |
40ac13a7d56e1369c096d6b0d9a77961d16430bf
|
443585e4fc146308b18bc2f9234d0947da38d3e5
|
/input_output/2.py
|
8e66671a1fb57677c88f36c9a0d41923421258d1
|
[] |
no_license
|
ggyudongggyu/20201208commit
|
b524c4a7fb241cacaacffa5882c55d1d0ccba11f
|
fbb58a8ed06f454a2a79a9b8c75deabaec62b317
|
refs/heads/master
| 2023-02-02T21:59:51.518218 | 2020-12-24T14:32:21 | 2020-12-24T14:32:21 | 319,578,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 31 |
py
|
in_str = input()
print(in_str)
|
[
"[email protected]"
] | |
849db7158f590c4e5a37537eff1e8f8126ff98d2
|
85f39176fb8bb79e9e575a3d7a5db9cc429dbfc4
|
/utils/model_utils.py
|
23764da1fc831b04f43364a3af8c3c41891827b8
|
[] |
no_license
|
zhihuikaicheng/pytorch-reid-lite
|
a5b47984da94c55221fdf59fa5242445f7e9d51c
|
6d01b376d9bc4fccea50d235f7da715c7452d73a
|
refs/heads/master
| 2020-04-06T10:15:38.833155 | 2018-11-14T09:29:07 | 2018-11-14T09:29:07 | 157,373,657 | 0 | 0 | null | 2018-11-13T12:03:32 | 2018-11-13T12:03:32 | null |
UTF-8
|
Python
| false | false | 2,938 |
py
|
import torch
import os
import logging
from collections import OrderedDict
torch_ver = torch.__version__[:3]
def save_and_evaluate(net, config, evaluate_func, save_ckpt=True):
config["best_eval_result"] = 0.0 if "best_eval_result" not in config\
else config["best_eval_result"]
state_dict = net.state_dict()
if save_ckpt:
checkpoint_path = os.path.join(config["sub_working_dir"], "model.pth")
torch.save(state_dict, checkpoint_path)
logging.info("Model checkpoint saved to %s" % checkpoint_path)
if evaluate_func:
net.eval()
logging.info("Running evalutation: %s" %
config["evaluation_params"]["type"])
eval_result = evaluate_func(config)
if eval_result > config["best_eval_result"]:
config["best_eval_result"] = eval_result
logging.info("New best result: {}"
.format(config["best_eval_result"]))
best_checkpoint_path = os.path.join(config["sub_working_dir"],
"model_best.pth")
torch.save(state_dict, best_checkpoint_path)
logging.info(
"Best checkpoint saved to {}".format(best_checkpoint_path))
else:
logging.info("Best result: {}".format(config["best_eval_result"]))
net.train()
torch.cuda.empty_cache()
def restore_model(model_path, model, eval_mode=False):
logging.info(
"Restoring trained model from %s" % model_path
)
if eval_mode:
state_dict = torch.load(
model_path,
map_location=lambda storage, loc: storage.cuda(0))
else:
state_dict = torch.load(model_path)
if "module." in state_dict.keys()[0] and \
"module." not in model.state_dict().keys()[0]:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k
if "se_module" in k:
name = k.replace("se_module", "se_layer")
name = name.replace("module.", "")
name = name.replace("se_layer", "se_module")
else:
name = name.replace("module.", "")
new_state_dict[name] = v
state_dict = new_state_dict
elif "module." not in state_dict.keys()[0] and \
"module." in model.state_dict().keys()[0]:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = "module." + k
new_state_dict[name] = v
state_dict = new_state_dict
# Check if there is key mismatch:
mismatch_keys = []
for k in model.state_dict().keys():
if k not in state_dict:
mismatch_keys.append(k)
if len(mismatch_keys) > 0:
logging.warn("[MODEL_RESTORE] number of mismatch_keys: %s"
% len(mismatch_keys))
model.load_state_dict(state_dict, strict=False)
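# Hedged usage sketch ("build_model" and the checkpoint path are hypothetical):
# the "module." handling above lets a checkpoint saved from nn.DataParallel
# load into a plain model, and vice versa.
#   model = build_model(config)
#   restore_model("model_best.pth", model, eval_mode=True)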
|
[
"[email protected]"
] | |
d5b2d0a9e571234c680d803851735c7c32986bee
|
62912bea20c56093f27fb2826e0f5f4a26a3ed0b
|
/symphony/cli/pyinventory/api/user.py
|
26b7cb869b430045ee70020b452c8fdb9a7edcd2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
sijad/magma
|
5c2b6520e207f05c29f29248627b90629f1f4088
|
78b5f16432d7070a84da74b90d4f1e3f8348fa37
|
refs/heads/master
| 2021-04-09T06:34:15.295104 | 2020-03-20T19:28:42 | 2020-03-20T19:31:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,129 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from typing import List
from ..client import SymphonyClient
from ..consts import Entity, User
from ..exceptions import EntityNotFoundError
from ..graphql.edit_user_input import EditUserInput
from ..graphql.edit_user_mutation import EditUserMutation
from ..graphql.user_query import UserQuery
from ..graphql.user_status_enum import UserStatus
from ..graphql.users_query import UsersQuery
USER_ROLE = 1
def get_user(client: SymphonyClient, email: str) -> User:
"""Returns `pyinventory.consts.User` object by its email
Args:
email: the email address the user registered with
Returns:
pyinventory.consts.User object
Raises:
EntityNotFoundError: the user was not found
FailedOperationException: internal inventory error
Example:
```
user = client.get_user("[email protected]")
```
"""
result = UserQuery.execute(client, email)
user = result.user
if user is None:
raise EntityNotFoundError(entity=Entity.User, entity_name=email)
return User(
id=user.id,
auth_id=user.authID,
email=user.email,
status=user.status,
role=user.role,
)
def add_user(client: SymphonyClient, email: str, password: str) -> User:
"""Adds new user to inventory with its email and password
Args:
email: the email address of the user
password: password the user would connect with
Returns:
pyinventory.consts.User object
Raises:
EntityNotFoundError: the user was not created properly
FailedOperationException: internal inventory error
AssertionError: The user was not created for some known reason
HTTPError: Error with connection
Example:
```
user = client.add_user("[email protected]", "P0ssW!rd0f43")
```
"""
resp = client.post(
"/user/async/",
{"email": email, "password": password, "role": USER_ROLE, "networkIDs": []},
)
if not resp.ok:
error_message = resp.json().get("error", None)
if error_message is not None:
raise AssertionError(error_message)
        resp.raise_for_status()  # no JSON error message; raise the underlying HTTPError
return get_user(client, email)
def deactivate_user(client: SymphonyClient, user: User) -> None:
"""Deactivate the user which would prevent the user from login in to symphony
Users in symphony are never deleted. Only de-activated.
Args:
user: user to deactivate
Raises:
FailedOperationException: internal inventory error
Example:
```
user = client.get_user("[email protected]")
client.deactivate_user(user)
```
"""
EditUserMutation.execute(
client, input=EditUserInput(id=user.id, status=UserStatus.DEACTIVATED)
)
def activate_user(client: SymphonyClient, user: User) -> None:
"""Activate the user which would allow the user to login again to symphony
Args:
user: user to activate
Raises:
FailedOperationException: internal inventory error
Example:
```
user = client.get_user("[email protected]")
client.activate_user(user)
```
"""
EditUserMutation.execute(
client, input=EditUserInput(id=user.id, status=UserStatus.ACTIVE)
)
def get_users(client: SymphonyClient) -> List[User]:
"""Get the list of users in the system (both active and deactivate)
Returns:
list of `pyinventory.consts.User` objects
Raises:
FailedOperationException: internal inventory error
Example:
```
users = client.get_users()
for user in users:
print(user.email)
```
"""
result = UsersQuery.execute(client).users
if result is None:
return []
users = []
for edge in result.edges:
node = edge.node
if node is not None:
users.append(
User(
id=node.id,
auth_id=node.authID,
email=node.email,
status=node.status,
role=node.role,
)
)
return users
def get_active_users(client: SymphonyClient) -> List[User]:
"""Get the list of the active users in the system
Returns:
list of `pyinventory.consts.User` objects
Raises:
FailedOperationException: internal inventory error
Example:
```
users = client.get_active_users()
for user in users:
print(user.email)
```
"""
users = get_users(client)
return [user for user in users if user.status == UserStatus.ACTIVE]
|
[
"[email protected]"
] | |
aa0c4ed9935283356909351e267db22a24e3bf0b
|
913e24ea110f839c73363bc1aac9673e561fa5f8
|
/gym_environments/widowx_env/envs/v41_widowx_pybullet_env_fixedGoal_actionStepCoeff25_noBounds_1joint_reward16.py
|
08772e2ee4242328bac40c5c52cdb93a98e9dff2
|
[
"MIT"
] |
permissive
|
PierreExeter/WidowX-reacher
|
24e2b3f72e9aec24a9a61e6a8958c200e0dbe893
|
560c6779dc91a887191f344c43de24926ba75b4d
|
refs/heads/master
| 2023-03-06T13:48:12.810858 | 2021-02-22T15:36:52 | 2021-02-22T15:36:52 | 264,480,232 | 4 | 0 |
MIT
| 2021-02-22T15:27:44 | 2020-05-16T16:36:53 |
Python
|
UTF-8
|
Python
| false | false | 17,140 |
py
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from numbers import Number
from collections import OrderedDict
import pybullet as p
import pybullet_data
import os
import numpy as np
import random
# ENVIRONMENT CONFIGURATION
NEUTRAL_VALUES = [0.015339807878856412, -1.4839419194602816,
1.4971652489763858, -0.008369006790373335, -0.08692557798018634, .027]
RESET_VALUES = [0.015339807878856412, -1.2931458041875956,
1.0109710760673565, -1.3537670644267164, -0.07158577010132992, .027]
# AFTER 60 timesteps, moving the joint 2 by 0.01, we reach this tip position: [0.13962698, 0.00214202, 0.31920969]
# action: 0.01
# obs: [ 0.13962698 0.00214202 0.31920969 -0.69314635]
# reward: -0.009033412729705663
# done: False
# info: {'total_distance': 0.0950442672111562, 'goal position': array([0.20422488, 0.00313302, 0.24949928]), 'tip position': array([0.13962698, 0.00214202, 0.31920969]), 'joint position': array([-0.69314635]), 'current_joint_pos': array([-0.70314634], dtype=float32), 'new_joint_pos': array([-0.69314635], dtype=float32), 'joint_vel': array([0.], dtype=float32)}
# timestep: 59
# MINIMUM ACHIEVABLE DISTANCE BY PYBULLET: 4.9471871525143285e-09 m. So it is possible to reach 5e-4 (smallest success ratio)
# action: 0.01
# obs: [ 0.13962698 0.00214202 0.31920969 -0.69314635]
# reward: -2.447466072200283e-17
# done: False
# info: {'total_distance': 4.9471871525143285e-09, 'goal position': array([0.13962698, 0.00214202, 0.31920969]), 'tip position': array([0.13962698, 0.00214202, 0.31920969]), 'joint position': array([-0.69314635]), 'current_joint_pos': array([-0.70314634], dtype=float32), 'new_joint_pos': array([-0.69314635], dtype=float32), 'joint_vel': array([0.], dtype=float32)}
# timestep: 59
# RL BOUNDS
BOUNDS_XMIN = -100
BOUNDS_XMAX = 100
BOUNDS_YMIN = -100
BOUNDS_YMAX = 100
BOUNDS_ZMIN = -100
BOUNDS_ZMAX = 100
# JOINT_MIN = np.array([
# -3.1,
# -1.571,
# -1.571,
# -1.745,
# -2.617,
# 0.003
# ])
# JOINT_MAX = np.array([
# 3.1,
# 1.571,
# 1.571,
# 1.745,
# 2.617,
# 0.03
# ])
# only use joint 2
JOINT_MIN = -1.571
JOINT_MAX = 1.571
JOINT_NAMES = ['joint_1', 'joint_2', 'joint_3',
'joint_4', 'joint_5', 'gripper_joint']
SIM_START_POSITION = np.array([-0.185033226409, 0.00128528, 0.46227163])
class WidowxEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
"""
How to initialize this environment:
env = gym.make('replab-v0').start_rospy(goal_oriented=[GOAL_ORIENTED])
If goal_oriented is true, then the environment's observations become a dict
and the goal is randomly resampled upon every reset
params:
goal_oriented: Changes some aspects of the environment for goal-oriented tasks
rospy.init_node is set with random number so we can have multiple
nodes running at the same time.
self.goal is set to a fixed, randomly drawn goal if goal_oriented = False
"""
# self.obs_space_low = np.array(
# [-.16, -.15, 0.14, -3.1, -1.6, -1.6, -1.8, -3.1, 0])
# self.obs_space_high = np.array(
# [.16, .15, .41, 3.1, 1.6, 1.6, 1.8, 3.1, 0.05])
# observation_space = spaces.Box(
# low=self.obs_space_low, high=self.obs_space_high, dtype=np.float32)
# self.observation_space = observation_space
# pierre: reduce observation space
self.obs_space_low = np.array(
[-.16, -.15, 0.14, -1.6])
self.obs_space_high = np.array(
[.16, .15, .41, 1.6])
observation_space = spaces.Box(
low=self.obs_space_low, high=self.obs_space_high, dtype=np.float32)
self.observation_space = observation_space
# added by Pierre, normalize action space, cf https://stable-baselines.readthedocs.io/en/master/guide/rl_tips.html
# self.action_space = spaces.Box(low=np.array([-0.5, -0.25, -0.25, -0.25, -0.5, -0.005]) / 25,
# high=np.array([0.5, 0.25, 0.25, 0.25, 0.5, 0.005]) / 25, dtype=np.float32)
# changed by Pierre: only move joint 2
self.action_space = spaces.Box(low=np.array([-0.01]), high=np.array([0.01]), dtype=np.float32)
# PB: actions are too big and the robot moves too much
# self.action_space = spaces.Box(low=np.array([-1, -1, -1, -1, -1, -1]),
# high=np.array([1, 1, 1, 1, 1, 1]), dtype=np.float32)
self.current_pos = None
# self.goal = np.array([-.14, -.13, 0.26])
self.goal = np.array([0.13962698, 0.00214202, 0.31920969]) #[.14, .0, 0.26]) # added by Pierre: changed to feasible target by moving only joint 2
# self.set_goal(self.sample_goal_for_rollout())
# print("********goal is : ***********", self.goal)
self.start_sim(goal_oriented=False, render_bool=True)
# re-added by Pierre
def start_sim(self, goal_oriented=False, render_bool=False):
self.render_bool = render_bool
self.goal_oriented = goal_oriented
if self.render_bool:
self.physics_client = p.connect(p.GUI)
else:
self.physics_client = p.connect(p.DIRECT)
if self.goal_oriented:
self.observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(low=np.array(
[-.16, -.15, 0.25]), high=np.array([.16, .15, 0.41]), dtype=np.float32),
achieved_goal=spaces.Box(low=self.obs_space_low[
:3], high=self.obs_space_high[:3], dtype=np.float32),
observation=self.observation_space
))
# p.resetSimulation()
# p.setTimeStep(0.01)
p.resetDebugVisualizerCamera(cameraDistance=0.6, cameraYaw=0, cameraPitch=-30, cameraTargetPosition=[
0.2, 0, 0.1], physicsClientId=self.physics_client) # added by Pierre
p.setAdditionalSearchPath(pybullet_data.getDataPath())
path = os.path.abspath(os.path.dirname(__file__))
self.arm = p.loadURDF(os.path.join(path, "URDFs/widowx/widowx.urdf"), useFixedBase=True)
self.sphere = p.loadURDF(os.path.join(path, "URDFs/sphere.urdf"),
useFixedBase=True) # added by Pierre
self.plane = p.loadURDF('plane.urdf') # added by Pierre
self.reset()
return self
# shared functions between both sim and robot mode
def sample_goal_for_rollout(self):
return np.random.uniform(low=np.array([-.14, -.13, 0.26]), high=np.array([.14, .13, .39]))
def set_goal(self, goal):
self.goal = goal
def step(self, action):
"""
Parameters
----------
action : [change in x, change in y, change in z]
Returns
-------
ob, reward, episode_over, info : tuple
ob (object) :
either current position or an observation object, depending on
the type of environment this is representing
reward (float) :
negative, squared, l2 distance between current position and
goal position
episode_over (bool) :
Whether or not we have reached the goal
info (dict) :
For now, all this does is keep track of the total distance from goal.
This is used for rlkit to get the final total distance after evaluation.
See function get_diagnostics for more info.
"""
action = np.array(action, dtype=np.float32)
self.my_action = action # added by Pierre
# modified by Pierre
self.joint_positions, self.joint_velocities = self._get_current_joint_positions()
self.new_joint_positions = self.joint_positions + action
self.new_joint_positions = np.clip(np.array(self.new_joint_positions), JOINT_MIN, JOINT_MAX)
self._force_joint_positions_training(self.new_joint_positions)
# joint_positions = self._get_current_joint_positions()
# new_joint_positions = joint_positions + action
# new_joint_positions = np.clip(np.array(new_joint_positions), JOINT_MIN, JOINT_MAX)
# self._force_joint_positions(new_joint_positions)
end_effector_pos = self._get_current_end_effector_position()
x, y, z = end_effector_pos[0], end_effector_pos[1], end_effector_pos[2]
conditions = [
x <= BOUNDS_XMAX,
x >= BOUNDS_XMIN,
y <= BOUNDS_YMAX,
y >= BOUNDS_YMIN,
z <= BOUNDS_ZMAX,
z >= BOUNDS_ZMIN
]
violated_boundary = False
for condition in conditions:
if not condition:
violated_boundary = True
break
if violated_boundary:
            # if out of boundaries, don't update the joint position
self._force_joint_positions_training(self.joint_positions)
self.current_pos = self._get_current_state()
return self._generate_step_tuple()
def _generate_step_tuple(self):
episode_over = False
self.total_distance_from_goal = np.linalg.norm(self.current_pos[:3] - self.goal) # np.sqrt(-reward)
reward = self._get_reward(self.goal)
# self.tip_vel = self._get_current_end_effector_velocity()
# added by Pierre
info = {}
info['total_distance'] = self.total_distance_from_goal
info['goal position'] = self.goal
info['tip position'] = self.current_pos[:3]
info['joint position'] = self.current_pos[3:]
info['current_joint_pos'] = self.joint_positions
info['new_joint_pos'] = self.new_joint_positions
info['joint_vel'] = self.joint_velocities
info['penalty'] = self.penalty
# info['tip_vel'] = self.tip_vel
# if reward > -0.0001:
# if total_distance_from_goal < 0.0005: # added by Pierre
# episode_over = True
if self.goal_oriented:
obs = self._get_obs()
return obs, reward, episode_over, info
return self.current_pos, reward, episode_over, info
def reset(self):
p.resetBasePositionAndOrientation(
self.arm, [0, 0, 0], p.getQuaternionFromEuler([np.pi, np.pi, np.pi]))
p.resetBasePositionAndOrientation(self.sphere, self.goal, p.getQuaternionFromEuler(
[np.pi, np.pi, np.pi])) # added by Pierre: move sphere to self.goal position
self._force_joint_positions(RESET_VALUES)
self.current_pos = self._get_current_state()
# commented by Pierre: don't re-sample new goal
if self.goal_oriented:
# self.set_goal(self.sample_goal_for_rollout())
return self._get_obs()
return self.current_pos
def _get_obs(self):
obs = {}
obs['observation'] = self.current_pos
obs['desired_goal'] = self.goal
obs['achieved_goal'] = self.current_pos[:3]
return obs
def sample_goals(self, num_goals):
sampled_goals = np.array(
[self.sample_goal_for_rollout() for i in range(num_goals)])
goals = {}
goals['desired_goal'] = sampled_goals
return goals
def _get_reward(self, goal):
self.beta = 10
self.penalty = self.beta * np.linalg.norm(self.my_action)
rew = - self.total_distance_from_goal - self.penalty
return rew
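    # Worked example (hypothetical numbers): with beta = 10, a tip-to-goal
    # distance of 0.05 m and an action of [0.01] rad, the penalty is
    # 10 * ||[0.01]|| = 0.1, so rew = -0.05 - 0.1 = -0.15.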
def render(self, mode='human', close=False):
pass
def compute_reward(self, achieved_goal, goal, info):
return - (np.linalg.norm(achieved_goal - goal)**2)
def get_diagnostics(self, paths):
"""
This adds the diagnostic "Final Total Distance" for RLkit
"""
def get_stat_in_paths(paths, dict_name, scalar_name):
if len(paths) == 0:
return np.array([[]])
if type(paths[0][dict_name]) == dict:
return [path[dict_name][scalar_name] for path in paths]
return [[info[scalar_name] for info in path[dict_name]] for path in paths]
def create_stats_ordered_dict(
name,
data,
stat_prefix=None,
always_show_all_stats=True,
exclude_max_min=False,
):
if stat_prefix is not None:
name = "{} {}".format(stat_prefix, name)
if isinstance(data, Number):
return OrderedDict({name: data})
if len(data) == 0:
return OrderedDict()
if isinstance(data, tuple):
ordered_dict = OrderedDict()
for number, d in enumerate(data):
sub_dict = create_stats_ordered_dict(
"{0}_{1}".format(name, number),
d,
)
ordered_dict.update(sub_dict)
return ordered_dict
if isinstance(data, list):
try:
iter(data[0])
except TypeError:
pass
else:
data = np.concatenate(data)
if (isinstance(data, np.ndarray) and data.size == 1
and not always_show_all_stats):
return OrderedDict({name: float(data)})
stats = OrderedDict([
(name + ' Mean', np.mean(data)),
(name + ' Std', np.std(data)),
])
if not exclude_max_min:
stats[name + ' Max'] = np.max(data)
stats[name + ' Min'] = np.min(data)
return stats
statistics = OrderedDict()
stat_name = 'total_distance'
stat = get_stat_in_paths(paths, 'env_infos', stat_name)
statistics.update(create_stats_ordered_dict('Final %s' % (stat_name), [
s[-1] for s in stat], always_show_all_stats=True,))
return statistics
# Functions only for sim mode
def _get_current_joint_positions(self):
# joint_positions = []
# joint_velocities = [] # added by Pierre
# for i in range(6):
# joint_positions.append(p.getJointState(self.arm, i)[0]) # check that's the joint angle
# joint_velocities.append(p.getJointState(self.arm, i)[1]) # added by Pierre
# return np.array(joint_positions, dtype=np.float32), np.array(joint_velocities, dtype=np.float32)
joint_positions = []
joint_velocities = [] # added by Pierre
# only return position of joint 2
joint_positions.append(p.getJointState(self.arm, 1)[0]) # check that's the joint angle
joint_velocities.append(p.getJointState(self.arm, 1)[1]) # added by Pierre
return np.array(joint_positions, dtype=np.float32), np.array(joint_velocities, dtype=np.float32)
def _get_current_end_effector_position(self):
real_position = np.array(list(p.getLinkState(self.arm, 5, computeForwardKinematics=1)[4]))
# real_position[2] = -real_position[2] #SIM z coordinates are reversed
# adjusted_position = real_position + SIM_START_POSITION
return real_position
# added by Pierre
def _get_current_end_effector_velocity(self):
real_vel = np.array(
list(p.getLinkState(self.arm, 5, computeLinkVelocity=1, computeForwardKinematics=1)[6]))
return real_vel
def _set_joint_positions(self, joint_positions):
# In SIM, gripper halves are controlled separately
joint_positions = list(joint_positions) + [joint_positions[-1]]
p.setJointMotorControlArray(
self.arm,
[0, 1, 2, 3, 4, 7, 8],
controlMode=p.POSITION_CONTROL,
targetPositions=joint_positions
)
# original function: only used at reset
def _force_joint_positions(self, joint_positions):
for i in range(5):
p.resetJointState(
self.arm,
i,
joint_positions[i]
)
for i in range(7, 9):
p.resetJointState(
self.arm,
i,
joint_positions[-1]
)
def _force_joint_positions_training(self, joint_positions):
p.resetJointState(
self.arm,
1,
joint_positions[0]
)
def _get_current_state(self):
return np.concatenate(
[self._get_current_end_effector_position(),
self._get_current_joint_positions()[0]],
axis=0)
# Functions for pickling
def __getstate__(self):
state = self.__dict__.copy()
return state
def __setstate__(self, state):
self.__dict__.update(state)
if state['render_bool']:
self.start_sim(goal_oriented=state['goal_oriented'], render_bool=False)
else:
self.start_sim(goal_oriented=state['goal_oriented'], render_bool=state['render_bool'])
self.reset()
|
[
"[email protected]"
] |