Dataset schema (one entry per column, with value ranges where given):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 616)
- content_id: string (length 40)
- detected_licenses: sequence (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (3 to 10.2M)
- extension: string (188 classes)
- content: string (length 3 to 10.2M)
- authors: sequence (length 1)
- author_id: string (length 1 to 132)

Each record below lists these fields in order, separated by `|`, with the file content and author list inlined.
3f83f46f9d7f4babfa8c81312e6fd61edc7c8c9a | cc52011cb420651cdd9d37af1ffbad68f935c7be | /junk/face_test.py | 02765a052dfcd7d9789560f7bae2bde65f24a6ca | [] | no_license | fatpat314/mask_detection | 0988a341fd47849977bbb7babdc0ed2fce928a6d | 025b420014e8aac71d867e06ef9202a473e5357c | refs/heads/master | 2022-12-20T05:43:44.700740 | 2020-09-28T08:19:56 | 2020-09-28T08:19:56 | 290,686,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,137 | py | import face_recognition
import cv2
import numpy as np
import os
import glob
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
"""img_dir = "images"
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)
data = []
masked_faces_encodings = []
for fl in files:
data.append(fl)
masked_faces_images = face_recognition.load_image_file(fl)
masked_faces_encoding = face_recognition.face_encodings(masked_faces_images)
masked_faces_encodings.append(masked_faces_encoding)
masked_faces = ["Masked"]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
print(masked_faces_encodings)
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame,(0,0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
# matches = face_recognition.compare_faces(masked_faces_encodings, face_encoding)
name = "Unmasked"
if name == "Unmasked":
print("ALERT!!!!", "\a")
# # If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = masked_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(masked_faces_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = masked_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
print("IMG DATA: ", data)"""
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("face.jpeg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("face2.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
known_face_names = [
"Barack Obama",
"Joe Biden"
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# tolerance=0.0
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unmasked"
if name == "Unmasked":
print("ALERT!!!!", "\a")
# # If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
# import face_recognition
# import cv2
# import numpy as np
#
# """Does this only need to trigger when it sees a face? Otherwise just keep looping through frames until a face a found.
# Because the facial recognition is not able to recognized masked faces"""
#
# video_capture = cv2.VideoCapture(0)
#
# # Initialize some variables
# face_locations = []
# face_encodings = []
# face_names = []
# process_this_frame = True
#
# while True:
# # Grab a single frame of video
# ret, frame = video_capture.read()
#
# # Resize frame of video to 1/4 size for faster face recognition processing
# small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
#
# # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
# rgb_small_frame = small_frame[:, :, ::-1]
#
# # Only process every other frame of video to save time
# if process_this_frame:
# # Find all the faces and face encodings in the current frame of video
# face_locations = face_recognition.face_locations(rgb_small_frame)
# face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
#
# face_names = []
# for face_encoding in face_encodings:
# name = "Unmasked"
# if name == "Unmasked":
# print("ALERT!!!", '\a')
#
# process_this_frame = not process_this_frame
#
#
# # Display the results
# for (top, right, bottom, left), name in zip(face_locations, face_names):
# # Scale back up face locations since the frame we detected in was scaled to 1/4 size
# top *= 4
# right *= 4
# bottom *= 4
# left *= 4
#
# # Draw a box around the face
# cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
#
# # Draw a label with a name below the face
# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
#
# # Display the resulting image
# cv2.imshow('Video', frame)
#
# # Hit 'q' on the keyboard to quit!
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
#
# # Release handle to the webcam
# video_capture.release()
# cv2.destroyAllWindows()
#
#
#
#
#
#
#
#
#
#
# # from PIL import Image
# # import face_recognition
# # import cv2
# # import sys
#
# # faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# #
# # video_capture = cv2.VideoCapture(0)
# #
# # while True:
# # ret, frame = video_capture.read()
# #
# # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# # faces = faceCascade.detectMultiScale(
# # gray,
# # scaleFactor=1.5,
# # minNeighbors=5,
# # minSize=(30, 30),
# # flags=cv2.CASCADE_SCALE_IMAGE
# # )
# #
# # for (x, y, w, h) in faces:
# # cv2.rectangle(frame, (x, y), (x+w, y+h), (0 ,255, 0), 2)
# #
# # cv2.imshow('FaceDetections', frame)
# #
# # if k%256 == 27:
# # break
# #
# # elif k%256 -- 32:
# # img_name = "facedetect_webcam_{}.png".format(img_counter)
# # cv2.imwrite(img_name, frame)
# # print("{} written!".format(img_name))
# # img_counter += 1
# #
# # video_capture.release()
# # cv2.destroyAllWindows()
#
#
# #
# # # cascPath = sys.argv[1]
# # faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# #
# # video_capture = cv2.VideoCapture(0)
# #
# # while True:
# # # Capture frame-by-frame
# # ret, frame = video_capture.read()
# #
# # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# #
# # faces = faceCascade.detectMultiScale(
# # gray,
# # scaleFactor=1.1,
# # minNeighbors=5,
# # minSize=(30, 30),
# # # flags=cv2.cv2.CV_HAAR_SCALE_IMAGE
# # )
# #
# # # Draw a rectangle around the faces
# # for (x, y, w, h) in faces:
# # cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# #
# # # Display the resulting frame
# # cv2.imshow('Video', frame)
# #
# # if cv2.waitKey(1) & 0xFF == ord('q'):
# # break
# #
# # # When everything is done, release the capture
# # video_capture.release()
# # cv2.destroyAllWindows()
# #
# #
# #
# #
# #
# #
# #
# #
# #
# # masked_faces = face_recognition
# #
# # known_image = face_recognition.load_image_file("mask.jpeg")
# # unknown_image = face_recognition.load_image_file("face.jpeg")
# #
# # try:
# # known_image_encoding = face_recognition.face_encodings(known_image)[0]
# # unknown_image_encoding = face_recognition.face_encodings(unknown_image)[0]
# # except IndexError:
# # print("I was not able to locate any faces in at least one of the images. Check the image files. Aborting...")
# # quit()
# #
# # known_faces = [
# # known_image_encoding
# # ]
# #
# # results = face_recognition.compare_faces(known_faces, unknown_image_encoding)
# #
# # print("Is the unknown face face.jpg {}".format(results[0]))
# # print("Is the unknown face a new person that we have never seen before? {}".format(not True in results))
# #
#
# #
# #
# # # def face_rec():
# # # known_image = face_recognition.load_image_file("face.jpg")
# # # unknown_image = face_recognition.load_image_file("face.jpeg")
# # #
# # # known_encoding = face_recognition.face_encodings(known_image)[0]
# # # unknown_encoding = face_recognition.face_encodings(unknown_image)[0]
# # #
# # # results = face_recognition.compare_faces([known_encoding], unknown_encoding)
# # # print(results)
# # # return results
# #
# #
# #
# #
# # image = face_recognition.load_image_file("group.jpg")
# # face_locations = face_recognition.face_locations(image)
# # print("I found {} face(s) in this photograth.".format(len(face_locations)))
# # for face_location in face_locations:
# # top, right, bottom, left = face_location
# # print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# # face_image = image[top:bottom, left:right]
# # pil_image = Image.fromarray(face_image)
# # pil_image.show()
| [
"[email protected]"
] | |
5d0f4cb826491c6d60bd55e2f82ff687aad64d45 | 9acbf0279c38d11e89f16831e9c43b49badabb00 | /IPTVPlayer/tsiplayer/addons/resources/sites/hds_stream.py | 9875340d9a861185488cd59312f8b1383ca23e95 | [] | no_license | dgbkn/e2iPlayer | 4f101b87bc5f67bf14690d012a62cbe8755ab82c | e5f413ea032eb9012569d9d149a368a3e73d9579 | refs/heads/master | 2023-05-15T05:01:18.204256 | 2021-06-06T18:03:42 | 2021-06-06T18:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,881 | py | # -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
import re
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.hoster import cHosterGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.gui import cGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.inputParameterHandler import cInputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import progress
# from resources.lib.util import cUtil  # utilities that may come in handy
SITE_IDENTIFIER = 'hds_stream'
SITE_NAME = 'Hds-stream'
SITE_DESC = 'Film streaming HD complet en vf. Des films et séries pour les fan de streaming hds.'
URL_MAIN = 'https://hds.club/'
MOVIE_MOVIES = (True, 'showMenuMovies')
MOVIE_NEWS = (URL_MAIN + 'films/', 'showMovies')
MOVIE_GENRES = (URL_MAIN, 'showGenres')
MOVIE_EXCLUS = (URL_MAIN + 'tendance/', 'showMovies')
# MOVIE_ANNEES = (True, 'showMovieYears')
SERIE_SERIES = (True, 'showMenuTvShows')
SERIE_NEWS = (URL_MAIN + 'series/', 'showMovies')
URL_SEARCH = (URL_MAIN + '?s=', 'showMovies')
URL_SEARCH_MOVIES = (URL_SEARCH[0], 'showMovies')
URL_SEARCH_SERIES = (URL_SEARCH[0], 'showMovies')
FUNCTION_SEARCH = 'showMovies'
def load():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH[0])
oGui.addDir(SITE_IDENTIFIER, 'showSearch', 'Recherche', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_EXCLUS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_EXCLUS[1], 'Films (Populaire)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
# oOutputParameterHandler.addParameter('siteUrl', MOVIE_ANNEES[0])
# oGui.addDir(SITE_IDENTIFIER, MOVIE_ANNEES[1], 'Films (Par années)', 'annees.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMenuMovies():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH_MOVIES[0])
oGui.addDir(SITE_IDENTIFIER, URL_SEARCH_MOVIES[1], 'Recherche Films', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_EXCLUS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_EXCLUS[1], 'Films (Populaire)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
# oOutputParameterHandler.addParameter('siteUrl', MOVIE_ANNEES[0])
# oGui.addDir(SITE_IDENTIFIER, MOVIE_ANNEES[1], 'Films (Par années)', 'annees.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMenuTvShows():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH_SERIES[0])
oGui.addDir(SITE_IDENTIFIER, URL_SEARCH_SERIES[1], 'Recherche Séries ', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showSearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if (sSearchText != False):
sUrl = URL_SEARCH[0] + sSearchText.replace(' ', '+')
showMovies(sUrl)
oGui.setEndOfDirectory()
return
def showGenres():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oParser = cParser()
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'menu-item-object-genres.+?<a href="([^"]+)".*?>(.+?)<'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
genres = set(aResult[1])
genres = sorted(genres, key=lambda genre: genre[1])
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in genres:
sUrl = aEntry[0]
sTitle = aEntry[1]
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovieYears():
oGui = cGui()
oParser = cParser()
oRequestHandler = cRequestHandler(URL_MAIN)
sHtmlContent = oRequestHandler.request()
sHtmlContent = oParser.abParse(sHtmlContent, '<h2>Films Par Années</h2>', '<h2>Films Par Genres</h2>')
sPattern = '<li><a href="([^"]+)">([^<]+)<'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sUrl = aEntry[0]
sYear = aEntry[1]
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sYear, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovies(sSearch=''):
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
if sSearch:
sUrl = sSearch
sPattern = 'class="result-item">.*?href="([^"]+)"><img src="([^"]+).*?class="title"><a.*?>([^<]+).*?class="year">([^<]+).*?class="contenido"><p>([^<]+)</p>'
elif 'tendance/' in sUrl:
sPattern = 'id="post-[0-9].+?<img src="([^"]+)".+?class="data".+?href="([^"]+)">([^<]+).*?, ([^<]+)</span>'
else:
sPattern = 'id="post-[0-9].+?<img src="([^"]+)".+?class="data".+?href="([^"]+)">([^<]+).*?, ([^<]+)</span>.*?<div class="texto">([^<]*)</div>'
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addText(SITE_IDENTIFIER)
if (aResult[0] == True):
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
if sSearch:
sUrl2 = aEntry[0]
sThumb = aEntry[1]
sTitle = aEntry[2]
sYear = aEntry[3]
sDesc = aEntry[4]
else:
sThumb = aEntry[0]
if sThumb.startswith('//'):
sThumb = 'https:' + sThumb
sUrl2 = aEntry[1]
sTitle = aEntry[2]
sYear = aEntry[3]
if 'tendance/' in sUrl:
sDesc = ''
else:
sDesc = aEntry[4]
sDisplayTitle = ('%s (%s)') % (sTitle, sYear)
oOutputParameterHandler.addParameter('siteUrl', sUrl2)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('sDesc', sDesc)
oOutputParameterHandler.addParameter('sYear', sYear)
if '/series' in sUrl2:
oGui.addTV(SITE_IDENTIFIER, 'showSxE', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
else:
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
progress_.VSclose(progress_)
if not sSearch:
sNextPage, sPaging = __checkForNextPage(sHtmlContent)
if (sNextPage != False):
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
oParser = cParser()
sPattern = '>Page \d+ de (\d+)</span>.*?<span class="current.+?href=["\']([^"\']+/page/\d+)/["\'] class="inactive'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
sNumberMax = aResult[1][0][0]
sNextPage = aResult[1][0][1]
sNumberNext = re.search('page/([0-9]+)', sNextPage).group(1)
sPaging = sNumberNext + '/' + sNumberMax
return sNextPage, sPaging
return False, 'none'
def showSxE():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sThumb = oInputParameterHandler.getValue('sThumb')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sDesc = oInputParameterHandler.getValue('sDesc')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = '<span class=\'title\'>([^<]+)|class=\'numerando\'>\d - ([^<]+).+?class=\'episodiotitle\'><a href=\'([^\']+)\'>([^<]+)'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
if aEntry[0]:
oGui.addText(SITE_IDENTIFIER, '[COLOR crimson]' + aEntry[0] + '[/COLOR]')
else:
sUrl = aEntry[2]
EpTitle = aEntry[3]
Ep = aEntry[1]
sTitle = sMovieTitle + ' Episode' + Ep + EpTitle
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('sDesc', sDesc)
oGui.addEpisode(SITE_IDENTIFIER, 'showSeriesHosters', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showHosters():
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
sDesc = oInputParameterHandler.getValue('sDesc')
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sPattern = "class='dooplay_player_option' data-type='([^']+)' data-post='([^']+)' data-nume='([^']+)'"
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
url_main = GET_REAL_URLMAIN(sUrl)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sUrl2 = url_main + 'wp-admin/admin-ajax.php'
dType = aEntry[0]
dPost = aEntry[1]
dNum = aEntry[2]
pdata = 'action=doo_player_ajax&post=' + dPost + '&nume=' + dNum + '&type=' + dType
sHost = 'Serveur ' + dNum
sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
oOutputParameterHandler.addParameter('siteUrl', sUrl2)
oOutputParameterHandler.addParameter('referer', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('pdata', pdata)
oGui.addLink(SITE_IDENTIFIER, 'showLink', sTitle, sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showSeriesHosters():
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
sDesc = oInputParameterHandler.getValue('sDesc')
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sPattern = "id='player-option-.+?data-type='([^']+)'.+?data-post='([^']+)'.+?data-nume='([^']+)'.+?'server'>([^.|^<]+)"
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
url_main = GET_REAL_URLMAIN(sUrl)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sUrl2 = url_main + 'wp-admin/admin-ajax.php'
dType = aEntry[0]
dPost = aEntry[1]
dNum = aEntry[2]
pdata = 'action=doo_player_ajax&post=' + dPost + '&nume=' + dNum + '&type=' + dType
if (aEntry[3]).startswith('Unknown'):
sHost = 'Serveur ' + dNum
else:
sHost = aEntry[3].capitalize()
sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
oOutputParameterHandler.addParameter('siteUrl', sUrl2)
oOutputParameterHandler.addParameter('referer', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('pdata', pdata)
oGui.addLink(SITE_IDENTIFIER, 'showLink', sTitle, sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showLink():
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
referer = oInputParameterHandler.getValue('referer')
pdata = oInputParameterHandler.getValue('pdata')
oRequest = cRequestHandler(sUrl)
oRequest.setRequestType(1)
oRequest.addHeaderEntry('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:70.0) Gecko/20100101 Firefox/70.0')
oRequest.addHeaderEntry('Referer', referer)
oRequest.addHeaderEntry('Accept', '*/*')
oRequest.addHeaderEntry('Accept-Language', 'fr-FR,fr;q=0.9,en-US;q=0.8,en;q=0.7')
oRequest.addHeaderEntry('Content-Type', 'application/x-www-form-urlencoded')
oRequest.addParametersLine(pdata)
sHtmlContent = oRequest.request().replace('\\', '')
sPattern = '(http[^"]+)'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
oGui.setEndOfDirectory()
def GET_REAL_URLMAIN(url):
sd = url.split('.')
sdm = URL_MAIN.split('.')
return URL_MAIN.replace(sdm[0], sd[0])
| [
"[email protected]"
] | |
da6cdfe9ab180d0e96dc02d884b46c6a2f8a3e88 | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/preprocess_20210421153926.py | 328dd81758da2a656921e2d8033defa2f29c1d4b | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,518 | py |
import os
import random
from pathlib import Path

import cv2                       # OpenCV: histogram equalisation, filtering and flips below
import matplotlib.pyplot as plt  # used by showImages
import numpy as np
import pandas as pd              # used to read the train/test CSV files
import tensorflow as tf
from PIL import Image

import hyperparameters as hp
class Datasets():
""" Class for containing the training and test sets as well as
other useful data-related information. Contains the functions
for preprocessing.
"""
def __init__(self, data_path, task, aug, generate):
self.data_path = data_path
self.emotions = ['angry', 'happy', 'disgust', 'sad', 'neutral', 'surprise', 'fear']
self.emotion_dict = self.createEmotionDict()
self.task = task
self.aug = aug
if generate == 1:
if self.aug == '1':
self.createSimpleData()
else:
self.createComplexData()
# Dictionaries for (label index) <--> (class name)
self.idx_to_class = {}
self.class_to_idx = {}
# For storing list of classes
self.classes = [""] * hp.num_classes
# Setup data generators
self.train_data = self.get_data(
os.path.join(self.data_path, "train/"), False)
self.test_data = self.get_data(
os.path.join(self.data_path, "test/"), False)
def cleanTestDirs(self,):
for e in self.emotions:
pathy = self.data_path+'test/'+e
pics = 1
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
except OSError as e:
print("Error: %s : %s" % (f, e.strerror))
def cleanTrainDirs(self,):
for e in self.emotions:
pathy = self.data_path+'train/'+e
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
except OSError as e:
print("Error: %s : %s" % (f, e.strerror))
def cleanAll(self,):
self.cleanTestDirs()
self.cleanTrainDirs()
def createPixelArray(self, arr):
arr = list(map(int, arr.split()))
array = np.array(arr, dtype=np.uint8)
array = array.reshape((48, 48))
return array
def equalize_hist(self, img):
img = cv2.equalizeHist(img)
return img
def showImages(self, imgs):
_, axs = plt.subplots(1, len(imgs), figsize=(20, 20))
axs = axs.flatten()
for img, ax in zip(imgs, axs):
ax.imshow(img,cmap=plt.get_cmap('gray'))
plt.show()
def augmentIMG(self, img, task):
imgs = [img]
img1 = self.equalize_hist(img)
imgs.append(img1)
img2 = cv2.bilateralFilter(img1, d=9, sigmaColor=75, sigmaSpace=75)
imgs.append(img2)
if task == 3:
kernel = np.array([[-1.0, -1.0, -1.0],
[-1.0, 9, -1.0],
[-1.0, -1.0, -1.0]])
img3 = cv2.filter2D(img2,-1,kernel)
imgs.append(img3)
img4 = self.equalize_hist(img3)
imgs.append(img4)
img5 = cv2.bilateralFilter(img4, d=9, sigmaColor=100, sigmaSpace=100)
imgs.append(img5)
img6 = cv2.flip(img, 1) # flip horizontally
imgs.append(img6)
return imgs
def saveIMG(self, arr, num, folderLoc):
im = Image.fromarray(arr)
filename = folderLoc + "image_"+ num+".jpg"
im.save(filename)
def createTrain(self, task):
path1 = self.data_path+"train.csv"
        df = pd.read_csv(path1)
        base_filename = self.data_path + "train/"
for index, row in df.iterrows():
px = row['pixels']
emot = int(row['emotion'])
emot_loc = self.emotion_dict[emot]
filename = base_filename + emot_loc
img = self.createPixelArray(px)
img_arr = self.augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
self.saveIMG(i, num, filename)
def createTest(self, task):
        path1 = self.data_path + "icml_face_data.csv"
        df = pd.read_csv(path1)
        base_filename = self.data_path + "test/"
for index, row in df.iterrows():
if (row[' Usage'] == "PublicTest"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = self.emotion_dict[emot]
filename = base_filename + emot_loc
img = self.createPixelArray(px)
img_arr = self.augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
                    self.saveIMG(i, num, filename)
def createEmotionDict(self,):
emotionDict = {}
emotionDict[0]="angry/"
emotionDict[1]="disgust/"
emotionDict[2]="fear/"
emotionDict[3]="happy/"
emotionDict[4]="sad/"
emotionDict[5]="surprise/"
emotionDict[6] = "neutral/"
return emotionDict
def createSimpleData(self,):
self.cleanAll()
print("Cleaning done")
self.createTrain(1)
print("Training Data Generation done")
self.createTest(1)
print("Testing Data Generation done")
def createComplexData(self,):
self.cleanAll()
self.createTrain(3)
self.createTest(3)
def preprocess_fn(self, img):
""" Preprocess function for ImageDataGenerator. """
img = img / 255.
return img
def get_data(self, path, shuffle):
""" Returns an image data generator which can be iterated
through for images and corresponding class labels.
Arguments:
path - Filepath of the data being imported, such as
"../data/train" or "../data/test"
shuffle - Boolean value indicating whether the data should
be randomly shuffled.
Returns:
An iterable image-batch generator
"""
data_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=self.preprocess_fn)
# VGG must take images of size 224x224
img_size = hp.img_size
classes_for_flow = None
# Make sure all data generators are aligned in label indices
if bool(self.idx_to_class):
classes_for_flow = self.classes
# Form image data generator from directory structure
data_gen = data_gen.flow_from_directory(
path,
target_size=(img_size, img_size),
class_mode='sparse',
batch_size=hp.batch_size,
shuffle=shuffle,
classes=classes_for_flow)
# Setup the dictionaries if not already done
if not bool(self.idx_to_class):
unordered_classes = []
for dir_name in os.listdir(path):
if os.path.isdir(os.path.join(path, dir_name)):
unordered_classes.append(dir_name)
for img_class in unordered_classes:
self.idx_to_class[data_gen.class_indices[img_class]] = img_class
self.class_to_idx[img_class] = int(data_gen.class_indices[img_class])
self.classes[int(data_gen.class_indices[img_class])] = img_class
return data_gen | [
"[email protected]"
] | |
c26cdbd6de229d90cf71d67bf49f6a27ab68512f | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_optimizer/tensorflow/tf_nndct/utils/__init__.py | 3274ff4b5eecc566b7b6ab5c8e5fb76d2336b986 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 616 | py | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nndct_shared.utils import registry
| [
"[email protected]"
] | |
4db1b6a570c6c09cb4abbde4d2d5b91439464880 | 86a563e6eff56cf96bfa3c6dcdfb706e68114530 | /ch05/layer_naive.py | f4262f3f86dabc938f835840d0e9ffd66c61601c | [] | no_license | mingrammer/deep-learning-from-scratch | be322ee82fe5c8d2bcde3ac3e7d35792c5314d1f | 4e158aa3f773ac7c60585f3f1627e94dac7a05ba | refs/heads/master | 2021-01-01T06:36:44.414300 | 2017-08-10T17:15:55 | 2017-08-10T17:15:55 | 97,468,838 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class MulLayer:
def __init__(self):
self.x = None
self.y = None
def forward(self, x, y):
self.x = x
self.y = y
out = x * y
return out
def backward(self, dout):
dx = dout * self.y
dy = dout * self.x
return dx, dy
class AddLayer:
def __init__(self):
pass
def forward(self, x, y):
out = x + y
return out
def backward(self, dout):
dx = dout * 1
dy = dout * 1
return dx, dy
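

# Usage sketch appended for illustration (not part of the original layer file). It
# assumes the chapter's "buy two apples with 10% tax" example and shows how MulLayer
# objects chain in a forward pass, then unwind in reverse order during backward().
# The __main__ guard keeps the demo from running when other scripts import these layers.
if __name__ == '__main__':
    apple_price, apple_num, tax = 100, 2, 1.1

    mul_apple_layer = MulLayer()
    mul_tax_layer = MulLayer()

    # forward pass: price * count, then apply the tax multiplier
    apple_total = mul_apple_layer.forward(apple_price, apple_num)
    total = mul_tax_layer.forward(apple_total, tax)

    # backward pass: start from dout = 1 and call backward() in reverse order
    dapple_total, dtax = mul_tax_layer.backward(1)
    dapple_price, dapple_num = mul_apple_layer.backward(dapple_total)

    print(total)                            # ~220.0
    print(dapple_price, dapple_num, dtax)   # ~2.2, ~110, 200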
| [
"[email protected]"
] | |
a6d693cdcbe37656bb5535ac4a05fe5cc9372d37 | 41d0bd94bbaec0299e6be6fc56a726545c1894cb | /sources/nytimes/__init__.py | 6b17755df17000bfee582d94d3ef7ceaa7c83853 | [
"Unlicense"
] | permissive | AB9IL/stream-sources | f86eec0552d0992e7ee02a39076e0a1042ebfe27 | ede8bd3ad7d51723d489192d0a6c5b2ea31ffe56 | refs/heads/master | 2023-02-03T23:09:25.582012 | 2020-12-23T08:12:42 | 2020-12-23T08:12:42 | 319,333,418 | 0 | 0 | Unlicense | 2020-12-07T13:47:06 | 2020-12-07T13:47:05 | null | UTF-8 | Python | false | false | 244 | py | from sources.generic import FeedSource
class Source(FeedSource):
SOURCE = {
'name': 'The New York Times',
'url': 'https://www.nytimes.com',
}
FEED_URL = 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'
| [
"[email protected]"
] | |
57058094d1fac2a6430800baef3bfb044fb40353 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/searchtext/iterators/InstructionSearchAddressIterator.pyi | 714c2a10a62f4d6f5eb1d692fba25f1bfdbdb764 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | pyi | from typing import Iterator
import ghidra.program.model.address
import java.lang
import java.util
import java.util.function
class InstructionSearchAddressIterator(object, ghidra.program.model.address.AddressIterator):
def __init__(self, __a0: ghidra.program.model.listing.InstructionIterator): ...
def __iter__(self) -> Iterator[object]: ...
def equals(self, __a0: object) -> bool: ...
def forEach(self, __a0: java.util.function.Consumer) -> None: ...
def forEachRemaining(self, __a0: java.util.function.Consumer) -> None: ...
def getClass(self) -> java.lang.Class: ...
def hasNext(self) -> bool: ...
def hashCode(self) -> int: ...
def iterator(self) -> java.util.Iterator: ...
def next(self) -> object: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def remove(self) -> None: ...
def spliterator(self) -> java.util.Spliterator: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
56be18a63c0d30a9e4ba2dae5d07aad985c61656 | 40c4b8b618d67fc48b862809b6e2835bb7cf76eb | /leetcode/65.py | e19e991fccbe8881504df78c7650cbe96eaad2ad | [] | no_license | berquist/ctci | 9fa08ac724990eee32f8ad7cffc3517491570d41 | f0a69d3e4dd1b73a43c96dcb7a9c7b9955c04c39 | refs/heads/master | 2022-08-18T01:53:16.994300 | 2022-08-15T00:36:07 | 2022-08-15T00:36:07 | 120,108,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | class Solution(object):
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
# assert Solution().isNumber("0") == True
# assert Solution().isNumber(" 0.1 ") == True
# assert Solution().isNumber("abc") == False
# assert Solution().isNumber("1 a") == False
# assert Solution().isNumber("2e10") == True
| [
"[email protected]"
] | |
101f05c1b708685c9f582744ecc1a14472bcf253 | 30b2b8a449558fc327daebf51096bf251ef6a8e9 | /scripts/Assemble.py | 389daba962491debc1e343d62c2dfc8ec94ca8d5 | [
"Zlib",
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | ekg/shasta | 0ac3462d0e3f73375a1b583967992b7e5deba1fd | e2fd3c3d79fb4cafe77c62f6af2fef46f7a04b01 | refs/heads/master | 2020-06-02T12:59:50.717211 | 2019-06-10T12:13:22 | 2019-06-10T12:13:22 | 191,161,600 | 0 | 0 | NOASSERTION | 2019-06-10T12:13:04 | 2019-06-10T12:13:03 | null | UTF-8 | Python | false | false | 686 | py | #!/usr/bin/python3
import shasta
import GetConfig
import ast
# Read the config file.
config = GetConfig.getConfig()
# Create the Assembler.
a = shasta.Assembler()
# Set up the consensus caller.
a.setupConsensusCaller(config['Assembly']['consensusCaller'])
# Figure out if we should use marginPhase, and if so set it up.
useMarginPhase = ast.literal_eval(config['Assembly']['useMarginPhase'])
if useMarginPhase:
a.setupMarginPhase()
a.accessKmers()
a.accessMarkers()
a.accessMarkerGraphVertices()
a.accessMarkerGraphEdges()
a.accessAssemblyGraphEdges()
a.accessAssemblyGraphEdgeLists()
a.accessMarkerGraphVertexRepeatCounts()
a.accessMarkerGraphEdgeConsensus()
a.assemble()
| [
"[email protected]"
] | |
7f2e99bfc97cb0b7fc22df73b15f8e1a322d6df3 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/exp-big-393.py | 20bce7381a23b7eb2eaa3484512995b694ea0636 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | # Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
            a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
while i <= n:
print(exp(2, i % 31))
i = i + 1 | [
"[email protected]"
] | |
2cfadbdf605826104ecf7f24efa19f78691766cf | c11f92e6a1578338cf759b5e1624a53225642e79 | /babi/user_data.py | 8307f03bf2d4e55df9bd70db01a1dca7746c0fcf | [
"MIT"
] | permissive | pganssle/babi | c1d50df3bdb924316779ab82e996ad46baafb986 | d20be693d2c067570f0a82e2c2baee34c827c3bd | refs/heads/master | 2021-04-11T19:55:08.285937 | 2020-03-21T18:47:37 | 2020-03-21T18:47:37 | 249,049,571 | 0 | 0 | MIT | 2020-03-21T19:50:32 | 2020-03-21T19:50:32 | null | UTF-8 | Python | false | false | 393 | py | import os.path
def _xdg(*path: str, env: str, default: str) -> str:
return os.path.join(
os.environ.get(env) or os.path.expanduser(default),
'babi', *path,
)
def xdg_data(*path: str) -> str:
return _xdg(*path, env='XDG_DATA_HOME', default='~/.local/share')
def xdg_config(*path: str) -> str:
return _xdg(*path, env='XDG_CONFIG_HOME', default='~/.config')
| [
"[email protected]"
] | |
8e34a70a3f9397eeb53ec22828a93db95486d8b8 | e458083d9e0f3564d3089de9febe3cad61733f47 | /Weekdays/python_looping/python_loopings/iterative.py | a8a0cb3dacd9d0abb7f63a3db3305d1043195b89 | [] | no_license | chavhanpunamchand/pythonYogeshSir | cd71d002927f8bbc8ad5ecff3282e0b7c6cfc13c | 3ee675f188e2680cde9e04ad03f2f5c9f3d46ba0 | refs/heads/master | 2023-02-14T01:53:34.342147 | 2021-01-09T17:36:17 | 2021-01-09T17:36:17 | 293,258,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,839 | py | '''
Iterative --> points
do..while --atleast once --> thru flag--* --> not provided in python
while --> may or may not
range
random
random.randoint
enumerate
dict.keys
dict.values
dict.items
for --> when u are sure about no of iterations in advance
--> when u are not sure about no of iterations in advance --> read data from db--> read data from files-->
while --> may or may not body execution --> condition and then body
do..while --> atleast once body execution --> body then condition --> condition bypass it thru flag-->
for -> loop --
Iterative --> statements --> execute the body as long as condition is satisfied
for
while
do.while * --> not in python --> we need to explicitly --> implement this
range(10) start--> 0 end=10 step=1
range(1,10) step=1
range(1,10,3) start -1 end 10 --> incr -->3
1 4 7
for(initialization;conditions;incremet/decrement) --> other lang
//body
for --> start stop step
range --> seq --> start stop -> step ---> range(10) --> 0 10 1 --> 0-9
start -> include -- range(1,10) --> 1 10 1 -->1-9
stop --> dont include range(1,10,2) 1 10 2 -->1,3,5,7,9
step --> increment by
list/set/tuple --> item --> simply element --
dict -->
step1
1.dict --> keys --> based on keys using dict.get(key) -> value --> we need to retrive
2.dict.keys --keys --> based on keys using dict.get(key) -> value --> we need to retrive
step2
packed = pickling = (10,20,30,40,50) values = (10,20,30) for item in dict.items() --> (key,value)
unpacked--unpickling = 10,20,40,50 v1,v2,v3 = 10,20,30 for k,v in dict.items() --> key value
3.dict.items() -- pair --> for k,v in dict.items() --> unpacked 1 11 --> direct
for i,k,v in enumerate(dict.items()) --> unpacked 0,1 11 --> direct
for item in dict.items() --> packed (1,11) -> direct
for i,item in enumerate(dict.items()) --> packed (1,11) -> direct
step1 --> key -- we can retrive value
step2 -- key,value -->direct
step3
dict.values() --> only values --> u cannot retrive keys--based
only values -->
enumerate --> assign numbers to the --> counter --> 0,1,2,3 -->
'''
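
# Sketch added for illustration (an addition, not part of the original notes): Python
# has no do..while statement, so the usual substitute is "while True: <body> ... break",
# which guarantees the body runs at least once before the exit condition is tested.
def run_at_least_once(next_value):
    attempts = 0
    while True:                 # body first, like do..while
        num = next_value()
        attempts += 1
        if num % 3 == 0:        # condition afterwards; leave once it holds
            return num, attempts

_sample = iter([4, 7, 9])
print(run_at_least_once(lambda: next(_sample)))   # -> (9, 3)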
import sys
import random
# i want to read the file print -> all lines- ->
#body -->
# fib -> series --> 0 1 1 2 3 5 8 --->
val = int(input('Enter Stop Point ')) # 5
#do --> atleast once -->
num1 = 0 # 0 1
num2 = 1 #1
result = 0 #0
counter = 1 #1
# swap -->
while (counter<=val): # true #final ans ---> 0,1,1,2
print(result,end=',') #1
counter +=1 # 4
num1 = num2 #1
num2 = result # 1
result = num1 + num2 #1+1 --> 2
sys.exit(0)
while True:
file = open('File')
if not file.readlines(): # in case no lines
break
num1 = int(input('Enter NO -->'))
num2 = int(input('Enter NO -->'))
while num1 == num2:
break
while True: # do while --> atleast once
num1 = int(input('Enter NO -->'))
num2 = int(input('Enter NO -->'))
if num1 == num2:
break
flag = 1
val = int(input('Enter Number : '))
while flag or val%2==0: # # when no is even
print('inside body') #atleast once body execution happens # may or may not body execution
val = int(input('Enter Number : '))
flag=0 # reset -> next time flag will not bypass--> execution
print('Outside body -->')
sys.exit(0)
#do while kind of implementation --> while do.while --> with the help of flag
flag = [0] # present
num = int(input('Enter Number : '))
# once --> first --> gc --> atleast once -> do while --> implementation
while not num%3==0 or flag: # loop will execute unless entered number is not divisible by 3
print('Inside Body') #gc --> always --> cannot-->depends on condition
num = int(input('Enter Number : '))
flag.clear() #empty --> absent
print("Divisible by 3 -->", num)
sys.exit(0)
#every iterator --> group of elements --> list,set,tuple,dict
#while --> condition --> then body -> may or may not
#do..while-->body first --> then condition --> atleast once
'''
while condition: --> body may or may not execute --> always --> depends on condition
body
do
body --> atleast once body will surely execute -- irrespective of condition
while condition
'''
#for item in range(1000): # if we are sure about no of iterations -->
# if we are not sure about no of iteration in advance
while True: # # start loop --> unless not break --> if num divisible by 3 --> when ??==? no-->
num = int(input('Enter Number :'))
if num%3==0:
print('Number found',num)
break
sys.exit(0)
sys.exit(0)
#no of attempts --> no --> if u are not sure about of numbers---> while
num = int(input('Enter Number : ')) # no of elements --> to reach to this condition ??
if num%3 == 0:
print('Number Found -- >',num)
sys.exit(0)
values = [random.randint(1,100) for item in range(10)] # this line is exactly to --> 51 to 57 lines -> compressive--> short hand expression -- short cut
print(values)
# i want to print all those -- nums divisiable by 3--> #3 stop
for item in values: #10
if item%3==0:
print(item)
break
sys.exit(0)
values = []
for item in range(10): # 10 times -->
val = random.randint(1,100) # 1 - 100 --> both inclusive -> random no generates
values.append(val)
print(values)
sys.exit(0)
valuesList = list(range(1,20))
valuesSet = set(range(1,20))
valuesTuple = tuple(range(1,20))
valuesList1 = list(range(1,10)) # 9 -->1------> 9 1:11,2:12 --> key
valuesList2 = list(range(11,20)) #9--> 11----> 19 -->value
valuesDict = dict(zip(valuesList1,valuesList2))
for cnt,pair in enumerate(valuesDict.items()):
print(cnt,pair) #0 (1,11) 1 (2,12)
sys.exit(0)
print('For List -->')
for index,item in enumerate(valuesList): # if we want counter for the element
print(index,item) # 0:1 --> 8:9
print('For Set --->')
for index,item in enumerate(valuesSet): # enumerate --> assigns counter to the elements-->
print(index,item)
sys.exit(0)
#values = [10,"A",True] # item ?? --? first int --> second --string--> 3rd --> boolean
#list --> group of elements as single entity -->
# array --> type of array --> object --> homogeneous data elements
print('Dict Items ->',valuesDict)
print('using dict --> items() method --> unpacked')
for key,val in valuesDict.items():
print(key,val) #1 11
print('using dict --> items() method --> packed --> tuple pairs')
for pair in valuesDict.items():
print(pair) #tuple(1,11) (2,12) (3,13)
sys.exit(0)
print('using dict --> values() method')
for val in valuesDict.values():
print(val) #only values --> 11 12 -----> 19
sys.exit(0)
print('Tuple Iterations using for loop')
for item in valuesTuple:
    print(item,end=' . ') #tuple elements separated by dot
print('using dict.nothing which is bydefault --> keys()')
for item in valuesDict: #default .keys()
print('Key {}'.format(item),"Value {}".format(valuesDict.get(item))) #only keys --> 1 2 3 4------> 9
print('using dict --> keys() method')
for key in valuesDict.keys(): #
print('Key {}'.format(key),"Value {}".format(valuesDict.get(key))) #only keys --> 1 2 3 4------> 9
print('List Of Items ->',valuesList)
print('List Iterations using for loop')
for item in valuesList:
    print(item,end=' # ') #list elements separated by hash #
print('Set Iterations using for loop')
for item in valuesSet:
    print(item, end=' , ') # set elements separated by ,
#include--1 but not 10
val = range(1,20,2) #1 <10
ans = list(val)
print(ans)
for item in ans:
print(item,end=',')
sys.exit(0)
val = range(10) # seq -->
print(val,type(val))
rlist = set(range(10))
print(rlist)
| [
"[email protected]"
] | |
3a0e1f78250db2e482d5eff70a0c07b7ee2c4b50 | 24a47669907cb008c3fea4265c4b6f37dddc54a4 | /keras_/kerascv/models/sepreresnet.py | 4331887dec1490c392c0c3197e65d35f06adb823 | [
"MIT"
] | permissive | JHLee0513/imgclsmob | ee1f6b8c7f677ed0e8a23e26d3165d37fd8549b4 | 45abcc1d313b84fa3595e13f0e4fa04b5db6c75d | refs/heads/master | 2020-04-22T14:13:25.337524 | 2019-02-12T18:26:09 | 2019-02-12T18:26:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,161 | py | """
SE-PreResNet, implemented in Keras.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['sepreresnet', 'sepreresnet18', 'sepreresnet34', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101',
'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b']
import os
from keras import layers as nn
from keras.models import Model
from .common import conv1x1, se_block, is_channels_first, flatten
from .preresnet import preres_block, preres_bottleneck_block, preres_init_block, preres_activation
def sepreres_unit(x,
in_channels,
out_channels,
strides,
bottleneck,
conv1_stride,
name="sepreres_unit"):
"""
SE-PreResNet unit.
Parameters:
----------
x : keras.backend tensor/variable/symbol
Input tensor/variable/symbol.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
name : str, default 'sepreres_unit'
Unit name.
Returns
-------
keras.backend tensor/variable/symbol
Resulted tensor.
"""
identity = x
if bottleneck:
x, x_pre_activ = preres_bottleneck_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
conv1_stride=conv1_stride,
name=name + "/body")
else:
x, x_pre_activ = preres_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
name=name + "/body")
x = se_block(
x=x,
channels=out_channels,
name=name + "/se")
resize_identity = (in_channels != out_channels) or (strides != 1)
if resize_identity:
identity = conv1x1(
x=x_pre_activ,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
name=name + "/identity_conv")
x = nn.add([x, identity], name=name + "/add")
return x
def sepreresnet(channels,
init_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
classes=1000):
"""
SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
input_shape = (in_channels, 224, 224) if is_channels_first() else (224, 224, in_channels)
input = nn.Input(shape=input_shape)
x = preres_init_block(
x=input,
in_channels=in_channels,
out_channels=init_block_channels,
name="features/init_block")
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
x = sepreres_unit(
x=x,
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
name="features/stage{}/unit{}".format(i + 1, j + 1))
in_channels = out_channels
x = preres_activation(
x=x,
name="features/post_activ")
x = nn.AvgPool2D(
pool_size=7,
strides=1,
name="features/final_pool")(x)
# x = nn.Flatten()(x)
x = flatten(x)
x = nn.Dense(
units=classes,
input_dim=in_channels,
name="output")(x)
model = Model(inputs=input, outputs=x)
model.in_size = in_size
model.classes = classes
return model
def get_sepreresnet(blocks,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
"""
Create PreResNet or SE-PreResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = sepreresnet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
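# Illustrative sketch (added example, not part of the original file): the named
# constructors below simply forward a block count to get_sepreresnet. For
# blocks=50 the helper above derives layers=[3, 4, 6, 3] and bottleneck
# channels [256, 512, 1024, 2048], i.e. channels=[[256]*3, [512]*4, [1024]*6, [2048]*3].
# The function name here is hypothetical and is never called by this module.
def _example_build_sepreresnet50b():
    # conv1_stride=False moves the stride to the second convolution of the
    # bottleneck block, which is what the "b" variants further down stand for.
    return get_sepreresnet(blocks=50, conv1_stride=False)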
def sepreresnet18(**kwargs):
"""
SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)
def sepreresnet34(**kwargs):
"""
SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)
def sepreresnet50(**kwargs):
"""
SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)
def sepreresnet50b(**kwargs):
"""
SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)
def sepreresnet101(**kwargs):
"""
SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)
def sepreresnet101b(**kwargs):
"""
SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)
def sepreresnet152(**kwargs):
"""
SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)
def sepreresnet152b(**kwargs):
"""
SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)
def sepreresnet200(**kwargs):
"""
SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an
experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
def sepreresnet200b(**kwargs):
"""
SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
def _test():
import numpy as np
import keras
pretrained = False
models = [
sepreresnet18,
sepreresnet34,
sepreresnet50,
sepreresnet50b,
sepreresnet101,
sepreresnet101b,
sepreresnet152,
sepreresnet152b,
sepreresnet200,
sepreresnet200b,
]
for model in models:
net = model(pretrained=pretrained)
# net.summary()
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sepreresnet18 or weight_count == 11776928)
assert (model != sepreresnet34 or weight_count == 21957204)
assert (model != sepreresnet50 or weight_count == 28080472)
assert (model != sepreresnet50b or weight_count == 28080472)
assert (model != sepreresnet101 or weight_count == 49319320)
assert (model != sepreresnet101b or weight_count == 49319320)
assert (model != sepreresnet152 or weight_count == 66814296)
assert (model != sepreresnet152b or weight_count == 66814296)
assert (model != sepreresnet200 or weight_count == 71828312)
assert (model != sepreresnet200b or weight_count == 71828312)
if is_channels_first():
x = np.zeros((1, 3, 224, 224), np.float32)
else:
x = np.zeros((1, 224, 224, 3), np.float32)
y = net.predict(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
1084dd65c5e897d08750a0765d039c5aa79fbda4 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/nlp/gpt2/src/utils/tensor_manipulations.py | 8ff23330029fad9374e2b614e0f24e24d7e6f763 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 7,159 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
tensor manipulations
"""
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
def extract_string_from_tensor(input_ids, mode="single", config=None, tokenizer=None):
"""
Args:
input_ids (Tensor): input sentences with shape [batch_size, seq_len].
mode (str): ["pair", "single"]
"pair" for tasks with paired inputs `<bos> A <eos> B <eos>`,
such as summarization task, the dataset format `<bos> Article <eos> Summary <eos>`,
reading comprehension task, the dataset format `<bos> Passage Question <eos> Answer <eos>`.
"single" for tasks with single input `<bos> A <eos>`, such as Language Modeling, Lambada task.
config: the configuration of GPT-2 model.
tokenizer: the tokenizer of GPT-2 model.
Return:
        prompt_list (list): list of prompt_text
        reference_list (list): list of reference_text, or second part of text (returned only in "pair" mode)
"""
batch_size = config.batch_size
seq_length = config.seq_length
prompt_list = [""] * batch_size
reference_list = [""] * batch_size
eos_text = tokenizer.eos_token
len_eos_text = len(eos_text)
input_ids = P.Reshape()(input_ids, (batch_size, seq_length))
if mode == "pair":
for batch_idx in range(batch_size):
sentence_tensor = input_ids[batch_idx]
sentence_list = sentence_tensor.asnumpy().tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
reference_start = prompt_end + len_eos_text
reference_end = sentence[reference_start:].find(
eos_text, 0) + reference_start
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
reference_list[batch_idx] = sentence[reference_start:reference_end]
return prompt_list, reference_list
# For single output datasets such as WikiText, etc.
if mode == "single":
for batch_idx in range(batch_size):
sentence_tensor = input_ids[batch_idx]
sentence_list = sentence_tensor.asnumpy().tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
else:
raise NotImplementedError('mode:{} not supported.'.format(mode))
return prompt_list
def extract_single_token_logits(logits=None, seq_pos=None):
"""
Args
logits: (batch_size,seq_length,vocab_size) e.g. when batchsize is 8,
sequence length is 1024 and vocab_size is 50257,
then logits is a Tensor with shape (8,1024,50257)
seq_pos:(batch_size) list
Return:
output_logits: (batch_size,1,vocab_size) extract the logit to predict the last token.
"""
batch_size = logits.shape[0]
for i in range(batch_size):
logit = logits[i:i + 1:1, seq_pos[i]:seq_pos[i] + 1:1, ::]
if i == 0:
output_logits = logit
else:
output_logits = P.Concat()((output_logits, logit))
return output_logits
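# Illustrative sketch (added example, not in the original source): given a batch
# of logits with shape (batch_size, seq_length, vocab_size), the helper above
# keeps exactly one position per sample. The toy shapes below are assumptions
# made only for this example.
def _example_extract_single_token_logits():
    import numpy as np
    logits = Tensor(np.zeros((2, 4, 10)), dtype=mstype.float32)
    # Keep position 1 of the first sample and position 3 of the second,
    # giving an output of shape (2, 1, 10).
    return extract_single_token_logits(logits=logits, seq_pos=[1, 3])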
def get_last_one_pos(input_mask: Tensor):
"""
Arg:
input_mask (Tensor): (batch_size,seq_length)
Return:
pos (Tensor): (batch_size,)
"""
input_mask_ = P.Cast()(input_mask, mstype.float32)
pos = P.ReduceSum(keep_dims=False)(input_mask_, axis=1) # (batch_size,)
pos = P.Cast()(pos, mstype.int32)
pos = pos - 1
return pos
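# Illustrative sketch (added example): the last valid position is simply the
# per-row count of 1s in the mask minus one. The toy mask below is an
# assumption made only for this example.
def _example_get_last_one_pos():
    import numpy as np
    input_mask = Tensor(np.array([[1, 1, 1, 0, 0],
                                  [1, 1, 1, 1, 0]]), dtype=mstype.int32)
    # Expected result: [2, 3]
    return get_last_one_pos(input_mask)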
def get_next_one_pos(input_mask: Tensor):
"""
Arg:
input_mask (Tensor): (batch_size,seq_length)
"""
input_mask_ = P.Cast()(input_mask, mstype.float32)
pos = P.ReduceSum(keep_dims=False)(input_mask_, axis=1) # (batch_size,)
pos = P.Cast()(pos, mstype.int32)
return pos
def add_last_token_mask(input_mask: Tensor, overflow_strategy: str = "shift"):
"""
add last token mask
Args:
input_mask: Tensor
overflow_strategy: str
Returns:
Tensor
"""
pos = get_next_one_pos(input_mask).asnumpy()
input_mask_np = input_mask.asnumpy()
maximum_length = input_mask.shape[1]
batch_size = input_mask.shape[0]
for idx in range(batch_size):
# not overflow
if pos[idx] < maximum_length:
input_mask_np[idx][pos[idx]] = 1
# overflow
else:
if overflow_strategy == "shift":
continue
if overflow_strategy == "truncate":
continue
else:
raise ValueError("{} is not an option in ['shift','truncate'].".format(overflow_strategy))
return Tensor(input_mask_np, dtype=mstype.int32)
def add_last_token(input_ids: Tensor, input_mask: Tensor, overflow_strategy: str = "shift", append_ids=None,
next_token_pos=None):
"""
add last token
Args:
input_ids: Tensor
input_mask: Tensor
overflow_strategy: str
append_ids: Any
next_token_pos: Any
Returns:
Tensor
"""
# get positional list/numpy array
if next_token_pos is None:
pos = get_next_one_pos(input_mask).asnumpy()
else:
pos = next_token_pos
# get numpy of inputs
input_mask_np = input_mask.asnumpy()
input_ids_np = input_ids.asnumpy()
maximum_length = int(input_mask.shape[1])
batch_size = int(input_mask.shape[0])
for idx in range(batch_size):
if append_ids[idx] == -1:
continue
# not overflow
if pos[idx] < maximum_length:
input_mask_np[idx][int(pos[idx])] = 1
input_ids_np[idx][int(pos[idx])] = append_ids[idx]
# overflow
else:
if overflow_strategy == "shift":
# shift one token left
input_ids_np[idx][0:maximum_length - 1] = input_ids_np[idx][1:maximum_length]
input_ids_np[idx][maximum_length - 1] = append_ids[idx]
continue
if overflow_strategy == "truncate":
# do nothing
continue
else:
raise ValueError("{} is not an option in ['shift','truncate'].".format(overflow_strategy))
return Tensor(input_ids_np, dtype=mstype.int32), Tensor(input_mask_np, dtype=mstype.int32)
| [
"[email protected]"
] | |
8b38feee1e7984c093ab2477b1e6c94aa9ae5032 | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /.history/Code/rearrange_20200119162227.py | a2b26cbc31714c4d2901c190ccccaf9a0c97fe88 | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 5,847 | py | import random
def random_rearrange(input_string):
''' Asks user for input of words, then
rearranges those words in a random order
'''
# input_string = input("enter words: ")
words = input_string.split(' ')
len_words = len(words)
# print(words)
    # Shuffle a copy of the words so each one appears exactly once in the result.
    word_list = words[:]
    random.shuffle(word_list)
space = ' '
sentence = space.join(word_list)
print(sentence)
return sentence
def reverse_order(input_string):
'''
Reverses the order or words inputted by user
'''
# input_string = input("enter words: ")
words = input_string.split(' ')
print(words)
length = len(words) - 1
word_list = []
for word in words:
word_list.append(words[length])
length -= 1
print(word_list)
space = ' '
sentence = space.join(word_list)
print(sentence)
return sentence
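# Illustrative sketch (added example, not part of the original script): both
# helpers return a space-joined sentence built from the input words.
def example_rearrange_demo():
    shuffled = random_rearrange("hello yellow fellow")   # random order, e.g. "fellow hello yellow"
    backwards = reverse_order("hello yellow fellow")     # "fellow yellow hello"
    return shuffled, backwards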
def mad_libs():
nouns_string = input('Give me a noun: ')
names_string = input('Give me a name: ')
verbs_string = input('Give me two verbs: ')
nouns = nouns_string.split(' ')
names = names_string.split(' ')
verbs = verbs_string.split(' ')
print(verbs)
print("One day I went to the store to buy myself a {}.".format(nouns[0]))
print("'What's the matter with you {}?' The clerk asked.".format(names[0]))
print("'This fits me well' I said")
print("'Well go on then {} it out so you don't miss out.".format(verbs[0]))
print("'Let me {} first and I'll give you what I have.'".format(verbs[1]))
# def anagram():
# ''' handshake with each letter
# rearrange to see every possible combination of words
# '''
# word = input('Letters/word: ')
# length = len(word)
# current = None
# temp = None
# for letter in word:
# current = letter
# for letter2 in word:
# temp = letter2
# if letter == letter2:
# pass
# else:
def anagram(input_string):
''' takes a word and returns every possible combination of letters
'''
word_string = input_string
new_strings = []
linked_list = LinkedList()
linked_list_swaps = LinkedList()
linked_list.read()
linked_list_swaps.read()
for letter in input_string:
linked_list.insert(letter)
linked_list_swaps.insert(letter)
linked_list.read()
print(len(word_string))
index = 0
while index < len(word_string):
for letter in word_string:
for letter2 in word_string:
linked_list_swaps.swap(letter, letter2)
new_strings.append(linked_list.read() + "\n")
linked_list_swaps.swap(letter2, letter)
index += 1
linked_list_swaps.read()
print(new_strings)
return
class Node():
def __init__(self, data=None, next_pointer=None):
self.data = data
self.next_pointer = next_pointer
def get_data(self):
return self.data
def get_next(self):
return self.next_pointer
def set_next(self, next_node):
self.next_pointer = next_node
class LinkedList():
def __init__(self, head=None):
self.head = head
def insert(self, data):
new_node = Node(data)
new_node.set_next(self.head)
self.head = new_node
def delete(self, data):
current = self.head
previous = None
found = False
while current and found == False:
if current.get_data() == data:
found = True
else:
previous = current
current = current.get_next()
if current == None:
            raise ValueError("does not exist")
if previous == None:
self.head = current.get_next()
if found == True:
previous.set_next(current.get_next())
def read(self):
current = self.head
read = []
while current:
data = current.get_data()
read.append(data)
current = current.get_next()
no_space = ''
sentence = no_space.join(read)
print(sentence)
        return sentence
def swap(self, data1, data2):
node1 = None
node2 = None
current = self.head
if data1 == data2:
print("n/a")
return
while current:
curr_data = current.get_data()
if curr_data == data1:
node1 = current
elif curr_data == data2:
node2 = current
current = current.get_next()
temp1 = node1.get_data()
temp2 = node2.get_data()
node1.data = temp2
node2.data = temp1
return
def size(self):
current = self.head
counter = 0
while current:
counter += 1
current = current.get_next()
print(counter)
return counter
if __name__ == '__main__':
input_string = 'hello yellow fellow'
anagram_string = 'superduper'
# random_rearrange(input_string)
# reverse_order()
# mad_libs()
anagram(anagram_string)
# linked_list = LinkedList()
# linked_list.insert('a')
# linked_list.insert('b')
# linked_list.insert('c')
# linked_list.insert('d')
# linked_list.insert('e')
# linked_list.insert('f')
# linked_list.insert('g')
# linked_list.insert('h')
# linked_list.insert('i')
# linked_list.insert('j')
# linked_list.insert('k')
# linked_list.read()
# linked_list.delete('a')
# linked_list.read()
# print(range(linked_list.size()))
# linked_list.swap([0],[10])
# linked_list.read() | [
"[email protected]"
] | |
655703818b71a380d0ddde23057a56603097cada | e41e2505ff0b0534017e85bda0e06493094d1498 | /frontend/corona_REST/setting.py | 6315adfe2d6fb9e632722dc0d095178b642a7331 | [
"MIT"
] | permissive | luyuliu/COVID19-Dashboard | 5d516f85284ca908321696bee405fdf1da5531d1 | 717f83e2767fa53367232e742c110515957a94fd | refs/heads/master | 2023-09-04T11:59:37.076149 | 2021-11-12T20:32:46 | 2021-11-12T20:32:46 | 253,892,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | DOMAIN = {
'ridership_actual': {'datasource': {'source': 'ridership_actual'}},
'county_info': {'datasource': {'source': 'county_info'}},
'census_occu_pop': {'datasource': {'source': 'census_occu_pop'}},
'corona_cases_state_level': {'datasource': {'source': 'corona_cases_state_level'}},
'census_occupation_population': {'datasource': {'source': 'census_occupation_population'}},
'system_info': {'datasource': {'source': 'system_info'}},
'other_ridership_hourly': {'datasource': {'source': 'other_ridership_hourly'}},
'corona_cases_github': {'datasource': {'source': 'corona_cases_github'}},
'other_ridership': {'datasource': {'source': 'other_ridership'}},
'ridership': {'datasource': {'source': 'ridership'}},
'census_occupation_industry': {'datasource': {'source': 'census_occupation_industry'}},
'ridership_hourly': {'datasource': {'source': 'ridership_hourly'}},
'aggregated_ridership_hourly': {'datasource': {'source': 'aggregated_ridership_hourly'}},
'system_info_backup': {'datasource': {'source': 'system_info_backup'}},
'google_trend': {'datasource': {'source': 'google_trend'}},
'corona_cases_usafacts': {'datasource': {'source': 'corona_cases_usafacts'}},
'census_transit_pop': {'datasource': {'source': 'census_transit_pop'}},
}
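# Note (added comment): this file follows the Eve/MongoDB settings layout, so each
# DOMAIN key above is exposed as a read endpoint backed by the MongoDB collection
# named in its 'source', e.g. GET /ridership_actual on the running API.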
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DBNAME = "corona"
ALLOW_UNKNOWN=True
X_DOMAINS='*'
PAGINATION_LIMIT = 10000
PAGINATION_DEFAULT = 10000 | [
"[email protected]"
] | |
aef9f80055a7aed0d9ee6b1f6e97282e910a9c59 | a8b17b17f9b2a640013064c50e1cebc27a7a68de | /10-Merging-DataFrames-with-Pandas/04-case-study-Summer-Olympics/02-loading-ioc-codes-dataframe.py | 6f36f6445cdf16c2b2857aa63e94ef5d965ab92a | [] | no_license | JohnnyFang/datacamp | 20eae09752521f14006cb3fda600b10bd7b12398 | 0fa8fa7682c23b0eb07bd03e4b75f5b77aeafa75 | refs/heads/master | 2020-04-18T00:27:37.358176 | 2020-02-04T20:54:19 | 2020-02-04T20:54:19 | 167,078,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | '''
Read file_path into a DataFrame called ioc_codes. The identifier file_path has been pre-defined with the filename 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'.
Select only the columns 'Country' and 'NOC' from ioc_codes.
Print the leading 5 and trailing 5 rows of the DataFrame ioc_codes (there are 200 rows in total). This has been done for you, so hit 'Submit Answer' to see the result!
'''
# Import pandas
import pandas as pd
# Create the file path: file_path
file_path = 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'
# Load DataFrame from file_path: ioc_codes
ioc_codes = pd.read_csv(file_path)
# Extract the relevant columns: ioc_codes
ioc_codes = ioc_codes[['Country', 'NOC']]
# Print first and last 5 rows of ioc_codes
print(ioc_codes.head())
print(ioc_codes.tail())
| [
"[email protected]"
] | |
a434c943b8afac2a3ba516952790983f4bebf8d9 | def27d5864764b877b6786835ec97f2bd74c6ba8 | /easy/HammingDistance.py | b9cb3fe45c35fdf770719e3a32aa986bf2a73a40 | [] | no_license | bolan2014/leetcode | f6cf38a49a9250abeb36543ea2498062c58e811d | 1c35fde3a65c4f216218f459736d4c39a29980d5 | refs/heads/master | 2021-04-09T16:59:41.494568 | 2017-05-10T03:47:14 | 2017-05-10T03:47:14 | 46,648,353 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | class Solution(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
bix, biy = bin(x)[2:], bin(y)[2:]
if len(bix) > len(biy):
biy = (len(bix) - len(biy)) * '0' + biy
else:
bix = (len(biy) - len(bix)) * '0' + bix
cnt = 0
for i in range(len(bix)):
if bix[i] != biy[i]:
cnt += 1
return cnt
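# Illustrative usage sketch (added, not part of the original solution):
# 1 is 0b0001 and 4 is 0b0100, so they differ in two bit positions.
# An equivalent one-liner would be bin(x ^ y).count('1').
if __name__ == "__main__":
    print(Solution().hammingDistance(1, 4))  # expected: 2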
| [
"[email protected]"
] | |
c453f63b56b29011977ee32465c52b69a612a70d | 630fe47bb5aa5e49b45ab101d87c2dd2c53d180f | /venv/Lib/site-packages/com/vmware/nsx/node/aaa/providers/vidm_client.py | b5c31723c754c80b2bea2a739a2388630213feb8 | [] | no_license | shrivastava-himanshu/Leetcode_practice | 467497a58d82ff3ae2569d5e610dc6f27a1f31d6 | 4c59799947c2b17bfd22ca2a08707ef85e84a913 | refs/heads/main | 2023-06-12T13:14:45.381839 | 2021-07-05T04:09:05 | 2021-07-05T04:09:05 | 367,546,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,544 | py | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.node.aaa.providers.vidm.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Status(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.node.aaa.providers.vidm.status'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _StatusStub)
self._VAPI_OPERATION_IDS = {}
def get(self):
"""
Read AAA provider vIDM status
:rtype: :class:`com.vmware.nsx.model_client.NodeAuthProviderVidmStatus`
:return: com.vmware.nsx.model.NodeAuthProviderVidmStatus
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get', None)
class _StatusStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/node/aaa/providers/vidm/status',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'NodeAuthProviderVidmStatus'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.node.aaa.providers.vidm.status',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'Status': Status,
}
| [
"[email protected]"
] | |
debe5f15c52bb08f8beadfea06a498d86d7c81c4 | 27880c807b97b3b318d002a547680c6881acf460 | /tests/argparse/special/test_overwrite.py | a4721283725798b1b7e6875be3aed206d66f9fc3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sbrodehl/miniflask | a1ebb809d544fbc235044624af9193982f01aced | 55b350b951ad2120ea13a986f742523206f407c6 | refs/heads/master | 2022-11-05T05:18:43.383396 | 2022-09-14T15:26:17 | 2022-09-14T15:26:17 | 252,498,534 | 0 | 0 | null | 2020-04-02T15:46:39 | 2020-04-02T15:46:39 | null | UTF-8 | Python | false | false | 2,702 | py | from pathlib import Path
import pytest
import miniflask # noqa: E402
def test_setup(capsys):
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults"])
mf.parse_args([
"--var_default_override_twice_and_cli", "1114"
])
captured = capsys.readouterr()
mf.event.print_all()
captured = capsys.readouterr()
assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 2
modules.defaults.var_default_override_twice: 3
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override(capsys):
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults_override"])
mf.parse_args([
"--var_default_override_twice_and_cli", "1114"
])
captured = capsys.readouterr()
mf.event.print_all()
captured = capsys.readouterr()
assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 12
modules.defaults.var_default_override_twice: 13
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override_twice(capsys):
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults_override", "defaults_override_twice"])
mf.parse_args([
"--var_default_override_twice_and_cli", "1114"
])
captured = capsys.readouterr()
mf.event.print_all()
captured = capsys.readouterr()
assert captured.out == """
modules.defaults.var_default: 1
modules.defaults.var_default_override: 12
modules.defaults.var_default_override_twice: 113
modules.defaults.var_default_override_twice_and_cli: 1114
""".lstrip()
def test_override_conflict():
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults2", "defaults_override"])
with pytest.raises(miniflask.exceptions.RegisterError):
mf.parse_args([])
mf.event.print_all()
def test_override_scoped_absolute():
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults2", "defaults_override_scoped_absolute"])
mf.parse_args([])
mf.event.print_all()
def test_override_scoped_relative():
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
mf.load(["defaults", "defaults2", "defaults_override_scoped_relative"])
mf.parse_args([])
mf.event.print_all()
| [
"[email protected]"
] | |
683cb94f99b944c57b75bcff395c4d70823f1021 | 27acd9eeb0d2b9b6326cc0477e7dbb84341e265c | /test/vraag4/src/isbn/156.py | 5d83c65f74ee33e129c19964d85548161b6c4135 | [] | no_license | VerstraeteBert/algos-ds | e0fe35bc3c5b7d8276c07250f56d3719ecc617de | d9215f11cdfa1a12a3b19ade3b95fa73848a636c | refs/heads/master | 2021-07-15T13:46:58.790446 | 2021-02-28T23:28:36 | 2021-02-28T23:28:36 | 240,883,220 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | def isISBN(code):
if not (
isinstance(code, str) and
len(code) == 13 and
( code.startswith('978') or code.startswith('979') ) and
code.isdigit()
):
return 0
controle=0
for i in range(12):
if i%2:
controle += 3* int(code[i])
else:
controle += int(code[i])
cc = controle % 10
cc = (10 - cc) % 10
return cc == int(code[-1])
def overzicht(codes):
groepen = {}
for i in range(11):
groepen[i] = 0
for code in codes:
if not isISBN(code):
groepen[10] += 1
else:
groepen[int(code[3])] += 1
print('Engelstalige landen: {}'.format(groepen[0] + groepen[1]))
print('Franstalige landen: {}'.format(groepen[2]))
print('Duitstalige landen: {}'.format(groepen[3]))
print('Japan: {}'.format(groepen[4]))
print('Russischtalige landen: {}'.format(groepen[5]))
print('China: {}'.format(groepen[7]))
print('Overige landen: {}'.format(groepen[6] + groepen[8] + groepen[9]))
print('Fouten: {}'.format(groepen[10]))
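if __name__ == '__main__':
    # Illustrative usage sketch (added, not part of the original exercise):
    # for the prefix 978902743964 the weighted sum is 138, so the check digit
    # is (10 - 138 % 10) % 10 = 2 and the full code 9789027439642 is accepted.
    print(isISBN('9789027439642'))        # True
    overzicht(['9789027439642', 'foo'])   # one code in group 9, one error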
| [
"[email protected]"
] | |
222e0833d388b0280d65ff78eb7ee790a0581964 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/student/role_helpers.py | ffe0f2c9f20f8f9d2d6244b6ab63b737d5bbcf22 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 1,412 | py | """
Helpers for student roles
"""
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
Role
)
from common.djangoapps.student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
OrgInstructorRole,
OrgStaffRole
)
def has_staff_roles(user, course_key):
"""
Return true if a user has any of the following roles
Staff, Instructor, Beta Tester, Forum Community TA, Forum Group Moderator, Forum Moderator, Forum Administrator
"""
forum_roles = [FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR]
is_staff = CourseStaffRole(course_key).has_user(user)
is_instructor = CourseInstructorRole(course_key).has_user(user)
is_beta_tester = CourseBetaTesterRole(course_key).has_user(user)
is_org_staff = OrgStaffRole(course_key.org).has_user(user)
is_org_instructor = OrgInstructorRole(course_key.org).has_user(user)
is_global_staff = GlobalStaff().has_user(user)
has_forum_role = Role.user_has_role_for_course(user, course_key, forum_roles)
if any([is_staff, is_instructor, is_beta_tester, is_org_staff,
is_org_instructor, is_global_staff, has_forum_role]):
return True
return False
| [
"[email protected]"
] | |
28a140f400a6d510811875a29923efe76038cf73 | ebe422519443dbe9c4acd3c7fd527d05cf444c59 | /evaluation_expression.py | ae02e8d4501a759bbab9c83d68ce0494a8051e94 | [] | no_license | SaiSudhaV/coding_platforms | 2eba22d72fdc490a65e71daca41bb3d71b5d0a7b | 44d0f80104d0ab04ef93716f058b4b567759a699 | refs/heads/master | 2023-06-19T18:05:37.876791 | 2021-07-15T18:02:19 | 2021-07-15T18:02:19 | 355,178,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | class Solution:
# @param A : list of strings
# @return an integer
def evalRPN(self, A):
res, opr = [], ['+', '-', '*', '/']
for i in A:
if i not in opr:
res.append(i)
elif len(res) >= 2:
tem1 = str(res.pop())
tem2 = str(res.pop())
p = int(eval(tem2 + i + tem1))
res.append(p)
        # Cast so a single bare operand also comes back as an integer.
        return int(res.pop())
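# Illustrative usage sketch (added): ["2", "1", "+", "3", "*"] encodes (2 + 1) * 3.
if __name__ == "__main__":
    print(Solution().evalRPN(["2", "1", "+", "3", "*"]))  # expected: 9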
"[email protected]"
] | |
64b12d49a26a0628242f870670d9e5d34e02cb5e | f850e0f75a76c500f5ba8a9ab6fa6d5f40d22b23 | /pyecharts_demo/demos/Bar/multiple_y_axes.py | e006b619f9172a4af780cb1631e85e41c4e503b7 | [
"MIT"
] | permissive | jay20161013/pywebio-chart-gallery | 805afa2643b0d330a4a2f80f1e0a8827e8f61afe | 11fd8a70b2e9ff5482cf5924b110a11f3469edfc | refs/heads/master | 2023-03-20T01:58:30.979109 | 2021-03-18T12:48:31 | 2021-03-18T12:48:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | from pywebio.output import put_html
import pyecharts.options as opts
from pyecharts.charts import Bar, Line
"""
Gallery 使用 pyecharts 1.0.0
参考地址: https://www.echartsjs.com/examples/editor.html?c=multiple-y-axis
目前无法实现的功能:
1、暂无
"""
colors = ["#5793f3", "#d14a61", "#675bba"]
x_data = ["1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"]
legend_list = ["蒸发量", "降水量", "平均温度"]
evaporation_capacity = [
2.0,
4.9,
7.0,
23.2,
25.6,
76.7,
135.6,
162.2,
32.6,
20.0,
6.4,
3.3,
]
rainfall_capacity = [
2.6,
5.9,
9.0,
26.4,
28.7,
70.7,
175.6,
182.2,
48.7,
18.8,
6.0,
2.3,
]
average_temperature = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2]
bar = (
Bar(init_opts=opts.InitOpts(width="1260px", height="720px"))
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="蒸发量",
yaxis_data=evaporation_capacity,
yaxis_index=0,
color=colors[1],
)
.add_yaxis(
series_name="降水量", yaxis_data=rainfall_capacity, yaxis_index=1, color=colors[0]
)
.extend_axis(
yaxis=opts.AxisOpts(
name="蒸发量",
type_="value",
min_=0,
max_=250,
position="right",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color=colors[1])
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
)
)
.extend_axis(
yaxis=opts.AxisOpts(
type_="value",
name="温度",
min_=0,
max_=25,
position="left",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color=colors[2])
),
axislabel_opts=opts.LabelOpts(formatter="{value} °C"),
splitline_opts=opts.SplitLineOpts(
is_show=True, linestyle_opts=opts.LineStyleOpts(opacity=1)
),
)
)
.set_global_opts(
yaxis_opts=opts.AxisOpts(
type_="value",
name="降水量",
min_=0,
max_=250,
position="right",
offset=80,
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color=colors[0])
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
),
tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
)
)
line = (
Line()
.add_xaxis(xaxis_data=x_data)
.add_yaxis(
series_name="平均温度", y_axis=average_temperature, yaxis_index=2, color=colors[2]
)
)
put_html(bar.overlap(line).render_notebook())
| [
"[email protected]"
] | |
13a72f1e1d9a3d638183d21c021fdda9d81e2338 | 22d9d90aa171869bba3d31f2307abe58aadd3d1d | /qtim_tools/qtim_features/extract_features.py | bec0fcbfb0fe59a061988c379ed165343d2c6b95 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | liviust/qtim_tools | a790b66bf2d3cd1d5b8036a61a264be57614b47d | 64d7d68b1335239f0d7707f8c1e28af71706e4ad | refs/heads/master | 2020-05-27T21:19:22.480893 | 2017-02-28T22:30:14 | 2017-02-28T22:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,435 | py | """ This 'learner' program, perhaps wrongly named, is the my current
general utility function. It takes in a folder full of images and
labels, parses them into numpy arrays, extracts features from those
arrays, and writes them into an easily accessible .csv. As of yet,
it does not do any learning..
"""
# import
import GLCM
import morphology
import statistics
from qtim_tools.qtim_utilities import nifti_util
import sys, getopt
import glob
import os
import numpy as np
import nibabel as nib
import csv
import fnmatch
from shutil import copy, move
from multiprocessing.pool import Pool
from multiprocessing import freeze_support
from functools import partial
feature_dictionary = {'GLCM': GLCM, 'morphology': morphology, 'statistics': statistics}
def generate_feature_list_batch(folder, features=['GLCM', 'morphology', 'statistics'], recursive=False, labels=False, label_suffix="-label", universal_label='', decisions=False, levels=255, normalize_intensities=True,mask_value=0, use_labels=[-1], erode=[0,0,0], filenames=True, featurenames=True, outfile='', overwrite=True, clear_file=True, write_empty=True, return_output=False, test=False):
total_features, feature_indexes, label_output = generate_feature_indices(features, featurenames)
# This needs to be restructured, probably with a new method to iterate through images. Currently, this will not work
# wtihout an output file. The conflict is between retaining the ability to append to files in real-time (to prevent
# catastrophic errors from wasting eons of processing time) and having a conditional "outfile" parameter.
if outfile != '':
outfile = determine_outfile_name(outfile, overwrite)
if clear_file:
open(outfile, 'w').close()
with open(outfile, 'ab') as writefile:
csvfile = csv.writer(writefile, delimiter=',')
csvfile.writerow(label_output[0,:])
imagepaths, label_images = generate_filename_list(folder, labels, label_suffix, recursive)
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
for imagepath in imagepaths:
print '\n'
print 'Pre-processing data...'
image_list, unmodified_image_list, imagename_list, attributes_list = generate_numpy_images(imagepath, labels=labels, label_suffix=label_suffix, label_images=label_images, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode)
if image_list == []:
if write_empty:
empty_output = np.zeros((1, total_features + 1), dtype=object)
empty_output[0,0] = imagepath
csvfile.writerow(empty_output[0,:])
continue
print 'Pre-processing complete!'
for image_idx, image in enumerate(image_list):
print ''
print 'Working on image...'
print imagename_list[image_idx]
print 'Voxel sum...'
print np.sum(image)
print 'Image shape...'
print image.shape
if filenames:
index = imagename_list[image_idx]
else:
index = numerical_output.shape[0]
if numerical_output[0,0] == 0:
numerical_output[0, :] = generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=mask_value, normalize_intensities=normalize_intensities)
index_output[0,:] = index
else:
numerical_output = np.vstack((numerical_output, generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=mask_value, normalize_intensities=normalize_intensities)))
index_output = np.vstack((index_output, index))
csvfile.writerow(np.hstack((index_output[-1,:], numerical_output[-1,:])))
final_output = np.hstack((index_output, numerical_output))
print 'Feature writing complete, writing output...'
print '\n'
for row in final_output:
print row
if return_output:
return final_output
def generate_feature_list_single(vol_filename, features=['GLCM', 'morphology', 'statistics'], labels=False, label_filename='',label_suffix="-label", decisions=False, levels=255, filenames=True, featurenames=True, outfile='', overwrite=True, write_empty=True, mask_value=0, test=False, use_labels=[-1], erode=0):
total_features, feature_indexes, label_output = generate_feature_indices(features, featurenames)
if outfile != '':
outfile = determine_outfile_name(outfile, overwrite)
with open(outfile, 'ab') as writefile:
csvfile = csv.writer(writefile, delimiter=',')
csvfile.writerow(label_output[0,:])
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
final_output = write_image_method(vol_filename, label_filename, csvfile, total_features, features, feature_indexes, numerical_output, index_output, labels=False, label_suffix='-label', levels=100, mask_value=0, use_labels=[-1], erode=0, write_empty=False)
print 'Feature writing complete, writing output...'
print '\n'
print final_output
return final_output
def generate_feature_list_parallel(folder, features=['GLCM', 'morphology', 'statistics'], recursive=False, labels=False, label_suffix="-label", decisions=False, levels=255, mask_value=0, use_labels=[-1], erode=[0,0,0], filenames=True, featurenames=True, outfile='', overwrite=True, clear_file=True, write_empty=True, return_output=False, test=False, processes=1):
total_features, feature_indexes, label_output = generate_feature_indices(features, featurenames)
if outfile != '':
outfile = determine_outfile_name(outfile, overwrite)
if clear_file:
open(outfile, 'w').close()
imagepaths, label_images = generate_filename_list(folder, labels, label_suffix, recursive)
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
subunits = []
sublength = np.floor(len(imagepaths) / processes)
print 'Dividing data into ' + str(processes) + ' subgroups of length.. ' + str(int(sublength)) + ' units.'
for i in xrange(processes - 1):
subunits += [[imagepaths[int(i*sublength):int((i+1)*sublength)], label_images[int(i*sublength):int((i+1)*sublength)]]]
subunits += [[imagepaths[int((processes - 1)*sublength):], label_images[int((processes - 1)*sublength):]]]
subprocess = partial(generate_feature_list_chunk, total_features=total_features, feature_indexes=feature_indexes, label_output=label_output, features=features, labels=labels, label_suffix=label_suffix, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode, write_empty=write_empty, filenames=filenames)
optimization_pool = Pool(processes)
results = optimization_pool.map(subprocess, subunits)
output_data = label_output[0,:]
stitch_index = 0
for result in results:
output_data = np.vstack((output_data, result))
final_output = output_data
with open(outfile, 'wb') as writefile:
csvfile = csv.writer(writefile, delimiter=',')
for row in final_output:
csvfile.writerow(row)
print 'Feature writing complete, writing output...'
print '\n'
for row in final_output:
print row
if return_output:
return final_output
def generate_feature_list_chunk(data, total_features, feature_indexes, label_output, features=['GLCM', 'morphology', 'statistics'], labels=False, label_suffix="-label", levels=255, mask_value=0, use_labels=[-1], erode=[0,0,0], write_empty=True, filenames=True):
imagepaths = data[0]
label_images = data[1]
numerical_output = np.zeros((1, total_features), dtype=float)
index_output = np.zeros((1, 1), dtype=object)
output_data = np.zeros((1, total_features + 1), dtype=object)
for imagepath in imagepaths:
print '\n'
print 'Pre-processing data...'
image_list, unmodified_image_list, imagename_list, attributes_list = generate_numpy_images(imagepath, labels=labels, label_suffix=label_suffix, label_images=label_images, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode)
if image_list == []:
if write_empty:
empty_output = np.zeros((1, total_features + 1), dtype=object)
empty_output[0,0] = imagepath
output_data = np.vstack(output_data, empty_output)
continue
print 'Pre-processing complete!'
for image_idx, image in enumerate(image_list):
print ''
print 'Working on image...'
print imagename_list[image_idx]
print 'Voxel sum...'
print np.sum(image)
print 'Image shape...'
print image.shape
if filenames:
index = imagename_list[image_idx]
else:
index = numerical_output.shape[0]
if numerical_output[0,0] == 0:
numerical_output[0, :] = generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)
index_output[0,:] = index
else:
numerical_output = np.vstack((numerical_output, generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)))
index_output = np.vstack((index_output, index))
output_data = np.vstack((output_data, (np.hstack((index_output[-1,:], numerical_output[-1,:])))))
return output_data
def write_image_method(imagepath, label_images, csvfile, total_features, features, feature_indexes, numerical_output, index_output, labels=False, label_suffix='-label', levels=100, mask_value=0, use_labels=[-1], erode=0, filenames=True, write_empty=False):
# This function is a bit clumsy. So many parameters..
print '\n'
print 'Pre-processing data...'
image_list, unmodified_image_list, imagename_list, attributes_list = generate_numpy_images(imagepath, labels=labels, label_suffix=label_suffix, label_images=label_images, levels=levels, mask_value=mask_value, use_labels=use_labels, erode=erode)
if image_list == []:
if write_empty:
empty_output = np.zeros((1, total_features + 1), dtype=object)
empty_output[0,0] = imagepath
print 'Writing empty row in place of missing data...'
csvfile.writerow(empty_output[0,:])
else:
print 'Pre-processing complete!'
for image_idx, image in enumerate(image_list):
print ''
print 'Working on image...'
print imagename_list[image_idx]
print 'Voxel sum...'
print np.sum(image)
print 'Image shape...'
print image.shape
if filenames:
index = imagename_list[image_idx]
else:
index = numerical_output.shape[0]
if numerical_output[0,0] == 0:
numerical_output[0, :] = generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)
index_output[0,:] = index
else:
numerical_output = np.vstack((numerical_output, generate_feature_list_method(image, unmodified_image_list[image_idx], attributes_list[image_idx], features, feature_indexes, total_features, levels, mask_value=0)))
index_output = np.vstack((index_output, index))
csvfile.writerow(np.hstack((index_output[-1,:], numerical_output[-1,:])))
return np.hstack((index_output, numerical_output))
def determine_outfile_name(outfile, overwrite=True):
write_flag = False
while not write_flag:
if not os.path.isfile(outfile):
write_flag = True
continue
if overwrite:
write_flag = True
else:
split_outfile = str.split(outfile,'.')
print split_outfile
outfile = '.'.join(split_outfile[0:-1]) + '_new.' + split_outfile[-1]
if not os.path.isfile(outfile):
write_flag = True
return outfile
def generate_feature_indices(features=['GLCM', 'morphology', 'statistics'], featurenames=True):
total_features = 0
feature_indexes = [0]
for feature in features:
total_features += feature_dictionary[feature].feature_count()
if feature_indexes == [0]:
feature_indexes = [0, feature_dictionary[feature].feature_count()]
else:
feature_indexes += [feature_indexes[-1] + feature_dictionary[feature].feature_count()]
if featurenames:
label_output = np.zeros((1, total_features+1), dtype=object)
for feature_idx, feature in enumerate(features):
label_output[0, (1+feature_indexes[feature_idx]):(1+feature_indexes[feature_idx+1])] = feature_dictionary[feature].featurename_strings()
label_output[0,0] = 'index'
return [total_features, feature_indexes, label_output]
def generate_filename_list(folder, labels=False, label_suffix='-label', recursive=False):
if recursive:
imagepaths = []
for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*.nii*'):
imagepaths.append(os.path.join(root, filename))
else:
imagepaths = glob.glob(os.path.join(folder, "*.nii*"))
# A bit redundant; this step and the previous step could probably be combined.
imagepaths = [x for x in imagepaths if (x.endswith('.nii') or x.endswith('.nii.gz'))]
if labels:
label_images = [ x for x in imagepaths if label_suffix in x ]
else:
label_images = []
imagepaths = [ x for x in imagepaths if label_suffix not in x ]
if imagepaths == []:
raise ValueError("There are no .nii or .nii.gz images in the provided folder.")
if labels and label_images == []:
raise ValueError("There are no labels with the provided suffix in this folder. If you do not want to use labels, set the \'labels\' flag to \'False\'. If you want to change the label file suffix (default: \'-label\'), then change the \'label_suffix\' flag.")
return [imagepaths, label_images]
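# Illustrative sketch (added example; the folder path is hypothetical): with
# labels=True and the default suffix, a folder containing tumor_01.nii.gz and
# tumor_01-label.nii.gz would yield the volume in imagepaths and the labelmap
# in label_images.
def _example_generate_filename_list():
    return generate_filename_list('/path/to/nifti_folder', labels=True, label_suffix='-label')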
def generate_numpy_images(imagepath, labels=False, label_suffix='-label', label_images=[], mask_value=0, levels=255, use_labels=[-1], erode=0):
image_list = []
unmodified_image_list = []
imagename_list = []
attributes_list = []
# nifti_util.save_alternate_nifti(imagepath, levels, mask_value=mask_value)
image = nifti_util.nifti_2_numpy(imagepath)
# This is likely redundant with the basic assert function in nifti_util
if not nifti_util.assert_3D(image):
print 'Warning: image at path ' + imagepath + ' has multiple time points or otherwise greater than 3 dimensions, and will be skipped.'
return [[],[],[],[]]
if labels:
if label_suffix == '':
label_path = label_images
else:
head, tail = os.path.split(imagepath)
split_path = str.split(tail, '.')
label_path = split_path[0] + label_suffix + '.' + '.'.join(split_path[1:])
label_path = os.path.join(head, label_path)
if os.path.isfile(label_path):
label_image = nifti_util.nifti_2_numpy(label_path)
if label_image.shape != image.shape:
print 'Warning: image and label do not have the same dimensions. Imaging padding support has not yet been added. This image will be skipped.'
return [[],[],[],[]]
# In the future: create an option to analyze each frame separately.
if not nifti_util.assert_3D(label_image):
print 'Warning: image at path ' + imagepath + ' has multiple time points or otherwise greater than 3 dimensions, and will be skipped.'
return [[],[],[],[]]
label_image = label_image.astype(int)
label_indices = np.unique(label_image)
if label_indices.size == 1:
print 'Warning: image at path ' + imagepath + ' has an empty label-map, and will be skipped.'
return[[],[],[],[]]
# Will break if someone puts in '0' as a label to use.
if use_labels[0] != -1:
label_indices = np.array([0] + [x for x in label_indices if x in use_labels])
masked_images = nifti_util.mask_nifti(image, label_image, label_indices, mask_value=mask_value)
for masked_image in masked_images:
# nifti_util.check_tumor_histogram(masked_image, second_image_numpy=image, mask_value=mask_value, image_name = str.split(imagepath, '\\')[-1])
# nifti_util.check_image(masked_image, mode="maximal_slice")
unmodified_image_list += [np.copy(masked_image)]
masked_image = nifti_util.coerce_levels(masked_image, levels=levels, reference_image=image, method="divide", mask_value=mask_value)
# nifti_util.check_image(masked_image, mode="maximal_slice")
# It would be nice in the future to check if an image is too small to erode. Maybe a minimum-size parameter?
# Or maybe a "maximum volume reduction by erosion?" Hmm..
masked_image = nifti_util.erode_label(masked_image, iterations=erode)
# nifti_util.check_image(masked_image, mode="maximal_slice")
image_list += [masked_image]
filename = str.split(label_path, '\\')[-1]
if label_indices.size == 2:
imagename_list += [filename]
else:
split_filename = str.split(filename, '.')
for labelval in label_indices[1:]:
filename = split_filename[0] + '_' + str(int(labelval)) + '.' + split_filename[1]
imagename_list += [filename]
attributes_list += [nifti_util.return_nifti_attributes(imagepath)] * (label_indices.size - 1)
print 'Finished... ' + str.split(imagepath, '\\')[-1]
else:
print 'Warning: image at path ' + imagepath + ' has no label-map, and will be skipped.'
return[[],[],[],[]]
else:
image = nifti_util.coerce_levels(image, levels=levels, reference_image=image, method="divide", mask_value=mask_value)
image_list += [image]
unmodified_image_list += [image]
imagename_list += [imagepath]
attributes_list += [nifti_util.return_nifti_attributes(imagepath)]
return [image_list, unmodified_image_list, imagename_list, attributes_list]
def generate_feature_list_method(image, unmodified_image, attributes, features, feature_indexes='', total_features='', levels=-1, mask_value=0, normalize_intensities=False):
if feature_indexes == '' or total_features == '':
total_features = 0
feature_indexes = [0]
for feature in features:
total_features += feature_dictionary[feature].feature_count()
if feature_indexes == [0]:
feature_indexes = [0, feature_dictionary[feature].feature_count()]
else:
feature_indexes += [feature_indexes[-1] + feature_dictionary[feature].feature_count()]
numerical_output = np.zeros((1, total_features), dtype=float)
if (image != mask_value).sum() == 0:
print 'Warning: image is empty, either because it could not survive erosion or because of another error. It will be skipped.'
return numerical_output
for feature_idx, feature in enumerate(features):
if feature == 'GLCM':
# nifti_util.check_tumor_histogram(image, mask_value)
# nifti_util.check_image(image, mode="maximal_slice")
glcm_image = np.copy(image)
glcm_image = glcm_image.astype(int)
levels += 1
print 'Calculating GLCM...'
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = GLCM.glcm_features(glcm_image, levels=levels)
if feature == 'morphology':
print 'Calculating morphology features...'
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = morphology.morphology_features(unmodified_image, attributes)
if feature == 'statistics':
# Should intensity statistics be eroded? Currently, they are not, as indicated by the "unmodified image" parameter.
print 'Calculating statistical features...'
if normalize_intensities:
                # Use the level-coerced image rather than the GLCM copy, so this
                # branch also works when 'GLCM' is not in the requested feature list.
                numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = statistics.statistics_features(image.astype(int))
else:
numerical_output[0, feature_indexes[feature_idx]:feature_indexes[feature_idx+1]] = statistics.statistics_features(unmodified_image)
print '\n'
return numerical_output
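# Illustrative sketch (added example): statistics features can be computed on a
# bare numpy volume without a labelmap or NIfTI attributes. The random phantom
# below is an assumption made only for this example.
def _example_statistics_on_phantom():
    phantom = np.random.rand(10, 10, 10)
    return generate_feature_list_method(phantom, phantom, attributes=[], features=['statistics'])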
def test_method():
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','MR_Tumor_Shape'))
generate_feature_list_batch(folder=test_folder, features=['morphology', 'statistics'], labels=True, levels=100, outfile='test_feature_results_shape.csv',test=False, mask_value=0, erode=[0,0,0], overwrite=True)
return
def test_parallel():
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','Phantom_GLCM'))
test_folder = '/home/administrator/data/tbData/tbType/TrainingSet'
generate_feature_list_parallel(folder=test_folder, features=['GLCM','morphology', 'statistics'], labels=True, levels=100, outfile='lung_features_results_parallel_500.csv',test=False, mask_value=0, erode=[0,0,0], overwrite=True, processes=35)
return
def parse_command_line(argv):
# This code should be run from the folder above the main "qtim_tools" folder using the command "python -m qtim_tools.qtim_features.test"
# All niftis in this folder will be processed. The program searches for a nifti file, and then checks if there is a matching labelmap file with the suffix '-label'.
# It currently loads from some built in data from the qtim_tools project, but you can change the filepath below to anywhere.
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','Phantom_Intensity'))
# If labels is set to False, the whole image will be processed. This can take a very long time for GLCM features especially, so it is best we stick to labels.
labels = True
# The only available features are 'GLCM', 'morphology', and 'statistics' for now.
features = ['GLCM','morphology', 'statistics']
# In order for GLCM to work correctly, an image has to be reduced to a set amount of gray-levels. Using all available levels in an image will most likely produce a useless result.
# More levels will result in more intensive computation.
levels = 100
# This will save a spreadsheet of all requested feature results.
outfile = 'test_feature_results_intensity.csv'
# If your label is for some reason masked with a value other than zero, change this parameter.
mask_value = 0
# The erode parameter will take [x,y,z] pixels off in each dimension. On many volumes, it is not useful to erode in the z (axial) slice because of high slice thickness.
# Currently, the erode parameter only applies to GLCM. It does not apply to intensity statistic features, although maybe it should.
erode = [0,0,0]
# If overwrite is False, then the program will try to save to the chosen filename with '_copy' appended if the chosen filename already exists.
overwrite = True
extract_features.generate_feature_list_batch(folder=test_folder, features=features, labels=labels, levels=levels, outfile=outfile, mask_value=mask_value, erode=erode, overwrite=overwrite)
def test():
# This code should be run from the folder above the main "qtim_tools" folder using the command "python -m qtim_tools.qtim_features.test"
# All niftis in this folder will be processed. The program searches for a nifti file, and then checks if there is a matching labelmap file with the suffix '-label'.
# It currently loads from some built in data from the qtim_tools project, but you can change the filepath below to anywhere.
test_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','test_data','test_data_features','Phantom_Intensity'))
# If labels is set to False, the whole image will be processed. This can take a very long time for GLCM features especially, so it is best we stick to labels.
labels = True
# The only available features are 'GLCM', 'morphology', and 'statistics' for now.
features = ['GLCM','morphology', 'statistics']
# In order for GLCM to work correctly, an image has to be reduced to a set amount of gray-levels. Using all available levels in an image will most likely produce a useless result.
# More levels will result in more intensive computation.
levels = 100
# This will save a spreadsheet of all requested feature results.
outfile = 'test_feature_results_intensity.csv'
# If your label is for some reason masked with a value other than zero, change this parameter.
mask_value = 0
# The erode parameter will take [x,y,z] pixels off in each dimension. On many volumes, it is not useful to erode in the z (axial) slice because of high slice thickness.
# Currently, the erode parameter only applies to GLCM. It does not apply to intensity statistic features, although maybe it should.
erode = [0,0,0]
# If overwrite is False, then the program will try to save to the chosen filename with '_copy' appended if the chosen filename already exists.
overwrite = True
generate_feature_list_batch(folder=test_folder, features=features, labels=labels, levels=levels, outfile=outfile, mask_value=mask_value, erode=erode, overwrite=overwrite)
def extract_features(folder, outfile, labels=True, features=['GLCM','morphology', 'statistics'], levels = 100, mask_value = 0, erode = [0,0,0], overwrite = True, label_suffix='-label', universal_label=''):
generate_feature_list_batch(folder=folder, outfile=outfile, labels=labels, features=features, levels=levels, mask_value=mask_value, erode=erode, overwrite=overwrite, label_suffix=label_suffix, universal_label=universal_label)
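# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# The call below simply restates the wrapper's own signature; the folder and output
# filenames are hypothetical placeholders rather than paths shipped with qtim_tools.
# extract_features(folder='/path/to/nifti_folder', outfile='feature_results.csv',
#                  features=['GLCM', 'morphology', 'statistics'], levels=100,
#                  mask_value=0, erode=[0, 0, 0], overwrite=True, label_suffix='-label')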
if __name__ == "__main__":
np.set_printoptions(suppress=True, precision=2)
# test_method()
test_parallel() | [
"[email protected]"
] | |
ee0d1f6ab07282ef487a55f8caa50881541945c5 | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/form/string_pdb_text/extract.py | 73bb0feea3ace5d705b0963185af3e24f5ad4607 | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 812 | py | from molsysmt._private.exceptions import NotImplementedMethodError
from molsysmt._private.digestion import digest
from molsysmt._private.variables import is_all
@digest(form='string:pdb_text')
def extract(item, atom_indices='all', structure_indices='all', copy_if_all=True):
if is_all(atom_indices) and is_all(structure_indices):
if copy_if_all:
from copy import copy
tmp_item = copy(item)
else:
tmp_item = item
else:
from . import to_molsysmt_MolSys
from ..molsysmt_MolSys import to_string_pdb_text as molsysmt_MolSys_to_string_pdb_text
tmp_item = to_molsysmt_MolSys(item, atom_indices=atom_indices, structure_indices=structure_indices)
tmp_item = molsysmt_MolSys_to_string_pdb_text(tmp_item)
return tmp_item
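# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# Assuming `pdb_text` already holds the contents of a PDB file as a string (the
# 'string:pdb_text' form this function is digested for), a call might look like:
# sub_text = extract(pdb_text, atom_indices=[0, 1, 2, 3], structure_indices='all')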
| [
"[email protected]"
] | |
6394a2ecb06983781a9b4f36dfbe1b467f515d16 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS/YCHF_KCBYCHF_OMS_063.py | bf7954767971a8fe32cc9735084cfdcaf4130323 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,495 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_063(xtp_test_case):
def setUp(self):
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_063')
#clear_data_and_restart_all()
#Api.trade.Logout()
#Api.trade.Login()
pass
#
def test_YCHF_KCBYCHF_OMS_063(self):
title = '停止OMS服务(沪A五档即成转限价未成卖出)'
        # Define the expected values for the current test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel reported, partially cancelled, reported & awaiting cancel, cancelled, rejected order, cancel rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and do not need to be changed
case_goal = {
'期望状态': '未成交',
'errorID': 0,
'errorMSG': queryOrderErrorMsg(0),
'是否生成报单': '是',
'是否是撤废': '否',
# '是否是新股申购': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status, side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'报单测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
print(stkparm['错误原因'])
self.assertEqual(rs['报单测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 300,
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore the available funds
#sql_transfer = SqlData_Transfer()
#sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
#oms_restart()
self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
382b4289c3b1bb000f2690f9c6f2a63fe5e1583c | f33885d6f1e963586f9e7b1e1a46a271d125e2e7 | /ci/nur/fileutils.py | 338149b414047c1411f8783359d43a434d120e33 | [
"MIT"
] | permissive | nix-community/NUR | cad821a31d965ade9869c21f03edf9f7bb4cdf02 | 80012e6c2de5ea9c4101948b0d58c745e7813180 | refs/heads/master | 2023-09-03T05:05:30.497198 | 2023-09-03T04:32:01 | 2023-09-03T04:32:01 | 123,327,588 | 965 | 385 | MIT | 2023-09-12T07:10:52 | 2018-02-28T18:49:50 | Python | UTF-8 | Python | false | false | 921 | py | import json
import os
import shutil
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Generator, Union
PathType = Union[str, Path]
def to_path(path: PathType) -> Path:
if isinstance(path, Path):
return path
else:
return Path(path)
def write_json_file(data: Any, path: PathType) -> None:
path = to_path(path)
f = NamedTemporaryFile(mode="w+", prefix=path.name, dir=str(path.parent))
with f as tmp_file:
json.dump(data, tmp_file, indent=4, sort_keys=True)
shutil.move(tmp_file.name, path)
# NamedTemporaryFile tries to delete the file and fails otherwise
open(tmp_file.name, "a").close()
@contextmanager
def chdir(dest: PathType) -> Generator[None, None, None]:
previous = os.getcwd()
os.chdir(dest)
try:
yield
finally:
os.chdir(previous)
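# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# write_json_file() writes atomically by dumping into a temporary file in the target
# directory and then moving it into place; chdir() temporarily switches the working
# directory. The paths below are hypothetical placeholders.
# write_json_file({"repos": {}}, "data/repos.json")
# with chdir("data"):
#     ...  # runs with data/ as the current working directory, restored afterwards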
| [
"[email protected]"
] | |
a2455184714558aeedd27f30413d548c77e63c4b | 7e260342bb04eba9bff4289da938e859b8d68b82 | /contrib/scripts.py | d6d2ef643382ab83ba2df65618bc02d78d78ab2f | [
"MIT"
] | permissive | christopherjenness/fava | 72c2d0e201f7792ac32a643be0479fa7623efc27 | 71c25d8a0ae08aa84150e33d464000d0161610ea | refs/heads/master | 2020-04-28T15:29:34.446050 | 2019-03-12T17:58:03 | 2019-03-12T17:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | #!/usr/bin/env python3
"""Various utilities."""
import json
import os
from beancount.query import query_env
from beancount.query import query_parser
import click
import requests
BASE_PATH = os.path.normpath(
os.path.join(os.path.dirname(__file__), "../fava")
)
LANGUAGES = ["de", "es", "fr", "nl", "pt", "ru", "zh-CN", "sk", "uk"]
@click.group()
def cli():
"""Various utilities."""
def _env_to_list(attributes):
for name in attributes.keys():
if isinstance(name, tuple):
name = name[0]
yield name
@cli.command()
def generate_bql_grammar_json():
"""Generate a JSON file with BQL grammar attributes.
The online code editor needs to have the list of available columns,
functions, and keywords for syntax highlighting and completion.
Should be run whenever the BQL changes."""
target_env = query_env.TargetsEnvironment()
data = {
"columns": sorted(set(_env_to_list(target_env.columns))),
"functions": sorted(set(_env_to_list(target_env.functions))),
"keywords": sorted({kw.lower() for kw in query_parser.Lexer.keywords}),
}
path = os.path.join(
os.path.dirname(__file__),
"../fava/static/javascript/codemirror/bql-grammar.json",
)
with open(path, "w") as json_file:
json.dump(data, json_file)
@cli.command()
def download_translations():
"""Fetch updated translations from POEditor.com."""
token = os.environ.get("POEDITOR_TOKEN")
if not token:
raise click.UsageError(
"The POEDITOR_TOKEN environment variable needs to be set."
)
for language in LANGUAGES:
download_from_poeditor(language, "po", token)
download_from_poeditor(language, "mo", token)
@cli.command()
def upload_translations():
"""Upload .pot message catalog to POEditor.com."""
token = os.environ.get("POEDITOR_TOKEN")
if not token:
raise click.UsageError(
"The POEDITOR_TOKEN environment variable needs to be set."
)
path = os.path.join(BASE_PATH, f"translations/messages.pot")
click.echo(f"Uploading message catalog: {path}")
data = {
"api_token": token,
"id": 90283,
"updating": "terms",
"sync_terms": 1,
}
files = {"file": open(path, "rb")}
request = requests.post(
"https://api.poeditor.com/v2/projects/upload", data=data, files=files
)
click.echo("Done: " + str(request.json()["result"]["terms"]))
def download_from_poeditor(language, format_, token):
"""Download .{po,mo}-file from POEditor and save to disk."""
click.echo(f'Downloading .{format_}-file for language "{language}"')
language_short = language[:2]
data = {
"api_token": token,
"id": 90283,
"language": language,
"type": format_,
}
request = requests.post(
"https://api.poeditor.com/v2/projects/export", data=data
)
url = request.json()["result"]["url"]
content = requests.get(url).content
folder = os.path.join(
BASE_PATH, "translations", language_short, "LC_MESSAGES"
)
if not os.path.exists(folder):
os.makedirs(folder)
path = os.path.join(folder, f"messages.{format_}")
with open(path, "wb") as file_:
file_.write(content)
click.echo(f'Downloaded to "{path}"')
if __name__ == "__main__":
cli()
| [
"[email protected]"
] | |
59778d5cfdb33ed8ffbcd1d7c0f2b05cd15a366d | 5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e | /py/google/cj2014/round1A/FullBinaryTree.py | df737dafe506eb93570aed7b49ecc60662a2dc43 | [
"MIT"
] | permissive | shhuan/algorithms | 36d70f1ab23dab881bf1a15573fbca7b2a3f4235 | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | refs/heads/master | 2021-05-07T14:21:15.362588 | 2017-11-07T08:20:16 | 2017-11-07T08:20:16 | 109,799,698 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | # -*- coding: utf-8 -*-
"""
created by huash06 at 2015-04-08 10:48
"""
__author__ = 'huash06'
import sys
import os
import py.lib.Utils as Utils
from datetime import datetime
# sys.stdin = open('input/sample.txt', 'r')
sys.stdin = open('input/B-large-practice.in', 'r')
# sys.stdout = open('output/B-large-practice.out', 'w')
MAXNN = 301
def count_node(graph, node, parent):
cc = 1
for i in range(len(graph)):
if i != parent and graph[node][i]:
cc += count_node(graph, i, node)
return cc
def dfs(graph, node, parent, memo):
"""
    Number of nodes kept when the subtree rooted at node is turned into a full binary tree (the pruned count is the total node count minus this value)
:param graph:
:param node:
:param parent:
:param memo: record calculated result
:return: how many node in this full-binary tree rooted at node
"""
max1 = -1
max2 = -1
    if memo[node][parent] == -1 or True:  # the 'or True' effectively disables the memo cache, so every call recomputes
for child in graph[node]:
if child != parent:
nc = dfs(graph, child, node, memo)
if nc > max1:
max2 = max1
max1 = nc
elif nc > max2:
max2 = nc
if max2 == -1:
memo[node][parent] = 1
else:
memo[node][parent] = 1 + max1 + max2
return memo[node][parent]
T = int(sys.stdin.readline())
sys.setrecursionlimit(3000)
# start_time = datetime.now()
for ti in range(1, T + 1):
N = int(sys.stdin.readline())
GRAPH = dict()
for ei in range(1, N+1):
GRAPH[ei] = list()
for ni in range(N-1):
S, T = map(int, sys.stdin.readline().strip().split(' '))
GRAPH[S].append(T)
GRAPH[T].append(S)
count = N
memo = [[-1 for c in range(N+1)] for r in range(N+1)]
for r in range(1, N+1):
c = N - dfs(GRAPH, r, 0, memo)
if c < count:
count = c
print('Case #{}: {}'.format(ti, count))
# end_time = datetime.now()
# time_cost = end_time-start_time
# print('Time Cost: {}s'.format(time_cost.seconds))
| [
"[email protected]"
] | |
d34afd28088c387fc104acc632df1276df76726e | b2c070e09bff49241fcff98bcde825cfa96e93ca | /HackerEarth/Recursion/SubsetGeneration.py | 9af011b3289a694f328f9d18d4a03292e2e93f09 | [
"MIT"
] | permissive | Beryl2208/CI-2 | dcb1b923f9c4f1f8b167c36c8b22a80522322c53 | f671292dad2695e37458866442a6b951ba4e1a71 | refs/heads/master | 2022-12-26T19:11:28.559911 | 2020-10-06T06:27:51 | 2020-10-06T06:27:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # Subset or Subsequence generation
# Input - "abc", Output - "a", "b", "c", "ab", "ac", "abc", "bc"
# Input - "abcd", Output - "a", "b", "c", "d", "ab", "ac", "ad", "abc", "acd", "abd", "abcd", "bc", "bcd", "bd", "cd"
# "abc" "ab" "ac" "a" "bc" "b" "c" ""
# \ / \ / \ / \ /
# "ab" "a" "b" ""
# \ / \ /
# "a" ""
# \ /
# curr = ""
# Options -
# 1) Consider curr as a part of subset
# 2) Do not consider curr as a part of subset
def Subset(s, index = 0, curr = ''):
if index == len(s):
print(curr, end = ' ')
return
Subset(s, index + 1, curr + s[index])
Subset(s, index + 1, curr)
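# --- Editor's sketch (not part of the original file): the same include/exclude
# recursion as above, but collecting the subsets into a list instead of printing them.
def SubsetList(s, index = 0, curr = ''):
    if index == len(s):
        return [curr]
    # include s[index], then exclude it
    return SubsetList(s, index + 1, curr + s[index]) + SubsetList(s, index + 1, curr)
# SubsetList("abc") -> ['abc', 'ab', 'ac', 'a', 'bc', 'b', 'c', '']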
Subset("abc")
print()
Subset("abcd")
print() | [
"[email protected]"
] | |
2641b37d027fbff1ece30b7f2825fb2fcbd20653 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/0150.0_Evaluate_Reverse_Polish_Notation.py | 0a7404c8bbd5ea8d7d771e5b14d18c16066b3ef5 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | '''
approach: Stack
Time: O(N)
Space: O(N)
Runtime: 32 ms, beating 60.21% of all Python submissions
Memory usage: 14.3 MB, beating 76.44% of all Python submissions
'''
class Solution(object):
def evalRPN(self, tokens):
"""
:type tokens: List[str]
:rtype: int
"""
stack = []
for token in tokens:
stack.append(token)
while len(stack) >= 3 and stack[-1] in ['+', '-', '*', '/']:
operator = stack.pop()
operand2 = int(stack.pop())
operand1 = int(stack.pop())
result = 0
if operator == '+':
result = operand1 + operand2
elif operator == '-':
result = operand1 - operand2
elif operator == '*':
result = operand1 * operand2
elif operator == '/':
# Note that division between two integers should truncate toward zero.
result = int(operand1 * 1.0/ operand2)
stack.append(result)
return int(stack[-1])
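# --- Editor's note (not part of the original solution): a quick illustrative check
# using the standard example from the LeetCode problem statement.
# Solution().evalRPN(["2", "1", "+", "3", "*"])  # -> 9, i.e. (2 + 1) * 3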
| [
"[email protected]"
] | |
0d049d8ba10dab9d75bd9355eb364b3565a2349b | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/generic_container.py | f6e8fff7ae9f79d55e3c6619b9dd2ff2044fb9c6 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 2,678 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protection_container import ProtectionContainer
class GenericContainer(ProtectionContainer):
"""Base class for generic container of backup items.
All required parameters must be populated in order to send to Azure.
:param friendly_name: Friendly name of the container.
:type friendly_name: str
:param backup_management_type: Type of backup managemenent for the
container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param registration_status: Status of registration of the container with
the Recovery Services Vault.
:type registration_status: str
:param health_status: Status of health of the container.
:type health_status: str
:param container_type: Required. Constant filled by server.
:type container_type: str
:param fabric_name: Name of the container's fabric
:type fabric_name: str
:param extended_information: Extended information (not returned in List
container API calls)
:type extended_information:
~azure.mgmt.recoveryservicesbackup.models.GenericContainerExtendedInfo
"""
_validation = {
'container_type': {'required': True},
}
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'registration_status': {'key': 'registrationStatus', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'container_type': {'key': 'containerType', 'type': 'str'},
'fabric_name': {'key': 'fabricName', 'type': 'str'},
'extended_information': {'key': 'extendedInformation', 'type': 'GenericContainerExtendedInfo'},
}
def __init__(self, **kwargs):
super(GenericContainer, self).__init__(**kwargs)
self.fabric_name = kwargs.get('fabric_name', None)
self.extended_information = kwargs.get('extended_information', None)
self.container_type = 'GenericContainer'
| [
"[email protected]"
] | |
0ec032d171d3f69969f5f45b107df6415097393f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-6283.py | 56c4dc6454c5bcb3750e15efea45835eab1b8d51 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,757 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
48ad1087d1425fbf659db1aec546c48a22425705 | 5491e80f7dc72a8091b16c26a5cfee93381ee30d | /Challenge202E_I_AM_BENDER_Binary_To_Text/challenge202E.py | a35a3a4915220b1f0ced3a8f61896c03fca380db | [] | no_license | tuipopenoe/DailyProgrammer | 87167c2ae275c40c3b1a30ae14497a3289f8797f | 8d42947b576b78456fa72cdf5b886cff9f32b769 | refs/heads/master | 2016-09-05T21:13:30.805504 | 2015-10-16T02:57:20 | 2015-10-16T02:57:20 | 21,139,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | #!/usr/bin/env python
# Tui Popenoe
# challenge202E.py - Binary to String
import sys
import binascii
def i_am_bender(binary):
return binascii.unhexlify('%x' % int(binary, 2))
def main():
if len(sys.argv) > 1:
print(i_am_bender(sys.argv[1]))
else:
print(i_am_bender(sys.stdin.read()))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
5f91841d99dce028ef4112a7f1b5929f5529de42 | 729aa3af1e6de25c0e46192ef62aaf77cc622979 | /comentarios/models.py | 68e967afb7853be71fb6423710c8f2e8619ff015 | [] | no_license | xuting1108/API-Pontos-Tur-sticos | 8b583869006b8570c44eebfc885bb3db7eff4f1d | 7a01434e806a7b3b1409f7c490071ba682525ad3 | refs/heads/master | 2022-11-19T15:09:48.057402 | 2020-06-15T21:38:00 | 2020-06-15T21:38:00 | 267,150,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from django.db import models
from django.contrib.auth.models import User
class Comentario(models.Model):
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
comentarios = models.TextField()
data = models.DateTimeField(auto_now_add=True)
aprovado = models.BooleanField(default=True)
def __str__(self):
return self.usuario.username
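# --- Editor's note (not part of the original file): a hypothetical use of the model
# above from a view or shell, assuming `user` is an existing User instance.
# Comentario.objects.create(usuario=user, comentarios="Great spot!")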
| [
"[email protected]"
] | |
d5e1d94b0f4269311fc4634072447854264afac3 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /CDqMdrTvfn2Wa8igp_16.py | 12713c2aa2161258166fab90eabe089a4b047990 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | """
Create a function that returns the next element in an **arithmetic sequence**.
In an arithmetic sequence, each element is formed by adding the same constant
to the previous element.
### Examples
next_element([3, 5, 7, 9]) ➞ 11
next_element([-5, -6, -7]) ➞ -8
next_element([2, 2, 2, 2, 2]) ➞ 2
### Notes
All input arrays will contain **integers only**.
"""
def next_element(lst):
a = lst[-1] - lst[-2]
return lst[-1] + a
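# --- Editor's note (not part of the original file): the docstring examples above
# double as quick checks of the implementation.
# next_element([3, 5, 7, 9])     # -> 11
# next_element([-5, -6, -7])     # -> -8
# next_element([2, 2, 2, 2, 2])  # -> 2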
| [
"[email protected]"
] | |
4f729df74aa3cb8e7f8acf86cf08033467732bf3 | 5982a9c9c9cb682ec9732f9eeb438b62c61f2e99 | /Problem_234/my_bad_solution.py | d6896b10334da48b8afeefb2a9c1fcca30a0b44b | [] | no_license | chenshanghao/LeetCode_learning | 6fdf98473be8f2240dd86d5586bbd1bbb95d6b0c | acf2395f3b946054009d4543f2a13e83402323d3 | refs/heads/master | 2021-10-23T05:23:01.970535 | 2019-03-15T05:08:54 | 2019-03-15T05:08:54 | 114,688,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
val_list = []
while(head):
val_list.append(head.val)
head = head.next
if val_list == val_list[::-1]:
return True
else:
return False | [
"[email protected]"
] | |
67086c4670dfe4cb66c73ee192fb47a5a8183bcf | 4597f9e8c2772f276904b76c334b4d181fa9f839 | /Python/Compare-Version-Numbers.py | 85b753029af257cf562da8fb4d2fb870da2c0e73 | [] | no_license | xxw1122/Leetcode | 258ee541765e6b04a95e225284575e562edc4db9 | 4c991a8cd024b504ceb0ef7abd8f3cceb6be2fb8 | refs/heads/master | 2020-12-25T11:58:00.223146 | 2015-08-11T02:10:25 | 2015-08-11T02:10:25 | 40,542,869 | 2 | 6 | null | 2020-09-30T20:54:57 | 2015-08-11T13:21:17 | C++ | UTF-8 | Python | false | false | 872 | py | class Solution:
# @param a, a string
# @param b, a string
# @return a boolean
def compareVersion(self, version1, version2):
seq1 = []
seq2 = []
if version1.find('.') >= 0:
seq1 = version1.split('.')
else:
seq1.append(version1)
if version2.find('.') >= 0:
seq2 = version2.split('.')
else:
seq2.append(version2)
for i in range(len(seq1)):
seq1[i] = int(seq1[i])
for i in range(len(seq2)):
seq2[i] = int(seq2[i])
maxlen = max(len(seq1), len(seq2))
for i in range(len(seq1), maxlen):
seq1.append(0)
for i in range(len(seq2), maxlen):
seq2.append(0)
if seq1 < seq2:
return -1
elif seq1 > seq2:
return 1
else:
return 0 | [
"[email protected]"
] | |
bd4bfd2045243258a2936d602e25e747bd5817ce | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_quivered.py | ae5ecb9ccecdd6d0e423ea42fa27b78863065fdc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.nouns._quiver import _QUIVER
#class header
class _QUIVERED(_QUIVER, ):
def __init__(self,):
_QUIVER.__init__(self)
self.name = "QUIVERED"
self.specie = 'nouns'
self.basic = "quiver"
self.jsondata = {}
| [
"[email protected]"
] | |
434c4bd312a9abd7b4c412e91f46470e4d93787a | 3151fabc3eb907d6cd1bb17739c215a8e95a6370 | /storagetest/pkgs/pts/compilebench/__init__.py | 2b4e431708e278479b68217206765020f8856961 | [
"MIT"
] | permissive | txu2k8/storage-test | a3afe96dc206392603f4aa000a7df428d885454b | 62a16ec57d619f724c46939bf85c4c0df82ef47c | refs/heads/master | 2023-03-25T11:00:54.346476 | 2021-03-15T01:40:53 | 2021-03-15T01:40:53 | 307,604,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 18:27
@Author: Tao.Xu
@Email : [email protected]
"""
from .compile_bench import *
__all__ = ['CompileBench']
"""
compilebench
==============
https://oss.oracle.com/~mason/compilebench/
https://openbenchmarking.org/test/pts/compilebench
Compilebench tries to age a filesystem by simulating some of the disk IO
common in creating, compiling, patching, stating and reading kernel trees.
It indirectly measures how well filesystems can maintain directory locality
as the disk fills up and directories age.
This current test is setup to use the makej mode with 10 initial directories
Quick and dirty usage: (note the -d option changed in 0.6)
1. Untar compilebench
2. run commands:
./compilebench -D some_working_dir -i 10 -r 30
./compilebench -D some_working_dir -i 10 --makej
./compilebench -D some_working_dir -i 10 --makej -d /dev/xxx -t trace_file
./compilebench --help for more
"""
if __name__ == '__main__':
pass
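# --- Editor's note (not part of the original module): the re-export above makes the
# class importable from this package. Its constructor arguments are not shown in this
# file, so only the import itself is sketched:
# from storagetest.pkgs.pts.compilebench import CompileBench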
| [
"[email protected]"
] | |
7741d2640a25fdf9bfc3c4d3a9f38b475e4ced61 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02753/s804894259.py | 636735cbd5323d345ac8e012b55a33a9143478c1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | line = list(input())
line.sort()
if line[0] == line[2]:
print("No")
else:
print("Yes") | [
"[email protected]"
] | |
b280a2a7d4766e6375a02765b3244e920e0b405b | a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c | /notebook/list_2d_sort.py | ed70c8ed858f38ef3ada5a56ba0468b997f515fc | [
"MIT"
] | permissive | nkmk/python-snippets | a6c66bdf999502e52f4795a3074ced63bf440817 | f9dd286a9cf93f474e20371f8fffc4732cb3c4d5 | refs/heads/master | 2023-08-03T04:20:05.606293 | 2023-07-26T13:21:11 | 2023-07-26T13:21:11 | 98,900,570 | 253 | 77 | MIT | 2020-10-25T01:12:53 | 2017-07-31T14:54:47 | Jupyter Notebook | UTF-8 | Python | false | false | 2,885 | py | import pprint
l_2d = [[20, 3, 100], [1, 200, 30], [300, 10, 2]]
pprint.pprint(l_2d, width=20)
# [[20, 3, 100],
# [1, 200, 30],
# [300, 10, 2]]
pprint.pprint(sorted(l_2d), width=20)
# [[1, 200, 30],
# [20, 3, 100],
# [300, 10, 2]]
pprint.pprint([sorted(l) for l in l_2d], width=20)
# [[3, 20, 100],
# [1, 30, 200],
# [2, 10, 300]]
pprint.pprint([list(x) for x in zip(*[sorted(l) for l in zip(*l_2d)])], width=20)
# [[1, 3, 2],
# [20, 10, 30],
# [300, 200, 100]]
import numpy as np
print(np.sort(l_2d))
# [[ 3 20 100]
# [ 1 30 200]
# [ 2 10 300]]
print(np.sort(l_2d, axis=0))
# [[ 1 3 2]
# [ 20 10 30]
# [300 200 100]]
print(type(np.sort(l_2d)))
# <class 'numpy.ndarray'>
print(np.sort(l_2d).tolist())
# [[3, 20, 100], [1, 30, 200], [2, 10, 300]]
print(type(np.sort(l_2d).tolist()))
# <class 'list'>
l_2d_error = [[1, 2], [3, 4, 5]]
# print(np.sort(l_2d_error))
# ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (2,) + inhomogeneous part.
pprint.pprint(sorted(l_2d, key=lambda x: x[1]), width=20)
# [[20, 3, 100],
# [300, 10, 2],
# [1, 200, 30]]
pprint.pprint(sorted(l_2d, key=lambda x: x[2]), width=20)
# [[300, 10, 2],
# [1, 200, 30],
# [20, 3, 100]]
import operator
pprint.pprint(sorted(l_2d, key=operator.itemgetter(1)), width=20)
# [[20, 3, 100],
# [300, 10, 2],
# [1, 200, 30]]
pprint.pprint(sorted(l_2d, key=operator.itemgetter(2)), width=20)
# [[300, 10, 2],
# [1, 200, 30],
# [20, 3, 100]]
l_2d_dup = [[1, 3, 100], [1, 200, 30], [1, 3, 2]]
pprint.pprint(l_2d_dup, width=20)
# [[1, 3, 100],
# [1, 200, 30],
# [1, 3, 2]]
pprint.pprint(sorted(l_2d_dup), width=20)
# [[1, 3, 2],
# [1, 3, 100],
# [1, 200, 30]]
pprint.pprint(sorted(l_2d_dup, key=operator.itemgetter(0, 2)), width=20)
# [[1, 3, 2],
# [1, 200, 30],
# [1, 3, 100]]
pprint.pprint(sorted(l_2d_dup, key=lambda x: (x[0], x[2])), width=20)
# [[1, 3, 2],
# [1, 200, 30],
# [1, 3, 100]]
import pandas as pd
df = pd.DataFrame(l_2d_dup, columns=['A', 'B', 'C'], index=['X', 'Y', 'Z'])
print(df)
# A B C
# X 1 3 100
# Y 1 200 30
# Z 1 3 2
print(df.sort_values('C'))
# A B C
# Z 1 3 2
# Y 1 200 30
# X 1 3 100
print(df.sort_values('Z', axis=1))
# A C B
# X 1 100 3
# Y 1 30 200
# Z 1 2 3
print(df.sort_values(['A', 'C']))
# A B C
# Z 1 3 2
# Y 1 200 30
# X 1 3 100
df = pd.DataFrame(l_2d_dup)
print(df)
# 0 1 2
# 0 1 3 100
# 1 1 200 30
# 2 1 3 2
print(df.sort_values(2))
# 0 1 2
# 2 1 3 2
# 1 1 200 30
# 0 1 3 100
print(df.sort_values(2, axis=1))
# 0 2 1
# 0 1 100 3
# 1 1 30 200
# 2 1 2 3
print(df.sort_values([0, 2]))
# 0 1 2
# 2 1 3 2
# 1 1 200 30
# 0 1 3 100
| [
"[email protected]"
] | |
8c5ed7790f16d81a0c36ea704e83ed858dde2f9b | 71cb8d9eb437a9faf330931f3713ba5dc688405d | /analyze_data.py | 20d0de521c42d33cccb15314658cdb6ae2767102 | [
"MIT"
] | permissive | mattare2/perceptual-acoustic-similarity | 294d967ab2cd47120d33e650f7488d37cec199ca | eced010ee2d1a36c6052c8afd1b8c4af709dc418 | refs/heads/master | 2021-01-18T11:26:36.763005 | 2015-04-21T07:21:22 | 2015-04-21T07:21:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,408 | py | import csv
import os
from functools import partial
from acousticsim.main import acoustic_similarity_mapping
from acousticsim.helper import get_vowel_points
from acousticsim.praat.wrapper import (to_pitch_praat, to_formants_praat,
to_intensity_praat, to_mfcc_praat)
from acousticsim.distance.point import point_distance, euclidean
from acousticsim.distance.dct import dct_distance
from acousticsim.distance.dtw import dtw_distance
from acousticsim.distance.xcorr import xcorr_distance
praat_path = r'C:\Users\michael\Documents\Praat\praatcon.exe'
data_dir = r'C:\Users\michael\Documents\Data\ATI_new'
model_dir = os.path.join(data_dir, 'Models')
shadower_dir = os.path.join(data_dir, 'Shadowers')
female_models = os.listdir(os.path.join(model_dir,'Female'))
male_models = os.listdir(os.path.join(model_dir,'Male'))
female_shadowers = os.listdir(os.path.join(shadower_dir,'Female'))
male_shadowers = os.listdir(os.path.join(shadower_dir,'Male'))
## Representations
# MFCC (acousticsim)
# MFCC (Praat)
# Formants (Praat)
# Intensity (Praat)
# Pitch (Praat)
# AmpEnvs (acousticsim)
## Distance functions
# DTW
# XCorr
# DCT
# Vowel midpoint
# Vowel third
def callback(*value):
print(*value)
praat_mfcc = partial(to_mfcc_praat, praat_path )
praat_formants = partial(to_formants_praat, praat_path)
praat_intensity = partial(to_intensity_praat, praat_path )
praat_pitch = partial(to_pitch_praat, praat_path )
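# Distance between the two representations sampled at each token's vowel midpoint;
# vowel boundaries are read from the wav file's companion Praat TextGrid.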
def midpoint_distance(rep_one, rep_two):
base, _ = os.path.splitext(rep_one._filepath)
one_textgrid = base + '.TextGrid'
begin,end = get_vowel_points(one_textgrid, tier_name = 'Vowel', vowel_label = 'V')
if begin is None or end is None:
print(one_textgrid)
point_one = begin + ((end - begin)/2)
base, _ = os.path.splitext(rep_two._filepath)
two_textgrid = base + '.TextGrid'
begin,end = get_vowel_points(two_textgrid, tier_name = 'Vowel', vowel_label = 'V')
if begin is None or end is None:
        print(two_textgrid)
point_two = begin + ((end - begin)/2)
return point_distance(rep_one, rep_two, point_one, point_two)
def third_distance(rep_one, rep_two):
base, _ = os.path.splitext(rep_one._filepath)
one_textgrid = base + '.TextGrid'
begin,end = get_vowel_points(one_textgrid, tier_name = 'Vowel', vowel_label = 'V')
point_one = begin + ((end - begin)/3)
base, _ = os.path.splitext(rep_two._filepath)
two_textgrid = base + '.TextGrid'
begin,end = get_vowel_points(two_textgrid, tier_name = 'Vowel', vowel_label = 'V')
point_two = begin + ((end - begin)/3)
return point_distance(rep_one, rep_two, point_one, point_two)
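# Apply dist_func to only the vowel portion of each representation, sliced out
# using the vowel boundaries from each file's TextGrid.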
def vowel_dist(dist_func, rep_one, rep_two):
base, _ = os.path.splitext(rep_one._filepath)
one_textgrid = base + '.TextGrid'
one_begin,one_end = get_vowel_points(one_textgrid, tier_name = 'Vowel', vowel_label = 'V')
base, _ = os.path.splitext(rep_two._filepath)
two_textgrid = base + '.TextGrid'
two_begin,two_end = get_vowel_points(two_textgrid, tier_name = 'Vowel', vowel_label = 'V')
return dist_func(rep_one[one_begin, one_end], rep_two[two_begin, two_end])
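# Euclidean distance between the (pre-vowel, vowel, post-vowel) duration triples
# of the two productions; missing boundaries fall back to the start or end of the file.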
def duration_distance(rep_one, rep_two):
base, _ = os.path.splitext(rep_one._filepath)
one_textgrid = base + '.TextGrid'
one_begin,one_end = get_vowel_points(one_textgrid, tier_name = 'Vowel', vowel_label = 'V')
if one_begin is None:
one_begin = 0
if one_end is None:
one_end = rep_one._duration
one_durations = [one_begin, one_end - one_begin, rep_one._duration - one_end]
base, _ = os.path.splitext(rep_two._filepath)
two_textgrid = base + '.TextGrid'
two_begin,two_end = get_vowel_points(two_textgrid, tier_name = 'Vowel', vowel_label = 'V')
if two_begin is None:
two_begin = 0
if two_end is None:
two_end = rep_two._duration
two_durations = [two_begin, two_end - two_begin, rep_two._duration - two_end]
return euclidean(one_durations, two_durations)
vowel_dtw = partial(vowel_dist,dtw_distance)
vowel_dct = partial(vowel_dist,dct_distance)
vowel_xcorr = partial(vowel_dist,xcorr_distance)
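# Build (baseline, model, shadowed) wav path triples from the AXB results file,
# resolving each talker to the male or female model/shadower directories.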
def load_axb():
path_mapping = list()
with open(os.path.join(data_dir,'axb.txt'),'r') as f:
reader = csv.DictReader(f, delimiter = '\t')
for line in reader:
shadower = line['Shadower'][-3:]
model = line['Model'][-3:]
word = line['Word']
if model in female_models:
model_path = os.path.join(model_dir, 'Female',model, '{}_{}.wav'.format(model,word))
else:
model_path = os.path.join(model_dir, 'Male',model, '{}_{}.wav'.format(model,word))
if shadower in female_shadowers:
baseline_path = os.path.join(shadower_dir, 'Female',shadower, '{}_{}_baseline.wav'.format(shadower,word))
shadowed_path = os.path.join(shadower_dir, 'Female',shadower, '{}_{}_shadowing{}.wav'.format(shadower,word, model))
else:
baseline_path = os.path.join(shadower_dir, 'Male',shadower, '{}_{}_baseline.wav'.format(shadower,word))
shadowed_path = os.path.join(shadower_dir, 'Male',shadower, '{}_{}_shadowing{}.wav'.format(shadower,word, model))
path_mapping.append((baseline_path, model_path, shadowed_path))
return list(set(path_mapping))
def output_acousticsim(path_mapping, output, output_filename):
with open(output_filename, 'w') as f:
writer = csv.writer(f, delimiter = '\t')
writer.writerow(['Shadower', 'Model', 'Word', 'BaseToModel', 'ShadToModel'])
for pm in path_mapping:
baseline_prod = os.path.basename(pm[0])
model_prod = os.path.basename(pm[1])
shad_prod = os.path.basename(pm[2])
shadower = shad_prod[:3]
model,ext = os.path.splitext(model_prod)
model, word = model.split('_')
writer.writerow([shadower, model, word, output[(baseline_prod,model_prod)],
output[(shad_prod,model_prod)]])
def get_mfcc_dtw(path_mapping):
asim = acoustic_similarity_mapping(path_mapping, rep = 'mfcc',
match_function = 'dtw', use_multi=True,
num_cores = 6)
return asim
def get_mfcc_vowel_mid(path_mapping):
asim = acoustic_similarity_mapping(path_mapping, rep = 'mfcc',
match_function = midpoint_distance, use_multi=True,
num_cores = 6, call_back = callback)
return asim
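# Flatten the (baseline, model, shadowed) triples into unique (production, model)
# pairs for acoustic_similarity_mapping.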
def convert_path_mapping(path_mapping):
new_path_mapping = set()
for mapping in path_mapping:
new_path_mapping.add((mapping[0],mapping[1]))
new_path_mapping.add((mapping[2],mapping[1]))
return list(new_path_mapping)
def calc_asim(path_mapping, rep, match_func, cache = None):
asim, cache = acoustic_similarity_mapping(path_mapping, rep = rep,
match_function = match_func, use_multi=True,
num_cores = 4, cache = cache, return_rep = True)
return asim, cache
if __name__ == '__main__':
rep_dict = {'mfcc': 'mfcc',
'mfcc_praat':praat_mfcc,
'ampenv': 'envelopes',
'pitch_praat': praat_pitch,
'intensity_praat': praat_intensity,
'formants_praat': praat_formants
}
dist_dict = {'dtw': 'dtw',
'dct': 'dct',
'xcorr': 'xcorr',
'dtw_vowel': vowel_dtw,
'dct_vowel': vowel_dct,
'xcorr_vowel': vowel_xcorr,
'midpoint': midpoint_distance,
'third': third_distance}
path_mapping = load_axb()
for_asim = convert_path_mapping(path_mapping)
for k,v in rep_dict.items():
cache = None
for k2,v2 in dist_dict.items():
if os.path.exists('{}_{}.txt'.format(k, k2)):
continue
print(k, k2)
asim, cache = calc_asim(for_asim, v, v2, cache = cache)
output_acousticsim(path_mapping, asim, '{}_{}.txt'.format(k, k2))
#Duration distance
asim, cache = calc_asim(for_asim, v, duration_distance, cache = cache)
output_acousticsim(path_mapping, asim, 'segmental_duration.txt')
| [
"[email protected]"
] | |
e93bfd5399e5ab1d1e5fa8e1374a7859d94a0446 | 512b388a53022f561e2375b4621f78572d3b4f04 | /clients/migrations/0010_auto_20200904_1044.py | cb1046a194005d2c79ecd0cc9708388a797fa99b | [] | no_license | Madoka09/Worker15 | 006d5ac44dc55c3ae7f72d3b8300f3567395cdff | 181012d309052b2df3d4ef99a197e8acef73a185 | refs/heads/master | 2023-03-24T05:29:02.060796 | 2021-03-16T21:56:21 | 2021-03-16T21:56:21 | 336,394,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 3.0.4 on 2020-09-04 15:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clients', '0009_auto_20200903_2132'),
]
operations = [
migrations.RenameField(
model_name='clientsaddress',
old_name='altern_phone',
new_name='alternate_phone',
),
]
| [
"[email protected]"
] | |
e8ad7c6fc7df5ae3504281a89fd22b8eadb6cdef | b75918b2ac1dfaf2c1219f40d63004900c9338b1 | /tests/conftest.py | bdc760d2cb724f587868a0e459829b3640bca13f | [] | no_license | solashirai/ExplainableCourseRecommender | e0f036da9814a0187daa5635da0ff2f86386026d | 6a2795cfc4536548ac3679b3d23b953e55a50a37 | refs/heads/main | 2023-04-14T14:27:36.054830 | 2021-04-19T02:29:48 | 2021-04-19T02:29:48 | 302,346,189 | 1 | 0 | null | 2021-04-18T16:13:48 | 2020-10-08T13:17:44 | Python | UTF-8 | Python | false | false | 14,309 | py | import pytest
from escore.models import *
from escore.services.course import GraphCourseQueryService
from frex.stores import LocalGraph, RemoteGraph
from escore.utils.path import DATA_DIR
from escore.pipeline import RecommendCoursesPipeline
from rdflib import URIRef, Namespace
individual_ns = Namespace(
"https://tw.rpi.edu/ontology-engineering/oe2020/course-recommender-individuals/"
)
@pytest.fixture(scope="session")
def course_graph() -> LocalGraph:
return LocalGraph(
file_paths=(
(DATA_DIR / "courses.ttl").resolve(),
(DATA_DIR / "scheduled_courses.ttl").resolve(),
(DATA_DIR / "rpi_departments.ttl").resolve(),
(DATA_DIR / "parsed_grad_requirements.ttl").resolve(),
(DATA_DIR / "users.ttl").resolve(),
)
)
@pytest.fixture(scope="session")
def course_qs(course_graph) -> GraphCourseQueryService:
return GraphCourseQueryService(queryable=course_graph)
@pytest.fixture(scope="session")
def course_rec_pipe(course_qs) -> RecommendCoursesPipeline:
return RecommendCoursesPipeline(course_query_service=course_qs)
@pytest.fixture(scope="session")
def pl_course(csci_dept_code, csci_dept):
return Course(
uri=individual_ns['crs938c5b7e20ea7e1620a2dd6329e6f0af274b46c3'],
course_code=CourseCode(
uri=individual_ns['crsCodeed2eaaf90b6625c9f6a5731e3f1a933357cd88b2'],
name="CSCI-4430",
department_code=csci_dept_code,
course_level=4430.0,
cross_listed=tuple()
),
name="Programming Languages",
credits=4,
department=csci_dept,
description="This course is a study of the important concepts found in current programming languages. "
"Topics include language processing (lexical analysis, parsing, type-checking, interpretation "
"and compilation, run-time environment), the role of abstraction (data abstraction and control "
"abstraction), programming paradigms (procedural, functional, object-oriented, logic-oriented, "
"generic), and formal language definition.",
special_tags=frozenset(),
required_prerequisites=frozenset({
individual_ns['crsd930192130a654416bffd45ce16415ee608df66d'],
individual_ns['crs9797fa54cb6f077d0e7cf31e23bdbafbbe00e8af']
}),
corequisites=frozenset(),
recommended_prerequisites=frozenset(),
topics=frozenset({
TopicArea(
uri=individual_ns['topic00001'],
name='placeholder for topic',
sub_topic_of=frozenset(),
discipline='placeholder discipline'
),
}
),
offering_terms=("FALL",),
offering_period="ANNUAL"
)
@pytest.fixture(scope="session")
def csci_dept_code(csci_dept):
return DepartmentCode(
uri=individual_ns['dptc0026'],
name="CSCI",
department=csci_dept
)
@pytest.fixture(scope="session")
def csci_dept():
return Department(
uri=individual_ns['dpt0026'],
name="Computer Science",
offered_major_uris=tuple(),
offered_degree_uris=tuple()
)
@pytest.fixture(scope="session")
def csci_major(csci_dept):
return Major(
uri=individual_ns['majCSCI'],
name="Computer Science Major",
department=csci_dept
)
@pytest.fixture(scope="session")
def csci_top_level_req(csci_dept):
return Requirement(
uri=individual_ns['reqfd44455d4e7c62e5f83dde9ab7da8583adbfd31e'],
fulfilled_by_requirement_uris=frozenset(),
sub_requirement_uris=frozenset({
individual_ns['req5f790f12a27e66b3f8c6534a79003cb5910d7fde'],
individual_ns['req78e077dfe6014ee50f8fac4e06b2ae06333cc271'],
individual_ns['reqa323d5f642970db3393d958ea2e8c6510032e1e2'],
individual_ns['reqae4289fd7815ec563f927cc14309b63a797ab630'],
individual_ns['reqbbf0c827d4009fdd91575d3974c3e9be28909b6c'],
individual_ns['reqe29978dd3d6ce495c371fda071f87f6c36f0739f'],
}),
share_credits_with_requirement_uris=frozenset({
individual_ns['req5f790f12a27e66b3f8c6534a79003cb5910d7fde'],
individual_ns['req78e077dfe6014ee50f8fac4e06b2ae06333cc271'],
individual_ns['reqa323d5f642970db3393d958ea2e8c6510032e1e2'],
individual_ns['reqae4289fd7815ec563f927cc14309b63a797ab630'],
individual_ns['reqbbf0c827d4009fdd91575d3974c3e9be28909b6c'],
individual_ns['reqe29978dd3d6ce495c371fda071f87f6c36f0739f'],
}),
restriction_requirement_uris=frozenset(),
requires_credits=128,
course_code_restriction=CourseCodeRestriction(
uri=individual_ns['ccr79a36c1af79f9c7271a61771aab09de994fccd4f'],
valid_course_code_names=frozenset(),
required_special_tag_names=frozenset(),
valid_department_code_names=frozenset(),
)
)
@pytest.fixture(scope="session")
def csci_option_req():
return Requirement(
uri=individual_ns['req78e077dfe6014ee50f8fac4e06b2ae06333cc271'],
requires_credits=16,
share_credits_with_requirement_uris=frozenset({
individual_ns['reqfd44455d4e7c62e5f83dde9ab7da8583adbfd31e'],
}),
sub_requirement_uris=frozenset(),
restriction_requirement_uris=frozenset(),
fulfilled_by_requirement_uris=frozenset(),
course_code_restriction=CourseCodeRestriction(
uri=individual_ns['ccr5a9b245b2af51a7b021a10d532b88f33418a97ca'],
valid_course_code_names=frozenset(),
required_special_tag_names=frozenset(),
valid_department_code_names=frozenset({'CSCI'}),
min_level=4000
)
)
@pytest.fixture(scope="session")
def csci_bs_deg(csci_major, csci_top_level_req):
return Degree(
uri=individual_ns['degBSInCSCI'],
name='BS in Computer Science',
major=csci_major,
requirements=(csci_top_level_req,)
)
@pytest.fixture(scope="session")
def owen_pos(csci_major, csci_bs_deg):
return PlanOfStudy(
uri=individual_ns['pos9a8e6844c6ecbac12f9f92da68ac51c5bd67704f'],
class_year=2021,
planned_major=csci_major,
planned_degree=csci_bs_deg,
completed_course_sections=frozenset({
individual_ns["crsSec0838fe4beedeff7709d32d16ca67c9aa2373dba7"],
individual_ns["crsSec0cf0d1a768ef7b1d580ac0aaf258257b8c766ecb"],
individual_ns["crsSec0d060d8550b4d97fa0aa0188e75a213e37114cb5"],
individual_ns["crsSec1d571602ec11f8e32dcde3b985cb277b68b7abb5"],
individual_ns["crsSec40567fef852031bad43995aa8cab7c4877bc0a02"],
individual_ns["crsSec4d3630ed52401a5362753db61595b8e1aec66bd8"],
individual_ns["crsSec5241e24de4b9d40df379b7916e4698ac81354f6f"],
individual_ns["crsSec5fd627bdf533aefd6f25ebb995fccc08e57f8dc2"],
individual_ns["crsSec615e6c5aee4bbf92e6e193f86346602825bba571"],
individual_ns["crsSec663dda052cc6e9647d255c294c71409b1883963f"],
individual_ns["crsSec6a1c91448f2bdb49b519784e470a68c37318b45c"],
individual_ns["crsSec79431f36805f7d501cc79356e3f69b26340e1d98"],
individual_ns["crsSec8102566ff399c31b30351decb38ba3893db8e2f5"],
individual_ns["crsSec8281ac09fc60458b13bdfef54b75f0b8e771837e"],
individual_ns["crsSec8bb40720e14ff5d40a16d71efbfab65bbcd742eb"],
individual_ns["crsSec99b5492130e02e1dcb08692178a020c1c2444195"],
individual_ns["crsSecbc29e94fcaa333888baa92efb31dad194e1718b6"],
individual_ns["crsSecc4b387e96f764565a80950390b36235fc00eabf1"],
individual_ns["crsSeccb117aa26ddc5cf711c70466adcc656492e8a464"],
individual_ns["crsSecce866dba24b0cdf1e707f40e0ee7fbb8de068406"],
individual_ns["crsSecd5c95ece2b749c2e0beb1d2bfde0e23e5ad45d93"],
individual_ns["crsSece04b10767b92aa4d53eb5a5b044ef13673b49448"],
individual_ns["crsSece405364a6acf6b819c02915a204114f26ff8551f"],
individual_ns["crsSecf5a9dafe85e39b30bdbd45b3371eeefd7520569d"],
individual_ns["crsSecf603c709ea539acc6b9bb842d574c3d9eb7c17fa"],
individual_ns["crsSecf7b40623128f286084d451d67cc7fb4b60b11c94"],
individual_ns["crsSecf8b3e82fd2f512b3db0727642c6a1b7153581d47"],
individual_ns["crsSecfb9210e5ca6bd4844b7bf9bdf1cb1c5956f81d08"],
}),
completed_courses=frozenset({
individual_ns["crsafed9cb99a22f3c1c24a461212de74c061147fdc"],
individual_ns["crsd13b01ead0fba8b4aa112ce4a06999a774cf7b2d"],
individual_ns["crs16512f1cf1a0772c4b025c3d6ec1edcd0d8fe1fb"],
individual_ns["crsfb2686b704f12418fbb57e79c573d4bb0fd2f418"],
individual_ns["crsbb2f79ec60f43618cd25567f87e71171d29aee83"],
individual_ns["crs3040f719acb6d5f911e4a1e0efdae1aab16e71d5"],
individual_ns["crs76deeb1ecf1123e7b7b6918afd3e7e9c65a5bbdc"],
individual_ns["crsa9004db87efa99687062b8819ace3f59d4e235cd"],
individual_ns["crs8e3b954b259c3b7c341a8839f81fb05deeff68ea"],
individual_ns["crs938c5b7e20ea7e1620a2dd6329e6f0af274b46c3"],
individual_ns["crs667378d70c52e4a84617225e20e380eb49540f42"],
individual_ns["crsd930192130a654416bffd45ce16415ee608df66d"],
individual_ns["crs11d22a217c292f1bd278d88b96fa770c9a6fa207"],
individual_ns["crs66ece4f97b7ad555666d9477af785bcaa7a40e8a"],
individual_ns["crs547b5ccb36b817d3e2df2a96a09aa18f678bc4e0"],
individual_ns["crs4b79ba1b9717a21b3aff7a7d656a471eea21448a"],
individual_ns["crs0f4511984f6fb0682b0185c2dc94b50dbc4efd2a"],
individual_ns["crs70c201e1b37def5c83e4458b044028e8a44f91c7"],
individual_ns["crs9797fa54cb6f077d0e7cf31e23bdbafbbe00e8af"],
individual_ns["crs1f544a878959fae04cb9d08b258e527007df5491"],
individual_ns["crs61c14eb096ee7002039fb8baee948b4495f08440"],
individual_ns["crsb195823511b1f4a6f4b656734aab626993defec6"],
individual_ns["crs8aabf92b49dce005f10db4d14605ad4d5eb920d7"],
individual_ns["crs2a22ca2e61da1be778732a493f944011f5b30519"],
individual_ns["crs72de52b44f46d5b08b2917495701f202699880ca"],
individual_ns["crsc746a794a800d873f1e5deff86c0c58e25f94848"],
individual_ns["crs622f7a32272ea2f04599f688790c2571325b949a"],
individual_ns["crs7c03aa6fefaf99476e8158ef5943f5ee91ee6146"],
}),
ongoing_course_sections=frozenset(),
planned_courses=frozenset(),
)
@pytest.fixture(scope="session")
def placeholder_advisor():
return Advisor(
uri=individual_ns['PLACEHOLDER-ADVISOR-URI'],
name="Placeholder advisor name",
advises_student_uris=tuple()
)
@pytest.fixture(scope="session")
def owen_student(owen_pos, placeholder_advisor):
return Student(
uri=individual_ns['usrowen'],
study_plan=owen_pos,
name="owen",
class_year=2021,
topics_of_interest=frozenset({TopicArea(
uri=individual_ns['hardcodedUserInterest'],
name='semantic web',
sub_topic_of=frozenset(),
discipline="placeholder discipline",
)}),
registered_courses=frozenset(),
advisor=placeholder_advisor,
)
@pytest.fixture(scope="session")
def blank_student(placeholder_advisor, csci_major, csci_bs_deg):
return Student(
uri=individual_ns['blank_user'],
study_plan=PlanOfStudy(
uri=individual_ns['blank_user_pos'],
class_year=2023,
planned_major=csci_major,
planned_degree=csci_bs_deg,
completed_courses=frozenset({}),
completed_course_sections=frozenset({}),
ongoing_course_sections=frozenset(),
planned_courses=frozenset(),
),
name="blank",
class_year=2023,
topics_of_interest=frozenset({TopicArea(
uri=individual_ns['hardcodedUserInterest'],
name='ontology engineering',
sub_topic_of=frozenset(),
discipline="placeholder discipline",
)}),
registered_courses=frozenset(),
advisor=placeholder_advisor,
)
@pytest.fixture(scope="session")
def bs2(placeholder_advisor, csci_major, csci_bs_deg):
return Student(
uri=individual_ns['blank_user'],
study_plan=PlanOfStudy(
uri=individual_ns['blank_user_pos'],
class_year=2023,
planned_major=csci_major,
planned_degree=csci_bs_deg,
completed_courses=frozenset({}),
completed_course_sections=frozenset({}),
ongoing_course_sections=frozenset(),
planned_courses=frozenset(),
),
name="blank",
class_year=2023,
topics_of_interest=frozenset({TopicArea(
uri=individual_ns['hardcodedUserInterest'],
name='artificial intelligence',
sub_topic_of=frozenset(),
discipline="placeholder discipline",
)}),
registered_courses=frozenset(),
advisor=placeholder_advisor,
)
@pytest.fixture(scope="session")
def bs1(placeholder_advisor, csci_major, csci_bs_deg):
return Student(
uri=individual_ns['blank_user'],
study_plan=PlanOfStudy(
uri=individual_ns['blank_user_pos'],
class_year=2023,
planned_major=csci_major,
planned_degree=csci_bs_deg,
completed_courses=frozenset({}),
completed_course_sections=frozenset({}),
ongoing_course_sections=frozenset(),
planned_courses=frozenset(),
),
name="blank",
class_year=2023,
topics_of_interest=frozenset({TopicArea(
uri=individual_ns['hardcodedUserInterest'],
name='machine learning',
sub_topic_of=frozenset(),
discipline="placeholder discipline",
)}),
registered_courses=frozenset(),
advisor=placeholder_advisor,
) | [
"[email protected]"
] | |
c8bf10335c7c1e07b2176c968917ab7c4d5ace34 | 0f3a0be642cd6a2dd792c548cf7212176761e9b1 | /pywps_services/r_mult.py | 9910ee9228a37f667c6a73112163cb45b3e7d2ec | [] | no_license | huhabla/wps-grass-bridge | 63a5d60735d372e295ec6adabe527eec9e72635a | aefdf1516a7517b1b745ec72e2d2481a78e10017 | refs/heads/master | 2021-01-10T10:10:34.246497 | 2014-01-22T23:40:58 | 2014-01-22T23:40:58 | 53,005,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | # ################################################ #
# This process was generated using GrassXMLtoPyWPS #
# Author: Soeren Gebbert #
# Mail: soerengebbert <at> googlemail <dot> com #
# ################################################ #
from pywps.Process import WPSProcess
from PyWPSGrassModuleStarter import PyWPSGrassModuleStarter
class r_mult(WPSProcess):
def __init__(self):
WPSProcess.__init__(self, identifier = 'r.mult', title = 'Multiplies a raster map with one or more raster maps', version = 1, statusSupported = True, storeSupported = True, metadata = [{'type': 'simple', 'title': 'raster'}, {'type': 'simple', 'title': 'math'}], abstract = 'http://grass.osgeo.org/grass70/manuals/html70_user/r.mult.html')
# Literal and complex inputs
self.addComplexInput(identifier = 'inputs', title = 'Raster maps to multiply', minOccurs = 1, maxOccurs = 1024, formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'image/png'}, {'mimeType': 'image/gif'}, {'mimeType': 'image/jpeg'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
self.addLiteralInput(identifier = 'grass_resolution_ns', title = 'Resolution of the mapset in north-south direction in meters or degrees', abstract = 'This parameter defines the north-south resolution of the mapset in meter or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
self.addLiteralInput(identifier = 'grass_resolution_ew', title = 'Resolution of the mapset in east-west direction in meters or degrees', abstract = 'This parameter defines the east-west resolution of the mapset in meters or degrees, which should be used to process the input and output raster data. To enable this setting, you need to specify north-south and east-west resolution.', minOccurs = 0, maxOccurs = 1, type = type(0.0), allowedValues = '*')
self.addLiteralInput(identifier = 'grass_band_number', title = 'Band to select for processing (default is all bands)', abstract = 'This parameter defines band number of the input raster files which should be processed. As default all bands are processed and used as single and multiple inputs for raster modules.', minOccurs = 0, maxOccurs = 1, type = type(0), allowedValues = '*')
# complex outputs
self.addComplexOutput(identifier = 'output', title = 'The result of the mathematical operation', formats = [{'mimeType': 'image/tiff'}, {'mimeType': 'image/geotiff'}, {'mimeType': 'application/geotiff'}, {'mimeType': 'application/x-geotiff'}, {'mimeType': 'application/x-erdas-hfa'}, {'mimeType': 'application/netcdf'}, {'mimeType': 'application/x-netcdf'}])
def execute(self):
starter = PyWPSGrassModuleStarter()
starter.fromPyWPS("r.mult", self.inputs, self.outputs, self.pywps)
if __name__ == "__main__":
process = r_mult()
process.execute()
| [
"soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202"
] | soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202 |
2d51dc8a47690b543abd5f2196e6d22032e34caf | de3b77cb0927f28cbd85e9142c2dfd7c8be7c27e | /tests/migrations/015_user_demographics_up.py | 9e08957363737d8cf8968f4a19885fea3c67bec4 | [
"MIT"
] | permissive | LoansBot/database | f3dcbccde59fdb80c876d2612f250662946588e6 | eeaed26c2dcfdf0f9637b47ebe15cd1e000d8cc4 | refs/heads/master | 2021-07-02T22:07:18.683278 | 2021-06-02T04:09:38 | 2021-06-02T04:09:38 | 239,400,935 | 0 | 1 | MIT | 2021-06-02T04:14:31 | 2020-02-10T01:06:53 | Python | UTF-8 | Python | false | false | 1,166 | py | import unittest
import helper
class UpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = helper.setup_connection()
cls.cursor = cls.connection.cursor()
@classmethod
def tearDownClass(cls):
cls.cursor.close()
cls.connection.rollback()
helper.teardown_connection(cls.connection)
def tearDown(self):
self.connection.rollback()
def test_user_demographics_exist(self):
self.assertTrue(
helper.check_if_table_exist(self.cursor, 'user_demographics')
)
def test_user_demographic_lookups_exist(self):
self.assertTrue(
helper.check_if_table_exist(self.cursor, 'user_demographic_lookups')
)
def test_user_demographic_views_exist(self):
self.assertTrue(
helper.check_if_table_exist(self.cursor, 'user_demographic_views')
)
def test_user_demographic_history_exist(self):
self.assertTrue(
helper.check_if_table_exist(self.cursor, 'user_demographic_history')
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1f25eaacf5c9ccac5ef060cdcaf3e75712ac30ba | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/nn/__init__.py | 422cec64251f38906def1dec89cf3e9f3c1cb091 | [] | no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f9f44f6062a76ea4edc6b57e9980c88ed09cd53ee57337d2e7cebd8696fc0e2f
size 6611
| [
"[email protected]"
] | |
addd10e6193e7e8522a2c5f729c47c0dba75866f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-5429.py | f003b6f244e54ff69c91f1a5eb2bd1fb0dbdf743 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
        item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
6a244e5d202b43213040fc14188fe4cf309356c2 | a7b78ab632b77d1ed6b7e1fa46c33eda7a523961 | /src/foreign_if/python/UT/src/eigen/test_049.py | 558da13e6a88495e2835d12cb1b59571e2a9938d | [
"BSD-2-Clause"
] | permissive | frovedis/frovedis | 80b830da4f3374891f3646a2298d71a3f42a1b2d | 875ae298dfa84ee9815f53db5bf7a8b76a379a6f | refs/heads/master | 2023-05-12T20:06:44.165117 | 2023-04-29T08:30:36 | 2023-04-29T08:30:36 | 138,103,263 | 68 | 13 | BSD-2-Clause | 2018-12-20T10:46:53 | 2018-06-21T01:17:51 | C++ | UTF-8 | Python | false | false | 926 | py | #!/usr/bin/env python
import sys
from frovedis.exrpc.server import FrovedisServer
from frovedis.linalg import eigsh
from scipy.sparse import csr_matrix
desc = "Testing eigsh() for csr_matrix and which = 'SM': "
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if argc < 2:
print ('Please give frovedis_server calling command as the first argument \n'
'(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
quit()
FrovedisServer.initialize(argvs[1])
# sample square symmetric sparse matrix (6x6)
mat = csr_matrix([[ 2.,-1., 0., 0.,-1., 0.], [-1., 3.,-1., 0.,-1., 0.],
[ 0.,-1., 2.,-1., 0., 0.], [ 0., 0.,-1., 3.,-1.,-1],
[-1.,-1., 0.,-1., 3., 0.], [ 0., 0., 0.,-1., 0., 1.]])
try:
eigen_vals, eigen_vecs = eigsh(mat, k = 3, which = 'SM')
print(desc, "Passed")
except:
print(desc, "Failed")
FrovedisServer.shut_down() | [
"[email protected]"
] | |
f3b344d9bd81f498554471e88f34378fee094fa7 | 5a5e0a01efa6ef0961992e53bb4f64840f93150b | /RegressionVisualizer/manage.py | b5db558ef481979ffecd909114ebd0e5bdf372b6 | [] | no_license | scotteskridge/RegressionApp | ed059e3205ab54061129779404345b55c0dee75c | 68932a9c94235a1e8bd6cd71a765b545f2266189 | refs/heads/master | 2021-01-19T20:48:13.495541 | 2017-04-25T02:39:49 | 2017-04-25T02:39:56 | 88,555,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RegressionVisualizer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
print(sys.argv)
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
8469347fb48f63964d990558280d45cfb929ffc9 | c237dfae82e07e606ba9385b336af8173d01b251 | /lib/python/ZPublisher/Client.py | dba20da517921245ef45d6e504060b5b852fa055 | [
"ZPL-2.0"
] | permissive | OS2World/APP-SERVER-Zope | 242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff | dedc799bd7eda913ffc45da43507abe2fa5113be | refs/heads/master | 2020-05-09T18:29:47.818789 | 2014-11-07T01:48:29 | 2014-11-07T01:48:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,145 | py | #!/bin/sh
""":"
exec python $0 ${1+"$@"}
"""
#"
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Bobo call interface
This module provides tools for accessing web objects as if they were
functions or objects with methods. It also provides a simple call function
that allows one to simply make a single web request.
Function -- Function-like objects that return both header and body
data when called.
Object -- Treat a URL as a web object with methods
call -- Simple interface to call a remote function.
The module also provides a command-line interface for calling objects.
"""
__version__='$Revision: 1.45 $'[11:-2]
import sys, re, socket, mimetools
from httplib import HTTP
from os import getpid
from time import time
from random import random
from base64 import encodestring
from urllib import urlopen, quote
from types import FileType, ListType, DictType, TupleType
from string import translate, maketrans
from urlparse import urlparse
class Function:
username=None
password=None
method=None
timeout=60
def __init__(self,url,
arguments=(),method=None,username=None,password=None,
timeout=None,
**headers):
while url[-1:]=='/': url=url[:-1]
self.url=url
self.headers=headers
if not headers.has_key('Host') and not headers.has_key('host'):
headers['Host']=urlparse(url)[1]
self.func_name=url[url.rfind('/')+1:]
self.__dict__['__name__']=self.func_name
self.func_defaults=()
self.args=arguments
if method is not None: self.method=method
if username is not None: self.username=username
if password is not None: self.password=password
if timeout is not None: self.timeout=timeout
mo = urlregex.match(url)
if mo is not None:
host,port,rurl=mo.group(1,2,3)
if port: port=int(port[1:])
else: port=80
self.host=host
self.port=port
rurl=rurl or '/'
self.rurl=rurl
else: raise ValueError, url
def __call__(self,*args,**kw):
method=self.method
if method=='PUT' and len(args)==1 and not kw:
query=[args[0]]
args=()
else:
query=[]
for i in range(len(args)):
try:
k=self.args[i]
if kw.has_key(k): raise TypeError, 'Keyword arg redefined'
kw[k]=args[i]
except IndexError: raise TypeError, 'Too many arguments'
headers={}
for k, v in self.headers.items(): headers[translate(k,dashtrans)]=v
method=self.method
if headers.has_key('Content-Type'):
content_type=headers['Content-Type']
if content_type=='multipart/form-data':
return self._mp_call(kw)
else:
content_type=None
if not method or method=='POST':
for v in kw.values():
if hasattr(v,'read'): return self._mp_call(kw)
can_marshal=type2marshal.has_key
for k,v in kw.items():
t=type(v)
if can_marshal(t): q=type2marshal[t](k,v)
else: q='%s=%s' % (k,quote(v))
query.append(q)
url=self.rurl
if query:
query='&'.join(query)
method=method or 'POST'
if method == 'PUT':
headers['Content-Length']=str(len(query))
if method != 'POST':
url="%s?%s" % (url,query)
query=''
elif not content_type:
headers['Content-Type']='application/x-www-form-urlencoded'
headers['Content-Length']=str(len(query))
else: method=method or 'GET'
if (self.username and self.password and
not headers.has_key('Authorization')):
headers['Authorization']=(
"Basic %s" %
encodestring('%s:%s' % (self.username,self.password)).replace(
'\012','')
)
try:
h=HTTP()
h.connect(self.host, self.port)
h.putrequest(method, self.rurl)
for hn,hv in headers.items():
h.putheader(translate(hn,dashtrans),hv)
h.endheaders()
if query: h.send(query)
ec,em,headers=h.getreply()
response =h.getfile().read()
except:
raise NotAvailable, RemoteException(
NotAvailable,sys.exc_info()[1],self.url,query)
if (ec - (ec % 100)) == 200:
return (headers,response)
self.handleError(query, ec, em, headers, response)
def handleError(self, query, ec, em, headers, response):
try: v=headers.dict['bobo-exception-value']
except: v=ec
try: f=headers.dict['bobo-exception-file']
except: f='Unknown'
try: l=headers.dict['bobo-exception-line']
except: l='Unknown'
try: t=exceptmap[headers.dict['bobo-exception-type']]
except:
if ec >= 400 and ec < 500: t=NotFound
elif ec == 503: t=NotAvailable
else: t=ServerError
raise t, RemoteException(t,v,f,l,self.url,query,ec,em,response)
def _mp_call(self,kw,
type2suffix={
type(1.0): ':float',
type(1): ':int',
type(1L): ':long',
type([]): ':list',
type(()): ':tuple',
}
):
# Call a function using the file-upload protocol
# Add type markers to special values:
d={}
special_type=type2suffix.has_key
for k,v in kw.items():
if ':' not in k:
t=type(v)
if special_type(t): d['%s%s' % (k,type2suffix[t])]=v
else: d[k]=v
else: d[k]=v
rq=[('POST %s HTTP/1.0' % self.rurl),]
for n,v in self.headers.items():
rq.append('%s: %s' % (n,v))
if self.username and self.password:
c=encodestring('%s:%s' % (self.username,self.password)).replace('\012','')
rq.append('Authorization: Basic %s' % c)
rq.append(MultiPart(d).render())
rq='\r\n'.join(rq)
try:
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((self.host,self.port))
sock.send(rq)
reply=sock.makefile('rb')
sock=None
line=reply.readline()
try:
[ver, ec, em] = line.split(None, 2)
except ValueError:
raise 'BadReply','Bad reply from server: '+line
if ver[:5] != 'HTTP/':
raise 'BadReply','Bad reply from server: '+line
ec=int(ec)
em=em.strip()
headers=mimetools.Message(reply,0)
response=reply.read()
finally:
if 0:
raise NotAvailable, (
RemoteException(NotAvailable,sys.exc_info()[1],
self.url,'<MultiPart Form>'))
if ec==200: return (headers,response)
self.handleError('', ec, em, headers, response)
class Object:
"""Surrogate object for an object on the web"""
username=None
password=None
method=None
timeout=None
special_methods= 'GET','POST','PUT'
def __init__(self, url,
method=None,username=None,password=None,
timeout=None,
**headers):
self.url=url
self.headers=headers
if not headers.has_key('Host') and not headers.has_key('host'):
headers['Host']=urlparse(url)[1]
if method is not None: self.method=method
if username is not None: self.username=username
if password is not None: self.password=password
if timeout is not None: self.timeout=timeout
def __getattr__(self, name):
if name in self.special_methods:
method=name
url=self.url
else:
method=self.method
url="%s/%s" % (self.url, name)
f=Function(url,
method=method,
username=self.username,
password=self.password,
timeout=self.timeout)
f.headers=self.headers
return f
def call(url,username=None, password=None, **kw):
return apply(Function(url,username=username, password=password), (), kw)
##############################################################################
# Implementation details below here
urlregex=re.compile(r'http://([^:/]+)(:[0-9]+)?(/.+)?', re.I)
dashtrans=maketrans('_','-')
def marshal_float(n,f): return '%s:float=%s' % (n,f)
def marshal_int(n,f): return '%s:int=%s' % (n,f)
def marshal_long(n,f):
value = '%s:long=%s' % (n, f)
if value[-1] == 'L':
value = value[:-1]
return value
def marshal_list(n,l,tname='list', lt=type([]), tt=type(())):
r=[]
for v in l:
t=type(v)
if t is lt or t is tt:
raise TypeError, 'Invalid recursion in data to be marshaled.'
r.append(marshal_whatever("%s:%s" % (n,tname) ,v))
return '&'.join(r)
def marshal_tuple(n,l):
return marshal_list(n,l,'tuple')
type2marshal={
type(1.0): marshal_float,
type(1): marshal_int,
type(1L): marshal_long,
type([]): marshal_list,
type(()): marshal_tuple,
}
def marshal_whatever(k,v):
try: q=type2marshal[type(v)](k,v)
except KeyError: q='%s=%s' % (k,quote(str(v)))
return q
def querify(items):
query=[]
for k,v in items: query.append(marshal_whatever(k,v))
return query and '&'.join(query) or ''
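# Marshaling sketch (assumed inputs): querify([('name', 'Bob'), ('age', 42), ('ratio', 1.5)])
# yields 'name=Bob&age:int=42&ratio:float=1.5' -- the ':type' suffixes are the form
# marshaling hints produced by the marshal_* helpers above.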
NotFound ='bci.NotFound'
InternalError='bci.InternalError'
BadRequest ='bci.BadRequest'
Unauthorized ='bci.Unauthorized'
ServerError ='bci.ServerError'
NotAvailable ='bci.NotAvailable'
exceptmap ={'AttributeError' :AttributeError,
'BadRequest' :BadRequest,
'EOFError' :EOFError,
'IOError' :IOError,
'ImportError' :ImportError,
'IndexError' :IndexError,
'InternalError' :InternalError,
'KeyError' :KeyError,
'MemoryError' :MemoryError,
'NameError' :NameError,
'NotAvailable' :NotAvailable,
'NotFound' :NotFound,
'OverflowError' :OverflowError,
'RuntimeError' :RuntimeError,
'ServerError' :ServerError,
'SyntaxError' :SyntaxError,
'SystemError' :SystemError,
'SystemExit' :SystemExit,
'TypeError' :TypeError,
'Unauthorized' :Unauthorized,
'ValueError' :ValueError,
'ZeroDivisionError':ZeroDivisionError}
class RemoteException:
def __init__(self,etype=None,evalue=None,efile=None,eline=None,url=None,
query=None,http_code=None,http_msg=None, http_resp=None):
"""Contains information about an exception which
occurs in a remote method call"""
self.exc_type =etype
self.exc_value =evalue
self.exc_file =efile
self.exc_line =eline
self.url =url
self.query =query
self.http_code =http_code
self.http_message=http_msg
self.response =http_resp
def __repr__(self):
return '%s (File: %s Line: %s)\n%s %s for %s' % (
self.exc_value,self.exc_file,self.exc_line,
self.http_code,self.http_message,self.url)
class MultiPart:
def __init__(self,*args):
c=len(args)
if c==1: name,val=None,args[0]
elif c==2: name,val=args[0],args[1]
else: raise ValueError, 'Invalid arguments'
h={'Content-Type': {'_v':''},
'Content-Transfer-Encoding': {'_v':''},
'Content-Disposition': {'_v':''},}
dt=type(val)
b=t=None
if dt==DictType:
t=1
b=self.boundary()
d=[]
h['Content-Type']['_v']='multipart/form-data; boundary=%s' % b
for n,v in val.items():
d.append(MultiPart(n,v))
elif (dt==ListType) or (dt==TupleType):
raise ValueError, 'Sorry, nested multipart is not done yet!'
elif dt==FileType or hasattr(val,'read'):
if hasattr(val,'name'):
fn=val.name.replace( '\\', '/')
fn=fn[(fn.rfind('/')+1):]
ex=(fn[(fn.rfind('.')+1):]).lower()
if self._extmap.has_key(ex):
ct=self._extmap[ex]
else:
ct=self._extmap['']
else:
fn=''
ct=self._extmap[None]
if self._encmap.has_key(ct): ce=self._encmap[ct]
else: ce=''
h['Content-Disposition']['_v'] ='form-data'
h['Content-Disposition']['name'] ='"%s"' % name
h['Content-Disposition']['filename']='"%s"' % fn
h['Content-Transfer-Encoding']['_v']=ce
h['Content-Type']['_v'] =ct
d=[]
l=val.read(8192)
while l:
d.append(l)
l=val.read(8192)
else:
h['Content-Disposition']['_v']='form-data'
h['Content-Disposition']['name']='"%s"' % name
d=[str(val)]
self._headers =h
self._data =d
self._boundary=b
self._top =t
def boundary(self):
return '%s_%s_%s' % (int(time()), getpid(), int(random()*1000000000))
def render(self):
h=self._headers
s=[]
if self._top:
for n,v in h.items():
if v['_v']:
s.append('%s: %s' % (n,v['_v']))
for k in v.keys():
if k != '_v': s.append('; %s=%s' % (k, v[k]))
s.append('\r\n')
p=[]
t=[]
b=self._boundary
for d in self._data: p.append(d.render())
t.append('--%s\n' % b)
t.append(('\n--%s\n' % b).join(p))
t.append('\n--%s--\n' % b)
t=''.join(t)
s.append('Content-Length: %s\r\n\r\n' % len(t))
s.append(t)
return ''.join(s)
else:
for n,v in h.items():
if v['_v']:
s.append('%s: %s' % (n,v['_v']))
for k in v.keys():
if k != '_v': s.append('; %s=%s' % (k, v[k]))
s.append('\r\n')
s.append('\r\n')
if self._boundary:
p=[]
b=self._boundary
for d in self._data: p.append(d.render())
s.append('--%s\n' % b)
s.append(('\n--%s\n' % b).join(p))
s.append('\n--%s--\n' % b)
return ''.join(s)
else:
return ''.join(s+self._data)
_extmap={'': 'text/plain',
'rdb': 'text/plain',
'html': 'text/html',
'dtml': 'text/html',
'htm': 'text/html',
'dtm': 'text/html',
'gif': 'image/gif',
'jpg': 'image/jpeg',
'exe': 'application/octet-stream',
None : 'application/octet-stream',
}
_encmap={'image/gif': 'binary',
'image/jpg': 'binary',
'application/octet-stream': 'binary',
}
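# File-upload sketch (hypothetical URL and file name): passing an open file object as a
# keyword argument makes Function.__call__ fall back to _mp_call, which sends the request
# as the multipart/form-data body built by the MultiPart class above.
#
#   f = Function('http://www.example.com/folder/upload_method')
#   headers, body = f(document=open('report.txt', 'rb'))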
def ErrorTypes(code):
if code >= 400 and code < 500: return NotFound
if code >= 500 and code < 600: return ServerError
return 'HTTP_Error_%s' % code
usage="""
Usage: %s [-u username:password] url [name=value ...]
where url is the web resource to call.
The -u option may be used to provide a user name and password.
Optional arguments may be provides as name=value pairs.
In a name value pair, if a name ends in ":file", then the value is
treated as a file name and the file is send using the file-upload
protocol. If the file name is "-", then data are taken from standard
input.
The body of the response is written to standard output.
The headers of the response are written to standard error.
""" % sys.argv[0]
def main():
import getopt
user=None
try:
optlist, args = getopt.getopt(sys.argv[1:],'u:')
url=args[0]
u =filter(lambda o: o[0]=='-u', optlist)
if u:
[user, pw] = u[0][1].split(':')
kw={}
for arg in args[1:]:
[name,v]=arg.split('=')
if name[-5:]==':file':
name=name[:-5]
if v=='-': v=sys.stdin
else: v=open(v, 'rb')
kw[name]=v
except:
print usage
sys.exit(1)
# The "main" program for this module
f=Function(url)
if user: f.username, f.password = user, pw
headers, body = apply(f,(),kw)
sys.stderr.write(''.join(map(lambda h: "%s: %s\n" % h, headers.items()))
+"\n\n")
print body
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6cc1542064216d2c36184802c5ba5aaf719fec2f | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ALCATEL-IND1-E-SERVICE-MIB.py | 6ac2637b7fc0d24af9f67b9e0d9c926639877700 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 47,663 | py | #
# PySNMP MIB module ALCATEL-IND1-E-SERVICE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALCATEL-IND1-E-SERVICE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:17:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
softentIND1eService, = mibBuilder.importSymbols("ALCATEL-IND1-BASE", "softentIND1eService")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Gauge32, Counter32, iso, NotificationType, ModuleIdentity, ObjectIdentity, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter64, Unsigned32, Integer32, TimeTicks, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "iso", "NotificationType", "ModuleIdentity", "ObjectIdentity", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter64", "Unsigned32", "Integer32", "TimeTicks", "Bits")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
alcatelIND1EServiceMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1))
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setLastUpdated('200705230000Z')
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setOrganization('Alcatel-Lucent')
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setContactInfo('Please consult with Customer Service to ensure the most appropriate version of this document is used with the products in question: Alcatel-Lucent, Enterprise Solutions Division (Formerly Alcatel Internetworking, Incorporated) 26801 West Agoura Road Agoura Hills, CA 91301-5122 United States Of America Telephone: North America +1 800 995 2696 Latin America +1 877 919 9526 Europe +31 23 556 0100 Asia +65 394 7933 All Other +1 818 878 4507 Electronic Mail: [email protected] World Wide Web: http://alcatel-lucent.com/wps/portal/enterprise File Transfer Protocol: ftp://ftp.ind.alcatel.com/pub/products/mibs')
if mibBuilder.loadTexts: alcatelIND1EServiceMIB.setDescription('The parameters for configuration of the E-Service feature. The right to make changes in specification and other information contained in this document without prior notice is reserved. No liability shall be assumed for any incidental, indirect, special, or consequential damages whatsoever arising from or related to this document or the information contained herein. Vendors, end-users, and other interested parties are granted non-exclusive license to use this specification in connection with management of the products for which it is intended to be used. Copyright (C) 1995-2006 Alcatel-Lucent ALL RIGHTS RESERVED WORLDWIDE')
class AlaEServiceUNIProfileProtocolTreatment(TextualConvention, Integer32):
    description = 'The behavior of the bridge in regards to the given protocols packets received on the UNI. Tunnel (1) enables the packets to be tunneled across the provider network. Discard (2) causes the packets to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("tunnel", 1), ("drop", 2), ("peer", 3))
alcatelIND1eServiceMIBObjects = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1))
if mibBuilder.loadTexts: alcatelIND1eServiceMIBObjects.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1eServiceMIBObjects.setDescription('Branch For E-Service Managed Objects.')
alcatelIND1EServiceMIBConformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2))
if mibBuilder.loadTexts: alcatelIND1EServiceMIBConformance.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBConformance.setDescription('Branch For E-Service Conformance Information.')
alcatelIND1EServiceMIBGroups = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1))
if mibBuilder.loadTexts: alcatelIND1EServiceMIBGroups.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBGroups.setDescription('Branch For E-Service Units Of Conformance.')
alcatelIND1EServiceMIBCompliances = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 2))
if mibBuilder.loadTexts: alcatelIND1EServiceMIBCompliances.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBCompliances.setDescription('Branch For E-Service Compliance Statements.')
alaEService = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1))
alaEServiceInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 1))
alaEServiceMode = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("legacyMode", 1), ("eServiceMode", 2))).clone('legacyMode')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alaEServiceMode.setStatus('current')
if mibBuilder.loadTexts: alaEServiceMode.setDescription('The current mode configured for Vlan Stacking and Layer 2 tunnel configuration. legacyMode (1) indicates that the commands from AlcatelIND1VLANStacking.mib are to be used. eServiceMode (2) indicates the commands from this MIB are to be used.')
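# Reading one of these objects with pysnmp's high-level API -- a minimal sketch; the agent
# address, community string and the presence of this compiled MIB on the manager's MIB
# path are assumptions, not something this module provides:
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'), UdpTransportTarget(('192.0.2.1', 161)),
#       ContextData(),
#       ObjectType(ObjectIdentity('ALCATEL-IND1-E-SERVICE-MIB', 'alaEServiceMode', 0))))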
alaEServiceSapProfileTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2), )
if mibBuilder.loadTexts: alaEServiceSapProfileTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileTable.setDescription('A table that contains service profiles containing performance and control attributes. An entry in this table is created when a new service profile is defined.')
alaEServiceSapProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1), ).setIndexNames((1, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileID"))
if mibBuilder.loadTexts: alaEServiceSapProfileEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileEntry.setDescription('A E-Service Service Profile entry.')
alaEServiceSapProfileID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: alaEServiceSapProfileID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileID.setDescription('A label given to uniquely identify this profile. Must be at least one character long.')
alaEServiceSapProfileCVLANTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("stackSVLAN", 1), ("translate", 2), ("changeCVLAN", 3))).clone('stackSVLAN')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileCVLANTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileCVLANTreatment.setDescription('The type of VLAN stacking operation to be performed on a customer frame entering this service. Stack Svlan (1) indicates that the SVLAN is to be pre-pended on the frame before any existing 802.1Q tag. Translate (2) means to replace the existing 802.1Q tag with the SVLAN. Change CVLAN (3) indicates that the customer tag is to remain on the frame but its value is to be changed to the supplied value.')
alaEServiceSapProfileReplacementCVLAN = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileReplacementCVLAN.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileReplacementCVLAN.setDescription('The CVLAN ID to use when using the Change CVLAN treatment mode.')
alaEServiceSapProfilePriorityMapMode = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("notAssigned", 0), ("mapInnerPtoOuterP", 1), ("mapInnerDscpToOuterP", 2), ("fixedP", 3))).clone('fixedP')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfilePriorityMapMode.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfilePriorityMapMode.setDescription('This object describes the source of the value for the priority field of the SVLAN 802.1Q tag when pre-pended to the customer data frame.NotAssigned(0), MapInnerPtoOuterP (1) uses the priority field of the incoming frame when tagged to fill in the priority field of the SVLAN tag. mapInnerDscpToOuterP (2) uses the frames priority bits in its IP DSCP field to fill in the priority field of the SVLAN tag. FixedP (3) uses the supplied FixedPriorityValue to fill in the SVLAN tag priority bits.')
alaEServiceSapProfileFixedPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileFixedPriority.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileFixedPriority.setDescription('This object describes the value of the priority field of the 802.1Q SVLAN tag pre-pended to customer data frames when the fixed priority mapping mode is selected.')
alaEServiceSapProfileIngressBW = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileIngressBW.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileIngressBW.setDescription('This object describes this limit of ingress bandwidth for the traffic to which this profile is applied. If 0, no bandwidth limit is applied. This number represents traffic in units of 1,000,000 bits per second. Note that all CVLAN that belong to this SAP will share this aggregated limit.')
alaEServiceSapProfileBandwidthShare = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("shared", 1), ("notShared", 2))).clone('shared')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileBandwidthShare.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileBandwidthShare.setDescription('This object describes the use of the bandwidth limit in how it is applied across multiple ports of the SAP. If set to notApplicable(0), the SAP is not used. If set to Shared (1), all the ports that are part of the SAP will use aggregated bandwidth, sharing some part of the bandwidth limit. If set to notShared (2), each port will use its own bandwidth meter for this SAP. This value is not used if ingressBandwidth is 0.')
alaEServiceSapProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileRowStatus.setDescription('The status of this table entry.')
alaEServiceSapProfileEgressBW = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 2, 1, 9), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfileEgressBW.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileEgressBW.setDescription('This object describes this limit of egress bandwidth for each UNI of the SAP to which this profile is applied. If 0, no bandwidth limit is applied. This number represents traffic in units of Megabits per second. Note that all CVLAN that belong to this SAP will share this aggregated limit.')
alaEServiceUNIProfileTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3), )
if mibBuilder.loadTexts: alaEServiceUNIProfileTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileTable.setDescription('A table that contains service profiles containing performance and control attributes. An entry in this table is created when a new service profile is defined.')
alaEServiceUNIProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1), ).setIndexNames((1, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileID"))
if mibBuilder.loadTexts: alaEServiceUNIProfileEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileEntry.setDescription('A E-Service Service Profile entry.')
alaEServiceUNIProfileID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: alaEServiceUNIProfileID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileID.setDescription('A label given to uniquely identify this profile. Must be at least one character long.')
alaEServiceUNIProfileStpBpduTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 2), AlaEServiceUNIProfileProtocolTreatment().clone('tunnel')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileStpBpduTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileStpBpduTreatment.setDescription('This object describes the behavior of the bridge in regards to the spanning tree protocol BPDU received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently Peer is not supported for Spanning Tree')
alaEServiceUNIProfile8021xTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 3), AlaEServiceUNIProfileProtocolTreatment().clone('drop')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfile8021xTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfile8021xTreatment.setDescription('This object describes the behavior of the bridge in regards to the IEEE 802.1x PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only drop is supported')
alaEServiceUNIProfile8021ABTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 4), AlaEServiceUNIProfileProtocolTreatment().clone('drop')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfile8021ABTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfile8021ABTreatment.setDescription('This object describes the behavior of the bridge in regards to the IEEE 802.1AB PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only drop is supported')
alaEServiceUNIProfile8023adTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 5), AlaEServiceUNIProfileProtocolTreatment().clone('peer')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfile8023adTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfile8023adTreatment.setDescription('This object describes the behavior of the bridge in regards to the IEEE 802.1ad PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only peer is supported')
alaEServiceUNIProfileGvrpTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 6), AlaEServiceUNIProfileProtocolTreatment().clone('tunnel')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileGvrpTreatment.setStatus('deprecated')
if mibBuilder.loadTexts: alaEServiceUNIProfileGvrpTreatment.setDescription('This object describes the behavior of the bridge in regards to the GVRP PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently peer is not supported for GVRP')
alaEServiceUNIProfileAmapTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 7), AlaEServiceUNIProfileProtocolTreatment().clone('drop')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileAmapTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileAmapTreatment.setDescription('This object describes the behavior of the bridge in regards to the Alcatel proprietary AMAP PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently only drop is supported')
alaEServiceUNIProfileMvrpTreatment = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 8), AlaEServiceUNIProfileProtocolTreatment().clone('tunnel')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileMvrpTreatment.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileMvrpTreatment.setDescription('This object describes the behavior of the bridge in regards to the MVRP PDU frames received on the UNI. Tunnel (1) enables the PDU to be tunneled across the provider network. Discard (2) causes the PDU of the protocol to be discarded and not enter the provider network. Peer (3) means that on this port the bridge is to participate in the protocol. Currently peer is not supported for MVRP')
alaEServiceUNIProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 3, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceUNIProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileRowStatus.setDescription('The status of this table entry.')
alaEServiceTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4), )
if mibBuilder.loadTexts: alaEServiceTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceTable.setDescription('A table that contains the services and their assigned SVLAN for the E-Service feature.')
alaEServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1), ).setIndexNames((1, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceID"))
if mibBuilder.loadTexts: alaEServiceEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceEntry.setDescription('The svlan/ipmvlan-port association.')
alaEServiceID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: alaEServiceID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceID.setDescription('A label given to uniquely identify this Service. Must be at least one character long.')
alaEServiceSVLAN = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSVLAN.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSVLAN.setDescription('The SVLAN number of the SVLAN chosen to the be transport for this service.')
alaEServiceVlanType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("unknown", 0), ("svlan", 1), ("ipmvlan", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceVlanType.setStatus('current')
if mibBuilder.loadTexts: alaEServiceVlanType.setDescription('The type of the vlan this service is going to attach to. When creating the service, the type should match the vlanId specified in the request.')
alaEServiceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceRowStatus.setDescription('The status of this table entry. The supported value for set are createAndGo (4) and destroy(6), to add or remove a service. When creating or deleting the service, the user needs to provide both the svlan and the vlantype objects.')
alaEServiceSapTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5), )
if mibBuilder.loadTexts: alaEServiceSapTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapTable.setDescription("A table that contains the Service Access Points (Sap) listed by ID. This table is used to create, delete, and modify the SAP's profile")
alaEServiceSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapID"))
if mibBuilder.loadTexts: alaEServiceSapEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapEntry.setDescription('The list of SAP.')
alaEServiceSapID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: alaEServiceSapID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapID.setDescription('A Number given to uniquely identify the SAP.')
alaEServiceSapServiceID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapServiceID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapServiceID.setDescription('A label given to uniquely identify the Service this SAP is for. Must be at least one character long.')
alaEServiceSapProfile = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapProfile.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfile.setDescription('The string identifying the SAP Profile this sap is to use. If specified, must match an existing SAP Profile.')
alaEServiceSapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 5, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapRowStatus.setDescription('The status of this table entry. The supported value for set are createAndGo (4) and destroy(6), to add or remove a sap. When creating the sap, the user needs to provide the service name in the same set request.')
alaEServiceSapCvlanTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6), )
if mibBuilder.loadTexts: alaEServiceSapCvlanTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanTable.setDescription('A table that contains the Service Access Points (Sap) where the CVLANs are bound to their service.')
alaEServiceSapCvlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanSapID"), (0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanCvlan"))
if mibBuilder.loadTexts: alaEServiceSapCvlanEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanEntry.setDescription('The CVLAN to Sap binding.')
alaEServiceSapCvlanSapID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: alaEServiceSapCvlanSapID.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanSapID.setDescription('A Number given to uniquely identify this SAP.')
alaEServiceSapCvlanCvlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094)))
if mibBuilder.loadTexts: alaEServiceSapCvlanCvlan.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanCvlan.setDescription('This object is the CVLAN ID that this binding is targeted at. The CVLAN ID may be 0, which indicates an all or untagged only mapping type.')
alaEServiceSapCvlanMapType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("single", 1), ("all", 2), ("untaggedOnly", 3))).clone('single')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapCvlanMapType.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanMapType.setDescription('This object is the mapping type that defines what CVLANs are mapped into this service. Multiple mappings can be defined for CVLAN to service, however only one all (2) or untaggedOnly (3) mapping entry can be created per UNI. A mapping type of Single (1) denotes a specific CVLAN value to bind to the service. A mapping type of All (2) denotes that all customer frames that do not map to any other SAP, will be mapped into this service. A mapping type of Untagged (3) denotes that only the untagged frames will be mapped into this service.')
alaEServiceSapCvlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 6, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapCvlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanRowStatus.setDescription('The status of this table entry. The supported value for set are createAndGo (4) and destroy(6), to add or remove a SAP.')
alaEServicePortTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7), )
if mibBuilder.loadTexts: alaEServicePortTable.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortTable.setDescription('A table that contains the ports used by the EService feature. Both UNI and NNI are listed here.')
alaEServicePortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortID"))
if mibBuilder.loadTexts: alaEServicePortEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortEntry.setDescription('The list of ports being used by EService.')
alaEServicePortID = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: alaEServicePortID.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortID.setDescription('The IfIndex of this UNI or NNI Port.')
alaEServicePortType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3))).clone(namedValues=NamedValues(("uni", 1), ("nni", 3))).clone('uni')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortType.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortType.setDescription('The type of port for Vlan Stacking operation. uni (1) represents a customer facing port on which traffic may enter the E-Service. nni (2) respresents a provider network port over which the E-Service may be connected.')
alaEServicePortVendorTpid = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 3), Integer32().clone(33024)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortVendorTpid.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortVendorTpid.setDescription('The TPID for this port if type is NNI. It is used for the incoming data traffic parsing and it is substituted to the 802.1Q standard Tpid for the outgoing data traffic. This is used for compatibility with other vendor equipment. The default value is the standard value 0x8100.')
alaEServicePortLegacyStpBpdu = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortLegacyStpBpdu.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortLegacyStpBpdu.setDescription('The legacy STP BPDU treatment for this port if NNI. It defines the type of processing applied to STP legacy BPDUs on network ports. Legacy BPDU refer to conventional/customer BPDUs with MAC address 01:80:c2:00:00:00 and its processing on network ports can be enabled/disabled by this object.By default the value is disabled i.e provider MAC BPDU with MAC address 01:80:c2:00:00:08 would be processed at network ports.')
alaEServicePortLegacyGvrpPdu = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortLegacyGvrpPdu.setStatus('deprecated')
if mibBuilder.loadTexts: alaEServicePortLegacyGvrpPdu.setDescription('The legacy GVRP PDU treatment for this port if NNI. It defines the type of processing applied to GVRP PDUs on network ports. ')
alaEServicePortUniProfile = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortUniProfile.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortUniProfile.setDescription('The label of an existing UNI profile that which contains various properties to be applied to this port if UNI.')
alaEServicePortTransBridging = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortTransBridging.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortTransBridging.setDescription('The Transparent Bridging status for the nni Port.')
alaEServicePortLegacyMvrpPdu = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("notApplicable", 0), ("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortLegacyMvrpPdu.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortLegacyMvrpPdu.setDescription('The legacy MVRP PDU treatment for this port if NNI. It defines the type of processing applied to MVRP PDUs on network ports. ')
alaEServicePortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 7, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServicePortRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortRowStatus.setDescription('The status of this table entry. The supported value for set are createAndGo (4) and destroy(6), to add or remove a binding')
alaEServiceSapUniTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8), )
if mibBuilder.loadTexts: alaEServiceSapUniTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniTable.setDescription('A table that contains the UNI that are bound to each SAP for classifying traffic into each EService. Not that writing to this table may create a new UNI.')
alaEServiceSapUniEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniSap"), (0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniUni"))
if mibBuilder.loadTexts: alaEServiceSapUniEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniEntry.setDescription('The list of SAP-UNI bindings being used by EService.')
alaEServiceSapUniSap = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: alaEServiceSapUniSap.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniSap.setDescription('The SAP ID that is configured onto this port.')
alaEServiceSapUniUni = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: alaEServiceSapUniUni.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniUni.setDescription('The IfIndex of this UNI Port.')
alaEServiceSapUniRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 8, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceSapUniRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniRowStatus.setDescription('The status of this table entry. The supported value for set are createAndGo (4) and destroy(6), to add or remove a binding')
alaEServiceNniSvlanTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9), )
if mibBuilder.loadTexts: alaEServiceNniSvlanTable.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanTable.setDescription('A table that contains the SVLANs bound to each NNI for use by the EService feature.')
alaEServiceNniSvlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1), ).setIndexNames((0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanNni"), (0, "ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanSvlan"))
if mibBuilder.loadTexts: alaEServiceNniSvlanEntry.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanEntry.setDescription('The list of NNI-SVLAN bindings being used by EService.')
alaEServiceNniSvlanNni = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: alaEServiceNniSvlanNni.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanNni.setDescription('The IfIndex of this NNI Port.')
alaEServiceNniSvlanSvlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 4094)))
if mibBuilder.loadTexts: alaEServiceNniSvlanSvlan.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanSvlan.setDescription('The SVLAN bound to this port. SVLAN cannot be 1.')
alaEServiceNniSvlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceNniSvlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanRowStatus.setDescription('The status of this table entry. The supported value for set are createAndGo (4) and destroy(6), to add or remove a binding')
alaEServiceNniSvlanVpaType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 1, 1, 9, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("stp", 1), ("erp", 2))).clone('stp')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: alaEServiceNniSvlanVpaType.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanVpaType.setDescription('The object is used to specify whether the VPA state is to be controlled by an ERP or a STP. By default VPA state is controlled by STP.')
alcatelIND1EServiceMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 2, 1)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanGroup"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceInfoGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alcatelIND1EServiceMIBCompliance = alcatelIND1EServiceMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: alcatelIND1EServiceMIBCompliance.setDescription('Compliance statement for E-Service.')
alaEServiceSapProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 1)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileCVLANTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileReplacementCVLAN"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfilePriorityMapMode"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileFixedPriority"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileIngressBW"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileBandwidthShare"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileRowStatus"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfileEgressBW"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceSapProfileGroup = alaEServiceSapProfileGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapProfileGroup.setDescription('Collection of objects for management of E-Service Sap Profiles.')
alaEServiceUNIProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 2)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileStpBpduTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfile8021xTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfile8021ABTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfile8023adTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileGvrpTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileAmapTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileMvrpTreatment"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceUNIProfileRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceUNIProfileGroup = alaEServiceUNIProfileGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceUNIProfileGroup.setDescription('Collection of objects for management of EService UNI Profiles.')
alaEServiceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 3)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSVLAN"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceVlanType"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceGroup = alaEServiceGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceGroup.setDescription('Collection of objects for management of E-Services.')
alaEServiceSapGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 4)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapServiceID"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapProfile"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceSapGroup = alaEServiceSapGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapGroup.setDescription('Collection of objects for management of E-Service SAPs.')
alaEServiceSapCvlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 5)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanMapType"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapCvlanRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceSapCvlanGroup = alaEServiceSapCvlanGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapCvlanGroup.setDescription('Collection of objects for management of E-Service SAP CVLAN bindings.')
alaEServicePortGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 6)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortType"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortVendorTpid"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortLegacyStpBpdu"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortLegacyGvrpPdu"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortUniProfile"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortTransBridging"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortLegacyMvrpPdu"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServicePortRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServicePortGroup = alaEServicePortGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServicePortGroup.setDescription('Collection of objects for management of E-Service Ports.')
alaEServiceSapUniGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 7)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceSapUniRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceSapUniGroup = alaEServiceSapUniGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceSapUniGroup.setDescription('Collection of objects for management of E-Service SAP to UNI Binding.')
alaEServiceNniSvlanGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 8)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanRowStatus"), ("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceNniSvlanVpaType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceNniSvlanGroup = alaEServiceNniSvlanGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceNniSvlanGroup.setDescription('Collection of objects for management of E-Service SVLAN to NNI Binding.')
alaEServiceInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 50, 1, 2, 1, 9)).setObjects(("ALCATEL-IND1-E-SERVICE-MIB", "alaEServiceMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    alaEServiceInfoGroup = alaEServiceInfoGroup.setStatus('current')
if mibBuilder.loadTexts: alaEServiceInfoGroup.setDescription('Collection of objects for management of E-Service Info Binding.')
mibBuilder.exportSymbols("ALCATEL-IND1-E-SERVICE-MIB", alaEServiceUNIProfile8021xTreatment=alaEServiceUNIProfile8021xTreatment, alaEServiceNniSvlanEntry=alaEServiceNniSvlanEntry, alaEServiceUNIProfileGroup=alaEServiceUNIProfileGroup, alaEServicePortTable=alaEServicePortTable, alaEServiceUNIProfileAmapTreatment=alaEServiceUNIProfileAmapTreatment, alaEServiceTable=alaEServiceTable, alcatelIND1EServiceMIB=alcatelIND1EServiceMIB, alaEServiceSapCvlanCvlan=alaEServiceSapCvlanCvlan, alcatelIND1EServiceMIBCompliance=alcatelIND1EServiceMIBCompliance, alaEServiceUNIProfileTable=alaEServiceUNIProfileTable, alaEServicePortType=alaEServicePortType, alaEServiceUNIProfileMvrpTreatment=alaEServiceUNIProfileMvrpTreatment, alaEServiceNniSvlanVpaType=alaEServiceNniSvlanVpaType, alaEServiceInfo=alaEServiceInfo, alaEServicePortUniProfile=alaEServicePortUniProfile, alaEServiceSapProfileReplacementCVLAN=alaEServiceSapProfileReplacementCVLAN, alaEServiceSapProfileTable=alaEServiceSapProfileTable, alaEServiceSapProfileID=alaEServiceSapProfileID, alaEServiceSapEntry=alaEServiceSapEntry, alaEServiceUNIProfileStpBpduTreatment=alaEServiceUNIProfileStpBpduTreatment, alaEServicePortTransBridging=alaEServicePortTransBridging, alaEServicePortEntry=alaEServicePortEntry, alaEServiceSapUniSap=alaEServiceSapUniSap, alaEServiceSapProfileGroup=alaEServiceSapProfileGroup, alaEServiceSapProfileCVLANTreatment=alaEServiceSapProfileCVLANTreatment, alaEServiceSapProfileRowStatus=alaEServiceSapProfileRowStatus, alaEServiceRowStatus=alaEServiceRowStatus, alaEServiceSapProfileEgressBW=alaEServiceSapProfileEgressBW, alaEServicePortLegacyStpBpdu=alaEServicePortLegacyStpBpdu, alaEServiceSapRowStatus=alaEServiceSapRowStatus, alaEServiceUNIProfile8021ABTreatment=alaEServiceUNIProfile8021ABTreatment, alaEServiceSapCvlanGroup=alaEServiceSapCvlanGroup, alaEServiceSapUniEntry=alaEServiceSapUniEntry, alaEServicePortRowStatus=alaEServicePortRowStatus, alaEServiceNniSvlanRowStatus=alaEServiceNniSvlanRowStatus, alaEServiceSapTable=alaEServiceSapTable, alaEServiceNniSvlanSvlan=alaEServiceNniSvlanSvlan, alcatelIND1EServiceMIBGroups=alcatelIND1EServiceMIBGroups, alcatelIND1EServiceMIBConformance=alcatelIND1EServiceMIBConformance, alaEServiceUNIProfileID=alaEServiceUNIProfileID, alaEServiceSapProfilePriorityMapMode=alaEServiceSapProfilePriorityMapMode, alaEServiceSapServiceID=alaEServiceSapServiceID, alaEServiceID=alaEServiceID, alcatelIND1eServiceMIBObjects=alcatelIND1eServiceMIBObjects, alaEServiceSapUniTable=alaEServiceSapUniTable, alaEServiceNniSvlanGroup=alaEServiceNniSvlanGroup, AlaEServiceUNIProfileProtocolTreatment=AlaEServiceUNIProfileProtocolTreatment, alaEServiceSapProfileIngressBW=alaEServiceSapProfileIngressBW, alaEServiceVlanType=alaEServiceVlanType, alaEServiceUNIProfileEntry=alaEServiceUNIProfileEntry, alaEServiceSapID=alaEServiceSapID, alaEServiceSapProfileEntry=alaEServiceSapProfileEntry, alaEServiceSapProfileFixedPriority=alaEServiceSapProfileFixedPriority, alaEService=alaEService, alaEServiceSapCvlanRowStatus=alaEServiceSapCvlanRowStatus, alaEServicePortGroup=alaEServicePortGroup, alaEServiceInfoGroup=alaEServiceInfoGroup, alaEServiceEntry=alaEServiceEntry, alaEServiceSVLAN=alaEServiceSVLAN, alaEServiceMode=alaEServiceMode, alaEServiceSapUniGroup=alaEServiceSapUniGroup, alaEServiceSapUniUni=alaEServiceSapUniUni, alaEServiceNniSvlanTable=alaEServiceNniSvlanTable, alaEServiceSapProfile=alaEServiceSapProfile, alaEServiceUNIProfileRowStatus=alaEServiceUNIProfileRowStatus, alaEServicePortVendorTpid=alaEServicePortVendorTpid, 
alaEServicePortLegacyGvrpPdu=alaEServicePortLegacyGvrpPdu, alaEServiceSapCvlanEntry=alaEServiceSapCvlanEntry, alaEServicePortID=alaEServicePortID, alaEServiceSapGroup=alaEServiceSapGroup, alaEServicePortLegacyMvrpPdu=alaEServicePortLegacyMvrpPdu, alaEServiceUNIProfile8023adTreatment=alaEServiceUNIProfile8023adTreatment, alaEServiceSapProfileBandwidthShare=alaEServiceSapProfileBandwidthShare, PYSNMP_MODULE_ID=alcatelIND1EServiceMIB, alaEServiceNniSvlanNni=alaEServiceNniSvlanNni, alaEServiceSapCvlanSapID=alaEServiceSapCvlanSapID, alaEServiceGroup=alaEServiceGroup, alaEServiceSapUniRowStatus=alaEServiceSapUniRowStatus, alaEServiceSapCvlanMapType=alaEServiceSapCvlanMapType, alaEServiceSapCvlanTable=alaEServiceSapCvlanTable, alcatelIND1EServiceMIBCompliances=alcatelIND1EServiceMIBCompliances, alaEServiceUNIProfileGvrpTreatment=alaEServiceUNIProfileGvrpTreatment)
# ---- file: /diventi/products/migrations/0028_auto_20200119_1557.py | repo: flavoi/diventi | license: Apache-2.0 ----
# Generated by Django 2.2.8 on 2020-01-19 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0027_auto_20191217_0738'),
]
operations = [
migrations.AlterField(
model_name='product',
name='price',
field=models.PositiveIntegerField(default=0, help_text='This price must be valued in euro cents. For example: 500 for 5.00€, 120 for 1.20€ etc.', verbose_name='price'),
),
]
# ---- file: /algorithms/libHIN/dataStructures.py | repo: wsgan001/embedding_graph | license: none ----
## core data structures
import networkx as nx
import numpy as np
import scipy.sparse as sp
from .decomposition import get_calculation_method
class Class:
def __init__(self, lab_id, name, members):
self.name = name
self.id = lab_id
self.index = -1
self.members = members # ids of members calculated using hierarchy of labels
self.member_indices = []
self.train_indices = [] # indices of training instances with this label
self.validate_indices = [] # indices of validate instances with this label
self.test_indices = [] # indices of test instances with this label
self.train_members = set() # ids of train members (intersection of basic_members and the train set)
self.test_members = set() # ids of test members (intersection of basic_members and the test set)
self.validate_members = set() # ids of validate members (intersection of basic_members and the validate set)
self.not_test_members = set()
def __repr__(self):
return self.name
def __str__(self):
return self.name
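# Summary comment (added for readability; it only describes the class that follows):
# HeterogeneousInformationNetwork bundles everything the decomposition pipeline needs --
# the raw networkx graph, the Class label objects defined above, the index maps for the
# "basic" (target) node type, the train/validate/test splits, and the cached results of
# the meta-path decompositions in `self.decomposed`.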
class HeterogeneousInformationNetwork:
def __init__(self, network, label_delimiter, weight_tag=False, target_tag=True):
self.label_list = [] # list of labels.
self.labels_by_id = {} # IDs of each label
self.graph = network
self.target_tag = target_tag
self.node_list = [] # List of all nodes in decomposition
self.node_indices = {} # Indices of all nodes in decomposition
self.basic_type = None # Basic node type (for decomposition)
self.label_array = None
self.label_matrix = None
self.train_indices = []
self.validate_indices = []
self.test_indices = []
self.train_ids = set()
self.validate_ids = set()
self.test_ids = set()
self.weighted = weight_tag ## include info on weighted edges
self.decomposed = {} # Dictionary of all performed decompositions (self.decomposed['PAP'] is one)
self.pairs = {}
self.midpoints = {}
self.validate_pprs = {}
self.test_pprs = {}
self.train_pprs = {}
self.validate_distances = {}
self.test_distances = {}
self.midpoint_files = {}
self.feature_vectors = {}
if network != None:
self.process_network(label_delimiter)
def add_label(self, node, label_id, label_name=None):
if label_name is None:
label_name = str(label_id)
if label_id in self.labels_by_id:
if self.labels_by_id[label_id] not in self.graph.node[node]['labels']:
self.graph.node[node]['labels'].append(self.labels_by_id[label_id])
self.labels_by_id[label_id].members.append(node)
else:
new_class = Class(label_id, label_name, [node])
self.label_list.append(new_class)
self.labels_by_id[label_id] = new_class
new_class.index = len(self.label_list) - 1
self.graph.node[node]['labels'].append(new_class)
def process_network(self, label_delimiter):
if self.target_tag:
basic_types = set([self.graph.node[x]['type'] for x in self.graph.node if 'labels' in self.graph.node[x]])
if len(basic_types) != 1:
                ## here, simply save the graph instead -- that is useful for the embedding case
raise Exception('Unclear target type!')
self.basic_type = basic_types.pop()
self.node_list = [x for x in self.graph.node if self.graph.node[x]['type'] == self.basic_type]
try:
self.node_list.sort(key=lambda x: float(x))
except ValueError:
self.node_list.sort()
self.node_indices = dict([(item, index) for index, item in enumerate(self.node_list)])
for node_id in self.node_list:
if len(self.graph.node[node_id]['labels']) > 0:
labels = self.graph.node[node_id]['labels'].split(label_delimiter)
self.graph.node[node_id]['labels'] = []
for label in labels:
self.add_label(node_id, label, label_name=label)
for lab in self.label_list:
if lab is not None:
temp_list = [mem for mem in lab.members if self.graph.node[mem]['type'] == self.basic_type]
lab.basic_members = set(temp_list)
self.label_array = - np.ones((max([len(self.graph.node[node]['labels']) for node in self.node_list]), len(self.node_list)))
for node in self.node_list:
tmp = self.graph.node[node]['labels']
self.label_array[:len(tmp), self.node_indices[node]] = [label.index for label in tmp]
self.create_label_matrix()
def create_label_matrix(self, weights=None):
self.label_matrix = np.zeros((len(self.node_list), len(self.label_list)))
for i, label in enumerate(self.label_list):
member_indices = [self.node_indices[x] for x in label.members]
if weights == 'balanced':
self.label_matrix[member_indices, i] = 1.0 / max(len(label.train_indices), 1)
else:
self.label_matrix[member_indices, i] = 1
def calculate_schema(self):
schema = nx.MultiDiGraph()
for node_start in self.graph.node:
for node_end in self.graph[node_start]:
for key in self.graph[node_start][node_end]:
start_type = self.graph.node[node_start]['type']
end_type = self.graph.node[node_end]['type']
edge_type = self.graph[node_start][node_end][key]['type']
has_type = False
if schema.has_edge(start_type, end_type):
for key in schema[start_type][end_type]:
if schema[start_type][end_type][key]['type'] == edge_type:
has_type = True
break
# if schema[start_type][end_type]['type'] != edge_type:
# raise Exception('Multiple edge types between equal node types are not supported!')
if not has_type:
schema.add_edge(start_type, end_type, type=edge_type)
return schema
def calculate_decomposition_candidates(self, max_decomposition_length=10):
schema = self.calculate_schema()
under_construction = [{'node_list': [self.basic_type], 'edge_list': []}]
candidate_lists = []
for i in range(max_decomposition_length - 1):
next_gens = []
for list_so_far in under_construction:
if list_so_far['node_list'][-1] != self.basic_type or len(list_so_far['node_list']) == 1:
current = list_so_far['node_list'][-1]
for neighbor in schema[current]:
if neighbor == self.basic_type:
append_to = candidate_lists
else:
append_to = next_gens
for key in schema[current][neighbor]:
append_to.append({
'node_list': list_so_far['node_list'] + [neighbor],
'edge_list': list_so_far['edge_list'] + [schema[current][neighbor][key]['type']]
})
under_construction = next_gens
return candidate_lists
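    # A minimal sketch of what `calculate_decomposition_candidates` returns (the node and
    # edge type names below are hypothetical, not taken from any real dataset): for a
    # schema Paper--writes--Author with basic type 'Paper', one candidate would look like
    #   {'node_list': ['Paper', 'Author', 'Paper'], 'edge_list': ['writes', 'writes']}
    # i.e. a meta-path that starts and ends at the basic (target) node type.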
def split_to_indices(self, train_indices=(), validate_indices=(), test_indices=()):
self.train_indices = train_indices
self.validate_indices = validate_indices
self.test_indices = test_indices
self.train_ids = set([self.node_list[i] for i in self.train_indices])
self.validate_ids = set([self.node_list[i] for i in self.validate_indices])
self.test_ids = set([self.node_list[i] for i in self.test_indices])
# calculate test representatives:
for train_index in self.train_indices:
train_node = self.node_list[train_index]
for label in self.graph.node[train_node]['labels']:
label.train_indices.append(train_index)
label.train_members.add(self.node_list[train_index])
label.not_test_members.add(self.node_list[train_index])
for validate_index in self.validate_indices:
validate_node = self.node_list[validate_index]
for label in self.graph.node[validate_node]['labels']:
label.validate_indices.append(validate_index)
label.validate_members.add(self.node_list[validate_index])
label.not_test_members.add(self.node_list[validate_index])
for test_index in self.test_indices:
test_node = self.node_list[test_index]
for label in self.graph.node[test_node]['labels']:
label.test_indices.append(test_index)
label.test_members.add(self.node_list[test_index])
for label in self.label_list:
label.not_test_members_num = len(label.not_test_members)
def split_to_parts(self,lst,n):
return [lst[i::n] for i in range(n)]
    def decompose_from_iterator(self, name, weighing, summing, generator=None, degrees=None, parallel=True, pool=None):
classes = [lab for lab in self.label_list if lab and len(lab.not_test_members) > 0]
universal_set = list(set(self.train_ids).union(self.validate_ids))
universal_inv = {}
for i, item in enumerate(universal_set):
universal_inv[item] = i
universal_set = set(universal_set)
label_matrix = np.zeros((len(universal_set), len(classes)))
for i, label in enumerate(classes):
label_matrix[[universal_inv[item] for item in label.not_test_members], i] = 1
nn = len(self.node_list)
matrix = sp.csr_matrix((nn, nn))
n = len(universal_set)
importance_calculator = get_calculation_method(weighing)
if generator is None:
raise Exception('No midpoint generator!')
avgdegree = None
if weighing != 'okapi':
degrees = None
avgdegree = None
if degrees is not None:
avgdegree = sum(degrees.values()) * 1.0 / len(degrees)
i=0
tmp_container = []
bsize = 5
if parallel:
## parallel for edge type
while True:
tmp_container = list(next(generator) for _ in range(bsize))
if len(tmp_container) == 0:
break
pinput = []
for j in tmp_container:
pinput.append((classes,universal_set,j,n))
results = pool.starmap(importance_calculator,pinput)
## construct main matrix
for item, importances in zip(tmp_container, results):
importance = np.sum(importances, axis=0)
i1 = [self.node_indices[x] for x in item]
i2 = [[x] for x in i1]
to_add = sp.csr_matrix((nn, nn))
if len(i1) > 1000:
                        ## split to prevent memory leaks when doing Hadamard products
parts_first = self.split_to_parts(i1,4)
parts_second = self.split_to_parts(i2,4)
for x in range(len(parts_first)):
to_add[parts_first[x], parts_second[x]] = importance
else:
to_add[i2, i1] = importance
to_add = to_add.tocsr()
matrix += to_add
else:
## non-parallel
for item in generator:
                ## compute the importance for each class
importances = importance_calculator(classes, universal_set, item, n, degrees=degrees, avgdegree=avgdegree)
importance = np.sum(importances, axis=0)
i1 = [self.node_indices[x] for x in item]
i2 = [[x] for x in i1]
to_add = sp.csr_matrix((nn, nn))
to_add[i2, i1] = importance
to_add = to_add.tocsr() # this prevents memory leaks
matrix += to_add
        ## Hadamard product
self.decomposed[name] = matrix
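        # Descriptive note (added): at this point `matrix` is an (n_nodes x n_nodes) scipy
        # CSR matrix over all nodes of the basic type -- every midpoint set produced by the
        # generator adds its summed importance weight to the entries linking the basic nodes
        # it connects. The finished decomposition is cached under `self.decomposed[name]`.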
def midpoint_generator(self, node_sequence, edge_sequence):
if len(node_sequence) % 2 == 0:
raise Exception('In a split of length %i, a midpoint is not well defined!' % len(node_sequence))
middle_type = node_sequence[int(len(node_sequence) / 2)]
# forward_sequence = %TODO: INVERSE SEQUENCES!!!!!!!!!
for node in self.graph:
if self.graph.node[node]['type'] == middle_type:
points = [node]
i = int(len(node_sequence)/2 + 1)
while i < len(node_sequence):
current_type = node_sequence[i]
new_points = []
for point in points:
new_points += [x for x in self.graph[point] if self.graph.node[x]['type'] == current_type]
points = new_points
i += 1
if len(points) > 1:
yield points
# ---- file: /Labs/Lab 5/lab05_soln/raytracer_main.py | repo: ThomasMGilman/ETGG1803_ConceptsOf3DGraphicsAndMath | license: none ----
import raytracer
import objects3d
import time
import pygame
import math3d
caseNum = 2
# Pygame setup
if caseNum == 1:
win_width = 700; win_height = 150;
elif caseNum == 2:
win_width = 800; win_height = 600;
else:
win_width = 300; win_height = 200;
pygame.display.init()
screen = pygame.display.set_mode((win_width, win_height))
clock = pygame.time.Clock()
done = False
# Raytracer setup
if caseNum == 1:
cameraPos = math3d.VectorN(0, 0, -20)
cameraUp = math3d.VectorN(0, 1, 0)
cameraCoi = math3d.VectorN(0, 0, 0)
cameraNear = 3.2
cameraFov = 45.0
elif caseNum == 2:
cameraPos = math3d.VectorN(5, 7, -20)
cameraUp = math3d.VectorN(1, 10, 0).normalized()
cameraCoi = math3d.VectorN(2, 5, 3)
cameraNear = 1.5
cameraFov = 60.0
elif caseNum == 3:
cameraPos = math3d.VectorN(-5, 7, -30)
cameraUp = math3d.VectorN(0, 1, 0)
cameraCoi = math3d.VectorN(2, 5, 3)
cameraNear = 1.5
cameraFov = 60.0
camera = objects3d.Camera(cameraPos, cameraCoi, cameraUp, screen, cameraFov, cameraNear, True)
sphere1 = objects3d.Sphere(math3d.VectorN(2,5,3), 7.0, math3d.VectorN(1,0,0))
plane1 = objects3d.Plane(math3d.VectorN(0,1,0), 5.0, math3d.VectorN(0,1,0))
plane2 = objects3d.Plane(math3d.VectorN(0.1,1,0), 4.0, math3d.VectorN(0,0,1))
box1 = objects3d.AABB(math3d.VectorN(2, 9, -6), math3d.VectorN(8, 15, 0), math3d.VectorN(1,1,0))
#mesh1 = objects3d.Polymesh("sword.obj", math3d.VectorN(-10,8,3), 1.0, math3d.VectorN(1.0,0.3,0.8))
rt = raytracer.Raytracer(camera)
rt.addObject(sphere1)
rt.addObject(plane1)
rt.addObject(plane2)
rt.addObject(box1)
#rt.addObject(mesh1)
totalTime = 0.0
currentLine = 0
print("\n+==============================================+")
print("| PHASE II tests |")
print("+==============================================+")
if caseNum == 1:
testPts = [(0, 0), (win_width - 1, win_height - 1), (win_width // 2, win_height // 2), (113, 23), (623,83)]
else:
testPts = [(0, 0), (win_width - 1, win_height - 1), (win_width // 2, win_height // 2), (113, 542), (723,11)]
for pygamePos in testPts:
camera.getViewplanePosition(pygamePos[0], pygamePos[1], True)
# Game Loop
while not done:
# Update
if currentLine < win_height:
rt.renderOneLine(currentLine)
currentLine += 1
dt = clock.tick()
totalTime += dt
# Input
event = pygame.event.poll()
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
done = True
# Draw (nothing to do!)
pygame.display.flip()
# Pygame shutdown
pygame.display.quit()
# ---- file: /Source Codes/AtCoder/abc036/D/4119191.py | repo: Kawser-nerd/CLCDSA | license: none ----
import sys
stdin = sys.stdin
sys.setrecursionlimit(10**5)
def li(): return map(int, stdin.readline().split())
def li_(): return map(lambda x: int(x)-1, stdin.readline().split())
def lf(): return map(float, stdin.readline().split())
def ls(): return stdin.readline().split()
def ns(): return stdin.readline().rstrip()
def lc(): return list(ns())
def ni(): return int(stdin.readline())
def nf(): return float(stdin.readline())
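# Explanatory comment (added; interpretation inferred from the code and the file path):
# the DFS below solves AtCoder ABC036 D -- count the black/white colourings of a tree in
# which no two adjacent vertices are both black, modulo 10**9+7. For each vertex it
# returns the pair (ways_total, ways_vertex_is_white):
#   vertex white -> each child may be either colour -> prod(child_total)
#   vertex black -> each child must be white        -> prod(child_white)
# Note the local names are swapped relative to their meaning: inside dfs, `topwht`
# accumulates prod(child_total) and `topall` accumulates prod(child_white).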
def dfs(graph:list, par:int, cur:int, mod:int):
children = []
for child in graph[cur]:
if child == par:
continue
children.append(child)
if len(children) == 0:
return 2, 1
else:
topall = 1
topwht = 1
for child in children:
topallchild, topwhtchild = dfs(graph, cur, child, mod)
topwht *= topallchild
topwht %= mod
topall *= topwhtchild
topall %= mod
return (topall+topwht)%mod, topwht
n = ni()
graph = [[] for _ in range(n)]
MOD = 10**9+7
for _ in range(n-1):
a,b = li_()
graph[a].append(b)
graph[b].append(a)
ans, _ = dfs(graph, 0, 0, MOD)
print(ans)
# ---- file: /worm_plates/collect/refine_coords.py | repo: ver228/worm-eggs | license: none ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 16:22:37 2019
@author: avelinojaver
"""
from pathlib import Path
import pandas as pd
import tables
import tqdm
import cv2
import numpy as np
from skimage.feature import peak_local_max
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
#%%
def correct_coords(img_, coords_, min_distance = 1, max_dist = 5):
#%%
peaks = peak_local_max(img_, min_distance = min_distance)
peaks = peaks[:, ::-1]
    #remove `peaks` that are not close to any `coord` by at most `max_dist`
D = cdist(coords_, peaks)
#peaks with an intensity smaller than the coords intensities will be spurious
peaks_ints = img_[peaks[:, 1], peaks[:, 0]]
cc = coords_.astype(np.int)
coords_int = img_[cc[:, 1], cc[:, 0]]
good = (D <= max_dist).any(axis=0)
good &= peaks_ints >= coords_int.min()
D = D[:, good]
valid_peaks = peaks[good]
#find the closest peaks
closest_indexes = np.argmin(D, axis=1)
    #we will consider it an easy assignment if the closest peak is assigned to only one coord
u_indexes = np.unique(closest_indexes)
counts = np.bincount(closest_indexes)[u_indexes]
easy_assigments = u_indexes[counts == 1]
valid_pairs = [(ii, x) for ii, x in enumerate(closest_indexes) if x in easy_assigments]
if len(valid_pairs) > 0:
easy_rows, easy_cols = map(np.array, zip(*valid_pairs))
easy_cost = D[easy_rows, easy_cols]
good = easy_cost<max_dist
easy_rows = easy_rows[good]
easy_cols = easy_cols[good]
assert (D[easy_rows, easy_cols] <= max_dist).all()
        #hard assignments are cases where a peak is the closest one to more than one coord
ambigous_rows = np.ones(D.shape[0], np.bool)
ambigous_rows[easy_rows] = False
ambigous_rows, = np.where(ambigous_rows)
ambigous_cols = np.ones(D.shape[1], np.bool)
ambigous_cols[easy_cols] = False
ambigous_cols, = np.where(ambigous_cols)
else:
ambigous_rows = np.arange(D.shape[0])
ambigous_cols = np.arange(D.shape[1])
easy_rows = np.array([], dtype=np.int)
easy_cols = np.array([], dtype=np.int)
D_r = D[ambigous_rows][:, ambigous_cols]
good = (D_r <= max_dist).any(axis=0)
D_r = D_r[:, good]
ambigous_cols = ambigous_cols[good]
    #for these we use the Hungarian algorithm for the assignment. It is too slow to run over the whole matrix
ri, ci = linear_sum_assignment(D_r)
hard_rows, hard_cols = ambigous_rows[ri], ambigous_cols[ci]
assert (D_r[ri, ci] == D[hard_rows, hard_cols]).all()
hard_cost = D[hard_rows, hard_cols]
good = hard_cost<max_dist
hard_rows = hard_rows[good]
hard_cols = hard_cols[good]
#let's combine both and assign the corresponding peak
rows = np.concatenate((easy_rows, hard_rows))
cols = np.concatenate((easy_cols, hard_cols))
new_coords = coords_.copy()
new_coords[rows] = valid_peaks[cols] #coords that do not satisfy the close peak condition will not be changed
return new_coords
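# Minimal usage sketch for correct_coords (hypothetical arrays, not part of the original
# script): rough annotations are snapped to the nearest local maximum within `max_dist`
# pixels using the easy/Hungarian assignment above.
#   img = np.zeros((100, 100), np.uint8); img[50, 50] = 255
#   rough = np.array([[48., 47.]])                  # (x, y) guesses
#   refined = correct_coords(img, rough, min_distance=1, max_dist=5)
#   # expected: refined ~ array([[50., 50.]])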
#%%
if __name__ == '__main__':
_debug = False
min_distance = 2
max_dist = 5
r = max_dist*2+1
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(r, r))
src_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_adam/'
dst_root_dir = Path.home() / 'workspace/localization/data/worm_eggs_adam_refined/'
src_files = [x for x in src_root_dir.rglob('*.hdf5') if not x.name.startswith('.')]
for src_file in tqdm.tqdm(src_files):
with pd.HDFStore(src_file, 'r') as fid:
df = fid['/coords']
img = fid.get_node('/img')[:]
#%%
#create a mask using the known coordinates
valid_mask = np.zeros_like(img)
cols = df['cx'].astype(np.int)
rows = df['cy'].astype(np.int)
valid_mask[rows, cols] = 1
valid_mask = cv2.dilate(valid_mask, kernel) > 0
        #then I will use the inverted image to create local maxima corresponding to the refined egg peaks
img_peaks = ~img
img_peaks -= img_peaks[valid_mask].min()
img_peaks[~valid_mask] = 0
#img_peaks = cv2.blur(img_peaks, (1,1))
#%%
        #finally use the correct_coords function to assign each labelled coord to a local maximum
cc = df[['cx','cy']].values
new_coords = correct_coords(img_peaks, cc, min_distance, max_dist)
coords = pd.DataFrame({'type_id':1, 'cx':new_coords[:,0], 'cy':new_coords[:,1]})
coords = coords.to_records(index=False)
dst_file = str(src_file).replace(str(src_root_dir), str(dst_root_dir))
dst_file = Path(dst_file)
dst_file.parent.mkdir(exist_ok=True, parents=True)
with tables.File(str(dst_file), 'w') as fid:
fid.create_carray('/', 'img', obj = img)
fid.create_table('/', 'coords', obj = coords)
#%%
if _debug:
#%%
import matplotlib.pylab as plt
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].imshow(img, cmap = 'gray')
axs[1].imshow(img_peaks, cmap = 'gray')
for ax in axs:
ax.plot(df['cx'], df['cy'], '.r')
ax.plot(coords['cx'], coords['cy'], '.g')
plt.show()
#%%
break
# ---- file: /+100ns/Co_optimized/step1_dc/set.py | repo: linfranksong/TM-enzyme_input | license: none ----
import os
dir = os.path.dirname(os.path.realpath(__file__)) + '/'
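# Summary (added; inferred from the commands below): for each value `a`, this driver clones
# a template directory, creates one sub-directory per lambda window in `array`, renames the
# eq/us input and PBS files after the window, patches the MMM/XXX/NNN/PPP placeholders with
# sed, and submits the first equilibration job (0_eq.pbs) with sbatch.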
#for a in [150,200,250,300,350,400,450,500,550,600]:
for a in [150]:
#for a in [200,250,300,350,400,450,500,550,600]:
os.system("rm -r %s_dc_repe"%(a))
os.system("cp -r temp/ %s_dc_repe"%(a))
adir=dir+ "%s_dc_repe/"%(a)
os.chdir(adir)
os.system("sed -i 's/MMM/%s/g' */*pbs"%(a))
array= [0,0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078,1.0]
for n in range(1,len(array)-1):
i=array[n]
os.system("rm -r %s"%(i))
os.system("cp -r files %s"%(i))
wdir=adir+"%s/"%(i)
os.chdir(wdir)
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/NNN/%s/g' *.pbs"%(array[n+1]))
os.system("sed -i 's/PPP/%s/g' *.pbs"%(array[n-1]))
os.chdir(adir)
sdir=adir+"0/"
os.chdir(sdir)
i=0
os.system("cp /mnt/gs18/scratch/users/songlin3/run/glx-0904/+100ns/Co_optimized/step0_fep/%s_fep/1.0/%s_1.0_eq_center.rst ."%(a,a))
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("sbatch 0_eq.pbs")
sdir=adir+"1.0/"
os.chdir(sdir)
i=1.0
os.system("mv eq.in %s_eq.in"%(i))
os.system("mv us.in %s_us.in"%(i))
os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
os.system("mv eq.pbs %s_eq.pbs"%(i))
os.system("mv us.pbs %s_us.pbs"%(i))
os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
os.system("sed -i 's/MMM/%s/g' center.in"%(a))
os.chdir(dir)
# ---- file: /0 Python Fundamental/25.c.filter.py | repo: raviitsoft/Python_Fundamental_DataScience | license: none ----
ages = [5, 12, 17, 18, 24, 32]
def myFunc(x):
if x < 18:
return False
else:
return True
adults = filter(myFunc, ages)
# print(adults)
# print(list(adults))
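# Note (added): in Python 3, filter() returns a lazy iterator, which is why printing
# `adults` directly would show a <filter object ...> and why list() is used to
# materialise the results in the examples below.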
#############################
z = filter(lambda a: True if a >= 18 else False, ages)
print(list(z))
z = filter(lambda a: a >= 18, ages)
print(list(z))
############################
x = [1, 2, 3, 4, 5, 99]
y = [1, 2, 6, 7, 8, 99]
z = list(filter(lambda a: a in x, y))
# print(z)
z = list(filter(lambda x: True if x<3 else False, x))
print(z)
z = list(filter(lambda x: x<3, x))
print(z)
# ---- file: /airflow/api_connexion/schemas/dag_warning_schema.py | repo: ishiis/airflow | licenses: Apache-2.0, BSD-3-Clause, MIT ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.dagwarning import DagWarning
class DagWarningSchema(SQLAlchemySchema):
    """Dag warning schema"""
class Meta:
"""Meta"""
model = DagWarning
dag_id = auto_field(data_key="dag_id", dump_only=True)
warning_type = auto_field()
message = auto_field()
timestamp = auto_field(format="iso")
class DagWarningCollection(NamedTuple):
"""List of dag warnings with metadata"""
dag_warnings: List[DagWarning]
total_entries: int
class DagWarningCollectionSchema(Schema):
    """Dag warning collection schema"""
dag_warnings = fields.List(fields.Nested(DagWarningSchema))
total_entries = fields.Int()
dag_warning_schema = DagWarningSchema()
dag_warning_collection_schema = DagWarningCollectionSchema()
# ---- file: /NtupleAnalysis/src/Hplus2tbAnalysis/work/plotting/plotHistograms.py | repo: attikis/HplusHW | license: none ----
#!/usr/bin/env python
'''
Usage (single plot):
./plotHistograms.py -m <pseudo_mcrab_directory> <jsonfile>
Usage (multiple plots):
./plotHistograms.py -m <pseudo_mcrab_directory> json/AfterAllSelections/*.json
or
./plotHistograms.py -m <pseudo_mcrab_directory> json/AfterAllSelections/*.json json/AfterStandardSelections/*.json
Last Used:
./plotHistograms.py -m Hplus2tbAnalysis_161128_082955/ json/AfterAllSelections/BjetPt.json
or
./plotHistograms.py -m Hplus2tbAnalysis_161128_082955/ json/AfterAllSelections/*.json
or
./plotHistograms.py -m Hplus2tbAnalysis_161128_082955/ json/AfterAllSelections/*/*.json
'''
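# A minimal sketch of the JSON files this script consumes (keys taken from the j[...]
# lookups further down in this file; the values and the key list are illustrative only):
# {
#   "analysis": "...", "dataEra": "...", "searchMode": "...", "optMode": "",
#   "samples": ["..."],
#   "histogram": "path/to/histogram/in/file",
#   "title": "BjetPt", "saveDir": "plots/", "saveFormats": [".png", ".pdf"],
#   "xlabel": "...", "ylabel": "...", "gridX": "True", "gridY": "False",
#   "rebinX": 1, "rebinY": "None", "logY": "True", ...
# }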
#================================================================================================
# Imports
#================================================================================================
import os
import sys
from optparse import OptionParser
import getpass
import socket
import json
import HiggsAnalysis.NtupleAnalysis.tools.dataset as dataset
import HiggsAnalysis.NtupleAnalysis.tools.tdrstyle as tdrstyle
import HiggsAnalysis.NtupleAnalysis.tools.styles as styles
import HiggsAnalysis.NtupleAnalysis.tools.plots as plots
import HiggsAnalysis.NtupleAnalysis.tools.histograms as histograms
import HiggsAnalysis.NtupleAnalysis.tools.aux as aux
import ROOT
#================================================================================================
# Main
#================================================================================================
def Print(msg, printHeader=False):
fName = __file__.split("/")[-1]
if printHeader==True:
print "=== ", fName
print "\t", msg
else:
print "\t", msg
return
def Verbose(msg, printHeader=True, verbose=False):
if not opts.verbose:
return
Print(msg, printHeader)
return
def GetLumi(datasetsMgr):
    Verbose("Determining Integrated Luminosity")
lumi = 0.0
for d in datasetsMgr.getAllDatasets():
if d.isMC():
continue
else:
lumi += d.getLuminosity()
Verbose("Luminosity = %s (pb)" % (lumi), True )
return lumi
def GetDatasetsFromDir(opts, json):
Verbose("Getting datasets")
if len(json["samples"])<1:
Print("No samples defined in the JSON file. Exit", True)
print __doc__
sys.exit()
else:
return dataset.getDatasetsFromMulticrabDirs([opts.mcrab],
dataEra=json["dataEra"],
searchMode=json["searchMode"],
analysisName=json["analysis"],
includeOnlyTasks="|".join(json["samples"]),
optimizationMode=json["optMode"])
def Plot(jsonfile, opts):
Verbose("Plotting")
with open(os.path.abspath(jsonfile)) as jfile:
j = json.load(jfile)
Print("Plotting %s in %s" % (j["title"], j["saveDir"]), True)
# Setup the style
style = tdrstyle.TDRStyle()
style.setGridX(j["gridX"]=="True")
style.setGridY(j["gridY"]=="True")
# Set ROOT batch mode boolean
ROOT.gROOT.SetBatch(opts.batchMode)
# Setup & configure the dataset manager
datasetsMgr = GetDatasetsFromDir(opts, j)
#datasetsMgr.loadLuminosities()
datasetsMgr.updateNAllEventsToPUWeighted()
if opts.verbose:
datasetsMgr.PrintCrossSections()
datasetsMgr.PrintLuminosities()
# Set/Overwrite cross-sections
for d in datasetsMgr.getAllDatasets():
if "ChargedHiggs" in d.getName():
datasetsMgr.getDataset(d.getName()).setCrossSection(1.0)
plots.mergeRenameReorderForDataMC(datasetsMgr)
# Print dataset information
datasetsMgr.PrintInfo()
# Get Integrated Luminosity
lumi = GetLumi(datasetsMgr)
# Plot the histogram
DataMCPlot(datasetsMgr, j)
return
def DataMCPlot(datasetsMgr, json):
Verbose("Creating Data-MC plot")
# Create the Data-MC Plot
p = plots.DataMCPlot(datasetsMgr, json["histogram"])
# Customise histograms before drawing (before being converted to TGraphs)
if "drawStyle" in json:
p.histoMgr.setHistoDrawStyleAll(json["drawStyle"])
if "rebinx" in json:
p.histoMgr.forEachHisto(lambda h: h.getRootHisto().RebinX(json["rebinX"]))
if "rebiny" in json:
if json["rebinY"] != "None":
p.histoMgr.forEachHisto(lambda h: h.getRootHisto().RebinY(json["rebinY"]))
# Label size (optional. Commonly Used in counters)
xlabelSize = None
if "xlabelsize" in json:
xlabelSize = json["xlabelsize"]
ylabelSize = None
if "ylabelsize" in json:
ylabelSize = json["ylabelsize"]
# Draw a customised plot
saveName = os.path.join(json["saveDir"], json["title"])
plots.drawPlot(p,
saveName,
xlabel = json["xlabel"],
ylabel = json["ylabel"],
rebinX = json["rebinX"],
rebinY = json["rebinY"],
ratioYlabel = json["ratioYlabel"],
ratio = json["ratio"]=="True",
stackMCHistograms = json["stackMCHistograms"]=="True",
ratioInvert = json["ratioInvert"]=="True",
addMCUncertainty = json["addMCUncertainty"]=="True",
addLuminosityText = json["addLuminosityText"]=="True",
addCmsText = json["addCmsText"]=="True",
cmsExtraText = json["cmsExtraText"],
opts = json["opts"],
opts2 = json["ratioOpts"],
log = json["logY"]=="True",
errorBarsX = json["errorBarsX"]=="True",
moveLegend = json["moveLegend"],
# cutLine = json["cutValue"], #cannot have this and "cutBox" defined
cutBox = {"cutValue": json["cutValue"], "fillColor": json["cutFillColour"], "box": json["cutBox"]=="True", "line": json["cutLine"]=="True", "greaterThan": json["cutGreaterThan"]=="True"},
xlabelsize = xlabelSize,
ylabelsize = ylabelSize,
)
# Remove legend?
if json["removeLegend"] == "True":
p.removeLegend()
# Additional text
histograms.addText(json["extraText"].get("x"), json["extraText"].get("y"), json["extraText"].get("text"), json["extraText"].get("size") )
# Save in all formats chosen by user
saveFormats = json["saveFormats"]
for i, ext in enumerate(saveFormats):
Print("%s" % saveName + ext, i==0)
p.saveAs(saveName, formats=saveFormats)
return
def main(opts):
Verbose("main function")
jsonFiles = []
# For-loop: All system script arguments
for arg in sys.argv[1:]:
# Skip if not a json file
if ".json" not in arg:
continue
# Sanity check - File exists
if not os.path.exists(arg):
Print("The JSON file \"%s\" does not seem to be a valid path.. Please check that the file exists. Exit" % (arg), True)
sys.exit()
# Load & append json file
with open(os.path.abspath(arg)) as jsonFile:
try:
json.load(jsonFile)
jsonFiles.append(arg)
except ValueError, e:
Print("Problem loading JSON file %s. Please check the file" % (arg))
sys.exit()
# Sanity check - At least 1 json file found
if len(jsonFiles) == 0:
Print("No JSON files found. Please read the script instructions. Exit", True)
print __doc__
sys.exit()
# For-loop: All json files
for j in jsonFiles:
Print("Processing JSON file \"%s\"" % (j), True)
Plot(j, opts)
return
#================================================================================================
# Main
#================================================================================================
if __name__ == "__main__":
# Default Settings
global opts
BATCHMODE = True
VERBOSE = False
parser = OptionParser(usage="Usage: %prog [options]" , add_help_option=False,conflict_handler="resolve")
parser.add_option("-m", "--mcrab", dest="mcrab", action="store",
help="Path to the multicrab directory for input")
parser.add_option("-b", "--batchMode", dest="batchMode", action="store_false", default=BATCHMODE,
help="Enables batch mode (canvas creation NOT generates a window) [default: %s]" % BATCHMODE)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=VERBOSE,
help="Enables verbose mode (for debugging purposes) [default: %s]" % VERBOSE)
(opts, parseArgs) = parser.parse_args()
# Require at least two arguments (script-name, path to multicrab)
if opts.mcrab == None:
Print("Not enough arguments passed to script execution. Printing docstring & EXIT.")
print __doc__
sys.exit(0)
# Call the main function
main(opts)
if not opts.batchMode:
raw_input("=== plotHistograms.py: Press any key to quit ROOT ...")
# ---- file: /rllib/algorithms/algorithm.py | repo: vakker/ray | licenses: MIT, BSD-3-Clause, Apache-2.0 ----
from collections import defaultdict
import concurrent
import copy
from datetime import datetime
import functools
import gym
import importlib
import json
import logging
import math
import numpy as np
import os
from packaging import version
import pkg_resources
import tempfile
import time
from typing import (
Callable,
Container,
DefaultDict,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
)
from ray.rllib.offline.offline_evaluator import OfflineEvaluator
import tree
import ray
from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
from ray.actor import ActorHandle
from ray.air.checkpoint import Checkpoint
import ray.cloudpickle as pickle
from ray.exceptions import GetTimeoutError, RayActorError, RayError
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.callbacks import DefaultCallbacks
from ray.rllib.algorithms.registry import ALGORITHMS as ALL_ALGORITHMS
from ray.rllib.env.env_context import EnvContext
from ray.rllib.env.utils import _gym_env_creator
from ray.rllib.evaluation.episode import Episode
from ray.rllib.evaluation.metrics import (
collect_episodes,
collect_metrics,
summarize_episodes,
)
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.common import (
STEPS_TRAINED_THIS_ITER_COUNTER, # TODO: Backward compatibility.
)
from ray.rllib.execution.parallel_requests import AsyncRequestsManager
from ray.rllib.execution.rollout_ops import synchronous_parallel_sample
from ray.rllib.execution.train_ops import multi_gpu_train_one_step, train_one_step
from ray.rllib.offline import get_offline_io_resource_bundles
from ray.rllib.offline.estimators import (
OffPolicyEstimator,
ImportanceSampling,
WeightedImportanceSampling,
DirectMethod,
DoublyRobust,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch, concat_samples
from ray.rllib.utils import deep_update, FilterManager, merge_dicts
from ray.rllib.utils.annotations import (
DeveloperAPI,
ExperimentalAPI,
OverrideToImplementCustomLogic,
OverrideToImplementCustomLogic_CallToSuperRecommended,
PublicAPI,
override,
)
from ray.rllib.utils.checkpoints import CHECKPOINT_VERSION, get_checkpoint_info
from ray.rllib.utils.debug import update_global_seed_if_necessary
from ray.rllib.utils.deprecation import (
DEPRECATED_VALUE,
Deprecated,
deprecation_warning,
)
from ray.rllib.utils.error import ERR_MSG_INVALID_ENV_DESCRIPTOR, EnvError
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.metrics import (
NUM_AGENT_STEPS_SAMPLED,
NUM_AGENT_STEPS_SAMPLED_THIS_ITER,
NUM_AGENT_STEPS_TRAINED,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_SAMPLED_THIS_ITER,
NUM_ENV_STEPS_TRAINED,
SYNCH_WORKER_WEIGHTS_TIMER,
TRAINING_ITERATION_TIMER,
)
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.policy import validate_policy_id
from ray.rllib.utils.pre_checks.multi_agent import check_multi_agent
from ray.rllib.utils.replay_buffers import MultiAgentReplayBuffer
from ray.rllib.utils.spaces import space_utils
from ray.rllib.utils.typing import (
AgentID,
AlgorithmConfigDict,
EnvCreator,
EnvInfoDict,
EnvType,
EpisodeID,
PartialAlgorithmConfigDict,
PolicyID,
PolicyState,
ResultDict,
SampleBatchType,
TensorStructType,
TensorType,
)
from ray.tune.execution.placement_groups import PlacementGroupFactory
from ray.tune.experiment.trial import ExportFormat
from ray.tune.logger import Logger, UnifiedLogger
from ray.tune.registry import ENV_CREATOR, _global_registry
from ray.tune.resources import Resources
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.trainable import Trainable
from ray.util import log_once
from ray.util.timer import _Timer
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
def with_common_config(extra_config: PartialAlgorithmConfigDict) -> AlgorithmConfigDict:
"""Returns the given config dict merged with common agent confs.
Args:
extra_config: A user defined partial config
which will get merged with a default AlgorithmConfig() object and returned
as plain python dict.
Returns:
AlgorithmConfigDict: The merged config dict resulting from AlgorithmConfig()
plus `extra_config`.
"""
return Algorithm.merge_trainer_configs(
AlgorithmConfig().to_dict(), extra_config, _allow_unknown_configs=True
)
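# Example (sketch, values illustrative): with_common_config({"lr": 0.0003, "train_batch_size": 4000})
# returns the full default AlgorithmConfig dict with those two keys overridden; unknown
# top-level keys are tolerated because `_allow_unknown_configs=True` is passed above.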
@PublicAPI
class Algorithm(Trainable):
"""An RLlib algorithm responsible for optimizing one or more Policies.
Algorithms contain a WorkerSet under `self.workers`. A WorkerSet is
normally composed of a single local worker
(self.workers.local_worker()), used to compute and apply learning updates,
and optionally one or more remote workers (self.workers.remote_workers()),
used to generate environment samples in parallel.
Each worker (remotes or local) contains a PolicyMap, which itself
may contain either one policy for single-agent training or one or more
policies for multi-agent training. Policies are synchronized
automatically from time to time using ray.remote calls. The exact
synchronization logic depends on the specific algorithm used,
but this usually happens from local worker to all remote workers and
after each training update.
You can write your own Algorithm classes by sub-classing from `Algorithm`
or any of its built-in sub-classes.
This allows you to override the `execution_plan` method to implement
your own algorithm logic. You can find the different built-in
algorithms' execution plans in their respective main py files,
e.g. rllib.algorithms.dqn.dqn.py or rllib.algorithms.impala.impala.py.
The most important API methods a Algorithm exposes are `train()`,
`evaluate()`, `save()` and `restore()`.
"""
# Whether to allow unknown top-level config keys.
_allow_unknown_configs = False
# List of top-level keys with value=dict, for which new sub-keys are
# allowed to be added to the value dict.
_allow_unknown_subkeys = [
"tf_session_args",
"local_tf_session_args",
"env_config",
"model",
"optimizer",
"multiagent",
"custom_resources_per_worker",
"evaluation_config",
"exploration_config",
"replay_buffer_config",
"extra_python_environs_for_worker",
"input_config",
"output_config",
]
# List of top level keys with value=dict, for which we always override the
# entire value (dict), iff the "type" key in that value dict changes.
_override_all_subkeys_if_type_changes = [
"exploration_config",
"replay_buffer_config",
]
# List of keys that are always fully overridden if present in any dict or sub-dict
_override_all_key_list = ["off_policy_estimation_methods"]
_progress_metrics = [
"episode_reward_mean",
"evaluation/episode_reward_mean",
"num_env_steps_sampled",
"num_env_steps_trained",
]
@staticmethod
def from_checkpoint(
checkpoint: Union[str, Checkpoint],
policy_ids: Optional[Container[PolicyID]] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
) -> "Algorithm":
"""Creates a new algorithm instance from a given checkpoint.
Note: This method must remain backward compatible from 2.0.0 on.
Args:
checkpoint: The path (str) to the checkpoint directory to use
or an AIR Checkpoint instance to restore from.
policy_ids: Optional list of PolicyIDs to recover. This allows users to
restore an Algorithm with only a subset of the originally present
Policies.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies,
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
Returns:
The instantiated Algorithm.
"""
checkpoint_info = get_checkpoint_info(checkpoint)
# Not possible for (v0.1) (algo class and config information missing
# or very hard to retrieve).
if checkpoint_info["checkpoint_version"] == version.Version("0.1"):
raise ValueError(
"Cannot restore a v0 checkpoint using `Algorithm.from_checkpoint()`!"
"In this case, do the following:\n"
"1) Create a new Algorithm object using your original config.\n"
"2) Call the `restore()` method of this algo object passing it"
" your checkpoint dir or AIR Checkpoint object."
)
if checkpoint_info["checkpoint_version"] < version.Version("1.0"):
raise ValueError(
"`checkpoint_info['checkpoint_version']` in `Algorithm.from_checkpoint"
"()` must be 1.0 or later! You are using a checkpoint with "
f"version v{checkpoint_info['checkpoint_version']}."
)
state = Algorithm._checkpoint_info_to_algorithm_state(
checkpoint_info=checkpoint_info,
policy_ids=policy_ids,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
return Algorithm.from_state(state)
@staticmethod
def from_state(state: Dict) -> "Algorithm":
"""Recovers an Algorithm from a state object.
The `state` of an instantiated Algorithm can be retrieved by calling its
`get_state` method. It contains all information necessary
to create the Algorithm from scratch. No access to the original code (e.g.
configs, knowledge of the Algorithm's class, etc..) is needed.
Args:
state: The state to recover a new Algorithm instance from.
Returns:
A new Algorithm instance.
"""
algorithm_class: Type[Algorithm] = state.get("algorithm_class")
if algorithm_class is None:
raise ValueError(
"No `algorithm_class` key was found in given `state`! "
"Cannot create new Algorithm."
)
# algo_class = get_algorithm_class(algo_class_name)
# Create the new algo.
config = state.get("config")
if not config:
raise ValueError("No `config` found in given Algorithm state!")
new_algo = algorithm_class(config=config)
# Set the new algo's state.
new_algo.__setstate__(state)
# Return the new algo.
return new_algo
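    # Example (sketch, names hypothetical): round-tripping an Algorithm through its state,
    # using the `get_state`/`from_state` pair described in the docstring above.
    #   state = my_algo.get_state()
    #   clone = Algorithm.from_state(state)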
@PublicAPI
def __init__(
self,
config: Optional[Union[PartialAlgorithmConfigDict, AlgorithmConfig]] = None,
env: Optional[Union[str, EnvType]] = None,
logger_creator: Optional[Callable[[], Logger]] = None,
**kwargs,
):
"""Initializes an Algorithm instance.
Args:
config: Algorithm-specific configuration dict.
env: Name of the environment to use (e.g. a gym-registered str),
a full class path (e.g.
"ray.rllib.examples.env.random_env.RandomEnv"), or an Env
class directly. Note that this arg can also be specified via
the "env" key in `config`.
logger_creator: Callable that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
**kwargs: Arguments passed to the Trainable base class.
"""
# User provided (partial) config (this may be w/o the default
# Algorithm's Config object). Will get merged with AlgorithmConfig()
# in self.setup().
config = config or {}
# Resolve AlgorithmConfig into a plain dict.
# TODO: In the future, only support AlgorithmConfig objects here.
if isinstance(config, AlgorithmConfig):
config = config.to_dict()
# Convert `env` provided in config into a concrete env creator callable, which
# takes an EnvContext (config dict) as arg and returning an RLlib supported Env
# type (e.g. a gym.Env).
self._env_id, self.env_creator = self._get_env_id_and_creator(
env or config.get("env"), config
)
env_descr = (
self._env_id.__name__ if isinstance(self._env_id, type) else self._env_id
)
# Placeholder for a local replay buffer instance.
self.local_replay_buffer = None
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
# Default logdir prefix containing the agent's name and the
# env id.
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(str(self), env_descr, timestr)
if not os.path.exists(DEFAULT_RESULTS_DIR):
# Possible race condition if dir is created several times on
# rollout workers
os.makedirs(DEFAULT_RESULTS_DIR, exist_ok=True)
logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
# Allow users to more precisely configure the created logger
# via "logger_config.type".
if config.get("logger_config") and "type" in config["logger_config"]:
def default_logger_creator(config):
"""Creates a custom logger with the default prefix."""
cfg = config["logger_config"].copy()
cls = cfg.pop("type")
# Provide default for logdir, in case the user does
# not specify this in the "logger_config" dict.
logdir_ = cfg.pop("logdir", logdir)
return from_config(cls=cls, _args=[cfg], logdir=logdir_)
# If no `type` given, use tune's UnifiedLogger as last resort.
else:
def default_logger_creator(config):
"""Creates a Unified logger with the default prefix."""
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
# Metrics-related properties.
self._timers = defaultdict(_Timer)
self._counters = defaultdict(int)
self._episode_history = []
self._episodes_to_be_collected = []
self._remote_workers_for_metrics = []
# Evaluation WorkerSet and metrics last returned by `self.evaluate()`.
self.evaluation_workers: Optional[WorkerSet] = None
# If evaluation duration is "auto", use a AsyncRequestsManager to be more
# robust against eval worker failures.
self._evaluation_async_req_manager: Optional[AsyncRequestsManager] = None
# Initialize common evaluation_metrics to nan, before they become
# available. We want to make sure the metrics are always present
# (although their values may be nan), so that Tune does not complain
# when we use these as stopping criteria.
self.evaluation_metrics = {
"evaluation": {
"episode_reward_max": np.nan,
"episode_reward_min": np.nan,
"episode_reward_mean": np.nan,
}
}
super().__init__(config=config, logger_creator=logger_creator, **kwargs)
# Check, whether `training_iteration` is still a tune.Trainable property
# and has not been overridden by the user in the attempt to implement the
# algos logic (this should be done now inside `training_step`).
try:
assert isinstance(self.training_iteration, int)
except AssertionError:
raise AssertionError(
"Your Algorithm's `training_iteration` seems to be overridden by your "
"custom training logic! To solve this problem, simply rename your "
"`self.training_iteration()` method into `self.training_step`."
)
@OverrideToImplementCustomLogic
@classmethod
def get_default_config(cls) -> AlgorithmConfigDict:
return AlgorithmConfig().to_dict()
@OverrideToImplementCustomLogic_CallToSuperRecommended
@override(Trainable)
def setup(self, config: PartialAlgorithmConfigDict):
# Setup our config: Merge the user-supplied config (which could
# be a partial config dict with the class' default).
self.config = self.merge_trainer_configs(
self.get_default_config(), config, self._allow_unknown_configs
)
self.config["env"] = self._env_id
# Validate the framework settings in config.
self.validate_framework(self.config)
# Set Algorithm's seed after we have - if necessary - enabled
# tf eager-execution.
update_global_seed_if_necessary(self.config["framework"], self.config["seed"])
self.validate_config(self.config)
self._record_usage(self.config)
self.callbacks = self.config["callbacks"]()
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info(
"Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level)
)
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
# Create local replay buffer if necessary.
self.local_replay_buffer = self._create_local_replay_buffer_if_necessary(
self.config
)
# Create a dict, mapping ActorHandles to sets of open remote
# requests (object refs). This way, we keep track, of which actors
# inside this Algorithm (e.g. a remote RolloutWorker) have
# already been sent how many (e.g. `sample()`) requests.
self.remote_requests_in_flight: DefaultDict[
ActorHandle, Set[ray.ObjectRef]
] = defaultdict(set)
self.workers: Optional[WorkerSet] = None
self.train_exec_impl = None
# Offline RL settings.
input_evaluation = self.config.get("input_evaluation")
if input_evaluation is not None and input_evaluation is not DEPRECATED_VALUE:
ope_dict = {str(ope): {"type": ope} for ope in input_evaluation}
deprecation_warning(
old="config.input_evaluation={}".format(input_evaluation),
new='config["evaluation_config"]'
'["off_policy_estimation_methods"]={}'.format(
ope_dict,
),
error=True,
help="Running OPE during training is not recommended.",
)
self.config["off_policy_estimation_methods"] = ope_dict
# Deprecated way of implementing Trainer sub-classes (or "templates"
# via the `build_trainer` utility function).
# Instead, sub-classes should override the Trainable's `setup()`
# method and call super().setup() from within that override at some
# point.
# Old design: Override `Trainer._init`.
_init = False
try:
self._init(self.config, self.env_creator)
_init = True
# New design: Override `Trainable.setup()` (as indented by tune.Trainable)
# and do or don't call `super().setup()` from within your override.
# By default, `super().setup()` will create both worker sets:
# "rollout workers" for collecting samples for training and - if
# applicable - "evaluation workers" for evaluation runs in between or
# parallel to training.
# TODO: Deprecate `_init()` and remove this try/except block.
except NotImplementedError:
pass
# Only if user did not override `_init()`:
if _init is False:
# - Create rollout workers here automatically.
# - Run the execution plan to create the local iterator to `next()`
# in each training iteration.
# This matches the behavior of using `build_trainer()`, which
# has been deprecated.
try:
self.workers = WorkerSet(
env_creator=self.env_creator,
validate_env=self.validate_env,
policy_class=self.get_default_policy_class(self.config),
trainer_config=self.config,
num_workers=self.config["num_workers"],
local_worker=True,
logdir=self.logdir,
)
# WorkerSet creation possibly fails, if some (remote) workers cannot
# be initialized properly (due to some errors in the RolloutWorker's
# constructor).
except RayActorError as e:
# In case of an actor (remote worker) init failure, the remote worker
# may still exist and will be accessible, however, e.g. calling
# its `sample.remote()` would result in strange "property not found"
# errors.
if e.actor_init_failed:
# Raise the original error here that the RolloutWorker raised
# during its construction process. This is to enforce transparency
# for the user (better to understand the real reason behind the
# failure).
# - e.args[0]: The RayTaskError (inside the caught RayActorError).
# - e.args[0].args[2]: The original Exception (e.g. a ValueError due
# to a config mismatch) thrown inside the actor.
raise e.args[0].args[2]
# In any other case, raise the RayActorError as-is.
else:
raise e
# By default, collect metrics for all remote workers.
self._remote_workers_for_metrics = self.workers.remote_workers()
# TODO (avnishn): Remove the execution plan API by q1 2023
# Function defining one single training iteration's behavior.
if self.config["_disable_execution_plan_api"]:
# Ensure remote workers are initially in sync with the local worker.
self.workers.sync_weights()
# LocalIterator-creating "execution plan".
# Only call this once here to create `self.train_exec_impl`,
# which is a ray.util.iter.LocalIterator that will be `next`'d
# on each training iteration.
else:
self.train_exec_impl = self.execution_plan(
self.workers, self.config, **self._kwargs_for_execution_plan()
)
# Now that workers have been created, update our policies
# dict in config[multiagent] (with the correct original/
# unpreprocessed spaces).
self.config["multiagent"][
"policies"
] = self.workers.local_worker().policy_dict
# Evaluation WorkerSet setup.
# User would like to setup a separate evaluation worker set.
# Update with evaluation settings:
user_eval_config = copy.deepcopy(self.config["evaluation_config"])
# Merge user-provided eval config with the base config. This makes sure
# the eval config is always complete, no matter whether we have eval
# workers or perform evaluation on the (non-eval) local worker.
eval_config = merge_dicts(self.config, user_eval_config)
self.config["evaluation_config"] = eval_config
if self.config.get("evaluation_num_workers", 0) > 0 or self.config.get(
"evaluation_interval"
):
logger.debug(f"Using evaluation_config: {user_eval_config}.")
# Validate evaluation config.
self.validate_config(eval_config)
# Set the `in_evaluation` flag.
eval_config["in_evaluation"] = True
# Evaluation duration unit: episodes.
# Switch on `complete_episodes` rollouts. Also, make sure
# rollout fragments are short so we never have more than one
# episode in one rollout.
if eval_config["evaluation_duration_unit"] == "episodes":
eval_config.update(
{
"batch_mode": "complete_episodes",
"rollout_fragment_length": 1,
}
)
# Evaluation duration unit: timesteps.
# - Set `batch_mode=truncate_episodes` so we don't perform rollouts
# strictly along episode borders.
# - Set `rollout_fragment_length` such that desired steps are divided
# equally amongst workers or - in "auto" duration mode - set it
# to a reasonably small number (10), such that a single `sample()`
# call doesn't take too much time and we can stop evaluation as soon
# as possible after the train step is completed.
else:
eval_config.update(
{
"batch_mode": "truncate_episodes",
"rollout_fragment_length": 10
if self.config["evaluation_duration"] == "auto"
else int(
math.ceil(
self.config["evaluation_duration"]
/ (self.config["evaluation_num_workers"] or 1)
)
),
}
)
self.config["evaluation_config"] = eval_config
_, env_creator = self._get_env_id_and_creator(
eval_config.get("env"), eval_config
)
# Create a separate evaluation worker set for evaluation.
# If evaluation_num_workers=0, use the evaluation set's local
# worker for evaluation, otherwise, use its remote workers
# (parallelized evaluation).
self.evaluation_workers: WorkerSet = WorkerSet(
env_creator=env_creator,
validate_env=None,
policy_class=self.get_default_policy_class(self.config),
trainer_config=eval_config,
num_workers=self.config["evaluation_num_workers"],
# Don't even create a local worker if num_workers > 0.
local_worker=False,
logdir=self.logdir,
)
if self.config["enable_async_evaluation"]:
self._evaluation_async_req_manager = AsyncRequestsManager(
workers=self.evaluation_workers.remote_workers(),
max_remote_requests_in_flight_per_worker=1,
return_object_refs=True,
)
self._evaluation_weights_seq_number = 0
self.reward_estimators: Dict[str, OffPolicyEstimator] = {}
ope_types = {
"is": ImportanceSampling,
"wis": WeightedImportanceSampling,
"dm": DirectMethod,
"dr": DoublyRobust,
}
for name, method_config in self.config["off_policy_estimation_methods"].items():
method_type = method_config.pop("type")
if method_type in ope_types:
deprecation_warning(
old=method_type,
new=str(ope_types[method_type]),
error=True,
)
method_type = ope_types[method_type]
elif isinstance(method_type, str):
logger.log(0, "Trying to import from string: " + method_type)
mod, obj = method_type.rsplit(".", 1)
mod = importlib.import_module(mod)
method_type = getattr(mod, obj)
if isinstance(method_type, type) and issubclass(
method_type, OfflineEvaluator
):
# TODO(kourosh) : Add an integration test for all these
# offline evaluators.
policy = self.get_policy()
if issubclass(method_type, OffPolicyEstimator):
method_config["gamma"] = self.config["gamma"]
self.reward_estimators[name] = method_type(policy, **method_config)
else:
raise ValueError(
f"Unknown off_policy_estimation type: {method_type}! Must be "
"either a class path or a sub-class of ray.rllib."
"offline.estimators.off_policy_estimator::OffPolicyEstimator"
)
# Run `on_algorithm_init` callback after initialization is done.
self.callbacks.on_algorithm_init(algorithm=self)
# TODO: Deprecated: In your sub-classes of Trainer, override `setup()`
# directly and call super().setup() from within it if you would like the
# default setup behavior plus some own setup logic.
# If you don't need the env/workers/config/etc. set up for you by super,
# simply do not call super().setup() from your overridden method.
def _init(self, config: AlgorithmConfigDict, env_creator: EnvCreator) -> None:
raise NotImplementedError
@OverrideToImplementCustomLogic
def get_default_policy_class(self, config: AlgorithmConfigDict) -> Type[Policy]:
"""Returns a default Policy class to use, given a config.
This class will be used inside RolloutWorkers' PolicyMaps in case
the policy class is not provided by the user in any single- or
multi-agent PolicySpec.
This method is experimental and currently only used iff the Trainer
class was not created using the `build_trainer` utility and if
the Trainer sub-class does not override `_init()` and create its
own WorkerSet in `_init()`.
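Example (a sketch of overriding this method in a custom sub-class;
`MyAlgo` and `MyTorchPolicy` are illustrative names, not part of RLlib):
>>> class MyAlgo(Algorithm): # doctest: +SKIP
>>> def get_default_policy_class(self, config): # doctest: +SKIP
>>> return MyTorchPolicy # doctest: +SKIP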
"""
return getattr(self, "_policy_class", None)
@override(Trainable)
def step(self) -> ResultDict:
"""Implements the main `Trainer.train()` logic.
Takes n attempts to perform a single training step. Thereby
catches RayErrors resulting from worker failures. After n attempts,
fails gracefully.
Override this method in your Trainer sub-classes if you would like to
handle worker failures yourself.
Otherwise, override only `training_step()` to implement the core
algorithm logic.
Returns:
The results dict with stats/infos on sampling, training,
and - if required - evaluation.
"""
# Do we have to run `self.evaluate()` this iteration?
# `self.iteration` gets incremented after this function returns,
# meaning that e.g. the first time this function is called,
# self.iteration will be 0.
evaluate_this_iter = (
self.config["evaluation_interval"] is not None
and (self.iteration + 1) % self.config["evaluation_interval"] == 0
)
# Results dict for training (and if applicable: evaluation).
results: ResultDict = {}
local_worker = (
self.workers.local_worker()
if hasattr(self.workers, "local_worker")
else None
)
# Parallel eval + training: Kick off evaluation-loop and parallel train() call.
if evaluate_this_iter and self.config["evaluation_parallel_to_training"]:
(
results,
train_iter_ctx,
) = self._run_one_training_iteration_and_evaluation_in_parallel()
# - No evaluation necessary, just run the next training iteration.
# - We have to evaluate in this training iteration, but no parallelism ->
# evaluate after the training iteration is entirely done.
else:
results, train_iter_ctx = self._run_one_training_iteration()
# Sequential: Train (already done above), then evaluate.
if evaluate_this_iter and not self.config["evaluation_parallel_to_training"]:
results.update(self._run_one_evaluation(train_future=None))
# Attach latest available evaluation results to train results,
# if necessary.
if not evaluate_this_iter and self.config["always_attach_evaluation_results"]:
assert isinstance(
self.evaluation_metrics, dict
), "Trainer.evaluate() needs to return a dict."
results.update(self.evaluation_metrics)
if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
# Sync filters on workers.
self._sync_filters_if_needed(
from_worker=self.workers.local_worker(),
workers=self.workers,
timeout_seconds=self.config[
"sync_filters_on_rollout_workers_timeout_s"
],
)
# TODO (avnishn): Remove the execution plan API by q1 2023
# Collect worker metrics and combine them with `results`.
if self.config["_disable_execution_plan_api"]:
episodes_this_iter, self._episodes_to_be_collected = collect_episodes(
local_worker,
self._remote_workers_for_metrics,
self._episodes_to_be_collected,
timeout_seconds=self.config["metrics_episode_collection_timeout_s"],
)
results = self._compile_iteration_results(
episodes_this_iter=episodes_this_iter,
step_ctx=train_iter_ctx,
iteration_results=results,
)
# Check `env_task_fn` for possible update of the env's task.
if self.config["env_task_fn"] is not None:
if not callable(self.config["env_task_fn"]):
raise ValueError(
"`env_task_fn` must be None or a callable taking "
"[train_results, env, env_ctx] as args!"
)
def fn(env, env_context, task_fn):
new_task = task_fn(results, env, env_context)
cur_task = env.get_task()
if cur_task != new_task:
env.set_task(new_task)
fn = functools.partial(fn, task_fn=self.config["env_task_fn"])
self.workers.foreach_env_with_context(fn)
return results
@PublicAPI
def evaluate(
self,
duration_fn: Optional[Callable[[int], int]] = None,
) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Note that this default implementation does not do anything beyond
merging evaluation_config with the normal trainer config.
Args:
duration_fn: An optional callable taking the already run
num episodes as only arg and returning the number of
episodes left to run. It's used to find out whether
evaluation should continue.
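Example (illustrative only; assumes an already built Algorithm `algo`
with evaluation workers configured, e.g. via `evaluation_interval`):
>>> results = algo.evaluate() # doctest: +SKIP
>>> print(results["evaluation"]["episode_reward_mean"]) # doctest: +SKIP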
"""
# Call the `_before_evaluate` hook.
self._before_evaluate()
# Sync weights to the evaluation WorkerSet.
if self.evaluation_workers is not None:
self.evaluation_workers.sync_weights(
from_worker=self.workers.local_worker()
)
self._sync_filters_if_needed(
from_worker=self.workers.local_worker(),
workers=self.evaluation_workers,
timeout_seconds=self.config[
"sync_filters_on_rollout_workers_timeout_s"
],
)
self.callbacks.on_evaluate_start(algorithm=self)
if self.config["custom_eval_function"]:
logger.info(
"Running custom eval function {}".format(
self.config["custom_eval_function"]
)
)
metrics = self.config["custom_eval_function"](self, self.evaluation_workers)
if not metrics or not isinstance(metrics, dict):
raise ValueError(
"Custom eval function must return "
"dict of metrics, got {}.".format(metrics)
)
else:
if (
self.evaluation_workers is None
and self.workers.local_worker().input_reader is None
):
raise ValueError(
"Cannot evaluate w/o an evaluation worker set in "
"the Trainer or w/o an env on the local worker!\n"
"Try one of the following:\n1) Set "
"`evaluation_interval` >= 0 to force creating a "
"separate evaluation worker set.\n2) Set "
"`create_env_on_driver=True` to force the local "
"(non-eval) worker to have an environment to "
"evaluate on."
)
# How many episodes/timesteps do we need to run?
# In "auto" mode (only for parallel eval + training): Run as long
# as training lasts.
unit = self.config["evaluation_duration_unit"]
eval_cfg = self.config["evaluation_config"]
rollout = eval_cfg["rollout_fragment_length"]
num_envs = eval_cfg["num_envs_per_worker"]
auto = self.config["evaluation_duration"] == "auto"
duration = (
self.config["evaluation_duration"]
if not auto
else (self.config["evaluation_num_workers"] or 1)
* (1 if unit == "episodes" else rollout)
)
agent_steps_this_iter = 0
env_steps_this_iter = 0
# Default duration_fn: Return the number of units (episodes or
# timesteps) still left to run. Evaluation stops once this reaches zero.
if duration_fn is None:
def duration_fn(num_units_done):
return duration - num_units_done
logger.info(f"Evaluating current policy for {duration} {unit}.")
metrics = None
all_batches = []
# No evaluation worker set ->
# Do evaluation using the local worker. Expect error due to the
# local worker not having an env.
if self.evaluation_workers is None:
# If unit=episodes -> Run n times `sample()` (each sample
# produces exactly 1 episode).
# If unit=ts -> Run 1 `sample()` b/c the
# `rollout_fragment_length` is exactly the desired ts.
iters = duration if unit == "episodes" else 1
for _ in range(iters):
batch = self.workers.local_worker().sample()
agent_steps_this_iter += batch.agent_steps()
env_steps_this_iter += batch.env_steps()
if self.reward_estimators:
all_batches.append(batch)
metrics = collect_metrics(
self.workers.local_worker(),
keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"],
timeout_seconds=eval_cfg["metrics_episode_collection_timeout_s"],
)
# Evaluation worker set only has local worker.
elif self.config["evaluation_num_workers"] == 0:
# If unit=episodes -> Run n times `sample()` (each sample
# produces exactly 1 episode).
# If unit=ts -> Run 1 `sample()` b/c the
# `rollout_fragment_length` is exactly the desired ts.
iters = duration if unit == "episodes" else 1
for _ in range(iters):
batch = self.evaluation_workers.local_worker().sample()
agent_steps_this_iter += batch.agent_steps()
env_steps_this_iter += batch.env_steps()
if self.reward_estimators:
all_batches.append(batch)
# Evaluation worker set has n remote workers.
else:
# How many episodes have we run (across all eval workers)?
num_units_done = 0
_round = 0
while True:
units_left_to_do = duration_fn(num_units_done)
if units_left_to_do <= 0:
break
_round += 1
try:
batches = ray.get(
[
w.sample.remote()
for i, w in enumerate(
self.evaluation_workers.remote_workers()
)
if i * (1 if unit == "episodes" else rollout * num_envs)
< units_left_to_do
],
timeout=self.config["evaluation_sample_timeout_s"],
)
except GetTimeoutError:
logger.warning(
"Calling `sample()` on your remote evaluation worker(s) "
"resulted in a timeout (after the configured "
f"{self.config['evaluation_sample_timeout_s']} seconds)! "
"Try to set `evaluation_sample_timeout_s` in your config"
" to a larger value."
+ (
" If your episodes don't terminate easily, you may "
"also want to set `evaluation_duration_unit` to "
"'timesteps' (instead of 'episodes')."
if unit == "episodes"
else ""
)
)
break
_agent_steps = sum(b.agent_steps() for b in batches)
_env_steps = sum(b.env_steps() for b in batches)
# 1 episode per returned batch.
if unit == "episodes":
num_units_done += len(batches)
# Make sure all batches are exactly one episode.
for ma_batch in batches:
ma_batch = ma_batch.as_multi_agent()
for batch in ma_batch.policy_batches.values():
assert np.sum(batch[SampleBatch.DONES])
# n timesteps per returned batch.
else:
num_units_done += (
_agent_steps if self._by_agent_steps else _env_steps
)
if self.reward_estimators:
# TODO: (kourosh) This approach will cause an OOM issue when
# the dataset gets huge (should be ok for now).
all_batches.extend(batches)
agent_steps_this_iter += _agent_steps
env_steps_this_iter += _env_steps
logger.info(
f"Ran round {_round} of parallel evaluation "
f"({num_units_done}/{duration if not auto else '?'} "
f"{unit} done)"
)
if metrics is None:
metrics = collect_metrics(
self.evaluation_workers.local_worker(),
self.evaluation_workers.remote_workers(),
keep_custom_metrics=self.config["keep_per_episode_custom_metrics"],
timeout_seconds=eval_cfg["metrics_episode_collection_timeout_s"],
)
metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter
metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter
# TODO: Remove this key at some point. Here for backward compatibility.
metrics["timesteps_this_iter"] = env_steps_this_iter
# Compute off-policy estimates
estimates = defaultdict(list)
# for each batch run the estimator's fwd pass
for name, estimator in self.reward_estimators.items():
for batch in all_batches:
estimate_result = estimator.estimate(
batch,
split_batch_by_episode=self.config[
"ope_split_batch_by_episode"
],
)
estimates[name].append(estimate_result)
# collate estimates from all batches
if estimates:
metrics["off_policy_estimator"] = {}
for name, estimate_list in estimates.items():
avg_estimate = tree.map_structure(
lambda *x: np.mean(x, axis=0), *estimate_list
)
metrics["off_policy_estimator"][name] = avg_estimate
# Evaluation does not run for every step.
# Save evaluation metrics on trainer, so it can be attached to
# subsequent step results as latest evaluation result.
self.evaluation_metrics = {"evaluation": metrics}
# Trigger `on_evaluate_end` callback.
self.callbacks.on_evaluate_end(
algorithm=self, evaluation_metrics=self.evaluation_metrics
)
# Also return the results here for convenience.
return self.evaluation_metrics
@ExperimentalAPI
def _evaluate_async(
self,
duration_fn: Optional[Callable[[int], int]] = None,
) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Uses the AsyncParallelRequests manager to send frequent `sample.remote()`
requests to the evaluation RolloutWorkers and collect the results of these
calls. Handles worker failures (or slowdowns) gracefully thanks to the
asynchronous nature of these requests and the fact that other eval
RolloutWorkers can cover the workload.
Important Note: This will replace the current `self.evaluate()` method as the
default in the future.
Args:
duration_fn: An optional callable taking the already run
num episodes as only arg and returning the number of
episodes left to run. It's used to find out whether
evaluation should continue.
"""
# How many episodes/timesteps do we need to run?
# In "auto" mode (only for parallel eval + training): Run as long
# as training lasts.
unit = self.config["evaluation_duration_unit"]
eval_cfg = self.config["evaluation_config"]
rollout = eval_cfg["rollout_fragment_length"]
num_envs = eval_cfg["num_envs_per_worker"]
auto = self.config["evaluation_duration"] == "auto"
duration = (
self.config["evaluation_duration"]
if not auto
else (self.config["evaluation_num_workers"] or 1)
* (1 if unit == "episodes" else rollout)
)
# Call the `_before_evaluate` hook.
self._before_evaluate()
# Put weights only once into object store and use same object
# ref to synch to all workers.
self._evaluation_weights_seq_number += 1
weights_ref = ray.put(self.workers.local_worker().get_weights())
# TODO(Jun): Make sure this cannot block for e.g. 1h. Implement solution via
# connectors.
self._sync_filters_if_needed(
from_worker=self.workers.local_worker(),
workers=self.evaluation_workers,
timeout_seconds=eval_cfg.get("sync_filters_on_rollout_workers_timeout_s"),
)
if self.config["custom_eval_function"]:
raise ValueError(
"`custom_eval_function` not supported in combination "
"with `enable_async_evaluation=True` config setting!"
)
if self.evaluation_workers is None and (
self.workers.local_worker().input_reader is None
or self.config["evaluation_num_workers"] == 0
):
raise ValueError(
"Evaluation w/o eval workers (calling Algorithm.evaluate() w/o "
"evaluation specifically set up) OR evaluation without input reader "
"OR evaluation with only a local evaluation worker "
"(`evaluation_num_workers=0`) not supported in combination "
"with `enable_async_evaluation=True` config setting!"
)
agent_steps_this_iter = 0
env_steps_this_iter = 0
logger.info(f"Evaluating current policy for {duration} {unit}.")
all_batches = []
# Default duration_fn: Return the number of units (episodes or
# timesteps) still left to run. Evaluation stops once this reaches zero.
if duration_fn is None:
def duration_fn(num_units_done):
return duration - num_units_done
def remote_fn(worker, w_ref, w_seq_no):
# Pass in seq-no so that eval workers may ignore this call if no update has
# happened since the last call to `remote_fn` (sample).
worker.set_weights(weights=w_ref, weights_seq_no=w_seq_no)
batch = worker.sample()
metrics = worker.get_metrics()
return batch, metrics, w_seq_no
rollout_metrics = []
# How many episodes have we run (across all eval workers)?
num_units_done = 0
_round = 0
errors = []
while len(self._evaluation_async_req_manager.workers) > 0:
units_left_to_do = duration_fn(num_units_done)
if units_left_to_do <= 0:
break
_round += 1
# Use the AsyncRequestsManager to get ready evaluation results and
# metrics.
self._evaluation_async_req_manager.call_on_all_available(
remote_fn=remote_fn,
fn_args=[weights_ref, self._evaluation_weights_seq_number],
)
ready_requests = self._evaluation_async_req_manager.get_ready()
batches = []
i = 0
for actor, requests in ready_requests.items():
for req in requests:
try:
batch, metrics, seq_no = ray.get(req)
# Ignore results, if the weights seq-number does not match (is
# from a previous evaluation step) OR if we have already reached
# the configured duration (e.g. number of episodes to evaluate
# for).
if seq_no == self._evaluation_weights_seq_number and (
i * (1 if unit == "episodes" else rollout * num_envs)
< units_left_to_do
):
batches.append(batch)
rollout_metrics.extend(metrics)
except RayError as e:
errors.append(e)
self._evaluation_async_req_manager.remove_workers(actor)
i += 1
_agent_steps = sum(b.agent_steps() for b in batches)
_env_steps = sum(b.env_steps() for b in batches)
# 1 episode per returned batch.
if unit == "episodes":
num_units_done += len(batches)
# Make sure all batches are exactly one episode.
for ma_batch in batches:
ma_batch = ma_batch.as_multi_agent()
for batch in ma_batch.policy_batches.values():
assert np.sum(batch[SampleBatch.DONES])
# n timesteps per returned batch.
else:
num_units_done += _agent_steps if self._by_agent_steps else _env_steps
if self.reward_estimators:
all_batches.extend(batches)
agent_steps_this_iter += _agent_steps
env_steps_this_iter += _env_steps
logger.info(
f"Ran round {_round} of parallel evaluation "
f"({num_units_done}/{duration if not auto else '?'} "
f"{unit} done)"
)
num_recreated_workers = 0
if errors:
num_recreated_workers = self.try_recover_from_step_attempt(
error=errors[0],
worker_set=self.evaluation_workers,
ignore=eval_cfg.get("ignore_worker_failures"),
recreate=eval_cfg.get("recreate_failed_workers"),
)
metrics = summarize_episodes(
rollout_metrics,
keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"],
)
metrics["num_recreated_workers"] = num_recreated_workers
metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter
metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter
# TODO: Remove this key at some point. Here for backward compatibility.
metrics["timesteps_this_iter"] = env_steps_this_iter
if self.reward_estimators:
# Compute off-policy estimates
metrics["off_policy_estimator"] = {}
total_batch = concat_samples(all_batches)
for name, estimator in self.reward_estimators.items():
estimates = estimator.estimate(total_batch)
metrics["off_policy_estimator"][name] = estimates
# Evaluation does not run for every step.
# Save evaluation metrics on trainer, so it can be attached to
# subsequent step results as latest evaluation result.
self.evaluation_metrics = {"evaluation": metrics}
# Trigger `on_evaluate_end` callback.
self.callbacks.on_evaluate_end(
algorithm=self, evaluation_metrics=self.evaluation_metrics
)
# Return evaluation results.
return self.evaluation_metrics
@OverrideToImplementCustomLogic
@DeveloperAPI
def training_step(self) -> ResultDict:
"""Default single iteration logic of an algorithm.
- Collect on-policy samples (SampleBatches) in parallel using the
Trainer's RolloutWorkers (@ray.remote).
- Concatenate collected SampleBatches into one train batch.
- Note that we may have more than one policy in the multi-agent case:
Call the different policies' `learn_on_batch` (simple optimizer) OR
`load_batch_into_buffer` + `learn_on_loaded_batch` (multi-GPU
optimizer) methods to calculate loss and update the model(s).
- Return all collected metrics for the iteration.
Returns:
The results dict from executing the training iteration.
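Example (a minimal sketch of a custom override; `MyAlgo` is an
illustrative name and the body simply mirrors the default logic below):
>>> class MyAlgo(Algorithm): # doctest: +SKIP
>>> def training_step(self) -> ResultDict: # doctest: +SKIP
>>> train_batch = synchronous_parallel_sample( # doctest: +SKIP
>>> worker_set=self.workers, # doctest: +SKIP
>>> max_env_steps=self.config["train_batch_size"]) # doctest: +SKIP
>>> return train_one_step(self, train_batch.as_multi_agent()) # doctest: +SKIP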
"""
# Collect SampleBatches from sample workers until we have a full batch.
if self._by_agent_steps:
train_batch = synchronous_parallel_sample(
worker_set=self.workers, max_agent_steps=self.config["train_batch_size"]
)
else:
train_batch = synchronous_parallel_sample(
worker_set=self.workers, max_env_steps=self.config["train_batch_size"]
)
train_batch = train_batch.as_multi_agent()
self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps()
self._counters[NUM_ENV_STEPS_SAMPLED] += train_batch.env_steps()
# Use simple optimizer (only for multi-agent or tf-eager; all other
# cases should use the multi-GPU optimizer, even if only using 1 GPU).
# TODO: (sven) rename MultiGPUOptimizer into something more
# meaningful.
if self.config.get("simple_optimizer") is True:
train_results = train_one_step(self, train_batch)
else:
train_results = multi_gpu_train_one_step(self, train_batch)
# Update weights and global_vars - after learning on the local worker - on all
# remote workers.
global_vars = {
"timestep": self._counters[NUM_ENV_STEPS_SAMPLED],
}
with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
self.workers.sync_weights(global_vars=global_vars)
return train_results
@staticmethod
def execution_plan(workers, config, **kwargs):
raise NotImplementedError(
"It is no longer recommended to use Trainer's `execution_plan` method/API."
" Set `_disable_execution_plan_api=True` in your config and override the "
"`Trainer.training_step()` method with your algo's custom "
"execution logic."
)
@PublicAPI
def compute_single_action(
self,
observation: Optional[TensorStructType] = None,
state: Optional[List[TensorStructType]] = None,
*,
prev_action: Optional[TensorStructType] = None,
prev_reward: Optional[float] = None,
info: Optional[EnvInfoDict] = None,
input_dict: Optional[SampleBatch] = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
episode: Optional[Episode] = None,
unsquash_action: Optional[bool] = None,
clip_action: Optional[bool] = None,
# Deprecated args.
unsquash_actions=DEPRECATED_VALUE,
clip_actions=DEPRECATED_VALUE,
# Kwargs placeholder for future compatibility.
**kwargs,
) -> Union[
TensorStructType,
Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]],
]:
"""Computes an action for the specified policy on the local worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_single_action() on it
directly.
Args:
observation: Single (unbatched) observation from the
environment.
state: List of all RNN hidden (single, unbatched) state tensors.
prev_action: Single (unbatched) previous action value.
prev_reward: Single (unbatched) previous reward value.
info: Env info dict, if any.
input_dict: An optional SampleBatch that holds all the values
for: obs, state, prev_action, and prev_reward, plus maybe
custom defined views of the current env trajectory. Note
that only one of `observation` or `input_dict` must be non-None.
policy_id: Policy to query (only applies to multi-agent).
Default: "default_policy".
full_fetch: Whether to return extra action fetch results.
This is always set to True if `state` is specified.
explore: Whether to apply exploration to the action.
Default: None -> use self.config["explore"].
timestep: The current (sampling) time step.
episode: This provides access to all of the internal episodes'
state, which may be useful for model-based or multi-agent
algorithms.
unsquash_action: Should actions be unsquashed according to the
env's/Policy's action space? If None, use the value of
self.config["normalize_actions"].
clip_action: Should actions be clipped according to the
env's/Policy's action space? If None, use the value of
self.config["clip_actions"].
Keyword Args:
kwargs: forward compatibility placeholder
Returns:
The computed action if full_fetch=False, or the full output tuple
of policy.compute_single_action() (action, rnn states, extra action
fetches) if full_fetch=True or we have an RNN-based Policy.
Raises:
KeyError: If the `policy_id` cannot be found in this Trainer's
local worker.
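Example (illustrative; `algo` and `env` are assumed to exist and the
observation must match the Policy's observation space):
>>> obs = env.reset() # doctest: +SKIP
>>> action = algo.compute_single_action(obs) # doctest: +SKIP
>>> obs, reward, done, info = env.step(action) # doctest: +SKIP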
"""
if clip_actions != DEPRECATED_VALUE:
deprecation_warning(
old="Trainer.compute_single_action(`clip_actions`=...)",
new="Trainer.compute_single_action(`clip_action`=...)",
error=True,
)
clip_action = clip_actions
if unsquash_actions != DEPRECATED_VALUE:
deprecation_warning(
old="Trainer.compute_single_action(`unsquash_actions`=...)",
new="Trainer.compute_single_action(`unsquash_action`=...)",
error=True,
)
unsquash_action = unsquash_actions
# `unsquash_action` is None: Use value of config['normalize_actions'].
if unsquash_action is None:
unsquash_action = self.config["normalize_actions"]
# `clip_action` is None: Use value of config['clip_actions'].
if clip_action is None:
clip_action = self.config["clip_actions"]
# User provided an input-dict: Assert that `obs`, `prev_a|r`, `state`
# are all None.
err_msg = (
"Provide either `input_dict` OR [`observation`, ...] as "
"args to Trainer.compute_single_action!"
)
if input_dict is not None:
assert (
observation is None
and prev_action is None
and prev_reward is None
and state is None
), err_msg
observation = input_dict[SampleBatch.OBS]
else:
assert observation is not None, err_msg
# Get the policy to compute the action for (in the multi-agent case,
# Trainer may hold >1 policies).
policy = self.get_policy(policy_id)
if policy is None:
raise KeyError(
f"PolicyID '{policy_id}' not found in PolicyMap of the "
f"Trainer's local worker!"
)
local_worker = self.workers.local_worker()
# Check the preprocessor and preprocess, if necessary.
pp = local_worker.preprocessors[policy_id]
if pp and type(pp).__name__ != "NoPreprocessor":
observation = pp.transform(observation)
observation = local_worker.filters[policy_id](observation, update=False)
# Input-dict.
if input_dict is not None:
input_dict[SampleBatch.OBS] = observation
action, state, extra = policy.compute_single_action(
input_dict=input_dict,
explore=explore,
timestep=timestep,
episode=episode,
)
# Individual args.
else:
action, state, extra = policy.compute_single_action(
obs=observation,
state=state,
prev_action=prev_action,
prev_reward=prev_reward,
info=info,
explore=explore,
timestep=timestep,
episode=episode,
)
# If we work in normalized action space (normalize_actions=True),
# we re-translate here into the env's action space.
if unsquash_action:
action = space_utils.unsquash_action(action, policy.action_space_struct)
# Clip, according to env's action space.
elif clip_action:
action = space_utils.clip_action(action, policy.action_space_struct)
# Return 3-Tuple: Action, states, and extra-action fetches.
if state or full_fetch:
return action, state, extra
# Ensure backward compatibility.
else:
return action
@PublicAPI
def compute_actions(
self,
observations: TensorStructType,
state: Optional[List[TensorStructType]] = None,
*,
prev_action: Optional[TensorStructType] = None,
prev_reward: Optional[TensorStructType] = None,
info: Optional[EnvInfoDict] = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
episodes: Optional[List[Episode]] = None,
unsquash_actions: Optional[bool] = None,
clip_actions: Optional[bool] = None,
# Deprecated.
normalize_actions=None,
**kwargs,
):
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
observations: Observations from the environment, e.g. a dict
mapping agent IDs to the individual agents' observations.
state: RNN hidden state, if any. If state is not None,
then all of compute_actions(...) is returned
(computed actions, rnn state(s), logits dictionary).
Otherwise compute_actions(...)[0] is returned
(computed actions).
prev_action: Previous action value, if any.
prev_reward: Previous reward, if any.
info: Env info dict, if any.
policy_id: Policy to query (only applies to multi-agent).
full_fetch: Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore: Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
timestep: The current (sampling) time step.
episodes: This provides access to all of the internal episodes'
state, which may be useful for model-based or multi-agent
algorithms.
unsquash_actions: Should actions be unsquashed according
to the env's/Policy's action space? If None, use
self.config["normalize_actions"].
clip_actions: Should actions be clipped according to the
env's/Policy's action space? If None, use
self.config["clip_actions"].
Keyword Args:
kwargs: forward compatibility placeholder
Returns:
The computed action if full_fetch=False, or a tuple consisting of
the full output of policy.compute_actions_from_input_dict() if
full_fetch=True or we have an RNN-based Policy.
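Example (illustrative; assumes a multi-agent setup in which `obs_dict`
maps agent IDs to the individual agents' observations):
>>> obs_dict = {"agent_0": obs_0, "agent_1": obs_1} # doctest: +SKIP
>>> actions = algo.compute_actions(obs_dict) # doctest: +SKIP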
"""
if normalize_actions is not None:
deprecation_warning(
old="Trainer.compute_actions(`normalize_actions`=...)",
new="Trainer.compute_actions(`unsquash_actions`=...)",
error=True,
)
unsquash_actions = normalize_actions
# `unsquash_actions` is None: Use value of config['normalize_actions'].
if unsquash_actions is None:
unsquash_actions = self.config["normalize_actions"]
# `clip_actions` is None: Use value of config['clip_actions'].
if clip_actions is None:
clip_actions = self.config["clip_actions"]
# Preprocess obs and states.
state_defined = state is not None
policy = self.get_policy(policy_id)
filtered_obs, filtered_state = [], []
for agent_id, ob in observations.items():
worker = self.workers.local_worker()
preprocessed = worker.preprocessors[policy_id].transform(ob)
filtered = worker.filters[policy_id](preprocessed, update=False)
filtered_obs.append(filtered)
if state is None:
continue
elif agent_id in state:
filtered_state.append(state[agent_id])
else:
filtered_state.append(policy.get_initial_state())
# Batch obs and states
obs_batch = np.stack(filtered_obs)
if state is None:
state = []
else:
state = list(zip(*filtered_state))
state = [np.stack(s) for s in state]
input_dict = {SampleBatch.OBS: obs_batch}
# prev_action and prev_reward can be None, np.ndarray, or tensor-like structure.
# Explicitly check for None here to avoid the error message "The truth value of
# an array with more than one element is ambiguous.", when np arrays are passed
# as arguments.
if prev_action is not None:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action
if prev_reward is not None:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward
if info:
input_dict[SampleBatch.INFOS] = info
for i, s in enumerate(state):
input_dict[f"state_in_{i}"] = s
# Batch compute actions
actions, states, infos = policy.compute_actions_from_input_dict(
input_dict=input_dict,
explore=explore,
timestep=timestep,
episodes=episodes,
)
# Unbatch actions for the environment into a multi-agent dict.
single_actions = space_utils.unbatch(actions)
actions = {}
for key, a in zip(observations, single_actions):
# If we work in normalized action space (normalize_actions=True),
# we re-translate here into the env's action space.
if unsquash_actions:
a = space_utils.unsquash_action(a, policy.action_space_struct)
# Clip, according to env's action space.
elif clip_actions:
a = space_utils.clip_action(a, policy.action_space_struct)
actions[key] = a
# Unbatch states into a multi-agent dict.
unbatched_states = {}
for idx, agent_id in enumerate(observations):
unbatched_states[agent_id] = [s[idx] for s in states]
# Return only actions or full tuple
if state_defined or full_fetch:
return actions, unbatched_states, infos
else:
return actions
@PublicAPI
def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:
"""Return policy for the specified id, or None.
Args:
policy_id: ID of the policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies: Optional[List[PolicyID]] = None) -> dict:
"""Return a dictionary of policy ids to weights.
Args:
policies: Optional list of policies to return weights for,
or None for all policies.
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights: Dict[PolicyID, dict]):
"""Set policy weights by policy id.
Args:
weights: Map of policy ids to weights to set.
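Example (illustrative; assumes two Algorithm instances with compatible
policies):
>>> algo_2.set_weights(algo_1.get_weights()) # doctest: +SKIP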
"""
self.workers.local_worker().set_weights(weights)
@PublicAPI
def add_policy(
self,
policy_id: PolicyID,
policy_cls: Optional[Type[Policy]] = None,
policy: Optional[Policy] = None,
*,
observation_space: Optional[gym.spaces.Space] = None,
action_space: Optional[gym.spaces.Space] = None,
config: Optional[PartialAlgorithmConfigDict] = None,
policy_state: Optional[PolicyState] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
evaluation_workers: bool = True,
workers: Optional[List[Union[RolloutWorker, ActorHandle]]] = None,
) -> Optional[Policy]:
"""Adds a new policy to this Algorithm.
Args:
policy_id: ID of the policy to add.
IMPORTANT: Must not contain characters that are
not allowed in Unix/Win filesystems, such as: `<>:"/\|?*`
or a dot `.` or space ` ` at the end of the ID.
policy_cls: The Policy class to use for constructing the new Policy.
Note: Only one of `policy_cls` or `policy` must be provided.
policy: The Policy instance to add to this algorithm. If not None, the
given Policy object will be directly inserted into the Algorithm's
local worker and clones of that Policy will be created on all remote
workers as well as all evaluation workers.
Note: Only one of `policy_cls` or `policy` must be provided.
observation_space: The observation space of the policy to add.
If None, try to infer this space from the environment.
action_space: The action space of the policy to add.
If None, try to infer this space from the environment.
config: The config overrides for the policy to add.
policy_state: Optional state dict to apply to the new
policy instance, right after its construction.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on. Note that already ongoing episodes will
not change their mapping but will use the old mapping till
the end of the episode.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
evaluation_workers: Whether to add the new policy also
to the evaluation WorkerSet.
workers: A list of RolloutWorker/ActorHandles (remote
RolloutWorkers) to add this policy to. If defined, will only
add the given policy to these workers.
Returns:
The newly added policy (the copy that got added to the local
worker). If `workers` was provided, None is returned.
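Example (a minimal sketch; the policy ID is illustrative and the new
policy simply reuses the class of the existing default policy):
>>> new_policy = algo.add_policy( # doctest: +SKIP
>>> policy_id="new_policy", # doctest: +SKIP
>>> policy_cls=type(algo.get_policy())) # doctest: +SKIP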
"""
validate_policy_id(policy_id, error=True)
# Worker list is explicitly provided -> Use only those workers (local or remote)
# specified.
if workers is not None:
# Call static utility method.
WorkerSet.add_policy_to_workers(
workers,
policy_id,
policy_cls,
policy,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
# Add to all our regular RolloutWorkers and maybe also all evaluation workers.
else:
self.workers.add_policy(
policy_id,
policy_cls,
policy,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
# Add to evaluation workers, if necessary.
if evaluation_workers is True and self.evaluation_workers is not None:
self.evaluation_workers.add_policy(
policy_id,
policy_cls,
policy,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
# Return newly added policy (from the local rollout worker).
return self.get_policy(policy_id)
@PublicAPI
def remove_policy(
self,
policy_id: PolicyID = DEFAULT_POLICY_ID,
*,
policy_mapping_fn: Optional[Callable[[AgentID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
evaluation_workers: bool = True,
) -> None:
"""Removes a policy from this Algorithm.
Args:
policy_id: ID of the policy to be removed.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on. Note that already ongoing episodes will
not change their mapping but will use the old mapping till
the end of the episode.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
evaluation_workers: Whether to also remove the policy from the
evaluation WorkerSet.
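Example (illustrative; assumes a policy "new_policy" was added earlier
via `add_policy()`):
>>> algo.remove_policy("new_policy") # doctest: +SKIP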
"""
def fn(worker):
worker.remove_policy(
policy_id=policy_id,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
)
self.workers.foreach_worker(fn)
if evaluation_workers and self.evaluation_workers is not None:
self.evaluation_workers.foreach_worker(fn)
@DeveloperAPI
def export_policy_model(
self,
export_dir: str,
policy_id: PolicyID = DEFAULT_POLICY_ID,
onnx: Optional[int] = None,
) -> None:
"""Exports policy model with given policy_id to a local directory.
Args:
export_dir: Writable local directory.
policy_id: Optional policy id to export.
onnx: If given, will export model in ONNX format. The
value of this parameter sets the ONNX OpSet version to use.
If None, the output format will be DL framework specific.
Example:
>>> from ray.rllib.algorithms.ppo import PPO
>>> # Use an Algorithm from RLlib or define your own.
>>> algo = PPO(...) # doctest: +SKIP
>>> for _ in range(10): # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
>>> algo.export_policy_model("/tmp/dir") # doctest: +SKIP
>>> algo.export_policy_model("/tmp/dir/onnx", onnx=1) # doctest: +SKIP
"""
self.get_policy(policy_id).export_model(export_dir, onnx)
@DeveloperAPI
def export_policy_checkpoint(
self,
export_dir: str,
filename_prefix=DEPRECATED_VALUE, # deprecated arg, do not use anymore
policy_id: PolicyID = DEFAULT_POLICY_ID,
) -> None:
"""Exports Policy checkpoint to a local directory and returns an AIR Checkpoint.
Args:
export_dir: Writable local directory to store the AIR Checkpoint
information into.
policy_id: Optional policy ID to export. If not provided, will export
"default_policy". If `policy_id` does not exist in this Algorithm,
will raise a KeyError.
Raises:
KeyError: If `policy_id` cannot be found in this Algorithm.
Example:
>>> from ray.rllib.algorithms.ppo import PPO
>>> # Use an Algorithm from RLlib or define your own.
>>> algo = PPO(...) # doctest: +SKIP
>>> for _ in range(10): # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
>>> algo.export_policy_checkpoint("/tmp/export_dir") # doctest: +SKIP
"""
# `filename_prefix` should no longer be used as new Policy checkpoints
# contain more than one file with a fixed filename structure.
if filename_prefix != DEPRECATED_VALUE:
deprecation_warning(
old="Algorithm.export_policy_checkpoint(filename_prefix=...)",
error=True,
)
policy = self.get_policy(policy_id)
if policy is None:
raise KeyError(f"Policy with ID {policy_id} not found in Algorithm!")
policy.export_checkpoint(export_dir)
@DeveloperAPI
def import_policy_model_from_h5(
self,
import_file: str,
policy_id: PolicyID = DEFAULT_POLICY_ID,
) -> None:
"""Imports a policy's model with given policy_id from a local h5 file.
Args:
import_file: The h5 file to import from.
policy_id: Optional policy id to import into.
Example:
>>> from ray.rllib.algorithms.ppo import PPO
>>> algo = PPO(...) # doctest: +SKIP
>>> algo.import_policy_model_from_h5("/tmp/weights.h5") # doctest: +SKIP
>>> for _ in range(10): # doctest: +SKIP
>>> algo.train() # doctest: +SKIP
"""
self.get_policy(policy_id).import_model_from_h5(import_file)
# Sync new weights to remote workers.
self._sync_weights_to_workers(worker_set=self.workers)
@override(Trainable)
def save_checkpoint(self, checkpoint_dir: str) -> str:
"""Exports AIR Checkpoint to a local directory and returns its directory path.
The structure of an Algorithm checkpoint dir will be as follows::
policies/
pol_1/
policy_state.pkl
pol_2/
policy_state.pkl
rllib_checkpoint.json
algorithm_state.pkl
Note: `rllib_checkpoint.json` contains a "version" key (e.g. with value 0.1)
helping RLlib to remain backward compatible wrt. restoring from checkpoints from
Ray 2.0 onwards.
Args:
checkpoint_dir: The directory where the checkpoint files will be stored.
Returns:
The path to the created AIR Checkpoint directory.
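Example (illustrative; this method is normally invoked indirectly via
`Algorithm.save()` rather than being called directly):
>>> checkpoint_dir = algo.save("/tmp/my_checkpoint") # doctest: +SKIP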
"""
state = self.__getstate__()
# Extract policy states from worker state (Policies get their own
# checkpoint sub-dirs).
policy_states = {}
if "worker" in state and "policy_states" in state["worker"]:
policy_states = state["worker"].pop("policy_states", {})
# Add RLlib checkpoint version.
state["checkpoint_version"] = CHECKPOINT_VERSION
# Write state (w/o policies) to disk.
state_file = os.path.join(checkpoint_dir, "algorithm_state.pkl")
with open(state_file, "wb") as f:
pickle.dump(state, f)
# Write rllib_checkpoint.json.
with open(os.path.join(checkpoint_dir, "rllib_checkpoint.json"), "w") as f:
json.dump(
{
"type": "Algorithm",
"checkpoint_version": str(state["checkpoint_version"]),
"ray_version": ray.__version__,
"ray_commit": ray.__commit__,
},
f,
)
# Write individual policies to disk, each in their own sub-directory.
for pid, policy_state in policy_states.items():
# From here on, disallow policy IDs that would not work as directory names.
validate_policy_id(pid, error=True)
policy_dir = os.path.join(checkpoint_dir, "policies", pid)
os.makedirs(policy_dir, exist_ok=True)
policy = self.get_policy(pid)
policy.export_checkpoint(policy_dir, policy_state=policy_state)
return checkpoint_dir
@override(Trainable)
def load_checkpoint(self, checkpoint: Union[Dict, str]) -> None:
# Checkpoint is provided as a directory name.
# Restore from the checkpoint file or dir.
if isinstance(checkpoint, str):
checkpoint_info = get_checkpoint_info(checkpoint)
checkpoint_data = Algorithm._checkpoint_info_to_algorithm_state(
checkpoint_info
)
# Checkpoint is a checkpoint-as-dict -> Restore state from it as-is.
else:
checkpoint_data = checkpoint
self.__setstate__(checkpoint_data)
@override(Trainable)
def log_result(self, result: ResultDict) -> None:
# Log after the callback is invoked, so that the user has a chance
# to mutate the result.
# TODO: Remove `trainer` arg at some point to fully deprecate the old signature.
self.callbacks.on_train_result(algorithm=self, result=result)
# Then log according to Trainable's logging logic.
Trainable.log_result(self, result)
@override(Trainable)
def cleanup(self) -> None:
# Stop all workers.
if hasattr(self, "workers") and self.workers is not None:
self.workers.stop()
if hasattr(self, "evaluation_workers") and self.evaluation_workers is not None:
self.evaluation_workers.stop()
@OverrideToImplementCustomLogic
@classmethod
@override(Trainable)
def default_resource_request(
cls, config: PartialAlgorithmConfigDict
) -> Union[Resources, PlacementGroupFactory]:
# Default logic for RLlib Algorithms:
# Create one bundle per individual worker (local or remote).
# Use `num_cpus_for_driver` and `num_gpus` for the local worker and
# `num_cpus_per_worker` and `num_gpus_per_worker` for the remote
# workers to determine their CPU/GPU resource needs.
# Convenience config handles.
cf = dict(cls.get_default_config(), **config)
eval_cf = cf["evaluation_config"]
local_worker = {
"CPU": cf["num_cpus_for_driver"],
"GPU": 0 if cf["_fake_gpus"] else cf["num_gpus"],
}
rollout_workers = [
{
"CPU": cf["num_cpus_per_worker"],
"GPU": cf["num_gpus_per_worker"],
**cf["custom_resources_per_worker"],
}
for _ in range(cf["num_workers"])
]
bundles = [local_worker] + rollout_workers
if cf["evaluation_interval"]:
# Evaluation workers.
# Note: The local eval worker is located on the driver CPU.
bundles += [
{
"CPU": eval_cf.get(
"num_cpus_per_worker", cf["num_cpus_per_worker"]
),
"GPU": eval_cf.get(
"num_gpus_per_worker", cf["num_gpus_per_worker"]
),
**eval_cf.get(
"custom_resources_per_worker", cf["custom_resources_per_worker"]
),
}
for _ in range(cf["evaluation_num_workers"])
]
# In case our I/O reader/writer requires compute resources.
bundles += get_offline_io_resource_bundles(cf)
# Return PlacementGroupFactory containing all needed resources
# (already properly defined as device bundles).
return PlacementGroupFactory(
bundles=bundles,
strategy=config.get("placement_strategy", "PACK"),
)
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@staticmethod
def _get_env_id_and_creator(
env_specifier: Union[str, EnvType, None], config: PartialAlgorithmConfigDict
) -> Tuple[Optional[str], EnvCreator]:
"""Returns env_id and creator callable given original env id from config.
Args:
env_specifier: An env class, an already tune registered env ID, a known
gym env name, or None (if no env is used).
config: The Algorithm's (maybe partial) config dict.
Returns:
Tuple consisting of a) env ID string and b) env creator callable.
"""
# Environment is specified via a string.
if isinstance(env_specifier, str):
# An already registered env.
if _global_registry.contains(ENV_CREATOR, env_specifier):
return env_specifier, _global_registry.get(ENV_CREATOR, env_specifier)
# A class path specifier.
elif "." in env_specifier:
def env_creator_from_classpath(env_context):
try:
env_obj = from_config(env_specifier, env_context)
except ValueError:
raise EnvError(
ERR_MSG_INVALID_ENV_DESCRIPTOR.format(env_specifier)
)
return env_obj
return env_specifier, env_creator_from_classpath
# Try gym/PyBullet/Vizdoom.
else:
return env_specifier, functools.partial(
_gym_env_creator, env_descriptor=env_specifier
)
elif isinstance(env_specifier, type):
env_id = env_specifier # .__name__
if config.get("remote_worker_envs"):
# Check gym version (0.22 or higher?).
# If > 0.21, can't perform auto-wrapping of the given class as this
# would lead to a pickle error.
gym_version = pkg_resources.get_distribution("gym").version
if version.parse(gym_version) >= version.parse("0.22"):
raise ValueError(
"Cannot specify a gym.Env class via `config.env` while setting "
"`config.remote_worker_env=True` AND your gym version is >= "
"0.22! Try installing an older version of gym or set `config."
"remote_worker_env=False`."
)
@ray.remote(num_cpus=1)
class _wrapper(env_specifier):
# Add convenience `_get_spaces` and `_is_multi_agent`
# methods:
def _get_spaces(self):
return self.observation_space, self.action_space
def _is_multi_agent(self):
from ray.rllib.env.multi_agent_env import MultiAgentEnv
return isinstance(self, MultiAgentEnv)
return env_id, lambda cfg: _wrapper.remote(cfg)
else:
return env_id, lambda cfg: env_specifier(cfg)
# No env -> Env creator always returns None.
elif env_specifier is None:
return None, lambda env_config: None
else:
raise ValueError(
"{} is an invalid env specifier. ".format(env_specifier)
+ "You can specify a custom env as either a class "
'(e.g., YourEnvCls) or a registered env id (e.g., "your_env").'
)
def _sync_filters_if_needed(
self,
from_worker: RolloutWorker,
workers: WorkerSet,
timeout_seconds: Optional[float] = None,
):
if (
from_worker
and self.config.get("observation_filter", "NoFilter") != "NoFilter"
):
FilterManager.synchronize(
from_worker.filters,
workers.remote_workers(),
update_remote=self.config["synchronize_filters"],
timeout_seconds=timeout_seconds,
)
logger.debug("synchronized filters: {}".format(from_worker.filters))
@DeveloperAPI
def _sync_weights_to_workers(
self,
*,
worker_set: Optional[WorkerSet] = None,
workers: Optional[List[RolloutWorker]] = None,
) -> None:
"""Sync "main" weights to given WorkerSet or list of workers."""
assert worker_set is not None
# Broadcast the new policy weights to all evaluation workers.
logger.info("Synchronizing weights to workers.")
weights = ray.put(self.workers.local_worker().get_state())
worker_set.foreach_worker(lambda w: w.set_state(ray.get(weights)))
@classmethod
@override(Trainable)
def resource_help(cls, config: AlgorithmConfigDict) -> str:
return (
"\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config)
)
@classmethod
def merge_trainer_configs(
cls,
config1: AlgorithmConfigDict,
config2: PartialAlgorithmConfigDict,
_allow_unknown_configs: Optional[bool] = None,
) -> AlgorithmConfigDict:
"""Merges a complete Algorithm config dict with a partial override dict.
Respects nested structures within the config dicts. The values in the
partial override dict take priority.
Args:
config1: The complete Algorithm's dict to be merged (overridden)
with `config2`.
config2: The partial override config dict to merge on top of
`config1`.
_allow_unknown_configs: If True, keys in `config2` that don't exist
in `config1` are allowed and will be added to the final config.
Returns:
The merged full algorithm config dict.
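Example (illustrative values; `full_config` is assumed to be a complete
AlgorithmConfigDict, e.g. an algorithm's default config):
>>> merged = Algorithm.merge_trainer_configs( # doctest: +SKIP
>>> config1=full_config, # doctest: +SKIP
>>> config2={"train_batch_size": 4000}) # doctest: +SKIP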
"""
config1 = copy.deepcopy(config1)
if "callbacks" in config2 and type(config2["callbacks"]) is dict:
deprecation_warning(
"callbacks dict interface",
"a class extending rllib.algorithms.callbacks.DefaultCallbacks; "
"see `rllib/examples/custom_metrics_and_callbacks.py` for an example.",
error=True,
)
if _allow_unknown_configs is None:
_allow_unknown_configs = cls._allow_unknown_configs
return deep_update(
config1,
config2,
_allow_unknown_configs,
cls._allow_unknown_subkeys,
cls._override_all_subkeys_if_type_changes,
cls._override_all_key_list,
)
@staticmethod
def validate_framework(config: PartialAlgorithmConfigDict) -> None:
"""Validates the config dictionary wrt the framework settings.
Args:
config: The config dictionary to be validated.
"""
_tf1, _tf, _tfv = None, None, None
_torch = None
framework = config["framework"]
tf_valid_frameworks = {"tf", "tf2", "tfe"}
if framework not in tf_valid_frameworks and framework != "torch":
return
elif framework in tf_valid_frameworks:
_tf1, _tf, _tfv = try_import_tf()
else:
_torch, _ = try_import_torch()
def check_if_correct_nn_framework_installed():
"""Check if tf/torch experiment is running and tf/torch installed."""
if framework in tf_valid_frameworks:
if not (_tf1 or _tf):
raise ImportError(
(
"TensorFlow was specified as the 'framework' "
"inside of your config dictionary. However, there was "
"no installation found. You can install TensorFlow "
"via `pip install tensorflow`"
)
)
elif framework == "torch":
if not _torch:
raise ImportError(
(
"PyTorch was specified as the 'framework' inside "
"of your config dictionary. However, there was no "
"installation found. You can install PyTorch via "
"`pip install torch`"
)
)
def resolve_tf_settings():
"""Check and resolve tf settings."""
if _tf1 and config["framework"] in ["tf2", "tfe"]:
if config["framework"] == "tf2" and _tfv < 2:
raise ValueError(
"You configured `framework`=tf2, but your installed "
"pip tf-version is < 2.0! Make sure your TensorFlow "
"version is >= 2.x."
)
if not _tf1.executing_eagerly():
_tf1.enable_eager_execution()
# Recommend setting tracing to True for speedups.
logger.info(
f"Executing eagerly (framework='{config['framework']}'),"
f" with eager_tracing={config['eager_tracing']}. For "
"production workloads, make sure to set eager_tracing=True"
" in order to match the speed of tf-static-graph "
"(framework='tf'). For debugging purposes, "
"`eager_tracing=False` is the best choice."
)
# Tf-static-graph (framework=tf): Recommend upgrading to tf2 and
# enabling eager tracing for similar speed.
elif _tf1 and config["framework"] == "tf":
logger.info(
"Your framework setting is 'tf', meaning you are using "
"static-graph mode. Set framework='tf2' to enable eager "
"execution with tf2.x. You may also then want to set "
"eager_tracing=True in order to reach similar execution "
"speed as with static-graph mode."
)
check_if_correct_nn_framework_installed()
resolve_tf_settings()
@OverrideToImplementCustomLogic_CallToSuperRecommended
@DeveloperAPI
def validate_config(self, config: AlgorithmConfigDict) -> None:
"""Validates a given config dict for this Algorithm.
Users should override this method to implement custom validation
behavior. It is recommended to call `super().validate_config()` in
this override.
Args:
config: The given config dict to check.
Raises:
ValueError: If there is something wrong with the config.
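Example (a sketch of a custom override; `MyAlgo` and the extra check
are illustrative):
>>> class MyAlgo(Algorithm): # doctest: +SKIP
>>> def validate_config(self, config): # doctest: +SKIP
>>> super().validate_config(config) # doctest: +SKIP
>>> if config["train_batch_size"] <= 0: # doctest: +SKIP
>>> raise ValueError("`train_batch_size` must be > 0!") # doctest: +SKIP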
"""
model_config = config.get("model")
if model_config is None:
config["model"] = model_config = {}
# Use DefaultCallbacks class, if callbacks is None.
if config["callbacks"] is None:
config["callbacks"] = DefaultCallbacks
# Check whether the given `callbacks` is a callable.
if not callable(config["callbacks"]):
raise ValueError(
"`callbacks` must be a callable method that "
"returns a subclass of DefaultCallbacks, got "
f"{config['callbacks']}!"
)
# Multi-GPU settings.
simple_optim_setting = config.get("simple_optimizer", DEPRECATED_VALUE)
if simple_optim_setting != DEPRECATED_VALUE:
deprecation_warning(old="simple_optimizer", error=False)
# Validate "multiagent" sub-dict and convert policy 4-tuples to
# PolicySpec objects.
policies, is_multi_agent = check_multi_agent(config)
framework = config.get("framework")
# Multi-GPU setting: Must use MultiGPUTrainOneStep.
if config.get("num_gpus", 0) > 1:
if framework in ["tfe", "tf2"]:
raise ValueError(
"`num_gpus` > 1 not supported yet for "
"framework={}!".format(framework)
)
elif simple_optim_setting is True:
raise ValueError(
"Cannot use `simple_optimizer` if `num_gpus` > 1! "
"Consider not setting `simple_optimizer` in your config."
)
config["simple_optimizer"] = False
# Auto-setting: Use simple-optimizer for tf-eager or multiagent,
# otherwise: MultiGPUTrainOneStep (if supported by the algo's execution
# plan).
elif simple_optim_setting == DEPRECATED_VALUE:
# tf-eager: Must use simple optimizer.
if framework not in ["tf", "torch"]:
config["simple_optimizer"] = True
# Multi-agent case: Try using MultiGPU optimizer (only
# if all policies used are DynamicTFPolicies or TorchPolicies).
elif is_multi_agent:
from ray.rllib.policy.dynamic_tf_policy import DynamicTFPolicy
from ray.rllib.policy.torch_policy import TorchPolicy
default_policy_cls = self.get_default_policy_class(config)
if any(
(p.policy_class or default_policy_cls) is None
or not issubclass(
p.policy_class or default_policy_cls,
(DynamicTFPolicy, TorchPolicy),
)
for p in config["multiagent"]["policies"].values()
):
config["simple_optimizer"] = True
else:
config["simple_optimizer"] = False
else:
config["simple_optimizer"] = False
# User manually set simple-optimizer to False -> Error if tf-eager.
elif simple_optim_setting is False:
if framework in ["tfe", "tf2"]:
raise ValueError(
"`simple_optimizer=False` not supported for "
"framework={}!".format(framework)
)
# Check model config.
# If no preprocessing, propagate into model's config as well
# (so model will know, whether inputs are preprocessed or not).
if config["_disable_preprocessor_api"] is True:
model_config["_disable_preprocessor_api"] = True
# If no action flattening, propagate into model's config as well
# (so model will know, whether action inputs are already flattened or
# not).
if config["_disable_action_flattening"] is True:
model_config["_disable_action_flattening"] = True
# Prev_a/r settings.
prev_a_r = model_config.get("lstm_use_prev_action_reward", DEPRECATED_VALUE)
if prev_a_r != DEPRECATED_VALUE:
deprecation_warning(
"model.lstm_use_prev_action_reward",
"model.lstm_use_prev_action and model.lstm_use_prev_reward",
error=True,
)
model_config["lstm_use_prev_action"] = prev_a_r
model_config["lstm_use_prev_reward"] = prev_a_r
# Check batching/sample collection settings.
if config["batch_mode"] not in ["truncate_episodes", "complete_episodes"]:
raise ValueError(
"`batch_mode` must be one of [truncate_episodes|"
"complete_episodes]! Got {}".format(config["batch_mode"])
)
# Store multi-agent batch count mode.
self._by_agent_steps = (
self.config["multiagent"].get("count_steps_by") == "agent_steps"
)
# Metrics settings.
if (
config.get("metrics_smoothing_episodes", DEPRECATED_VALUE)
!= DEPRECATED_VALUE
):
deprecation_warning(
old="metrics_smoothing_episodes",
new="metrics_num_episodes_for_smoothing",
error=True,
)
config["metrics_num_episodes_for_smoothing"] = config[
"metrics_smoothing_episodes"
]
if config.get("min_iter_time_s", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="min_iter_time_s",
new="min_time_s_per_iteration",
error=True,
)
config["min_time_s_per_iteration"] = config["min_iter_time_s"] or 0
if config.get("min_time_s_per_reporting", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="min_time_s_per_reporting",
new="min_time_s_per_iteration",
error=True,
)
config["min_time_s_per_iteration"] = config["min_time_s_per_reporting"] or 0
if (
config.get("min_sample_timesteps_per_reporting", DEPRECATED_VALUE)
!= DEPRECATED_VALUE
):
deprecation_warning(
old="min_sample_timesteps_per_reporting",
new="min_sample_timesteps_per_iteration",
error=True,
)
config["min_sample_timesteps_per_iteration"] = (
config["min_sample_timesteps_per_reporting"] or 0
)
if (
config.get("min_train_timesteps_per_reporting", DEPRECATED_VALUE)
!= DEPRECATED_VALUE
):
deprecation_warning(
old="min_train_timesteps_per_reporting",
new="min_train_timesteps_per_iteration",
error=True,
)
config["min_train_timesteps_per_iteration"] = (
config["min_train_timesteps_per_reporting"] or 0
)
if config.get("collect_metrics_timeout", DEPRECATED_VALUE) != DEPRECATED_VALUE:
# TODO: Warn once all algos use the `training_iteration` method.
# deprecation_warning(
# old="collect_metrics_timeout",
# new="metrics_episode_collection_timeout_s",
# error=False,
# )
config["metrics_episode_collection_timeout_s"] = config[
"collect_metrics_timeout"
]
if config.get("timesteps_per_iteration", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="timesteps_per_iteration",
new="`min_sample_timesteps_per_iteration` OR "
"`min_train_timesteps_per_iteration`",
error=True,
)
config["min_sample_timesteps_per_iteration"] = (
config["timesteps_per_iteration"] or 0
)
config["timesteps_per_iteration"] = DEPRECATED_VALUE
# Evaluation settings.
# Deprecated setting: `evaluation_num_episodes`.
if config.get("evaluation_num_episodes", DEPRECATED_VALUE) != DEPRECATED_VALUE:
deprecation_warning(
old="evaluation_num_episodes",
new="`evaluation_duration` and `evaluation_duration_unit=episodes`",
error=True,
)
config["evaluation_duration"] = config["evaluation_num_episodes"]
config["evaluation_duration_unit"] = "episodes"
config["evaluation_num_episodes"] = DEPRECATED_VALUE
# If `evaluation_num_workers` > 0, warn if `evaluation_interval` is
# None (also set `evaluation_interval` to 1).
if config["evaluation_num_workers"] > 0 and not config["evaluation_interval"]:
logger.warning(
f"You have specified {config['evaluation_num_workers']} "
"evaluation workers, but your `evaluation_interval` is None! "
"Therefore, evaluation will not occur automatically with each"
" call to `Algorithm.train()`. Instead, you will have to call "
"`Algorithm.evaluate()` manually in order to trigger an "
"evaluation run."
)
# If `evaluation_num_workers=0` and
# `evaluation_parallel_to_training=True`, warn that you need
# at least one remote eval worker for parallel training and
# evaluation, and set `evaluation_parallel_to_training` to False.
elif config["evaluation_num_workers"] == 0 and config.get(
"evaluation_parallel_to_training", False
):
logger.warning(
"`evaluation_parallel_to_training` can only be done if "
"`evaluation_num_workers` > 0! Setting "
"`evaluation_parallel_to_training` to False."
)
config["evaluation_parallel_to_training"] = False
# If `evaluation_duration=auto`, error if
# `evaluation_parallel_to_training=False`.
if config["evaluation_duration"] == "auto":
if not config["evaluation_parallel_to_training"]:
raise ValueError(
"`evaluation_duration=auto` not supported for "
"`evaluation_parallel_to_training=False`!"
)
# Make sure, it's an int otherwise.
elif (
not isinstance(config["evaluation_duration"], int)
or config["evaluation_duration"] <= 0
):
raise ValueError(
"`evaluation_duration` ({}) must be an int and "
">0!".format(config["evaluation_duration"])
)
@staticmethod
@ExperimentalAPI
def validate_env(env: EnvType, env_context: EnvContext) -> None:
"""Env validator function for this Algorithm class.
Override this in child classes to define custom validation
behavior.
Args:
env: The (sub-)environment to validate. This is normally a
single sub-environment (e.g. a gym.Env) within a vectorized
setup.
env_context: The EnvContext to configure the environment.
Raises:
Exception in case something is wrong with the given environment.
"""
pass
def try_recover_from_step_attempt(self, error, worker_set, ignore, recreate) -> int:
"""Try to identify and remove any unhealthy workers (incl. eval workers).
This method is called after an unexpected remote error is encountered
from a worker during the call to `self.step()`. It issues check requests to
all current workers and removes any that respond with error. If no healthy
workers remain, an error is raised.
Returns:
The number of remote workers recreated.
"""
# @ray.remote RolloutWorker failure.
if isinstance(error, RayError):
# Try to recover w/o the failed worker.
if ignore or recreate:
logger.exception(
"Error in training or evaluation attempt! Trying to recover."
)
# Error out.
else:
logger.warning(
"Worker crashed during training or evaluation! "
"To try to continue without failed "
"worker(s), set `ignore_worker_failures=True`. "
"To try to recover the failed worker(s), set "
"`recreate_failed_workers=True`."
)
raise error
# Any other exception.
else:
            # Allow log messages to propagate.
time.sleep(0.5)
raise error
removed_workers, new_workers = [], []
# Search for failed workers and try to recover (restart) them.
if recreate:
removed_workers, new_workers = worker_set.recreate_failed_workers(
local_worker_for_synching=self.workers.local_worker()
)
elif ignore:
removed_workers = worker_set.remove_failed_workers()
# If `worker_set` is the main training WorkerSet: `self.workers`.
if worker_set is getattr(self, "workers", None):
# Call the `on_worker_failures` callback.
self.on_worker_failures(removed_workers, new_workers)
# Recreate execution_plan iterator.
if not self.config.get("_disable_execution_plan_api") and callable(
self.execution_plan
):
logger.warning("Recreating execution plan after failure")
self.train_exec_impl = self.execution_plan(
worker_set, self.config, **self._kwargs_for_execution_plan()
)
elif self._evaluation_async_req_manager is not None and worker_set is getattr(
self, "evaluation_workers", None
):
self._evaluation_async_req_manager.remove_workers(removed_workers)
self._evaluation_async_req_manager.add_workers(new_workers)
return len(new_workers)
def on_worker_failures(
self, removed_workers: List[ActorHandle], new_workers: List[ActorHandle]
):
"""Called after a worker failure is detected.
Args:
removed_workers: List of removed workers.
new_workers: List of new workers.
"""
pass
@override(Trainable)
def _export_model(
self, export_formats: List[str], export_dir: str
) -> Dict[str, str]:
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
if ExportFormat.ONNX in export_formats:
path = os.path.join(export_dir, ExportFormat.ONNX)
self.export_policy_model(path, onnx=int(os.getenv("ONNX_OPSET", "11")))
exported[ExportFormat.ONNX] = path
return exported
def import_model(self, import_file: str):
"""Imports a model from import_file.
Note: Currently, only h5 files are supported.
Args:
import_file: The file to import the model from.
Returns:
A dict that maps ExportFormats to successfully exported models.
"""
# Check for existence.
if not os.path.exists(import_file):
raise FileNotFoundError(
"`import_file` '{}' does not exist! Can't import Model.".format(
import_file
)
)
# Get the format of the given file.
import_format = "h5" # TODO(sven): Support checkpoint loading.
ExportFormat.validate([import_format])
if import_format != ExportFormat.H5:
raise NotImplementedError
else:
return self.import_policy_model_from_h5(import_file)
@PublicAPI
def __getstate__(self) -> Dict:
"""Returns current state of Algorithm, sufficient to restore it from scratch.
Returns:
The current state dict of this Algorithm, which can be used to sufficiently
restore the algorithm from scratch without any other information.
"""
# Add config to state so complete Algorithm can be reproduced w/o it.
state = {
"algorithm_class": type(self),
"config": self.config,
}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().get_state()
# TODO: Experimental functionality: Store contents of replay buffer
# to checkpoint, only if user has configured this.
if self.local_replay_buffer is not None and self.config.get(
"store_buffer_in_checkpoints"
):
state["local_replay_buffer"] = self.local_replay_buffer.get_state()
if self.train_exec_impl is not None:
state["train_exec_impl"] = self.train_exec_impl.shared_metrics.get().save()
else:
state["counters"] = self._counters
return state
@PublicAPI
def __setstate__(self, state) -> None:
"""Sets the algorithm to the provided state.
Args:
state: The state dict to restore this Algorithm instance to. `state` may
have been returned by a call to an Algorithm's `__getstate__()` method.
"""
# TODO (sven): Validate that our config and the config in state are compatible.
# For example, the model architectures may differ.
# Also, what should the behavior be if e.g. some training parameter
# (e.g. lr) changed?
if hasattr(self, "workers") and "worker" in state:
self.workers.local_worker().set_state(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.set_state.remote(remote_state)
if self.evaluation_workers:
# If evaluation workers are used, also restore the policies
# there in case they are used for evaluation purpose.
for r in self.evaluation_workers.remote_workers():
r.set_state.remote(remote_state)
# If necessary, restore replay data as well.
if self.local_replay_buffer is not None:
# TODO: Experimental functionality: Restore contents of replay
# buffer from checkpoint, only if user has configured this.
if self.config.get("store_buffer_in_checkpoints"):
if "local_replay_buffer" in state:
self.local_replay_buffer.set_state(state["local_replay_buffer"])
else:
logger.warning(
"`store_buffer_in_checkpoints` is True, but no replay "
"data found in state!"
)
elif "local_replay_buffer" in state and log_once(
"no_store_buffer_in_checkpoints_but_data_found"
):
logger.warning(
"`store_buffer_in_checkpoints` is False, but some replay "
"data found in state!"
)
if self.train_exec_impl is not None:
self.train_exec_impl.shared_metrics.get().restore(state["train_exec_impl"])
elif "counters" in state:
self._counters = state["counters"]
@staticmethod
def _checkpoint_info_to_algorithm_state(
checkpoint_info: dict,
policy_ids: Optional[Container[PolicyID]] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Container[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
) -> Dict:
"""Converts a checkpoint info or object to a proper Algorithm state dict.
The returned state dict can be used inside self.__setstate__().
Args:
checkpoint_info: A checkpoint info dict as returned by
`ray.rllib.utils.checkpoints.get_checkpoint_info(
[checkpoint dir or AIR Checkpoint])`.
policy_ids: Optional list/set of PolicyIDs. If not None, only those policies
listed here will be included in the returned state. Note that
state items such as filters, the `is_policy_to_train` function, as
well as the multi-agent `policy_ids` dict will be adjusted as well,
based on this arg.
policy_mapping_fn: An optional (updated) policy mapping function
to include in the returned state.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?) to include in the returned state.
Returns:
The state dict usable within the `self.__setstate__()` method.
"""
if checkpoint_info["type"] != "Algorithm":
raise ValueError(
"`checkpoint` arg passed to "
"`Algorithm._checkpoint_info_to_algorithm_state()` must be an "
f"Algorithm checkpoint (but is {checkpoint_info['type']})!"
)
with open(checkpoint_info["state_file"], "rb") as f:
state = pickle.load(f)
# New checkpoint format: Policies are in separate sub-dirs.
# Note: Algorithms like ES/ARS don't have a WorkerSet, so we just return
# the plain state here.
if (
checkpoint_info["checkpoint_version"] > version.Version("0.1")
and state.get("worker") is not None
):
worker_state = state["worker"]
# Retrieve the set of all required policy IDs.
policy_ids = set(
policy_ids if policy_ids is not None else worker_state["policy_ids"]
)
# Remove those policies entirely from filters that are not in
# `policy_ids`.
worker_state["filters"] = {
pid: filter
for pid, filter in worker_state["filters"].items()
if pid in policy_ids
}
# Remove policies from multiagent dict that are not in `policy_ids`.
policies_dict = state["config"]["multiagent"]["policies"]
policies_dict = {
pid: spec for pid, spec in policies_dict.items() if pid in policy_ids
}
state["config"]["multiagent"]["policies"] = policies_dict
# Prepare local `worker` state to add policies' states into it,
# read from separate policy checkpoint files.
worker_state["policy_states"] = {}
for pid in policy_ids:
policy_state_file = os.path.join(
checkpoint_info["checkpoint_dir"],
"policies",
pid,
"policy_state.pkl",
)
if not os.path.isfile(policy_state_file):
raise ValueError(
"Given checkpoint does not seem to be valid! No policy "
f"state file found for PID={pid}. "
f"The file not found is: {policy_state_file}."
)
with open(policy_state_file, "rb") as f:
worker_state["policy_states"][pid] = pickle.load(f)
if policy_mapping_fn is not None:
worker_state["policy_mapping_fn"] = policy_mapping_fn
if policies_to_train is not None:
worker_state["is_policy_to_train"] = policies_to_train
return state
@DeveloperAPI
def _create_local_replay_buffer_if_necessary(
self, config: PartialAlgorithmConfigDict
) -> Optional[MultiAgentReplayBuffer]:
"""Create a MultiAgentReplayBuffer instance if necessary.
Args:
config: Algorithm-specific configuration data.
Returns:
MultiAgentReplayBuffer instance based on algorithm config.
None, if local replay buffer is not needed.
"""
if not config.get("replay_buffer_config") or config["replay_buffer_config"].get(
"no_local_replay_buffer" or config.get("no_local_replay_buffer")
):
return
buffer_type = config["replay_buffer_config"]["type"]
return from_config(buffer_type, config["replay_buffer_config"])
@DeveloperAPI
def _kwargs_for_execution_plan(self):
kwargs = {}
if self.local_replay_buffer is not None:
kwargs["local_replay_buffer"] = self.local_replay_buffer
return kwargs
def _run_one_training_iteration(self) -> Tuple[ResultDict, "TrainIterCtx"]:
"""Runs one training iteration (self.iteration will be +1 after this).
Calls `self.training_step()` repeatedly until the minimum time (sec),
sample- or training steps have been reached.
Returns:
The results dict from the training iteration.
"""
# In case we are training (in a thread) parallel to evaluation,
# we may have to re-enable eager mode here (gets disabled in the
# thread).
if (
self.config.get("framework") in ["tf2", "tfe"]
and not tf.executing_eagerly()
):
tf1.enable_eager_execution()
results = None
# Create a step context ...
with TrainIterCtx(algo=self) as train_iter_ctx:
# .. so we can query it whether we should stop the iteration loop (e.g.
# when we have reached `min_time_s_per_iteration`).
num_recreated = 0
while not train_iter_ctx.should_stop(results):
# Try to train one step.
try:
# TODO (avnishn): Remove the execution plan API by q1 2023
with self._timers[TRAINING_ITERATION_TIMER]:
if self.config["_disable_execution_plan_api"]:
results = self.training_step()
else:
results = next(self.train_exec_impl)
# In case of any failures, try to ignore/recover the failed workers.
except Exception as e:
num_recreated += self.try_recover_from_step_attempt(
error=e,
worker_set=self.workers,
ignore=self.config["ignore_worker_failures"],
recreate=self.config["recreate_failed_workers"],
)
results["num_recreated_workers"] = num_recreated
return results, train_iter_ctx
def _run_one_evaluation(
self,
train_future: Optional[concurrent.futures.ThreadPoolExecutor] = None,
) -> ResultDict:
"""Runs evaluation step via `self.evaluate()` and handling worker failures.
Args:
            train_future: In case we are training and evaluating in parallel,
                this arg carries the currently running ThreadPoolExecutor
                object that runs the training iteration.
Returns:
The results dict from the evaluation call.
"""
eval_results = {
"evaluation": {
"episode_reward_max": np.nan,
"episode_reward_min": np.nan,
"episode_reward_mean": np.nan,
}
}
eval_func_to_use = (
self._evaluate_async
if self.config["enable_async_evaluation"]
else self.evaluate
)
num_recreated = 0
try:
if self.config["evaluation_duration"] == "auto":
assert (
train_future is not None
and self.config["evaluation_parallel_to_training"]
)
unit = self.config["evaluation_duration_unit"]
eval_results = eval_func_to_use(
duration_fn=functools.partial(
self._automatic_evaluation_duration_fn,
unit,
self.config["evaluation_num_workers"],
self.config["evaluation_config"],
train_future,
)
)
# Run `self.evaluate()` only once per training iteration.
else:
eval_results = eval_func_to_use()
# In case of any failures, try to ignore/recover the failed evaluation workers.
except Exception as e:
num_recreated = self.try_recover_from_step_attempt(
error=e,
worker_set=self.evaluation_workers,
ignore=self.config["evaluation_config"].get("ignore_worker_failures"),
recreate=self.config["evaluation_config"].get(
"recreate_failed_workers"
),
)
# `self._evaluate_async` handles its own worker failures and already adds
# this metric, but `self.evaluate` doesn't.
if "num_recreated_workers" not in eval_results["evaluation"]:
eval_results["evaluation"]["num_recreated_workers"] = num_recreated
# Add number of healthy evaluation workers after this iteration.
eval_results["evaluation"]["num_healthy_workers"] = (
len(self.evaluation_workers.remote_workers())
if self.evaluation_workers is not None
else 0
)
return eval_results
def _run_one_training_iteration_and_evaluation_in_parallel(
self,
) -> Tuple[ResultDict, "TrainIterCtx"]:
"""Runs one training iteration and one evaluation step in parallel.
First starts the training iteration (via `self._run_one_training_iteration()`)
within a ThreadPoolExecutor, then runs the evaluation step in parallel.
In auto-duration mode (config.evaluation_duration=auto), makes sure the
evaluation step takes roughly the same time as the training iteration.
Returns:
The accumulated training and evaluation results.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
train_future = executor.submit(lambda: self._run_one_training_iteration())
# Pass the train_future into `self._run_one_evaluation()` to allow it
# to run exactly as long as the training iteration takes in case
# evaluation_duration=auto.
results = self._run_one_evaluation(train_future)
# Collect the training results from the future.
train_results, train_iter_ctx = train_future.result()
results.update(train_results)
return results, train_iter_ctx
@staticmethod
def _automatic_evaluation_duration_fn(
unit, num_eval_workers, eval_cfg, train_future, num_units_done
):
# Training is done and we already ran at least one
# evaluation -> Nothing left to run.
if num_units_done > 0 and train_future.done():
return 0
# Count by episodes. -> Run n more
# (n=num eval workers).
elif unit == "episodes":
return num_eval_workers
# Count by timesteps. -> Run n*m*p more
# (n=num eval workers; m=rollout fragment length;
# p=num-envs-per-worker).
else:
return (
num_eval_workers
* eval_cfg["rollout_fragment_length"]
* eval_cfg["num_envs_per_worker"]
)
def _compile_iteration_results(
self, *, episodes_this_iter, step_ctx, iteration_results=None
):
# Return dict.
results: ResultDict = {}
iteration_results = iteration_results or {}
# Evaluation results.
if "evaluation" in iteration_results:
results["evaluation"] = iteration_results.pop("evaluation")
# Custom metrics and episode media.
results["custom_metrics"] = iteration_results.pop("custom_metrics", {})
results["episode_media"] = iteration_results.pop("episode_media", {})
results["num_recreated_workers"] = iteration_results.pop(
"num_recreated_workers", 0
)
# Learner info.
results["info"] = {LEARNER_INFO: iteration_results}
# Calculate how many (if any) of older, historical episodes we have to add to
# `episodes_this_iter` in order to reach the required smoothing window.
episodes_for_metrics = episodes_this_iter[:]
missing = self.config["metrics_num_episodes_for_smoothing"] - len(
episodes_this_iter
)
# We have to add some older episodes to reach the smoothing window size.
if missing > 0:
episodes_for_metrics = self._episode_history[-missing:] + episodes_this_iter
assert (
len(episodes_for_metrics)
<= self.config["metrics_num_episodes_for_smoothing"]
)
# Note that when there are more than `metrics_num_episodes_for_smoothing`
# episodes in `episodes_for_metrics`, leave them as-is. In this case, we'll
# compute the stats over that larger number.
# Add new episodes to our history and make sure it doesn't grow larger than
# needed.
self._episode_history.extend(episodes_this_iter)
self._episode_history = self._episode_history[
-self.config["metrics_num_episodes_for_smoothing"] :
]
results["sampler_results"] = summarize_episodes(
episodes_for_metrics,
episodes_this_iter,
self.config["keep_per_episode_custom_metrics"],
)
# TODO: Don't dump sampler results into top-level.
results.update(results["sampler_results"])
results["num_healthy_workers"] = len(self.workers.remote_workers())
# Train-steps- and env/agent-steps this iteration.
for c in [
NUM_AGENT_STEPS_SAMPLED,
NUM_AGENT_STEPS_TRAINED,
NUM_ENV_STEPS_SAMPLED,
NUM_ENV_STEPS_TRAINED,
]:
results[c] = self._counters[c]
if self._by_agent_steps:
results[NUM_AGENT_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled
results[NUM_AGENT_STEPS_TRAINED + "_this_iter"] = step_ctx.trained
# TODO: For CQL and other algos, count by trained steps.
results["timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED]
# TODO: Backward compatibility.
results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained
else:
results[NUM_ENV_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled
results[NUM_ENV_STEPS_TRAINED + "_this_iter"] = step_ctx.trained
# TODO: For CQL and other algos, count by trained steps.
results["timesteps_total"] = self._counters[NUM_ENV_STEPS_SAMPLED]
# TODO: Backward compatibility.
results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained
# TODO: Backward compatibility.
results["agent_timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED]
# Process timer results.
timers = {}
for k, timer in self._timers.items():
timers["{}_time_ms".format(k)] = round(timer.mean * 1000, 3)
if timer.has_units_processed():
timers["{}_throughput".format(k)] = round(timer.mean_throughput, 3)
results["timers"] = timers
# Process counter results.
counters = {}
for k, counter in self._counters.items():
counters[k] = counter
results["counters"] = counters
# TODO: Backward compatibility.
results["info"].update(counters)
return results
def __repr__(self):
return type(self).__name__
def _record_usage(self, config):
"""Record the framework and algorithm used.
Args:
config: Algorithm config dict.
"""
record_extra_usage_tag(TagKey.RLLIB_FRAMEWORK, config["framework"])
record_extra_usage_tag(TagKey.RLLIB_NUM_WORKERS, str(config["num_workers"]))
alg = self.__class__.__name__
# We do not want to collect user defined algorithm names.
if alg not in ALL_ALGORITHMS:
alg = "USER_DEFINED"
record_extra_usage_tag(TagKey.RLLIB_ALGORITHM, alg)
@Deprecated(new="Algorithm.compute_single_action()", error=True)
def compute_action(self, *args, **kwargs):
return self.compute_single_action(*args, **kwargs)
@Deprecated(new="construct WorkerSet(...) instance directly", error=False)
def _make_workers(
self,
*,
env_creator: EnvCreator,
validate_env: Optional[Callable[[EnvType, EnvContext], None]],
policy_class: Type[Policy],
config: AlgorithmConfigDict,
num_workers: int,
local_worker: bool = True,
) -> WorkerSet:
return WorkerSet(
env_creator=env_creator,
validate_env=validate_env,
policy_class=policy_class,
trainer_config=config,
num_workers=num_workers,
local_worker=local_worker,
logdir=self.logdir,
)
@staticmethod
@Deprecated(new="Algorithm.validate_config()", error=True)
def _validate_config(config, trainer_or_none):
assert trainer_or_none is not None
return trainer_or_none.validate_config(config)
# TODO: Create a dict that throws a deprecation warning once we have fully moved
# to AlgorithmConfig() objects (some algos still missing).
COMMON_CONFIG: AlgorithmConfigDict = AlgorithmConfig(Algorithm).to_dict()
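# Illustrative sketch (added for clarity, not part of RLlib): the
# `validate_config` and `validate_env` docstrings above recommend overriding
# them in a subclass and calling the super implementation. A minimal,
# hypothetical subclass could look like this ("_ExampleAlgo" and the
# "my_rate" config key are invented for the example).
class _ExampleAlgo(Algorithm):
    @override(Algorithm)
    def validate_config(self, config: AlgorithmConfigDict) -> None:
        # Run all of the built-in checks first, as recommended above.
        super().validate_config(config)
        # Then layer algorithm-specific validation on top.
        if config.get("my_rate", 0.0) < 0.0:
            raise ValueError("`my_rate` must be >= 0!")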
class TrainIterCtx:
def __init__(self, algo: Algorithm):
self.algo = algo
def __enter__(self):
# Before first call to `step()`, `results` is expected to be None ->
# Start with self.failures=-1 -> set to 0 before the very first call
# to `self.step()`.
self.failures = -1
self.time_start = time.time()
self.sampled = 0
self.trained = 0
self.init_env_steps_sampled = self.algo._counters[NUM_ENV_STEPS_SAMPLED]
self.init_env_steps_trained = self.algo._counters[NUM_ENV_STEPS_TRAINED]
self.init_agent_steps_sampled = self.algo._counters[NUM_AGENT_STEPS_SAMPLED]
self.init_agent_steps_trained = self.algo._counters[NUM_AGENT_STEPS_TRAINED]
self.failure_tolerance = self.algo.config[
"num_consecutive_worker_failures_tolerance"
]
return self
def __exit__(self, *args):
pass
def should_stop(self, results):
# Before first call to `step()`.
if results is None:
# Fail after n retries.
self.failures += 1
if self.failures > self.failure_tolerance:
raise RuntimeError(
"More than `num_consecutive_worker_failures_tolerance="
f"{self.failure_tolerance}` consecutive worker failures! "
"Exiting."
)
# Continue to very first `step()` call or retry `step()` after
# a (tolerable) failure.
return False
# Stopping criteria: Only when using the `training_iteration`
# API, b/c for the `exec_plan` API, the logic to stop is
# already built into the execution plans via the
# `StandardMetricsReporting` op.
elif self.algo.config["_disable_execution_plan_api"]:
if self.algo._by_agent_steps:
self.sampled = (
self.algo._counters[NUM_AGENT_STEPS_SAMPLED]
- self.init_agent_steps_sampled
)
self.trained = (
self.algo._counters[NUM_AGENT_STEPS_TRAINED]
- self.init_agent_steps_trained
)
else:
self.sampled = (
self.algo._counters[NUM_ENV_STEPS_SAMPLED]
- self.init_env_steps_sampled
)
self.trained = (
self.algo._counters[NUM_ENV_STEPS_TRAINED]
- self.init_env_steps_trained
)
min_t = self.algo.config["min_time_s_per_iteration"]
min_sample_ts = self.algo.config["min_sample_timesteps_per_iteration"]
min_train_ts = self.algo.config["min_train_timesteps_per_iteration"]
# Repeat if not enough time has passed or if not enough
# env|train timesteps have been processed (or these min
# values are not provided by the user).
if (
(not min_t or time.time() - self.time_start >= min_t)
and (not min_sample_ts or self.sampled >= min_sample_ts)
and (not min_train_ts or self.trained >= min_train_ts)
):
return True
else:
return False
# No errors (we got results != None) -> Return True
# (meaning: yes, should stop -> no further step attempts).
else:
return True
| [
"[email protected]"
] | |
162eb2ee34fdecebf7be87ac009e79c0a715e25f | 77077a391973d1f8c05647d08fc135facd04fc5e | /xlsxwriter/test/app/test_app02.py | fa347d734560186995daf0fad3e57c79c5129178 | [
"BSD-2-Clause-Views"
] | permissive | DeltaEpsilon7787/XlsxWriter | 28fb1012eaa42ea0f82e063f28c0c548ca016c5e | 550b9c5bd678c861dcc9f6f4072b33a69566e065 | refs/heads/main | 2023-08-02T09:14:10.657395 | 2021-09-06T10:51:56 | 2021-09-06T10:51:56 | 384,948,081 | 0 | 0 | NOASSERTION | 2021-07-11T12:57:26 | 2021-07-11T12:57:25 | null | UTF-8 | Python | false | false | 2,234 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...app import App
class TestAssembleApp(unittest.TestCase):
"""
Test assembling a complete App file.
"""
def test_assemble_xml_file(self):
"""Test writing an App file."""
self.maxDiff = None
fh = StringIO()
app = App()
app._set_filehandle(fh)
app._add_part_name('Sheet1')
app._add_part_name('Sheet2')
app._add_heading_pair(('Worksheets', 2))
app._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
<Application>Microsoft Excel</Application>
<DocSecurity>0</DocSecurity>
<ScaleCrop>false</ScaleCrop>
<HeadingPairs>
<vt:vector size="2" baseType="variant">
<vt:variant>
<vt:lpstr>Worksheets</vt:lpstr>
</vt:variant>
<vt:variant>
<vt:i4>2</vt:i4>
</vt:variant>
</vt:vector>
</HeadingPairs>
<TitlesOfParts>
<vt:vector size="2" baseType="lpstr">
<vt:lpstr>Sheet1</vt:lpstr>
<vt:lpstr>Sheet2</vt:lpstr>
</vt:vector>
</TitlesOfParts>
<Company>
</Company>
<LinksUpToDate>false</LinksUpToDate>
<SharedDoc>false</SharedDoc>
<HyperlinksChanged>false</HyperlinksChanged>
<AppVersion>12.0000</AppVersion>
</Properties>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
884954af9fd64a0f3d0508d1272327e2ed3bedf5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03013/s813541273.py | bdbeb4a2fbfb0352ad56b6b9937305511b1f8a7c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | N, M = map(int, input().split(' '))
broken_list = []
if M > 0:
for i in range(M):
broken_list.append(int(input()))
broken_set =set(broken_list)
nums = [0] * (N + 1)
nums[0] = 1
if 1 not in broken_set:
nums[1] = 1
for i in range(2, N + 1):
nums[i] = nums[i - 1] + nums[i - 2]
if i in broken_set:
nums[i] = 0
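# Worked example (added for clarity, not part of the submitted solution):
# with N = 6, M = 1 and step 3 broken, the table fills as
#   nums = [1, 1, 2, 0, 2, 2, 4]
# (nums[3] is forced to 0 because that step is broken), so the program prints
# 4, the number of ways to climb 6 steps, 1 or 2 at a time, while never
# landing on the broken step.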
print(nums[N] % 1000000007) | [
"[email protected]"
] | |
276a93f98115025f9db6a4c5e6df42b82e9feccc | db274b14aa63f4cf40b1e496ffeef918d8654f69 | /manage.py | d6262ef4e1dd3c3abaf89b085aa2ffe2f3d672f2 | [] | no_license | mahmudgithub/demo_pactics_project_seven | 02f98d8373dfa3e9b5d8e06d2e5f01a030d48291 | 4a8aa330a6abfb5e12916c368bd849190788127a | refs/heads/master | 2022-03-28T11:58:34.185598 | 2020-01-29T06:35:22 | 2020-01-29T06:35:22 | 236,919,480 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_g.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4159cf0257ad3d20a29b9c1d3308026f6be5c1cf | 1925c535d439d2d47e27ace779f08be0b2a75750 | /leetcode/best_time_to_buy_and_sell_stock_4.py | 1d58d8730fa45eba6ecf813ee448ef105a05236d | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | # Say you have an array for which the ith element is the price of a given stock on day i.
# Design an algorithm to find the maximum profit. You may complete at most k transactions.
# Note:
# You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
# Example 1:
# Input: [2,4,1], k = 2
# Output: 2
# Explanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.
# Example 2:
# Input: [3,2,6,5,0,3], k = 2
# Output: 7
# Explanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4.
# Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
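# Illustrative check (added for clarity, not part of the original solution):
# a tiny harness exercising the final `maxProfit` defined below on the two
# examples from the problem statement. Note the DP version uses `xrange`,
# i.e. it targets Python 2; on Python 3 replace `xrange` with `range` first.
def _check_examples():
    assert maxProfit(2, [2, 4, 1]) == 2
    assert maxProfit(2, [3, 2, 6, 5, 0, 3]) == 7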
def maxProfit(k, prices):
if not prices or k < 0: return 0
minimum = prices[0]
profit = 0
for key, v in enumerate(prices):
minimum = min(minimum, v)
if k == 1:
profit = max(profit, v - minimum)
else:
profit = max(profit, v - minimum + maxProfit(k - 1, prices[key + 1:]))
return profit
def maxProfit(k, prices):
n = len(prices)
if n < 2:
return 0
# k is big enougth to cover all ramps.
if k >= n / 2:
return sum(i - j
for i, j in zip(prices[1:], prices[:-1]) if i - j > 0)
globalMax = [[0] * n for _ in xrange(k + 1)]
for i in xrange(1, k + 1):
# The max profit with i transations and selling stock on day j.
localMax = [0] * n
for j in xrange(1, n):
profit = prices[j] - prices[j - 1]
localMax[j] = max(
# We have made max profit with (i - 1) transations in
# (j - 1) days.
# For the last transation, we buy stock on day (j - 1)
# and sell it on day j.
globalMax[i - 1][j - 1] + profit,
# We have made max profit with (i - 1) transations in
# (j - 1) days.
# For the last transation, we buy stock on day j and
# sell it on the same day, so we have 0 profit, apparently
# we do not have to add it.
globalMax[i - 1][j - 1], # + 0,
# We have made profit in (j - 1) days.
# We want to cancel the day (j - 1) sale and sell it on
# day j.
localMax[j - 1] + profit)
globalMax[i][j] = max(globalMax[i][j - 1], localMax[j])
return globalMax[k][-1] | [
"[email protected]"
] | |
7d721c03caa26629e29120c9c88caf4b817914fe | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/codeInsight/smartEnter/colonAfterFinalCaseClauseWithPrecedingIncompleteCaseClause.py | ff245238744da24b5bebf8391bf5e8c4d1ab488c | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 83 | py | match x:
case 1:
pass
case
case 3:
pass
case<caret> | [
"[email protected]"
] | |
f2af3503bf7206c6d28a8f29b727061a682f9706 | 3bafaed1d12e4e1fb221a11998a7b9a858b04644 | /App/migrations/0013_auto_20201230_1553.py | fb1ff2ce8bdd568a36fb4d395ecb6cc782160ba0 | [] | no_license | nian-20/AtroBOM | 8c96e9247292b5f4a3a4f22b7d93a8749f7ed80c | 0370636238e722489b3fddc3a65d4e9ceb7cbfb0 | refs/heads/master | 2023-08-15T09:13:10.042024 | 2021-09-30T19:12:03 | 2021-09-30T19:12:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 3.1.4 on 2020-12-30 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0012_auto_20201230_1544'),
]
operations = [
migrations.AlterField(
model_name='rate',
name='rate',
field=models.CharField(blank=True, max_length=10, null=True, verbose_name=' ضریب مصرف '),
),
]
| [
"[email protected]"
] | |
50a143d4fe47cc7b13e7ca802246ee09743ff7a8 | 2d82d4c6574bd6d32f2cf1c781615f7951f55f66 | /muntjac/event/dd/acceptcriteria/and_.py | 255229b61f9d197892bc0c331d353dba4488b0e7 | [
"Apache-2.0"
] | permissive | metaperl/muntjac | f83f745ee03942a61af92ee7fba7285aa9c46f3c | 8db97712edd81b4d25deaaa48587d2a08010f2c8 | refs/heads/master | 2021-01-15T22:04:25.057862 | 2012-11-09T03:52:59 | 2012-11-09T03:52:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | # @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
"""A compound criterion that accepts the drag if all of its criteria
accept the drag."""
from muntjac.event.dd.acceptcriteria.client_side_criterion import \
ClientSideCriterion
class And(ClientSideCriterion):
"""A compound criterion that accepts the drag if all of its criteria
    accept the drag.
@see: L{Or}
"""
def __init__(self, *criteria):
"""@param criteria:
criteria of which the And criterion will be composed
"""
self.criteria = criteria
def paintContent(self, target):
super(And, self).paintContent(target)
for crit in self.criteria:
crit.paint(target)
def accept(self, dragEvent):
for crit in self.criteria:
if not crit.accept(dragEvent):
return False
return True
def getIdentifier(self):
return 'com.vaadin.event.dd.acceptcriteria.And'
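# Illustrative usage sketch (added for clarity; `criterion_a` and `criterion_b`
# stand for any previously constructed client-side criteria, whatever concrete
# classes the surrounding project provides):
#
#     both_required = And(criterion_a, criterion_b)
#
# A drop is then accepted only if every wrapped criterion accepts it; see the
# companion L{Or} criterion for the "any of them" variant.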
| [
"[email protected]"
] | |
73a3cec53ce6d0265522dccd62a747fdbcca6834 | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderBeginnerContest/1XX/157/D.py | b0ded2ec31985f6eebca56e6df87d7327321da26 | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 2,140 | py | import sys
sys.setrecursionlimit(10 ** 6)
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N, M, K, ABs, CDs):
friend_map = [[] for _ in range(N + 1)]
for a, b in ABs:
friend_map[a].append(b)
friend_map[b].append(a)
block_map = [[] for _ in range(N + 1)]
for c, d in CDs:
block_map[c].append(d)
block_map[d].append(c)
def dfs(group_num, members, now_n):
belongs[now_n] = group_num
members.append(now_n)
for f in friend_map[now_n]:
if belongs[f] == -1:
members = dfs(group_num, members, f)
return members
friend_groups = []
belongs = [-1] * (N + 1)
for i in range(1, N + 1):
if belongs[i] == -1:
m = dfs(len(friend_groups), [], i)
m.sort()
friend_groups.append(m)
ans = ''
for n in range(1, N + 1):
block = 0
group = friend_groups[belongs[n]]
for b in block_map[n]:
if belongs[n] == belongs[b]:
block += 1
ans += ' ' + str(len(group) - len(friend_map[n]) - block - 1)
print(ans[1:])
if __name__ == '__main__':
# # handmade test
# N, M, K = 2 * 10 ** 5, 10 ** 5, 10 ** 5
# ABs = [[1, i] for i in range(2, 10 ** 5 + 2)]
# CDs = [[i, i + 1] for i in range(2, 10 ** 5 + 2)]
# # handmade random
# import random
# N, M, K = 20, 10, 10
# ABs = []
# while True:
# if len(ABs) == M:
# break
# a = random.randint(1, N - 1)
# b = random.randint(a + 1, N)
# if not [a, b] in ABs:
# ABs.append([a, b])
# CDs = []
# while True:
# if len(CDs) == K:
# break
# c = random.randint(1, N - 1)
# d = random.randint(c + 1, N)
# if not [c, d] in ABs and not [c, d] in CDs:
# CDs.append([c, d])
# print(N, M, K)
# print(ABs)
# print(CDs)
N, M, K = map(int, input().split())
ABs = [[int(i) for i in input().split()] for _ in range(M)]
CDs = [[int(i) for i in input().split()] for _ in range(K)]
solve(N, M, K, ABs, CDs)
| [
"[email protected]"
] | |
6aa7e3d975d5bf066350200717a911882e17e7eb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02572/s151845218.py | 31aa5234e9d20d7b4ae01fd2cf130eac5d0d9908 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | N = int(input()) #入力する整数
A = list(map(int,input().split())) #入力する数列A
SUMA = sum(A) #数列の和
MOD = 10**9 + 7 # mod
C = [0] * (N-1) #累積和数列
for i in range(N-1): #\sum_{j = i+1}^{N}を求めて数列に代入する
SUMA -= A[i]
C[i] = SUMA
ans = 0 #求める答え
for i in range(N-1):
ans += A[i]*C[i]
ans %= MOD #その都度modで割った余りにする
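# Worked example (added for clarity, not part of the submitted solution):
# for A = [1, 2, 3] the pairwise-product sum is 1*2 + 1*3 + 2*3 = 11.
# The suffix sums are C = [2+3, 3] = [5, 3], so the loop above computes
# 1*5 + 2*3 = 11, i.e. the same value without an O(N^2) double loop.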
print(ans) # output the answer | [
"[email protected]"
] | |
0225bd6623519534724f02704f9d1bdca8fa82b6 | 210af68aec4713e8cbe8dc988d509090815e6ff4 | /0x04-python-more_data_structures/9-multiply_by_2.py | adcaf10fe0fc6a3ad8467a5cb752a4816fcc9910 | [] | no_license | mahdibz97/holbertonschool-higher_level_programming | 8e383d474438ba563311f829a763ce8733931c1a | 7184a1eadcaf76f33135c00effe4390b1c227cbd | refs/heads/master | 2022-12-19T12:29:44.678292 | 2020-09-25T07:56:44 | 2020-09-25T07:56:44 | 259,281,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python3
def multiply_by_2(a_dictionary):
new = {}
for i in a_dictionary.keys():
new[i] = (a_dictionary[i] * 2)
return new
| [
"[email protected]"
] | |
b67f2769bfefa0625cc6527943ef1b7faf9c0f9a | ff1fe0e31e863ab69e2434b574115fed782d76ad | /set.py | e37f9c7d1e8de9534208c0ced057cebe0e3f014c | [] | no_license | tasnuvaleeya/python_programming | cd7200e0dc0c4ec6bd23c4f9360fc251a7c4a516 | 45a577634e53a1c4cab927eb770cde01a00571ce | refs/heads/master | 2021-04-12T02:47:46.011445 | 2018-03-17T14:54:09 | 2018-03-17T14:54:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | groceries = {'cereal', 'milk','rice', 'beer', 'beer'}
if 'milk' in groceries:
print('you already have milk')
else:
print('oh yes u need milk')
| [
"[email protected]"
] | |
9f46f7e89e19b7e65cfb7e37c5e03e9be0b2d4fe | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/speech-text-file/gTTS/build/lib/gtts/tokenizer/symbols.py | 3d40893c51295eda1b689b6f438f7089a38dc848 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a7c43c0c9dfa06ad8af4ec38d5a26b50deffacc6f2b881170eb8a37576f6d970
size 278
| [
"[email protected]"
] | |
79c19d888d893e972115162a390efd937500f92b | 90f39575e1164e928359dd9afb602999bf68a71c | /valuenode.py | b747c749b403c752a05962ee9c650c90a253b6e9 | [] | no_license | sachinjose/Programming_Language | bcf4cbaa147f236b29be4b97936d3540b6e399fe | 1d5749e7339a95b25ce37a93987b447e7e46e85c | refs/heads/main | 2023-06-01T23:56:39.824175 | 2021-06-17T04:39:55 | 2021-06-17T04:39:55 | 374,157,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,381 | py | from strings_with_arrows import *
import string
import os
import math
import constants
from error import *
from position import *
from lexer import *
from nodes import *
from rtresult import *
########################################################
## Value
########################################################
##Base class for every runtime value (Number, String, List, functions); defines the shared (result, error) operation interface.
class Value:
def __init__(self):
self.set_pos()
self.set_context()
    def set_pos(self, pos_start = None, pos_end = None): ## remember where in the source this value came from, so errors can point at it
self.pos_start = pos_start
self.pos_end = pos_end
return self
def set_context(self, context=None):##Context for error handling
self.context = context
return self
def added_to(self, other):
return None, self.illegal_operation(other)
def subbed_by(self, other):
return None, self.illegal_operation(other)
def multed_by(self, other):
return None, self.illegal_operation(other)
def dived_by(self, other):
return None, self.illegal_operation(other)
def powed_by(self, other):
return None, self.illegal_operation(other)
def get_comparison_eq(self, other):
return None, self.illegal_operation(other)
def get_comparison_ne(self, other):
return None, self.illegal_operation(other)
def get_comparison_lt(self, other):
return None, self.illegal_operation(other)
def get_comparison_gt(self, other):
return None, self.illegal_operation(other)
def get_comparison_lte(self, other):
return None, self.illegal_operation(other)
def get_comparison_gte(self, other):
return None, self.illegal_operation(other)
def anded_by(self, other):
return None, self.illegal_operation(other)
def ored_by(self, other):
return None, self.illegal_operation(other)
def notted(self):
        return None, self.illegal_operation()
def execute(self, args):
return RTResult().failure(self.illegal_operation())
def copy(self):
raise Exception('No copy method defined')
def is_true(self):
return False
def illegal_operation(self, other=None):
if not other:
other = self
return RTError(self.pos_start, other.pos_end,'Illegal operation',self.context)
class String(Value):
def __init__(self, value):
super().__init__()
self.value = value
def added_to(self, other): ##concatenate
if isinstance(other, String):
return String(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
    def multed_by(self, other): ## repeat the string other.value times
if isinstance(other, Number):
return String(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def is_true(self):
return len(self.value) > 0
def copy(self):
copy = String(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def __str__(self):
return self.value
def __repr__(self):
return f'"{self.value}"'
class Number(Value):
def __init__(self,value):
self.value = value
self.set_pos()
self.set_context()
def added_to(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
return Number(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def subbed_by(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
return Number(self.value - other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
return Number(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def dived_by(self,other):
if isinstance(other, Number): ##check if the value that we are operating on is a number
if other.value == 0:
                return None, RTError(
                    other.pos_start, other.pos_end,
                    'Division by zero',
                    self.context
                )
return Number(self.value / other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def powed_by(self, other):
        if isinstance(other, Number): ## return the power
return Number(self.value ** other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_eq(self, other):
if isinstance(other, Number): ## comparison operator ==
return Number(int(self.value == other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_ne(self, other):
if isinstance(other, Number): ## comparison for !=
return Number(int(self.value != other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lt(self, other):
        if isinstance(other, Number): ## comparison for <
return Number(int(self.value < other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gt(self, other):
        if isinstance(other, Number): ## comparison for >
return Number(int(self.value > other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lte(self, other):
if isinstance(other, Number): ##comparison for less than or equal to <=
return Number(int(self.value <= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gte(self, other):
if isinstance(other, Number): ##comparison for greater than or equal to >=
return Number(int(self.value >= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def anded_by(self, other):
if isinstance(other, Number): ##comparison for and
return Number(int(self.value and other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def ored_by(self, other):
if isinstance(other, Number): ##comparison for or
return Number(int(self.value or other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def notted(self): ##comparison for not function
if self.value == 0:
return Number(1).set_context(self.context), None
else :
return Number(0).set_context(self.context), None
def is_true(self):
return self.value != 0
def copy(self):
copy = Number(self.value)
        copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def __repr__(self):
return str(self.value)
Number.null = Number(0)
Number.true = Number(1)
Number.false = Number(0)
Number.math_PI = Number(math.pi)
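# Illustrative sketch (added for clarity, not part of the interpreter): every
# operation on a Value returns a (result, error) pair so the interpreter can
# propagate RTErrors instead of raising. Calling this helper by hand shows the
# convention for the Number and String values defined above.
def _demo_value_protocol():
    three, err = Number(1).added_to(Number(2))
    assert err is None and three.value == 3
    result, div_err = Number(1).dived_by(Number(0))
    assert result is None and div_err is not None ## a 'Division by zero' RTError
    greeting, _ = String("Hi ").added_to(String("there"))
    assert greeting.value == "Hi there"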
class BaseFunction(Value):
def __init__(self, name):
super().__init__()
self.name = name or "<anonymous>" ##anonymous if it doesnt have a name
def generate_new_context(self): ##new context for new function
new_context = Context(self.name, self.context, self.pos_start)
new_context.symbol_table = SymbolTable(new_context.parent.symbol_table)
return new_context
    def check_args(self, arg_names, args): ## check that the correct number of arguments was passed
res = RTResult()
if len(args) > len(arg_names):
return res.failure(RTError(self.pos_start, self.pos_end,f"{len(args) - len(arg_names)} too many args passed into {self}",self.context))
if len(args) < len(arg_names):
return res.failure(RTError(self.pos_start, self.pos_end,f"{len(arg_names) - len(args)} too few args passed into {self}",self.context))
return res.success(None)
def populate_args(self, arg_names, args, exec_ctx): ##put all arguments to symbol table
for i in range(len(args)):
arg_name = arg_names[i]
arg_value = args[i]
arg_value.set_context(exec_ctx)
exec_ctx.symbol_table.set(arg_name, arg_value)
def check_and_populate_args(self, arg_names, args, exec_ctx): ##check the args and populate them
res = RTResult()
res.register(self.check_args(arg_names, args))
if res.error:
return res
self.populate_args(arg_names, args, exec_ctx)
return res.success(None)
class Function(BaseFunction):
def __init__(self, name, body_node, arg_names, should_auto_return):
super().__init__(name)
self.body_node = body_node
self.arg_names = arg_names
self.should_auto_return = should_auto_return
def execute(self, args): ##execute functions
res = RTResult()
interpreter = Interpreter()
exec_ctx = self.generate_new_context()
res.register(self.check_and_populate_args(self.arg_names, args, exec_ctx))
if res.error:
return res
value = res.register(interpreter.visit(self.body_node, exec_ctx))
if res.should_return() and res.func_return_value == None: return res
ret_value = (value if self.should_auto_return else None) or res.func_return_value or Number.null
return res.success(ret_value)
def copy(self):
copy = Function(self.name, self.body_node, self.arg_names, self.should_auto_return)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<function {self.name}>"
class BuiltInFunction(BaseFunction):
def __init__(self, name):
super().__init__(name)
def execute(self, args):
res = RTResult()
exec_ctx = self.generate_new_context() ##create new exec context
        method_name = f'execute_{self.name}' ## dispatch to a separate execute_<name> method for every built-in
method = getattr(self, method_name, self.no_visit_method)
res.register(self.check_and_populate_args(method.arg_names, args, exec_ctx))
if res.should_return():
return res
return_value = res.register(method(exec_ctx))
if res.should_return():
return res
return res.success(return_value)
def no_visit_method(self, node, context): ## if method ist defined
raise Exception(f'No execute_{self.name} method defined')
def copy(self):
copy = BuiltInFunction(self.name)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<built-in function {self.name}>"
#####################################
def execute_print(self, exec_ctx):
print(str(exec_ctx.symbol_table.get('value'))) ##print from symbol table
return RTResult().success(Number.null)
    execute_print.arg_names = ['value'] ## arg_names is attached to the method and read by check_and_populate_args
def execute_print_ret(self, exec_ctx):
return RTResult().success(String(str(exec_ctx.symbol_table.get('value')))) ##return value that should be printed
execute_print_ret.arg_names = ['value']
    def execute_input(self, exec_ctx): ## read a line of input
text = input()
return RTResult().success(String(text))
execute_input.arg_names = []
def execute_input_int(self, exec_ctx):
while True:
text = input()
try:
number = int(text)
break
except ValueError:
print(f"'{text}' must be an integer. Try again!")
return RTResult().success(Number(number))
execute_input_int.arg_names = []
def execute_clear(self, exec_ctx):
        os.system('cls' if os.name == 'nt' else 'clear') ## clear the terminal
return RTResult().success(Number.null)
execute_clear.arg_names = []
def execute_is_number(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), Number)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_number.arg_names = ["value"]
def execute_is_string(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), String)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_string.arg_names = ["value"]
def execute_is_list(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), List)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_list.arg_names = ["value"]
def execute_is_function(self, exec_ctx):
is_number = isinstance(exec_ctx.symbol_table.get("value"), BaseFunction)
return RTResult().success(Number.true if is_number else Number.false)
execute_is_function.arg_names = ["value"]
def execute_append(self, exec_ctx):
list_ = exec_ctx.symbol_table.get("list")
value = exec_ctx.symbol_table.get("value")
if not isinstance(list_, List):
return RTResult().failure(RTError(self.pos_start, self.pos_end,"First argument must be list",exec_ctx))
list_.elements.append(value)
return RTResult().success(Number.null)
execute_append.arg_names = ["list", "value"]
def execute_pop(self, exec_ctx):
list_ = exec_ctx.symbol_table.get("list")
index = exec_ctx.symbol_table.get("index")
if not isinstance(list_, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"First argument must be list",
exec_ctx
))
if not isinstance(index, Number):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"Second argument must be number",
exec_ctx
))
try:
element = list_.elements.pop(index.value)
except:
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
'Element at this index could not be removed from list because index is out of bounds',
exec_ctx
))
return RTResult().success(element)
execute_pop.arg_names = ["list", "index"]
def execute_extend(self, exec_ctx):
listA = exec_ctx.symbol_table.get("listA")
listB = exec_ctx.symbol_table.get("listB")
if not isinstance(listA, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"First argument must be list",
exec_ctx
))
if not isinstance(listB, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"Second argument must be list",
exec_ctx
))
listA.elements.extend(listB.elements)
return RTResult().success(Number.null)
execute_extend.arg_names = ["listA", "listB"]
def execute_len(self, exec_ctx): ##length of a list
list_ = exec_ctx.symbol_table.get("list")
if not isinstance(list_, List):
return RTResult().failure(RTError(
self.pos_start, self.pos_end,
"Argument must be list",
exec_ctx
))
return RTResult().success(Number(len(list_.elements)))
execute_len.arg_names = ["list"]
def execute_run(self, exec_ctx):
fn = exec_ctx.symbol_table.get("fn") ##get file name from symbol table
if not isinstance(fn, String):## raise an error if it isn't a string
return RTResult().failure(RTError(self.pos_start, self.pos_end,"Second argument must be string",exec_ctx))
fn = fn.value
try:
with open(fn, "r") as f: ## open the file in read mode and assign it to variable f
script = f.read() ## read the script contents of the file
except Exception as e:
return RTResult().failure(RTError(self.pos_start, self.pos_end,f"Failed to load script \"{fn}\"\n" + str(e),exec_ctx))
_, error = run(fn, script)
if error:
return RTResult().failure(RTError(self.pos_start, self.pos_end,f"Failed to finish executing script \"{fn}\"\n" +error.as_string(),exec_ctx))
return RTResult().success(Number.null)
execute_run.arg_names = ["fn"]
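## The assignments below create one shared BuiltInFunction instance per built-in;
## presumably these are what get registered in the interpreter's global symbol table
## elsewhere, so scripts can call print, input, append, run, etc.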
BuiltInFunction.print = BuiltInFunction("print")
BuiltInFunction.print_ret = BuiltInFunction("print_ret")
BuiltInFunction.input = BuiltInFunction("input")
BuiltInFunction.input_int = BuiltInFunction("input_int")
BuiltInFunction.clear = BuiltInFunction("clear")
BuiltInFunction.is_number = BuiltInFunction("is_number")
BuiltInFunction.is_string = BuiltInFunction("is_string")
BuiltInFunction.is_list = BuiltInFunction("is_list")
BuiltInFunction.is_function = BuiltInFunction("is_function")
BuiltInFunction.append = BuiltInFunction("append")
BuiltInFunction.pop = BuiltInFunction("pop")
BuiltInFunction.extend = BuiltInFunction("extend")
BuiltInFunction.len = BuiltInFunction("len")
BuiltInFunction.run = BuiltInFunction("run")
class List(Value):
def __init__(self, elements):
super().__init__()
self.elements = elements
def added_to(self, other):
new_list = self.copy()
new_list.elements.append(other)
return new_list, None
def subbed_by(self, other):
if isinstance(other, Number):
new_list = self.copy()
try: ## pop may fail if the index is out of bounds
new_list.elements.pop(other.value)
return new_list, None
except:
return None, RTError(other.pos_start, other.pos_end,'Element at this index could not be removed from list because index is out of bounds',self.context)
else:
return None, Value.illegal_operation(self, other)
def dived_by(self, other):
if isinstance(other, Number):
try:
return self.elements[other.value], None
except:
return None, RTError(other.pos_start, other.pos_end,'Element at this index could not be retrieved from list because index is out of bounds',self.context)
else:
return None, Value.illegal_operation(self, other)
def copy(self):
copy = List(self.elements)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def __repr__(self):
return f'[{", ".join([str(x) for x in self.elements])}]'
| [
"[email protected]"
] | |
4b063dac8fbb9c047f40f60e35b317e14d6ab716 | ba2f34ff8a7b2c36ae88a2f02ca495ad084bb6ab | /Cryptanalysis/break_autokey.py | aecc051205c004e9a18d31b229c6ec47d72a3899 | [
"MIT"
] | permissive | BlackLuny/cyberweapons | bc05e07cdc67f58c9cf68178762eb541c8c0cc55 | dfd4623f323ba702bae7c9f71132b4584636d2e5 | refs/heads/master | 2021-05-16T07:28:35.651835 | 2017-09-16T21:04:50 | 2017-09-16T21:04:50 | 103,801,285 | 1 | 0 | null | 2017-09-17T03:50:18 | 2017-09-17T03:50:18 | null | UTF-8 | Python | false | false | 2,037 | py | from ngram_score import ngram_score
from pycipher import Autokey
import re
from itertools import permutations
qgram = ngram_score('quadgrams.txt')
trigram = ngram_score('trigrams.txt')
ctext = 'isjiqymdebvuzrvwhmvysibugzhyinmiyeiklcvioimbninyksmmnjmgalvimlhspjxmgfiraqlhjcpvolqmnyynhpdetoxemgnoxl'
ctext = re.sub(r'[^A-Z]','',ctext.upper())
# keep a list of the N best things we have seen, discard anything else
class nbest(object):
def __init__(self,N=1000):
self.store = []
self.N = N
def add(self,item):
self.store.append(item)
self.store.sort(reverse=True)
self.store = self.store[:self.N]
def __getitem__(self,k):
return self.store[k]
def __len__(self):
return len(self.store)
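# Keying strategy (a beam search): for each candidate key length KLEN, score every
# 3-letter key prefix with trigrams on the ciphertext positions it decrypts, keep the
# N best, then grow the key one letter at a time, re-scoring with quadgrams and again
# keeping only the N best partial keys.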
#init
N=100
for KLEN in range(3,20):
rec = nbest(N)
for i in permutations('ABCDEFGHIJKLMNOPQRSTUVWXYZ',3):
key = ''.join(i) + 'A'*(KLEN-len(i))
pt = Autokey(key).decipher(ctext)
score = 0
for j in range(0,len(ctext),KLEN):
score += trigram.score(pt[j:j+3])
rec.add((score,''.join(i),pt[:30]))
next_rec = nbest(N)
for i in range(0,KLEN-3):
for k in xrange(N):
for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
key = rec[k][1] + c
fullkey = key + 'A'*(KLEN-len(key))
pt = Autokey(fullkey).decipher(ctext)
score = 0
for j in range(0,len(ctext),KLEN):
score += qgram.score(pt[j:j+len(key)])
next_rec.add((score,key,pt[:30]))
rec = next_rec
next_rec = nbest(N)
bestkey = rec[0][1]
pt = Autokey(bestkey).decipher(ctext)
bestscore = qgram.score(pt)
for i in range(N):
pt = Autokey(rec[i][1]).decipher(ctext)
score = qgram.score(pt)
if score > bestscore:
bestkey = rec[i][1]
bestscore = score
print bestscore,'autokey, klen',KLEN,':"'+bestkey+'",',Autokey(bestkey).decipher(ctext)
| [
"[email protected]"
] | |
bff7768f9a5f3a84f3142fcac45842c549f8bd13 | d5b60325d88d59bb3c6cde58036514921abfd6e9 | /DjangoChat/DjangoChat/wsgi.py | c2d57315e9c4b78413c290b4da11fa09adacfd85 | [] | no_license | dagrishin/DjangoChat | 472044874bbd1a91efe5a7e6611af02aa485acd1 | d800fff81ac3632752e3486a90c062dde4b18780 | refs/heads/master | 2022-12-22T06:56:57.676392 | 2020-09-29T07:14:50 | 2020-09-29T07:14:50 | 299,532,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for DjangoChat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoChat.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
7cc166e065fe935c41d23495250403d7dcdf2d32 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /pytorch/source/caffe2/python/workspace_test.py | 93bcb115e685bccfd0f46ea8cc663fdb6cd3d849 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 26,342 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace, model_helper, brew
import caffe2.python.hypothesis_test_util as htu
import hypothesis.strategies as st
from hypothesis import given
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.net = core.Net("test-net")
self.testblob_ref = self.net.ConstantFill(
[], "testblob", shape=[1, 2, 3, 4], value=1.0)
workspace.ResetWorkspace()
def testRootFolder(self):
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.RootFolder(), ".")
self.assertEqual(
workspace.ResetWorkspace("/tmp/caffe-workspace-test"), True)
self.assertEqual(workspace.RootFolder(), "/tmp/caffe-workspace-test")
def testWorkspaceHasBlobWithNonexistingName(self):
self.assertEqual(workspace.HasBlob("non-existing"), False)
def testRunOperatorOnce(self):
self.assertEqual(
workspace.RunOperatorOnce(
self.net.Proto().op[0].SerializeToString()
), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
blobs = workspace.Blobs()
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0], "testblob")
def testGetOperatorCost(self):
op = core.CreateOperator(
"Conv2D",
["X", "W"], ["Y"],
stride_h=1,
stride_w=1,
pad_t=1,
pad_l=1,
pad_b=1,
pad_r=1,
kernel=3,
)
X = np.zeros((1, 8, 8, 8))
W = np.zeros((1, 1, 3, 3))
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
flops, _ = workspace.GetOperatorCost(op.SerializeToString(), ["X", "W"])
self.assertEqual(flops, 1152)
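# Sanity check on the magic number: 1152 = 2 * (3*3 kernel taps) * (8*8 output
# positions) multiply-accumulates; this is our reading of the expected cost, not
# something the test itself spells out.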
def testRunNetOnce(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testCurrentWorkspaceWrapper(self):
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
workspace.ResetWorkspace()
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
def testRunPlan(self):
plan = core.Plan("test-plan")
plan.AddStep(core.ExecutionStep("test-step", self.net))
self.assertEqual(
workspace.RunPlan(plan.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testRunPlanInBackground(self):
plan = core.Plan("test-plan")
plan.AddStep(core.ExecutionStep("test-step", self.net))
background_plan = workspace.RunPlanInBackground(plan)
while not background_plan.is_done():
pass
self.assertEqual(background_plan.is_succeeded(), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testConstructPlanFromSteps(self):
step = core.ExecutionStep("test-step-as-plan", self.net)
self.assertEqual(workspace.RunPlan(step), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testResetWorkspace(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.HasBlob("testblob"), False)
def testTensorAccess(self):
ws = workspace.C.Workspace()
""" test in-place modification """
ws.create_blob("tensor").feed(np.array([1.1, 1.2, 1.3]))
tensor = ws.blobs["tensor"].tensor()
tensor.data[0] = 3.3
val = np.array([3.3, 1.2, 1.3])
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" test in-place initialization """
tensor.init([2, 3], core.DataType.INT32)
for x in range(2):
for y in range(3):
tensor.data[x, y] = 0
tensor.data[1, 1] = 100
val = np.zeros([2, 3], dtype=np.int32)
val[1, 1] = 100
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" strings cannot be initialized from python """
with self.assertRaises(RuntimeError):
tensor.init([3, 4], core.DataType.STRING)
""" feed (copy) data into tensor """
val = np.array([[b'abc', b'def'], [b'ghi', b'jkl']], dtype=np.object)
tensor.feed(val)
self.assertEquals(tensor.data[0, 0], b'abc')
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
val = np.array([1.1, 10.2])
tensor.feed(val)
val[0] = 5.2
self.assertEquals(tensor.data[0], 1.1)
""" fetch (copy) data from tensor """
val = np.array([1.1, 1.2])
tensor.feed(val)
val2 = tensor.fetch()
tensor.data[0] = 5.2
val3 = tensor.fetch()
np.testing.assert_array_equal(val, val2)
self.assertEquals(val3[0], 5.2)
def testFetchFeedBlob(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobViaBlobReference(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob(self.testblob_ref)
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob(self.testblob_ref, fetched), True)
fetched_again = workspace.FetchBlob("testblob") # fetch by name now
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobTypes(self):
for dtype in [np.float16, np.float32, np.float64, np.bool,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16]:
try:
rng = np.iinfo(dtype).max * 2
except ValueError:
rng = 1000
data = ((np.random.rand(2, 3, 4) - 0.5) * rng).astype(dtype)
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, dtype)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobBool(self):
"""Special case for bool to ensure coverage of both true and false."""
data = np.zeros((2, 3, 4)).astype(np.bool)
data.flat[::2] = True
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, np.bool)
np.testing.assert_array_equal(fetched_back, data)
def testGetBlobSizeBytes(self):
for dtype in [np.float16, np.float32, np.float64, np.bool,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16]:
data = np.random.randn(2, 3).astype(dtype)
self.assertTrue(workspace.FeedBlob("testblob_sizeBytes", data), True)
self.assertEqual(
workspace.GetBlobSizeBytes("testblob_sizeBytes"),
6 * np.dtype(dtype).itemsize)
strs1 = np.array([b'Hello World!', b'abcd'])
strs2 = np.array([b'element1', b'element2'])
strs1_len, strs2_len = 0, 0
for str in strs1:
strs1_len += len(str)
for str in strs2:
strs2_len += len(str)
self.assertTrue(workspace.FeedBlob("testblob_str1", strs1), True)
self.assertTrue(workspace.FeedBlob("testblob_str2", strs2), True)
# size of blob "testblob_str1" = size_str1 * meta_.itemsize() + strs1_len
# size of blob "testblob_str2" = size_str2 * meta_.itemsize() + strs2_len
self.assertEqual(
workspace.GetBlobSizeBytes("testblob_str1") -
workspace.GetBlobSizeBytes("testblob_str2"), strs1_len - strs2_len)
def testFetchFeedBlobZeroDim(self):
data = np.empty(shape=(2, 0, 3), dtype=np.float32)
self.assertEqual(workspace.FeedBlob("testblob_empty", data), True)
fetched_back = workspace.FetchBlob("testblob_empty")
self.assertEqual(fetched_back.shape, (2, 0, 3))
self.assertEqual(fetched_back.dtype, np.float32)
def testFetchFeedLongStringTensor(self):
# long strings trigger array of object creation
strs = np.array([
b' '.join(10 * [b'long string']),
b' '.join(128 * [b'very long string']),
b'small \0\1\2 string',
b"Hello, world! I have special \0 symbols \1!"])
workspace.FeedBlob('my_str_tensor', strs)
strs2 = workspace.FetchBlob('my_str_tensor')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedShortStringTensor(self):
# small strings trigger NPY_STRING array
strs = np.array([b'elem1', b'elem 2', b'element 3'])
workspace.FeedBlob('my_str_tensor_2', strs)
strs2 = workspace.FetchBlob('my_str_tensor_2')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedPlainString(self):
# this is actual string, not a tensor of strings
s = b"Hello, world! I have special \0 symbols \1!"
workspace.FeedBlob('my_plain_string', s)
s2 = workspace.FetchBlob('my_plain_string')
self.assertEqual(s, s2)
def testFetchBlobs(self):
s1 = b"test1"
s2 = b"test2"
workspace.FeedBlob('s1', s1)
workspace.FeedBlob('s2', s2)
fetch1, fetch2 = workspace.FetchBlobs(['s1', 's2'])
self.assertEquals(s1, fetch1)
self.assertEquals(s2, fetch2)
def testFetchFeedViaBlobDict(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.blobs["testblob"]
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
workspace.blobs["testblob"] = fetched
fetched_again = workspace.blobs["testblob"]
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
self.assertTrue("testblob" in workspace.blobs)
self.assertFalse("non_existant" in workspace.blobs)
self.assertEqual(len(workspace.blobs), 1)
for key in workspace.blobs:
self.assertEqual(key, "testblob")
class TestMultiWorkspaces(unittest.TestCase):
def setUp(self):
workspace.SwitchWorkspace("default")
workspace.ResetWorkspace()
def testCreateWorkspace(self):
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.SwitchWorkspace("test", True), None)
self.assertEqual(workspace.HasBlob("testblob"), False)
self.assertEqual(workspace.SwitchWorkspace("default"), None)
self.assertEqual(workspace.HasBlob("testblob"), True)
try:
# The following should raise an error.
workspace.SwitchWorkspace("non-existing")
# so this should never happen.
self.assertEqual(True, False)
except RuntimeError:
pass
workspaces = workspace.Workspaces()
self.assertTrue("default" in workspaces)
self.assertTrue("test" in workspaces)
@unittest.skipIf(not workspace.has_gpu_support
and not workspace.has_hip_support, "No gpu support.")
class TestWorkspaceGPU(test_util.TestCase):
def setUp(self):
workspace.ResetWorkspace()
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.net.RunAllOnGPU()
def testFetchBlobGPU(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testGetGpuPeerAccessPattern(self):
pattern = workspace.GetGpuPeerAccessPattern()
self.assertEqual(type(pattern), np.ndarray)
self.assertEqual(pattern.ndim, 2)
self.assertEqual(pattern.shape[0], pattern.shape[1])
self.assertEqual(pattern.shape[0], workspace.NumGpuDevices())
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestWorkspaceIDEEP(test_util.TestCase):
def testFeedFetchBlobIDEEP(self):
arr = np.random.randn(2, 3).astype(np.float32)
workspace.FeedBlob(
"testblob_ideep", arr, core.DeviceOption(caffe2_pb2.IDEEP))
fetched = workspace.FetchBlob("testblob_ideep")
np.testing.assert_array_equal(arr, fetched)
class TestImmedibate(test_util.TestCase):
def testImmediateEnterExit(self):
workspace.StartImmediate(i_know=True)
self.assertTrue(workspace.IsImmediate())
workspace.StopImmediate()
self.assertFalse(workspace.IsImmediate())
def testImmediateRunsCorrectly(self):
workspace.StartImmediate(i_know=True)
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.ImmediateBlobs(), ["testblob"])
content = workspace.FetchImmediate("testblob")
# Also, the immediate mode should not invade the original namespace,
# so we check if this is so.
with self.assertRaises(RuntimeError):
workspace.FetchBlob("testblob")
np.testing.assert_array_equal(content, 1.0)
content[:] = 2.0
self.assertTrue(workspace.FeedImmediate("testblob", content))
np.testing.assert_array_equal(
workspace.FetchImmediate("testblob"), 2.0)
workspace.StopImmediate()
with self.assertRaises(RuntimeError):
content = workspace.FetchImmediate("testblob")
def testImmediateRootFolder(self):
workspace.StartImmediate(i_know=True)
# for testing we will look into the _immediate_root_folder variable
# but in normal usage you should not access that.
self.assertTrue(len(workspace._immediate_root_folder) > 0)
root_folder = workspace._immediate_root_folder
self.assertTrue(os.path.isdir(root_folder))
workspace.StopImmediate()
self.assertTrue(len(workspace._immediate_root_folder) == 0)
# After termination, immediate mode should have the root folder
# deleted.
self.assertFalse(os.path.exists(root_folder))
class TestCppEnforceAsException(test_util.TestCase):
def testEnforce(self):
op = core.CreateOperator("Relu", ["X"], ["Y"])
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
class TestCWorkspace(htu.HypothesisTestCase):
def test_net_execution(self):
ws = workspace.C.Workspace()
self.assertEqual(ws.nets, {})
self.assertEqual(ws.blobs, {})
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
ws.create_net(net)
# If we do not specify overwrite, this should raise an error.
with self.assertRaises(RuntimeError):
ws.create_net(net)
# But, if we specify overwrite, this should pass.
ws.create_net(net, True)
# Overwrite can also be a kwarg.
ws.create_net(net, overwrite=True)
self.assertIn("testblob", ws.blobs)
self.assertEqual(len(ws.nets), 1)
net_name = net.Proto().name
self.assertIn("test-net", net_name)
net = ws.nets[net_name].run()
blob = ws.blobs["testblob"]
np.testing.assert_array_equal(
np.ones((1, 2, 3, 4), dtype=np.float32),
blob.fetch())
@given(name=st.text(), value=st.floats(min_value=-1, max_value=1.0))
def test_operator_run(self, name, value):
ws = workspace.C.Workspace()
op = core.CreateOperator(
"ConstantFill", [], [name], shape=[1], value=value)
ws.run(op)
self.assertIn(name, ws.blobs)
np.testing.assert_allclose(
[value], ws.blobs[name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_run(self, blob_name, net_name, value):
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.run(net)
self.assertIn(blob_name, ws.blobs)
self.assertNotIn(net_name, ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
plan_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_plan_run(self, blob_name, plan_name, net_name, value):
ws = workspace.C.Workspace()
plan = core.Plan(plan_name)
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
plan.AddStep(core.ExecutionStep("step", nets=[net], num_iter=1))
ws.run(plan)
self.assertIn(blob_name, ws.blobs)
self.assertIn(net.Name(), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_create(self, blob_name, net_name, value):
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.create_net(net).run()
self.assertIn(blob_name, ws.blobs)
self.assertIn(net.Name(), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(name=st.text(),
value=htu.tensor(),
device_option=st.sampled_from(htu.device_options))
def test_array_serde(self, name, value, device_option):
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value, device_option=device_option)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
np.testing.assert_equal(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
np.testing.assert_equal(value, serde_blob.fetch())
@given(name=st.text(), value=st.text())
def test_string_serde(self, name, value):
value = value.encode('ascii', 'ignore')
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
self.assertEqual(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
self.assertEqual(value, serde_blob.fetch())
def test_exception(self):
ws = workspace.C.Workspace()
with self.assertRaises(TypeError):
ws.create_net("...")
class TestPredictor(unittest.TestCase):
def _create_model(self):
m = model_helper.ModelHelper()
y = brew.fc(m, "data", "y",
dim_in=4, dim_out=2,
weight_init=('ConstantFill', dict(value=1.0)),
bias_init=('ConstantFill', dict(value=0.0)),
axis=0)
m.net.AddExternalOutput(y)
return m
# Use this test with a bigger model to see how using Predictor allows to
# avoid issues with low protobuf size limit in Python
#
# def test_predictor_predefined(self):
# workspace.ResetWorkspace()
# path = 'caffe2/caffe2/test/assets/'
# with open(path + 'squeeze_predict_net.pb') as f:
# self.predict_net = f.read()
# with open(path + 'squeeze_init_net.pb') as f:
# self.init_net = f.read()
# self.predictor = workspace.Predictor(self.init_net, self.predict_net)
# inputs = [np.zeros((1, 3, 256, 256), dtype='f')]
# outputs = self.predictor.run(inputs)
# self.assertEqual(len(outputs), 1)
# self.assertEqual(outputs[0].shape, (1, 1000, 1, 1))
# self.assertAlmostEqual(outputs[0][0][0][0][0], 5.19026289e-05)
def test_predictor_memory_model(self):
workspace.ResetWorkspace()
m = self._create_model()
workspace.FeedBlob("data", np.zeros([4], dtype='float32'))
self.predictor = workspace.Predictor(
workspace.StringifyProto(m.param_init_net.Proto()),
workspace.StringifyProto(m.net.Proto()))
inputs = np.array([1, 3, 256, 256], dtype='float32')
outputs = self.predictor.run([inputs])
np.testing.assert_array_almost_equal(
np.array([[516, 516]], dtype='float32'), outputs)
class TestTransform(htu.HypothesisTestCase):
@given(input_dim=st.integers(min_value=1, max_value=10),
output_dim=st.integers(min_value=1, max_value=10),
batch_size=st.integers(min_value=1, max_value=10))
def test_simple_transform(self, input_dim, output_dim, batch_size):
m = model_helper.ModelHelper()
fc1 = brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = brew.fc(m, fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
conv = brew.conv(m, fc2, "conv",
dim_in=output_dim,
dim_out=output_dim,
use_cudnn=True,
engine="CUDNN",
kernel=3)
conv.Relu([], conv)\
.Softmax([], "pred") \
.LabelCrossEntropy(["label"], ["xent"]) \
.AveragedLoss([], "loss")
transformed_net_proto = workspace.ApplyTransform(
"ConvToNNPack",
m.net.Proto())
self.assertEqual(transformed_net_proto.op[2].engine, "NNPACK")
@given(input_dim=st.integers(min_value=1, max_value=10),
output_dim=st.integers(min_value=1, max_value=10),
batch_size=st.integers(min_value=1, max_value=10))
def test_registry_invalid(self, input_dim, output_dim, batch_size):
m = model_helper.ModelHelper()
brew.fc(m, "data", "fc1", dim_in=input_dim, dim_out=output_dim)
with self.assertRaises(RuntimeError):
workspace.ApplyTransform(
"definitely_not_a_real_transform",
m.net.Proto())
@given(value=st.floats(min_value=-1, max_value=1))
def test_apply_transform_if_faster(self, value):
init_net = core.Net("init_net")
init_net.ConstantFill([], ["data"], shape=[5, 5, 5, 5], value=value)
init_net.ConstantFill([], ["conv_w"], shape=[5, 5, 3, 3], value=value)
init_net.ConstantFill([], ["conv_b"], shape=[5], value=value)
self.assertEqual(
workspace.RunNetOnce(init_net.Proto().SerializeToString()), True)
m = model_helper.ModelHelper()
conv = brew.conv(m, "data", "conv",
dim_in=5,
dim_out=5,
kernel=3,
use_cudnn=True,
engine="CUDNN")
conv.Relu([], conv)\
.Softmax([], "pred") \
.AveragedLoss([], "loss")
self.assertEqual(
workspace.RunNetOnce(m.net.Proto().SerializeToString()), True)
proto = workspace.ApplyTransformIfFaster(
"ConvToNNPack",
m.net.Proto(),
init_net.Proto())
self.assertEqual(
workspace.RunNetOnce(proto.SerializeToString()), True)
proto = workspace.ApplyTransformIfFaster(
"ConvToNNPack",
m.net.Proto(),
init_net.Proto(),
warmup_runs=10,
main_runs=100,
improvement_threshold=2.0)
self.assertEqual(
workspace.RunNetOnce(proto.SerializeToString()), True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
10ea87ac6afba40de0a3d96e81db5dc69ef6df3d | 7c3ad63b17b868672ff14e798bb965109c10d403 | /src/kNN_single.py | 6b257b9b63560794a04b98462bedff7409e85679 | [] | no_license | ternaus/kaggle_liberty | 87cc6e5259e1ea4ce69726a83e4e642db85d8e22 | 5eb17b6bf1f6f6f6f4f6eab880592547ad41007d | refs/heads/master | 2016-09-11T02:13:22.121760 | 2015-08-26T22:23:47 | 2015-08-26T22:23:47 | 39,865,075 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from __future__ import division
__author__ = 'Vladimir Iglovikov'
from operator import itemgetter
from sklearn import metrics
from gini_normalized import normalized_gini
import numpy as np
import pandas as pd
from sklearn.grid_search import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
import time
joined = pd.read_csv('../data/joined.csv')
train = joined[joined['Hazard'] != -1]
test = joined[joined['Hazard'] == -1]
y = train['Hazard']
X = train.drop(['Hazard', 'Id'], 1)
X_test = test.drop(['Hazard', 'Id'], 1)
scaler = StandardScaler()
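# kNN is distance-based, so features are standardised to zero mean / unit variance;
# without this, columns with large numeric ranges would dominate the neighbour search.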
print 'scaling train'
X = scaler.fit_transform(X)
print 'scaling test'
X_test = scaler.transform(X_test)
clf = KNeighborsRegressor(n_neighbors=550)
print 'fitting'
clf.fit(X, y)
print 'predicting'
prediction = clf.predict(X_test)
submission = pd.DataFrame()
submission['Id'] = test['Id']
submission['Hazard'] = prediction
submission.to_csv('kNN/kNN_{timestamp}.csv'.format(timestamp=time.time()), index=False)
| [
"[email protected]"
] | |
01a1ef6dc25aacb7b99e3bb2d2e912e04233c3cc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_outgoes.py | 710d7af255478e9b9f5ce4bf9bc34b044eb81186 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
#calss header
class _OUTGOES():
def __init__(self,):
self.name = "OUTGOES"
self.definitions = outgo
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['outgo']
| [
"[email protected]"
] | |
5a6960680cae86c401d945eb77b50e792096b7ac | 464850ba426263b17084fc71363ca14b8278b15e | /80.py | c539164e19aa8d461121a1829efe084c3408f060 | [] | no_license | eng-arvind/python | 8442c30ec10f979f913b354458b4f910539d8728 | 249f5f35f245a3f1742b10310de37ca6c6023af2 | refs/heads/master | 2020-12-23T06:40:16.911269 | 2020-02-02T18:42:01 | 2020-02-02T18:42:01 | 237,069,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | n = 7
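# Prints a hollow diamond of '*' for odd n: the four conditions below are the line
# equations of the diamond's edges (i+j, i-j and j-i compared against n//2 and 3*(n//2)).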
for i in range(n):
for j in range(n):
if i + j == n//2 or i - j == n//2 or i + j == (n//2)*3 or j - i == n//2:
print("*", end="")
else:
print(end=" ")
print()
| [
"[email protected]"
] | |
fb3b2fd6f3497e8dd1ded9a6c54a330aac22db31 | 3fa1b23746232975b3b014db2f525007a3b49991 | /anna_code/demographics/rct_consented/subset_values_to_randomized_people.py | a4791706554ee798896de773f5da39c3e0e96e89 | [] | no_license | AshleyLab/myheartcounts | ba879e10abbde085b5c9550f0c13ab3f730d7d03 | 0f80492f7d3fc53d25bdb2c69f14961326450edf | refs/heads/master | 2021-06-17T05:41:58.405061 | 2021-02-28T05:33:08 | 2021-02-28T05:33:08 | 32,551,526 | 7 | 1 | null | 2020-08-17T22:37:43 | 2015-03-19T23:25:01 | OpenEdge ABL | UTF-8 | Python | false | false | 325 | py | import pandas as pd
import sys
import pdb
data=pd.read_csv(sys.argv[1],header=None,sep='\t')
subjects=pd.read_csv('subjects.txt',header=None)
subset=data[data[0].isin(subjects[0])]
#nums=pd.to_numeric(subset[1],errors='coerce')
#mean_val=nums.mean()
#print(mean_val)
#std_val=nums.std()
#print(std_val)
pdb.set_trace()
| [
"[email protected]"
] | |
63d46a52a9c3929779b4d498745424b1505a9754 | 17f29e8f3eab9deb724b10bc7e61c73f1fca21c6 | /backend/home/migrations/0004_auto_20200320_0813.py | 8596cdb6cafc9245c067cfa29396a8d0c4ff6f09 | [] | no_license | crowdbotics-apps/mobilemobapp-dev-2035 | 91df345e8f6e42760c4156a7dd73a6d8b17250e0 | 041b1c20c4a14b4595fbcca943cdf46dec445497 | refs/heads/master | 2022-04-12T06:06:17.910111 | 2020-03-20T08:13:11 | 2020-03-20T08:13:11 | 248,153,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | # Generated by Django 2.2.11 on 2020-03-20 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_customtext_test'),
]
operations = [
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Testing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Testtt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('testt', models.BinaryField()),
],
),
migrations.RemoveField(
model_name='customtext',
name='test',
),
migrations.AddField(
model_name='customtext',
name='name',
field=models.BinaryField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
53ff44496cb0984d03f5da6f7271f4c8652cc91d | 14561adc9918f32b7f9334fa4dde08a3bfa17c26 | /pipeline/Bacteria_denovo/Bacteria_denovo.pipeline.py | d4951738835c6a9781c9201f9ea8cd6c6fcab482 | [] | no_license | ZhikunWu/awesome-metagenomic | b932169f505d39864a91067283ad7ce954280923 | 71183f262aa539a3983af4de47f7cc69be8cf7a6 | refs/heads/master | 2021-10-08T00:00:00.181593 | 2018-12-06T02:07:42 | 2018-12-06T02:07:42 | 111,966,593 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | #!/usr/bin/env python
import yaml
import os
import sys
IN_PATH = config["IN_PATH"]
PIPE_DIR = config["PIPE_DIR"]
THREADS = config["THREADS"]
ThreadFold = config["ThreadFold"]
SAMPLES = config["SAMPLES"]
PROJECTS = config["PROJECTS"]
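# These keys are expected to be supplied at run time, e.g. via
# `snakemake --configfile config.yaml`, where the YAML defines IN_PATH, PIPE_DIR,
# THREADS, ThreadFold, SAMPLES and PROJECTS (assumed usage; the config file itself
# is not part of this script).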
include: PIPE_DIR + "/Nano_QualityControl.rule.py"
include: PIPE_DIR + "/GenePridiction.rule.py"
rule all:
input:
expand(IN_PATH + "/clean/{sample}.fastq", sample=SAMPLES),
expand(IN_PATH + "/qualityControl/raw/nanoQC/{sample}/nanoQC.html", sample=SAMPLES),
expand(IN_PATH + "/qualityControl/raw/NanoPlot/{sample}/NanoPlot-report.html", sample=SAMPLES),
expand(IN_PATH + '/annotation/{project}/Prokka/assembly.faa', project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/tRNAscan/assembly_tRNA_gene.fna", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatMasker/assembly.fasta.out", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatModeler/assembly_RepeatModeler.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRFinder/LTR.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/TandemRepeatFinder/TandemRepeat.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRFinder/finder.scn", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRretriever/assembly.fasta.mod.pass.list", project=PROJECTS),
expand(IN_PATH + "/assembly/{project}/assembly.fasta.mod.out.LAI", project=PROJECTS),
expand(IN_PATH + "/assembly/{project}/assembly_index.esq", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRharvest/assembly_ltrharvest.gff3", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/LTRharvest/assembly_ltrdigest.gff3", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatScout/seq_freq.txt", project=PROJECTS),
expand(IN_PATH + "/annotation/{project}/RepeatScout/seq_repeat.txt", project=PROJECTS), | [
"[email protected]"
] | |
fa959aa6f4a922c56b0970dcb74658e61c42d1f2 | 4ef98e50c40dc9f79ac9e422a208427f034f804d | /maps/models.py | 1e2a9a1d04f3ff48376a6325fbc92a1d1d52468a | [] | no_license | couleurmate/DeweyMaps | 5bd4eef11d429a7f252b8fb3141a7a47697154b4 | 063e9e7e412d57d2fdaf976728aaff66eb5fd38a | refs/heads/master | 2021-01-17T04:51:22.226762 | 2015-07-05T10:38:57 | 2015-07-05T10:38:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | from django.contrib.gis.db import models
from closet.models import Subcategory
class Marker(models.Model):
name = models.CharField(blank=False, max_length=255)
position = models.PointField(geography=True, blank=False)
comment = models.TextField(blank=True, null=False, default="")
subcategories = models.ManyToManyField(Subcategory)
web = models.URLField(default="")
phone = models.CharField(max_length=15, default="")
adress = models.CharField(max_length=1000, default="")
public = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
objects = models.GeoManager()
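# GeoManager gives the default queryset spatial lookups (distance, containment, etc.)
# on the geography `position` field.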
def __str__(self):
return self.name
@property
def content(self):
return self.comment
@property
def lat(self):
return self.position.y
@property
def lon(self):
return self.position.x
@property
def popup(self):
tpl = """<h5>{0.name}</h5>"""
if self.adress != "":
tpl += "<em>Adresse</em> : {0.adress}<br><br>"
if self.phone != "":
tpl += "<em>Téléphone</em> : {0.phone}<br><br>"
if self.web != "":
tpl += '<b><a target="_blank" href="{0.web}">Site web</a></b><br><br>'
tpl += "{0.comment}<br><br>"
tpl += '<a href="http://dewey.be/contact.html">Signaler un problème</a>'
return tpl.format(self)
| [
"[email protected]"
] | |
1093b9c3c57519cf4dc597bf6df497b6e31fe0fe | e15f86312db3109bbda053063557693518af4ead | /pcsk9/select_fam.py | 35318362eec5e7e8604254ceeeedd5879854dcdc | [] | no_license | heichiyidui/dev | 3aecf0f92e4af4184b4eae2b1935f281b7746c86 | 73c20c19928eb94d9aec10f0d307604b147b8088 | refs/heads/master | 2020-12-29T01:54:24.236229 | 2016-07-01T14:51:01 | 2016-07-01T14:51:01 | 35,271,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | #!/usr/bin/env python3
# tail -n +2 plink.genome | awk '{print $2,$4}' > t.in
edges = []
ifile =open('t.in')
for line in ifile:
cols = line[:-1].split()
edges.append([cols[0],cols[1]])
ifile.close()
import collections
node_dgres = collections.Counter()
nodes_1 = [x[0] for x in edges]
nodes_2 = [x[1] for x in edges]
node_dgres.update(nodes_1)
node_dgres.update(nodes_2)
# let's remove nodes according to their connection degrees
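# Greedy pruning: repeatedly drop the sample that appears in the most related pairs
# (highest degree) until no edges remain, a standard heuristic for keeping a large
# set of mutually unrelated individuals from plink's IBD (.genome) output.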
to_remove_list = []
for l in range(10000):
if edges == []:
break
# find the most connected node
to_remove_id = node_dgres.most_common(1)[0][0]
to_remove_list.append(to_remove_id)
# update edge set
new_edges = [x for x in edges if to_remove_id not in x]
edges = new_edges
# update node connection degree
node_dgres = collections.Counter()
nodes_1 = [x[0] for x in edges]
nodes_2 = [x[1] for x in edges]
node_dgres.update(nodes_1)
node_dgres.update(nodes_2)
for id in to_remove_list:
print(id) | [
"[email protected]"
] | |
fd038588e1514db2ce8a3b98d9a04bf9c08b8692 | 9c3c83007c5bf0f36635b0045b2aad7f8a11ac11 | /novice/04-05/graphql/venv/lib/python3.6/site-packages/graphql/utils/value_from_ast.py | 7ad52bca43bf423c08c5f077dd51404ba8164137 | [
"MIT"
] | permissive | septiannurtrir/praxis-academy | bc58f9484db36b36c202bf90fdfd359482b72770 | 1ef7f959c372ae991d74ccd373123142c2fbc542 | refs/heads/master | 2021-06-21T17:04:58.379408 | 2019-09-13T16:46:08 | 2019-09-13T16:46:08 | 203,007,994 | 1 | 0 | MIT | 2021-03-20T01:43:24 | 2019-08-18T13:38:23 | Python | UTF-8 | Python | false | false | 2,920 | py | from ..language import ast
from ..type import (
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLList,
GraphQLNonNull,
GraphQLScalarType,
)
# Necessary for static type checking
if False: # flake8: noqa
from ..language.ast import Node
from ..type.definition import GraphQLType
from typing import Dict, Union, Optional, List
def value_from_ast(value_ast, type, variables=None):
# type: (Optional[Node], GraphQLType, Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and a value AST node known to match this type, build a
runtime value."""
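    # Informal example: given GraphQLList(GraphQLInt) and a ListValue AST of [1, 2],
    # this returns the Python list [1, 2]; a Variable node is simply looked up by name
    # in `variables` without further type checking (the query is assumed validated).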
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValueAST is non-null.
# We're assuming that this query has been validated and the value used here is of the correct type.
return value_from_ast(value_ast, type.of_type, variables)
if value_ast is None:
return None
if isinstance(value_ast, ast.Variable):
variable_name = value_ast.name.value
if not variables or variable_name not in variables:
return None
# Note: we're not doing any checking that this variable is correct. We're assuming that this query
# has been validated and the variable usage here is of the correct type.
return variables.get(variable_name)
if isinstance(type, GraphQLList):
item_type = type.of_type
if isinstance(value_ast, ast.ListValue):
return [
value_from_ast(item_ast, item_type, variables)
for item_ast in value_ast.values
]
else:
return [value_from_ast(value_ast, item_type, variables)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
if not isinstance(value_ast, ast.ObjectValue):
return None
field_asts = {}
for field_ast in value_ast.fields:
field_asts[field_ast.name.value] = field_ast
obj = {}
for field_name, field in fields.items():
if field_name not in field_asts:
if field.default_value is not None:
# We use out_name as the output name for the
# dict if exists
obj[field.out_name or field_name] = field.default_value
continue
field_ast = field_asts[field_name]
field_value_ast = field_ast.value
field_value = value_from_ast(field_value_ast, field.type, variables)
# We use out_name as the output name for the
# dict if exists
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_literal(value_ast)
| [
"[email protected]"
] | |
7e5dbb102fab53228104ce9a43c6407ab1972c45 | 50989266203628be7649d152392f4a1789997b90 | /lisp.py | 9c96a7942a34631c24cce5c62058308aa3242b27 | [] | no_license | cheery/snakelisp | b2820819959be4ed0b62a60c511b15623ae5589e | c62c0401e7d8cbd63afb8a7242850f7740420614 | refs/heads/master | 2020-05-15T08:53:26.443191 | 2014-09-16T15:55:43 | 2014-09-16T15:55:43 | 23,539,541 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,257 | py | #!/usr/bin/env python
from pythonboot.blip import ListNode, TextNode, MarkNode, isList, isText, isMark, open_list
import json
import transpiler
from cps import Call, Lambda, Assign, Variable, Constant, Environ, null, true, false
import subprocess
import sys
import re
# call = Call([arguments]), call[i]
# lambda = Lambda([arguments], body), lambda[i]
# Assign(var, val, body)
# Variable(name, value)
# Constant(value)
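# Informal example of the CPS translation implemented below: compiling (let x 1)
# against a continuation k produces roughly
#   Assign(x, Constant(1), Call([k, Constant(1)]))
# i.e. the value is bound, then handed straight to the continuation.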
def main():
path = sys.argv[1]
mks = []
env = Environ()
ret = env.new_argument("cont", False)
env = env.new_environ()
ret = env.new_argument('cont', False)
exprs = open_list(path).strip_rec()
#exprs = list(open_list("base.sl")) + list(open_list(path))
program = env.close(compile_list(exprs, env, ret))
program = program.coalesce()
snakesource = "snakelisp.c"
rootdecl = re.compile(r'newRoot\("(.+)",')
with open(snakesource) as fd:
src = fd.read()
c_roots = dict((decl, "(root+{})".format(i)) for i, decl in enumerate(rootdecl.findall(src)))
c_api = {
"uncallable-hook": "&uncallable_hook",
"type-error-hook": "&type_error_hook",
}
c_use = set()
for var in env.seal():
if var.name in c_roots:
var.c_handle = c_roots[var.name]
continue
var.c_handle = c_api[var.name]
c_use.add(var.c_handle)
cdefns = ["extern value_t {};".format(value[1:]) for value in c_use]
#import visuals
#visuals.create_graph("demo.png", program)
source = transpiler.transpile(program, cdefns, path)
open(path+'.c', 'w').write(source)
subprocess.call(["gcc", path+'.c', snakesource, "-I.", "-lm"])
constants = {'null': null, 'true':true, 'false':false}
def compile(expr, env, k):
if isList(expr, 'include') and isText(expr[0]):
return compile_list(open_list(expr[0].text).strip_rec(), env, k)
if isList(expr, 'let') and isText(expr[0]):
var = env.get_local(expr[0].text)
return compile(expr[1], env,
(lambda val: Assign(var, val, retrieve(k, val))))
if isList(expr, 'set') and isText(expr[0]):
var = env.lookup(expr[0].text)
return compile(expr[1], env,
(lambda val: Assign(var, val, retrieve(k, val))))
if isList(expr, 'cond'):
return compile_cond(expr, env, k)
if isList(expr, 'while'):
return compile_while(expr, env, k)
if isList(expr, 'func'):
env = env.new_environ()
ret = env.new_argument('cont', False)
for sym in expr[0]:
assert sym.label == ''
env.new_argument(sym.text)
return retrieve(k, env.close(compile_list(expr[1:], env, ret)))
if isList(expr, 'infix') and len(expr) == 3:
return compile(ListNode([expr[1], expr[0], expr[2]]), env, k)
if isList(expr, ''):
params = []
seq = list(expr)
def next_parameter(param):
params.append(param)
if len(seq) > 0:
return compile(seq.pop(0), env, next_parameter)
else:
callee = params.pop(0)
return Call([callee, lift(k)] + params)
return compile(seq.pop(0), env, next_parameter)
#if expr.group == 'integer':
# return retrieve(k, Constant(expr.value))
#if expr.group == 'double':
# return retrieve(k, Constant(expr.value))
if isText(expr, "string"):
return retrieve(k, Constant(expr.text))
if isText(expr, ''):
if expr.text[:1].isdigit():
return retrieve(k, Constant(int(expr.text)))
if expr.text in constants:
param = constants[expr.text]
else:
param = env.lookup(expr.text)
return retrieve(k, param)
raise Exception("what is {}?".format(expr))
def compile_list(exprs, env, k):
seq = list(exprs)
def next_parameter(param):
if len(seq) > 1:
return compile(seq.pop(0), env, next_parameter)
else:
return compile(seq.pop(0), env, k)
if len(exprs) == 0:
return retrieve(k, null)
return next_parameter(null)
def retrieve(k, param):
if callable(k):
return k(param)
else:
return Call([k, param])
def lift(k):
if callable(k):
x = Variable()
return Lambda([x], k(x))
else:
return k
def compile_cond(expr, env, k):
seq = list(expr[0:])
if len(seq) == 0:
return retrieve(k, null)
def next_cond(k):
if len(seq) == 0:
return retrieve(k, null)
head = seq.pop(0)
if len(seq) == 0 and isList(head, 'else'):
return compile_list(head[0:], env, k)
if isList(head, 'else'):
raise Exception("invalid cond expression")
return compile(head[0], env,
(lambda truth: pick(env, k, truth,
enclose(head[1:], env),
lambdaCont(next_cond))))
return next_cond(k)
def compile_while(expr, env, k):
self = Variable()
seq = expr[1:]
def compile_body(k):
return compile_list(expr[1:], env, (lambda _: Call([self, lift(k)])))
cont = Variable()
looplambda = Lambda([cont], compile(expr[0], env,
(lambda truth: pick(env, cont, truth, lambdaCont(compile_body), lambdaNull()))))
return Assign(self, looplambda, Call([self, lift(k)]), True)
def pick(env, k, truth, yes, no):
return Call([env.new_implicit('pick'), lift(k), truth, yes, no])
def lambdaNull():
cont = Variable()
return Lambda([cont], Call([cont, null]))
def lambdaCont(func):
cont = Variable()
return Lambda([cont], func(cont))
def enclose(exprs, env):
cont = Variable()
return Lambda([cont], compile_list(exprs, env, cont))
#def open_list(path):
# with open(path, 'r') as fd:
# plop = json.load(fd)
# return decodeJson(plop)
#
#def decodeJson(node):
# if node["type"] == "list":
# return ListNode([decodeJson(a) for a in node["list"]], node["label"] or '').strip()
# elif node["type"] == 'text':
# return TextNode(node["text"], node["label"] or '')
# elif node["type"] == 'mark':
# return MarkNode(node["label"] or '')
# else:
# raise Exception("unknown {}".format(node))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
992f8823515ccee3a140f890755137552e8928d4 | 438ee853669a67cd46537f6d02cf356d05e03681 | /doctor_dashboard/urls.py | 47694bb78b753fc56cdb14fe68d5c7380a309fe8 | [] | no_license | tngeene/doc_appointment | a6648bed5c3d1d27e25131945910c5c425468fa1 | 6d1f320db03ad9fcc42b09e19a0d0a73e5af233a | refs/heads/master | 2023-02-22T05:37:36.510685 | 2021-01-19T11:46:01 | 2021-01-19T11:46:01 | 324,834,090 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from django.urls import path, include
app_name = "doctor_dashboard"
urlpatterns = [
path('', include('doctor_dashboard.routes.index')),
path('appointments/', include('doctor_dashboard.routes.appointments')),
# path('doctors/', include('doctor_dashboard.routes.doctors')),
# path('patients/', include('doctor_dashboard.routes.patients')),
path('events/', include('doctor_dashboard.routes.events')),
]
| [
"[email protected]"
] | |
d29ecd2dab536aba7307bb95697055dbc30cf2aa | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_3377.py | 561d811f19c812512cfb3db4c9e030dcd1210575 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,331 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((262, 533, 768), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((911, 601, 823), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((932, 878, 424), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((897, 147, 198), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((654, 184, 344), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((563, 71, 808), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((515, 319, 492), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((194, 440, 798), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((535, 777, 166), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((143, 239, 358), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((320, 498, 370), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((288, 147, 63), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((949, 360, 485), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((436, 819, 284), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((44, 825, 43), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((11, 479, 395), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((991, 520, 392), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((788, 680, 50), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((475, 141, 883), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((184, 381, 961), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((694, 467, 322), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
fc0bbcd096df9fe751b943cfd1fd20e466ee4baf | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Poppler/PageLayout.py | 605c3665f4dee50e741e5800be5b7e77e834cdc8 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 13,686 | py | # encoding: utf-8
# module gi.repository.Poppler
# from /usr/lib64/girepository-1.0/Poppler-0.18.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
class PageLayout(__gobject.GEnum):
# no doc
def as_integer_ratio(self): # real signature unknown; restored from __doc__
"""
Return integer ratio.
Return a pair of integers, whose ratio is exactly equal to the original int
and with a positive denominator.
>>> (10).as_integer_ratio()
(10, 1)
>>> (-10).as_integer_ratio()
(-10, 1)
>>> (0).as_integer_ratio()
(0, 1)
"""
pass
def bit_length(self): # real signature unknown; restored from __doc__
"""
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6
"""
pass
def conjugate(self, *args, **kwargs): # real signature unknown
""" Returns self, the complex conjugate of any int. """
pass
def from_bytes(self, *args, **kwargs): # real signature unknown
"""
Return the integer represented by the given array of bytes.
bytes
Holds the array of bytes to convert. The argument must either
support the buffer protocol or be an iterable object producing bytes.
Bytes and bytearray are examples of built-in objects that support the
buffer protocol.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Indicates whether two's complement is used to represent the integer.
"""
pass
def to_bytes(self, *args, **kwargs): # real signature unknown
"""
Return an array of bytes representing an integer.
length
Length of bytes object to use. An OverflowError is raised if the
integer is not representable with the given number of bytes.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Determines whether two's complement is used to represent the integer.
If signed is False and a negative integer is given, an OverflowError
is raised.
"""
pass
def __abs__(self, *args, **kwargs): # real signature unknown
""" abs(self) """
pass
def __add__(self, *args, **kwargs): # real signature unknown
""" Return self+value. """
pass
def __and__(self, *args, **kwargs): # real signature unknown
""" Return self&value. """
pass
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __ceil__(self, *args, **kwargs): # real signature unknown
""" Ceiling of an Integral returns itself. """
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __divmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(self, value). """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __float__(self, *args, **kwargs): # real signature unknown
""" float(self) """
pass
def __floordiv__(self, *args, **kwargs): # real signature unknown
""" Return self//value. """
pass
def __floor__(self, *args, **kwargs): # real signature unknown
""" Flooring an Integral returns itself. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __index__(self, *args, **kwargs): # real signature unknown
""" Return self converted to an integer, if self is suitable for use as an index into a list. """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __int__(self, *args, **kwargs): # real signature unknown
""" int(self) """
pass
def __invert__(self, *args, **kwargs): # real signature unknown
""" ~self """
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __mod__(self, *args, **kwargs): # real signature unknown
""" Return self%value. """
pass
def __mul__(self, *args, **kwargs): # real signature unknown
""" Return self*value. """
pass
def __neg__(self, *args, **kwargs): # real signature unknown
""" -self """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __or__(self, *args, **kwargs): # real signature unknown
""" Return self|value. """
pass
def __pos__(self, *args, **kwargs): # real signature unknown
""" +self """
pass
def __pow__(self, *args, **kwargs): # real signature unknown
""" Return pow(self, value, mod). """
pass
def __radd__(self, *args, **kwargs): # real signature unknown
""" Return value+self. """
pass
def __rand__(self, *args, **kwargs): # real signature unknown
""" Return value&self. """
pass
def __rdivmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(value, self). """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __rfloordiv__(self, *args, **kwargs): # real signature unknown
""" Return value//self. """
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rmod__(self, *args, **kwargs): # real signature unknown
""" Return value%self. """
pass
def __rmul__(self, *args, **kwargs): # real signature unknown
""" Return value*self. """
pass
def __ror__(self, *args, **kwargs): # real signature unknown
""" Return value|self. """
pass
def __round__(self, *args, **kwargs): # real signature unknown
"""
Rounding an Integral returns itself.
Rounding with an ndigits argument also returns an integer.
"""
pass
def __rpow__(self, *args, **kwargs): # real signature unknown
""" Return pow(value, self, mod). """
pass
def __rrshift__(self, *args, **kwargs): # real signature unknown
""" Return value>>self. """
pass
def __rshift__(self, *args, **kwargs): # real signature unknown
""" Return self>>value. """
pass
def __rsub__(self, *args, **kwargs): # real signature unknown
""" Return value-self. """
pass
def __rtruediv__(self, *args, **kwargs): # real signature unknown
""" Return value/self. """
pass
def __rxor__(self, *args, **kwargs): # real signature unknown
""" Return value^self. """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Returns size in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __sub__(self, *args, **kwargs): # real signature unknown
""" Return self-value. """
pass
def __truediv__(self, *args, **kwargs): # real signature unknown
""" Return self/value. """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, *args, **kwargs): # real signature unknown
""" Return self^value. """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
value_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
value_nick = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ONE_COLUMN = 2
SINGLE_PAGE = 1
TWO_COLUMN_LEFT = 3
TWO_COLUMN_RIGHT = 4
TWO_PAGE_LEFT = 5
TWO_PAGE_RIGHT = 6
UNSET = 0
__class__ = type
__dict__ = None # (!) real value is "mappingproxy({'__module__': 'gi.repository.Poppler', '__dict__': <attribute '__dict__' of 'PageLayout' objects>, '__doc__': None, '__gtype__': <GType PopplerPageLayout (94391899009456)>, '__enum_values__': {0: <enum POPPLER_PAGE_LAYOUT_UNSET of type Poppler.PageLayout>, 1: <enum POPPLER_PAGE_LAYOUT_SINGLE_PAGE of type Poppler.PageLayout>, 2: <enum POPPLER_PAGE_LAYOUT_ONE_COLUMN of type Poppler.PageLayout>, 3: <enum POPPLER_PAGE_LAYOUT_TWO_COLUMN_LEFT of type Poppler.PageLayout>, 4: <enum POPPLER_PAGE_LAYOUT_TWO_COLUMN_RIGHT of type Poppler.PageLayout>, 5: <enum POPPLER_PAGE_LAYOUT_TWO_PAGE_LEFT of type Poppler.PageLayout>, 6: <enum POPPLER_PAGE_LAYOUT_TWO_PAGE_RIGHT of type Poppler.PageLayout>}, '__info__': gi.EnumInfo(PageLayout), 'UNSET': <enum POPPLER_PAGE_LAYOUT_UNSET of type Poppler.PageLayout>, 'SINGLE_PAGE': <enum POPPLER_PAGE_LAYOUT_SINGLE_PAGE of type Poppler.PageLayout>, 'ONE_COLUMN': <enum POPPLER_PAGE_LAYOUT_ONE_COLUMN of type Poppler.PageLayout>, 'TWO_COLUMN_LEFT': <enum POPPLER_PAGE_LAYOUT_TWO_COLUMN_LEFT of type Poppler.PageLayout>, 'TWO_COLUMN_RIGHT': <enum POPPLER_PAGE_LAYOUT_TWO_COLUMN_RIGHT of type Poppler.PageLayout>, 'TWO_PAGE_LEFT': <enum POPPLER_PAGE_LAYOUT_TWO_PAGE_LEFT of type Poppler.PageLayout>, 'TWO_PAGE_RIGHT': <enum POPPLER_PAGE_LAYOUT_TWO_PAGE_RIGHT of type Poppler.PageLayout>})"
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
}
__gtype__ = None # (!) real value is '<GType PopplerPageLayout (94391899009456)>'
__info__ = gi.EnumInfo(PageLayout)
| [
"[email protected]"
] | |
82f573ab57442baca38130076f8b17ddd1163034 | a665f561b103a51404785f35d0026c60f0083cb4 | /0x05-python-exceptions/101-safe_function.py | 38683ee508361b035c621dad79ea63525fad197f | [] | no_license | Joshua-Enrico/holbertonschool-higher_level_programming | c5f3c9ab55167ea2e7ea3b31dd8edf2e22a18bde | 8c1559f9c772b60186e899e17c67d299f88de726 | refs/heads/main | 2023-07-31T17:45:16.723947 | 2021-09-23T00:29:25 | 2021-09-23T00:29:25 | 361,960,411 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | #!/usr/bin/python3
def safe_function(fct, *args):
try:
div = fct(*args)
return div
except Exception as error:
import sys
print("Exception: {}".format(error), file=sys.stderr)
return None
| [
"[email protected]"
] | |
d4c44550df6570a3c03d89d628513a25c2868572 | 0ae589f33fbf37a6af830dd7494cc576f267f202 | /scenario/settings.py | ea8db96a3b7c5d412a773b2d60a74cbfa2abfd55 | [] | no_license | vamsi9477/sosioHosting | 85be712762738604625a13569f85aa986c31d5b0 | 42dbe2171a32b4cf40d202f16d89c49db9b3c10e | refs/heads/master | 2020-04-05T01:09:02.486917 | 2018-11-06T18:03:07 | 2018-11-06T18:03:07 | 156,425,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | py | """
Django settings for scenario project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*l#4^7y1%o0r9p01f)lz7mcdw-nc9#2iet=ak3ma9rj53f+zyh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
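# Illustrative only: when DEBUG is False, list the hosts this site may serve,
# e.g. ALLOWED_HOSTS = ['example.com', '127.0.0.1'] (hostnames here are placeholders).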
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sc1.apps.Sc1Config',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scenario.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scenario.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
eeedc6e06be66be4ba83b0914b71cabc517a8dc2 | ad010f3ecdaa260b2d8732b8b784d58b3c812b9e | /spider_admin_pro/config/yaml_config.py | a43dc91138192f1c70a92ea9429b25cabd30f721 | [] | no_license | laashub-soa/spider-admin-pro | 52261816015afa672176423f38d0206f9bbafa15 | 5faefebd25ad6a163a6a7d18076dc10adba7d970 | refs/heads/master | 2023-08-14T01:24:15.659796 | 2021-09-27T04:15:52 | 2021-09-27T04:15:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | # -*- coding: utf-8 -*-
#################################
# Read user-defined configuration overrides
#################################
import os
import yaml
from spider_admin_pro.config import env_config
from spider_admin_pro.logger import logger
config_file = os.path.join(os.getcwd(), 'config.yml')
logger.info('config_file: %s', config_file)
if os.path.exists(config_file):
f = open(config_file, "rb")
config = yaml.safe_load(f)
f.close()
else:
config = {}
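# Illustrative sketch of a config.yml this loader would accept; the keys mirror the
# lookups below and every value is a placeholder, not a recommended setting:
#
#   PORT: 5001
#   HOST: 127.0.0.1
#   USERNAME: admin
#   PASSWORD: "123456"
#   EXPIRES: 7
#   SCRAPYD: http://127.0.0.1:6800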
# Flask server settings
FLASK_PORT = config.get('PORT', env_config.FLASK_PORT)
FLASK_HOST = config.get('HOST', env_config.FLASK_HOST)
# Login credentials
BASIC_AUTH_USERNAME = config.get('USERNAME', env_config.BASIC_AUTH_USERNAME)
BASIC_AUTH_PASSWORD = config.get('PASSWORD', env_config.BASIC_AUTH_PASSWORD)
BASIC_AUTH_JWT_KEY = config.get('JWT_KEY', env_config.BASIC_AUTH_JWT_KEY)
# Token lifetime, in days
BASIC_AUTH_EXPIRES = config.get('EXPIRES', env_config.BASIC_AUTH_EXPIRES)
# Scrapyd address; no trailing slash
SCRAPYD_SERVER = config.get('SCRAPYD', env_config.SCRAPYD_SERVER)
# Scheduler: storage for schedule history
# mysql, sqlite, or any other database supported by peewee
SCHEDULE_HISTORY_DATABASE_URL = config.get('SCHEDULE_HISTORY_DATABASE_URL',
env_config.SCHEDULE_HISTORY_DATABASE_URL)
# Scheduler: job-store storage URL
JOB_STORES_DATABASE_URL = config.get('JOB_STORES_DATABASE_URL', env_config.JOB_STORES_DATABASE_URL)
# Log directory
LOG_DIR = config.get("LOG_DIR", env_config.LOG_DIR)
| [
"[email protected]"
] | |
096bc1c7152955fc7efee92dc96b6923843848ec | ee41311a11a1c6baedafd9a914d5a1f8330fe8a9 | /SANEF_LIVE/venv/Lib/site-packages/anaconda_navigator/widgets/tabs/tests/test_environments_tab.py | 2e4d36bd2647c721b4161cbc2957d1664db066a3 | [] | no_license | sethnanati/CodeRepoPython | 2dffb7263620bd905bf694f348485d894a9513db | b55e66611d19b35e9926d1b1387320cf48e177c8 | refs/heads/master | 2023-07-07T11:16:12.958401 | 2021-02-13T10:09:48 | 2021-02-13T10:09:48 | 376,531,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,911 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016-2017 Anaconda, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""Tests for environments tab."""
# yapf: disable
# Standard library imports
import sys
# Third party imports
from qtpy.QtCore import Qt
import pytest
# Local imports
from anaconda_navigator.api.conda_api import CondaAPI
from anaconda_navigator.utils.fixtures import tmpfile, tmpfolder
from anaconda_navigator.widgets.dialogs import MessageBoxError
from anaconda_navigator.widgets.tabs.environments import EnvironmentsTab
# yapf: enable
tmpfile
tmpfolder
PY3 = sys.version_info >= (3, 4)
xfail = pytest.mark.xfail
@pytest.fixture()
def env_tab(qtbot, tmpfile):
widget = EnvironmentsTab()
qtbot.addWidget(widget)
widget.show()
widget.setup_tab(metadata={})
widget.load_environment()
with qtbot.waitSignal(widget.sig_status_updated) as blocker:
blocker
return widget, qtbot, tmpfile
MessageBoxError.exec_ = lambda *args: True
class TestEnvironmentsTab:
def package_version(self, pkg, name='root'):
api = CondaAPI()
return api.package_version(name=name, pkg=pkg, build=True)
def remove_env(self, widget):
worker = widget.packages_widget.remove_environment(
name='navigatortest'
)
worker.communicate() # run create
@xfail
def test_bad_create(self, env_tab): # analysis:ignore
widget, qtbot, tmpfile = env_tab
with open(tmpfile, 'w') as f:
raw = "name: navigatortest\ndependencies:\n- not-real=0.0.0=py36_0"
f.write(raw)
worker = widget.packages_widget.import_yaml(
name="navigatortest", yaml=tmpfile
)
with qtbot.waitSignal(widget.sig_error_popped_up, timeout=5000):
with qtbot.waitSignal(worker.sig_finished, timeout=5000):
worker.name = "navigatortest"
worker.sig_finished.connect(widget._environment_created)
@xfail
def test_ipython_option(self, env_tab, tmpfolder):
widget, qtbot, tmpfile = env_tab
pyver = 'python={0}'.format(self.package_version('python'))
self.remove_env(widget)
worker = widget.packages_widget.create_environment(
name='navigatortest', packages=[pyver]
)
worker.name = 'navigatortest'
worker.communicate() # run create
widget._environment_created(worker, "", "")
widget.menu_list.exec_ = lambda *args: True
qtbot.mouseClick(
widget.list_environments.currentItem().button_options,
Qt.LeftButton
)
is_action_enabled = widget.menu_list.actions()[2].isEnabled()
assert not is_action_enabled
worker = widget.packages_widget.api.conda_install(
name='navigatortest', pkgs=['jupyter-core']
)
worker.communicate()
qtbot.mouseClick(
widget.list_environments.currentItem().button_options,
Qt.LeftButton
)
assert not widget.menu_list.actions()[2].isEnabled()
worker = widget.packages_widget.api.conda_install(
name='navigatortest', pkgs=['ipython']
)
worker.communicate()
qtbot.mouseClick(
widget.list_environments.currentItem().button_options,
Qt.LeftButton
)
assert widget.menu_list.actions()[2].isEnabled()
worker = widget.packages_widget.remove_environment(
name='navigatortest'
)
worker.communicate() # run create
self.remove_env(widget)
| [
"[email protected]"
] | |
2ab57e0a422988c3ab9c0a083d6d0b87670ebc16 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch117_2020_03_23_21_27_19_174351.py | cc5e23b34f33ab76256d64d25a4ac3b24699c59a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from math import sin
from math import asin
def snell_descartes(n1, n2, θ1):
    θ2 = asin(n1 * sin(θ1) / n2)
    return θ2
| [
"[email protected]"
] | |
013f62d1095057fe79083d89f83110ecb8c70f3f | dc99d95671170444cd7bf02e37da6ecda4a5f19e | /apps/courses/views.py | 744bef09fd1699bd93842b7c1d3d4a04ab7d3ca9 | [] | no_license | bbright3493/python_real_war | 734d49ed9f7e1800d24dc754424a07b69d7d8c1f | 6e43bb7d814920222f3310bd6fd9f04cb3d5bbf1 | refs/heads/master | 2020-03-30T06:08:40.249185 | 2018-10-22T07:33:41 | 2018-10-22T07:33:41 | 150,841,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,305 | py | # -*- coding: utf-8 -*-
import json
from django.shortcuts import render, HttpResponse
from django.views.generic.base import View
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
from utils.mixin_utils import LoginRequiredMixin
from .models import Course, Lesson, ChoiceQuestion, Video, ProgramQuestion, ChoiceBank, ProgramUpload, CourseCategory, \
Faq
from .models import CourseDirection, KnowledgePoint
from article.models import Article
from operation.models import UserCourse, UserPass, UserErrorChoice
from integral.models import UserIntergral, IntergralDemand
from .forms import ProgramUploadForm
from project.models import ProjectShow
# Create your views here.
class CourseListView(View):
"""
    Course list page
"""
def get(self, request):
all_category = CourseCategory.objects.all()
all_direction = CourseDirection.objects.all()
click_direction = request.GET.get("direction", "all")
click_category = request.GET.get("category", "all")
click_degree = request.GET.get("degree", "all")
click_sort = request.GET.get("sort", "new")
print(click_direction)
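        # Narrow the queryset step by step (direction -> category -> degree), then order
        # by newest ("-add_time") for the default sort or by enrolment ("-students") for "hot".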
if click_direction == "all":
if click_category == "all":
if click_degree == "all":
all_course = Course.objects.all().order_by("-add_time")
if click_sort == "hot":
all_course = Course.objects.all().order_by("-students")
else:
all_course = Course.objects.filter(degree=click_degree).order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
else:
if click_degree == "all":
all_course = Course.objects.filter(coursecategory__category=click_category).order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
else:
all_course = Course.objects.filter(coursecategory__category=click_category,
degree=click_category).order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
else:
all_course = Course.objects.filter(direction=click_direction)
print("all:", all_course)
if click_category == "all":
if click_degree == "all":
all_course = all_course.order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
else:
all_course = all_course.filter(degree=click_degree).order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
else:
if click_degree == "all":
all_course = all_course.filter(coursecategory__category=click_category).order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
else:
all_course = all_course.filter(coursecategory__category=click_category,
degree=click_category).order_by("-add_time")
if click_sort == "hot":
all_course = all_course.order_by("-students")
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
p = Paginator(all_course, 6, request=request)
courses = p.page(page)
print(type(courses))
print("session:", request.session.get('Linux', default="Html"))
return render(request, 'course_list.html', {
"all_direction": all_direction,
"all_course": courses,
"all_category": all_category,
"click_direction": click_direction,
"click_category": click_category,
"click_degree": click_degree,
"click_sort": click_sort,
})
class CourseLevelView(View):
"""
    Course level list page.
    When showing the levels, also query the user's per-level records so each level can display this user's progress.
"""
def get(self, request, course_id):
course = Course.objects.get(id=int(course_id))
        # Check whether the user is already linked to this course; create the link if not
try:
user_course = UserCourse.objects.get(user=request.user, course=course)
except:
user_course = UserCourse(user=request.user, course=course)
user_course.save()
students = UserCourse.objects.all().count()
course.students = int(students)
course.save()
all_level = Lesson.objects.filter(course=course_id).order_by("add_time")
this_level = Lesson.objects.filter(course=course_id).first()
        # For every level of this course, look up the matching UserPass record; create it if it does not exist
for level in all_level:
try:
cur_user_level = UserPass.objects.get(user=request.user, lesson=level)
except:
cur_user_level = UserPass(user=request.user, lesson=level)
cur_user_level.save()
user_levels = UserPass.objects.filter(user=request.user, lesson__course=course).order_by("lesson")
        # # Next level
# try:
# next_level = Lesson.objects.filter(course=course_id).order_by("add_time")[Level_num+1]
# except IndexError:
# next_level = Lesson.objects.filter(course=course_id).order_by("add_time")[Level_num]
#
        # # Unlock the next level
# if this_level.pass_level:
# next_level.pass_level = True
last_level = Lesson.objects.filter(course=course_id).last()
projects = ProjectShow.objects.filter(course=course)
return render(request, 'course_level.html', locals())
class CourseDetailView(View):
"""
    Level detail page
"""
def get(self, request, course_id, lesson_id):
course = Course.objects.get(id=course_id)
        # Check the user's course status; if it is "not started", mark it "in progress"
user_course = UserCourse.objects.get(user=request.user, course=course)
if user_course.study_status == 1:
user_course.study_status = 2
user_course.save()
lesson = Lesson.objects.get(id=lesson_id)
try:
user_lesson = UserPass.objects.get(user=request.user, lesson=lesson)
except:
user_lesson = UserPass(user=request.user, lesson=lesson)
user_lesson.save()
print(lesson)
# user_intergral = UserIntergral.objects.get(user=request.user)
extend_demand = IntergralDemand.objects.get(lesson_id=int(lesson_id), demand='extend_download')
# explain_demand = IntergralDemand.objects.get(lesson_id=int(lesson_id), demand='pro_explain')
all_vedio = Video.objects.filter(lesson_id=lesson_id)
all_article = Article.objects.filter(lesson=lesson).order_by('no')
choice_bank = lesson.get_choice_bank()
program_bank = lesson.get_program_bank()
faqs = Faq.objects.filter(lesson=lesson)
knowledge_points = KnowledgePoint.objects.filter(lesson=lesson)
lesson_projects = ProjectShow.objects.filter(lesson=lesson)
return render(request, 'course_detail.html', locals())
class ChoiceQuestionAnswerView(View):
"""
    Multiple-choice questions (answer page)
"""
def get(self, request, course_id, lesson_id, choice_id):
lesson_choices = ChoiceQuestion.objects.filter(lesson_id=int(lesson_id))
this_question = ChoiceQuestion.objects.get(id=choice_id)
all_question_num = ChoiceQuestion.objects.filter(lesson_id=int(lesson_id)).count()
is_last = False
if int(choice_id) == all_question_num:
is_last = True
return render(request, 'choice_answer.html', locals())
class ChoiceQuestionView(View):
"""
    Multiple-choice questions (practice page)
"""
def get(self, request, course_id, lesson_id, choice_id):
lesson_choices = ChoiceQuestion.objects.filter(lesson_id=int(lesson_id))
this_question = ChoiceQuestion.objects.get(id=choice_id)
all_question_num = ChoiceQuestion.objects.filter(lesson_id=int(lesson_id)).count()
print(all_question_num)
if int(this_question.question_num) == 1:
request.session['right_count'] = 0
request.session['error'] = []
is_last = False
if this_question.question_num == all_question_num:
is_last = True
next_question = this_question
else:
next_question = ChoiceQuestion.objects.get(question_num=this_question.question_num+1,
choicebank=this_question.choicebank)
return render(request, 'choice_pra.html', locals())
class NextQuestionView(View):
"""
    Next question
"""
def post(self, request):
this_question = request.POST.get("practice_num", 1)
user_answer = request.POST.get("user_answer", "")
if int(user_answer) != -1:
            # Get the correct answer for this question
right = ChoiceQuestion.objects.get(id=int(this_question))
right_answer = right.answer
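            # The submitted option index appears to be 0-based while ChoiceQuestion.answer is
            # stored 1-based, hence the +1 below (inferred from the original comparison).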
if int(user_answer) + 1 == right_answer:
print("答对本题")
request.session['right_count'] = request.session.get('right_count', default=0) + 1
else:
print("本题错误")
l = request.session['error']
l.append(right.id)
request.session['error'] = l
user_pass = UserPass.objects.get(user=request.user, lesson=right.lesson)
            # Check whether this is the first time the answer is submitted
if user_pass.choice_status == 0:
user_course = UserCourse.objects.get(user=request.user, course=right.lesson.course)
                # Check whether the user has unlocked VIP for the course or for this level
if user_course.course_status == 2 or user_pass.status == 2:
error_question = UserErrorChoice()
error_question.user = request.user
error_question.choice = right
error_question.user_answer = int(user_answer) + 1
error_question.save()
value = {"status": "success"}
print("session", request.session['right_count'])
return HttpResponse(json.dumps(value), content_type='application/json')
else:
return HttpResponse('{"status":"fail", "msg":"没有进行选择"}', content_type='application/json')
class ChoiceResultView(View):
"""
    Multiple-choice results
"""
def get(self, request, course_id, lesson_id):
right_nums = request.session.get('right_count', default=0)
user_errors = request.session.get('error', default=[])
errors = []
for error in user_errors:
errors.append(ChoiceQuestion.objects.get(id=int(error)))
print("right_nums:", right_nums)
all_question_num = ChoiceQuestion.objects.filter(lesson_id=int(lesson_id)).count()
print("all_num:", all_question_num)
right_rate = int(int(right_nums) / float(all_question_num) * 100)
print(right_rate)
lesson = Lesson.objects.get(id=lesson_id)
course = Course.objects.get(id=course_id)
        # Save the submission status; only users who have unlocked VIP get this status changed
user_pass = UserPass.objects.get(user=request.user, lesson=lesson)
user_course = UserCourse.objects.get(user=request.user, course=course)
if user_pass.choice_status == 0 and (user_pass.status == 2 or user_course.course_status == 2):
user_pass.choice_status = 1
user_pass.save()
return render(request, 'choice_result.html', locals())
class ProgramView(View):
"""
    Programming exercise
"""
def get(self, request, course_id, lesson_id, program_id):
# program_file = ProgramQuestion.objects.get(course=int(course_id), lesson=int(lesson_id), id=int(program_id))
program = ProgramQuestion.objects.get(id=program_id)
all_question_num = ProgramQuestion.objects.filter(program_bank=program.program_bank).count()
if int(program.question_num) == 1:
request.session['right_count_program'] = 0
request.session['error_program'] = []
is_last = False
        # Check whether this is the last question
if program.question_num == all_question_num:
is_last = True
next_program = program
else:
next_program = ProgramQuestion.objects.get(question_num=program.question_num+1, program_bank=program.program_bank )
try:
program_result = ProgramUpload.objects.get(programquestion_id=int(program_id), user=request.user)
except ProgramUpload.DoesNotExist:
program_result = ProgramUpload.objects.all()
print(program_result)
return render(request, 'program.html', locals())
class ProgramingView(View):
"""
    Coding page for a programming exercise
"""
def get(self, request, course_id, lesson_id, program_id):
# program_file = ProgramQuestion.objects.get(course=int(course_id), lesson=int(lesson_id), id=int(program_id))
program = ProgramQuestion.objects.get(id=program_id)
try:
program_result = ProgramUpload.objects.get(programquestion_id=int(program_id), user=request.user)
except ProgramUpload.DoesNotExist:
program_result = ProgramUpload.objects.all()
print(program_result)
return render(request, 'program_start.html', locals())
class ProgramingCommitView(View):
"""
    Handle code submission
"""
def post(self, request):
user_answer = request.POST.get("code", "")
program_id = request.POST.get("program_id", "")
print(user_answer)
program_question = ProgramQuestion.objects.get(id=int(program_id))
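        # Correctness is decided by an exact string match between the submitted code/output
        # and the stored reference result.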
if program_question.result == user_answer:
value = {"status": "success", "result": "ok"}
else:
value = {"status": "success", "result": "error"}
return HttpResponse(json.dumps(value), content_type='application/json')
class NextProgramView(View):
"""
    Next question
"""
def post(self, request):
this_question_num = request.POST.get("practice_num", 1)
this_question_id = request.POST.get("practice_id", 1)
result = request.POST.get("result", "")
this_program = ProgramQuestion.objects.get(id=this_question_id)
if int(result) == 1:
print("答对本题")
request.session['right_count_program'] = request.session.get('right_count_program', default=0) + 1
else:
print("本题错误")
l = request.session['error_program']
l.append(this_program.id)
request.session['error_program'] = l
value = {"status": "success"}
print("session", request.session['right_count_program'])
return HttpResponse(json.dumps(value), content_type='application/json')
class ProgramCommitView(View):
"""
    Programming exercise
"""
def get(self, request, course_id, lesson_id, program_id):
# program_file = ProgramQuestion.objects.get(course=int(course_id), lesson=int(lesson_id), id=int(program_id))
try:
program_result = ProgramUpload.objects.get(programquestion_id=int(program_id), user=request.user)
except ProgramUpload.DoesNotExist:
program_result = ProgramUpload.objects.all()
print(program_result)
return render(request, 'program-commit.html', {
# "program_file": program_file,
"program_result": program_result,
"program_id": program_id,
})
class ProgramUploadView(View):
"""
    Programming project upload
"""
def post(self, request):
file_obj = request.FILES.get('file')
image_obj = request.FILES.get('image')
learned_obj = request.POST.get('learn')
programId_obj = request.POST.get('programId')
program = ProgramQuestion.objects.get(id=int(programId_obj))
# user = request.user
if file_obj and image_obj:
program_result = ProgramUpload()
program_result.programquestion = program
program_result.user = request.user
program_result.upload = file_obj
program_result.image = image_obj
program_result.learned = learned_obj
program_result.is_complete = True
program_result.save()
return HttpResponse('{"status":"success"}', content_type='application/json')
else:
return HttpResponse('{"status":"fail"}', content_type='application/json')
class ProgramResultView(View):
"""
    Programming exercise results
"""
def get(self, request, course_id, lesson_id):
right_nums = request.session.get('right_count_program', default=0)
user_errors = request.session.get('error_program', default=[])
errors = []
for error in user_errors:
errors.append(ProgramQuestion.objects.get(id=int(error)))
errors = list(set(errors))
print("right_nums:", right_nums)
all_question_num = ProgramQuestion.objects.filter(lesson_id=int(lesson_id)).count()
print("all_num:", all_question_num)
right_rate = int(int(right_nums) / float(all_question_num) * 100)
print(right_rate)
lesson = Lesson.objects.get(id=lesson_id)
course = Course.objects.get(id=course_id)
return render(request, 'program_result.html', locals())
class PostView(View):
def post(self, request):
import time
import os
from programing import settings
file_obj = request.FILES.get('file')
image_obj = request.FILES.get('image')
        if file_obj:  # handle the attachment upload
request_set = {}
print('file--obj', file_obj)
# user_home_dir = "upload/%s" % (request.user.userprofile.id)
accessory_dir = settings.BASE_DIR
if not os.path.isdir(accessory_dir):
os.mkdir(accessory_dir)
upload_file = "%s/%s" % (accessory_dir, file_obj.name)
recv_size = 0
with open(upload_file, 'wb') as new_file:
for chunk in file_obj.chunks():
new_file.write(chunk)
order_id = time.strftime("%Y%m%d%H%M%S", time.localtime())
# cache.set(order_id, upload_file)
return HttpResponse(order_id)
class CompleteView(View):
"""
    Level complete
"""
def post(self, request):
course_id = request.POST.get("course_id", 1)
lesson_id = request.POST.get("lesson_id", 1)
print(type(int(course_id)), int(course_id))
this_lesson = Lesson.objects.get(course_id=int(course_id), id=int(lesson_id))
print(this_lesson)
this_lesson.pass_level = True
last_level = Lesson.objects.filter(course=int(course_id)).last()
print(last_level)
choice_bank = ChoiceBank.objects.get(lesson=int(lesson_id))
print(choice_bank)
program_question = ProgramQuestion.objects.get(lesson=int(lesson_id))
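        # The next level is unlocked only when both the choice bank and the programming
        # question are complete; for the last level, it is simply marked as passed.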
if choice_bank.is_complete == True and program_question.is_complete == True:
if int(lesson_id) != last_level.id:
next_level = Lesson.objects.filter(course=int(course_id)).order_by("-add_time")[int(lesson_id) + 1]
print("next:", next_level)
next_level.open_level = True
this_lesson.pass_level = True
this_lesson.save()
next_level.save()
else:
this_lesson.pass_level = True
this_lesson.save()
value = {"status": "success", "msg": "已完成"}
return HttpResponse(json.dumps(value), content_type='application/json')
else:
value = {"status": "fail", "msg": "你还没有完成全部任务"}
return HttpResponse(json.dumps(value), content_type='application/json')
# class ProjectShowView(View):
# """
#     Project showcase
# """
#
# def get(self, request):
# all_category = CourseCategory.objects.all()
#
# click_category = request.GET.get("category", "all")
# click_course = request.GET.get("course", "all")
# click_level = request.GET.get("level", "all")
# all_level = Lesson.objects.filter(course__name=click_course)
#
# if click_category == "all":
# print("category:", click_category)
# all_project = ProgramUpload.objects.filter(is_show=True).order_by("-add_time")
# all_course = Course.objects.filter()
# else:
# all_course = Course.objects.filter(coursecategory__category=click_category)
# if click_course == "all":
#
# all_project = ProgramUpload.objects.filter(lesson__course__coursecategory__category=click_category,
# is_show=True)
# else:
# if click_level == "all":
#
# all_project = ProgramUpload.objects.filter(lesson__course__coursecategory__category=click_category,
# lesson__course__name=click_course,
# is_show=True)
# else:
#
# all_project = ProgramUpload.objects.filter(lesson__course__coursecategory__category=click_category,
# lesson__course__name=click_course,
# lesson__name=click_level,
# is_show=True)
#         # Paginate the courses
# try:
# page = request.GET.get('page', 1)
# except PageNotAnInteger:
# page = 1
#
# p = Paginator(all_project, 6, request=request)
# projects = p.page(page)
#
# return render(request, "project_show.html", {
# "all_category": all_category,
# "click_category": click_category,
# "all_course": all_course,
# "click_course": click_course,
# "all_level": all_level,
# "click_level": click_level,
# "projects": projects,
#
# })
# class ProjectResultView(View):
# """
#     Project showcase results
# """
#
# def get(self, request, lesson):
# try:
# program_result = ProgramUpload.objects.get(lesson__name=lesson, user=request.user)
# except ProgramUpload.DoesNotExist:
# program_result = ProgramUpload.objects.all()
# print(program_result)
# return render(request, "project_result.html", {
# "program_result": program_result
# })
class DownloadUrlView(View):
"""链接下载"""
def post(self, request):
course_id = request.POST.get("course_id", 1)
lesson_id = request.POST.get("lesson_id", 1)
type = request.POST.get("type", "")
user_intergral = UserIntergral.objects.get(user=request.user)
demand_intergral = IntergralDemand.objects.get(lesson_id=int(lesson_id), demand=type)
if user_intergral.grade >= demand_intergral.intergral:
user_intergral.grade = user_intergral.grade - demand_intergral.intergral
user_intergral.save()
demand_intergral.download_count += 1
demand_intergral.save()
value = {"status": "success", "re_url": demand_intergral.url}
return HttpResponse(json.dumps(value), content_type='application/json')
else:
value = {"status": "fail", "msg": "您的积分不足,请充值!"}
return HttpResponse(json.dumps(value), content_type='application/json')
| [
"[email protected]"
] | |
382ec61b5d92e38174ded2840080940b3653dd40 | d72505a7961bf7f96094a6c7013f3c794495044b | /client.py | 4856fbb4f78c0f80314f35362b41858153512a26 | [] | no_license | 520hacker/websocket-connetions-benchmark | fa6ce757ec9cd68c5bcd60a5421700af6ae4814b | af609d775742cfeca5714133cddea32c8b0c51c0 | refs/heads/master | 2020-06-05T19:20:43.277616 | 2019-02-13T08:08:55 | 2019-02-13T08:08:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# debug in python 3.6
#__author__ == 'ipcpu'
import websocket
import time
import threading
import json
import multiprocessing
from threadpool import ThreadPool, makeRequests
# Change this to your own websocket address
WS_URL = "ws://10.140.12.45:8888/"
# Number of processes
processes=5
# Number of threads per process (a process may be limited to 1024 open files; adjust ulimit / fs.file-max if needed)
thread_num=5000
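# Total target connections = processes * thread_num (5 * 5000 = 25,000 with the values above);
# each process needs roughly thread_num file descriptors, so raise ulimit -n accordingly.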
def on_message(ws, message):
print(message)
pass
def on_error(ws, error):
print(error)
pass
def on_close(ws):
print("### closed ###")
pass
def on_open(ws):
    def send_thread():
        # Set the payload your websocket server expects
        send_info = {"cmd": "refresh", "data": {"room_id": "58", "wx_user_id": 56431}}
        # Send data every 10 seconds so the connection is kept alive
        while True:
            time.sleep(10)
            ws.send(json.dumps(send_info))
    t = threading.Thread(target=send_thread)
t.start()
def on_start(num):
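    # Stagger connection setup across ~20 seconds (num % 20) so the threads do not all
    # start their handshakes at the same instant.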
time.sleep(num%20)
websocket.enableTrace(True)
ws = websocket.WebSocketApp(WS_URL + str(num),
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever()
def thread_web_socket():
#线程池
pool = ThreadPool(thread_num)
num = list()
    # Number of threads to start
for ir in range(thread_num):
num.append(ir)
requests = makeRequests(on_start, num)
[pool.putRequest(req) for req in requests]
pool.wait()
if __name__ == "__main__":
    # Process pool
pool = multiprocessing.Pool(processes=processes)
    # Number of processes to start
for i in range(processes):
pool.apply_async(thread_web_socket)
pool.close()
pool.join()
| [
"[email protected]"
] |