id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses, 1 value)
---|---|---|
4906044
|
<reponame>sMedX/facenet
"""Train facenet classifier.
"""
# MIT License
#
# Copyright (c) 2020 SMedX
import click
from tqdm import tqdm
from pathlib import Path
import tensorflow as tf
import numpy as np
from facenet import config, facenet, faceclass, ioutils
class ConfusionMatrix:
def __init__(self, embeddings, classifier):
nrof_classes = len(embeddings)
nrof_positive_class_pairs = nrof_classes
nrof_negative_class_pairs = nrof_classes * (nrof_classes - 1) / 2
tp = tn = fp = fn = 0
for i in range(nrof_classes):
for k in range(i):
outs = classifier.predict(embeddings[i], embeddings[k])
mean = np.mean(outs)
fp += mean
tn += 1 - mean
outs = classifier.predict(embeddings[i])
mean = np.mean(outs)
tp += mean
fn += 1 - mean
tp /= nrof_positive_class_pairs
fn /= nrof_positive_class_pairs
fp /= nrof_negative_class_pairs
tn /= nrof_negative_class_pairs
self.classifier = classifier
self.accuracy = (tp + tn) / (tp + fp + tn + fn)
self.precision = tp / (tp + fp)
self.tp_rate = tp / (tp + fn)
self.tn_rate = tn / (tn + fp)
def __repr__(self):
return (f'{self.__class__.__name__}\n' +
f'{str(self.classifier)}\n' +
f'accuracy {self.accuracy}\n' +
f'precision {self.precision}\n' +
f'tp rate {self.tp_rate}\n' +
f'tn rate {self.tn_rate}\n')
def binary_cross_entropy_loss(logits, options):
# define upper-triangle indices
batch_size = options.nrof_classes_per_batch * options.nrof_examples_per_class
triu_indices = [(i, k) for i, k in zip(*np.triu_indices(batch_size, k=1))]
# compute labels for embeddings
labels = []
for i, k in triu_indices:
if (i // options.nrof_examples_per_class) == (k // options.nrof_examples_per_class):
# label 1 means inner class distance
labels.append(1)
else:
# label 0 means across class distance
labels.append(0)
pos_weight = len(labels) / sum(labels) - 1
logits = tf.gather_nd(logits, triu_indices)
labels = tf.constant(labels, dtype=logits.dtype)
# initialize cross entropy loss
cross_entropy = tf.nn.weighted_cross_entropy_with_logits(labels, logits, pos_weight)
loss = tf.reduce_mean(cross_entropy)
return loss
@click.command()
@click.option('--config', default=None, type=Path,
help='Path to yaml config file with used options for the application.')
def main(**options):
options = config.train_classifier(__file__, options)
embeddings = facenet.Embeddings(options.embeddings)
ioutils.write_text_log(options.logfile, embeddings)
print(embeddings)
embarray = embeddings.data(normalize=options.embeddings.normalize)
next_elem = facenet.equal_batches_input_pipeline(embarray, options)
embeddings_batch = tf.placeholder(tf.float32, shape=[None, embeddings.length], name='embeddings_batch')
# define classifier
if options.embeddings.normalize:
model = faceclass.FaceToFaceNormalizedEmbeddingsClassifier()
else:
model = faceclass.FaceToFaceDistanceClassifier()
logits = model(embeddings_batch)
cross_entropy = binary_cross_entropy_loss(logits, options)
# define train operations
global_step = tf.Variable(0, trainable=False, name='global_step')
dtype = tf.float64
initial_learning_rate = tf.constant(options.train.learning_rate_schedule.initial_value, dtype=dtype)
decay_rate = tf.constant(options.train.learning_rate_schedule.decay_rate, dtype=dtype)
if not options.train.learning_rate_schedule.decay_steps:
decay_steps = tf.constant(options.train.epoch.size, dtype=dtype)
else:
decay_steps = tf.constant(options.train.learning_rate_schedule.decay_steps, dtype=dtype)
lr_decay_factor = tf.math.pow(decay_rate, tf.math.floor(tf.cast(global_step, dtype=dtype) / decay_steps))
learning_rate = initial_learning_rate * lr_decay_factor
train_ops = facenet.train_op(options.train, cross_entropy, global_step, learning_rate, tf.global_variables())
tensor_ops = {
'global_step': global_step,
'loss': cross_entropy,
'vars': tf.trainable_variables(),
'learning_rate': learning_rate
}
print('start training')
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
for epoch in range(options.train.epoch.max_nrof_epochs):
with tqdm(total=options.train.epoch.size) as bar:
for _ in range(options.train.epoch.size):
embeddings_batch_np = session.run(next_elem)
feed_dict = {embeddings_batch: embeddings_batch_np}
_, outs = session.run([train_ops, tensor_ops], feed_dict=feed_dict)
postfix = f"variables {outs['vars']}, loss {outs['loss']}"
bar.set_postfix_str(postfix)
bar.update()
info = f"epoch [{epoch + 1}/{options.train.epoch.max_nrof_epochs}], learning rate {outs['learning_rate']}"
print(info)
conf_mat = ConfusionMatrix(embarray, model)
print(conf_mat)
ioutils.write_text_log(options.logfile, info)
ioutils.write_text_log(options.logfile, conf_mat)
print(f'Model has been saved to the directory: {options.classifier.path}')
if __name__ == '__main__':
main()
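
# --- Editor's illustrative sketch (not part of the original script) ---
# A minimal, numpy-only restatement of the pair labelling used in
# binary_cross_entropy_loss above. The toy batch size (2 classes x 2 examples
# per class) and the helper name are assumptions chosen for illustration.
def _sketch_pair_labels(nrof_classes_per_batch=2, nrof_examples_per_class=2):
    batch_size = nrof_classes_per_batch * nrof_examples_per_class
    triu_indices = list(zip(*np.triu_indices(batch_size, k=1)))
    # label 1 for same-class (positive) pairs, 0 for cross-class (negative) pairs
    labels = [1 if (i // nrof_examples_per_class) == (k // nrof_examples_per_class) else 0
              for i, k in triu_indices]
    # pos_weight up-weights the rarer positive pairs in the weighted cross entropy
    pos_weight = len(labels) / sum(labels) - 1
    return triu_indices, labels, pos_weight
# For the 2x2 toy batch the 6 pairs get labels [1, 0, 0, 0, 0, 1] and pos_weight 2.0.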
|
StarcoderdataPython
|
3352415
|
<reponame>T-Cube-AI/Time-dependent-SEIRD-Model-API
#!/usr/bin/python3
import json
import re
from computeInsightsFunction import computeInsights
PREDICTIONS_DIR = './Predictions/US'
DATASETS_DIR = './Datasets/US'
INSIGHTS_DIR = './Insights/US'
usStatesPopulationFile = "./US-population.json"
usStatesPopulation = json.load(open(usStatesPopulationFile))
stateToHeatFactorsMap = dict()
totalUSObject = {'State': 'Total'}
usStatesPopulation.append(totalUSObject)
def replaceSpecialCharacters(filename):
filename = re.sub(r'\s+', ' ', filename)
filename = filename.replace(' ', '-')
filename = filename.replace('(', '')
filename = filename.replace('.', '')
filename = filename.replace(')', '')
return filename
def currentValues(filename):
fhandle = open(filename)
data = fhandle.readlines()
lastLine = data[-1]
lastData = lastLine.split(',')
confirmed = int(lastData[2])
recovered = int(lastData[3])
deaths = int(lastData[4])
return confirmed, recovered, deaths
def projectionValues(filename):
fhandle = open(filename)
data = json.load(fhandle)
predictions = data['overallPredictions']
lastWeek = predictions[2]['Week-3']["predictions"]
lastData = lastWeek[-1]
confirmed = lastData["confirmed"]
deaths = lastData["deaths"]
return confirmed, deaths
for stateObject in usStatesPopulation:
stateName = stateObject["State"]
filename = replaceSpecialCharacters(stateName)
infile = DATASETS_DIR + '/' + filename + '.csv'
outfile = PREDICTIONS_DIR + '/' + filename + '_projections.json'
insightsFile = INSIGHTS_DIR + '/' + filename + '.json'
try:
currentConfirmed, currentRecovered, currentDeaths = currentValues(
infile)
projectedConfirmed, projectedDeaths = projectionValues(outfile)
currentInsights = computeInsights(currentConfirmed, currentDeaths)
projectedInsights = computeInsights(
projectedConfirmed, projectedDeaths)
insightsObject = {
"Current": currentInsights,
"Projected": projectedInsights,
}
insightsFileHandle = open(insightsFile, 'w')
json.dump(insightsObject, insightsFileHandle)
insightsFileHandle.close()
except Exception as e:
print(stateName, e)
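
# --- Editor's illustrative sketch (not part of the original script) ---
# currentValues() above reads the last CSV row and takes confirmed, recovered
# and deaths from columns 2, 3 and 4. The sample row below is a made-up example
# of that assumed layout, not real data.
def _sketch_parse_last_row(line="2021-01-01,SomeState,100,40,5"):
    fields = line.strip().split(',')
    confirmed, recovered, deaths = int(fields[2]), int(fields[3]), int(fields[4])
    return confirmed, recovered, deaths
# _sketch_parse_last_row() -> (100, 40, 5)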
|
StarcoderdataPython
|
11255196
|
<reponame>harry-consulting/SAEF1<gh_stars>0
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path
from . import views
app_name = "saef"
urlpatterns = [
path('ajax/update_notifications/', views.update_notifications, name='update_notifications')
]
urlpatterns += staticfiles_urlpatterns()
|
StarcoderdataPython
|
5187276
|
from antarest.core.jwt import JWTUser, JWTGroup
from antarest.core.roles import RoleType
from antarest.login.model import Group, User
def test_is_site_admin():
jwt = JWTUser(
id=0,
impersonator=0,
type="users",
groups=[JWTGroup(id="admin", name="admin", role=RoleType.ADMIN)],
)
assert jwt.is_site_admin()
assert not JWTUser(
id=0,
impersonator=0,
type="users",
).is_site_admin()
def test_is_group_admin():
jwt = JWTUser(
id=0,
impersonator=0,
type="users",
groups=[JWTGroup(id="group", name="group", role=RoleType.ADMIN)],
)
assert jwt.is_group_admin(Group(id="group"))
assert not JWTUser(
id=0,
impersonator=0,
type="users",
).is_group_admin(Group(id="group"))
def test_is_himself():
jwt = JWTUser(
id=1,
impersonator=0,
type="users",
)
assert jwt.is_himself(User(id=1))
assert not JWTUser(
id=0,
impersonator=0,
type="users",
).is_himself(User(id=1))
|
StarcoderdataPython
|
8141012
|
# -*- coding:utf-8 -*-
"""File I/O related operations, such as list files, import/export or remove files/folder."""
__author__ = "<NAME>"
import os
import shutil
import json
import pickle
import zipfile
import urllib
import wget
DependencyFlag = False #Check if dependencies are satisfied. If not, some advanced functions will not be defined.
try:
import networkx as nx
from networkx.readwrite import json_graph
import numpy as np
import scipy.io
DependencyFlag = True
except Exception:
DependencyFlag = False
def ListApkFiles(ApkDirectory):
'''
    Get the Apk file names for an ApkDirectory in a sorted order. Return an empty list if ApkDirectory=="".
    :param String ApkDirectory: Path of an apk file directory
:return: ListOfApkFiles: The list of Paths of Apks under ApkDirectory
:rtype: List[String]
'''
ListOfApkFiles=[]
if(ApkDirectory==""):
raise ValueError('Directory is empty!')
filenames = os.listdir(ApkDirectory)
for filename in filenames:
#list filenames
#get the Path for the files
Path=os.path.abspath(os.path.join(ApkDirectory, filename))
#get the Path for the files
if os.path.splitext(filename)[1]==".apk":
if os.path.isfile(Path):
ListOfApkFiles.append(Path)
return sorted(ListOfApkFiles)
def ListFiles(Directory, Extension, All = False):
'''
    Given an extension, get the file names for a Directory in a sorted order. Return an empty list if Directory == "".
:param String/List Directory: Path/Paths of a file directory
:param String Extension: Extension of the files you want. Better include "." in the Extension. Use "." to list all files. Use ""(empty string) to list all folders.
:param Boolean All: Whether to include all files in sub-directories
:return: ListOfFiles: The list of Paths of the files you want under Directory
:rtype: List[String]
'''
ListOfFiles=[]
if(Directory == "" or Directory == []):
return []
if(type(Directory) != list and os.path.isdir(Directory) == False):
raise ValueError(Directory, 'Directory is not a directory!')
if(type(Extension)!=str):
raise ValueError(Extension, 'Extension is not a string!')
if(Extension):
if(Extension[0] != "."):
Extension = "." + Extension
if type(Directory) == list:
Directories = Directory
if All:
for Directory in Directories:
ListOfFiles.extend(_ListAllFiles(Directory, Extension))
else:
for Directory in Directories:
filenames = os.listdir(Directory)
for filename in filenames:
#list filenames
#get the Path for the files
Path=os.path.abspath(os.path.join(Directory, filename))
#get the Path for the files
if Extension == "": #Need to get all folders instead of files
if os.path.isdir(Path):
ListOfFiles.append(Path)
else:
if os.path.splitext(filename)[1]==Extension or Extension == ".":
if os.path.isfile(Path):
ListOfFiles.append(Path)
else:
if All:
ListOfFiles = _ListAllFiles(Directory, Extension)
else:
filenames = os.listdir(Directory)
for filename in filenames:
#list filenames
#get the Path for the files
Path=os.path.abspath(os.path.join(Directory, filename))
#get the Path for the files
if Extension == "": #Need to get all folders instead of files
if os.path.isdir(Path):
ListOfFiles.append(Path)
else:
if os.path.splitext(filename)[1]==Extension or Extension == ".":
if os.path.isfile(Path):
ListOfFiles.append(Path)
return sorted(ListOfFiles)
def _ListAllFiles(Directory, Extension):
'''
    Given an extension, get the file names for a Directory and all its sub-directories in a sorted order. Return an empty list if Directory == "".
:param String Directory: Path of a file directory
:param String Extension: Extension of the files you want. Better include "." in the Extension. Use "." to list all files. Use ""(empty string) to list all folders.
:return: ListOfFiles: The list of Paths of the files you want under Directory
:rtype: List[String]
'''
ListOfFiles=[]
if(Directory == ""):
raise ValueError(Directory, 'Directory is empty!')
if(os.path.isdir(Directory) == False):
raise ValueError(Directory, 'Directory is not a directory!')
if(type(Extension)!=str):
raise ValueError(Extension, 'Extension is not a string!')
if(Extension):
if(Extension[0] != "."):
Extension = "." + Extension
for root, dirs, files in os.walk(Directory):
if Extension == "":#Need to get all folders instead of files
ListOfFiles.append(os.path.abspath(root))
else:
for filename in files:
#list filenames
#get the Path for the files
Path = os.path.abspath(os.path.join(root, filename))
#get the Path for the files
if os.path.splitext(filename)[1] == Extension or Extension == ".":
if os.path.isfile(Path):
ListOfFiles.append(Path)
if Extension == "":
ListOfFiles = ListOfFiles[1:] #Remove Directory in the list since the list contains the path of Directory itself
return sorted(ListOfFiles)
def CopyFolderStructure(SourceFolder, DestinationFolder, Root = False):
'''
Copy a folder structure without copying any of the files inside of it.
    :param String SourceFolder: Path of the source folder
    :param String DestinationFolder: Path of the destination folder that the source folder structure will be copied to
    :param Boolean Root: DestinationAsRoot. If this is True, the DestinationFolder will be regarded as a folder at the same level as SourceFolder; otherwise SourceFolder will be copied into the DestinationFolder
'''
ListOfFolders = ListFiles(SourceFolder, "", All = True)
os.makedirs(DestinationFolder, exist_ok = True)
if Root is False:
for Folder in ListOfFolders:
os.makedirs(os.path.join(DestinationFolder, os.path.split(SourceFolder)[-1], os.path.relpath(Folder, SourceFolder)), exist_ok = True)
else:
for Folder in ListOfFolders:
os.makedirs(os.path.join(DestinationFolder, os.path.relpath(Folder, SourceFolder)), exist_ok = True)
def FileExist(FilePath):
'''
    Given a file path, determine whether the file exists or not.
:param String FilePath: Path of a file or directory
:rtype: Boolean
'''
if os.path.exists(FilePath)==True:
return True
else:
#if os.path.isdir(ApkFilePath)==False:
# if(os.path.basename(ApkFilePath)) in os.listdir(os.getcwd()):
# return True
return False
def RemoveDirectory(Folder):
'''
    Given a folder path, remove this folder (including all content inside).
:param String Folder: Path of a directory
:rtype: Boolean
'''
if(FileExist(Folder) == False):
raise IOError("Directory not found!")
else:
shutil.rmtree(Folder)
def ExportToJson(Path, Content):
'''
    Export something to a json file.
    Will automatically convert Set content into a List.
:param String Path: Path to store the json file
:param Variant Content: something you want to export
'''
if(isinstance(Content,set)):
Content = list(Content)
#if(isinstance(Content, collections.defaultdict)):
# Content = dict(Content)
with open(Path, "w", encoding = "utf8") as f:
json.dump(Content, f, indent=4)
def ExportToPkl(Path,Content):
'''
    Export something to a pickle file.
    Will automatically convert Set content into a List.
    :param String Path: Path to store the pickle file
:param Variant Content: something you want to export
'''
if(isinstance(Content, set)):
Content = list(Content)
#if(isinstance(Content, collections.defaultdict)):
# Content = dict(Content)
with open(Path, "wb") as fd:
pickle.dump(Content, fd)
def ImportFromPkl(Path):
'''
Import something from pickle file.
:param String Path: Path of the pickle file
:return: Content: Content in the pickle file
:rtype: Variant
'''
with open(Path,"rb") as fd:
Content = pickle.load(fd)
return Content
def ImportFromJson(Path):
'''
Import something from json file.
:param String Path: Path of the json file
:return: Content: Content in the json file
:rtype: Variant
'''
with open(Path,"r") as File:
        Content = json.load(File)
return Content
def CompressFiles(Paths, CompressedFilePath, Format = "zip"):
'''
Compress files into a (zip) file.
:param List Paths: Paths of the files you want to compress. These paths will be under the root of the compressed file.(You may want to use ListFiles to pass in all paths)
:param String CompressedFilePath: Path of the compressed file you want to store.
:param String Format: The format of the compressed file.
'''
if Format == "zip":
CompressedFile = zipfile.ZipFile(CompressedFilePath, "w", compression = zipfile.ZIP_DEFLATED)
for Path in Paths:
parent_folder = os.path.dirname(Path)
if os.path.isdir(Path):
for root, folders, files in os.walk(Path):
# Include all subfolders, including empty ones.
for folder_name in folders:
absolute_path = os.path.join(root, folder_name)
relative_path = absolute_path.replace(parent_folder, '')
CompressedFile.write(absolute_path, relative_path)
for file_name in files:
absolute_path = os.path.join(root, file_name)
relative_path = absolute_path.replace(parent_folder, '')
CompressedFile.write(absolute_path, relative_path)
else:
relative_path = os.path.split(Path)[-1]
CompressedFile.write(Path, relative_path)
CompressedFile.close()
else:
raise NotImplementedError
def DecompressFiles(Paths, TargetFolder, Format = "zip"):
'''
Decompress files from a (zip) file/files.
:param List Paths: Paths of the files you want to decompress.
:param String TargetFolder: Path of the decompressed files you want to store.
:param String Format: The format of the compressed file.
'''
if Format == "zip":
for Path in Paths:
CompressedFile = zipfile.ZipFile(Path, "r")
CompressedFile.extractall(TargetFolder)
CompressedFile.close()
else:
raise NotImplementedError
def DownloadFile(URL, Destination = "./download", ExpectedBytes = None, IsDestinationFolder = None):
"""
Download a file if not present, and make sure it's the right size.
:param String URL: URL of the file you want to download.
    :param String Destination: Path where the downloaded file will be stored; it can be a file path or a folder path.
    :param Int ExpectedBytes: Expected size of the downloaded file in bytes; if given, the file size is verified after download.
    :param Boolean IsDestinationFolder: Whether Destination is a folder; if None, this is inferred from Destination.
"""
if IsDestinationFolder is None: #Try to indicate from Destination
if os.path.basename(Destination).find(".") >= 0:
IsDestinationFolder = False
else:
IsDestinationFolder = True
if IsDestinationFolder is True:
if os.path.isdir(Destination):
pass
else:
os.makedirs(Destination)
Request = urllib.request.Request(URL, method = "HEAD")
Headers = dict(urllib.request.urlopen(Request).info().items())
if IsDestinationFolder:
FilePath = os.path.join(Destination, wget.detect_filename(URL, '', Headers))
else:
FilePath = wget.detect_filename(URL, Destination, Headers)
if not os.path.exists(FilePath):
FileName = wget.download(URL, Destination)
else:
FileName = FilePath
StatInfo = os.stat(FileName)
if ExpectedBytes is None or StatInfo.st_size == ExpectedBytes:
print('Found and verified', FileName)
else:
print(StatInfo.st_size)
raise FileExistsError(
'Failed to verify ' + FileName + '. File exists or corrupted. Can you get to it with a browser?')
return FileName
if DependencyFlag:
def ExportToJsonNodeLinkData(Path,GraphContent):
'''
        Export graph node link data to a json file.
:param String Path: Path to store the json file
:param nxGraph GraphContent: some graph you want to export
'''
with open(Path,"wb") as f:
Content=json_graph.node_link_data(GraphContent)
json.dump(Content, f, indent=4)
def ExportToGML(Path, GraphContent):
'''
        Export a graph to a GML file.
        :param String Path: Path to store the GML file
:param nxGraph GraphContent: some graph you want to export
'''
nx.write_gml(GraphContent, Path)
def ImportFromJsonNodeLinkData(Path):
'''
        Import graph node link data from a json file.
:param String Path: Path of the json file
:return: GraphContent: Graph content in the json file
:rtype: nxGraph
'''
with open(Path,"rb") as f:
Content=json.load(f)
GraphContent=json_graph.node_link_graph(Content)
return GraphContent
def ExportNpArray(Path, NpArray, Format = "%f"):
'''
Export a Numpy array to a file.
:param String Path: The stored file location.
:param numpy.array NpArray: The Numpy array you want to store.
:param String Format: How to print each element, e.g. %i, %10.5f
'''
np.savetxt(Path, NpArray, fmt = Format)
def ImportNpArray(Path, DataType, ndmin = 0):
'''
Import a Numpy array from a file.
:param String Path: The stored file location.
:param data-type DataType: How to match each element, e.g. int, float
:param int ndmin: How many dimensions of array at least you will have.
:return: NpArray: NpArray in the file
:rtype: NpArray
'''
NpArray = np.loadtxt(Path, dtype = DataType, ndmin = ndmin)
return NpArray
def ExportSparseMatrix(Path, SparseMatrix):
'''
Export a scipy sparse matrix to a file using matrix market format.
Please refer to http://math.nist.gov/MatrixMarket/formats.html for more information about this format.
:param String Path: The stored file location.
:param scipy sparse matrix SparseMatrix: The scipy sparse matrix you want to store.
'''
with open(Path, "wb+") as File:
scipy.io.mmwrite(File, SparseMatrix)
def ImportSparseMatrix(Path):
'''
Import a scipy sparse matrix from a file using matrix market format.
:param String Path: The stored file location.
:return: SparseMatrix: (converted) scipy csr_matrix in the file
:rtype: Scipy Sparse Matrix
'''
SparseMatrix = scipy.io.mmread(Path)
SparseMatrix = SparseMatrix.tocsr()
return SparseMatrix
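
# --- Editor's illustrative usage sketch (not part of the original module) ---
# A hedged example of how the helpers above fit together. "./some_dir" and the
# file names are hypothetical placeholders, not paths shipped with the module.
if __name__ == "__main__":
    ApkPaths = ListFiles("./some_dir", ".apk", All=True)    # recursive listing by extension
    ExportToJson("./some_dir/apk_paths.json", ApkPaths)     # sets would be converted to lists
    Restored = ImportFromJson("./some_dir/apk_paths.json")
    CompressFiles(Restored, "./some_dir/apks.zip", Format="zip")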
|
StarcoderdataPython
|
88838
|
<reponame>alsbi/docker_rest_service
# -*- coding: utf-8 -*-
__author__ = 'alsbi'
import json
class ApiError(Exception):
def __str__(self):
return json.dumps({'error': self.error, 'message': self.message}, indent = 4)
class ExecutionError(ApiError):
def __init__(self, error=None, message=None):
super(ExecutionError, self).__init__(message)
self.message = message
self.error = error
class BadParameter(ApiError):
def __init__(self, error=None, message=None):
super(BadParameter, self).__init__(message)
self.message = message
self.error = error
class Conflict(ApiError):
def __init__(self, error=None, message=None):
super(Conflict, self).__init__(message)
self.message = message
self.error = error
class NotFound(ApiError):
def __init__(self, error=None, message=None):
super(NotFound, self).__init__(message)
self.message = message
self.error = error
class NotRunning(ApiError):
def __init__(self, error=None, message=None):
super(NotRunning, self).__init__(message)
self.message = message
self.error = error
def check_error(error):
if error == 500:
return ExecutionError
elif error == 404:
return NotFound
elif error == 400:
return BadParameter
elif error == 409:
return Conflict
elif error == 304:
return NotRunning
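
# --- Editor's illustrative usage sketch (not part of the original module) ---
# A hedged example of turning an HTTP status code from a (hypothetical) Docker
# API response into one of the exception classes above and raising it.
def raise_for_status(status_code, error=None, message=None):
    exc_class = check_error(status_code)
    if exc_class is not None:
        raise exc_class(error=error, message=message)
# e.g. raise_for_status(404, error=404, message='no such container') raises
# NotFound, whose __str__ renders the error and message as a JSON document.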
|
StarcoderdataPython
|
171242
|
N = int(input())
a = list(map(int, input().split()))
a.sort(reverse=True)
print(sum(a[1::2][:N]))
|
StarcoderdataPython
|
3566503
|
# libraries for video
import os
import shutil
import time
import sys
if not os.path.exists(str(sys.argv[1])):
os.makedirs(str(sys.argv[1]))
#start recording
src = "../live"
dest = "./"+str(sys.argv[1])
src_files = os.listdir(src)
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name) and full_file_name[-2:]=='ts' and os.path.getsize(full_file_name)>10:
shutil.copy(full_file_name, dest)
time.sleep(6)
src_files = os.listdir(src)
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name) and full_file_name[-2:]=='ts' and os.path.getsize(full_file_name)>10:
shutil.copy(full_file_name, dest)
os.chdir(dest)
names = list()
for file_ in os.listdir("."):
if file_[-2:]=="ts":
os.rename(file_, file_[8:])
names.append(int(file_[8:-3]))
names.sort()
print(names)
with open("mylist.txt", 'w') as names_files:
for name in names:
names_files.write("file "+str(name)+".ts\n")
#os.system("(for %i in (*.ts) do @echo file '%i') > mylist.txt")
name=sys.argv[1]
os.system('ffmpeg -f concat -safe 0 -i mylist.txt -c copy '+name+'.mp4')
for file_ in os.listdir("."):
if file_[-2:]=="ts":
os.remove(file_)
#end recording
#update html
readFile = open("../../../templates/menu/registros.html")
mensaje = """<h3> Video """+str(name)+" ; Fecha: "+sys.argv[2]+" ; Nivel: "+sys.argv[3]+"""</h3>
<video controls width="320" height="208">
<source src="{% static 'registros/"""+str(dest)[2:]+"/"+str(name)+""".mp4' %}" type="video/mp4">
Tu navegador no implementa el elemento <code>video</code>.
</video>
{% endblock %}"""
lines = readFile.readlines()
lines[-1] = mensaje
readFile.close()
f = open("../../../templates/menu/registros.html",'w')
f.writelines([item for item in lines])
#f.write(mensaje)
f.close()
|
StarcoderdataPython
|
9724148
|
<reponame>AlexanderHurst/AlgoFinalProject<gh_stars>1-10
from collections import deque
# returns the longest substring in n*n time
# algorithm idea credit to <NAME>
# from stack overflow modified to include substring locations
# this was rendered obsolete for determining key length by
# coincidence index and is no longer used in this project
def find_longest_substring_location(string):
# create a list from the string for fast +
# easy access to elements
string_list = [i for i in string]
# create a queue for fast access and removal of
# the head of a list O(1) rather than O(n)
compare_queue = deque(string[1:])
# create a list to keep track of the longest
# substring that has been found so far
longest_substring = []
# create a list to keep track of the longest
# substring location
longest_substring_location = []
# create a list to keep track of the current
# substring
substring = []
# runs while compare queue has elements
# pops the left element of compare queue
# then looks for the matches in the string
# indexes, as if the string were shifted left
while compare_queue:
# for each letter in the compare queue
# check with the unshifted list to see which
# letters line up
for i, letter in enumerate(compare_queue):
# if a letter lines up add it to the substring list
# and keep checking subsequent indexes until they dont line up
if string_list[i] == letter:
# if the list is empty keep track of the indexes where the
# first character lined up
if substring == []:
# the first index occurs at i
x = i
# the second index occurs at the number of letters shifted
# which can be found by the difference in string lengths plus i
y = (len(string_list) - len(compare_queue)) + i
# add the letter to the current substring
substring.append(letter)
# once the lined up letters no longer match check if the new substring
# is longer than the previous longest one, if so replace it and its location
else:
if len(longest_substring) < len(substring):
longest_substring = substring
longest_substring_location = [x, y]
# empty the substring to begin searching again
substring = []
substring = []
# shift the string
compare_queue.popleft()
# append the length of the substring to the location for ease of access and return it
longest_substring_location.append(len(longest_substring))
return longest_substring_location
# tester function if the program is run as main
if __name__ == "__main__":
string = "ZICVTWQNGRZGVTWAVZHCQYGLMGJ"
substring_loc = find_longest_substring_location(string)
first_substring = string[substring_loc[0]
: substring_loc[0] + substring_loc[2]]
second_substring = string[substring_loc[1]
: substring_loc[1] + substring_loc[2]]
print("String:\t\t\t", string)
print("Substring length:\t", substring_loc[2])
print("First Occurence:\t", substring_loc[0])
print("Second Occurence:\t", substring_loc[1])
print("First Substring:\t", first_substring)
print("Second Substring:\t", second_substring)
|
StarcoderdataPython
|
4964844
|
import rhinoscriptsyntax as rs
import math
import System
# created on 9th Jan 2015
# this class defines a single floorPanel or Tile as an Object with necessary functions to control it
class floorPanel:
h1 = 0
h2 = 0
h3 = 0
p1 = None
p2 = None
slv = None
panelID = None
def __init__(self,panel1, panel2, sleeve):
self.p1 = panel1
self.p2 = panel2
self.slv = sleeve
self.panelID = rs.AddGroup()
rs.AddObjectsToGroup([panel1, panel2, sleeve], self.panelID)
def setPanelState(self, newH1, newH2, newH3):
dh1 = newH1 - self.h1
dh2 = newH2 - self.h2
dh3 = newH3 - self.h3
rs.MoveObject(self.p1, [0,0,dh1])
rs.MoveObject(self.p2, [0,0,dh2])
rs.MoveObject(self.slv, [0,0,dh3])
self.h1 = newH1
self.h2 = newH2
self.h3 = newH3
def reset(self):
self.setPanelState(0,0,0)
def delete(self):
rs.DeleteObjects([self.p1, self.p2, self.slv])
self.p1 = None
self.p2 = None
self.slv = None
# asks the user to load a bitmap preset and uses it on the 'panels' 2d array
def loadPreset(panels):
rs.EnableRedraw(False)
file = rs.OpenFileName()
bitmap = System.Drawing.Bitmap.FromFile(file)
useBitmap(file, panels)
rs.EnableRedraw(True)
# resets the floor corresponding to the passed 2d array
def resetFloor(panels):
rs.EnableRedraw(False)
x = 0
while x < len(panels):
y = 0
while y < len(panels[x]):
panels[x][y].reset()
y += 1
x += 1
rs.EnableRedraw(True)
# deletes the entire floor defined by the 2d list - panels and all its components
def deleteFloor(panels):
rs.EnableRedraw(False)
for col in panels:
for tile in col:
tile.delete()
rs.EnableRedraw(True)
# uses the bitmap located in the filePath on the floor defined by the 2d array 'panels'
def useBitmap(filePath, panels):
bitmap = System.Drawing.Bitmap.FromFile(filePath)
xN = 0
while (xN < xNum):
yN = 0
while (yN < yNum):
color = System.Drawing.Bitmap.GetPixel(bitmap, xN, bitmap.Height-1-yN)
#accounting for the inversion of y-axis from bitmap to 3d space in the above line
h1 = maxHeight*(color.R+1)/255
h2 = maxHeight*(color.G+1)/255
h3 = maxHeight*(color.B+1)/255
panels[xN][yN].setPanelState(h1,h2,h3)
yN += 1
xN += 1
panel1 = '3b0c40dc-f66f-4b69-85c9-b530e0bdf7ed'
panel2 = 'f9291d72-ec21-47e5-b251-1df1355097ae'
slv = '0b304346-4e53-4290-afab-b2485c846203'
# defining the size of the tile (square shape) and the max Height achievable by it
maxHeight = 3000
moduleSize = 500
# now scaling the imported module to the correct module size
boundingBox = rs.BoundingBox([panel1,panel2,slv])
trueSize = rs.Distance(boundingBox[0], boundingBox[1])
trueHeight = rs.Distance(boundingBox[0], boundingBox[4])
scFxy = moduleSize/trueSize # scaling factor in x and y directions
scFz = maxHeight/trueHeight # scaling factor in z direction
rs.ScaleObjects([panel1, panel2, slv], boundingBox[4], [scFxy, scFxy, scFz])
# defining the number of tiles in the x and y directions - defined by the user
xNum = rs.GetInteger('Enter the number of tiles in the x-direction',10)
yNum = rs.GetInteger('Enter the number of tiles in the y-direction',10)
# these arrays will contain the identifiers of all the different objects
# these are 2d arrays corresponding to positions
primaryPanel = [] # the top panel
secondaryPanel = [] # the secondary panel underneath the top panel
sleeve = [] # the square cross section sleeve
panelUnit = [] # and integrated panelUnit with one each of the above three objects
# creating copeis of these panels and making a floor
rs.EnableRedraw(False)
xN = 0
while (xN < xNum):
#adding a new empty column to all the 2d arrays
primaryPanel.append([])
secondaryPanel.append([])
sleeve.append([])
panelUnit.append([])
yN = 0
while (yN < yNum):
#appending the newly created objects into those arrays
primaryPanel[xN].append(rs.CopyObject(panel1,[moduleSize*xN, moduleSize*yN,0]))
secondaryPanel[xN].append(rs.CopyObject(panel2,[moduleSize*xN, moduleSize*yN,0]))
sleeve[xN].append(rs.CopyObject(slv,[moduleSize*xN, moduleSize*yN,0]))
panelUnit[xN].append(floorPanel(primaryPanel[xN][yN], secondaryPanel[xN][yN], sleeve[xN][yN]))
yN += 1
xN += 1
#now hiding the original prototype panel from which we generated all the copies in the 2d array
rs.HideObjects([panel1, panel2, slv])
rs.EnableRedraw(True)
# user will now load a preset by selecting an image file
loadPreset(panelUnit)
# this loop allows the user to go through more presets
while True:
response = rs.GetString('Do you want to load another preset? (y/n)','y')
print(response)
if response == 'y' or response == 'Y':
resetFloor(panelUnit)
loadPreset(panelUnit)
elif response == 'n' or response == 'N':
break
else:
print('Invalid response')
continue
#this loop forces the user to choose between exporting the results to a new file or discard the changes
#this loop resets the file in the end
while True:
response = rs.GetString('Do you want to save this model? (y/n)','y')
if response == 'y':
rs.Command('_SelAll _Export')
deleteFloor(panelUnit)
rs.ShowObjects([panel1, panel2, slv])
break
elif response == 'n':
exitPrompt = 'Are you sure? You will lose the loaded preset (press y if you want to discard the changes and exit)'
response = rs.GetString(exitPrompt,'y')
if response == 'y':
deleteFloor(panelUnit)
rs.ShowObjects([panel1, panel2, slv])
break
else:
continue
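
# --- Editor's illustrative sketch (not part of the original script) ---
# useBitmap() above maps a pixel's R, G and B channels to the heights of the
# primary panel, secondary panel and sleeve. A pure-Python restatement of that
# mapping; maxHeight=3000 matches the value used above, the sample pixel is
# hypothetical.
def _sketch_channel_heights(r, g, b, max_height=3000.0):
    h1 = max_height * (r + 1) / 255    # primary panel height from the red channel
    h2 = max_height * (g + 1) / 255    # secondary panel height from the green channel
    h3 = max_height * (b + 1) / 255    # sleeve height from the blue channel
    return h1, h2, h3
# _sketch_channel_heights(254, 127, 0) -> (3000.0, ~1505.9, ~11.8)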
|
StarcoderdataPython
|
1741743
|
import pymongo
from pymongo import MongoClient
from config import DATABASE_CONNECTION_STRING
cluster = MongoClient(DATABASE_CONNECTION_STRING)
db = cluster['telegram_db']
collection = db['telegram_db']
def test():
print(cluster.list_database_names())
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
8085990
|
# this file is needed for at least setup.py
__author__ = "<NAME>"
|
StarcoderdataPython
|
1970999
|
import torch
from lanenet.model import HNetLoss
def test_hnet():
gt_labels = torch.tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 1.0]],
[[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [3.0, 3.0, 1.0]]],
dtype=torch.float32).view(6,3)
transformation_coffecient = torch.tensor([0.58348501, -0.79861236, 2.30343866,
-0.09976104, -1.22268307, 2.43086767],
dtype=torch.float32)
# import numpy as np
# c_val = [0.58348501, -0.79861236, 2.30343866,
# -0.09976104, -1.22268307, 2.43086767]
# R = np.zeros([3, 3], np.float32)
# R[0, 0] = c_val[0]
# R[0, 1] = c_val[1]
# R[0, 2] = c_val[2]
# R[1, 1] = c_val[3]
# R[1, 2] = c_val[4]
# R[2, 1] = c_val[5]
# R[2, 2] = 1
#
# print(np.mat(R).I)
hnet_loss = HNetLoss(gt_labels, transformation_coffecient, 'loss')
hnet_inference = HNetLoss(gt_labels, transformation_coffecient, 'inference')
_loss = hnet_loss._hnet_loss()
_pred = hnet_inference._hnet_transformation()
print("loss: ", _loss)
print("pred: ", _pred)
|
StarcoderdataPython
|
11299451
|
from datetime import datetime, timedelta
from django.db import models
from domain.models.base_model import BaseModel
def get_default_expiration_date():
return datetime.now() + timedelta(days=365)
class UrlBank(BaseModel):
actual_url = models.CharField(max_length=3000)
expiration_date = models.DateTimeField(default=get_default_expiration_date)
md_five_hash = models.CharField(max_length=32, db_index=True)
actual_url_shortened = models.CharField(max_length=10, db_index=True)
class Meta:
db_table = 'url_bank'
app_label = 'domain'
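
# --- Editor's illustrative sketch (not part of the original module) ---
# The model stores a 32-character MD5 hash and a short code of at most 10
# characters, but the shortening logic itself is not in this file. The helper
# below is an assumption for illustration only, not the project's actual
# implementation.
def _sketch_candidate_values(actual_url):
    import hashlib
    md_five_hash = hashlib.md5(actual_url.encode("utf-8")).hexdigest()  # 32 hex chars
    actual_url_shortened = md_five_hash[:10]                            # fits max_length=10
    return md_five_hash, actual_url_shortened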
|
StarcoderdataPython
|
3299250
|
class Event:
def __init__(self, msg, task, time):
self.msg = msg
self.task = task
self.time = time
def get_message(self):
return self.msg
def get_task(self):
return self.task
def get_time(self):
return self.time
def update_time(self, _time):
self.time = _time
|
StarcoderdataPython
|
1970321
|
<filename>playground/addressbook.py
"""
Class to represent an address book with multiple contacts.
"""
class AddressBook:
"""This will provide you with contact information about your friends."""
def __init__(self,contacts=[]):
self.contact_list = contacts
#...
def addentry(self,name,phone_number,address,email):
newcontact = Contact(name,phone_number,address,email)
self.contact_list.append(newcontact)
#...
def print_contacts(self,sorted_order=False):
"""
Print each of the contacts stored in this address book.
If sorted is True, print them in sorted order by name.
"""
def returnSortKey(contact):
if contact.name != "":
return contact.name
elif contact.email != "":
return contact.email
elif contact.phone_number != "":
return contact.phone_number
elif contact.address != "":
return contact.address
else:
return ""
if sorted_order == True:
y = sorted(self.contact_list,key=lambda z: returnSortKey(z))
for x in y:
print "_____________"
x.print_entry()
else:
for x in self.contact_list:
print "_____________"
x.print_entry()
#...
#...
class Contact:
"""
Class to store contact information for a single person.
"""
def __init__(self,name="",phone_number="",address="",email=""):
self.name = name
self.phone_number = phone_number
self.address = address
self.email = email
#...
def print_entry(self):
"""
Pretty-print the entries in this contact.
Empty values are not printed.
"""
if self.name != "":
print "Name:", self.name
if self.phone_number != "":
print "Phone Number:", self.phone_number
if self.address != "":
print "Address:", self.address
if self.email != "":
print "Email:", self.email
# if self.name == "" and self.phone_number == "" and self.address == "" and self.email == "":
# print ""
# elif self.name == "" and self.phone_number == "" and self.address == "":
# print "Email:", self.email
# elif self.phone_number == "" and self.address == "":
# print "Name:", self.name
# print "Email:", self.email
# elif self.address == "":
# print "Name:", self.name
# print "Phone Number:", self.phone_number
# print "Email:", self.email
# elif self.email == "":
# print "Name:", self.name
# print "Phone Number:", self.phone_number
# print "Address:", self.address
# else:
# print "Name:", self.name
# print "Phone Number:", self.phone_number
# print "Address:", self.address
# print "Email:", self.email
#...
#...
newcontact2 = Contact(name="Sagar", email="<EMAIL>")
newcontact = Contact("John","123-4567","somehwere, USA","<EMAIL>")
newcontact3 = Contact(email="<EMAIL>")
newcontact4 = Contact(address="Over there, USA")
newcontact5 = Contact("Anthony","324-9842","New Haven, CT","<EMAIL>")
newcontact6 = Contact("Bob","458-0983","New York, NY","<EMAIL>")
newbook = AddressBook([newcontact,newcontact2,newcontact6,newcontact3,newcontact4,newcontact5])
newbook.print_contacts(sorted_order=True)
|
StarcoderdataPython
|
1887327
|
from django.apps import AppConfig
class CollaborationConfig(AppConfig):
name = "api.collaboration"
|
StarcoderdataPython
|
9791760
|
<reponame>Jin-Tao-208/web_science_coursework<gh_stars>0
from collections import OrderedDict
import json
from math import exp
import os
from BurstySegmentExtractor import BurstySegmentExtractor
from Segment import Segment
from TimeWindow import SubWindow
from TweetSegmenter import SEDTWikSegmenter
from utils.pyTweetCleaner import TweetCleaner
class TwitterEventDetector():
def __init__(self, wiki_titles_file, seg_prob_file, wiki_Qs_file, remove_retweets=False, max_segment_length=4,
hashtag_wt=3,
use_retweet_count=True, use_followers_count=True, default_seg_prob=0.000001, entities_only=False):
self.segmenter = SEDTWikSegmenter(wiki_titles_file, max_segment_length, hashtag_wt, entities_only)
self.remove_retweets = remove_retweets
self.bse = BurstySegmentExtractor(seg_prob_file, use_retweet_count, use_followers_count, default_seg_prob)
# prob that a segment is anchor text in all pages containing that segment
with open(wiki_Qs_file, 'r') as f:
self.wiki_prob = json.load(f)
def clean_tweets_in_directory(self, root_dir, target_dir):
"""
clean tweets in root_dir using pyTweetCleaner and save cleaned files in target_dir
This need to be done just once and then the cleaned tweets can be used afterward
"""
print('Cleaning all tweets in given directory')
tc = TweetCleaner(True, self.remove_retweets)
if not os.path.isdir(target_dir): os.mkdir(target_dir)
for dir_path, _, file_list in os.walk(root_dir):
dir_path = dir_path.replace('\\',
'/') # make windows-like path to unix-like path which can be used for both
dir_name = dir_path.replace(root_dir, '')
print('Found directory: %s' % dir_name)
target_file_path = target_dir + '/' + dir_name
if not os.path.isdir(target_file_path): os.mkdir(target_file_path)
for fname in file_list:
print(fname)
tc.clean_tweets(input_file=dir_path + '/' + fname, output_file=target_file_path + '/' + fname)
print('Cleaned all tweets and saved to', target_dir)
def read_subwindow(self, file_path):
"""
read a SubWindow from a file
all tweets in given file belong to the subwindow
"""
segments = {}
tweet_count = 0
f = open(file_path, 'rb')
for line in f:
line = line.decode().replace('\n', '')
if line == '': continue
json_tweet = json.loads(line)
# json_tweet = line
tweet_count += 1
user_id = json_tweet['user']['id']
retweet_count = json_tweet['retweet_count']
followers_count = json_tweet['user']['followers_count']
segmentation = self.segmenter.tweet_segmentation(json_tweet)
            # because of hashtag_wt, some segments may appear multiple times in the tweet text after joining, so deduplicate them
            tweet_text = ' '.join(list(OrderedDict.fromkeys(segmentation)))
            # some non-ASCII chars (e.g. \u0441, which looks like 'c') still survive segmentation, so strip them
            tweet_text = ''.join([c for c in tweet_text if ord(c) < 256])
for seg in segmentation:
if not seg in segments:
new_seg = Segment(seg)
new_seg.newsworthiness = self.get_segment_newsworthiness(seg)
segments[seg] = new_seg
segments[seg].add_tweet(user_id, tweet_text, retweet_count, followers_count)
f.close()
sw = SubWindow(segments, tweet_count)
return sw
def get_segment_newsworthiness(self, seg):
"""
return max exp(Q(l))-1 from all sub phrases 'l' in seg(string)
"""
seg = seg.split(' ')
n = len(seg)
# max_sub_phrase_prob = max([self.get_wiki_Qs_prob(seg[i:i+j+1]) for i in range(n) for j in range(n-i)])
# return exp(max_sub_phrase_prob)-1
if n == 1:
return exp(self.get_wiki_Qs_prob(seg))
else:
max_sub_phrase_prob = max([self.get_wiki_Qs_prob(seg[i:i + j + 1]) for i in range(n) for j in range(n - i)])
return exp(max_sub_phrase_prob) - 1
def get_wiki_Qs_prob(self, seg):
"""
return prob that seg(list of words) is an anchor text from all pages containing seg
"""
return self.wiki_prob.get(' '.join(seg), 0)
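
# --- Editor's illustrative sketch (not part of the original class) ---
# get_segment_newsworthiness() scores a segment by its most probable sub-phrase
# under the Wikipedia anchor-probability table. A standalone restatement of the
# sub-phrase enumeration; the probability dict and segment are toy values.
def _sketch_newsworthiness(segment, wiki_prob):
    from math import exp
    words = segment.split(' ')
    n = len(words)
    if n == 1:
        return exp(wiki_prob.get(words[0], 0))
    best = max(wiki_prob.get(' '.join(words[i:i + j + 1]), 0)
               for i in range(n) for j in range(n - i))
    return exp(best) - 1
# _sketch_newsworthiness('world cup final', {'world cup': 0.7}) == exp(0.7) - 1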
|
StarcoderdataPython
|
1629596
|
import os
from selene import config
from selene.browser import set_driver, driver
from tests.acceptance.helpers.helper import get_test_driver
from tests.examples.order.app_model.order_widgets import Order
def setup_function(m):
config.timeout = 4
set_driver(get_test_driver())
config.app_host = 'file://' + os.path.abspath(os.path.dirname(__file__)) + '/../../resources/orderapp/'
def teardown_function(m):
driver().quit()
config.app_host = ''
def test_it_fills_order():
order = Order()
order.open()
order.details.fill_with(first_name='Johanna', last_name='Smith', salutation='Mrs')
item = order.add_item_with(name='New Test Item', other_data='Some other specific data')
item.show_advanced_options_selector.click()
item.add_advanced_options(
[{'option_type': 'type1'}, {'scope': 'optionscope2fortype1'}],
[{'option_type': 'type2'}, {'scope': 'optionscope3fortype2'}]
)
item.show_advanced_options.click()
item.advanced_options.should_be('optionscope2fortype1', 'optionscope3fortype2')
item.clear_options.click()
item.advanced_options.should_be_empty()
item.show_advanced_options_selector.click()
item.advanced_options_selector.should_be_hidden()
|
StarcoderdataPython
|
1693920
|
""" CSeq C Sequentialization Framework
scope-based variable renaming module
written by <NAME>, University of Southampton.
"""
VERSION = 'varnames-0.0-2015.07.08'
#VERSION = 'varnames-0.0-2014.12.24' # CSeq-1.0beta
#VERSION = 'varnames-0.0-2014.10.26' # CSeq-Lazy-0.6: newseq-0.6a, newseq-0.6c, SVCOMP15
#VERSION = 'varnames-0.0-2014.10.26'
#VERSION = 'varnames-0.0-2014.10.15'
#VERSION = 'varnames-0.0-2014.03.14'
#VERSION = 'varnames-0.0-2014.03.08' first version (Cseq-Lazy-0.2)
"""
This module performs variable renaming based on variable scope,
so that no two functions share a variable id after it,
which makes function inlining easier
(doing so avoids future problems with the inliner module, see regression/102,103).
At the end of the renaming, the map of variable name changes
is available in newIDs (useful for counterexamples,
to translate variable names back to the corresponding original names).
Transformation:
int f(int P) {
int L;
}
into:
int f(int __cs_param_f_P) {
	int __cs_local_f_L;
}
TODO:
the new variables introduced should be guaranteed not to reuse existing symbols
Changelog:
2015.07.08 map with variable renames returned as an output parameter
2014.12.09 further code refactory to match the new organisation of the CSeq framework
2014.10.27 different prefixes for local variables and function parameters
2014.10.26 changed __stack to stack (to inherit stack handling from module.py)
2014.10.15 removed visit() and moved visit call-stack handling to module class (module.py)
2014.03.14 further code refactory to match module.Module class interface
"""
import inspect, os, sys, getopt, time
import pycparser.c_parser, pycparser.c_ast, pycparser.c_generator
import core.common, core.module, core.parser, core.utils
class varnames(core.module.Translator):
__debug = False
__visitingStructRef = False # to avoid considering struct fields as local variables
prefix = '__cs_local_' # prefix for local variables
paramprefix = '__cs_param_' # prefix for function params
newIDs = {} # mapping of old variable names to new variable names
varmap = {}
__currentFunction = ''
__visitingParam = 0 # depth of params in a function prototype
__visitingDecl = 0
__visitFuncDef = 0
__visitStructUnionEnum = 0
def init(self):
self.addOutputParam('varnamesmap')
def loadfromstring(self, string, env):
super(self.__class__, self).loadfromstring(string, env)
self.setOutputParam('varnamesmap', self.varmap)
#print str(self.newIDs).replace(', ','\n')
def visit_Decl(self, n, no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
# explicitly only for the first delaration in a list.
#
self.__visitingDecl += 1
s = n.name if no_type else self._generate_decl(n)
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
if n.init:
if isinstance(n.init, pycparser.c_ast.InitList):
s += ' = {' + self.visit(n.init) + '}'
elif isinstance(n.init, pycparser.c_ast.ExprList):
s += ' = (' + self.visit(n.init) + ')'
else:
s += ' = ' + self.visit(n.init)
self.__visitingDecl -= 1
return s
def visit_FuncDef(self, n):
self.__visitFuncDef += 1
s = super(self.__class__, self).visit_FuncDef(n)
self.__visitFuncDef -= 1
return s
def visit_ParamList(self, n):
out = ''
for i, p in enumerate(n.params):
spacer = '' if i==0 else ', '
self.__visitingParam += 1
out += spacer + self.visit(p)
self.__visitingParam -= 1
#return ', '.join(self.visit(param) for param in n.params)
return out
def visit_Struct(self, n):
self.__visitStructUnionEnum += 1
s = super(self.__class__, self).visit_Struct(n)
self.__visitStructUnionEnum -= 1
return s
def visit_Union(self, n):
self.__visitStructUnionEnum += 1
s = super(self.__class__, self).visit_Union(n)
self.__visitStructUnionEnum -= 1
return s
def visit_Enum(self, n):
self.__visitStructUnionEnum += 1
s = super(self.__class__, self).visit_Enum(n)
self.__visitStructUnionEnum -= 1
return s
def visit_Typedef(self, n):
self.__visitStructUnionEnum += 1
s = super(self.__class__, self).visit_Typedef(n)
self.__visitStructUnionEnum -= 1
return s
def visit_FuncDef(self, n):
self.__currentFunction = n.decl.name
f = super(self.__class__, self).visit_FuncDef(n)
self.__currentFunction = ''
return f
def visit_StructRef(self, n):
sref = self._parenthesize_unless_simple(n.name)
		oldvisitingStructRef = self.__visitingStructRef
self.__visitingStructRef = True
if self.__debug: print "------- ------- ------- ------- ------- ------- VISITING STRUCT REF START (%s)" % sref
retval = sref + n.type + self.visit(n.field)
if self.__debug: print "------- ------- ------- ------- ------- ------- VIITING STRUCT REF END"
self.__visitingStructRef = oldvisitingStructRef
return retval
def visit_ID(self, n):
prefix = ''
#if n.name in self.Parser.varNames[self.__currentFunction] and self.Parser.varKind[self.__currentFunction,n.name] == 'p':
if n.name in self.Parser.varNames[self.__currentFunction]:
if self.__debug: print "visiting ID: [%s,%s]" % (self.__currentFunction,n.name)
if (n.name in self.Parser.varNames[self.__currentFunction] and
self.__currentFunction != '' and
not self.__visitingStructRef ):
#str(self.stack[len(self.stack)-2]) != 'StructRef' ): # e.g. visiting ID: x->ID or x.ID (this is not a local var, but a field)
if self.__debug: print " local PARAMETER"
if self.__debug: print " stack: "+ str(self.stack) + ' prev:' + str(self.stack[len(self.stack)-2])
prefix = self.newIDs[self.__currentFunction, super(self.__class__, self).visit_ID(n)]
self.varmap[prefix+super(self.__class__, self).visit_ID(n)] = super(self.__class__, self).visit_ID(n)
#self.warn('%s -> %s' % (prefix+super(self.__class__, self).visit_ID(n), super(self.__class__, self).visit_ID(n)))
return prefix + super(self.__class__, self).visit_ID(n)
def _generate_type(self, n, modifiers=[]):
""" Recursive generation from a type node. n is the type node.
modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers
encountered on the way down to a TypeDecl, to allow proper
generation from it.
"""
typ = type(n)
#~ print(n, modifiers)
if typ == pycparser.c_ast.TypeDecl:
s = ''
if n.quals: s += ' '.join(n.quals) + ' '
s += self.visit(n.type)
# Local variables & parameter renaming.
#
# Variable name substitution only applies to local variables or parameters names within function prototypes
# (thus, global variables and function names need to be excluded)
#
		# case 1: level-0 function parameters (no renaming for nested parameters)
# case 2: local variable declaration (thus excluding functions, global vars, struct-enum-union fields, nested parameters)
#
if self.__visitingParam == 1: # case 1
if self.__debug: print "SETTING NEWID for [%s,%s] (case I)" % (self.__currentFunction,n.declname)
self.newIDs[self.__currentFunction,n.declname] = self.paramprefix + self.__currentFunction + '_'
n.declname = (self.paramprefix + self.__currentFunction + '_' + n.declname) if n.declname else ''
elif (self.__visitingParam == 0 and # case 2
self.__visitFuncDef == 0 and
n.declname not in self.Parser.funcName and
#n.declname not in self.Parser.varNames[''] and
self.__currentFunction != '' and
self.__visitStructUnionEnum == 0):
if self.__debug: print "SETTING NEWID for [%s,%s] (case II)" % (self.__currentFunction,n.declname)
self.newIDs[self.__currentFunction,n.declname] = self.prefix + self.__currentFunction + '_'
n.declname = self.prefix + self.__currentFunction + '_' + n.declname if n.declname else ''
nstr = n.declname if n.declname else ''
# Resolve modifiers.
# Wrap in parens to distinguish pointer to array and pointer to
# function syntax.
#
for i, modifier in enumerate(modifiers):
if isinstance(modifier, pycparser.c_ast.ArrayDecl):
if (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '[' + self.visit(modifier.dim) + ']'
elif isinstance(modifier, pycparser.c_ast.FuncDecl):
if (i != 0 and isinstance(modifiers[i - 1], pycparser.c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '(' + self.visit(modifier.args) + ')'
elif isinstance(modifier, pycparser.c_ast.PtrDecl):
if modifier.quals:
nstr = '* %s %s' % (' '.join(modifier.quals), nstr)
else:
nstr = '*' + nstr
if nstr: s += ' ' + nstr
return s
elif typ == pycparser.c_ast.Decl:
return self._generate_decl(n.type)
elif typ == pycparser.c_ast.Typename:
return self._generate_type(n.type)
elif typ == pycparser.c_ast.IdentifierType:
return ' '.join(n.names) + ' '
elif typ in (pycparser.c_ast.ArrayDecl, pycparser.c_ast.PtrDecl, pycparser.c_ast.FuncDecl):
return self._generate_type(n.type, modifiers + [n])
else:
return self.visit(n)
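
# --- Editor's illustrative sketch (not part of the original module) ---
# A standalone restatement of the renaming scheme applied by this module:
# parameters become __cs_param_<function>_<name> and locals become
# __cs_local_<function>_<name>. The helper name is illustrative only.
def _sketch_rename(function_name, var_name, is_parameter):
    prefix = '__cs_param_' if is_parameter else '__cs_local_'
    return prefix + function_name + '_' + var_name
# _sketch_rename('f', 'P', True)  -> '__cs_param_f_P'
# _sketch_rename('f', 'L', False) -> '__cs_local_f_L'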
|
StarcoderdataPython
|
4961953
|
<reponame>sisoe24/Nuke-Python-Stubs
from numbers import Number
from typing import *
import nuke
from . import *
class String_Knob(Knob):
"""
A knob which holds a string value. Appears as a text entry field in a Node panel.
"""
def __hash__(self, ):
"""
Return hash(self).
"""
return None
def __init__(self, *args, **kwargs):
"""
Initialize self. See help(type(self)) for accurate signature.
"""
return None
def __new__(self,*args, **kwargs):
"""
Create and return a new object. See help(type) for accurate signature.
"""
return None
def getText(self, oc=None):
"""
self.getText(oc) -> string
Get the non-evaluated value of this knob - also see `value()`
@param oc: Optional parameter specifying the output context.
        @return: Text associated with the knob.
"""
return str()
def setValue(self, val:Any, view='default'):
"""
self.setValue(val, view='default') -> None
Set value of knob.
@param val: The new value.
@param view: Optional parameter specifying which view to set the value for. If omitted, the value will be set for the default view.
@return: None
"""
return None
def value(self, oc=None):
"""
self.value(oc) -> str
Get the evaluated value of this knob as a string - also see `getText()`.
@param oc: Optional parameter specifying the output context.
@return: String value.
"""
return str()
def value(self, oc=None):
"""
self.value(oc) -> str
Get the evaluated value of this knob as a string - also see `getText()`.
@param oc: Optional parameter specifying the output context.
@return: String value.
"""
return str()
def setValue(self, val:Any, view='default'):
"""
self.setValue(val, view='default') -> None
Set value of knob.
@param val: The new value.
@param view: Optional parameter specifying which view to set the value for. If omitted, the value will be set for the default view.
@return: None
"""
return None
def splitView(self, view=None):
"""
self.splitView(view) -> None.
Split the view away from the current knob value.
@param view: Optional view. Default is current view.
@return: None.
"""
return None
def unsplitView(self, view=None):
"""
self.unsplitView(view) -> None.
Unsplit the view so that it shares a value with other views.
@param view: Optional view. Default is current view.
@return: None.
"""
return None
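
# --- Editor's illustrative usage sketch (not part of the generated stubs) ---
# A hedged example using only methods declared in this stub; 'Text1' and
# 'message' are hypothetical node and knob names.
def _sketch_string_knob_usage():
    knob = nuke.toNode('Text1')['message']   # a String_Knob on a hypothetical node
    knob.setValue('hello world')             # set the value for the default view
    raw = knob.getText()                     # non-evaluated text
    evaluated = knob.value()                 # evaluated string
    return raw, evaluated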
|
StarcoderdataPython
|
11278377
|
<reponame>GitHK/CarND-Advanced-Lane-Lines
import logging
from collections import deque
import cv2
import numpy as np
logger = logging.getLogger(__name__)
Y_ARRAY_INDEX_OF_BOTTOM_ELEMENT = 0
class LaneInfo:
def __init__(self):
self.left_fit = None
self.right_fit = None
self.left_fitx = None
self.right_fitx = None
self.ploty = None
# curve radius of the left and right curves
self.left_curverad_m = None
self.right_curverad_m = None
# position from the center of the lane in meters < 0 left >0 right
self.lane_position = None
# width of the lane in meters
self.lane_width = None
# keep track of minimum and maximum coordinate of y axis
self.min_left_y = None
self.max_left_y = None
self.min_right_y = None
self.max_right_y = None
def full_line_search(wraped_binarized, ym_per_pix=30 / 720, xm_per_pix=3.7 / 700, with_debug_image=True):
TOTAL_VERTICAL_STRIDES = 9
# Set minimum number of pixels found to recenter window
MIN_PIXELS_TO_RECENTER = 10
DRAWN_CURVE_LINE_WIDTH = 4 # width of final curve in pixels
scan_window_width = int(wraped_binarized.shape[1] * 0.13)
half_scam_window_width = int(scan_window_width / 2)
scan_window_height = int(wraped_binarized.shape[0] / TOTAL_VERTICAL_STRIDES)
debug_output = np.dstack((wraped_binarized, wraped_binarized, wraped_binarized)) * 255
if with_debug_image:
logger.info("Search params (strides, window width, window height): (%s, %s, %s)" % (
TOTAL_VERTICAL_STRIDES, scan_window_width, scan_window_height))
histogram = np.sum(wraped_binarized[wraped_binarized.shape[0] // 2 + 100:, :], axis=0)
# Used only for debug
# from matplotlib import pyplot as plt
# plt.plot(histogram)
# plt.show()
midpoint = np.int(histogram.shape[0] // 2)
left_search_base = np.argmax(histogram[:midpoint])
right_search_base = np.argmax(histogram[midpoint:]) + midpoint
nonzero = wraped_binarized.nonzero()
nonzero_y = np.array(nonzero[0])
nonzero_x = np.array(nonzero[1])
total_height = wraped_binarized.shape[0]
r_bottom_center = [right_search_base, total_height]
l_bottom_center = [left_search_base, total_height]
left_lane_indexes = deque()
right_lane_indexes = deque()
for stride in range(TOTAL_VERTICAL_STRIDES):
# upper and lower margin of the scanning window
y_bottom = total_height - (stride + 1) * scan_window_height
y_top = total_height - stride * scan_window_height
# left search window right & left boundaries
l_search_window_left_x = l_bottom_center[0] - half_scam_window_width
l_search_window_right_x = l_bottom_center[0] + half_scam_window_width
# right search window right & left boundaries
r_search_window_left_x = r_bottom_center[0] - half_scam_window_width
r_search_window_right_x = r_bottom_center[0] + half_scam_window_width
# Draw the windows on the visualization image
if with_debug_image:
cv2.rectangle(debug_output, (l_search_window_left_x, y_bottom), (l_search_window_right_x, y_top),
(0, 255, 0), 2)
cv2.rectangle(debug_output, (r_search_window_left_x, y_bottom), (r_search_window_right_x, y_top),
(0, 255, 0), 2)
left_indexes = ((nonzero_y >= y_bottom) & (nonzero_y < y_top) &
(nonzero_x >= l_search_window_left_x) & (nonzero_x < l_search_window_right_x)).nonzero()[0]
right_indexes = ((nonzero_y >= y_bottom) & (nonzero_y < y_top) &
(nonzero_x >= r_search_window_left_x) & (nonzero_x < r_search_window_right_x)).nonzero()[0]
# Append these indices to the lists
left_lane_indexes.append(left_indexes)
right_lane_indexes.append(right_indexes)
# If you found > MIN_PIXELS_TO_RECENTER, recenter next window on their mean position
if len(left_indexes) > MIN_PIXELS_TO_RECENTER:
l_bottom_center[0] = np.int(np.mean(nonzero_x[left_indexes]))
if len(right_indexes) > MIN_PIXELS_TO_RECENTER:
r_bottom_center[0] = np.int(np.mean(nonzero_x[right_indexes]))
left_lane_indexes = np.concatenate(left_lane_indexes)
right_lane_indexes = np.concatenate(right_lane_indexes)
# Extract left and right line pixel positions
left_x = nonzero_x[left_lane_indexes]
left_y = nonzero_y[left_lane_indexes]
right_x = nonzero_x[right_lane_indexes]
right_y = nonzero_y[right_lane_indexes]
# Fit a second order polynomial to each
left_fit = np.polyfit(left_y, left_x, 2)
right_fit = np.polyfit(right_y, right_x, 2)
ploty = np.linspace(0, wraped_binarized.shape[0] - 1, wraped_binarized.shape[0]).astype(np.int)
# Generate x and y values for plotting
left_fitx = (left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]).astype(np.int)
right_fitx = (right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]).astype(np.int)
lane_position = get_lane_position(wraped_binarized, left_fitx, right_fitx, xm_per_pix)
lane_width = get_lane_width(left_fitx, right_fitx, xm_per_pix)
if with_debug_image:
logger.info('lane_position %s m' % lane_position)
logger.info('lane_width %s m' % lane_width)
debug_output[nonzero_y[left_lane_indexes], nonzero_x[left_lane_indexes]] = [255, 0, 0]
debug_output[nonzero_y[right_lane_indexes], nonzero_x[right_lane_indexes]] = [0, 0, 255]
# Print detected line on top of image
draw_fit_curves_on_image(debug_output, left_fitx, right_fitx, ploty, DRAWN_CURVE_LINE_WIDTH)
# curvature in meters
left_curverad_m, right_curverad_m = curvatures_in_meters(
left_x, left_y, ploty, right_x, right_y, xm_per_pix, ym_per_pix)
if with_debug_image:
logger.info("Curvature right: %s m, left: %s m" % (left_curverad_m, right_curverad_m))
lane_info = LaneInfo()
lane_info.left_fit = left_fit
lane_info.right_fit = right_fit
lane_info.left_fitx = left_fitx
lane_info.right_fitx = right_fitx
lane_info.ploty = ploty
lane_info.left_curverad_m = left_curverad_m
lane_info.right_curverad_m = right_curverad_m
lane_info.lane_position = lane_position
lane_info.lane_width = lane_width
lane_info.min_left_y = 0
lane_info.max_left_y = wraped_binarized.shape[0]
lane_info.min_right_y = 0
lane_info.max_right_y = wraped_binarized.shape[0]
return debug_output, lane_info
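# A minimal sketch (not part of the original project) of how an input for
# full_line_search() might be prepared: the function expects a bird's-eye,
# perspective-warped binary image. The threshold and the source/destination
# points below are hypothetical placeholders; real values depend on the camera
# calibration and the region of interest of the footage.
def example_prepare_warped_binary(bgr_frame):
    """Thresholds a frame and warps it to a bird's-eye view (illustrative only)."""
    gray = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2GRAY)
    # crude intensity threshold; a real pipeline would combine color and gradient thresholds
    _, binary = cv2.threshold(gray, 180, 1, cv2.THRESH_BINARY)
    height, width = binary.shape
    src = np.float32([[width * 0.45, height * 0.63], [width * 0.55, height * 0.63],
                      [width * 0.90, height], [width * 0.10, height]])
    dst = np.float32([[width * 0.20, 0], [width * 0.80, 0],
                      [width * 0.80, height], [width * 0.20, height]])
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(binary, transform, (width, height), flags=cv2.INTER_NEAREST)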
def local_line_search(wraped_binarized, left_fit, right_fit, ym_per_pix=30 / 720, xm_per_pix=3.7 / 700,
margin=100, with_debug_image=True):
DRAWN_CURVE_LINE_WIDTH = 4 # width of final curve in pixels
nonzero = wraped_binarized.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
left_lane_indx = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) &
(nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))
right_lane_indx = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) &
(nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
left_x = nonzerox[left_lane_indx]
left_y = nonzeroy[left_lane_indx]
right_x = nonzerox[right_lane_indx]
right_y = nonzeroy[right_lane_indx]
if len(left_y) == 0 or len(right_y) == 0 or len(left_x) != len(left_y) or len(right_y) != len(right_x):
return None, None
# Fit a second order polynomial to each
left_fit = np.polyfit(left_y, left_x, 2)
right_fit = np.polyfit(right_y, right_x, 2)
# Generate x and y values for plotting
ploty = (np.linspace(0, wraped_binarized.shape[0] - 1, wraped_binarized.shape[0])).astype(np.int)
left_fitx = (left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]).astype(np.int)
right_fitx = (right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]).astype(np.int)
lane_position = get_lane_position(wraped_binarized, left_fitx, right_fitx, xm_per_pix)
lane_width = get_lane_width(left_fitx, right_fitx, xm_per_pix)
result = np.dstack((wraped_binarized, wraped_binarized, wraped_binarized)) * 255
if with_debug_image:
logger.info('lane_position %s m' % lane_position)
logger.info('lane_width %s m' % lane_width)
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((wraped_binarized, wraped_binarized, wraped_binarized)) * 255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_indx], nonzerox[left_lane_indx]] = [255, 0, 0]
out_img[nonzeroy[right_lane_indx], nonzerox[right_lane_indx]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
draw_fit_curves_on_image(result, left_fitx, right_fitx, ploty, DRAWN_CURVE_LINE_WIDTH)
# curvature in meters
left_curverad_m, right_curverad_m = curvatures_in_meters(
left_x, left_y, ploty, right_x, right_y, xm_per_pix, ym_per_pix)
if with_debug_image:
logger.info("Curvature right: %s m, left: %s m" % (left_curverad_m, right_curverad_m))
lane_info = LaneInfo()
lane_info.left_fit = left_fit
lane_info.right_fit = right_fit
lane_info.left_fitx = left_fitx
lane_info.right_fitx = right_fitx
lane_info.ploty = ploty
lane_info.left_curverad_m = left_curverad_m
lane_info.right_curverad_m = right_curverad_m
lane_info.lane_position = lane_position
lane_info.lane_width = lane_width
lane_info.min_left_y = 0
lane_info.max_left_y = wraped_binarized.shape[0]
lane_info.min_right_y = 0
lane_info.max_right_y = wraped_binarized.shape[0]
return result, lane_info
def get_lane_position(wraped_binarized, left_fitx, right_fitx, xm_per_pix):
"""
    Returns the car's position relative to the lane center in meters. A positive number means the car is on
    the right side of the lane center, a negative number means it is on the left side.
"""
lane_center = compute_midpoint(left_fitx, right_fitx)
image_center = wraped_binarized.shape[1] / 2
lane_position = (lane_center - image_center) * xm_per_pix
return lane_position
def get_lane_width(left_fitx, right_fitx, xm_per_pix):
""" Returns the lane width expressed in meters """
a = left_fitx[Y_ARRAY_INDEX_OF_BOTTOM_ELEMENT]
b = right_fitx[Y_ARRAY_INDEX_OF_BOTTOM_ELEMENT]
return (b - a) * xm_per_pix
def draw_fit_curves_on_image(image, left_fitx, right_fitx, ploty, line_width):
""" Prints detected line on top fo the image """
for l_x, r_x, y in zip(left_fitx, right_fitx, ploty):
half_line_width = int(line_width / 2)
# this implementation could be better, but a try catch is needed when drawing near the edges of the matrix
for x in range(l_x - half_line_width, l_x + half_line_width):
try:
image[y, x] = [0, 255, 255]
except IndexError:
pass
for x in range(r_x - half_line_width, r_x + half_line_width):
try:
image[y, x] = [0, 255, 255]
except IndexError:
pass
def curvatures_in_meters(left_x, left_y, ploty, right_x, right_y, xm_per_pix, ym_per_pix):
""" Returns the curvature in meters for the left and right lanes """
left_fit_cr = np.polyfit(left_y * ym_per_pix, left_x * xm_per_pix, 2)
right_fit_cr = np.polyfit(right_y * ym_per_pix, right_x * xm_per_pix, 2)
left_curverad_m = compute_curvature(ploty, left_fit_cr, ym_per_pix)
right_curverad_m = compute_curvature(ploty, right_fit_cr, ym_per_pix)
return left_curverad_m, right_curverad_m
def compute_curvature(ploty, fit, ym_per_pix):
""" Conputes the curvature of a line """
y_eval_m = np.max(ploty) * ym_per_pix # in meters
return ((1 + (2 * fit[0] * y_eval_m + fit[1]) ** 2) ** 1.5) / np.absolute(2 * fit[0])
def compute_midpoint(left_fitx, right_fitx):
""" Returns the midpoint of the lane """
a = left_fitx[Y_ARRAY_INDEX_OF_BOTTOM_ELEMENT]
b = right_fitx[Y_ARRAY_INDEX_OF_BOTTOM_ELEMENT]
return a + (b - a) / 2
def lane_lines_directixes(lane_info):
"""
Computes the parabola directrix for both curves.
"""
def directrix(coefficients):
a, b, c = coefficients
return (b ** 2 - 4 * a * c + 1) / 4 * a
return directrix(lane_info.left_fit), directrix(lane_info.right_fit)
def can_use_this_frame(current_lane, previous_lane):
"""
Sanity checking, if this frame can be used.
Checking that they have similar curvature
Checking that the current lane is not too small
Checking that they are separated by approximately the right distance horizontally
Checking that they are roughly parallel
"""
    SIMILAR_CURVATURE_THRESHOLD = 1000  # maximum allowed frame-to-frame curvature difference, in meters
    MIN_LANE_LENGTH = 2  # minimum lane width in meters; keeps the two lane lines from collapsing onto each other
SIMILAR_LANE_DISTANCE_THRESHOLD = 0.3 # in meters
ROUGHLY_PARALLEL_THRESHOLD = 0.0001
# Checking that they have similar curvature
left_curv_diff = np.abs(current_lane.left_curverad_m - previous_lane.left_curverad_m)
right_curv_diff = np.abs(current_lane.right_curverad_m - previous_lane.right_curverad_m)
curvature_ok = left_curv_diff <= SIMILAR_CURVATURE_THRESHOLD and right_curv_diff <= SIMILAR_CURVATURE_THRESHOLD
# Checking that the current lane is not too small
length_ok = current_lane.lane_width > MIN_LANE_LENGTH
# Checking that they are separated by approximately the right distance horizontally
lane_width_diff = np.abs(current_lane.lane_width - previous_lane.lane_width)
distance_ok = lane_width_diff <= SIMILAR_LANE_DISTANCE_THRESHOLD
# Checking that they are roughly parallel
current_left_directrix, current_right_directrix = lane_lines_directixes(current_lane)
current_directrix_diff = np.abs(current_left_directrix - current_right_directrix)
current_directrix_ok = current_directrix_diff <= ROUGHLY_PARALLEL_THRESHOLD
return curvature_ok and distance_ok and current_directrix_ok and length_ok
MAX_FRAMES_TO_SKIP_BEFORE_RESET = 20
skipped_frames_counter = 0
def detect_on_video_frame(wraped_binarized, with_debug_image):
""" Will take a video frame and will apply a metod to extract lane lines """
global last_valid_lanes
is_first_frame = len(last_valid_lanes) == 0
if is_first_frame:
# the first time trying to find a lane from scratch
detected_debug, current_lane = full_line_search(wraped_binarized, with_debug_image=with_debug_image)
# this video frame can always be used
last_valid_lanes.append(current_lane)
else:
previous_lane = last_valid_lanes[-1]
detected_debug, current_lane = local_line_search(wraped_binarized, previous_lane.left_fit,
previous_lane.right_fit,
with_debug_image=with_debug_image,
margin=100)
        # If the fast local search fails, fall back to the slower full search
if not current_lane:
print('slow search, last one failed')
detected_debug, current_lane = full_line_search(wraped_binarized, with_debug_image=with_debug_image)
use_this_frame = can_use_this_frame(current_lane, previous_lane)
# check to see if this frame can be used
global skipped_frames_counter
if use_this_frame:
skipped_frames_counter = 0
last_valid_lanes.append(current_lane)
else:
# this frame was skipped
skipped_frames_counter += 1
if skipped_frames_counter >= MAX_FRAMES_TO_SKIP_BEFORE_RESET:
# Reset pipeline, starting with a new full search
detected_debug, current_lane = full_line_search(wraped_binarized, with_debug_image=with_debug_image)
last_valid_lanes = deque(maxlen=TOTAL_LAST_ENTRIES_TO_KEEP)
last_valid_lanes.append(current_lane)
if current_lane is None:
print('Something bad happened, but why!?')
print(detected_debug)
print(current_lane)
return detected_debug, current_lane
def get_averaged_left_right_lane_fits():
left = deque()
right = deque()
for lane in last_valid_lanes:
left.append(lane.left_fit)
right.append(lane.right_fit)
left_averaged_fit = np.array(left).mean(axis=0)
right_averaged_fit = np.array(right).mean(axis=0)
return left_averaged_fit, right_averaged_fit
def average_curves_and_get_lane(wraped_binarized, left_fit, right_fit, min_left_y, max_left_y, min_right_y, max_right_y,
ym_per_pix=30 / 720, xm_per_pix=3.7 / 700):
""" Creates a new lane from the last prediticions """
ploty = np.linspace(0, wraped_binarized.shape[0] - 1, wraped_binarized.shape[0]).astype(np.int)
# Generate x and y values for plotting
left_fitx = (left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]).astype(np.int)
right_fitx = (right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]).astype(np.int)
lane_position = get_lane_position(wraped_binarized, left_fitx, right_fitx, xm_per_pix)
lane_width = get_lane_width(left_fitx, right_fitx, xm_per_pix)
# Interpolate and rebuild data
def eval_poly_2(fitted, x):
a, b, c = fitted
return a * x ** 2 + b * x + c
left_y = np.array([x for x in range(min_left_y, max_left_y)], dtype=np.int)
left_x = np.array([eval_poly_2(left_fit, y) for y in left_y], dtype=np.int)
right_y = np.array([x for x in range(min_right_y, max_right_y)], dtype=np.int)
right_x = np.array([eval_poly_2(right_fit, y) for y in right_y], dtype=np.int)
if len(left_x) == 0 or len(left_y) == 0:
        print('What happened?')
print(min_left_y, max_left_y, left_fit)
print(min_right_y, max_right_y, right_fit)
print(left_y)
print(left_x)
# curvature in meters
left_curverad_m, right_curverad_m = curvatures_in_meters(left_x, left_y, ploty, right_x, right_y, xm_per_pix,
ym_per_pix)
lane_info = LaneInfo()
lane_info.left_fit = left_fit
lane_info.right_fit = right_fit
lane_info.left_fitx = left_fitx
lane_info.right_fitx = right_fitx
lane_info.ploty = ploty
lane_info.left_curverad_m = left_curverad_m
lane_info.right_curverad_m = right_curverad_m
lane_info.lane_position = lane_position
lane_info.lane_width = lane_width
lane_info.min_left_y = min_left_y
lane_info.max_left_y = max_left_y
lane_info.min_right_y = min_right_y
lane_info.max_right_y = max_right_y
return lane_info
TOTAL_LAST_ENTRIES_TO_KEEP = 20
last_valid_lanes = deque(maxlen=TOTAL_LAST_ENTRIES_TO_KEEP)
def combined_line_detector(wraped_binarized, with_debug_image, is_video_frame, detection_history):
"""
Used to detect lanes. There are two main approaches:
- one for images (a full search is applied on each image)
- one for videos (a mix between full search and local search is applied to successive frames)
"""
if not is_video_frame:
detected_debug, current_lane = full_line_search(wraped_binarized, with_debug_image=with_debug_image)
return detected_debug, current_lane
detected_debug, current_lane = detect_on_video_frame(wraped_binarized, with_debug_image)
left_averaged_fit, right_averaged_fit = get_averaged_left_right_lane_fits()
average_lane_info = average_curves_and_get_lane(wraped_binarized, left_averaged_fit, right_averaged_fit,
current_lane.min_left_y, current_lane.max_left_y,
current_lane.min_right_y, current_lane.max_right_y,
ym_per_pix=30 / 720, xm_per_pix=3.7 / 700)
# no debug result is provided for videos
return None, average_lane_info
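# A minimal usage sketch, assuming `warped_binary_example.png` (a hypothetical
# file name) already contains a bird's-eye binary image such as the one produced
# by a pipeline like the sketch above. For a single still image the detector runs
# a full sliding-window search; for video frames it keeps state in
# last_valid_lanes and mixes full and local searches.
def example_run_detector(path="warped_binary_example.png"):
    warped = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if warped is None:
        logger.warning("could not read %s", path)
        return None
    warped = (warped > 0).astype(np.uint8)
    debug_image, lane = combined_line_detector(
        warped, with_debug_image=True, is_video_frame=False, detection_history=None)
    logger.info("curvature left/right: %s / %s m", lane.left_curverad_m, lane.right_curverad_m)
    logger.info("position in lane: %s m, lane width: %s m", lane.lane_position, lane.lane_width)
    return debug_image, lane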
|
StarcoderdataPython
|
1690175
|
import sublime
import json
import codecs
import os
is_sublime_text_3 = int(sublime.version()) >= 3000
if is_sublime_text_3:
from .settings import Settings
else:
from settings import Settings
class ProcessCache():
_procs = []
last_task_name = None
@classmethod
def get_from_storage(cls):
return cls.storage().read() or []
@classmethod
def get(cls):
return cls._procs[:]
@classmethod
def refresh(cls):
def remove_dead(process):
if not process.is_alive():
cls.remove(process)
cls.each(remove_dead)
@classmethod
def add(cls, process):
cls.last_task_name = process.get_task_name()
if process not in cls._procs:
cls._procs.append(process)
process = process.to_json()
cls.storage().update(lambda procs: procs + [process] if process not in procs else procs)
@classmethod
def remove(cls, process):
if process in cls._procs:
cls._procs.remove(process)
cls.storage().update(lambda procs: [proc for proc in procs if proc['pid'] != process.pid])
@classmethod
def kill_all(cls):
cls.each(lambda process: process.kill())
cls.clear()
@classmethod
def each(cls, fn):
for process in cls.get():
fn(process)
@classmethod
def empty(cls):
return len(cls._procs) == 0
@classmethod
def clear(cls):
del cls._procs[:]
cls.storage().write([])
@classmethod
def storage(cls):
if Settings.get_from_shared_data("track_processes", True):
return CacheFile(Settings.package_path())
else:
return Cache()
class Cache():
def exists(self):
pass
def remove(self):
pass
def open(self, mode="r"):
pass
def read(self):
pass
def write(self, data):
pass
def update(self, fn):
pass
class CacheFile(Cache):
def __init__(self, working_dir):
self.working_dir = working_dir
self.cache_path = os.path.join(self.working_dir, Settings.CACHE_FILE_NAME)
def exists(self):
return os.path.exists(self.cache_path)
def remove(self):
return os.remove(self.cache_path)
def open(self, mode="r"):
return codecs.open(self.cache_path, mode, "utf-8", errors='replace')
def read(self):
data = None
cache_file = self.open()
try:
data = json.load(cache_file)
except ValueError:
data = []
finally:
cache_file.close()
return data
def write(self, data):
cache_file = self.open("w")
try:
json_data = json.dumps(data, ensure_ascii=False)
if not json_data:
json_data = '[]'
cache_file.write(json_data)
finally:
cache_file.close()
def update(self, fn):
cache_file = codecs.open(self.cache_path, "r+", "utf-8", errors='replace')
current_data = None
try:
current_data = json.load(cache_file)
except ValueError:
current_data = []
try:
cache_file.seek(0)
new_data = fn(current_data)
cache_file.write(json.dumps(new_data))
cache_file.truncate()
finally:
cache_file.close()
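# A standalone sketch of the read-modify-write pattern that CacheFile.update()
# implements, without the Sublime Text dependencies of this plugin. The path is a
# hypothetical placeholder and, like CacheFile, the helper assumes the cache file
# already exists; the point is the seek(0)/write/truncate sequence that rewrites
# the JSON payload in place.
def example_update_json_cache(path, fn):
    with codecs.open(path, "r+", "utf-8", errors="replace") as cache_file:
        try:
            current = json.load(cache_file)
        except ValueError:
            current = []
        cache_file.seek(0)
        cache_file.write(json.dumps(fn(current)))
        cache_file.truncate()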
|
StarcoderdataPython
|
11399533
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_address_rate_limiting_facts
short_description: Fetches details about a AddressRateLimiting resource in Oracle Cloud Infrastructure
description:
- Fetches details about a AddressRateLimiting resource in Oracle Cloud Infrastructure
- Gets the address rate limiting settings of the Web Application Firewall configuration for a WAAS policy.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific address_rate_limiting
oci_waas_address_rate_limiting_facts:
# required
waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
address_rate_limiting:
description:
- AddressRateLimiting resource
returned: on success
type: complex
contains:
is_enabled:
description:
- Enables or disables the address rate limiting Web Application Firewall feature.
returned: on success
type: bool
sample: true
allowed_rate_per_address:
description:
- The number of allowed requests per second from one IP address. If unspecified, defaults to `1`.
returned: on success
type: int
sample: 56
max_delayed_count_per_address:
description:
- The maximum number of requests allowed to be queued before subsequent requests are dropped. If unspecified, defaults to `10`.
returned: on success
type: int
sample: 56
block_response_code:
description:
- "The response status code returned when a request is blocked. If unspecified, defaults to `503`. The list of available response codes: `400`,
`401`, `403`, `404`, `405`, `408`, `409`, `411`, `412`, `413`, `414`, `415`, `416`, `422`, `494`, `495`, `496`, `497`, `499`, `500`, `501`,
`502`, `503`, `504`, `507`."
returned: on success
type: int
sample: 56
sample: {
"is_enabled": true,
"allowed_rate_per_address": 56,
"max_delayed_count_per_address": 56,
"block_response_code": 56
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AddressRateLimitingFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"waas_policy_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_waf_address_rate_limiting,
waas_policy_id=self.module.params.get("waas_policy_id"),
)
AddressRateLimitingFactsHelperCustom = get_custom_class(
"AddressRateLimitingFactsHelperCustom"
)
class ResourceFactsHelper(
AddressRateLimitingFactsHelperCustom, AddressRateLimitingFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(waas_policy_id=dict(aliases=["id"], type="str", required=True),)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="address_rate_limiting",
service_client_class=WaasClient,
namespace="waas",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(address_rate_limiting=result)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8124340
|
"""
Runs AuTuMN apps
You can access this script from your CLI by running:
python -m autumn db --help
"""
import click
@click.group()
def db():
"""Database utilities"""
@db.command("fetch")
def download_input_data():
"""
Fetch input data from external sources for input database.
"""
from autumn.core.inputs import fetch_input_data
fetch_input_data()
@db.command("build")
def build_input_db():
"""
Build a new input database from input data files.
"""
from autumn.core.inputs import build_input_database
build_input_database(rebuild=True)
@db.command("feather2sql")
@click.argument("src_db_path", type=str)
@click.argument("dest_db_path", type=str)
def feather2sql(src_db_path, dest_db_path):
"""
Convert a Feather DB to a SQLite DB
"""
from autumn.core import db as autumn_db
assert autumn_db.FeatherDatabase.is_compatible(
src_db_path
), "Source DB must be FeatherDatabase compatible"
src_db = autumn_db.FeatherDatabase(src_db_path)
autumn_db.database.convert_database(src_db, autumn_db.database.Database, dest_db_path)
@db.command("parquet2sql")
@click.argument("src_db_path", type=str)
@click.argument("dest_db_path", type=str)
def parquet2sql(src_db_path, dest_db_path):
"""
    Convert a Parquet DB to a SQLite DB
"""
from autumn.core import db as autumn_db
assert autumn_db.ParquetDatabase.is_compatible(
src_db_path
), "Source DB must be ParquetDatabase compatible"
src_db = autumn_db.ParquetDatabase(src_db_path)
autumn_db.database.convert_database(src_db, autumn_db.database.Database, dest_db_path)
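# A minimal sketch of exercising the command group without touching any real
# database, using click's built-in test runner; asking for --help avoids running
# the fetch/build commands themselves.
def example_invoke_help():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(db, ["--help"])
    return result.output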
|
StarcoderdataPython
|
3500499
|
from typing import Any, List, Tuple, Union
from pathlib import Path
import os
import unittest
from pandas import DataFrame
from schematics.types import ListType, IntType, StringType
import numpy as np
import skimage.io
from hidebound.core.specification_base import SpecificationBase
import hidebound.core.traits as tr
import hidebound.core.validators as vd
# ------------------------------------------------------------------------------
class DatabaseTestBase(unittest.TestCase):
columns = [
'specification',
'extension',
'filename',
'filepath',
'file_error',
'file_traits',
'asset_name',
'asset_path',
'asset_type',
'asset_traits',
'asset_error',
'asset_valid',
] # type: List[str]
def get_data(self, root, nans=False):
# type: (str, bool) -> DataFrame
data = [
[0, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v001', 'p-proj001_s-spec001_d-pizza_v001_c0000-0001_f0001.png', None ], # noqa E501 E241
[0, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v001', 'p-proj001_s-spec001_d-pizza_v001_c0000-0001_f0002.png', None ], # noqa E501 E241
[0, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v001', 'p-proj001_s-spec001_d-pizza_v001_c0000-0001_f0003.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0001.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0002.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0003.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0004.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0001.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0002.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0003.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0004.png', None ], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-kiwi_v003_c0000-0001_f0001.png', ' Inconsistent descriptor field token'], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-pizza_v003_c0000-0001_f0002.png', None ], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-PIZZA_v003_c0000-0001_f0003.png', 'Illegal descriptor field token' ], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-pizza_v003_c0000-0001_f0004.png', None ], # noqa E501 E241
[3, False, np.nan, None, 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec0001_d-pizza_v003_c0000-0001_f0005.png', 'Illegal specification field token' ], # noqa E501 E241
[3, False, np.nan, None, 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'misc.txt', 'SpecificationBase not found' ], # noqa E501 E241
[4, True, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v001', 'p-proj001_s-spec002_d-taco_v001_f0000.jpg', None ], # noqa E501 E241
[4, True, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v001', 'p-proj001_s-spec002_d-taco_v001_f0001.jpg', None ], # noqa E501 E241
[4, True, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v001', 'p-proj001_s-spec002_d-taco_v001_f0002.jpg', None ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v001_f0000.jpg', 'Invalid asset directory name' ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v002_f0001.jpg', None ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v002', 'Expected "_"' ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v02_f0003.jpg', 'Illegal version field token' ], # noqa E501 E241
[6, False, 'sequence', 'vdb001', 'proj002/vdb001', 'p-proj002_s-vdb001_d-bagel_v001.vdb', 'Specification not found' ], # noqa E501 E241
[6, False, 'sequence', 'vdb001', 'proj002/vdb001', 'p-proj002_s-vdb001_d-bagel_v002.vdb', 'Specification not found' ], # noqa E501 E241
[6, False, 'sequence', 'vdb001', 'proj002/vdb001', 'p-proj002_s-vdb001_d-bagel_v003.vdb', 'Specification not found' ], # noqa E501 E241
] # type: Any
data = DataFrame(data)
data.columns = [
'asset_id', 'asset_valid', 'asset_type', 'specification',
'asset_path', 'filename', 'file_error'
]
data.asset_path = data.asset_path.apply(lambda x: root + '/' + x)
data['asset_name'] = data.asset_path.apply(lambda x: x.split('/')[-1])
data['filepath'] = data\
.apply(lambda x: Path(root, x.asset_path, x.filename), axis=1)
Spec001, Spec002, BadSpec = self.get_specifications()
specs = {
Spec001.name: Spec001,
Spec002.name: Spec002,
None: np.nan,
'vdb001': np.nan,
}
data['specification_class'] = data.specification\
.apply(lambda x: specs[x])
if nans:
data = data.applymap(lambda x: np.nan if x is None else x)
return data
def create_files(self, root):
# type: (Union[str, Path]) -> "DataFrame"
root = Path(root).as_posix()
data = self.get_data(root)
for filepath in data.filepath.tolist():
os.makedirs(filepath.parent, exist_ok=True)
ext = os.path.splitext(filepath)[-1][1:]
if ext in ['png', 'jpg']:
img = np.zeros((5, 4, 3), dtype=np.uint8)
img[:, :, 2] = 128
skimage.io.imsave(filepath.as_posix(), img)
else:
with open(filepath, 'w') as f:
f.write('')
return data
def get_directory_to_dataframe_data(self, root):
# type: (str) -> "DataFrame"
files = self.get_data(root)
data = DataFrame()
data['filename'] = files.filename
data['filepath'] = files.asset_path
data.filepath = data\
.apply(lambda x: Path(x.filepath, x.filename), axis=1)
        # keep only the extension string, without the leading dot
        data['extension'] = files\
            .filename.apply(lambda x: os.path.splitext(x)[1][1:])
return data
def get_specifications(self):
# type: () -> Tuple[Any, Any, Any]
class Spec001(SpecificationBase):
name = 'spec001'
filename_fields = [
'project',
'specification',
'descriptor',
'version',
'coordinate',
'frame',
'extension',
]
coordinate = ListType(ListType(IntType()), required=True)
frame = ListType(IntType(), required=True)
extension = ListType(
StringType(),
required=True,
validators=[lambda x: vd.is_eq(x, 'png')]
)
height = ListType(
IntType(),
required=True, validators=[lambda x: vd.is_eq(x, 5)]
)
width = ListType(
IntType(),
required=True, validators=[lambda x: vd.is_eq(x, 4)]
)
channels = ListType(
IntType(),
required=True, validators=[lambda x: vd.is_eq(x, 3)]
)
file_traits = dict(
width=tr.get_image_width,
height=tr.get_image_height,
channels=tr.get_num_image_channels,
)
def get_asset_path(self, filepath):
return Path(filepath).parents[0]
class Spec002(SpecificationBase):
name = 'spec002'
filename_fields = [
'project',
'specification',
'descriptor',
'version',
'frame',
'extension',
]
frame = ListType(IntType(), required=True)
extension = ListType(
StringType(),
required=True,
validators=[lambda x: vd.is_eq(x, 'jpg')]
)
height = ListType(
IntType(),
required=True, validators=[lambda x: vd.is_eq(x, 5)]
)
width = ListType(
IntType(),
required=True, validators=[lambda x: vd.is_eq(x, 4)]
)
channels = ListType(
IntType(),
required=True, validators=[lambda x: vd.is_eq(x, 3)]
)
file_traits = dict(
width=tr.get_image_width,
height=tr.get_image_height,
channels=tr.get_num_image_channels,
)
def get_asset_path(self, filepath):
return Path(filepath).parents[0]
class BadSpec:
pass
return Spec001, Spec002, BadSpec
|
StarcoderdataPython
|
9730653
|
<filename>K_Mathematical_Modeling/Section 2/solutionODEsExercise10.py
from IPython.display import display, Latex, Math
print("Yes it is a linear system. In matrix form it can be written:")
display(Latex('$ dY/dt = A*Y+Y_0$'))
print("Where Y is the vector formed by the two unknown, time-dependent concentrations A(t) and B(t):")
display(Latex(' Y = \\begin{bmatrix} A(t) \\end{bmatrix} \n \\begin{bmatrix} B(t) \\end{bmatrix} '))
print("Where Y_0 is the vector formed by the constants in the right hand side:")
display(Latex(' Y_0 = \\begin{bmatrix} 1 \\end{bmatrix} \n \\begin{bmatrix} 0 \\end{bmatrix} '))
print("and the matrix A contains, on the first line, the coefficients of A(t) and B(t) in the right hand side of the first equation (in this order), and on the second line, the coefficients of A(t) and B(t) in the right hand side of the second equation:")
display(Latex(' A = \\begin{bmatrix} 0 & -k_{B->A} \\end{bmatrix} \n \\begin{bmatrix} k_{A->B} & 0 \\end{bmatrix} '))
print("if the two rates are equal to k, the matrix simplifies to:")
display(Latex(' A = -k* \\begin{bmatrix} 0 & 1 \\end{bmatrix} \n \\begin{bmatrix} -1 & 0 \\end{bmatrix} '))
print("we can recognize the form of the matrix whose exponential has sin(t) and cos(t) terms (see formulas above), up to a multiplicative constant -k:")
display(Latex(' A = -k* \\begin{bmatrix} 0 & 1 \\end{bmatrix} \n \\begin{bmatrix} -1 & 0 \\end{bmatrix} '))
print("So we know, because it is a linear system, that we can formally write the solution Y(t) as:")
display(Latex('$ Y(t)= e^{A*t} * (Y(t=0)+ A^{-1}*Y_0) - A^{-1}*Y_0 $'))
print("but thanks to the particular form of the matrix A, we know how to compute the exponential")
display(Latex(' e^{A*t} = \\begin{bmatrix} \cos(k*t) & -\sin(k*t) \\end{bmatrix} \n \\begin{bmatrix} \sin(k*t) & \cos(k*t) \\end{bmatrix} '))
print("and we can also remark that (or compute)")
display(Latex(' A^{-1} =1/k * \\begin{bmatrix} 0 & 1 \\end{bmatrix} \n \\begin{bmatrix} -1 & 0 \\end{bmatrix} '))
print("So that we can compute all terms in the solution. For instance,")
display(Latex(' A^{-1} * Y_0 = \\begin{bmatrix} 0 \\end{bmatrix} \n \\begin{bmatrix} -1/k \\end{bmatrix} '))
print("The rest of the calculations will depend on the particular initial conditions, the vector Y(t=0). Let's assume that there is no A and no B at t=0, as in the simulations exercise 8, thus Y(t=0)=0. We can finish the matrix operations and we get the general solution (the 2 components of Y(t), in this order):")
display(Latex('$A(t)= \sin(k*t)/k $'))
display(Latex('$B(t)= -\cos(k*t)/k+1/k $'))
print("We obtain oscillatory functions of time. If kA->B was different from kB->A, the matrix A wouldn't have one of the particular forms with which the calculation of the exponential is straightforward. So we would have needed to diagonalize the matrix using a transformation P such that D=P^-1*A*P is diagonal, compute the exponential of the diagonal matrix, and invert the transformation to get exp(A). This procedure would have mixed the oscillatory functions, generating damped or amplified oscillations depending on the choice of parameters. A good exercise to do!")
|
StarcoderdataPython
|
9739856
|
# -*- coding: utf-8 -*-
# putcall
# -------
# Collection of classical option pricing formulas.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.2, copyright Wednesday, 18 September 2019
# Website: https://github.com/sonntagsgesicht/putcall
# License: Apache License 2.0 (see LICENSE file)
import math
from mathtoolspy.distribution.normal_distribution import cdf_abramowitz_stegun as normal_cdf
def hw_discount_bond_option(forward_value, strike_value, implied_vol_value, time_value, is_call_bool,
time_to_bond_value, mean_reversion_value, maturity_discount_value):
"""
discount bond option pricing formula in the Hull White framework
:param float forward_value: forward price of underlying (discount bond) at bond maturity (D(t,Tau,r))
:param float strike_value: strike price
:param float implied_vol_value: volatility of the spot rate
:param float time_value: year fraction until exercise date (option maturity date)
    :param bool is_call_bool: call -> True, put -> False
:param float time_to_bond_value: year fraction between option's maturity and bond's maturity
:param float maturity_discount_value: forward price of underlying (discount bond) at option maturity date (D(t,T,r))
:param float mean_reversion_value: mean reversion / alpha
:return: float
discount bond option pricing formula in the Hull White framework
as described in <NAME>, *Efficient Methods for Valuing Interest Rate Derivatives*, 2000, pp. 50
"""
if time_value == 0:
return 0.0
else:
strike = maturity_discount_value * strike_value
A = forward_value / strike
B = (1 - math.exp(-mean_reversion_value * (time_to_bond_value))) / mean_reversion_value
var = ((implied_vol_value ** 2) / (2 * mean_reversion_value)) * \
(1 - math.exp(-2 * mean_reversion_value * time_value))
sigma = B * math.sqrt(var)
h = (math.log(A) / sigma) + 0.5 * sigma
if is_call_bool:
# call
option_bond_price = forward_value * normal_cdf(h) - strike * normal_cdf(h - sigma)
else:
# put
option_bond_price = strike * normal_cdf(-h + sigma) - forward_value * normal_cdf(-h)
return option_bond_price
def hw_cap_floor_let(forward_rate_value, strike_value, implied_vol_value, time_value, is_call_bool, year_fraction_value,
mean_reversion_value, discount_value):
"""
pricing formula of a caplet/floorlet under the Hull White framework
:param float forward_rate_value: forward rate (LIBOR,EURIBOR...)
:param float discount_value: zero bond price between pricing time and start of the caplet (D(t,T,r))
:param float mean_reversion_value: mean reversion in the Hull White model
:param float strike_value: strike of the option
:param float implied_vol_value: volatility of the spot rate
:param float year_fraction_value: year fraction between start and maturity = tenor of the rate
:param float time_value: year_fraction between pricing date (e.g. start of the Cap) and start of the caplet Y(t,T)
:param bool is_call_bool: call(caplet) -> True, put(floorlet) -> False
:return: float
pricing formula of a caplet/floorlet under the Hull White framework
as described in <NAME>, *Efficient Methods for Valuing Interest Rate Derivatives*, 2000, pp. 57
"""
discount_forward_value = 1 / (1 + year_fraction_value * forward_rate_value) # same as D(T,Tau,r)
forward = discount_value * discount_forward_value
strike = 1 / (1 + year_fraction_value * strike_value)
vol = implied_vol_value
t = time_value
yf = year_fraction_value
df = discount_value
mr = mean_reversion_value
hw_bond_opt = hw_discount_bond_option(forward, strike, vol, t, not is_call_bool, yf, mr, df)
return (1 + year_fraction_value * strike_value) * hw_bond_opt
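# A minimal usage sketch with hypothetical market data (placeholder numbers, not
# taken from any real curve): price a single caplet on a 6M rate with the
# Hull-White formulas defined above.
if __name__ == '__main__':
    caplet_value = hw_cap_floor_let(
        forward_rate_value=0.02,    # 2% forward rate
        strike_value=0.015,         # 1.5% strike
        implied_vol_value=0.01,     # short-rate volatility
        time_value=1.0,             # caplet starts in one year
        is_call_bool=True,          # caplet (floorlet would be False)
        year_fraction_value=0.5,    # 6M accrual period
        mean_reversion_value=0.03,  # Hull-White mean reversion
        discount_value=0.98)        # discount factor to the caplet start
    print('Hull-White caplet value: %.6f' % caplet_value)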
|
StarcoderdataPython
|
8192631
|
from aioconsole.stream import aprint
import discord
from discord import AutoShardedClient
from discord.ext import commands
from .channel import CLIChannel
from aioconsole import ainput
from .utils import fancy_print, Response
import sys, asyncio, time
class CLI(commands.Bot):
def __init__(self, command_prefix, description=None, **options):
super().__init__(command_prefix, description=description, **options)
channel_id = options.pop("channel_id")
receive_author = options.pop("receive_author", None)
self.channel_id = channel_id
self.receive_author = receive_author
self.channel = CLIChannel(channel_id, bot=self)
self._message_printing = False
self._timeout = False
self.started = False
    async def wait_until(self, delegate, timeout: int):
        """Polls ``delegate`` every 5 seconds until it returns True or ``timeout`` seconds have elapsed."""
        end = time.time() + timeout
        while time.time() < end:
            if delegate():
                return True
            await asyncio.sleep(5)
        return False
    def check(self):
        # used as the delegate for wait_until(); True once the previous message has finished printing
        return not self._message_printing
async def on_message(self, message : discord.Message):
if message.channel.id == self.channel_id:
if self._message_printing:
await self.wait_until(self.check, 60)
self._message_printing = True
if message.author.bot:
return await self.process_commands(message)
if self.receive_author:
if message.author.id == self.receive_author:
await fancy_print(f"[{message.author.name+'#'+message.author.discriminator}]: {message.content}\n", 0.2)
await self.send_message_prompt(message)
self._message_printing = False
else:
await fancy_print(f"[{message.author.name+'#'+message.author.discriminator}]: {message.content}\n", 0.2)
await self.send_message_prompt(message)
self._message_printing = False
return await self.process_commands(message)
async def send_prompt(self):
await fancy_print("Would you like to start the cli? [y/n] ", 0.10)
data = await ainput()
data = data.lower()
if data == "y":
return Response("YES")
else:
return Response("NO")
async def send_message_prompt(self, message : discord.Message):
data = await ainput("[SERVER] Send a message: ")
if data == "reply":
id = await ainput("Send the message id to wich you want to reply to. ")
id = int(id)
content = await ainput("[SERVER] Send the content you want to reply with. ")
msg = await message.channel.fetch_message(id)
return await msg.reply(content)
await self.channel.send(data)
async def _start(self, token : str, **options):
responses = ["YES", "NO"]
data = await self.send_prompt()
if data.response == responses[0]:
await self.start(token, **options)
await self.send_message_prompt()
elif data.response == responses[1]:
sys.exit(1)
def run(self, token, **options):
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(self._start(token, **options))
class ShardedCLI(CLI, AutoShardedClient):
pass
|
StarcoderdataPython
|
3567906
|
#!/usr/bin/python3
# This 'recreates' the prior version from the Wikipedia pages incase file lost
import csv
import os
import re
import sys
import traceback
from mwclient import Site
#
# Configuration
#
if 'WIKI_WORKING_DIR' not in os.environ:
sys.stderr.write('ERROR: WIKI_WORKING_DIR environment variable not set\n')
sys.exit(1)
if 'WIKI_CONFIG_DIR' not in os.environ:
sys.stderr.write('ERROR: WIKI_CONFIG_DIR environment variable not set\n')
sys.exit(1)
STORAGE = os.environ['WIKI_WORKING_DIR'] + '/Dois/doi-registrants-prior'
BOTINFO = os.environ['WIKI_CONFIG_DIR'] + '/bot-info.txt'
#
# Functions
#
def extractRecords(contents):
# extract the doi information from the page contents
# prefix, crossref registrant, wikipedia registrant, crossref target, wikipedia target
records = []
for line in contents.splitlines():
prefix = ''
crossrefRegistrant = ''
wikipediaRegistrant = ''
crossrefTarget = ''
wikipediaTarget = ''
# two possible patterns
match = re.search(r'^{{JCW-DOI-prefix\|(.+?)\|(.+?)\|(.+?)\|4=Crossref = \[\[(.+?)\]\]<br/>Wikipedia = \[\[(.+?)\]\]', line)
if match:
prefix = match.group(1)
crossrefRegistrant = match.group(2)
wikipediaRegistrant = match.group(3)
crossrefTarget = match.group(4)
wikipediaTarget = match.group(5)
else:
match = re.search(r'^{{JCW-DOI-prefix\|(.+?)\|(.+?)\|(.+?)\|(.+?)}}', line)
if match:
prefix = match.group(1)
crossrefRegistrant = match.group(2)
wikipediaRegistrant = match.group(3)
crossrefTarget = 'NONE'
wikipediaTarget = match.group(4)
# if either pattern found
if (prefix):
if crossrefRegistrant == '-':
crossrefRegistrant = 'NONE'
if wikipediaRegistrant == '-':
wikipediaRegistrant = 'NONE'
if crossrefTarget == '-':
crossrefTarget = 'NONE'
if wikipediaTarget == '-':
wikipediaTarget = 'NONE'
if crossrefTarget.startswith(':'):
crossrefTarget = crossrefTarget[1:]
if wikipediaTarget.startswith(':'):
wikipediaTarget = wikipediaTarget[1:]
records.append((prefix, crossrefRegistrant, wikipediaRegistrant, crossrefTarget, wikipediaTarget))
return records
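# An illustrative helper (added for clarity, never called by the script) showing
# the kind of wikitext line extractRecords() parses; the prefix and names below
# are made-up examples matching the second, four-field pattern above.
def exampleExtractRecords():
    sample = '{{JCW-DOI-prefix|10.1234|Example Registrant|Example Registrant|Example Target}}'
    return extractRecords(sample)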
def getPages(site):
# find pages from summary page
title = 'User:JL-Bot/DOI'
page = site.pages[title]
pages = []
for line in page.text().splitlines():
match = re.search(r'^\* \[\[User:JL-Bot/DOI/\d+.\d+\|(\d+.\d+)\]\]$', line)
if match:
pages.append(match.group(1))
return pages
def getUserInfo(filename):
# read in bot userinfo
userinfo = {}
try:
with open(filename, 'r') as file:
for line in file:
match = re.search(r'^USERNAME = (.+?)\s*$', line)
if match:
userinfo['username'] = match.group(1)
match = re.search(r'^PASSWORD = (.+?)\s*$', line)
if match:
userinfo['password'] = match.group(1)
if 'username' not in userinfo:
sys.stderr.write('ERROR: username not found\n')
sys.exit(1)
if 'password' not in userinfo:
sys.stderr.write('ERROR: password not found\n')
sys.exit(1)
except Exception:
traceback.print_exc()
sys.exit(1)
return userinfo
def retrievePage(doi):
# retrieve contents of Wikipedia page
title = 'User:JL-Bot/DOI/' + doi
page = site.pages[title]
return page.text()
def writeRecords(file, records):
# write the records to output file
for record in records:
file.write('\t'.join(record) + '\n')
return
#
# Main
#
# initiate bot
userinfo = getUserInfo(BOTINFO)
try:
site = Site('en.wikipedia.org')
site.login(userinfo['username'], userinfo['password'])
except Exception:
traceback.print_exc()
sys.exit(1)
# find pages and iterate through them
pages = getPages(site)
try:
file = open(STORAGE, 'w')
for page in pages:
        print('Processing', page, '...')
contents = retrievePage(page)
records = extractRecords(contents)
writeRecords(file, records)
except Exception:
traceback.print_exc()
sys.exit(1)
|
StarcoderdataPython
|
1661230
|
<reponame>reinforcementdriving/cvat
import json
import base64
from PIL import Image
import io
from model_loader import ModelLoader
import numpy as np
import yaml
def init_context(context):
context.logger.info("Init context... 0%")
functionconfig = yaml.safe_load(open("/opt/nuclio/function.yaml"))
labels_spec = functionconfig['metadata']['annotations']['spec']
labels = {item['id']: item['name'] for item in json.loads(labels_spec)}
model_handler = ModelLoader(labels)
setattr(context.user_data, 'model_handler', model_handler)
context.logger.info("Init context...100%")
def handler(context, event):
context.logger.info("Run tf.matterport.mask_rcnn model")
data = event.body
buf = io.BytesIO(base64.b64decode(data["image"].encode('utf-8')))
threshold = float(data.get("threshold", 0.2))
image = Image.open(buf)
results = context.user_data.model_handler.infer(np.array(image), threshold)
return context.Response(body=json.dumps(results), headers={},
content_type='application/json', status_code=200)
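# A rough client-side sketch (added for illustration; the URL is a hypothetical
# placeholder) of calling this function once deployed: the payload keys mirror
# exactly what handler() reads from event.body above. Requires the `requests`
# package.
def example_request(image_path, url="http://localhost:8080"):
    import requests
    with open(image_path, "rb") as f:
        payload = {
            "image": base64.b64encode(f.read()).decode("utf-8"),
            "threshold": 0.5,
        }
    return requests.post(url, json=payload).json()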
|
StarcoderdataPython
|
1761812
|
<gh_stars>0
from django.contrib import admin
from app.models import TopSecret
# Register your models here.
admin.site.register(TopSecret)
|
StarcoderdataPython
|
132679
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
        sorted_df = df.sort_values(by=['a', 'b'])
tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with tm.assert_raises_regex(ValueError, 'level'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'level'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype(CategoricalDtype(list('cab')))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ['A', 0]) # GH 21052
def test_sort_index_multiindex(self, level):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([
[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 2],
[2, 1, 3]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[3, 4],
[1, 2]], index=expected_mi)
result = df.sort_index(level=level)
assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 3],
[2, 1, 2]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[1, 2],
[3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)),
bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])
result = model.groupby(['X1', 'X2'], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0),
(0.0, 0.5), (0.5, 3.0)],
closed='right')
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
def test_sort_index_na_position_with_categories(self):
# GH 22556
# Positioning missing value properly when column is Categorical.
categories = ['A', 'B', 'C']
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = 'first'
na_position_last = 'last'
column_name = 'c'
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices, reverse=True)
df = pd.DataFrame({
column_name: pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=categories,
ordered=True)})
# sort ascending with na first
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + categories,
categories=categories,
ordered=True)
}, index=na_indices + category_indices)
assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_last)
expected = DataFrame({
column_name: Categorical(categories + list_of_nans,
categories=categories,
ordered=True)
}, index=category_indices + na_indices)
assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(by=column_name,
ascending=False,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + reversed_categories,
categories=categories,
ordered=True)
}, index=reversed_na_indices + reversed_category_indices)
assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(by=column_name,
ascending=False,
na_position=na_position_last)
expected = DataFrame({
column_name: Categorical(reversed_categories + list_of_nans,
categories=categories,
ordered=True)
}, index=reversed_category_indices + reversed_na_indices)
assert_frame_equal(result, expected)
def test_sort_index_na_position_with_categories_raises(self):
df = pd.DataFrame({
'c': pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=['A', 'B', 'C'],
ordered=True)})
with pytest.raises(ValueError):
df.sort_values(by='c',
ascending=False,
na_position='bad_position')
|
StarcoderdataPython
|
155193
|
<reponame>a-harrison/repinger
#!/usr/bin/env python
import sys
import logging
import getpass
from optparse import OptionParser
import json
import os
import ConfigParser
# Dependencies
import sleekxmpp
from slack_message_client import SlackMessageClient
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
config = ConfigParser.ConfigParser()
config.read('./app.config')
slack_url= "https://slack.com/api/chat.postMessage"
SLACK_TOKEN = os.environ.get('SLACK_TOKEN')
SLACK_CHANNEL = os.environ.get('SLACK_CHANNEL')
XMPP_JID = os.environ.get('XMPP_JID')
XMPP_PASSWORD = os.environ.get('XMPP_PASSWORD')
if SLACK_TOKEN is None:
SLACK_TOKEN = config.get('Configuration', 'SLACK_TOKEN')
if SLACK_CHANNEL is None:
SLACK_CHANNEL = config.get('Configuration', 'SLACK_CHANNEL')
if XMPP_JID is None:
XMPP_JID = config.get('Configuration', 'XMPP_JID')
if XMPP_PASSWORD is None:
XMPP_PASSWORD = config.get('Configuration', 'XMPP_PASSWORD')
class EchoBot(sleekxmpp.ClientXMPP):
def __init__(self, jid, password):
super(EchoBot, self).__init__(jid, password)
self.add_event_handler('session_start', self.start)
self.add_event_handler('message', self.message)
self.slack_client = SlackMessageClient(
slack_url,
SLACK_TOKEN,
SLACK_CHANNEL
)
def start(self, start):
startup_payload = self.slack_client.build_payload("Listener started.", None)
self.slack_client.send_message(startup_payload)
self.send_presence()
self.get_roster()
def message(self, msg):
print "Type: %s" % msg['type']
print "From: %s" % msg['from']
print "To: %s" % msg['to']
print "Body: %s" % msg['body']
attachment = self.slack_client.build_attachment(
"pleaseignore.com",
"#D00000",
"",
msg['body']
)
payload = self.slack_client.build_payload("", attachment)
# Post Slack Message
self.slack_client.send_message(payload)
if __name__ == '__main__':
# Here we will configure and read command line options
optp = OptionParser()
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option("-j", "--jid", dest="jid", help="JID to use")
optp.add_option("-p", "--password", dest="password", help="password to use")
opts, args = optp.parse_args()
# if opts.jid is None:
# opts.jid = raw_input("Username: ")
# if opts.password is None:
# opts.password = <PASSWORD>("Password: ")
if opts.jid is not None:
XMPP_JID = opts.jid
if opts.password is not None:
XMPP_PASSWORD = opts.password
logging.basicConfig(level=opts.loglevel, format='%(levelname)-8s %(message)s')
# Here we will instantiate our echo bot
if (XMPP_JID is None or XMPP_PASSWORD is None):
print "Connection values not defined."
else:
xmpp = EchoBot(XMPP_JID, XMPP_PASSWORD)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0199') # Pings
        # Finally, we connect the bot and start listening for messages
print "Connecting."
if xmpp.connect():
print "Connected!"
xmpp.process(block=True)
else:
print('Unable to connect')
|
StarcoderdataPython
|
1654822
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: <NAME>
# @Date: Monday, February 18th 2019, 1:46:37 pm
import logging
import struct
import sys
import serial
import iblrig.params as params
log = logging.getLogger('iblrig')
def main(comport: str, command: int):
if not comport:
comport = params.get_board_comport()
ser = serial.Serial(port=comport, baudrate=115200, timeout=1)
ser.write(struct.pack('cB', b':', command))
ser.close()
log.debug(f"Sent <:{command}> to {comport}")
return
if __name__ == "__main__":
if len(sys.argv) == 2:
comport = params.get_board_comport()
command = sys.argv[1]
else:
comport = sys.argv[1]
command = sys.argv[2]
main(comport, int(command))
|
StarcoderdataPython
|
6622374
|
import random
import sys
from os import path
import tensorflow as tf
import numpy as np
import pandas as pd
from Bio.Seq import Seq
import threading
import pybedtools
import os
import glob
from maxatac.architectures.dcnn import get_dilated_cnn
from maxatac.utilities.constants import BP_RESOLUTION, BATCH_SIZE, CHR_POOL_SIZE, INPUT_LENGTH, INPUT_CHANNELS, \
BP_ORDER, TRAIN_SCALE_SIGNAL, BLACKLISTED_REGIONS, DEFAULT_CHROM_SIZES
from maxatac.utilities.genome_tools import load_bigwig, load_2bit, get_one_hot_encoded, build_chrom_sizes_dict
from maxatac.utilities.system_tools import get_dir, remove_tags, replace_extension
class MaxATACModel(object):
"""
This object will organize the input model parameters and initialize the maxATAC model
The methods are:
    __get_interpretation_attributes: This will import the interpretation inputs if the interpretation module is being used.
__get_model: This will get the correct architecture and parameters based on the user input
"""
def __init__(self,
arch,
seed,
output_directory,
prefix,
threads,
meta_path,
weights,
dense=False,
target_scale_factor=TRAIN_SCALE_SIGNAL,
output_activation="sigmoid",
interpret=False,
interpret_cell_type=""
):
"""
Initialize the maxATAC model with the input parameters and architecture
:param arch: Neural network architecture to use: DCNN, resNet, UNet, multi-modal
:param seed: Random seed to use
:param output_directory: Path to output directory
:param prefix: Prefix to use for filename
:param threads: Number of threads to use
:param meta_path: Path to the meta file associated with the run
:param output_activation: The activation function to use in the output layer
:param dense: Whether to use a dense layer on output
:param weights: Input weights to use for model
:param interpret: Boolean for whether this is training or interpretation
"""
self.arch = arch
self.seed = seed
self.output_directory = get_dir(output_directory)
self.model_filename = prefix + "_{epoch}" + ".h5"
self.results_location = path.join(self.output_directory, self.model_filename)
self.log_location = replace_extension(remove_tags(self.results_location, "_{epoch}"), ".csv")
self.tensor_board_log_dir = get_dir(path.join(self.output_directory, "tensorboard"))
self.threads = threads
self.training_history = ""
self.meta_path = meta_path
self.output_activation = output_activation
self.dense = dense
self.weights = weights
self.target_scale_factor = target_scale_factor
# Set the random seed for the model
random.seed(seed)
# Import meta txt as dataframe
self.meta_dataframe = pd.read_csv(self.meta_path, sep='\t', header=0, index_col=None)
# Find the unique number of cell types in the meta file
self.cell_types = self.meta_dataframe["Cell_Line"].unique().tolist()
self.train_tf = self.meta_dataframe["TF"].unique()[0]
self.nn_model = self.__get_model()
if interpret:
            assert interpret_cell_type is not None, "Set the interpretation cell type argument"
self.interpret_cell_type = interpret_cell_type
self.__get_interpretation_attributes()
def __get_interpretation_attributes(self):
self.interpret_location = get_dir(path.join(self.output_directory, 'interpret'))
self.metacluster_patterns_location = get_dir(path.join(self.interpret_location, 'metacluster_patterns'))
self.meme_query_pattern_location = get_dir(path.join(self.interpret_location, 'meme_query'))
self.interpret_model_file = path.join(self.interpret_location, 'tmp.model')
def __get_model(self):
# Get the neural network model based on the specified model architecture
if self.arch == "DCNN_V2":
return get_dilated_cnn(output_activation=self.output_activation,
target_scale_factor=self.target_scale_factor,
dense_b=self.dense,
weights=self.weights
)
else:
sys.exit("Model Architecture not specified correctly. Please check")
def DataGenerator(
sequence,
meta_table,
roi_pool,
cell_type_list,
rand_ratio,
chroms,
bp_resolution=BP_RESOLUTION,
target_scale_factor=1,
batch_size=BATCH_SIZE,
shuffle_cell_type=False,
rev_comp_train=False
):
"""
Initiate a data generator that will yield a batch of examples for training. This generator will mix samples from a
pool of random regions and a pool of regions of interest based on the user defined ratio. The examples will be
returned as a list of numpy arrays.
_________________
Workflow Overview
1) Create the random regions pool
2) Create the roi generator
3) Create the random regions generator
4) Combine the roi and random regions batches according to the rand_ratio value
:param sequence: The input 2bit DNA sequence
:param meta_table: The run meta table with locations to ATAC and ChIP-seq data
:param roi_pool: The pool of regions to use centered on peaks
:param cell_type_list: The training cell lines to use
    :param rand_ratio: The proportion of examples per batch that are drawn from random regions (0 to 1)
:param chroms: The training chromosomes
:param bp_resolution: The resolution of the predictions to use
:param batch_size: The number of examples to use per batch of training
:param shuffle_cell_type: Shuffle the ROI cell type labels if True
:param rev_comp_train: use the reverse complement to train
:return A generator that will yield a batch with number of examples equal to batch size
"""
# Calculate the number of ROIs to use based on the total batch size and proportion of random regions to use
n_roi = round(batch_size * (1. - rand_ratio))
# Calculate number of random regions to use each batch
n_rand = round(batch_size - n_roi)
if n_rand > 0:
# Generate the training random regions pool
train_random_regions_pool = RandomRegionsPool(chroms=build_chrom_sizes_dict(chroms, DEFAULT_CHROM_SIZES),
chrom_pool_size=CHR_POOL_SIZE,
region_length=INPUT_LENGTH,
preferences=False # can be None
)
# Initialize the random regions generator
rand_gen = create_random_batch(sequence=sequence,
meta_table=meta_table,
cell_type_list=cell_type_list,
n_rand=n_rand,
regions_pool=train_random_regions_pool,
bp_resolution=bp_resolution,
target_scale_factor=target_scale_factor,
rev_comp_train=rev_comp_train
)
# Initialize the ROI generator
roi_gen = create_roi_batch(sequence=sequence,
meta_table=meta_table,
roi_pool=roi_pool,
n_roi=n_roi,
cell_type_list=cell_type_list,
bp_resolution=bp_resolution,
target_scale_factor=target_scale_factor,
shuffle_cell_type=shuffle_cell_type,
rev_comp_train=rev_comp_train
)
while True:
# roi_batch.shape = (n_samples, 1024, 6)
if 0. < rand_ratio < 1.:
roi_input_batch, roi_target_batch = next(roi_gen)
rand_input_batch, rand_target_batch = next(rand_gen)
inputs_batch = np.concatenate((roi_input_batch, rand_input_batch), axis=0)
targets_batch = np.concatenate((roi_target_batch, rand_target_batch), axis=0)
elif rand_ratio == 1.:
rand_input_batch, rand_target_batch = next(rand_gen)
inputs_batch = rand_input_batch
targets_batch = rand_target_batch
else:
roi_input_batch, roi_target_batch = next(roi_gen)
inputs_batch = roi_input_batch
targets_batch = roi_target_batch
yield inputs_batch, targets_batch # change to yield
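# Hedged usage sketch (assumption, not part of the original module): shows how the
# DataGenerator defined above might be constructed and consumed. The 2bit path and the
# cell type name are hypothetical placeholders; meta_table and roi_pool are assumed to be
# dataframes formatted as described in the docstring. Wrapped in a function so importing
# this module performs no I/O.
def _example_data_generator_usage(meta_table, roi_pool):
    # Mix 75% regions of interest with 25% random regions per batch
    train_gen = DataGenerator(sequence="hg38.2bit",
                              meta_table=meta_table,
                              roi_pool=roi_pool,
                              cell_type_list=["GM12878"],
                              rand_ratio=0.25,
                              chroms=["chr1", "chr2"],
                              batch_size=BATCH_SIZE)
    inputs_batch, targets_batch = next(train_gen)
    return inputs_batch.shape, targets_batch.shape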
def get_input_matrix(signal_stream,
sequence_stream,
chromosome,
start, # end - start = cols
end,
rows=INPUT_CHANNELS,
cols=INPUT_LENGTH,
bp_order=BP_ORDER,
use_complement=False,
reverse_matrix=False
):
"""
    Get a matrix of values from the corresponding genomic position. You can specify whether you want to use the
complement sequence. You can also choose whether you want to reverse the whole matrix.
:param rows: Number of rows == channels
:param cols: Number of cols == region length
:param signal_stream: Signal bigwig stream
:param sequence_stream: 2bit DNA sequence stream
:param bp_order: BP order
:param chromosome: chromosome
:param start: start
:param end: end
:param use_complement: use complement strand for training
:param reverse_matrix: reverse the input matrix
:return: a matrix (rows x cols) of values from the input bigwig files
"""
input_matrix = np.zeros((rows, cols))
for n, bp in enumerate(bp_order):
# Get the sequence from the interval of interest
target_sequence = Seq(sequence_stream.sequence(chromosome, start, end))
if use_complement:
# Get the complement of the sequence
target_sequence = target_sequence.complement()
# Get the one hot encoded sequence
input_matrix[n, :] = get_one_hot_encoded(target_sequence, bp)
signal_array = np.array(signal_stream.values(chromosome, start, end))
input_matrix[4, :] = signal_array
# If reverse_matrix then reverse the matrix. This changes the left to right orientation.
if reverse_matrix:
input_matrix = input_matrix[::-1]
return input_matrix.T
def create_roi_batch(sequence,
meta_table,
roi_pool,
n_roi,
cell_type_list,
bp_resolution=1,
target_scale_factor=1,
shuffle_cell_type=False,
rev_comp_train=False
):
"""
Create a batch of examples from regions of interest. The batch size is defined by n_roi. This code will randomly
generate a batch of examples based on an input meta file that defines the paths to training data. The cell_type_list
is used to randomly select the cell type that the training signal is drawn from.
:param sequence: The input 2bit DNA sequence
:param meta_table: The meta file that contains the paths to signal and peak files
:param roi_pool: The pool of regions that we want to sample from
:param n_roi: The number of regions that go into each batch
:param cell_type_list: A list of unique training cell types
:param bp_resolution: The resolution of the output bins. i.e. 32 bp
:param shuffle_cell_type: Whether to shuffle cell types during training
:param rev_comp_train: use reverse complement for training
:return: np.array(inputs_batch), np.array(targets_batch)
"""
while True:
# Create empty lists that will hold the signal tracks
inputs_batch, targets_batch = [], []
# Get the shape of the ROI pool
roi_size = roi_pool.shape[0]
# Randomly select n regions from the pool
curr_batch_idxs = random.sample(range(roi_size), n_roi)
# Extract the signal for every sample
for row_idx in curr_batch_idxs:
roi_row = roi_pool.iloc[row_idx, :]
# If shuffle_cell_type the cell type will be randomly chosen
if shuffle_cell_type:
cell_line = random.choice(cell_type_list)
else:
cell_line = roi_row['Cell_Line']
# Get the paths for the cell type of interest.
meta_row = meta_table[(meta_table['Cell_Line'] == cell_line)]
meta_row = meta_row.reset_index(drop=True)
# Rename some variables. This just helps clean up code downstream
chrom_name = roi_row['Chr']
start = int(roi_row['Start'])
end = int(roi_row['Stop'])
signal = meta_row.loc[0, 'ATAC_Signal_File']
binding = meta_row.loc[0, 'Binding_File']
# Choose whether to use the reverse complement of the region
if rev_comp_train:
rev_comp = random.choice([True, False])
else:
rev_comp = False
with \
load_2bit(sequence) as sequence_stream, \
load_bigwig(signal) as signal_stream, \
load_bigwig(binding) as binding_stream:
# Get the input matrix of values and one-hot encoded sequence
input_matrix = get_input_matrix(signal_stream=signal_stream,
sequence_stream=sequence_stream,
chromosome=chrom_name,
start=start,
end=end,
use_complement=rev_comp,
reverse_matrix=rev_comp
)
# Append the sample to the inputs batch.
inputs_batch.append(input_matrix)
# Some bigwig files do not have signal for some chromosomes because they do not have peaks
# in those regions
# Our workaround for issue#42 is to provide a zero matrix for that position
try:
# Get the target matrix
target_vector = np.array(binding_stream.values(chrom_name, start, end)).T
except:
# TODO change length of array
target_vector = np.zeros(1024)
# change nan to numbers
target_vector = np.nan_to_num(target_vector, 0.0)
                # If using the reverse complement, reverse the target vector
if rev_comp:
target_vector = target_vector[::-1]
# get the number of 32 bp bins across the input sequence
n_bins = int(target_vector.shape[0] / bp_resolution)
# Split the data up into 32 x 32 bp bins.
split_targets = np.array(np.split(target_vector, n_bins, axis=0))
# TODO we might want to test what happens if we change the
bin_sums = np.sum(split_targets, axis=1)
bin_vector = np.where(bin_sums > 0.5 * bp_resolution, 1.0, 0.0)
# Append the sample to the target batch
targets_batch.append(bin_vector)
yield np.array(inputs_batch), np.array(targets_batch) # change to yield
def create_random_batch(
sequence,
meta_table,
cell_type_list,
n_rand,
regions_pool,
bp_resolution=1,
target_scale_factor=1,
rev_comp_train=False
):
"""
    This function will create a batch of randomly generated examples. The batch is built the same
    way as the ROI batches.
"""
while True:
inputs_batch, targets_batch = [], []
for idx in range(n_rand):
cell_line = random.choice(cell_type_list) # Randomly select a cell line
chrom_name, seq_start, seq_end = regions_pool.get_region() # returns random region (chrom_name, start, end)
meta_row = meta_table[(meta_table['Cell_Line'] == cell_line)] # get meta row for selected cell line
meta_row = meta_row.reset_index(drop=True)
signal = meta_row.loc[0, 'ATAC_Signal_File']
binding = meta_row.loc[0, 'Binding_File']
with \
load_2bit(sequence) as sequence_stream, \
load_bigwig(signal) as signal_stream, \
load_bigwig(binding) as binding_stream:
if rev_comp_train:
rev_comp = random.choice([True, False])
else:
rev_comp = False
input_matrix = get_input_matrix(signal_stream=signal_stream,
sequence_stream=sequence_stream,
chromosome=chrom_name,
start=seq_start,
end=seq_end,
use_complement=rev_comp,
reverse_matrix=rev_comp
)
inputs_batch.append(input_matrix)
try:
# Get the target matrix
                    target_vector = np.array(binding_stream.values(chrom_name, seq_start, seq_end)).T
except:
# TODO change length of array
target_vector = np.zeros(1024)
target_vector = np.nan_to_num(target_vector, 0.0)
if rev_comp:
target_vector = target_vector[::-1]
n_bins = int(target_vector.shape[0] / bp_resolution)
split_targets = np.array(np.split(target_vector, n_bins, axis=0))
bin_sums = np.sum(split_targets, axis=1)
bin_vector = np.where(bin_sums > 0.5 * bp_resolution, 1.0, 0.0)
targets_batch.append(bin_vector)
yield np.array(inputs_batch), np.array(targets_batch) # change to yield
class RandomRegionsPool:
"""
Generate a pool of random genomic regions
"""
def __init__(
self,
chroms, # in a form of {"chr1": {"length": 249250621, "region": [0, 249250621]}}, "region" is ignored
chrom_pool_size,
region_length,
preferences=None # bigBed file with ranges to limit random regions selection
):
self.chroms = chroms
self.chrom_pool_size = chrom_pool_size
self.region_length = region_length
self.preferences = preferences
# self.preference_pool = self.__get_preference_pool() # should be run before self.__get_chrom_pool()
self.preference_pool = False
self.chrom_pool = self.__get_chrom_pool()
# self.chrom_pool_size is updated to ensure compatibility between HG19 and HG38
self.chrom_pool_size = min(chrom_pool_size, len(self.chrom_pool))
self.__idx = 0
def get_region(self):
if self.__idx == self.chrom_pool_size:
random.shuffle(self.chrom_pool)
self.__idx = 0
chrom_name, chrom_length = self.chrom_pool[self.__idx]
self.__idx += 1
if self.preference_pool:
preference = random.sample(self.preference_pool[chrom_name], 1)[0]
start = round(random.randint(preference[0], preference[1] - self.region_length))
else:
start = round(random.randint(0, chrom_length - self.region_length))
end = start + self.region_length
return chrom_name, start, end
def __get_preference_pool(self):
preference_pool = {}
if self.preferences is not None:
with load_bigwig(self.preferences) as input_stream:
for chrom_name, chrom_data in self.chroms.items():
for entry in input_stream.entries(chrom_name, 0, chrom_data["length"], withString=False):
if entry[1] - entry[0] < self.region_length:
continue
preference_pool.setdefault(chrom_name, []).append(list(entry[0:2]))
return preference_pool
def __get_chrom_pool(self):
"""
TODO: rewrite to produce exactly the same number of items
as chrom_pool_size regardless of length(chroms) and
chrom_pool_size
"""
sum_lengths = sum(self.chroms.values())
frequencies = {
chrom_name: round(chrom_length / sum_lengths * self.chrom_pool_size)
for chrom_name, chrom_length in self.chroms.items()
}
labels = []
for k, v in frequencies.items():
labels += [(k, self.chroms[k])] * v
random.shuffle(labels)
return labels
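# Hedged usage sketch (assumption, not part of the original module): draws a single random
# fixed-length region from a pool restricted to a placeholder chromosome list. Wrapped in a
# function so importing this module performs no I/O.
def _example_random_regions_usage():
    pool = RandomRegionsPool(chroms=build_chrom_sizes_dict(["chr1"], DEFAULT_CHROM_SIZES),
                             chrom_pool_size=CHR_POOL_SIZE,
                             region_length=INPUT_LENGTH,
                             preferences=None)
    chrom_name, start, end = pool.get_region()
    return chrom_name, start, end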
class ROIPool(object):
"""
Import genomic regions of interest for training
"""
def __init__(self,
chroms,
roi_file_path,
meta_file,
prefix,
output_directory,
shuffle,
tag
):
"""
:param chroms: Chromosomes to limit the analysis to
:param roi_file_path: User provided ROI file path
:param meta_file: path to meta file
:param prefix: Prefix for saving output file
:param output_directory: Output directory to save files to
:param shuffle: Whether to shuffle the input ROI file
:param tag: Tag to use for writing the file.
"""
self.chroms = chroms
self.roi_file_path = roi_file_path
self.meta_file = meta_file
self.prefix = prefix
self.output_directory = output_directory
self.tag = tag
# If an ROI path is provided import it as the ROI pool
if self.roi_file_path:
self.ROI_pool = self.__import_roi_pool__(shuffle=shuffle)
# Import the data from the meta file.
else:
regions = GenomicRegions(meta_path=self.meta_file,
region_length=1024,
chromosomes=self.chroms,
chromosome_sizes_dictionary=build_chrom_sizes_dict(self.chroms,
DEFAULT_CHROM_SIZES),
blacklist=BLACKLISTED_REGIONS)
regions.write_data(self.prefix,
output_dir=self.output_directory,
set_tag=tag)
self.ROI_pool = regions.combined_pool
def __import_roi_pool__(self, shuffle=False):
"""
        Import the ROI file containing the regions of interest. The ROI dataframe is read from a TSV file that is
        formatted like a BED file but includes a header. The following columns are required:
Chr | Start | Stop | ROI_Type | Cell_Line
The chroms list is used to filter the ROI df to make sure that only training chromosomes are included.
:param shuffle: Whether to shuffle the dataframe upon import
:return: A pool of regions to use for training or validation
"""
roi_df = pd.read_csv(self.roi_file_path, sep="\t", header=0, index_col=None)
roi_df = roi_df[roi_df['Chr'].isin(self.chroms)]
if shuffle:
roi_df = roi_df.sample(frac=1)
return roi_df
class SeqDataGenerator(tf.keras.utils.Sequence):
    """Wrap a batch generator so it can be consumed as a Keras Sequence."""
    def __init__(self, batches, generator):
        # Store the number of batches per epoch and the underlying generator
        self.batches = batches
        self.generator = generator
    def __len__(self):
        # Denotes the number of batches per epoch
        return self.batches
    def __getitem__(self, index):
        # Generate one batch of data by pulling the next item from the generator
        return next(self.generator)
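# Hedged usage sketch (assumption, not part of the original module): wraps a batch generator
# in SeqDataGenerator so tf.keras can consume it with an explicit number of steps per epoch.
# 'compiled_model' and 'train_gen' are assumed to be provided by the caller; the step and
# epoch counts are placeholders.
def _example_fit_with_sequence(compiled_model, train_gen, steps_per_epoch=100, epochs=5):
    train_sequence = SeqDataGenerator(batches=steps_per_epoch, generator=train_gen)
    return compiled_model.fit(train_sequence, epochs=epochs)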
def model_selection(training_history, output_dir):
"""
    This function takes the training history, finds the epoch with the highest validation dice coefficient,
    and writes the path of the corresponding model file to best_epoch.txt.
"""
# Create a dataframe from the history object
df = pd.DataFrame(training_history.history)
epoch = df['val_dice_coef'].idxmax() + 1
# Get the realpath to the best model
out = pd.DataFrame([glob.glob(output_dir + "/*" + str(epoch) + ".h5")], columns=['Best_Model_Path'])
# Write the location of the best model to a file
out.to_csv(output_dir + "/" + "best_epoch.txt", sep='\t', index=None, header=None)
return epoch
class GenomicRegions(object):
"""
This class will generate a pool of examples based on regions of interest defined by ATAC-seq and ChIP-seq peaks.
"""
def __init__(self,
meta_path,
chromosomes,
chromosome_sizes_dictionary,
blacklist,
region_length
):
"""
When the object is initialized it will import all of the peaks in the meta files and parse them into training
        and validation regions of interest. These will be output in the form of a TSV-formatted file similar to a BED
file.
:param meta_path: Path to the meta file
:param chromosomes: List of chromosomes to use
:param chromosome_sizes_dictionary: A dictionary of chromosome sizes
:param blacklist: The blacklist file of BED regions to exclude
:param region_length: Length of the input regions
"""
self.meta_path = meta_path
self.chromosome_sizes_dictionary = chromosome_sizes_dictionary
self.chromosomes = chromosomes
self.blacklist = blacklist
self.region_length = region_length
# Import meta txt as dataframe
self.meta_dataframe = pd.read_csv(self.meta_path, sep='\t', header=0, index_col=None)
# Select Training Cell lines
self.meta_dataframe = self.meta_dataframe[self.meta_dataframe["Train_Test_Label"] == 'Train']
# Get a dictionary of {Cell Types: Peak Paths}
self.atac_dictionary = pd.Series(self.meta_dataframe.ATAC_Peaks.values,
index=self.meta_dataframe.Cell_Line).to_dict()
self.chip_dictionary = pd.Series(self.meta_dataframe.CHIP_Peaks.values,
index=self.meta_dataframe.Cell_Line).to_dict()
# You must generate the ROI pool before you can get the final shape
self.atac_roi_pool = self.__get_roi_pool(self.atac_dictionary, "ATAC", )
self.chip_roi_pool = self.__get_roi_pool(self.chip_dictionary, "CHIP")
self.combined_pool = pd.concat([self.atac_roi_pool, self.chip_roi_pool])
self.atac_roi_size = self.atac_roi_pool.shape[0]
self.chip_roi_size = self.chip_roi_pool.shape[0]
def __get_roi_pool(self, dictionary, roi_type_tag):
"""
Build a pool of regions of interest from BED files.
:param dictionary: A dictionary of Cell Types and their associated BED files
:param roi_type_tag: Tag used to name the type of ROI being generated. IE Chip or ATAC
:return: A dataframe of BED regions that are formatted for maxATAC training.
"""
bed_list = []
for roi_cell_tag, bed_file in dictionary.items():
bed_list.append(self.__import_bed(bed_file,
ROI_type_tag=roi_type_tag,
ROI_cell_tag=roi_cell_tag))
return pd.concat(bed_list)
def write_data(self, prefix="ROI_pool", output_dir="./ROI", set_tag="training"):
"""
        Write the ROI dataframe to a TSV and a BED file for ATAC, CHIP, and combined ROIs
:param set_tag: Tag for training or validation
:param prefix: Prefix for filenames to use
:param output_dir: Directory to output the bed and tsv files
:return: Write BED and TSV versions of the ROI data
"""
output_directory = get_dir(output_dir)
combined_BED_filename = os.path.join(output_directory, prefix + "_" + set_tag + "_ROI.bed.gz")
stats_filename = os.path.join(output_directory, prefix + "_" + set_tag + "_ROI_stats")
total_regions_stats_filename = os.path.join(output_directory,
prefix + "_" + set_tag + "_ROI_totalregions_stats")
self.combined_pool.to_csv(combined_BED_filename, sep="\t", index=False, header=False)
group_ms = self.combined_pool.groupby(["Chr", "Cell_Line", "ROI_Type"], as_index=False).size()
len_ms = self.combined_pool.shape[0]
group_ms.to_csv(stats_filename, sep="\t", index=False)
file = open(total_regions_stats_filename, "a")
file.write('Total number of regions found for ' + set_tag + ' are: {0}\n'.format(len_ms))
file.close()
def get_regions_list(self,
n_roi):
"""
Generate a batch of regions of interest from the input ChIP-seq and ATAC-seq peaks
:param n_roi: Number of regions to generate per batch
:return: A batch of training examples centered on regions of interest
"""
random_roi_pool = self.combined_pool.sample(n=n_roi, replace=True, random_state=1)
return random_roi_pool.to_numpy().tolist()
def __import_bed(self,
bed_file,
ROI_type_tag,
ROI_cell_tag):
"""
Import a BED file and format the regions to be compatible with our maxATAC models
:param bed_file: Input BED file to format
:param ROI_type_tag: Tag to use in the description column
:param ROI_cell_tag: Tag to use in the description column
:return: A dataframe of BED regions compatible with our model
"""
# Import dataframe
df = pd.read_csv(bed_file,
sep="\t",
usecols=[0, 1, 2],
header=None,
names=["Chr", "Start", "Stop"],
low_memory=False)
# Make sure the chromosomes in the ROI file frame are in the target chromosome list
df = df[df["Chr"].isin(self.chromosomes)]
# Find the length of the regions
df["length"] = df["Stop"] - df["Start"]
# Find the center of each peak.
# We might want to use bedtools to window the regions of interest around the peak.
df["center"] = np.floor(df["Start"] + (df["length"] / 2)).apply(int)
# The start of the interval will be the center minus 1/2 the desired region length.
df["Start"] = np.floor(df["center"] - (self.region_length / 2)).apply(int)
# the end of the interval will be the center plus 1/2 the desired region length
df["Stop"] = np.floor(df["center"] + (self.region_length / 2)).apply(int)
# The chromosome end is defined as the chromosome length
df["END"] = df["Chr"].map(self.chromosome_sizes_dictionary)
# Make sure the stop is less than the end
df = df[df["Stop"].apply(int) < df["END"].apply(int)]
# Make sure the start is greater than the chromosome start of 0
df = df[df["Start"].apply(int) > 0]
# Select for the first three columns to clean up
df = df[["Chr", "Start", "Stop"]]
# Import the dataframe as a pybedtools object so we can remove the blacklist
BED_df_bedtool = pybedtools.BedTool.from_dataframe(df)
# Import the blacklist as a pybedtools object
blacklist_bedtool = pybedtools.BedTool(self.blacklist)
# Find the intervals that do not intersect blacklisted regions.
blacklisted_df = BED_df_bedtool.intersect(blacklist_bedtool, v=True)
# Convert the pybedtools object to a pandas dataframe.
df = blacklisted_df.to_dataframe()
# Rename the columns
df.columns = ["Chr", "Start", "Stop"]
df["ROI_Type"] = ROI_type_tag
df["Cell_Line"] = ROI_cell_tag
return df
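# Hedged usage sketch (assumption, not part of the original module): builds an ROI pool from
# a hypothetical meta file and writes the combined BED/stats files to disk. All paths and the
# chromosome list are placeholders; the constructor arguments mirror GenomicRegions above.
def _example_genomic_regions_usage():
    chroms = ["chr1", "chr2"]
    regions = GenomicRegions(meta_path="meta.tsv",
                             chromosomes=chroms,
                             chromosome_sizes_dictionary=build_chrom_sizes_dict(chroms, DEFAULT_CHROM_SIZES),
                             blacklist=BLACKLISTED_REGIONS,
                             region_length=INPUT_LENGTH)
    regions.write_data(prefix="example", output_dir="./ROI", set_tag="training")
    return regions.combined_pool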
|
StarcoderdataPython
|
4983917
|
<filename>src/auth.py
"""
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import functools
from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for
from werkzeug.security import check_password_hash, generate_password_hash
from src.db import get_db
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/register', methods=('GET', 'POST'))
def register():
"""
:return:
"""
g.user = None
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
if not username:
error = 'Username is required.'
elif not password:
error = 'Password is required.'
elif db.execute(
'SELECT id FROM user WHERE username = ?', (username,)
).fetchone() is not None:
error = 'User {} is already registered.'.format(username)
if error is None:
db.execute(
'INSERT INTO user (username, password) VALUES (?, ?)',
(username, generate_password_hash(password))
)
db.commit()
return redirect(url_for('auth.login'))
flash(error)
return render_template('auth/register.html')
@bp.route('/login', methods=('GET', 'POST'))
def login():
"""
:return:
"""
g.user = None
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db = get_db()
error = None
user = db.execute(
'SELECT * FROM user WHERE username = ?', (username,)
).fetchone()
if user is None:
error = 'Incorrect username.'
elif not check_password_hash(user['password'], password):
error = 'Incorrect password.'
if error is None:
session.clear()
session['user_id'] = user['id']
session['username'] = user['username']
session['password'] = user['password']
return redirect(url_for('portal.home'))
flash(error)
return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
"""
:return:
"""
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
g.user = get_db().execute(
'SELECT * FROM user WHERE id = ?', (user_id,)
).fetchone()
@bp.route('/logout')
def logout():
"""
:return:
"""
session.clear()
return redirect(url_for('auth.login'))
def login_required(view):
"""
:param view:
:return:
"""
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
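# Hedged usage sketch (assumption, not part of the original module): shows how the
# login_required decorator above guards a view. The 'auth_demo' blueprint is hypothetical
# and is never registered with the app, so it has no effect unless a caller wires it up.
demo_bp = Blueprint('auth_demo', __name__, url_prefix='/demo')
@demo_bp.route('/secret')
@login_required
def demo_secret():
    # Reached only when load_logged_in_user has populated g.user;
    # anonymous requests are redirected to auth.login by the decorator.
    return 'Hello, {}'.format(session.get('username'))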
|
StarcoderdataPython
|
6691793
|
# Download data from tidepool
# See http://support.tidepool.org/article/37-export-your-account-data
import requests
from requests.auth import HTTPBasicAuth
import json, flatten_json, sys, csv
TIDEPOOL_LOGIN_URL="https://api.tidepool.org/auth/login"
TIDEPOOL_API_URL="https://api.tidepool.org/data/{userid}"
TIDEPOOL_TOKEN_HEADER="x-tidepool-session-token"
TIDEPOOL_CONTENT_ENCODING="utf-8" # assumed
def download_tidepool(email, password, fp=sys.stdout, login_url=TIDEPOOL_LOGIN_URL, format='json'):
login_response = requests.post(login_url, auth=HTTPBasicAuth(email, password))
token = login_response.headers[TIDEPOOL_TOKEN_HEADER]
login_content = json.loads(login_response.content)
userid = login_content["userid"]
api_url = TIDEPOOL_API_URL.format(userid=userid)
content_type="application/json"
api_response = requests.get(api_url, headers={ TIDEPOOL_TOKEN_HEADER: token, "Content-Type": content_type})
global ar
ar = api_response
if format == 'json':
# Write JSON content as is
fp.write(api_response.content.decode(TIDEPOOL_CONTENT_ENCODING))
else:
data = json.loads(api_response.content)
if format == 'json_lines':
for line in data:
fp.write(json.dumps(line)+"\n")
elif format in ['csv', 'flat_json', 'flat_json_lines']:
# Flatten JSON objects
flat_data = [flatten_json.flatten(i, separator=".") for i in data]
if format == 'flat_json':
json.dump(flat_data, fp)
elif format == 'flat_json_lines':
for line in flat_data:
fp.write(json.dumps(line)+"\n")
else: # csv
# get all possible keys
allkeys = set([])
for d in flat_data:
allkeys = allkeys.union(set(d.keys()))
allkeys=sorted(allkeys)
csvwriter = csv.writer(fp)
csvwriter.writerow(allkeys)
for f in flat_data:
to_write = [f.get(key) for key in allkeys]
csvwriter.writerow(to_write)
else:
raise Exception("Format not supported: {0}".format(format))
def main():
import argparse, os, getpass
parser = argparse.ArgumentParser(description="Download type 1 diabetes data from Tidepool.org")
parser.add_argument("email")
parser.add_argument("--format", default="csv")
args = parser.parse_args()
password = os.environ.get("TIDEPOOL_PASS")
if not password:
password = getpass.getpass(prompt="Enter your Tidepool.org password: ")
print("Hint: you can also set the TIDEPOOL_PASS environment variable",file=sys.stderr)
download_tidepool(args.email, password, sys.stdout, format=args.format)
if __name__=="__main__":
main()
|
StarcoderdataPython
|
1767377
|
"""Post-process the HTML produced by Sphinx.
Some modifications can be done more easily on the finished HTML.
This module defines a simple pipeline:
1. Read all HTML files
2. Parse them with `BeautifulSoup`
3. Perform a chain of actions on the tree in place
See the `_modify_html()` function for the list of
transformations.
Note: This file is not processed by Webpack; don't use Tailwind utility classes.
They might not show up in the final CSS.
:copyright: Copyright <NAME>.
:license: MIT, see LICENSE.
"""
import os
from typing import Any, Dict, List, Optional
from bs4 import BeautifulSoup, Comment
from sphinx.application import Sphinx
from sphinx.util import logging
from . import __version__
from .icons import ICONS
logger = logging.getLogger(__name__)
def _get_html_files(outdir: str) -> List[str]:
"""Get a list of HTML files."""
html_list = []
for root, _, files in os.walk(outdir):
html_list.extend(
[os.path.join(root, file) for file in files if file.endswith(".html")]
)
return html_list
def _collapsible_nav(tree: BeautifulSoup) -> None:
"""Restructure the navigation links to make them collapsible.
First, all links in the navigation sidebar are wrapped in a ``div``.
This allows them to be 'block' and 'position relative' for the
'expand' icon to be positioned against.
Second, an icon is inserted right before the link.
Adding the icon as separate DOM element allows click events to be
captured separately between the icon and the link.
"""
for link in tree.select(".nav-toc a"):
link["data-action"] = "click->sidebar#close"
# Don't add the nav-link class twice (#166)
if "nav-link" not in link.parent.get("class", []):
# First, all links should be wrapped in a div.nav-link
link.wrap(tree.new_tag("div", attrs={"class": "nav-link"}))
# Next, insert a span.expand before the link, if the #nav-link
# has any sibling elements (a ``ul`` in the navigation menu)
if link.parent.next_sibling:
# create the icon
svg = BeautifulSoup(ICONS["chevron_right"], "html.parser").svg
svg["tabindex"] = "0"
svg["class"] = ["expand"]
svg[
"data-action"
] = "click->sidebar#expand keydown->sidebar#expandKeyPressed"
link.insert_before(svg)
def _expand_current(tree: BeautifulSoup) -> None:
"""Add the ``.expanded`` class to li.current elements."""
for li in tree("li", class_="current"):
if "expanded" not in li.get("class", []):
li["class"] += ["expanded"]
def _remove_span_pre(tree: BeautifulSoup) -> None:
"""Unwrap unnecessary spans.
This gets added by visit_Text(). If I overwrite it there,
it's 20 lines of code for only 1 line of change.
"""
for span in tree("span", class_="pre"):
span.unwrap()
def _remove_empty_toctree(tree: BeautifulSoup) -> None:
"""Remove empty toctree divs.
If you include a `toctree` with the `hidden` option,
an empty `div` is inserted. Remove them.
The empty `div` contains a single `end-of-line` character.
"""
for div in tree("div", class_="toctree-wrapper"):
children = list(div.children)
if len(children) == 1 and not children[0].strip():
div.extract()
def _headerlinks(tree: BeautifulSoup) -> None:
"""Enhance the headerlink experience."""
for link in tree("a", class_="headerlink"):
link["data-controller"] = "clipboard"
link["data-action"] = "click->clipboard#copyHeaderLink"
link["aria-label"] = "Click to copy this link"
del link["title"]
link["class"].extend(["tooltipped", "tooltipped-ne"])
def _external_links(tree: BeautifulSoup) -> None:
"""Add `rel="nofollow noopener"` to external links."""
for link in tree("a", class_="reference external"):
link["rel"] = "nofollow noopener"
def _strip_comments(tree: BeautifulSoup) -> None:
"""Remove HTML comments from documents."""
comments = tree.find_all(string=lambda text: isinstance(text, Comment))
for c in comments:
c.extract()
def _modify_html(html_filename: str, app: Sphinx) -> None:
"""Modify a single HTML document.
1. The HTML document is parsed into a BeautifulSoup tree.
2. The modifications are performed in order and in place.
3. After these modifications, the HTML is written into a file,
overwriting the original file.
"""
with open(html_filename, encoding="utf-8") as html:
tree = BeautifulSoup(html, "html.parser")
_expand_current(tree)
_collapsible_nav(tree)
_remove_span_pre(tree)
_remove_empty_toctree(tree)
_external_links(tree)
if app.config.html_awesome_headerlinks:
_headerlinks(tree)
_strip_comments(tree)
with open(html_filename, "w") as out_file:
out_file.write(str(tree))
def post_process_html(app: Sphinx, exc: Optional[Exception]) -> None:
"""Perform modifications on the HTML after building.
This is an extra function, that gets a list from all HTML
files in the output directory, then runs the ``_modify_html``
function on each of them.
"""
if app.builder is not None and app.builder.name not in ["html", "dirhtml"]:
return
if exc is None:
html_files = _get_html_files(app.outdir)
for doc in html_files:
_modify_html(doc, app)
def setup(app: "Sphinx") -> Dict[str, Any]:
"""Set this up as internal extension."""
app.connect("build-finished", post_process_html)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
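# Hedged demonstration (assumption, not part of the original module): illustrates the kind of
# in-place transformation the pipeline applies, using two of the helpers above on an in-memory
# HTML fragment instead of a file on disk.
def _example_transformations():
    fragment = '<p><!-- internal note --><span class="pre">code</span></p>'
    tree = BeautifulSoup(fragment, "html.parser")
    _strip_comments(tree)   # removes the HTML comment
    _remove_span_pre(tree)  # unwraps the span.pre element
    return str(tree)        # '<p>code</p>'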
|
StarcoderdataPython
|
6462839
|
<gh_stars>0
import foreverbull.data.stock_data # noqa: F401
from foreverbull.data.data import Database
__all__ = ["Database"]
|
StarcoderdataPython
|
6629057
|
<reponame>jczaja/Paddle
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.v2.fluid.core as core
import numpy as np
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.framework import Program, program_guard
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.backward import append_backward
class TestCPULoDTensorArrayOps(unittest.TestCase):
def place(self):
return core.CPUPlace()
def test_split_and_merge_lod_tensor_no_lod(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
mask_np = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([2, 3, 4, 5]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_false_tensor = np.array([0, 1, 6, 7, 8, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def test_split_and_merge_lod_tensor_level_0(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 10]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([3, 4, 5, 6, 7, 8]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_true.set_lod([[0, 6]])
expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false_lod = [[0, 3, 4]]
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
expect_false.set_lod(expect_false_lod)
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def main(self, tensor, mask, expect_true, expect_false, expect_out,
level=0):
place = self.place()
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[1])
x.persistable = True
y = layers.data(name='y', shape=[1])
y.persistable = True
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out_true.persistable = True
out_false.persistable = True
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
out.persistable = True
exe = Executor(place)
scope = core.Scope()
exe.run(program,
feed={'x': tensor,
'y': mask},
scope=scope,
return_numpy=False)
var_true = scope.find_var(out_true.name).get_tensor()
var_false = scope.find_var(out_false.name).get_tensor()
var_out = scope.find_var(out.name).get_tensor()
self.check_tensor_same(var_true, expect_true)
self.check_tensor_same(var_false, expect_false)
self.check_tensor_same(var_out, expect_out)
def check_tensor_same(self, actual, expect):
self.assertTrue(np.allclose(np.array(actual), np.array(expect)))
self.assertEqual(actual.lod(), expect.lod())
class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
def test_grad(self):
place = core.CPUPlace()
program = Program()
with program_guard(program):
x = layers.data(
name='x', shape=[1], dtype='float32', stop_gradient=False)
y = layers.data(
name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
mean = layers.mean(x=out)
append_backward(mean)
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_lod([[0, 3, 9, 10]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, place)
exe = Executor(place)
scope = core.Scope()
g_vars = program.global_block().var(x.name + "@GRAD")
g_out = [
item.sum()
for item in map(np.array,
exe.run(program,
feed={'x': tensor,
'y': mask},
fetch_list=[g_vars],
scope=scope,
return_numpy=False))
]
g_out_sum = np.array(g_out).sum()
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1653228
|
<reponame>YZNIU/Cirq
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
def test_gate_calls_validate():
class ValiGate(cirq.Gate):
def validate_args(self, qubits):
if len(qubits) == 3:
raise ValueError()
g = ValiGate()
q00 = cirq.QubitId()
q01 = cirq.QubitId()
q10 = cirq.QubitId()
_ = g.on(q00)
_ = g.on(q01)
_ = g.on(q00, q10)
with pytest.raises(ValueError):
_ = g.on(q00, q10, q01)
_ = g(q00)
_ = g(q00, q10)
with pytest.raises(ValueError):
_ = g(q10, q01, q00)
def test_named_qubit_str():
q = cirq.NamedQubit('a')
assert q.name == 'a'
assert str(q) == 'a'
# Python 2 gives a different repr due to unicode strings being prefixed with u.
@cirq.testing.only_test_in_python3
def test_named_qubit_repr():
q = cirq.NamedQubit('a')
assert repr(q) == "NamedQubit('a')"
def test_operation_init():
q = cirq.QubitId()
g = cirq.Gate()
v = cirq.Operation(g, (q,))
assert v.gate == g
assert v.qubits == (q,)
def test_operation_eq():
g1 = cirq.Gate()
g2 = cirq.Gate()
r1 = [cirq.QubitId()]
r2 = [cirq.QubitId()]
r12 = r1 + r2
r21 = r2 + r1
eq = cirq.testing.EqualsTester()
eq.make_equality_pair(lambda: cirq.Operation(g1, r1))
eq.make_equality_pair(lambda: cirq.Operation(g2, r1))
eq.make_equality_pair(lambda: cirq.Operation(g1, r2))
eq.make_equality_pair(lambda: cirq.Operation(g1, r12))
eq.make_equality_pair(lambda: cirq.Operation(g1, r21))
eq.add_equality_group(cirq.Operation(cirq.CZ, r21),
cirq.Operation(cirq.CZ, r12))
# Interchangeable subsets.
class PairGate(cirq.Gate, cirq.InterchangeableQubitsGate):
def qubit_index_to_equivalence_group_key(self, index: int):
return index // 2
p = PairGate()
a0, a1, b0, b1, c0 = cirq.LineQubit.range(5)
eq.add_equality_group(p(a0, a1, b0, b1), p(a1, a0, b1, b0))
eq.add_equality_group(p(b0, b1, a0, a1))
eq.add_equality_group(p(a0, a1, b0, b1, c0), p(a1, a0, b1, b0, c0))
eq.add_equality_group(p(a0, b0, a1, b1, c0))
eq.add_equality_group(p(a0, c0, b0, b1, a1))
eq.add_equality_group(p(b0, a1, a0, b1, c0))
def test_operation_pow():
Y = cirq.Y
qubit = cirq.QubitId()
assert (Y ** 0.5)(qubit) == Y(qubit) ** 0.5
|
StarcoderdataPython
|
1949287
|
<reponame>csaid/bokeh
from os.path import abspath
import webbrowser
from . import settings
def get_browser_controller(browser=None):
browser = settings.browser(browser)
if browser is not None:
if browser == 'none':
class DummyWebBrowser(object):
def open(self, url, new=0, autoraise=True):
pass
controller = DummyWebBrowser()
else:
controller = webbrowser.get(browser)
else:
controller = webbrowser
return controller
def view(filename, browser=None, new=False, autoraise=True):
""" Opens a browser to view the file pointed to by this sessions.
**new** can be None, "tab", or "window" to view the file in the
existing page, a new tab, or a new windows. **autoraise** causes
the browser to be brought to the foreground; this may happen
automatically on some platforms regardless of the setting of this
variable.
"""
new_map = { False: 0, "window": 1, "tab": 2 }
file_url = "file://" + abspath(filename)
try:
controller = get_browser_controller(browser)
controller.open(file_url, new=new_map[new], autoraise=autoraise)
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
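# Illustrative usage sketch (not part of the original module; filename and
# browser name are hypothetical):
#
#   view("plot.html", new="tab")            # open in a new browser tab
#   view("plot.html", browser="firefox")    # force a specific browser
#
# If the configured browser resolves to 'none', get_browser_controller returns
# the DummyWebBrowser above and nothing is opened.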
|
StarcoderdataPython
|
6559142
|
import asyncio
class It:
def __iter__(self):
print("It.__iter__")
yield self # !!! this __iter__ itself is a generator
return None
def f():
it = It()
print("before yield from iterator obj")
yield from it
fut = asyncio.Future()
print("before")
res = yield from fut
print("after")
g = f()
res = g.send(None)
print(f"{res = }")
print("---")
res = g.send(None)
print(f"{res = }")
|
StarcoderdataPython
|
6566190
|
from math import gcd
def gcdi(a, b):
return gcd(a, b)
def lcmu(a, b):
return abs(a*b//gcd(a, b))
def som(a, b):
return a+b
def maxi(a, b):
return max(a, b)
def mini(a, b):
return min(a, b)
def oper_array(fct, arr, init):
res=[fct(arr[0], init)]
for i in range(1, len(arr)):
res.append(fct(arr[i], res[-1]))
return res
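# Illustrative examples (hypothetical inputs, not part of the original kata):
#   oper_array(som, [1, 2, 3], 0)       # -> [1, 3, 6]   (running sum)
#   oper_array(gcdi, [18, 24, 32], 96)  # -> [6, 6, 2]   (running gcd)
#   oper_array(maxi, [3, 1, 5], 0)      # -> [3, 3, 5]   (running max)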
|
StarcoderdataPython
|
3466520
|
<reponame>EladGabay/pulumi-oci
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IdpGroupMappingArgs', 'IdpGroupMapping']
@pulumi.input_type
class IdpGroupMappingArgs:
def __init__(__self__, *,
group_id: pulumi.Input[str],
identity_provider_id: pulumi.Input[str],
idp_group_name: pulumi.Input[str]):
"""
The set of arguments for constructing a IdpGroupMapping resource.
:param pulumi.Input[str] group_id: (Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
:param pulumi.Input[str] identity_provider_id: The OCID of the identity provider.
:param pulumi.Input[str] idp_group_name: (Updatable) The name of the IdP group you want to map.
"""
pulumi.set(__self__, "group_id", group_id)
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
pulumi.set(__self__, "idp_group_name", idp_group_name)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Input[str]:
"""
(Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> pulumi.Input[str]:
"""
The OCID of the identity provider.
"""
return pulumi.get(self, "identity_provider_id")
@identity_provider_id.setter
def identity_provider_id(self, value: pulumi.Input[str]):
pulumi.set(self, "identity_provider_id", value)
@property
@pulumi.getter(name="idpGroupName")
def idp_group_name(self) -> pulumi.Input[str]:
"""
(Updatable) The name of the IdP group you want to map.
"""
return pulumi.get(self, "idp_group_name")
@idp_group_name.setter
def idp_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "idp_group_name", value)
@pulumi.input_type
class _IdpGroupMappingState:
def __init__(__self__, *,
compartment_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
identity_provider_id: Optional[pulumi.Input[str]] = None,
idp_group_name: Optional[pulumi.Input[str]] = None,
inactive_state: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IdpGroupMapping resources.
:param pulumi.Input[str] compartment_id: The OCID of the tenancy containing the `IdentityProvider`.
:param pulumi.Input[str] group_id: (Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
:param pulumi.Input[str] identity_provider_id: The OCID of the identity provider.
:param pulumi.Input[str] idp_group_name: (Updatable) The name of the IdP group you want to map.
:param pulumi.Input[str] inactive_state: The detailed status of INACTIVE lifecycleState.
:param pulumi.Input[str] state: The mapping's current state.
:param pulumi.Input[str] time_created: Date and time the mapping was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if identity_provider_id is not None:
pulumi.set(__self__, "identity_provider_id", identity_provider_id)
if idp_group_name is not None:
pulumi.set(__self__, "idp_group_name", idp_group_name)
if inactive_state is not None:
pulumi.set(__self__, "inactive_state", inactive_state)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the tenancy containing the `IdentityProvider`.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the identity provider.
"""
return pulumi.get(self, "identity_provider_id")
@identity_provider_id.setter
def identity_provider_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_provider_id", value)
@property
@pulumi.getter(name="idpGroupName")
def idp_group_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The name of the IdP group you want to map.
"""
return pulumi.get(self, "idp_group_name")
@idp_group_name.setter
def idp_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "idp_group_name", value)
@property
@pulumi.getter(name="inactiveState")
def inactive_state(self) -> Optional[pulumi.Input[str]]:
"""
The detailed status of INACTIVE lifecycleState.
"""
return pulumi.get(self, "inactive_state")
@inactive_state.setter
def inactive_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "inactive_state", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The mapping's current state.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
Date and time the mapping was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
class IdpGroupMapping(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
identity_provider_id: Optional[pulumi.Input[str]] = None,
idp_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Idp Group Mapping resource in Oracle Cloud Infrastructure Identity service.
Creates a single mapping between an IdP group and an IAM Service
[group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/).
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_idp_group_mapping = oci.identity.IdpGroupMapping("testIdpGroupMapping",
group_id=oci_identity_group["test_group"]["id"],
identity_provider_id=oci_identity_identity_provider["test_identity_provider"]["id"],
idp_group_name=var["idp_group_mapping_idp_group_name"])
```
## Import
IdpGroupMappings can be imported using the `id`, e.g.
```sh
$ pulumi import oci:identity/idpGroupMapping:IdpGroupMapping test_idp_group_mapping "identityProviders/{identityProviderId}/groupMappings/{mappingId}"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] group_id: (Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
:param pulumi.Input[str] identity_provider_id: The OCID of the identity provider.
:param pulumi.Input[str] idp_group_name: (Updatable) The name of the IdP group you want to map.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IdpGroupMappingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Idp Group Mapping resource in Oracle Cloud Infrastructure Identity service.
Creates a single mapping between an IdP group and an IAM Service
[group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/).
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_idp_group_mapping = oci.identity.IdpGroupMapping("testIdpGroupMapping",
group_id=oci_identity_group["test_group"]["id"],
identity_provider_id=oci_identity_identity_provider["test_identity_provider"]["id"],
idp_group_name=var["idp_group_mapping_idp_group_name"])
```
## Import
IdpGroupMappings can be imported using the `id`, e.g.
```sh
$ pulumi import oci:identity/idpGroupMapping:IdpGroupMapping test_idp_group_mapping "identityProviders/{identityProviderId}/groupMappings/{mappingId}"
```
:param str resource_name: The name of the resource.
:param IdpGroupMappingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IdpGroupMappingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_id: Optional[pulumi.Input[str]] = None,
identity_provider_id: Optional[pulumi.Input[str]] = None,
idp_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IdpGroupMappingArgs.__new__(IdpGroupMappingArgs)
if group_id is None and not opts.urn:
raise TypeError("Missing required property 'group_id'")
__props__.__dict__["group_id"] = group_id
if identity_provider_id is None and not opts.urn:
raise TypeError("Missing required property 'identity_provider_id'")
__props__.__dict__["identity_provider_id"] = identity_provider_id
if idp_group_name is None and not opts.urn:
raise TypeError("Missing required property 'idp_group_name'")
__props__.__dict__["idp_group_name"] = idp_group_name
__props__.__dict__["compartment_id"] = None
__props__.__dict__["inactive_state"] = None
__props__.__dict__["state"] = None
__props__.__dict__["time_created"] = None
super(IdpGroupMapping, __self__).__init__(
'oci:identity/idpGroupMapping:IdpGroupMapping',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
identity_provider_id: Optional[pulumi.Input[str]] = None,
idp_group_name: Optional[pulumi.Input[str]] = None,
inactive_state: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None) -> 'IdpGroupMapping':
"""
Get an existing IdpGroupMapping resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compartment_id: The OCID of the tenancy containing the `IdentityProvider`.
:param pulumi.Input[str] group_id: (Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
:param pulumi.Input[str] identity_provider_id: The OCID of the identity provider.
:param pulumi.Input[str] idp_group_name: (Updatable) The name of the IdP group you want to map.
:param pulumi.Input[str] inactive_state: The detailed status of INACTIVE lifecycleState.
:param pulumi.Input[str] state: The mapping's current state.
:param pulumi.Input[str] time_created: Date and time the mapping was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IdpGroupMappingState.__new__(_IdpGroupMappingState)
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["group_id"] = group_id
__props__.__dict__["identity_provider_id"] = identity_provider_id
__props__.__dict__["idp_group_name"] = idp_group_name
__props__.__dict__["inactive_state"] = inactive_state
__props__.__dict__["state"] = state
__props__.__dict__["time_created"] = time_created
return IdpGroupMapping(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
"""
The OCID of the tenancy containing the `IdentityProvider`.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[str]:
"""
(Updatable) The OCID of the IAM Service [group](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/Group/) you want to map to the IdP group.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="identityProviderId")
def identity_provider_id(self) -> pulumi.Output[str]:
"""
The OCID of the identity provider.
"""
return pulumi.get(self, "identity_provider_id")
@property
@pulumi.getter(name="idpGroupName")
def idp_group_name(self) -> pulumi.Output[str]:
"""
(Updatable) The name of the IdP group you want to map.
"""
return pulumi.get(self, "idp_group_name")
@property
@pulumi.getter(name="inactiveState")
def inactive_state(self) -> pulumi.Output[str]:
"""
The detailed status of INACTIVE lifecycleState.
"""
return pulumi.get(self, "inactive_state")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The mapping's current state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
Date and time the mapping was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
|
StarcoderdataPython
|
4968823
|
<gh_stars>10-100
from typing import Tuple, List
from nltk import wordpunct_tokenize
def tokenize_and_clean_text(text: str) -> str:
return ' '.join([token.lower() for token in wordpunct_tokenize(text)
if token.isalpha() and token.lower()])
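# Illustrative example (hypothetical input, not part of the original module):
#   tokenize_and_clean_text("Hello, World 42!")  # -> 'hello world'
# wordpunct_tokenize splits off punctuation and digits, which the isalpha()
# filter then discards.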
def clean_formatting(text: List[str]) -> str:
return tokenize_and_clean_text(' '.join(text))
def preprocess_data(extracted_data: List[Tuple[str, str]]) -> List[str]:
"""
    Transform the data into the format fasttext expects:
    __label__[label] [text]
"""
return [f'__label__{data[0]} {clean_formatting(data[1])}' for data in extracted_data]
|
StarcoderdataPython
|
4852091
|
#from flask import Flask, request
from flask.ext.restful import Api
from penguicontrax import app
#api modules
import submissions
import tags
import tracks
import users
import presenters
#import json for date encoder
import json
#override for json encoder to handle datetime objects
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
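# Illustrative example of the encoder (hypothetical value; assumes `import datetime`,
# which this module does not do itself):
#   json.dumps({"created": datetime.datetime(2014, 1, 2, 3, 4, 5)}, cls=DateEncoder)
#   # -> '{"created": "2014-01-02T03:04:05"}'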
api = Api(app)
#define routes
@api.representation('application/json')
def json_date(data, code, headers=None):
#If it's a string, just return it as-is
resp = app.make_response(data if type(data) is str else json.dumps(data, cls=DateEncoder))
resp.headers.extend(headers or {})
resp.status_code = code
return resp
#submissions
##api.add_resource(submissions.SubmissionAPI,
## '/api/submission/<string:submission_id>/rsvp')
api.add_resource(submissions.SubmissionAPI,
'/api/submission/<string:submission_id>',
'/api/submission/<string:submission_id>/<string:noun>')
api.add_resource(submissions.SubmissionsAPI,
'/api/submissions')
#tags
api.add_resource(tags.TagsAPI,
'/api/tags')
api.add_resource(tags.UserTagsAPI,
'/api/user-tags')
api.add_resource(tags.UserTagAPI,
'/api/user-tag/<string:name>')
#tracks
api.add_resource(tracks.TracksAPI,
'/api/tracks')
#users
api.add_resource(users.UsersAPI,
'/api/users')
api.add_resource(users.UserAPI,
'/api/user/<int:id>')
api.add_resource(users.UserSubmissionsAPI,
'/api/user/<int:id>/submissions')
api.add_resource(users.UserPresentationsAPI,
'/api/user/<int:id>/presentations')
#presenters
api.add_resource(presenters.PresentersAPI,
'/api/presenters')
|
StarcoderdataPython
|
8172794
|
# -*- coding: utf-8 -*-
"""
flaskext.session
~~~~~~~~~~~~~~~~
Adds server session support to your application.
:copyright: (c) 2014 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.3.0'
import os
from .sessions import NullSessionInterface, RedisSessionInterface, \
MemcachedSessionInterface, FileSystemSessionInterface, \
MongoDBSessionInterface, SqlAlchemySessionInterface, \
PeeweeSessionInterface
class Session(object):
"""This class is used to add Server-side Session to one or more Flask
applications.
There are two usage modes. One is initialize the instance with a very
specific Flask application::
app = Flask(__name__)
Session(app)
The second possibility is to create the object once and configure the
application later::
sess = Session()
def create_app():
app = Flask(__name__)
sess.init_app(app)
return app
    By default Flask-Session will use :class:`NullSessionInterface`, you
    really should configure your app to use a different SessionInterface.
.. note::
        You can not use a ``Session`` instance directly; what ``Session`` does
        is simply change the :attr:`~flask.Flask.session_interface` attribute on
        your Flask applications.
"""
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
"""This is used to set up session for your app object.
:param app: the Flask app object with proper configuration.
"""
app.session_interface = self._get_interface(app)
def _get_interface(self, app):
config = app.config.copy()
config.setdefault('SESSION_TYPE', 'null')
config.setdefault('SESSION_PERMANENT', True)
config.setdefault('SESSION_USE_SIGNER', False)
config.setdefault('SESSION_KEY_PREFIX', 'session:')
config.setdefault('SESSION_REDIS', None)
config.setdefault('SESSION_MEMCACHED', None)
config.setdefault('SESSION_FILE_DIR',
os.path.join(os.getcwd(), 'flask_session'))
config.setdefault('SESSION_FILE_THRESHOLD', 500)
config.setdefault('SESSION_FILE_MODE', 384)
config.setdefault('SESSION_MONGODB', None)
config.setdefault('SESSION_MONGODB_DB', 'flask_session')
config.setdefault('SESSION_MONGODB_COLLECT', 'sessions')
config.setdefault('SESSION_SQLALCHEMY', None)
config.setdefault('SESSION_SQLALCHEMY_TABLE', 'sessions')
config.setdefault('SESSION_PEEWEE_TABLE', 'sessions')
if config['SESSION_TYPE'] == 'redis':
session_interface = RedisSessionInterface(
config['SESSION_REDIS'], config['SESSION_KEY_PREFIX'],
config['SESSION_USE_SIGNER'], config['SESSION_PERMANENT'])
elif config['SESSION_TYPE'] == 'memcached':
session_interface = MemcachedSessionInterface(
config['SESSION_MEMCACHED'], config['SESSION_KEY_PREFIX'],
config['SESSION_USE_SIGNER'], config['SESSION_PERMANENT'])
elif config['SESSION_TYPE'] == 'filesystem':
session_interface = FileSystemSessionInterface(
config['SESSION_FILE_DIR'], config['SESSION_FILE_THRESHOLD'],
config['SESSION_FILE_MODE'], config['SESSION_KEY_PREFIX'],
config['SESSION_USE_SIGNER'], config['SESSION_PERMANENT'])
elif config['SESSION_TYPE'] == 'mongodb':
session_interface = MongoDBSessionInterface(
config['SESSION_MONGODB'], config['SESSION_MONGODB_DB'],
config['SESSION_MONGODB_COLLECT'],
config['SESSION_KEY_PREFIX'], config['SESSION_USE_SIGNER'],
config['SESSION_PERMANENT'])
elif config['SESSION_TYPE'] == 'sqlalchemy':
session_interface = SqlAlchemySessionInterface(
app, config['SESSION_SQLALCHEMY'],
config['SESSION_SQLALCHEMY_TABLE'],
config['SESSION_KEY_PREFIX'], config['SESSION_USE_SIGNER'],
config['SESSION_PERMANENT'])
elif config['SESSION_TYPE'] == 'peewee':
session_interface = PeeweeSessionInterface(
config.get('SESSION_DB'),
config['SESSION_PEEWEE_CONFIG'],
config['SESSION_DB_CLASS'],
config['SESSION_PEEWEE_TABLE'],
config['SESSION_KEY_PREFIX'], config['SESSION_USE_SIGNER'],
config['SESSION_PERMANENT'])
else:
session_interface = NullSessionInterface()
return session_interface
|
StarcoderdataPython
|
4880876
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import sys
import types
from contextlib import contextmanager
import six
try:
import numpy
except ImportError:
numpy = None
ignore_vars = [
"__ipy_scope__", # Added by nbtutor to the calling frame globals
"__name__",
"__builtin__",
"__builtins__",
"__module__",
"__qualname__",
"__doc__",
"__dict__",
"__package__",
"__weakref__",
]
float_types = [
float,
]
primitive_types = [
int,
float,
bool,
type(None),
complex,
]
sequence_types = [
set,
tuple,
list,
]
array_types = [
]
key_value_types = [
dict,
]
primitive_types.extend(list(six.string_types))
if numpy is not None:
new_types = []
np_types = list(set(numpy.typeDict.values()))
np_type_names = [t.__name__ for t in np_types]
for _type in primitive_types:
for i, name in enumerate(np_type_names):
if _type.__name__ in name or isinstance(np_types[i], (_type, )):
new_types.append(np_types[i])
for i, name in enumerate(np_type_names):
if 'float' in name or isinstance(np_types[i], (float, )):
float_types.append(np_types[i])
primitive_types.extend(new_types)
array_types.append(numpy.ndarray)
float_types = tuple(float_types)
primitive_types = tuple(primitive_types)
sequence_types = tuple(sequence_types)
array_types = tuple(array_types)
key_value_types = tuple(key_value_types)
def filter_dict(d, exclude):
"""Return a new dict with specified keys excluded from the origional dict
Args:
d (dict): origional dict
exclude (list): The keys that are excluded
"""
ret = {}
for key, value in d.items():
if key not in exclude:
ret.update({key: value})
return ret
@contextmanager
def redirect_stdout(new_stdout):
"""Redirect the stdout
Args:
new_stdout (io.StringIO): New stdout to use instead
"""
old_stdout, sys.stdout = sys.stdout, new_stdout
try:
yield None
finally:
sys.stdout = old_stdout
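# Illustrative usage of redirect_stdout (not part of the original module):
#   import io
#   buf = io.StringIO()
#   with redirect_stdout(buf):
#       print('captured')
#   buf.getvalue()  # -> 'captured\n'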
def format(obj, options):
"""Return a string representation of the Python object
Args:
obj: The Python object
options: Format options
"""
formatters = {
float_types: lambda x: '{:.{}g}'.format(x, options.digits),
}
for _types, fmtr in formatters.items():
if isinstance(obj, _types):
return fmtr(obj)
try:
if six.PY2 and isinstance(obj, six.string_types):
return str(obj.encode('utf-8'))
return str(obj)
except:
return 'OBJECT'
def get_type_info(obj):
"""Get type information for a Python object
Args:
obj: The Python object
Returns:
tuple: (object type "catagory", object type name)
"""
if isinstance(obj, primitive_types):
return ('primitive', type(obj).__name__)
if isinstance(obj, sequence_types):
return ('sequence', type(obj).__name__)
if isinstance(obj, array_types):
return ('array', type(obj).__name__)
if isinstance(obj, key_value_types):
return ('key-value', type(obj).__name__)
if isinstance(obj, types.ModuleType):
return ('module', type(obj).__name__)
if isinstance(obj, (types.FunctionType, types.MethodType)):
return ('function', type(obj).__name__)
if isinstance(obj, type):
if hasattr(obj, '__dict__'):
return ('class', obj.__name__)
if isinstance(type(obj), type):
if hasattr(obj, '__dict__'):
cls_name = type(obj).__name__
if cls_name == 'classobj':
cls_name = obj.__name__
return ('class', '{}'.format(cls_name))
if cls_name == 'instance':
cls_name = obj.__class__.__name__
return ('instance', '{} instance'.format(cls_name))
return ('unknown', type(obj).__name__)
|
StarcoderdataPython
|
11358684
|
<gh_stars>0
import json
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_text(request):
print("Received request")
text = json.loads(request.body)["text"]
return JsonResponse({"response": "You said: %s" % text})
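# Illustrative request against this view (assumes it is routed at /process/,
# which is not defined in this file):
#   curl -X POST http://localhost:8000/process/ -d '{"text": "hello"}'
#   # -> {"response": "You said: hello"}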
|
StarcoderdataPython
|
8152498
|
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
# imports
try:
from .packages.preprocessing import pad_batches, process_raw_data
from .packages.preprocessing import load_embedding, TSVDataset
from .packages.preprocessing import RNNDataset, collate_fn
from .packages.ann_models import MLPModel, RNNModel
except:
from packages.preprocessing import pad_batches, process_raw_data
from packages.preprocessing import load_embedding, TSVDataset
from packages.preprocessing import RNNDataset, collate_fn
from packages.ann_models import MLPModel, RNNModel
from sklearn.metrics import accuracy_score, f1_score
from torch._C import device
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
import torch
import time
import pickle
# configure optimal parameters
## data specific
batch_size = 20 # TODO check if this is desired batch size with fab and eiv
device_ = 'cpu'
pos_tagged = False
random_state = 21 # TODO change 3 times
store = True
train_data_url = 'data/stanford_sentiment_binary.tsv.gz'
train_prop = 0.75 # use full train set on optimal model
verbose = False
vocab_path = '/cluster/shared/nlpl/data/vectors/latest/22.zip' # other good ones were 40, 82, ...
# vocab_path = 'data/22.zip' # other good ones were 40, 82, ...
## model specific
model_params = {
'n_hl': 3, # rnn param
'dropout': 0.2, # not needed, bc singular output. left here for ref
'epochs': 50, # anywhere between 30 and 60 seemed reasonable
'units': 50, # between 25 and 50, w/ 50 giving optimal score
'lr': 0.01, # dependent on momentum
'momentum': 0.9, # dependent on learning rate
'device': "cpu", # take as str not torch.device -> easier json print
'loss_funct': "cross-entropy", # most stable throughout experiments
'random_state': 1, # to test different splits
'verbose': verbose,
'rnn_type': 'gru', # rnn, lstm, gru
'bidirectional': True, # checks in first study
'freeze': True, # only difference in training time, True -> faster
'lr_scheduler': True, # major help for score improvement
'factor': 0.01, # not too much affect, but still optimal
'patience': 2, # same as above
'pool_type': 'first', # first assuming bidirectional, last otherwise
}
def run(random_state=random_state):
# load embeding
embedder = load_embedding(vocab_path)
embedder.add('<unk>', weights=torch.rand(embedder.vector_size))
embedder.add('<pad>', weights=torch.zeros(embedder.vector_size))
pad_token = embedder.vocab['<pad>'].index
# load data set/loaders
df_train, df_test = process_raw_data(
data_url=train_data_url,
train_prop=train_prop,
verbose=verbose,
pos_tagged=pos_tagged,
random_state=random_state
)
train_data = RNNDataset(
embedder=embedder,
df=df_train,
device=device_,
random_state=random_state,
label_vocab=None,
verbose=verbose
)
test_data = RNNDataset(
embedder=embedder,
df=df_test,
device=device_,
random_state=random_state,
label_vocab=train_data.label_vocab,
verbose=verbose
)
del df_test, df_train
train_loader = DataLoader(
train_data,
batch_size=batch_size,
shuffle=True,
collate_fn=lambda x: collate_fn(x, pad_token, device=device_)
)
test_loader = DataLoader(
test_data,
batch_size=len(test_data),
collate_fn=lambda x: collate_fn(x, pad_token, device=device_)
)
# build/fit the model
model = RNNModel(
emb=embedder,
num_features=embedder.vector_size,
**model_params
)
model.fit(
loader=train_loader,
verbose=verbose,
test=None # no need to evaluate along the way
)
# test the model
y_test, y_pred = model.predict_classes(test_loader)
print(f'Random state: {random_state}')
print(f'F1 Score: {f1_score(y_test, y_pred)}')
print(f'Accuracy: {accuracy_score(y_test, y_pred)}')
return model, train_data
if __name__=='__main__':
# model_1337 = run(1337)
# model_42 = run(42)
# model_13032021 = run(13032021)
# # will need to store the model
# if store:
# with open('model_1337.pkl', 'wb+') as f:
# pickle.dump(model_1337, f)
# with open('model_42.pkl', 'wb+') as f:
# pickle.dump(model_42, f)
# with open('model_13032021.pkl', 'wb+') as f:
# pickle.dump(model_13032021, f)
train_prop = 0.999 # use all the data as training data for optimal model
model, data = run()
torch.save(model.state_dict(), "model.pt")
with open('train_data.pkl', 'wb+') as f:
pickle.dump(data, f)
|
StarcoderdataPython
|
3370847
|
<filename>problemas-resolvidos/neps-academy/programacao-basica-competicoes-c++/python/Exercicio-28-Titulo.py<gh_stars>0
frase = input().split()
def maiuscula(s):
saida = ''
for i in s:
temp = i.lower()
saida = saida + temp.capitalize() + " "
return (saida)
print(maiuscula(frase))
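# Illustrative run (hypothetical input): typing "hello world" prints
# "Hello World " (each word capitalized, with a trailing space left by the loop).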
|
StarcoderdataPython
|
4893382
|
#!/usr/bin/env python
"""
Base class / functionality for an (illumination) amplitude control device.
Hazen 04/17
"""
from PyQt5 import QtCore
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.sc_hardware.baseClasses.hardwareModule as hardwareModule
class AmplitudeWorker(hardwareModule.HardwareWorker):
pass
class AmplitudeMixin(object):
"""
These are the methods that illumination.illumination will
expect an amplitude functionality to have.
    Note that illumination uses sliders to control the amplitude, so the
    minimum and maximum must be integers.
"""
def __init__(self, display_normalized = True, minimum = 0, maximum = 10, used_during_filming = True, **kwds):
super().__init__(**kwds)
assert isinstance(display_normalized, bool)
assert isinstance(minimum, int)
assert isinstance(maximum, int)
assert isinstance(used_during_filming, bool)
self.display_normalized = display_normalized
self.maximum = maximum
self.minimum = minimum
self.used_during_filming = used_during_filming
def getDisplayNormalized(self):
return self.display_normalized
def getMaximum(self):
return self.maximum
def getMinimum(self):
return self.minimum
def getUsedDuringFilming(self):
return self.used_during_filming
def onOff(self, power, state):
"""
This is usually called when the illumination channel check box is toggled. Devices
like lasers and AOTFs are expected to go to 'power' and then not respond to further
        power changes if state is False. Other devices like filter wheels will likely
just ignore this as they are usually not used to also turn the channel on/off.
"""
assert False
def output(self, power):
"""
This is usually called when the illumination channel slider is moved. Some
channels will ignore this when they are turned off, others like filter wheels
might still move.
"""
assert False
class AmplitudeFunctionality(hardwareModule.HardwareFunctionality, AmplitudeMixin):
"""
Base class for an amplitude functionality.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
class AmplitudeFunctionalityBuffered(hardwareModule.BufferedFunctionality, AmplitudeMixin):
"""
Base class for a buffered amplitude functionality.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
class AmplitudeModule(hardwareModule.HardwareModule):
"""
These modules will always provide a single functionality with the
name 'module_name.amplitude_modulation'. This functionality is
primarily used by illumination.illumination.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.device_mutex = QtCore.QMutex()
def getFunctionality(self, message):
pass
def processMessage(self, message):
if message.isType("get functionality"):
self.getFunctionality(message)
elif message.isType("start film"):
self.startFilm(message)
elif message.isType("stop film"):
self.stopFilm(message)
def startFilm(self, message):
pass
def stopFilm(self, message):
pass
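# Minimal sketch of a concrete amplitude functionality (an assumption about how
# the mixin above is typically filled in, not code from this repository):
#
#   class PrintAmplitudeFunctionality(AmplitudeFunctionality):
#       def __init__(self, **kwds):
#           super().__init__(**kwds)
#       def onOff(self, power, state):
#           self.output(power)        # lasers/AOTFs would also latch `state`
#       def output(self, power):
#           print("amplitude ->", power)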
|
StarcoderdataPython
|
1882383
|
import subprocess
def get_git_repo_head_branch(repo):
"""
A helper method to get the reference of the HEAD branch of a git remote repo.
https://stackoverflow.com/a/41925348
"""
out = subprocess.check_output(
["git", "ls-remote", "--symref", repo, "HEAD"]
).decode()
head_branch = out.split()[1]
return head_branch
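# Illustrative example (hypothetical repository URL; requires git and network access):
#   get_git_repo_head_branch("https://github.com/example/repo")  # e.g. 'refs/heads/main'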
|
StarcoderdataPython
|
1836383
|
<gh_stars>10-100
from sacrerouge.datasets.nytimes.subcommand import NYTimesSubcommand
|
StarcoderdataPython
|
3329400
|
<filename>src/run_locally.py<gh_stars>1-10
"""
Start a simulation
"""
import compile_erl
import clean_state
from lib_run import *
def main():
compile_erl.compile_all()
clean_state.clear()
ip_addr = get_ip_addr()
args = sys.argv
n_cars = string_to_int(args[1]) if len(args) == 2 else 1
# Run our system
cloud_name = run_cloud(ip_addr)
run_user(cloud_name, ip_addr)
run_cars(n_cars,cloud_name, ip_addr)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8047392
|
<reponame>tantalum7/manf
class Index(object):
def __init__(self, app):
# Store ref to top level app and database
self._app = app
self._db = app.database
def fetch(self, id=None, EPN=None):
pass
def search(self, query):
pass
def list_all(self, filter=None, projection=None):
pass
|
StarcoderdataPython
|
1909445
|
<reponame>jshulkinVSA/VSA-Challenges<gh_stars>0
# Name:
# Date:
# proj07: Word Game
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k':
5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u':
1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
WORDLIST_FILENAME = "words.txt"
def load_words():
print "Loading word list from file..."
inFile = open(WORDLIST_FILENAME, 'r', 0)
wordlist = []
for line in inFile:
wordlist.append(line.strip().lower())
print " ", len(wordlist), "words loaded."
return wordlist
def get_frequency_dict(sequence):
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
def get_word_score(word, n):
word_score = 0
for letter in word:
word_score = word_score+SCRABBLE_LETTER_VALUES[letter]
word_score = word_score*len(word)
if n == len(word):
word_score = word_score+50
return word_score
def display_hand(hand):
for letter in hand.keys():
for j in range(hand[letter]):
print letter,
print
def deal_hand(n):
hand={}
num_vowels = n / 3
for i in range(num_vowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(num_vowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
def update_hand(hand, word):
updated_hand = hand.copy()
for letter in word:
if letter in hand:
updated_hand[letter]=updated_hand[letter]-1
return updated_hand
def is_valid_word(word, hand, word_list):
copy_hand = hand.copy()
word_copy = word
answer = True
if word not in word_list:
answer = False
for letter in word_copy:
if letter in copy_hand:
if copy_hand[letter] != 0:
copy_hand[letter] = copy_hand[letter]-1
else:
answer=False
elif letter not in copy_hand:
answer = False
return answer
def calculate_handlen(hand):
handlen = 0
for v in hand.values():
handlen += v
return handlen
def play_hand(hand, word_list):
word = ""
score = 0
while HAND_SIZE != 0 and word != ".":
print "Current Hand",
display_hand(hand)
word = raw_input("Enter word, or a '.' to indicate that you are finished: ")
word = word.lower()
validity = is_valid_word(word,hand, word_list)
if validity is False and word != ".":
print "That is not a valid word, try again"
elif word != ".":
score = score+get_word_score(word, HAND_SIZE)
print word, "earned", get_word_score(word,HAND_SIZE), "points. Total:", score, "points"
hand = update_hand(hand, word)
print "Thanks for playing. You earned", score, "points."
return score
def play_game(word_list):
hand = deal_hand(HAND_SIZE)
play_hand(hand, word_list)
player_input = ""
while player_input != "e":
player_input = raw_input("Type 'n' to to play a new hand, type 'r' to replay hand, or type 'e' to exit the game: ")
if player_input == "e":
print "Thanks for playing."
quit()
elif player_input == "r":
play_hand(hand, word_list)
elif player_input == "n":
hand= deal_hand(HAND_SIZE)
play_hand(hand, word_list)
if __name__ == '__main__':
word_list = load_words()
play_game(word_list)
|
StarcoderdataPython
|
63416
|
from Xlib import X, display
def lock_screen(display: display.Display, screen_nb: int):
screen = display.screen(screen_nb)
root = screen.root
display_width = screen.width_in_pixels
display_height = screen.height_in_pixels
window = root.create_window(0, 0, display_width, display_height,
0, screen.root_depth, window_class=X.CopyFromParent,
visual=screen.root_visual,
override_redirect=1, background_pixel=screen.black_pixel)
pixmap = window.create_pixmap(8, 8, 1)
invisible_cursor = pixmap.create_cursor(pixmap, (0, 0, 0), (0, 0, 0), 0, 0)
window.change_attributes(cursor=invisible_cursor) # what XDefineCursor does under the hood
pointer_mask = X.ButtonPressMask | X.ButtonReleaseMask | X.PointerMotionMask
window.grab_pointer(False, event_mask=pointer_mask,
pointer_mode=X.GrabModeAsync, keyboard_mode=X.GrabModeAsync,
confine_to=X.NONE, cursor=invisible_cursor, time=X.CurrentTime)
window.grab_keyboard(True, pointer_mode=X.GrabModeAsync,
keyboard_mode=X.GrabModeAsync, time=X.CurrentTime)
window.map()
def lock(display: display.Display):
for screen in range(display.screen_count()):
lock_screen(display, screen)
display.sync()
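# Illustrative usage (left commented out because it really grabs the keyboard
# and pointer of the running X session):
#   if __name__ == '__main__':
#       d = display.Display()
#       lock(d)
#       while True:
#           d.next_event()  # keep the client alive so the grabs persist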
|
StarcoderdataPython
|
1682372
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides class GRPCBridge."""
from enum import Enum
from threading import Condition
from typing import Iterator, Optional
from flwr.proto.transport_pb2 import ClientMessage, ServerMessage
class GRPCBridgeClosed(Exception):
"""Error signaling that GRPCBridge is closed."""
class Status(Enum):
"""Status through which the bridge can transition."""
AWAITING_SERVER_MESSAGE = 1
SERVER_MESSAGE_AVAILABLE = 2
AWAITING_CLIENT_MESSAGE = 3
CLIENT_MESSAGE_AVAILABLE = 4
CLOSED = 5
class GRPCBridge:
"""GRPCBridge holding client_message and server_message.
For understanding this class it is recommended to understand how
the threading.Condition class works. See here:
- https://docs.python.org/3/library/threading.html#condition-objects
"""
def __init__(self) -> None:
"""Init bridge."""
# Disable all unsubscriptable-object violations in __init__ method
# pylint: disable=unsubscriptable-object
self._cv = Condition() # cv stands for condition variable
self._status = Status.AWAITING_SERVER_MESSAGE
self._server_message: Optional[ServerMessage] = None
self._client_message: Optional[ClientMessage] = None
def _is_closed(self) -> bool:
"""Return True if closed and False otherwise."""
return self._status == Status.CLOSED
def _raise_if_closed(self) -> None:
if self._status == Status.CLOSED:
raise GRPCBridgeClosed()
def _transition(self, next_status: Status) -> None:
"""Validate status transition and set next status.
        The caller of the transition method will have to acquire the
        condition variable.
"""
if next_status == Status.CLOSED:
self._status = next_status
elif (
self._status == Status.AWAITING_SERVER_MESSAGE
and next_status == Status.SERVER_MESSAGE_AVAILABLE
and self._server_message is not None
and self._client_message is None
):
self._status = next_status
elif (
self._status == Status.SERVER_MESSAGE_AVAILABLE
and next_status == Status.AWAITING_CLIENT_MESSAGE
and self._server_message is None
and self._client_message is None
):
self._status = next_status
elif (
self._status == Status.AWAITING_CLIENT_MESSAGE
and next_status == Status.CLIENT_MESSAGE_AVAILABLE
and self._server_message is None
and self._client_message is not None
):
self._status = next_status
elif (
self._status == Status.CLIENT_MESSAGE_AVAILABLE
and next_status == Status.AWAITING_SERVER_MESSAGE
and self._server_message is None
and self._client_message is None
):
self._status = next_status
else:
raise Exception(f"Invalid transition: {self._status} to {next_status}")
self._cv.notify_all()
def close(self) -> None:
"""Set bridge status to closed."""
with self._cv:
self._transition(Status.CLOSED)
def request(self, server_message: ServerMessage) -> ClientMessage:
"""Set server massage and wait for client message."""
# Set server message and transition to SERVER_MESSAGE_AVAILABLE
with self._cv:
self._raise_if_closed()
if self._status != Status.AWAITING_SERVER_MESSAGE:
raise Exception("This should not happen")
self._server_message = server_message # Write
self._transition(Status.SERVER_MESSAGE_AVAILABLE)
# Read client message and transition to AWAITING_SERVER_MESSAGE
with self._cv:
self._cv.wait_for(
lambda: self._status in [Status.CLOSED, Status.CLIENT_MESSAGE_AVAILABLE]
)
self._raise_if_closed()
client_message = self._client_message # Read
self._client_message = None # Reset
self._transition(Status.AWAITING_SERVER_MESSAGE)
if client_message is None:
raise Exception("Client message can not be None")
return client_message
def server_message_iterator(self) -> Iterator[ServerMessage]:
"""Return iterator over server messages."""
while not self._is_closed():
with self._cv:
self._cv.wait_for(
lambda: self._status
in [Status.CLOSED, Status.SERVER_MESSAGE_AVAILABLE]
)
self._raise_if_closed()
server_message = self._server_message # Read
self._server_message = None # Reset
# Transition before yielding as after the yield the execution of this
# function is paused and will resume when next is called again.
# Also release condition variable by exiting the context
self._transition(Status.AWAITING_CLIENT_MESSAGE)
if server_message is None:
raise Exception("Server message can not be None")
yield server_message
def set_client_message(self, client_message: ClientMessage) -> None:
"""Set client message for consumption."""
with self._cv:
self._raise_if_closed()
if self._status != Status.AWAITING_CLIENT_MESSAGE:
raise Exception("This should not happen")
self._client_message = client_message # Write
self._transition(Status.CLIENT_MESSAGE_AVAILABLE)
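# Illustrative sketch of how the two sides of the bridge cooperate (an assumption
# based on the docstrings above, not code from this repository):
#
#   from threading import Thread
#   bridge = GRPCBridge()
#
#   def client_side():
#       for _server_msg in bridge.server_message_iterator():
#           bridge.set_client_message(ClientMessage())
#
#   Thread(target=client_side, daemon=True).start()
#   reply = bridge.request(ServerMessage())  # blocks until the client answers
#   bridge.close()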
|
StarcoderdataPython
|
6496346
|
<reponame>h77h7/tvm-04.26<gh_stars>10-100
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test legalize pass"""
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def test_legalize():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def legalize_conv2d(attrs, inputs, types):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize_none():
"""Test doing nothing by returning 'None' """
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.nn.global_max_pool2d(x)
y = relay.Function([x], y)
return y
called = [False]
def legalize_conv2d(attrs, inputs, types):
called[0] = True
return None
with TempOpAttr("nn.global_max_pool2d", "FTVMLegalize", legalize_conv2d):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(before(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
assert called[0]
def test_legalize_multiple_ops():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
def legalize_conv2d(attrs, inputs, types):
data, weight = inputs
weight = relay.multiply(weight, relay.const(2.0, "float32"))
return relay.nn.conv2d(data, weight, **attrs)
def legalize_relu(attrs, inputs, types):
data = inputs[0]
add = relay.add(tvm.relay.const(0, "float32"), data)
return relay.nn.relu(add)
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
weight = relay.var("weight", shape=(64, 64, 3, 3))
y = relay.nn.conv2d(
x,
relay.multiply(weight, relay.const(2.0, "float32")),
channels=64,
kernel_size=(3, 3),
padding=(1, 1),
)
y = relay.add(tvm.relay.const(0, "float32"), y)
y = relay.nn.relu(y)
y = relay.Function([x, weight], y)
return y
with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
with TempOpAttr("nn.relu", "FTVMLegalize", legalize_relu):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize_multi_input():
"""Test directly replacing an operator with a new one"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.var("y", shape=(1, 64, 56, 20))
z = relay.var("z", shape=(1, 64, 56, 10))
func = relay.concatenate([x, y, z], axis=3)
func = relay.Function([x, y, z], func)
return func
def legalize_concatenate(attrs, inputs, types):
# Check that the correct multi-input case is handled.
assert len(inputs) == 1
assert isinstance(inputs[0], tvm.relay.expr.Tuple)
assert len(types) == 2
assert isinstance(types[0], tvm.relay.ty.TupleType)
assert isinstance(types[1], tvm.relay.ty.TensorType)
return None
def expected():
x = relay.var("x", shape=(1, 64, 56, 56))
y = relay.var("y", shape=(1, 64, 56, 20))
z = relay.var("z", shape=(1, 64, 56, 10))
func = relay.concatenate([x, y, z], axis=3)
func = relay.Function([x, y, z], func)
return func
with TempOpAttr("concatenate", "FTVMLegalize", legalize_concatenate):
a = before()
a = run_opt_pass(a, transform.Legalize())
b = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
if __name__ == "__main__":
test_legalize()
test_legalize_none()
test_legalize_multiple_ops()
test_legalize_multi_input()
|
StarcoderdataPython
|
8122778
|
'''
Test files must start with test_ (ending with _test also works).
Test classes must start with Test and must not define an __init__ method.
Test functions must start with test_.
Plain assert statements are enough for assertions.
'''
#! /usr/bin/env python
#coding=utf-8
import random
import pytest
def bubble_sort(nums):
for i in range(len(nums)-1):
for j in range(len(nums)-i-1):
if nums[j] > nums[j+1]:
nums[j], nums[j+1] = nums[j+1], nums[j]
return random.choice([nums,None,10])
# rerun-on-failure demo: bubble_sort above returns a random result, so this test is flaky
@pytest.mark.flaky(reruns=5)
def test_sort():
assert bubble_sort([9,5,8,6]) == [5,6,8,9]
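# Illustrative: run with `pytest -q <this file>`; the flaky marker comes from the
# pytest-rerunfailures plugin, so the randomly failing assertion above is retried
# up to 5 times before being reported as a failure.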
|
StarcoderdataPython
|
3510341
|
import torch as t
class Config:
    model_path = None  # pretrained model to load; None means train from scratch
    model = 'SqueezeNet1_0'  # model to load; the name must match one defined in models/__init__.py
'''
ShuffleNetV2,MobileNetV2,SqueezeNet1_0,SqueezeNet1_1
VGG11,VGG13,VGG16,VGG19
ResNet18,ResNet34,ResNet50
'''
    lr = 0.0005  # learning rate
    use_gpu = True  # whether to run on the GPU
MEAN=(0.485, 0.456, 0.406)
    STD=(0.229, 0.224, 0.225)  # normalization mean and std
    train_epoch = 1  # number of passes over the training set
    save_every = 1  # save the model every N epochs
    # good values taken from ImageNet; for the derivation see
    # https://cloud.tencent.com/developer/ask/153881
    test_num = 16  # number of samples used for the attack and for testing
    batch_size = 128  # samples fed to the network per step
    print_freq = 500  # print progress every N training batches
    num_workers = 8  # number of data-loading worker threads
def _parese(self):
self.device = t.device('cuda') if self.use_gpu else t.device('cpu')
        print('Calculate on {}'.format(self.device))
print('user config:')
for k, v in self.__class__.__dict__.items():
if not k.startswith('_'):
print(k, getattr(self, k))
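# Illustrative usage (an assumption about how this config object is consumed
# elsewhere in the project):
#   opt = Config()
#   opt._parese()  # resolves opt.device and prints the non-private settings
#   print(opt.model, opt.lr)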
|
StarcoderdataPython
|
8193711
|
<filename>zaza/openstack/charm_tests/charm_upgrade/tests.py
#!/usr/bin/env python3
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define class for Charm Upgrade."""
import logging
import unittest
import zaza.model
from zaza.openstack.utilities import (
cli as cli_utils,
upgrade_utils as upgrade_utils,
)
from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest
class FullCloudCharmUpgradeTest(unittest.TestCase):
"""Class to encapsulate Charm Upgrade Tests."""
@classmethod
def setUpClass(cls):
"""Run setup for Charm Upgrades."""
cli_utils.setup_logging()
cls.lts = LTSGuestCreateTest()
cls.target_charm_namespace = '~openstack-charmers-next'
def get_upgrade_url(self, charm_url):
"""Return the charm_url to upgrade to.
:param charm_url: Current charm url.
:type charm_url: str
"""
charm_name = upgrade_utils.extract_charm_name_from_url(
charm_url)
next_charm_url = zaza.model.get_latest_charm_url(
"cs:{}/{}".format(self.target_charm_namespace, charm_name))
return next_charm_url
def test_200_run_charm_upgrade(self):
"""Run charm upgrade."""
self.lts.test_launch_small_instance()
applications = zaza.model.get_status().applications
groups = upgrade_utils.get_charm_upgrade_groups()
for group_name, group in groups.items():
logging.info("About to upgrade {} ({})".format(group_name, group))
for application, app_details in applications.items():
if application not in group:
continue
target_url = self.get_upgrade_url(app_details['charm'])
if target_url == app_details['charm']:
logging.warn(
"Skipping upgrade of {}, already using {}".format(
application,
target_url))
else:
logging.info("Upgrading {} to {}".format(
application,
target_url))
zaza.model.upgrade_charm(
application,
switch=target_url)
logging.info("Waiting for charm url to update")
zaza.model.block_until_charm_url(application, target_url)
zaza.model.block_until_all_units_idle()
self.lts.test_launch_small_instance()
|
StarcoderdataPython
|
6630111
|
"""
(Testing FPS)
Pixel Difference Networks for Efficient Edge Detection (accepted as an ICCV 2021 oral)
See paper in https://arxiv.org/abs/2108.07009
Author: <NAME>, <NAME>
Date: Aug 22, 2020
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import argparse
import os
import time
import models
from utils import *
from edge_dataloader import BSDS_VOCLoader, BSDS_Loader, Multicue_Loader, NYUD_Loader
from torch.utils.data import DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
parser = argparse.ArgumentParser(description='PyTorch Diff Convolutional Networks (Train)')
parser.add_argument('--datadir', type=str, default='../data',
help='dir to the dataset')
parser.add_argument('--dataset', type=str, default='BSDS',
help='data settings for BSDS, Multicue and NYUD datasets')
parser.add_argument('--model', type=str, default='baseline',
help='model to train the dataset')
parser.add_argument('--sa', action='store_true',
help='use attention in diffnet')
parser.add_argument('--dil', action='store_true',
help='use dilation in diffnet')
parser.add_argument('--config', type=str, default='nas-all',
help='model configurations, please refer to models/config.py for possible configurations')
parser.add_argument('--seed', type=int, default=None,
help='random seed (default: None)')
parser.add_argument('--gpu', type=str, default='',
help='gpus available')
parser.add_argument('--epochs', type=int, default=150,
help='number of total epochs to run')
parser.add_argument('-j', '--workers', type=int, default=4,
help='number of data loading workers')
parser.add_argument('--eta', type=float, default=0.3,
help='threshold to determine the ground truth')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
def main():
global args
### Refine args
if args.seed is None:
args.seed = int(time.time())
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
args.use_cuda = torch.cuda.is_available()
dataset_setting_choices = ['BSDS', 'NYUD-image', 'NYUD-hha', 'Multicue-boundary-1',
'Multicue-boundary-2', 'Multicue-boundary-3', 'Multicue-edge-1', 'Multicue-edge-2', 'Multicue-edge-3']
if not isinstance(args.dataset, list):
assert args.dataset in dataset_setting_choices, 'unrecognized data setting %s, please choose from %s' % (str(args.dataset), str(dataset_setting_choices))
args.dataset = list(args.dataset.strip().split('-'))
print(args)
### Create model
model = getattr(models, args.model)(args)
### Transfer to cuda devices
if args.use_cuda:
model = torch.nn.DataParallel(model).cuda()
print('cuda is used, with %d gpu devices' % torch.cuda.device_count())
else:
print('cuda is not used, the running might be slow')
### Load Data
if 'BSDS' == args.dataset[0]:
test_dataset = BSDS_VOCLoader(root=args.datadir, split="test", threshold=args.eta)
elif 'Multicue' == args.dataset[0]:
test_dataset = Multicue_Loader(root=args.datadir, split="test", threshold=args.eta, setting=args.dataset[1:])
elif 'NYUD' == args.dataset[0]:
test_dataset = NYUD_Loader(root=args.datadir, split="test", setting=args.dataset[1:])
else:
raise ValueError("unrecognized dataset setting")
test_loader = DataLoader(
test_dataset, batch_size=1, num_workers=args.workers, shuffle=False)
test(test_loader, model, args)
return
def test(test_loader, model, args):
model.eval()
    if args.use_cuda:
        torch.cuda.synchronize()
    end = time.perf_counter()
for idx, (image, img_name) in enumerate(test_loader):
with torch.no_grad():
image = image.cuda() if args.use_cuda else image
_, _, H, W = image.shape
results = model(image)
    if args.use_cuda:
        torch.cuda.synchronize()
end = time.perf_counter() - end
print('fps: %f' % (len(test_loader) / end))
if __name__ == '__main__':
main()
print('done')
|
StarcoderdataPython
|
3340422
|
from typing import List
from nameko.standalone.rpc import ClusterRpcProxy
import logging
import time
from isharp.evalengine.core import Evaluator,EvalMethod
from isharp.datahub.core import DatahubTarget
logger = logging.getLogger(__name__)
def remote_config(net_location: str):
return {
'serializer': 'pickle',
'AMQP_URI': 'pyamqp://guest:guest@{}'.format(net_location),
'rpc_exchange': 'nameko-rpc',
'max_workers': 10,
'parent_calls_tracked': 10
}
class AsyncEvalServiceInvoker:
def __init__(self, conf):
self.rpc_proxy = ClusterRpcProxy(conf)
self.proxy = self.rpc_proxy.start()
def eval(self, method: EvalMethod, inputs: List[DatahubTarget]) -> List[DatahubTarget]:
result = self.proxy.evaluation_service.eval(method, inputs)
print (result)
def stop(self):
self.rpc_proxy.stop()
invoker = AsyncEvalServiceInvoker(remote_config("localhost"))
invoker.eval(None,[])
|
StarcoderdataPython
|
1816037
|
from gevent import monkey
monkey.patch_all()
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room, \
    close_room, disconnect
from crontab import CronTab
from datetime import datetime
# cron = CronTab(user=True)
# # get already created crontab
# for job in cron.find_comment("clock"):
# if job.hour == hour and job.minute == minute:
# pass
# else :
# #remove old cron
# cron.remove_all(comment="clock")
# job = cron.new(command="python /home/pi/sunlight_alarm_clock/light/python/transition.py", comment='clock weekday')
# job.hour.on(hour)
# job.minute.on(minute)
# schedule = job.schedule(date_from=datetime.now())
# cron.write()
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@app.route('/')
def index():
return render_template('index.html')
@socketio.on('onConnect', namespace='/test')
def test_connect():
print("Connected")
@socketio.on('onDisconnect', namespace='/test')
def test_disconnect():
print('Client disconnected')
@socketio.on('onChangeTime', namespace='/test')
def time_change(message):
hour, minute = message["data"].split(":")
print("New wake up time: {}-{}".format(hour, minute))
TEST = True
if TEST:
cron = CronTab(tabfile="test.CronTab")
else:
cron = CronTab(user=True)
# get already created crontab
for job in cron.find_comment("clock weekday"):
print(job)
if job.hour == hour and job.minute == minute:
pass
else :
#remove old cron
cron.remove_all(comment="clock weekday")
job = cron.new(command="python /home/pi/sunlight_alarm_clock/light/python/transition.py", comment='clock weekday')
job.hour.on(hour)
job.minute.on(minute)
schedule = job.schedule(date_from=datetime.now())
if TEST:
cron.write("test.CronTab")
else:
cron.write()
@socketio.on('disconnect request', namespace='/test')
def disconnect_request():
session['receive_count'] = session.get('receive_count', 0) + 1
emit('my response',
{'data': 'Disconnected!', 'count': session['receive_count']})
disconnect()
if __name__ == '__main__':
socketio.run(app, host="0.0.0.0")
|
StarcoderdataPython
|
11209519
|
import numpy as np
import pickle
import os
SEA_MONSTER = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
],
    dtype=bool,
)
def parse_tile(raw_tile):
header, rows = raw_tile.split("\n", 1)
tile_id = int(header[5:-1])
tile_data = [[c == "#" for c in row] for row in rows.split("\n")]
    tile_data = np.array(tile_data, dtype=bool)
return tile_id, tile_data
def transformations(tile):
rot90 = np.rot90(tile, 1)
rot180 = np.rot90(tile, 2)
rot270 = np.rot90(tile, 3)
return [
tile,
rot90,
rot180,
rot270,
np.flip(tile, axis=0),
np.flip(rot90, axis=0),
np.flip(rot180, axis=0),
np.flip(rot270, axis=0),
]
def can_place(image, tile, r, c):
if r > 0:
tile_above = image[r - 1, c][1]
if not np.array_equal(tile_above[-1], tile[0]):
return False
if c > 0:
tile_left = image[r, c - 1][1]
if not np.array_equal(tile_left[:, -1], tile[:, 0]):
return False
return True
def build_image(tiles):
if os.path.exists("arranged-tiles.pkl"):
with open("arranged-tiles.pkl", "rb") as fp:
return pickle.load(fp)
print("Arranging tiles...")
unplaced_tiles = set(tiles.keys())
image = dict()
n = int(np.sqrt(len(tiles)))
def _build_image(pos):
if pos == n ** 2:
return True
r = pos // n
c = pos % n
for tile_id in tiles.keys():
if tile_id not in unplaced_tiles:
continue
for tile in transformations(tiles[tile_id]):
if can_place(image, tile, r, c):
image[r, c] = (tile_id, tile)
unplaced_tiles.remove(tile_id)
if _build_image(pos + 1):
return True
del image[r, c]
unplaced_tiles.add(tile_id)
return False
_build_image(0)
with open("arranged-tiles.pkl", "wb") as fp:
pickle.dump(image, fp)
return image
def glue_image(image):
n = int(np.sqrt(len(image)))
glued_rows = []
for r in range(n):
glued_rows.append(np.hstack([image[r, c][1][1:-1, 1:-1] for c in range(n)]))
return np.vstack(glued_rows)
def get_monster_locations(image):
h, w = SEA_MONSTER.shape
n = image.shape[0]
monster_locations = np.zeros_like(image)
for image_view, monster_view in zip(
transformations(image), transformations(monster_locations)
):
for r in range(n - h):
for c in range(n - w):
if np.array_equal(
SEA_MONSTER, image_view[r : r + h, c : c + w] & SEA_MONSTER
):
monster_view[r : r + h, c : c + w] |= SEA_MONSTER
return monster_locations
def main():
with open("input.txt", "r") as fp:
tiles = fp.read().strip().split("\n\n")
tiles = dict(map(parse_tile, tiles))
image = build_image(tiles)
n = int(np.sqrt(len(tiles)))
corner_product = np.prod(
[
image[0, 0][0],
image[0, n - 1][0],
image[n - 1, 0][0],
image[n - 1, n - 1][0],
]
)
print("Part I:", corner_product)
image = glue_image(image)
monster_locations = get_monster_locations(image)
water_roughness = np.count_nonzero(image) - np.count_nonzero(monster_locations)
print("Part II:", water_roughness)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8103024
|
# Copyright 2016 Semaphore Solutions, Inc.
# ---------------------------------------------------------------------------
from ._internal import WrappedXml, FieldsMixin, ClarityElement
from ._internal.props import subnode_property, subnode_element_list, attribute_property, subnode_link
from s4.clarity.file import File
from s4.clarity.configuration.stage import Stage
from s4.clarity.process import Process
QC_PASSED = "PASSED"
QC_FAILED = "FAILED"
QC_UNKNOWN = "UNKNOWN"
STAGE_STATUS_QUEUED = "QUEUED"
STAGE_STATUS_REMOVED = "REMOVED"
STAGE_STATUS_IN_PROGRESS = "IN_PROGRESS"
class WorkflowStageHistory(WrappedXml):
uri = attribute_property("uri")
status = attribute_property("status")
name = attribute_property("name")
stage = subnode_link(Stage, ".")
class ReagentLabel(WrappedXml):
name = attribute_property("name", readonly=True)
class Artifact(FieldsMixin, ClarityElement):
"""
Reference: https://www.genologics.com/files/permanent/API/latest/data_art.html#artifact
"""
UNIVERSAL_TAG = "{http://genologics.com/ri/artifact}artifact"
type = subnode_property("type")
output_type = subnode_property("output-type")
location_value = subnode_property("location/value")
workflow_stages = subnode_element_list(WorkflowStageHistory, "workflow-stages", "workflow-stage", readonly=True)
reagent_labels = subnode_element_list(ReagentLabel, ".", "reagent-label", readonly=True)
parent_process = subnode_link(Process, 'parent-process')
def __init__(self, lims, uri=None, xml_root=None, name=None, limsid=None):
super(Artifact, self).__init__(lims, uri, xml_root, name, limsid)
@property
def parent_step(self):
""":type: Step"""
return self.lims.steps.from_link_node(self.xml_find("./parent-process"))
@property
def sample(self):
""":type: Sample"""
return self.lims.samples.from_link_node(self.xml_find("./sample"))
@property
def samples(self):
""":type: list[Sample]"""
return self.lims.samples.from_link_nodes(self.xml_findall("./sample"))
@property
def file(self):
""":type: File"""
f = self.lims.files.from_link_node(self.xml_find('./{http://genologics.com/ri/file}file'))
if f is None:
f = File.new_empty(self)
f.name = self.name
return f
@property
def is_control(self):
""":type: bool"""
return self.xml_find('./control-type') is not None
@property
def control_type(self):
""":type: ControlType"""
return self.lims.control_types.from_link_node(self.xml_find("./control-type"))
@property
def queued_stages(self):
""":type: set[Stage]"""
queued_stages = set()
for stage_history in self.workflow_stages:
if stage_history.status == STAGE_STATUS_QUEUED:
queued_stages.add(stage_history.stage)
elif stage_history.status == STAGE_STATUS_REMOVED or stage_history.status == STAGE_STATUS_IN_PROGRESS:
# It is possible a QUEUED stage history was left in the list. Remove if present
queued_stages.discard(stage_history.stage)
return queued_stages
@property
def qc(self):
"""
Whether QC is marked as PASSED or FAILED on the artifact
============= ==========
Clarity Value Bool Value
============= ==========
PASSED True
FAILED False
UNKNOWN None
============= ==========
:type: bool
"""
qctext = self.get_node_text('qc-flag')
if qctext == QC_PASSED:
return True
elif qctext == QC_FAILED:
return False
else:
return None
@qc.setter
def qc(self, value):
"""
:type value: bool
"""
self.set_qc_flag(value)
def qc_passed(self):
""":rtype: bool"""
return self.get_node_text('qc-flag') == QC_PASSED
def qc_failed(self):
""":rtype: bool"""
return self.get_node_text('qc-flag') == QC_FAILED
def set_qc_flag(self, value):
"""
The `qc` property should be used in favor of this method.
:type value: bool
:param value: `True` if PASSED, `False` if FAILED, `None` to unset.
"""
if value is None:
qc = QC_UNKNOWN
elif value:
qc = QC_PASSED
else:
qc = QC_FAILED
self.set_subnode_text('qc-flag', qc)
# TODO: move this to another file/class, or something.
def open_file(self, mode, only_write_locally=False, name=None):
"""
:type mode: str
:param mode: 'r', 'r+', 'w', 'a', 'rb', 'r+b', 'wb', 'ab'.
NOTE: 'r+' sets initial file position to the beginning, 'a' sets it to the end.
:type only_write_locally: bool
:param only_write_locally: if true, don't upload this file to Clarity.
:type name: str
:param name: The name that will be used if you are creating a new file.
:rtype: File
"""
f = self.file
if name is not None:
f.name = name
f.only_write_locally = only_write_locally
f.mode = mode
if "w" in mode:
f.truncate()
elif "r" in mode:
f.writeable = False
elif "a" in mode:
f.seek_to_end()
return f
@property
def container(self):
"""
From "location.container". For XML value "location.value", use Python property ``.location_value``.
:type: Container
"""
return self.lims.containers.from_link_node(self.xml_find("./location/container"))
@property
def reagent_label_names(self):
# type: () -> list[str]
""":type: list[str]"""
return [l.name for l in self.reagent_labels]
@property
def reagent_label_name(self):
# type: () -> str
""":type: str"""
label_names = self.reagent_label_names
num_labels = len(label_names)
if num_labels > 1:
raise Exception("Artifact has multiple reagent labels.")
if num_labels == 0:
return None
return label_names[0]
@reagent_label_name.setter
def reagent_label_name(self, reagent_label_name):
# type: (str) -> None
"""
:type reagent_label_name: str
"""
reagent_label = self.make_subelement_with_parents("./reagent-label")
reagent_label.set("name", reagent_label_name)
def _get_attach_to_key(self):
return self.type, ""
|
StarcoderdataPython
|
6677571
|
<reponame>qmeeus/Object-detection
import os, os.path as p
import sys
from time import sleep, time
import numpy as np
import ffmpeg
import subprocess
from queue import PriorityQueue
from multiprocessing import Pool, log_to_stderr
from realtime_object_detection.detection import ObjectDetection
from realtime_object_detection.io import InputStream, OutputFile, OutputStream
from realtime_object_detection.utils.logger import logger
BATCH_SIZE = 1
QUEUE_SIZE = 10000
FPS = 30
def abspath(relpath):
return p.abspath(p.join(p.dirname(__file__), '..', relpath))
logger.info(' REALTIME OBJECT DETECTION ')
input_stream = InputStream(os.environ['INPUT_STREAM_URL'], QUEUE_SIZE, FPS)
input_stream.start()
# fps = input_stream.getFPS()
output_file = OutputFile(abspath(os.environ['OUTPUT_FILE']), 'XVID', {
'fps': FPS, 'frameSize': input_stream.video_size
})
output_stream = OutputStream(
os.environ['OUTPUT_STREAM_URL'],
input_cfg={"format": "rawvideo", "s": "{}x{}".format(*input_stream.video_size)},
filter_cfg={"fps": FPS, "round": "up"},
output_cfg={
"format": "flv", "pix_fmt": "yuv420p", 'preset': 'slower',
"movflags": "faststart", "qscale:v": 3
}
)
outputs = [
output_file,
output_stream
]
try:
logger.info('+++ Processing starts +++')
detector = ObjectDetection()
while not input_stream.is_empty:
logger.debug(f'{input_stream.size} remaining frames in queue')
batch_size = min(BATCH_SIZE, input_stream.size)
frame_ids, frames = map(list, zip(*((input_stream.read() for _ in range(batch_size)))))
assert type(frame_ids) is list
assert type(frames) is list
frames_out = detector.detect_objects(frames)
for frame_out in frames_out:
for output in outputs:
output.write_frame(frame_out)
except KeyboardInterrupt:
logger.info('Cleaning outputs')
for output in outputs:
output.clean()
detector.clean()
|
StarcoderdataPython
|
5002002
|
# Modified from https://github.com/yenchanghsu/out-of-distribution-detection/blob/master/methods/ood_detection.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np
from sklearn import metrics
import copy
import os
from metrics.ood import tnr_at_tpr95, detection, AverageMeter
from utils.misc import cov
class Baseline(object):
def __init__(self, args, cnn, train_loader, val_loader, num_classes):
self.config = args
self.num_classes = num_classes
self.cnn = cnn
self.cnn.cuda()
self.cnn.zero_grad()
self.prepare(train_loader, val_loader)
# self.in_domain_scores = self.get_scores(val_loader)
self.perturb_magnitude = 0.0
def prepare(self, train_loader, val_loader):
return
def scoring(self, x):
# max softmax
if isinstance(x, dict):
logits = x['logits']
else:
logits = x
prob = F.softmax(logits, dim=1)
score, score_idx = prob.max(dim=1)
return score, score_idx
def get_scores(self, dataloader):
self.cnn.eval()
scores, scores_idx = [], []
for input, target in dataloader:
input = input.cuda()
# target = target.cuda()
if 'cosine' in self.config.model_type:
# get features before last layer for Mahalanobis methods
_, _, output = self.cnn.forward(input)
else:
output = self.cnn.forward(input)
score, score_idx = self.scoring(output)
scores.extend(score.cpu().detach().numpy())
scores_idx.extend(score_idx.cpu().detach().numpy())
return scores, scores_idx
def ood_eval(self, val_loader, ood_loader):
score_in, _ = self.get_scores(val_loader)
score_out, _ = self.get_scores(ood_loader)
score_all = np.concatenate([score_in, score_out])
domain_labels = np.zeros(len(score_all))
domain_labels[:len(score_in)] = 1
score_in = np.array(score_in)
score_out = np.array(score_out)
score_all = np.array(score_all)
tnr = tnr_at_tpr95(score_in, score_out)
detection_error, _ = detection(score_in, score_out)
auroc = metrics.roc_auc_score(domain_labels, score_all)
aupr_in = metrics.average_precision_score(domain_labels, score_all)
aupr_out = metrics.average_precision_score(-1 * domain_labels + 1, 1 - score_all)
return {'tnr(\u2191)': tnr,
'det_err(\u2193)': detection_error,
'auroc(\u2191)': auroc,
'aupr_in(\u2191)': aupr_in,
'aupr_out(\u2191)': aupr_out,
'score_avg(\u2191)': '{:.2e}'.format(score_out.mean()),
'score_std(\u2193)': '{:.2e}'.format(score_out.std())
}
def get_ood_model(self, ood_model_file=None):
self.ood_cnn = copy.deepcopy(self.cnn)
def prepare_ood(self, ood_model_file=None):
self.get_ood_model(ood_model_file)
return self.ood_cnn, self.perturb_magnitude
class InputPreProcess(Baseline):
def prepare(self, train_loader, val_loader):
self.perturb_magnitude = self.search_perturb_magnitude(val_loader)
print('Inputs are perturbed with magnitude', self.perturb_magnitude)
def search_perturb_magnitude(self, dataloader):
if len(self.config.data_perturb_magnitude) == 1:
return self.config.data_perturb_magnitude[0]
else:
magnitude_list = self.config.data_perturb_magnitude
print('Searching the best perturbation magnitude on in-domain data. Magnitude:', magnitude_list)
self.cnn.eval()
loss_list = {}
for m in magnitude_list:
loss_meter = AverageMeter()
for input, _ in dataloader: # Here we don't need labels
input = input.cuda().requires_grad_(True)
output = self.cnn.forward(input)
loss_score, _ = self.scoring(output)
loss = -loss_score.mean()
loss.backward()
gradient = torch.ge(input.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
                    modified_input = torch.add(input.detach(), gradient, alpha=-m)
output = self.cnn.forward(modified_input)
loss, _ = self.scoring(output)
loss = -loss
loss_meter.update(loss.mean(), len(loss))
loss_list[m] = loss_meter.avg
print('Magnitude:', m, 'loss:', loss_list[m])
best_m = min(loss_list, key=(lambda key: loss_list[key]))
return best_m
def get_scores(self, dataloader):
self.cnn.eval()
scores, scores_idx = [], []
for input, _ in dataloader:
input = input.cuda().requires_grad_(True)
if 'cosine' in self.config.model_type:
_, _, output = self.cnn.forward(input)
else:
output = self.cnn.forward(input)
loss_score, _ = self.scoring(output)
loss = -loss_score.mean()
loss.backward()
gradient = torch.ge(input.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
            modified_input = torch.add(input.detach(), gradient, alpha=-self.perturb_magnitude)
if 'cosine' in self.config.model_type:
_, _, output = self.cnn.forward(modified_input)
else:
output = self.cnn.forward(modified_input)
score, score_idx = self.scoring(output)
scores.extend(score.cpu().detach().numpy())
scores_idx.extend(score_idx.cpu().detach().numpy())
return scores, scores_idx
class ODIN(InputPreProcess):
# def get_scores(self, dataloader):
# self.cnn.eval()
# scores = []
# for input, target in dataloader:
#
# input = input.cuda()
# target = target.cuda()
#
# if 'cosine' in self.config.model_type:
# output, _, _ = self.cnn.forward(input)
# else:
# output = self.cnn.forward(input)
#
# score = self.scoring(output)
#
# scores.extend(score.cpu().detach().numpy())
#
# return scores
# Temperature scaling + Input preprocess
def scoring(self, x):
# max softmax
if isinstance(x, dict):
logits = x['logits']
else:
logits = x
logits /= 1000 # Temperature=1000 as suggested in ODIN paper
prob = F.softmax(logits, dim=1)
score, score_idx = prob.max(dim=1)
return score, score_idx
class Mahalanobis(Baseline):
def prepare(self, train_loader, val_loader):
self.ood_cnn = copy.deepcopy(self.cnn)
# self.init_mahalanobis(train_loader)
self.init_mahalanobis(val_loader)
def init_mahalanobis(self, dataloader):
if 'cosine' not in self.config.model_type:
self.cnn.module.fc = torch.nn.Identity() # So we extract the features
print('Init: Calculating Mahalanobis ...', len(dataloader))
all_feat = []
all_label = []
for input, target in dataloader:
input = input.cuda()
target = target.cuda()
if 'cosine' in self.config.model_type:
_, _, feat = self.cnn.forward(input)
else:
feat = self.cnn.forward(input)
all_feat.extend(feat.cpu().detach().numpy())
all_label.extend(target.cpu().detach().numpy())
all_feat = torch.from_numpy(np.array(all_feat))
all_label = torch.from_numpy(np.array(all_label))
assert all_feat.ndimension() == 2
all_feat = all_feat.cuda()
all_label = all_label.cuda()
self.centers = torch.zeros(self.num_classes, all_feat.size(1), device=all_feat.device)
for i in range(self.num_classes):
self.centers[i] = all_feat[all_label == i].mean(dim=0)
X = all_feat - torch.index_select(self.centers, dim=0, index=all_label)
# self.precision = X.var(dim=0).pow(-1).diagflat() # This simplification will cause a significant performance drop
self.precision = cov(X).pinverse()
def scoring(self, x):
diff = x.unsqueeze(dim=1) - self.centers.unsqueeze(dim=0) # Broadcasting operation
for i in range(self.num_classes):
zero_f = diff[:, i]
term_gau = -0.5 * torch.mm(torch.mm(zero_f, self.precision), zero_f.t()).diag()
if i == 0:
gaussian_score = term_gau.view(-1, 1)
else:
gaussian_score = torch.cat((gaussian_score, term_gau.view(-1, 1)), 1)
score, score_idx = gaussian_score.max(dim=1)
return score, score_idx
def get_ood_model(self, ood_model_file=None):
print('Preparing Mahalanobis OOD Model...')
if (ood_model_file is not None) and (os.path.isfile(ood_model_file)):
self.ood_cnn.load_state_dict(torch.load(ood_model_file))
else:
with torch.no_grad():
# set weight, bias in fc layer
new_weight = self.ood_cnn.module.fc.weight
new_bias = self.ood_cnn.module.fc.bias
for i in range(self.num_classes):
center = self.centers[i].unsqueeze(dim=1)
new_weight[i] = torch.mm(center.t(), self.precision)
new_bias[i] = -0.5 * torch.mm(torch.mm(center.t(), self.precision), center).diag()
self.ood_cnn.module.fc.weight = torch.nn.Parameter(new_weight)
self.ood_cnn.module.fc.bias = torch.nn.Parameter(new_bias)
class Mahalanobis_IPP(Mahalanobis, InputPreProcess):
def prepare(self, train_loader, val_loader):
self.ood_cnn = copy.deepcopy(self.cnn)
# self.init_mahalanobis(train_loader)
self.init_mahalanobis(val_loader)
self.perturb_magnitude = self.search_perturb_magnitude(val_loader)
print('Inputs are perturbed with magnitude', self.perturb_magnitude)
class DeepMahalanobis(Baseline):
def prepare(self, train_loader, val_loader):
self.init_mahalanobis(train_loader)
def init_mahalanobis(self, dataloader):
def new_forward(self, x):
return self.features(x)
# return (self.layer1(x), self.layer2(x), self.layer3(x), self.layer4(x))
self.cnn.module.__class__.forward = new_forward
print('Init: Calculating DeepMahalanobis ...', len(dataloader))
input, _ = dataloader.dataset[0]
input = input.unsqueeze(0).cuda()
# get all features (e.g. 4 layer features for resnet)
feats = self.cnn.forward(input)
self.num_out = len(feats)
all_feat = {i: [] for i in range(self.num_out)}
self.centers = {i: [] for i in range(self.num_out)}
self.precision = {i: [] for i in range(self.num_out)}
all_label = []
for input, target in dataloader:
input = input.cuda()
target = target.cuda()
feats = self.cnn.forward(input)
for i in range(self.num_out):
all_feat[i].extend(feats[i].mean(-1).mean(-1).cpu().detach().numpy())
all_label.extend(target.cpu().detach().numpy())
for i in range(self.num_out):
all_feat[i] = torch.from_numpy(np.array(all_feat[i]))
all_feat[i] = all_feat[i].cuda()
all_label = torch.from_numpy(np.array(all_label))
all_label = all_label.cuda()
for i in range(self.num_out):
# feats = torch.cat(all_feat[i])
feats = all_feat[i]
assert feats.ndimension() == 2
self.centers[i] = torch.zeros(self.num_classes, feats.size(1), device=feats.device)
for c in range(self.num_classes):
self.centers[i][c] = feats[all_label == c].mean(dim=0)
X = feats - torch.index_select(self.centers[i], dim=0, index=all_label)
self.precision[i] = cov(X).pinverse()
# def get_scores(self, dataloader):
# self.cnn.eval()
# scores = []
# for input, target in dataloader:
#
# input = input.cuda()
# target = target.cuda()
#
# output = self.cnn.forward(input)
#
# score = self.scoring(output)
#
# scores.extend(score.cpu().detach().numpy())
#
# return scores
def scoring(self, x):
deep_scores = torch.zeros(x[0].size(0), self.num_out, device=x[0].device)
for i in range(self.num_out):
feat = x[i].mean(-1).mean(-1)
diff = feat.unsqueeze(dim=1) - self.centers[i].unsqueeze(dim=0) # Broadcasting operation
for c in range(self.num_classes):
zero_f = diff[:, c]
term_gau = -0.5 * torch.mm(torch.mm(zero_f, self.precision[i]), zero_f.t()).diag()
if c == 0:
gaussian_score = term_gau.view(-1, 1)
else:
gaussian_score = torch.cat((gaussian_score, term_gau.view(-1, 1)), 1)
deep_scores[:, i], score_idx = gaussian_score.max(dim=1)
return deep_scores.sum(dim=1), score_idx
class DeepMahalanobis_IPP(DeepMahalanobis, InputPreProcess):
def prepare(self, train_loader, val_loader):
self.init_mahalanobis(train_loader)
self.perturb_magnitude = self.search_perturb_magnitude(val_loader)
print('Inputs are perturbed with magnitude', self.perturb_magnitude)
# class Disentangle_SYD(Baseline):
# def scoring(self, x):
# assert isinstance(x, dict), 'The model doesnt provide disentangled results'
# return x['S_YD']
#
#
# class Disentangle_SYD_IPP(InputPreProcess):
# def scoring(self, x):
# assert isinstance(x, dict), 'The model doesnt provide disentangled results'
# return x['S_YD']
#
#
# class Disentangle_SD(Baseline):
# def scoring(self, x):
# assert isinstance(x, dict), 'The model doesnt provide disentangled results'
# return x['S_D']
#
#
# class Disentangle_SD_IPP(InputPreProcess):
# def scoring(self, x):
# assert isinstance(x, dict), 'The model doesnt provide disentangled results'
# return x['S_D']
#
#
# class OfflineDisentangle_SYD(Baseline):
# def __init__(self, baseObject):
# super(OfflineDisentangle_SYD, self).__init__(baseObject)
#
# def ood_prepare(self, dataloader):
#
# self.log('Offline disentangle ...')
# all_out = []
# all_label = []
# for input, target, _ in dataloader:
# # The task id is ignored here
#
# input = input.cuda()
# target = target.cuda()
#
# out = self.forward(input)
# all_out.append(out)
# all_label.append(target)
# all_out = torch.cat(all_out)
# all_label = torch.cat(all_label)
# self.num_classes = len(torch.unique(all_label))
# self.std = torch.zeros(self.num_classes, device=input.device)
# for c in range(self.num_classes):
# self.std[c] = all_out[all_label == c].std()
# print('Distance std:', self.std.mean())
#
# def score(self, x):
# S_YD = x / self.std.view(1, -1)
# return super(Disentangle, self).score(S_YD)
|
StarcoderdataPython
|
1763878
|
from enum import Enum
import matplotlib.pyplot as plt
from simalia.data import index
class PlotTypes(Enum):
LINE = 1
BAR = 2
PIE = 3
DONUT = 4
class Plot:
def __init__(self, title="", legend=False):
self.title = title
self.legend = legend
self.type = None
self._legend_data = []
self._keys = []
self._values = []
def dict(self, dictionary):
self._keys = dictionary.keys()
self._values.append(dictionary.values())
return self
def keys(self, keys):
self._keys = keys
return self
def values(self, *values):
self._values += values
return self
def mean(self, val):
plt.plot(list(self._keys), [val] * len(self._keys), label="mean", linestyle='--')
return self
def line(self, markers=True):
self.type = PlotTypes.LINE
keys = list(self._keys)
for val in self._values:
plt.plot(keys, list(val), marker='o' if markers else None, linestyle='solid')
return self
def bar(self, horizontal=False):
self.type = PlotTypes.BAR
fig, ax = plt.subplots()
width = 0.3
for i, val in enumerate(self._values):
positions = [x + (float(i) * width) for x in index(val)]
if horizontal:
ax.barh(positions, val, height=width)
else:
ax.bar(positions, val, width=width)
label_pos = [x + (len(self._values) - 1) / 2 * width for x in index(self._keys)]
if horizontal:
ax.set_yticks(label_pos)
ax.set_yticklabels(self._keys)
ax.invert_yaxis()
else:
ax.set_xticks(label_pos)
ax.set_xticklabels(self._keys)
return self
def pie(self, highlight_first=True):
self.type = PlotTypes.PIE
self._legend_data = self._keys
explode = None
if highlight_first:
explode = [0.] * len(self._keys)
explode[0] = 0.05
plt.pie(self._values[0], labels=self._labels(), explode=explode)
return self
def donut(self):
self.type = PlotTypes.DONUT
self._legend_data = self._keys
plt.pie(self._values[0], labels=self._labels(), wedgeprops=dict(width=0.4))
return self
def _labels(self):
if self.legend:
return None
return self._keys
def draw(self):
if self.legend:
loc = self.legend if self.legend is not True else "best"
plt.legend(self._legend_data, loc=loc)
plt.title(self.title)
plt.draw()
plt.style.use("seaborn")
|
StarcoderdataPython
|
4930099
|
print('='*10, 'Challenge 62', '='*10)
primeiro = int(input('Enter the first term of the arithmetic progression: '))
razão = int(input('Enter the common difference of the progression: '))
cont = 1
termo = primeiro
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print('{} → '.format(termo), end=' ')
termo += razão
cont += 1
    mais = int(input('How many more terms do you want to show? '))
print('Done!')
print('='*10, 'Challenge 62', '='*10)
|
StarcoderdataPython
|
9725374
|
<gh_stars>0
from flask_wtf import FlaskForm
from wtforms import BooleanField
from wtforms import PasswordField
from wtforms import StringField
from wtforms import SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Login')
remember_me = BooleanField('Remember Me')
|
StarcoderdataPython
|
1782852
|
<gh_stars>1-10
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Import numpy, you'll need this
import numpy as np
# Create medium: observations with cars_per_cap between 10 and 80
cpc = cars['cars_per_cap']
between = np.logical_and(cpc > 10, cpc < 80)
medium = cars[between]
# Print medium
print(medium)
#another filter
print("cars_per_capita between 100 and 500")
cpc = cars['cars_per_cap']
between = np.logical_and(cpc > 100, cpc < 500)
medium = cars[between]
print(medium)
|
StarcoderdataPython
|
12824952
|
import json
from mysystem import *
from utils import trans2json
import pull_global_vars as gv
from pull_util import *
from hash import Hash
#from redis_oper import write2redis
import base64
import time
MEDIA_REQ_TIMEOUT = 3
def query_hash(data):
result_hash_list = []
start_time=time.time()
if data['params'].has_key('url'):
if data['params']['url']['hash'] != None and data['params']['url']['hash'] != '':
ret_code, result = query_vddb_async(
data['params']['url']['hash'], data)
if ret_code == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return ret_code, result
result_hash_list.append((ret_code, result))
if data['params']['thunder_hash'] != None and data['params']['thunder_hash'] != '':
ret_code, result = query_vddb_async(
data['params']['thunder_hash'], data)
if ret_code == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return ret_code, result
result_hash_list.append((ret_code, result))
if data['params'].has_key('seed_file'):
seed_file_hash = ''
if data['params']['seed_file']['hash'] != '':
seed_file_hash = data['params']['seed_file']['hash']
else:
ret_code, bt_file_name = download_file(
data['params']['seed_file']['path'], gv.file_tmpdir)
if ret_code:
client_id = data['params']['additional_info']['client_id']
with open(bt_file_name, 'rb') as fp:
seed_file_content = fp.read()
seed_file_hash = Hash(
filename=bt_file_name, content=seed_file_content).value
data['params']['seed_file']['hash'] = seed_file_hash
try:
os.remove(bt_file_name)
except OSError:
g_logger.error(trans2json(
"delete bt file %s error %s" % (bt_file_name, traceback.format_exc())))
ret_code, result = query_vddb_async(seed_file_hash, data)
if ret_code == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return ret_code, result
result_hash_list.append((ret_code, result))
if data['params'].has_key('files'):
hash_list = []
data_list = []
for i in data['params']['files']:
dna_hash = i['hash']
hash_list.append(dna_hash)
data_list.append(data)
result_list = map(query_vddb_async, hash_list, data_list)
for i in range(len(result_list)):
if result_list[i][0] == 1:
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return result_list[i][0], result_list[i][1]
end_time = time.time()
#gv.statsd_conn.timing("thunder.querybroker_qbpull", (end_time-start_time)*1000)
return 3, None
def url_scheme(url):
scheme = None
parts = url.split('://', 1)
if len(parts) >= 2:
scheme = parts[0]
return scheme
def query_vddb_async(req_hash, data):
g_logger.debug(trans2json("query vddb async by hash %s" % str(req_hash)))
    # use a distinct name so the imported mysystem callable is not shadowed
    system = mysystem(gv.mysystem_user, gv.mysystem_passwd,
                      gv.mysystem_url, False, MEDIA_REQ_TIMEOUT, g_logger)
    uuid = data['params']['external_id']
    ret, status_listing = system.query(req_hash, uuid)
working_cnt = 0
copyrighted_cnt = 0
uncopyrighted_cnt = 0
status_cnt = len(status_listing)
for status in status_listing:
if status['status'] == STATUS_COPYRIGHTED:
copyrighted_cnt += 1
if status['status'] == STATUS_UNCOPYRIGHTED:
uncopyrighted_cnt += 1
if status['status'] == STATUS_WORKING:
working_cnt += 1
# all can not check
if ret == STATUS_UNDETECTED:
ret_code = 2
return ret_code, status_listing
if status_cnt > 0:
if copyrighted_cnt == status_cnt or working_cnt == status_cnt or uncopyrighted_cnt == status_cnt:
ret_code = 1
return ret_code, status_listing
return 4, None
|
StarcoderdataPython
|
5171507
|
# -*- coding: utf-8 -*-
"""
Progress component
"""
from bowtie._component import Component
class Progress(Component):
"""This component is used by all visual components and
is not meant to be used alone.
By default, it is not visible.
It is an opt-in feature and you can happily use Bowtie
without using the progress indicators at all.
It is useful for indicating progress to the user for long-running processes.
It can be accessed through the ``.progress`` accessor.
Examples
--------
>>> plotly = Plotly()
>>> def callback(x):
>>> plotly.progress.do_visible(True)
>>> plotly.progress.do_percent(0)
>>> compute1()
>>> plotly.progress.do_inc(50)
>>> compute2()
>>> plotly.progress.do_visible(False)
"""
_TEMPLATE = 'progress.jsx'
_COMPONENT = 'CProgress'
_PACKAGE = None
_TAG = ('<CProgress '
'socket={{socket}} '
'uuid={{{uuid}}} '
'>')
def _instantiate(self):
return self._TAG.format(
uuid="'{}'".format(self._uuid)
)
# pylint: disable=no-self-use
def do_percent(self, percent):
"""Set the percentage of the progress.
Parameters
----------
percent : number
Sets the progress to this percentage.
Returns
-------
None
"""
return percent
def do_inc(self, inc):
"""Increments the progress indicator.
Parameters
----------
inc : number
Value to increment the progress.
Returns
-------
None
"""
return inc
def do_visible(self, visible):
"""Hides and shows the progress indicator.
Parameters
----------
visible : bool
If ``True`` shows the progress indicator
otherwise it is hidden.
Returns
-------
None
"""
return visible
def do_active(self):
"""Hides and shows the progress indicator.
Returns
-------
None
"""
pass
def do_success(self):
"""Hides and shows the progress indicator.
Returns
-------
None
"""
pass
def do_error(self):
"""Hides and shows the progress indicator.
Returns
-------
None
"""
pass
|
StarcoderdataPython
|
6547037
|
<gh_stars>1-10
import tensorflow as tf
import tensorflow.contrib.slim as tfslim
mobilenetv3_large = {
'kernel': [3, 3, 3, 5, 5, 5, 3, 3, 3, 3, 3, 3, 5, 5, 5],
'expand':
[16, 64, 72, 72, 120, 120, 240, 200, 184, 184, 480, 672, 672, 672, 960],
'output':
[16, 24, 24, 40, 40, 40, 80, 80, 80, 80, 112, 112, 160, 160, 160, 960],
'SE': [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1],
'activation': [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'stride': [1, 2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1]
}
mobilenetv3_small = {
'kernel': [3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5],
'expand': [16, 72, 88, 96, 240, 240, 120, 144, 288, 576, 576],
'output': [16, 24, 24, 40, 40, 40, 48, 48, 96, 96, 96, 576],
'SE': [1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'activation': [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
'stride': [2, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1]
}
CONFIGURATIONS = {'0.5': mobilenetv3_small, '1.0': mobilenetv3_large}
BATCH_NORM_MOMENTUM = 0.997
BATCH_NORM_EPSILON = 1e-36
expand_multiplier = 3 # scaled to 1/3
def MobileNetV3(x, is_training, num_classes=200, depth_multiplier='0.5'):
configs = CONFIGURATIONS[depth_multiplier]
num_bnecks = len(configs['kernel'])
def batch_norm(x):
x = tf.layers.batch_normalization(x,
axis=3,
center=True,
scale=True,
training=is_training,
momentum=BATCH_NORM_MOMENTUM,
epsilon=BATCH_NORM_EPSILON,
fused=True,
name='batch_norm')
return x
params = {'normalizer_fn': batch_norm}
with tf.variable_scope('MobileNetV3'):
with tfslim.arg_scope([tfslim.conv2d, depthwise_conv], **params):
x = tfslim.conv2d(x,
num_outputs=16,
kernel_size=3,
stride=2,
activation_fn=Hswish,
scope='conv1')
for unit in range(num_bnecks):
with tf.variable_scope('bneck' + str(unit + 1)):
ratio = expand_multiplier
if unit == 0:
ratio = 1
if configs['stride'][unit] == 1:
x = basic_block(x,
num_outputs=configs['output'][unit],
expand=configs['expand'][unit] //
ratio,
kernel_size=configs['kernel'][unit],
is_SE=configs['SE'][unit],
is_Hswish=configs['activation'][unit])
else:
x = downsample(x,
num_outputs=configs['output'][unit],
expand=configs['expand'][unit] // ratio,
kernel_size=configs['kernel'][unit],
is_SE=configs['SE'][unit],
is_Hswish=configs['activation'][unit])
x = tfslim.conv2d(x,
num_outputs=configs['output'][num_bnecks],
kernel_size=1,
activation_fn=Hswish,
scope='conv' + str(2 + num_bnecks))
with tf.variable_scope('global_pool'):
x = tf.reduce_mean(x, axis=[1, 2])
x = Hswish(x)
x = tfslim.fully_connected(x,
num_outputs=1280,
activation_fn=Hswish,
scope='fc')
x = tfslim.fully_connected(x,
num_outputs=num_classes,
activation_fn=None,
scope='classifier')
return x
def basic_block(x,
num_outputs,
expand,
kernel_size=3,
stride=1,
is_SE=0,
is_Hswish=0):
in_channels = x.shape[3].value
activation = tf.nn.relu6 if not is_Hswish else Hswish
with tf.variable_scope('residual'):
y = tfslim.conv2d(x,
num_outputs=expand,
kernel_size=1,
activation_fn=activation,
scope='conv1x1_before')
y = depthwise_conv(y, kernel=kernel_size, activation_fn=activation)
y = tfslim.conv2d(y,
num_outputs=num_outputs,
kernel_size=1,
activation_fn=None,
scope='conv1x1_after')
if is_SE:
y = SE_block(y)
if in_channels != num_outputs:
x = tfslim.conv2d(x,
num_outputs=num_outputs,
kernel_size=1,
activation_fn=tf.nn.relu6,
scope='shortcut')
y = y + x
return y
def downsample(x,
num_outputs,
expand,
kernel_size=3,
stride=2,
is_SE=0,
is_Hswish=0):
activation = tf.nn.relu6 if not is_Hswish else Hswish
x = tfslim.conv2d(x,
num_outputs=expand,
kernel_size=1,
activation_fn=activation,
scope='conv1x1_before')
x = depthwise_conv(x,
kernel=kernel_size,
stride=stride,
activation_fn=activation)
x = tfslim.conv2d(x,
num_outputs=num_outputs,
kernel_size=1,
activation_fn=None,
scope='conv1x1_after')
if is_SE:
x = SE_block(x)
return x
def Hswish(x):
return x * (tf.nn.relu6(x + 3.0) / 6.0)
@tf.contrib.framework.add_arg_scope
def depthwise_conv(x,
kernel=3,
stride=1,
padding='SAME',
activation_fn=tf.nn.relu6,
normalizer_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
data_format='NHWC',
scope='depthwise_conv'):
with tf.variable_scope(scope):
assert data_format == 'NHWC'
in_channels = x.shape[3].value
W = tf.get_variable('depthwise_weights',
[kernel, kernel, in_channels, 1],
dtype=tf.float32,
initializer=weights_initializer)
x = tf.nn.depthwise_conv2d(x,
W, [1, stride, stride, 1],
padding,
data_format='NHWC')
x = normalizer_fn(
x) if normalizer_fn is not None else x # batch normalization
x = activation_fn(
x) if activation_fn is not None else x # nonlinearity
return x
def SE_block(x):
in_channels = x.shape[3].value
with tf.variable_scope('SE'):
y = tf.reduce_mean(x, axis=[1, 2], name='global_pool')
y = tfslim.fully_connected(y,
num_outputs=in_channels // 16,
activation_fn=tf.nn.relu6,
scope='fc1')
y = tfslim.fully_connected(y,
num_outputs=in_channels,
activation_fn=tf.nn.sigmoid,
scope='fc2')
y = tf.reshape(y, [-1, 1, 1, in_channels])
x = x * y
return x
|
StarcoderdataPython
|
297048
|
"""
Shows how to receive a file over OBEX.
"""
import lightblue
# bind the socket, and advertise an OBEX service
sock = lightblue.socket()
try:
sock.bind(("", 0)) # bind to 0 to bind to a dynamically assigned channel
lightblue.advertise("LightBlue example OBEX service", sock, lightblue.OBEX)
# Receive a file and save it as MyFile.txt.
# This will wait and block until a file is received.
print "Waiting to receive file on channel %d..." % sock.getsockname()[1]
lightblue.obex.recvfile(sock, "MyFile.txt")
finally:
sock.close()
print "Saved received file to MyFile.txt!"
# Please note:
#
# To use a file through this example, the other device must send the file to
# the correct channel. E.g. if this example prints "Waiting to receive file on
# channel 5..." the remote device must send the file specifically to channel 5.
#
# * But what if you can't specify a channel or service?
#
# If you can send a file to a specific channel - e.g. by using
# lightblue.obex.sendfile(), as the send_file.py example does - then you
# should be fine.
#
# But, if you're just using the system's default OBEX file-sending tool on
# the other device (e.g. "Send file..." from the Bluetooth drop-down menu on
# Mac OS X, or "Send ... Via Bluetooth" on Series 60 phones), it may only
# allow you to choose a device to send the file to, without choosing a
# specific channel or service on the device. In this case, the tool is
# probably just choosing the first available OBEX service on the device.
#
# So if you switch off all other related services, this example's service
# should automatically receive all OBEX files. E.g. if you're running this
# example on Mac OS X, go to the System Preferences' Bluetooth panel: on
# Mac OS X 10.4, go to the "Sharing" tab, and uncheck the "On" checkboxes for
# the "Bluetooth File Transfer" and "Bluetooth File Exchange" services.
# On Mac OS X 10.3, go to the "File Exchange" tab, and for "When receiving
# items", select "Refuse all", and uncheck "Allow other devices to browse
# files on this computer".
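#
# As an illustrative sketch only (it is not executed by this example), the
# sending side could target the channel printed above with
# lightblue.obex.sendfile(), mentioned earlier. The address and channel used
# below are placeholders, not real values:
#
#     import lightblue
#     lightblue.obex.sendfile("00:11:22:33:44:55", 5, "MyFile.txt")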
|
StarcoderdataPython
|
9769821
|
'''
Author : <NAME>
Mail : <EMAIL> @ g<EMAIL>.com
'''
num = float(input("Enter your number :"))
if num >= 80.00:
print("Your grade is A+.")
elif num >= 70.00:
print("Your grade is A.")
elif num >= 60.00:
print("Your grade is A-.")
elif num >= 50.00:
print("Your grade is B.")
elif num >= 40.00:
print("Your grade is C.")
elif num >= 33.00:
print("Your grade is D.")
else:
print("You are fail")
|
StarcoderdataPython
|
12851004
|
"""empty message
Revision ID: 783682226c9b
Revises: <KEY>
Create Date: 2019-10-19 10:07:14.923441
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"prices", "internal_product_id", existing_type=sa.INTEGER(), type_=sa.String(), existing_nullable=True
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"prices", "internal_product_id", existing_type=sa.String(), type_=sa.INTEGER(), existing_nullable=True
)
# ### end Alembic commands ###
|
StarcoderdataPython
|
3408945
|
<reponame>Aquarium1222/Electricity-Forecasting
import pandas as pd
import numpy as np
import torch.utils.data as data
from sklearn.preprocessing import MinMaxScaler
import config
class ElectricDataset(data.Dataset):
def __init__(self, preprocessor):
const = config.Constant
hp = config.Hyperparameter
df = pd.read_csv(const.RESERVE_MARGIN).drop(columns=['日期', '備轉容量率(%)'])
y = preprocessor.preprocessing(np.expand_dims(df['備轉容量(萬瓩)'].to_numpy(), axis=1) * 10, 'y')
x = preprocessor.preprocessing(np.expand_dims(df['備轉容量(萬瓩)'].to_numpy(), axis=1) * 10, 'x')
# x = preprocessor.preprocessing(df.to_numpy(), 'x')
self.__data = []
self.__result = []
for i in range(hp.INPUT_SEQ_LEN, len(x) - (hp.OUTPUT_SEQ_LEN - 1)):
self.__data.append(x[i-hp.INPUT_SEQ_LEN:i])
self.__result.append(y[i:i+hp.OUTPUT_SEQ_LEN])
self.__data = np.array(self.__data)
self.__result = np.array(self.__result)
def __getitem__(self, item):
return self.__data[item], self.__result[item]
def __len__(self):
return len(self.__data)
|
StarcoderdataPython
|
302470
|
<gh_stars>0
from datetime import datetime
from typing import NewType, TypeVar
PythonType = TypeVar('PythonType')
UUID = NewType('UUID', str)
String = NewType('String', str)
Boolean = NewType('Boolean', bool)
Integer = NewType('Integer', int)
Float = NewType('Float', float)
Text = NewType('Text', str)
LongText = NewType('LongText', str)
DateTime = NewType('DateTime', datetime)
DatastoreType = TypeVar('DatastoreType', UUID, String, Boolean, Integer, Float, Text, LongText)
known_definitions = (UUID, Boolean, DateTime, Integer, Float, String, Text,LongText)
|
StarcoderdataPython
|
3236305
|
"""
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html#SQS.Client.send_message
"""
from pprint import pprint
import boto3
import time
def send_message(queueu_url: str, region_name: str, message: str):
sqs_client = boto3.client("sqs", region_name=region_name)
response = sqs_client.send_message(QueueUrl=queueu_url, MessageBody=message)
return response
def receive_message(queueu_url: str, region_name: str, nr_of_messages: int = 1):
sqs_client = boto3.client("sqs", region_name=region_name)
response = sqs_client.receive_message(
QueueUrl=queueu_url,
MaxNumberOfMessages=nr_of_messages, # max 10, default 1
WaitTimeSeconds=0,
)
return response
def delete_message(queueu_url: str, region_name: str, receipt_handle: str):
sqs_client = boto3.client("sqs", region_name=region_name)
response = sqs_client.delete_message(
QueueUrl=queueu_url,
ReceiptHandle=receipt_handle,
)
return response
if __name__ == "__main__":
queueu_url = "https://eu-central-1.queue.amazonaws.com/717687450252/example-queue"
region_name = "eu-central-1"
print("sending message:")
send_response = send_message(
queueu_url=queueu_url,
region_name=region_name,
message='{"important-key":"important-value-1"}',
)
pprint(send_response)
time.sleep(2)
receive_response = receive_message(
queueu_url=queueu_url, region_name=region_name, nr_of_messages=10
)
send_message_id = send_response["MessageId"]
send_message_md5 = send_response["MD5OfMessageBody"]
print(
f"\n\n\nMessage that was send to the queue has id {send_message_id} and md5 {send_message_md5}.\n\n"
)
to_delete = []
print("Messages in the queue:")
for msg in receive_response["Messages"]:
pprint(msg)
to_delete.append(msg["ReceiptHandle"])
print(f"Deleting messages with the following receipt handles:")
for item in to_delete:
print(item)
for receipt_handle in to_delete:
delete_response = delete_message(
queueu_url=queueu_url,
region_name=region_name,
receipt_handle=receipt_handle,
)
pprint(delete_response)
|
StarcoderdataPython
|
215968
|
<reponame>guillp/binapy
from binapy import binapy_checker, binapy_decoder, binapy_encoder
@binapy_decoder("hex")
def decode_hex(bp: bytes) -> bytes:
return bytes.fromhex(bp.decode())
@binapy_encoder("hex")
def encode_hex(bp: bytes) -> bytes:
return bp.hex().encode()
@binapy_checker("hex")
def is_hex(bp: bytes) -> bool:
return (
len(bp) % 2 == 0
and bp.isalnum()
and set(bp.lower()).issubset(b"abcdef0123456789")
)
|
StarcoderdataPython
|
1928462
|
# -*- coding: utf-8 -*-
# Copyright 2021, CS GROUP - France, http://www.c-s.fr
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from operator import attrgetter
from pathlib import Path
import pkg_resources
from eodag.config import load_config
from eodag.plugins.apis.base import Api
from eodag.plugins.authentication.base import Authentication
from eodag.plugins.base import EODAGPluginMount
from eodag.plugins.crunch.base import Crunch
from eodag.plugins.download.base import Download
from eodag.plugins.search.base import Search
from eodag.utils import GENERIC_PRODUCT_TYPE
from eodag.utils.exceptions import UnsupportedProvider
logger = logging.getLogger("eodag.plugins.manager")
class PluginManager(object):
"""A manager for the plugins.
The role of instances of this class (normally only one instance exists,
created during instantiation of :class:`~eodag.api.core.EODataAccessGateway`.
But nothing is done to enforce this) is to instantiate the plugins
according to the providers configuration, keep track of them in memory, and
manage a cache of plugins already constructed. The providers configuration contains
information such as the name of the provider, the internet endpoint for accessing
it, and the plugins to use to perform defined actions (search, download,
authenticate, crunch).
:param providers_config: The configuration with all information about the providers
supported by the `eodag`
:type providers_config: dict
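
    Example (a minimal sketch; in practice the gateway referenced above builds
    and owns this manager itself from the loaded providers configuration)::

        >>> from eodag.api.core import EODataAccessGateway
        >>> dag = EODataAccessGateway()  # internally constructs a PluginManager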
"""
supported_topics = {"search", "download", "crunch", "auth", "api"}
def __init__(self, providers_config):
self.providers_config = providers_config
# Load all the plugins. This will make all plugin classes of a particular
# type to be available in the base plugin class's 'plugins' attribute.
# For example, by importing module 'eodag.plugins.search.resto', the plugin
# 'RestoSearch' will be available in self.supported_topics['search'].plugins
for topic in self.supported_topics:
# This way of discovering plugins means that anyone can create eodag
# plugins as a separate python package (though it must require eodag), and
# have it discovered as long as they declare an entry point of the type
# 'eodag.plugins.search' for example in its setup script. See the setup
# script of eodag for an example of how to do this.
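            # A minimal sketch of such a declaration (the package, module and
            # class names below are hypothetical, not part of eodag itself):
            #
            #     # setup.py of a third-party "eodag-someplugin" package
            #     setup(
            #         name="eodag-someplugin",
            #         install_requires=["eodag"],
            #         entry_points={
            #             "eodag.plugins.search": [
            #                 "SomeSearch = eodag_someplugin.search:SomeSearch",
            #             ],
            #         },
            #     )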
for entry_point in pkg_resources.iter_entry_points(
"eodag.plugins.{}".format(topic)
):
try:
entry_point.load()
except ImportError:
import traceback as tb
logger.warning("Unable to load plugin: %s.", entry_point.name)
logger.warning("Reason:\n%s", tb.format_exc())
logger.warning(
"Check that the plugin module (%s) is importable",
entry_point.module_name,
)
if entry_point.dist.project_name != "eodag":
# use plugin providers if any
plugin_providers_config_path = [
x
for x in Path(entry_point.dist.location).rglob("providers.yml")
if all(i not in str(x) for i in ["build", ".tox"])
]
if plugin_providers_config_path:
self.providers_config.update(
load_config(plugin_providers_config_path[0])
)
self.product_type_to_provider_config_map = {}
for provider_config in self.providers_config.values():
for product_type in provider_config.products:
product_type_providers = (
self.product_type_to_provider_config_map.setdefault( # noqa
product_type, []
)
)
product_type_providers.append(provider_config)
product_type_providers.sort(key=attrgetter("priority"), reverse=True)
self._built_plugins_cache = {}
def get_search_plugins(self, product_type=None, provider=None):
"""Build and return all the search plugins supporting the given product type,
ordered by highest priority, or the search plugin of the given provider
:param product_type: (Optional) The product type that the constructed plugins
must support
:type product_type: str
:param provider: (Optional) The provider on which to get the search plugin
:type provider: str
:returns: All the plugins supporting the product type, one by one (a generator
object)
:rtype: types.GeneratorType(:class:`~eodag.plugins.search.Search`)
:raises: :class:`~eodag.utils.exceptions.UnsupportedProvider`
:raises: :class:`~eodag.utils.exceptions.UnsupportedProductType`
:raises: StopIteration
.. versionchanged::
1.0
* ``product_type`` is now optional. If no product type is provided,
return all search plugins, ordered by priority
* A new optional parameter ``provider`` which defaults to ``None``, if
we want to build the search plugin of that provider
"""
def get_plugin():
try:
config.search.products = config.products
config.search.priority = config.priority
plugin = self._build_plugin(config.name, config.search, Search)
except AttributeError:
config.api.products = config.products
config.api.priority = config.priority
plugin = self._build_plugin(config.name, config.api, Api)
return plugin
if provider is not None:
try:
config = self.providers_config[provider]
except KeyError:
raise UnsupportedProvider
yield get_plugin()
            # Signal the end of iteration as we already have what we wanted.
            # Returning ends the generator cleanly (raising StopIteration here
            # would be turned into a RuntimeError by PEP 479).
            return
if product_type is None:
for config in sorted(
self.providers_config.values(), key=attrgetter("priority"), reverse=True
):
yield get_plugin()
                # Signal the end of iteration as we already have what we wanted.
                # Returning ends the generator cleanly (raising StopIteration here
                # would be turned into a RuntimeError by PEP 479).
                return
try:
for config in self.product_type_to_provider_config_map[product_type]:
yield get_plugin()
except KeyError:
logger.info(
"UnsupportedProductType: %s, using generic settings", product_type
)
for config in self.product_type_to_provider_config_map[
GENERIC_PRODUCT_TYPE
]:
yield get_plugin()
def get_download_plugin(self, product):
"""Build and return the download plugin capable of downloading the given
product.
:param product: The product to get a download plugin for
:type product: :class:`~eodag.api.product._product.EOProduct`
:returns: The download plugin capable of downloading the product
:rtype: :class:`~eodag.plugins.download.Download`
"""
plugin_conf = self.providers_config[product.provider]
try:
plugin_conf.download.priority = plugin_conf.priority
plugin = self._build_plugin(
product.provider, plugin_conf.download, Download
)
return plugin
except AttributeError:
plugin_conf.api.priority = plugin_conf.priority
plugin = self._build_plugin(product.provider, plugin_conf.api, Api)
return plugin
def get_auth_plugin(self, provider):
"""Build and return the authentication plugin for the given product_type and
provider
:param provider: The provider for which to get the authentication plugin
:type provider: str
:returns: The Authentication plugin for the provider
:rtype: :class:`~eodag.plugins.authentication.Authentication`
.. versionchanged::
1.0
* ``product_type`` is no longer needed to find the auth plugin
"""
plugin_conf = self.providers_config[provider]
try:
plugin_conf.auth.priority = plugin_conf.priority
plugin = self._build_plugin(provider, plugin_conf.auth, Authentication)
return plugin
except AttributeError:
# We guess the plugin being built is of type Api, therefore no need
# for an Auth plugin.
return None
@staticmethod
def get_crunch_plugin(name, **options):
"""Instantiate a eodag Crunch plugin whom class name is `name`, and configure
it with the `options`
:param name: The name of the Crunch plugin to instantiate
:type name: str
:param options: The configuration parameters of the cruncher
:type options: dict
:return: The cruncher named `name`
:rtype: :class:`~eodag.plugins.crunch.Crunch`
"""
Klass = Crunch.get_plugin_by_class_name(name)
return Klass(options)
def set_priority(self, provider, priority):
"""Set the priority of the given provider
:param provider: The provider which is assigned the priority
:type provider: str
:param priority: The priority to assign to the provider
:type priority: int
"""
# Update the priority in the configurations so that it is taken into account
# when a plugin of this provider is latterly built
for (
product_type,
provider_configs,
) in self.product_type_to_provider_config_map.items():
for config in provider_configs:
if config.name == provider:
config.priority = priority
# Sort the provider configs, taking into account the new priority order
provider_configs.sort(key=attrgetter("priority"), reverse=True)
# Update the priority of already built plugins of the given provider
for provider_name, topic_class in self._built_plugins_cache:
if provider_name == provider:
self._built_plugins_cache[(provider, topic_class)].priority = priority
def _build_plugin(self, provider, plugin_conf, topic_class):
"""Build the plugin of the given topic with the given plugin configuration and
registered as the given provider
:param provider: The provider for which to build the plugin
:type provider: str
:param plugin_conf: The configuration of the plugin to be built
:type plugin_conf: :class:`~eodag.config.PluginConfig`
:param topic_class: The type of plugin to build
:type topic_class: :class:`~eodag.plugin.base.PluginTopic`
:returns: The built plugin
:rtype: :class:`~eodag.plugin.search.Search` or
:class:`~eodag.plugin.download.Download` or
:class:`~eodag.plugin.authentication.Authentication` or
:class:`~eodag.plugin.crunch.Crunch`
"""
cached_instance = self._built_plugins_cache.setdefault(
(provider, topic_class.__name__), None
)
if cached_instance is not None:
return cached_instance
plugin_class = EODAGPluginMount.get_plugin_by_class_name(
topic_class, getattr(plugin_conf, "type")
)
plugin = plugin_class(provider, plugin_conf)
self._built_plugins_cache[(provider, topic_class.__name__)] = plugin
return plugin
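
# Illustrative usage sketch (an assumption added for clarity, not part of the
# original module): `plugin_manager` stands for an instance of the manager class
# defined above, and `product` for an EOProduct returned by a search.
def _example_plugin_manager_usage(plugin_manager, product):
    # Prefer one provider: plugins built from now on carry the new priority.
    plugin_manager.set_priority(product.provider, priority=10)
    # Download plugin for the product's provider (cached after the first build)...
    download_plugin = plugin_manager.get_download_plugin(product)
    # ...and the matching authentication plugin (None for pure Api providers).
    auth_plugin = plugin_manager.get_auth_plugin(product.provider)
    return download_plugin, auth_plugin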
|
StarcoderdataPython
|
3317813
|
import os
import sys
from os.path import join, isdir, realpath, exists
from shutil import rmtree
NODE_MODULES = "node_modules"
test = False
repoRoot = sys.argv[1]
rootPath = realpath(repoRoot)
if not exists(rootPath):
print("'{0}' does not exist. Exiting.").format(rootPath)
sys.exit(1)
if len(sys.argv) >= 3 and sys.argv[2] == "-t":
test = True
print("Deleting '{0}' dirs in '{1}'").format(NODE_MODULES, rootPath)
for projectDir in os.listdir(rootPath):
pdPath = realpath(join(rootPath, projectDir))
if isdir(pdPath):
for subDir in os.listdir(pdPath):
sdPath = realpath(join(pdPath, subDir))
if subDir == NODE_MODULES and isdir(sdPath):
                if test:
                    print("[TEST] Deleting {0}".format(sdPath))
                else:
                    print("Deleting {0}".format(sdPath))
                    rmtree(sdPath, ignore_errors=False)
|
StarcoderdataPython
|
12840566
|
<filename>058.py
from random import randint, choice
from time import sleep
print('_' * 40)
print('Vou pensar em um número entre 0 e 10.\n')
print('PROCESSANDO...')
sleep(3)
print('pronto!')
sleep(0.5)
n = int(input('Em que número eu pensei? '))
print('-' * 80)
sleep(2)
x = randint(0, 10)
cont = 0
while n != x:
if n < x:
n = int(input('EROOOOOOOOOOUUUUUUUUUUU! É um valor maior. Tente novamente: '))
cont += 1
else:
n = int(input('EROOOOOOOOOOUUUUUUUUUUU! É um valor menor. Tente novamente: '))
cont += 1
print(f'Parabéns, você acertou com {cont} jogadas! ')
|
StarcoderdataPython
|
11397487
|
<filename>test_app/forms.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from thecut.ordering.forms import OrderMixin
from django.forms import ModelForm
from .models import OrderingTestModel
class OrderingTestNoOrderFieldForm(OrderMixin, ModelForm):
# A class to provide useful testing of the ordermixin
class Meta:
model = OrderingTestModel
fields = ['id']
class OrderingTestHasOrderFieldForm(OrderMixin, ModelForm):
# A class to provide useful testing of the ordermixin
class Meta:
model = OrderingTestModel
fields = ['id', 'order']
|
StarcoderdataPython
|
3343974
|
import argparse
from utils import process_config
def get_eval_config():
parser = argparse.ArgumentParser("Visual Transformer Evaluation")
# basic config
parser.add_argument("--n-gpu", type=int, default=1, help="number of gpus to use")
parser.add_argument("--model-arch", type=str, default="b16", help='model setting to use', choices=['b16', 'b32', 'l16', 'l32', 'h14'])
parser.add_argument("--checkpoint-path", type=str, default=None, help="model checkpoint to load weights")
parser.add_argument("--image-size", type=int, default=384, help="input image size", choices=[224, 384])
parser.add_argument("--batch-size", type=int, default=32, help="batch size")
parser.add_argument("--num-workers", type=int, default=8, help="number of workers")
parser.add_argument("--data-dir", type=str, default='../data', help='data folder')
parser.add_argument("--dataset", type=str, default='ImageNet', help="dataset for fine-tunning/evaluation")
parser.add_argument("--num-classes", type=int, default=1000, help="number of classes in dataset")
config = parser.parse_args()
# model config
config = eval("get_{}_config".format(config.model_arch))(config)
print_config(config)
return config
def get_train_config():
parser = argparse.ArgumentParser("Visual Transformer Train/Fine-tune")
# basic config
parser.add_argument("--exp-name", type=str, default="ft", help="experiment name")
parser.add_argument("--n-gpu", type=int, default=1, help="number of gpus to use")
parser.add_argument("--tensorboard", default=False, action='store_true', help='flag of turnning on tensorboard')
parser.add_argument("--model-arch", type=str, default="b16", help='model setting to use', choices=['b16', 'b32', 'l16', 'l32', 'h14'])
parser.add_argument("--checkpoint-path", type=str, default=None, help="model checkpoint to load weights")
parser.add_argument("--image-size", type=int, default=384, help="input image size", choices=[224, 384])
parser.add_argument("--vit-image-size", type=int, default=384, help="input image size for ViT", choices=[224, 384])
parser.add_argument("--batch-size", type=int, default=32, help="batch size")
parser.add_argument("--num-workers", type=int, default=8, help="number of workers")
parser.add_argument("--train-steps", type=int, default=10000, help="number of training/fine-tunning steps")
parser.add_argument("--train-epochs", type=int, default=10, help="number of training/fine-tunning steps")
parser.add_argument("--lr", type=float, default=1e-3, help="learning rate")
parser.add_argument("--wd", type=float, default=1e-4, help='weight decay')
parser.add_argument("--warmup-steps", type=int, default=100, help='learning rate warm up steps')
parser.add_argument("--data-dir", type=str, default='../data', help='data folder')
parser.add_argument("--dataset", type=str, default='ImageNet', help="dataset for fine-tunning/evaluation")
parser.add_argument("--num-classes", type=int, default=1000, help="number of classes in dataset")
parser.add_argument("--momentum", type=float, default=0.9, help='momentum')
## hyperparameters below are for oracle training
parser.add_argument("--random-seed", type=int, default=-1, help='Learning rate warm up steps')
parser.add_argument("--classifier", type=str, default="transformer", help='Model setting to use, transformer|resnet|efficientnet')
parser.add_argument("--oracle-loss", type=str, default='ce', help="Oracle loss, options: ce|focal|tcp|steep")
parser.add_argument("--oracle-type", type=str, default='transformer', help="transformer|resnet")
parser.add_argument("--oracle-feat-dim", type=int, default=768, help="Dimension of output feature of oracle backbone, options: 768|2048")
parser.add_argument("--oracle-model-arch", type=str, default="b16", help='Model setting to use', choices=['b16', 'b32', 'l16', 'l32', 'h14'])
parser.add_argument("--oracle-checkpoint-path", type=str, default=None, help="Oracle checkpoint to load weights for initialization")
parser.add_argument("--oracle-pretrained", type=str, default=None, help="Oracle checkpoint to load weights for fine-tunning")
parser.add_argument("--oracle-outdim", type=int, default=1, help="Oracle output dimension")
parser.add_argument('--oracle-class-weight', nargs='*', type=float, help='Class weight')
parser.add_argument('--oracle-loss-hyperparam', nargs='*', type=float, help='Loss hyperparameters')
config = parser.parse_args()
# model config
config = eval("get_{}_config".format(config.model_arch))(config)
process_config(config)
print_config(config)
return config
def get_b16_config(config):
""" ViT-B/16 configuration """
config.patch_size = 16
config.emb_dim = 768
config.mlp_dim = 3072
config.num_heads = 12
config.num_layers = 12
config.attn_dropout_rate = 0.0
config.dropout_rate = 0.1
return config
def get_b32_config(config):
""" ViT-B/32 configuration """
config = get_b16_config(config)
config.patch_size = 32
return config
def get_l16_config(config):
""" ViT-L/16 configuration """
config.patch_size = 16
config.emb_dim = 1024
config.mlp_dim = 4096
config.num_heads = 16
config.num_layers = 24
config.attn_dropout_rate = 0.0
config.dropout_rate = 0.1
return config
def get_l32_config(config):
""" Vit-L/32 configuration """
config = get_l16_config(config)
config.patch_size = 32
return config
def get_h14_config(config):
""" ViT-H/14 configuration """
config.patch_size = 14
config.emb_dim = 1280
config.mlp_dim = 5120
config.num_heads = 16
config.num_layers = 32
config.attn_dropout_rate = 0.0
config.dropout_rate = 0.1
return config
def print_config(config):
message = ''
message += '----------------- Config ---------------\n'
for k, v in sorted(vars(config).items()):
comment = ''
message += '{:>35}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
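
# Illustrative usage sketch (an assumption, not part of the original file): the
# get_*_config helpers only need an attribute container, so a bare Namespace is
# enough to inspect a model preset without going through the CLI parsers above.
if __name__ == "__main__":
    demo = argparse.Namespace(model_arch="b32")
    demo = get_b32_config(demo)  # b32 reuses get_b16_config and overrides patch_size
    print_config(demo)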
|
StarcoderdataPython
|
288886
|
import abc
from pydnameth.config.experiment.types import Task
from pydnameth.infrastucture.load.cpg import load_cpg
from pydnameth.infrastucture.load.table import load_table_dict
class LoadStrategy(metaclass=abc.ABCMeta):
@abc.abstractmethod
def load(self, config, configs_child):
pass
class CPGLoadStrategy(LoadStrategy):
def load(self, config, configs_child):
load_cpg(config)
config.base_list = config.cpg_list
config.base_dict = config.cpg_dict
config.base_data = config.cpg_data
for config_child in configs_child:
config_child.base_list = config.base_list
config_child.base_dict = config.base_dict
config_child.base_data = config.base_data
if config.experiment.task == Task.table or config.experiment.task == Task.clock:
for config_child in configs_child:
if config_child.experiment.task == Task.table:
config_child.advanced_data = load_table_dict(config_child)
config_child.advanced_list = config_child.base_list
config_child.advanced_dict = {}
row_id = 0
for item in config_child.advanced_data['item']:
config_child.advanced_dict[item] = row_id
row_id += 1
class AttributesLoadStrategy(LoadStrategy):
def load(self, config, configs_child):
pass
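
# Illustrative usage sketch (an assumption, not part of the original module): a
# concrete strategy would typically be picked from the experiment's data type
# (the 'cpg' key below is hypothetical) and then applied to the parent config
# together with its child configs.
def _example_load(config, configs_child, data_type='cpg'):
    strategy = CPGLoadStrategy() if data_type == 'cpg' else AttributesLoadStrategy()
    strategy.load(config, configs_child)
    return strategy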
|
StarcoderdataPython
|
107954
|
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# model
model = Model()
i1 = Input("input", "TENSOR_FLOAT32", "{2, 2}")
perms = Input("perms", "TENSOR_INT32", "{0}")
output = Output("output", "TENSOR_FLOAT32", "{2, 2}")
model = model.Operation("TRANSPOSE", i1, perms).To(output)
# Additional data type
quant8 = DataTypeConverter().Identify({
i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
output: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
})
# Instantiate an example
Example({
i1: [1.0, 2.0,
3.0, 4.0],
perms: [],
output: [1.0, 3.0,
2.0, 4.0]
}).AddVariations("relaxed", quant8)
# TRANSPOSE of data type TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM is introduced in V1_1.
Example.SetVersion("V1_1",
"transpose_v1_2",
"transpose_v1_2_all_inputs_as_internal",
"transpose_v1_2_quant8",
"transpose_v1_2_quant8_all_inputs_as_internal")
# zero-sized input
# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
# Use ROI_ALIGN op to convert into zero-sized feature map.
layout = BoolScalar("layout", False) # NHWC
i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
# TRANSPOSE op with numBatches = 0.
o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 2, 2}") # out
model = model.Operation("TRANSPOSE", zero_sized, [0, 3, 1, 2]).To(o3)
quant8 = DataTypeConverter().Identify({
p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
})
Example({
i1: [1],
o1: [],
o2: [],
o3: [],
}).AddVariations("relaxed", quant8, "float16")
|
StarcoderdataPython
|
3583362
|
import requests
#api_key='<KEY>'
api_key='<KEY>'
#url = 'http://172.17.0.3:9002//api/external/all-mooring/'+api_key+'/?mooring_specification=private'
url = 'https://mooring-api-dev-oim01.dbca.wa.gov.au/api/external/vessel-create-update/'+api_key+'/'
myobj = {'rego_no': 'D8888','vessel_size': 1.1, 'vessel_draft': 1.2, 'vessel_beam': 1.3, 'vessel_weight' : '1.4',}
x = requests.post(url, data = myobj)
print (x.text)
|
StarcoderdataPython
|
1884658
|
def display_board(board):
"""显示棋盘"""
print("\t{0} | {1} | {2}".format(board[0], board[1], board[2]))
print("\t—-+-—-+-—")
print("\t{0} | {1} | {2}".format(board[3], board[4], board[5]))
print("\t—-+-—-+-—")
print("\t{0} | {1} | {2}".format(board[6], board[7], board[8]))
def legal_moves(board):
"""返回可落子的位置列表"""
moves = [] # 存放的是int类型
for i in range(0, 9):
if board[i] in list("012345678"):
moves.append(i)
return moves
def getPlayerMove(board):
"""询问并确定玩家的选择落子位置,无效位置时重复询问"""
move = 9
while move not in legal_moves(board):
move = int(input("请选择落子位置(0-8):"))
return move
def getComputerMove(board, computerLetter, playerLetter):
"""核心算法:计算人工智能AI的落子位置"""
boardcopy = board.copy()
    # Rule 1: if moving at some position wins the game, choose that position
for move in legal_moves(boardcopy):
boardcopy[move] = computerLetter
if isWinner(boardcopy):
return move
boardcopy[move] = str(move)
    # Rule 2: if the player could win next turn at some position, block that position
for move in legal_moves(boardcopy):
boardcopy[move] = playerLetter
if isWinner(boardcopy):
return move
boardcopy[move] = str(move)
    # Rule 3: otherwise take an empty position in the order center, corners, edges
for move in (4, 0, 2, 6, 8, 1, 3, 5, 7):
if move in legal_moves(board):
return move
def isWinner(board):
"""判断所给的棋子是否获胜"""
WAYS_TO_WIN = {(0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6)}
for r in WAYS_TO_WIN:
if board[r[0]] == board[r[1]] == board[r[2]]:
return True
return False
def isTie(board):
"""判断是否平局"""
for i in list("012345678"):
if i in board:
return False
return True
def tic_tac_toe():
"""井字棋"""
board = list("012345678")
playerLetter = input("请选择棋子X或者O(X先走,O后走):")
if playerLetter in ("X", "x"):
turn = "player"
playerLetter = "X"
computerLetter = "O"
else:
turn = "computer"
computerLetter = "X"
playerLetter = "O"
print("{}先走!".format(turn))
while True:
display_board(board)
if turn == 'player':
move = getPlayerMove(board)
board[move] = playerLetter
if isWinner(board):
display_board(board)
print("恭喜玩家获胜!")
break
else:
turn = "computer"
else:
move = getComputerMove(board, computerLetter, playerLetter)
print("计算机人工智能AI落子位置:", move)
board[move] = computerLetter
if isWinner(board):
display_board(board)
print("计算机人工智能AI获胜!")
break
else:
turn = "player"
if isTie(board):
display_board(board)
print('平局!!!')
break
if __name__ == '__main__':
tic_tac_toe()
|
StarcoderdataPython
|
4937867
|
# -*- coding: utf-8 -*-
import csv
import codecs
import decimal
path_pomodone_log = 'pomodone-log.csv'
path_trello_archived = 'Archived trello.csv'
path_trello = 'trello.csv'
path_output = 'output.csv'
rfile_pomodone_log = codecs.open(path_pomodone_log, 'rb', encoding="utf-8")
rfile_trello_archived = codecs.open(
path_trello_archived, 'rb', encoding="big5")
rfile_trello = codecs.open(path_trello, 'rb', encoding="big5")
csv_pomodone_log = csv.DictReader(rfile_pomodone_log, delimiter=',')
csv_trello_archived = csv.DictReader(rfile_trello_archived, delimiter=',')
csv_trello = csv.DictReader(rfile_trello, delimiter=',')
write_output = codecs.open(path_output, 'w', encoding='utf-8')
dict_task = {} # name
dict_time = {} # time spent
dict_count = {}  # execution times
dict_label = {} # labels
list_test = []
task_date = ""
task_date_temp = ""
count_date = 0
# read pomodone logs as dictionary
for temp in csv_pomodone_log:
task_id = temp['description'][temp['description'].find('c/') + 2:]
if task_date_temp == str(temp['date']):
pass
else:
task_date_temp = str(temp['date'])
count_date += 1
if len(task_id) == 0:
continue
task_time = (
int(temp['time spent'][:2]) * 60 * 60 +
int(temp['time spent'][3:5]) * 60 +
int(temp['time spent'][7:]))
# task id dictionary
try:
test = dict_task[task_id]
except KeyError:
dict_task[task_id] = task_id
    # time spent
try:
test = dict_time[task_id]
task_time_temp = dict_time[task_id]
dict_time[task_id] = task_time_temp + task_time
except KeyError:
dict_time[task_id] = task_time
    # execution count
if int(temp['time spent'][:2]) * 60 * 60 > 1500:
counter = 2
else:
counter = 1
try:
test = dict_time[task_id]
task_count_temp = dict_count[task_id]
dict_count[task_id] += counter
except KeyError:
dict_count[task_id] = counter
    # labels
try:
test = dict_label[task_id]
except KeyError:
dict_label[task_id] = task_id
try:
list_test.index(task_id)
except ValueError:
list_test.append(task_id)
for temp in csv_trello_archived:
task_id = temp['Card URL'][temp['Card URL'].find('c/') + 2:]
if len(task_id) == 0:
continue
try:
test = dict_task[task_id]
task_title = temp['Title'][temp['Title'].find('] ') + 2:]
task_label = temp['Labels']
        # item
dict_task[task_id] = task_title
dict_label[task_id] = task_label
except KeyError:
pass
for temp in csv_trello:
task_id = temp['Card URL'][temp['Card URL'].find('c/') + 2:]
try:
test = dict_task[task_id]
task_title = temp['Title']
task_label = temp['Labels']
        # item
dict_task[task_id] = task_title
dict_label[task_id] = task_label
except KeyError:
pass
write_output.write('id,工項,總工時(秒),總工時(分),總工時(時),總工時(日),\
佔用工作日,總工作日佔比,執行次數,日均執行,標籤\n')
def wfile_output(task_id, task, time, count, labels):
write_output.write('"' + str(task_id) + '",') # id
write_output.write('"' + str(task) + '",') # 工項
write_output.write('"' + str(time) + '",') # 總工時(秒)
write_output.write('"' + str(decimal.Decimal(
time / 60).quantize(decimal.Decimal('0.01'))) + '",') # 總工時(分)
write_output.write('"' + str(
decimal.Decimal(time / 60 / 60).quantize(
decimal.Decimal('0.01'))) + '",') # 總工時(時)
write_output.write('"' + str(
decimal.Decimal(time / 60 / 60 / 24).quantize(
decimal.Decimal('0.01'))) + '",') # 總工時(日)
write_output.write('"' + str(decimal.Decimal(
time / 60 / time_avg_mins).quantize(
decimal.Decimal('0.01'))) + '",') # 佔用工作日
write_output.write('"' + str(decimal.Decimal(
time / 60 / time_avg_mins / count_date).quantize(
decimal.Decimal('0.0001'))) + '",') # 總工作日佔比
write_output.write('"' + str(count) + '",') # 執行次數
write_output.write('"' + str(
decimal.Decimal(count / count_date).quantize(
decimal.Decimal('0.01'))) + '",') # 日均執行
write_output.write('"' + str(labels) + '"\n') # 標籤
time_total_secs = 0
for temp in dict_task:
time_total_secs += dict_time[temp]
time_avg_mins = time_total_secs / 60 / count_date
time_avg_hours = time_total_secs / 60 / 60 / count_date
for temp in dict_task:
wfile_output(
temp,
dict_task[temp],
dict_time[temp],
dict_count[temp],
dict_label[temp])
|
StarcoderdataPython
|
1878168
|
import sys
import os.path
# sys.path.insert(0, os.path.abspath("./simple-dnn"))
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
import scipy.misc
import time
class BaseGAN(object):
""" Base class for Generative Adversarial Network implementation.
"""
def __init__(self,
x_dims, x_ch, y_dim,
generator=None, # Generator Net
discriminator=None, # Discriminator Net
x_reshape=None,
x_scale=None,
x_inverse_scale=None,
z_dim=100,
d_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
g_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
d_label_smooth=0.75,
batch_size=128, iterations=2000,
display_step=100, save_step=1000,
oracle=None,
graph=None, sess=None,
sample_writer=None, # Object of SampleWriter class.
model_directory=None, #Directory to save trained model to.
):
"""
Args:
            x_dims - list; the width and height of image x.
x_ch - int; the number of channels(depth) of input x.
            y_dim - int; number of data labels.
z_dim - int; number of units for the latent variable z.
            generator - a callable or an object with a __call__ method; for creating the G network.
            discriminator - a callable or an object with a __call__ method; for creating the D network.
x_reshape - a callable; for reshaping input. It is advised to rescale input between [-1, 1]
x_scale - a callable; for rescaling input to range between [-1, 1].
x_inverse_scale - callable; for reversing the scale from [-1, 1] to original input range.
d_optimizer - optimizer for D network.
g_optimizer - optimizer for G network.
            d_label_smooth - Desired probability for the real class, to enable one-sided label smoothing
                as suggested in http://papers.nips.cc/paper/6124-improved-techniques-for-training-gans
batch_size - training batch size,
iterations - number of training iterations.
display_step - intervals to display training stats.
save_step - intervals to save trained model.
            oracle - If used, the oracle is a callable for measuring the quality of generated samples.
                It should be a callable or a class with a __call__ method implemented;
                the callable should take (X, reformat=False) as input and return a single float value.
            graph - The tensorflow graph to use. If None, a new graph is created.
            sess - The tensorflow session to use. If None, a new session is created.
sample_writer - Object of SampleWriter class.
model_directory - model saving directory. Defaults is None.
"""
# Data Config
self.x_dims = x_dims
self.x_ch = x_ch
self.y_dim = y_dim
self.z_size = z_dim
self.x_reshape = x_reshape
if x_scale is not None or x_inverse_scale is not None:
# If one is not none the both should be not none
assert x_scale is not None and x_inverse_scale is not None
self.x_scale = x_scale
self.x_inverse_scale = x_inverse_scale
######################## Generator and Discriminator Networks
self.generator = generator
self.discriminator = discriminator
######################## Training config
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.d_label_smooth = d_label_smooth
self.iterations = iterations
self.batch_size = batch_size
self.display_step = display_step
self.save_step = save_step
self.sample_writer = sample_writer
self.model_directory = model_directory
self.oracle = oracle
if graph:
self.graph = graph
else:
self.graph = tf.Graph()
with self.graph.as_default():
self.build_model()
if sess:
self.sess = sess
else:
self.sess = tf.Session()
# To save and restore checkpoints.
self.saver = tf.train.Saver()
def build_model(self):
pass
def fit(self, X, y=None, val_x=None, val_y=None):
pass
def _iter_stats(self, i, start_time, gLoss, dLoss,
xs=None, ys=None, zs=None, ys_fake=None,
val_x=None, val_y=None):
pass
def generate(self, ys=None, n_samples=None):
pass
def x_reformat(self, xs):
""" Rescale and reshape x if x_scale and x_reshape functions are provided.
"""
if self.x_scale is not None:
xs = self.x_scale(xs)
if self.x_reshape is not None:
xs = self.x_reshape(xs)
return xs
def _save_samples(self, i):
if self.sample_writer is None:
return
n_samples = 36
generated_x = self.generate(n_samples)
self.sample_writer.write(generated_x, str(i))
def _next_batch(self, x, y):
start_index = np.random.randint(0, x.shape[0] - self.batch_size)
return x[start_index:(start_index + self.batch_size)], \
y[start_index:(start_index + self.batch_size)]
def _accuracy(self, val_x, val_y, reformat=True):
pred_y = self.predict(val_x, reformat=reformat)
return (np.argmax(val_y, axis=1) == pred_y).mean()
def predict(self, X, reformat=True):
probs = self.predict_prob(X, reformat=reformat)
if self.y_dim == 1:
pred = np.zeros_like(probs)
pred[probs > 0.5] = 1
else:
pred = np.argmax(probs, axis=1)
return pred
def predict_prob(self, X, reformat=True):
self.discriminator.is_training = False
probs_list = []
with self.graph.as_default():
for i in range(0, X.shape[0], self.batch_size):
start = i
end = min(i+self.batch_size, X.shape[0])
if reformat:
xs = self.x_reformat(X[start:end])
else:
xs = X[start:end]
if self.y_dim == 1:
                    probs_list.append(self.sess.run(tf.sigmoid(self.Dx), feed_dict={self.real_in:xs}))
else:
probs_list.append(self.sess.run(tf.nn.softmax(logits=self.Dx), feed_dict={self.real_in:xs}))
self.discriminator.is_training = True
return np.vstack(probs_list)
def save_model(self, model_file_name):
if self.model_directory is None:
return 'ERROR: Model directory is None'
if not os.path.exists(self.model_directory):
os.makedirs(self.model_directory)
return self.saver.save(self.sess, os.path.join(self.model_directory, model_file_name))
def restore_model(self, model_file):
with self.graph.as_default():
self.saver.restore(self.sess, model_file)
def generate(self, n_samples=36, ys=None):
""" Generate samples.
:param n_samples: number of samples to generate if ys is not specified.
"""
if ys is not None:
n_samples = ys.shape[0]
self.discriminator.is_training = False
generated_x_list = []
batch = self.batch_size
for i in range(0, n_samples, batch):
start = i
end = min(i+batch, n_samples)
zs = np.random.uniform(-1.0,1.0,
size=[end-start,self.z_size]).astype(np.float32)
if self.conditional:
if ys is None:
gen_ys = np.random.multinomial(1, [1.0 / float(self.y_dim+1)]*(self.y_dim+1), end-start)
else:
gen_ys = np.concatenate((ys[start:end], np.zeros((end-start, 1))), axis=1)
generated_x_list.append(self.sess.run(self.Gz, feed_dict={self.z_in:zs,
self.real_label:gen_ys}))
else:
generated_x_list.append(self.sess.run(self.Gz, feed_dict={self.z_in:zs}))
generated_x = np.vstack(generated_x_list)
self.discriminator.is_training = True
return self.x_inverse_scale(generated_x) if self.x_inverse_scale is not None \
else generated_x
class MultiClassGAN(BaseGAN):
""" Implementation of Deep Convolutional Conditional Generative Adversarial Network.
"""
def __init__(self,
x_dims, x_ch, y_dim,
generator=None, # Generator Net
discriminator=None, # Discriminator Net
x_reshape=None,
x_scale=None,
x_inverse_scale=None,
z_dim=100,
d_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
g_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
g_loss_fn='default',
g_target=1.0,
d_label_smooth=0.75,
sigmoid_alpha=10,
l2_penalty=0.01,
conditional=False,
batch_size=128, iterations=2000,
display_step=100, save_step=1000,
oracle=None,
graph=None, sess=None,
sample_writer=None, #Directory to save sample images from generator in.
model_directory=None, #Directory to save trained model to.
):
"""
Args:
            x_dims - list; the width and height of image x.
x_ch - int; the number of channels(depth) of input x.
            y_dim - int; number of data labels.
z_dim - int; number of units for the latent variable z.
            generator - a callable or an object with a __call__ method; for creating the G network.
            discriminator - a callable or an object with a __call__ method; for creating the D network.
x_reshape - a callable; for reshaping input. It is advised to rescale input between [-1, 1]
x_scale - a callable; for rescaling input to range between [-1, 1].
x_inverse_scale - callable; for reversing the scale from [-1, 1] to original input range.
d_optimizer - optimizer for D network.
g_optimizer - optimizer for G network.
g_loss_fn - type of loss function used for G. Options include:
['default', 'smoothed', 'sigmoid', 'feature_matching',
'feature_default', 'l2_default', 'least_square']
            g_target - the target probability for generated instances when g_loss_fn='smoothed';
                set to a value < 1.0 to enable G smoothing.
            d_label_smooth - Desired probability for the real class, to enable one-sided label smoothing
                as suggested in http://papers.nips.cc/paper/6124-improved-techniques-for-training-gans
sigmoid_alpha - alpha values when g_loss_fn='sigmoid'
l2_penalty - l2 penalty coefficient when g_loss_fn='l2_default'
batch_size - training batch size,
iterations - number of training iterations.
display_step - intervals to display training stats.
save_step - intervals to save trained model.
            oracle - If used, the oracle is a callable for measuring the quality of generated samples.
                It should be a callable or a class with a __call__ method implemented;
                the callable should take (X, reformat=False) as input and return a single float value.
            graph - The tensorflow graph to use. If None, a new graph is created.
            sess - The tensorflow session to use. If None, a new session is created.
sample_writer - Object of SampleWriter class.
model_directory - model saving directory. Defaults is None.
"""
######################## Training config
assert g_loss_fn in ['default', 'smoothed', 'sigmoid', 'feature_matching',
'feature_default', 'l2_default', 'least_square']
self.g_loss_fn = g_loss_fn
        if self.g_loss_fn == 'feature_matching' or self.g_loss_fn == 'feature_default':
            # Neither matching_layer nor conv_units is exposed by this class, so default
            # to matching on the discriminator's last feature layer (the original assert
            # referenced undefined names and would have raised a NameError here).
            self.matching_layer = -1
self.sigmoid_alpha = sigmoid_alpha
self.g_target = g_target
self.l2_penalty = l2_penalty
self.conditional = conditional
super(MultiClassGAN, self).__init__(
x_dims, x_ch, y_dim, generator=generator, discriminator=discriminator, z_dim=z_dim,
x_reshape=x_reshape, x_scale=x_scale, x_inverse_scale=x_inverse_scale,
d_optimizer=d_optimizer, g_optimizer=g_optimizer, d_label_smooth=d_label_smooth,
batch_size=batch_size, iterations=iterations, display_step=display_step,
save_step=save_step, oracle=oracle, graph=graph, sess=sess,
sample_writer=sample_writer, model_directory=model_directory)
@staticmethod
def sigmoid_cost(input, alpha):
exp = tf.exp(-alpha * (input - 0.5))
return tf.divide(1.0, 1 + exp)
def build_model(self):
with self.graph.as_default():
# Placeholders
self.z_in = tf.placeholder(name='z_in', shape=[None,self.z_size], dtype=tf.float32) #Random vector
self.real_in = tf.placeholder(name='real_in',
shape=[None] + self.x_dims + [self.x_ch], dtype=tf.float32) #Real images
self.real_label = tf.placeholder(name='real_label',
shape=[None, self.y_dim + 1], dtype=tf.float32) #real image labels
self.fake_label = tf.placeholder(name='fake_label',
shape=[None, self.y_dim + 1], dtype=tf.float32) #fake image labels
# One side D label smoothing
self.real_label = self.real_label * self.d_label_smooth
self.Gz = self.generator(self.z_in, ys=self.real_label if self.conditional else None) # Condition generator on real labels
self.Dx, fm_layer_x = self.discriminator(
self.real_in, logits=True,
matching_layer=self.matching_layer if self.g_loss_fn == 'feature_matching' else None)
self.Dg, fm_layer_g = self.discriminator(
self.Gz, reuse=True, logits=True,
matching_layer=self.matching_layer if self.g_loss_fn == 'feature_matching' else None)
Dx_softmax = tf.nn.softmax(logits=self.Dx)
Dg_softmax = tf.nn.softmax(logits=self.Dg)
# d_loss and g_loss together define the optimization objective of the GAN.
if self.g_loss_fn == 'least_square':
ls_dx = 0.5 * tf.reduce_mean(tf.square(tf.subtract(Dx_softmax, self.real_label)))
ls_dg = 0.5 * tf.reduce_mean(tf.square(tf.subtract(Dg_softmax, self.fake_label)))
self.d_loss = ls_dx + ls_dg
else:
d_loss_real = tf.nn.softmax_cross_entropy_with_logits(logits=self.Dx,
labels=self.real_label)
d_loss_fake = tf.nn.softmax_cross_entropy_with_logits(logits=self.Dg,
labels=self.fake_label)
self.d_loss = tf.reduce_mean(d_loss_real + d_loss_fake)
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]
if self.g_loss_fn == 'smoothed':
self.g_loss = -tf.reduce_mean(
(1 - self.g_target) * tf.log(Dg_softmax[:, -1]) +
self.g_target * tf.log(1. - Dg_softmax[:, -1])
)
elif self.g_loss_fn == 'sigmoid':
self.g_loss = -tf.reduce_mean(tf.log(1 - MultiClassGAN.sigmoid_cost(
Dg_softmax[:, -1], self.sigmoid_alpha)))
elif self.g_loss_fn == 'feature_matching':
self.g_loss = tf.reduce_mean(tf.square(tf.subtract(fm_layer_x, fm_layer_g)))
elif self.g_loss_fn == 'feature_default':
self.g_loss = -tf.reduce_mean(tf.log(1. - Dg_softmax[:, -1])) + \
tf.reduce_mean(tf.square(tf.subtract(fm_layer_x, fm_layer_g)))
elif self.g_loss_fn == 'l2_default':
g_l2_loss = 0.
for w in g_vars:
g_l2_loss += (self.l2_penalty * tf.reduce_mean(tf.nn.l2_loss(w)))
self.g_loss = -tf.reduce_mean(tf.log(1. - Dg_softmax[:, -1])) + g_l2_loss
elif self.g_loss_fn == 'least_square': # based on https://arxiv.org/abs/1611.04076
self.g_loss = 0.5 * tf.reduce_mean(tf.square((1. - Dg_softmax[:, -1]) - 1))
else:
self.g_loss = -tf.reduce_mean(tf.log(1. - Dg_softmax[:, -1]))
# Compute gradients
trainerD = self.d_optimizer
trainerG = self.g_optimizer
d_grads = trainerD.compute_gradients(self.d_loss, d_vars) #Only update the weights for the discriminator network.
g_grads = trainerG.compute_gradients(self.g_loss, g_vars) #Only update the weights for the generator network.
            ## For Debugging
d_grads_decomposed, _ = list(zip(*d_grads))
g_grads_decomposed, _ = list(zip(*g_grads))
self.d_grad_norm = tf.global_norm(d_grads_decomposed)
self.g_grad_norm = tf.global_norm(g_grads_decomposed)
self.d_w_norm = tf.global_norm(d_vars)
self.g_w_norm = tf.global_norm(g_vars)
##
self.update_D = trainerD.apply_gradients(d_grads)
self.update_G = trainerG.apply_gradients(g_grads)
def _iter_stats(self, i, start_time, gLoss, dLoss,
xs=None, ys=None, zs=None, ys_fake=None,
val_x=None, val_y=None):
d_grad_norm, g_grad_norm, oracle_x, d_w_norm, g_w_norm = self.sess.run(
(self.d_grad_norm, self.g_grad_norm, self.Gz, self.d_w_norm, self.g_w_norm),
feed_dict={self.z_in:zs, self.real_in:xs,
self.real_label:ys, self.fake_label:ys_fake})
tr_acc = None
if xs is not None and ys is not None and ys_fake is not None:
tr_x = np.concatenate((xs, oracle_x), axis=0)
tr_y = np.concatenate((ys, ys_fake), axis=0)
tr_acc = self._accuracy(tr_x, tr_y, reformat=False)
v_acc = None
if val_x is not None and val_y is not None:
v_acc = self._accuracy(val_x, val_y)
oracle_acc = None
if self.oracle is not None:
oracle_acc = self.oracle(oracle_x)
if i == 0:
print('{0:5}| {1:6}| {2:5}| {3:4}| {4:6}| {5:6}| {6:6}| {7:5}| {8:4}| {9:6}| {10:6}'.format(
'i', 'GLOSS', 'DLOSS', 'TIME', 'GGRAD', 'DGRAD', 'TR_ACC','V_ACC', 'ORA', 'DW', 'GW'))
print('{0:5}| {1:5.3}| {2:5.3}| {3:4}s| {4}| {5}| {6}| {7}| {8}| {9}| {10}'.format(
i, gLoss, dLoss, int(time.time()-start_time),
' ' if g_grad_norm is None else '{:6.4}'.format(g_grad_norm),
' ' if d_grad_norm is None else '{:6.4}'.format(d_grad_norm),
' ' if tr_acc is None else '{:6.3}'.format(tr_acc),
' ' if v_acc is None else '{:5.3}'.format(v_acc),
' ' if oracle_acc is None else '{:4.2}'.format(oracle_acc),
' ' if d_w_norm is None else '{:6.4}'.format(d_w_norm),
' ' if g_w_norm is None else '{:6.4}'.format(g_w_norm)))
def fit(self, X, y=None, val_x=None, val_y=None):
start = time.time()
self.discriminator.is_training = True
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
for i in range(self.iterations):
zs = np.random.uniform(-1.0, 1.0,
size=[self.batch_size, self.z_size]).astype(np.float32)
xs, ys = self._next_batch(X, y)
xs = self.x_reformat(xs)
# Create space for the fake class label for the real data labels
ys = np.concatenate((ys, np.zeros_like(ys[:,0])[:,None]), axis=1)
# Create the labels for the generated data.
ys_fake = np.zeros_like(ys)
ys_fake[:,-1] = 1
_, dLoss = self.sess.run(
[self.update_D, self.d_loss],
feed_dict={self.z_in:zs, self.real_in:xs,
self.real_label:ys, self.fake_label:ys_fake})
_, gLoss = self.sess.run(
[self.update_G, self.g_loss],
feed_dict={self.z_in:zs, self.real_in:xs, self.real_label:ys})
if i % self.display_step == 0:
self._iter_stats(i, start, gLoss, dLoss,
xs=xs, ys=ys, zs=zs, ys_fake=ys_fake,
val_x=val_x, val_y=val_y)
self._save_samples(i)
if i % self.save_step == 0 and i != 0 and self.model_directory is not None:
self.save_model('model-'+str(i)+'.cptk')
print("Saved Model")
self._iter_stats(i, start, gLoss, dLoss,
xs=xs, ys=ys, zs=zs, ys_fake=ys_fake,
val_x=val_x, val_y=val_y)
self._save_samples(i)
if self.model_directory is not None:
self.save_model('model-'+str(i)+'.cptk')
print("Saved Model")
self.discriminator.is_training = False
class FlatGAN(BaseGAN):
""" Implementation of Deep Convolutional Conditional Generative Adversarial Network.
"""
def __init__(self,
x_dims, x_ch, y_dim,
generator=None, # Generator Net
discriminator=None, # Discriminator Net
x_reshape=None,
x_scale=None,
x_inverse_scale=None,
z_dim=100,
d_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
g_optimizer=tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5),
d_label_smooth=1.,
batch_size=128, iterations=2000,
display_step=100, save_step=1000,
d_iter=1, # number of discriminator update for each generator update
conditional=False,
oracle=None,
graph=None, sess=None,
sample_writer=None, #Directory to save sample images from generator in.
model_directory=None, #Directory to save trained model to.
):
######################## Training config
self.d_iter = d_iter
self.conditional = conditional
super(FlatGAN, self).__init__(
x_dims, x_ch, y_dim, generator=generator, discriminator=discriminator, z_dim=z_dim,
x_reshape=x_reshape, x_scale=x_scale, x_inverse_scale=x_inverse_scale,
d_optimizer=d_optimizer, g_optimizer=g_optimizer, d_label_smooth=d_label_smooth,
batch_size=batch_size, iterations=iterations, display_step=display_step,
save_step=save_step, oracle=oracle, graph=graph, sess=sess,
sample_writer=sample_writer, model_directory=model_directory)
def build_model(self):
with self.graph.as_default():
n_features = 1
for dim in self.x_dims:
n_features *= dim
n_features *= self.x_ch
# Placeholders
self.z_in = tf.placeholder(shape=[None,self.z_size], dtype=tf.float32)
self.real_in = tf.placeholder(
shape=[None, n_features], dtype=tf.float32) #Real samples
self.real_label = tf.placeholder(
shape=[None, self.y_dim + (0 if self.y_dim == 1 else 1)], dtype=tf.float32) #real sample labels
self.fake_label = tf.placeholder(
shape=[None, self.y_dim + (0 if self.y_dim == 1 else 1)], dtype=tf.float32) #fake sample labels
# One side D label smoothing
self.real_label = self.real_label * self.d_label_smooth
# Condition generator on real labels
self.Gz = self.generator(self.z_in, ys=self.real_label if self.conditional else None)
self.Dx, _ = self.discriminator(self.real_in, logits=True)
self.Dg, _ = self.discriminator(self.Gz, reuse=True, logits=True)
if self.y_dim == 1:
Dg_softmax = tf.sigmoid(self.Dg)
else:
Dg_softmax = tf.nn.softmax(logits=self.Dg)
# D Loss
d_loss_real = tf.nn.softmax_cross_entropy_with_logits(logits=self.Dx,
labels=self.real_label)
d_loss_fake = tf.nn.softmax_cross_entropy_with_logits(logits=self.Dg,
labels=self.fake_label)
self.d_loss = tf.reduce_mean(d_loss_real + d_loss_fake)
# G Loss
if self.y_dim == 1:
self.g_loss = -tf.reduce_mean(tf.log(Dg_softmax))
else:
self.g_loss = -tf.reduce_mean(tf.log(1. - Dg_softmax[:, -1]))
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]
# Compute gradients
trainerD = self.d_optimizer
trainerG = self.g_optimizer
d_grads = trainerD.compute_gradients(self.d_loss, d_vars) #Only update the weights for the discriminator network.
g_grads = trainerG.compute_gradients(self.g_loss, g_vars) #Only update the weights for the generator network.
            ## For Debugging
d_grads_decomposed, _ = list(zip(*d_grads))
g_grads_decomposed, _ = list(zip(*g_grads))
self.d_grad_norm = tf.global_norm(d_grads_decomposed)
self.g_grad_norm = tf.global_norm(g_grads_decomposed)
self.d_w_norm = tf.global_norm(d_vars)
self.g_w_norm = tf.global_norm(g_vars)
##
self.update_D = trainerD.apply_gradients(d_grads)
self.update_G = trainerG.apply_gradients(g_grads)
def _iter_stats(self, i, start_time, gLoss, dLoss,
xs=None, ys=None, zs=None, ys_fake=None,
val_x=None, val_y=None):
d_grad_norm, g_grad_norm, oracle_x, d_w_norm, g_w_norm = self.sess.run(
(self.d_grad_norm, self.g_grad_norm, self.Gz, self.d_w_norm, self.g_w_norm),
feed_dict={self.z_in:zs, self.real_in:xs,
self.real_label:ys, self.fake_label:ys_fake})
tr_acc = None
if xs is not None and ys is not None and ys_fake is not None:
tr_x = np.concatenate((xs, oracle_x), axis=0)
tr_y = np.concatenate((ys, ys_fake), axis=0)
tr_acc = self._accuracy(tr_x, tr_y, reformat=False)
v_acc = None
if val_x is not None and val_y is not None:
v_acc = self._accuracy(val_x, val_y)
oracle_acc = None
if self.oracle is not None:
oracle_acc = self.oracle(oracle_x)
if i == 0:
print ('{0:5}| {1:6}| {2:5}| {3:4}| {4:6}| {5:6}| {6:6}| {7:5}| {8:4}| {9:6}| {10:6}'.format(
'i', 'GLOSS', 'DLOSS', 'TIME', 'GGRAD', 'DGRAD', 'TR_ACC','V_ACC', 'ORA', 'DW', 'GW'))
print('{0:5}| {1:5.3}| {2:5.3}| {3:4}s| {4}| {5}| {6}| {7}| {8}| {9}| {10}'.format(
i, gLoss, dLoss, int(time.time()-start_time),
' ' if g_grad_norm is None else '{:6.4}'.format(g_grad_norm),
' ' if d_grad_norm is None else '{:6.4}'.format(d_grad_norm),
' ' if tr_acc is None else '{:6.3}'.format(tr_acc),
' ' if v_acc is None else '{:5.3}'.format(v_acc),
' ' if oracle_acc is None else '{:4.2}'.format(oracle_acc),
' ' if d_w_norm is None else '{:6.4}'.format(d_w_norm),
' ' if g_w_norm is None else '{:6.4}'.format(g_w_norm)))
def fit(self, X, y=None, val_x=None, val_y=None):
start = time.time()
self.discriminator.is_training = True
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
for i in range(self.iterations):
for j in range(self.d_iter):
zs = np.random.uniform(-1.0, 1.0,
size=[self.batch_size, self.z_size]).astype(np.float32)
xs, ys = self._next_batch(X, y)
xs = self.x_reformat(xs)
if self.y_dim != 1:
# Create space for the fake class label for the real data labels
ys = np.concatenate((ys, np.zeros_like(ys[:,0])[:,None]), axis=1)
# Create the labels for the generated data.
ys_fake = np.zeros_like(ys)
if self.y_dim != 1:
ys_fake[:,-1] = 1
_, dLoss = self.sess.run(
[self.update_D, self.d_loss],
feed_dict={self.z_in:zs, self.real_in:xs,
self.real_label:ys, self.fake_label:ys_fake})
_, gLoss = self.sess.run(
[self.update_G, self.g_loss],
feed_dict={self.z_in:zs, self.real_in:xs, self.real_label:ys})
if i % self.display_step == 0:
self._iter_stats(i, start, gLoss, dLoss,
xs=xs, ys=ys, zs=zs, ys_fake=ys_fake,
val_x=val_x, val_y=val_y)
self._save_samples(i)
if i % self.save_step == 0 and i != 0 and self.model_directory is not None:
self.save_model('model-'+str(i)+'.cptk')
print("Saved Model")
self._iter_stats(i, start, gLoss, dLoss,
xs=xs, ys=ys, zs=zs, ys_fake=ys_fake,
val_x=val_x, val_y=val_y)
self._save_samples(i)
if self.model_directory is not None:
self.save_model('model-'+str(i)+'.cptk')
print("Saved Model")
self.discriminator.is_training = False
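
# Illustrative usage sketch (an assumption, not part of the original module):
# wiring a MultiClassGAN together. `make_generator` and `make_discriminator`
# stand for any callables with the __call__ interfaces the classes above expect,
# and `x_train`/`y_train` are images with one-hot labels; none of these names
# come from the original code.
def _example_train_multiclass_gan(make_generator, make_discriminator, x_train, y_train):
    gan = MultiClassGAN(
        x_dims=[32, 32], x_ch=3, y_dim=10,
        generator=make_generator(),
        discriminator=make_discriminator(),
        x_scale=lambda x: (x / 127.5) - 1.0,          # map [0, 255] to [-1, 1]
        x_inverse_scale=lambda x: (x + 1.0) * 127.5,  # map back for saved samples
        z_dim=100, batch_size=64, iterations=1000,
        display_step=100, save_step=500,
    )
    gan.fit(x_train, y_train)
    # Generated samples come back in the original pixel range via x_inverse_scale.
    return gan.generate(n_samples=16)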
|
StarcoderdataPython
|
6523447
|
from django.core.cache import cache
def get_cache_key(document, revision):
cache_key = "discussion_length_{}_{}".format(document.pk, revision)
return cache_key
def get_discussion_length(revision):
"""Get the number of remarkes on a revision.
This is a helper method to return a cached value. Settings the cache must
be done elsewhere. (Currently in discussion/signals.py/update_cache.)
"""
cache_key = get_cache_key(revision.document, revision.revision)
length = cache.get(cache_key, 0)
return length
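
# Illustrative sketch (an assumption, not the project's actual signals.py): one
# way the cache read above could be kept up to date when a remark is saved.
# `note.document`, `note.revision`, and the `note_set` related name are
# hypothetical field names.
def _example_update_cache(note):
    cache_key = get_cache_key(note.document, note.revision)
    count = note.document.note_set.filter(revision=note.revision).count()
    # Store the recomputed remark count so get_discussion_length() stays cheap.
    cache.set(cache_key, count)
    return count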
|
StarcoderdataPython
|
3543450
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from itertools import product
from unittest.mock import MagicMock
import numpy as np
from ax.models.model_utils import best_observed_point, check_duplicate
from ax.utils.common.testutils import TestCase
class ModelUtilsTest(TestCase):
def setUp(self):
pass
def testBestObservedPoint(self):
model = MagicMock()
X1 = np.array(list(product(np.arange(0.0, 10.0), np.arange(0.0, 10.0))))
X2 = np.array(list(product(np.arange(5.0, 15.0), np.arange(5.0, 15.0))))
# Overlap of 5x5=25 points
X3 = np.array(list(product(np.arange(20.0, 30.0), np.arange(20.0, 30.0))))
# X3 not used in objective or constraints
model.Xs = [X1, X2, X3]
bounds = [(0.0, 8.0), (0.0, 8.0)] # Filters to 4x4=16 points
fixed_features = {1: 6.0} # Filters to 4 points
linear_constraints = (
np.array([[2.0, 2.0], [0.0, 1.0]]),
np.array([[27.0], [7.0]]),
)
# Filters to 3
objective_weights = np.array([-1.0, 1.0, 0.0])
outcome_constraints = (
np.array([[0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]),
np.array([[10.0], [24.0]]),
)
# f and cov constructed to give objectives [0, 4, 6] and pfeas [1, 0.5, 0.25]
f = np.array([[1.0, 1.0, -1.0], [6.0, 10.0, -1.0], [5.0, 11.0, -1.0]])
cov = np.tile(np.diag([1, 1, 1]), (3, 1, 1))
model.predict.return_value = (f, cov)
# Test with defaults
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
)
X_obs = model.predict.mock_calls[0][1][0]
self.assertEqual(X_obs.shape, (3, 2))
self.assertTrue(np.array_equal(X_obs[1, :], xbest)) # 1 should be best
# Test with specified utility baseline
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
options={"utility_baseline": 4.0},
)
X_obs = model.predict.mock_calls[1][1][0]
self.assertEqual(X_obs.shape, (3, 2))
self.assertTrue(np.array_equal(X_obs[2, :], xbest)) # 2 should be best
# Test with feasibility threshold
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
options={"best_point_method": "feasible_threshold"},
)
X_obs = model.predict.mock_calls[2][1][0]
self.assertEqual(X_obs.shape, (3, 2))
self.assertTrue(np.array_equal(X_obs[0, :], xbest)) # 0 should be best
# Parameter infeasible
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features={1: 100},
options={"best_point_method": "feasible_threshold"},
)
self.assertIsNone(xbest)
# Outcome infeasible
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=(np.array([[1.0, 0.0, 0.0]]), np.array([[-100.0]])),
linear_constraints=linear_constraints,
fixed_features=fixed_features,
options={"best_point_method": "feasible_threshold"},
)
self.assertIsNone(xbest)
# No objective.
with self.assertRaises(ValueError):
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=np.zeros(3),
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features={1: 100},
options={"method": "feasible_threshold"},
)
with self.assertRaises(ValueError):
delattr(model, "Xs")
xbest = best_observed_point(
model=model,
bounds=bounds,
objective_weights=np.zeros(3),
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features={1: 100},
options={"method": "feasible_threshold"},
)
def testCheckDuplicate(self):
duplicate_point = np.array([0, 1])
not_duplicate_point = np.array([9, 9])
points = np.array([[0, 1], [0, 2], [0, 1]])
self.assertTrue(check_duplicate(duplicate_point, points))
self.assertFalse(check_duplicate(not_duplicate_point, points))
|
StarcoderdataPython
|
11273721
|
<gh_stars>0
"""
This module contains constants representing the kinds of user that can be logged in, based on their roles and permissions.
"""
from .role_kinds import ADMIN # noqa F401
from .role_kinds import ASSIGNABLE_COACH # noqa F401
from .role_kinds import COACH # noqa F401
LEARNER = 'learner'
SUPERUSER = 'superuser'
ANONYMOUS = 'anonymous'
CAN_MANAGE_CONTENT = 'can manage content'
|
StarcoderdataPython
|