hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7945cc3e4f19d33f11a3deee9cf47e0ab17f6c2f | 7,862 | py | Python | baseline.py | renll/MonoDemand | e3e5b8ffa4db53fc7203579eed68ca6b620bc508 | [
"Apache-2.0"
] | null | null | null | baseline.py | renll/MonoDemand | e3e5b8ffa4db53fc7203579eed68ca6b620bc508 | [
"Apache-2.0"
] | null | null | null | baseline.py | renll/MonoDemand | e3e5b8ffa4db53fc7203579eed68ca6b620bc508 | [
"Apache-2.0"
] | null | null | null | # import some libraries
import numpy as np
import pandas as pd
import statsmodels.api as sm
import random
from scipy.stats import t, f
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from sklearn import metrics
from sklearn import decomposition
from sklearn import manifold
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import copy
import random
import time
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
# Generate the simulated data
# Initialize seed and parameters
# number of data points
n_S = 1000000
n_T = int(0.001 * n_S)
M = 14
# Model parameters
a = np.asarray([[1.1, -0.1, 0, 0.1, 0, 0.2, 0, 0.1, -0.1, 0, 0, 0.1, -0.1, 0.2, -0.2]])
b = (-1) * np.asarray([[0.5, 0.1, -0.1, 0, 0, 0, 0, 0.2, 0.1, 0.2, 0, 0.2, -0.1, -0.2, 0]])
# independent variable in Sorce domain
mu_S = np.repeat(1, M)
cov_S = 0.2 * np.identity(M, dtype=float)
X0_S = np.random.multivariate_normal(mu_S, cov_S, n_S)
p_S = np.random.uniform(low=0.2, high=2.0, size=n_S)
# add column of ones for intercept
X_S = sm.add_constant(X0_S)
print(X_S.shape)
print(a.shape)
# dependent variable (i.e., demand ) in Sorce domain
d_S = a @ X_S.T+ (b @ X_S.T) * p_S + np.random.normal(0,0.1, n_S)
# revenue
r_S = d_S * p_S
# independent variable in Target domain
#mu_T = np.repeat(0, M)
#cov_T = 0.05 * np.identity(M, dtype=float)
#X0_T = np.random.multivariate_normal(mu_T, cov_T, n_T)
df_T = 10
X0_T = stats.chi2.rvs(df_T, size=(n_T,M))
p_T = np.random.uniform(low=0.2, high=2.0, size=n_T)
# add column of ones for intercept
X_T = sm.add_constant(X0_T)
X_T[:,8:]=0
print(X_T.shape)
print(a.shape)
# dependent variable (i.e., demand ) in Target domain
d_T = a @ X_T.T+ (b @ X_T.T) * p_T + np.random.normal(0,0.1, n_T)
# revenue
r_T = d_T * p_T
def rescale(d_S):
return (d_S-d_S.min())/(d_S.max()-d_S.min())
d_S =rescale(d_S)
d_T=rescale(d_T)
p_S = rescale(p_S)
p_T =rescale(p_T)
print(X_T.shape,p_T.shape)
print(d_S.max(),d_S.min())
#res = stats.linregress(np.concatenate((X_T,np.expand_dims(p_T,axis=1)),axis=1),d_T.T)
d_S=torch.tensor(d_S).transpose(0,1).float()
p_S=torch.tensor(p_S).unsqueeze(1).float()
x_S=torch.tensor(X_S).float()
d_T=torch.tensor(d_T).transpose(0,1).float()
p_T=torch.tensor(p_T).unsqueeze(1).float()
x_T=torch.tensor(X_T).float()
d_S = torch.cat([d_S,torch.zeros_like(d_S)],dim=-1)
d_T = torch.cat([d_T,torch.ones_like(d_T)],dim=-1)
d= torch.cat([d_S,d_T], dim=0)
p= torch.cat([p_S,p_T], dim=0)
x= torch.cat([x_S,x_T], dim=0)
print(d.shape ,p.shape, x.shape)
pdS_dataset = data.TensorDataset(torch.cat([p_S,x_S],dim=-1), d_S)
pdT_dataset = data.TensorDataset(torch.cat([p_T,x_T],dim=-1), d_T)
VALID_RATIO = 0.8
n_train_examples = int(d_S.shape[0] * VALID_RATIO)
n_valid_examples = (d_S.shape[0] - n_train_examples)//2
n_test_examples = (d_S.shape[0] - n_train_examples)//2
pdS_train, pdS_valid, pdS_test= data.random_split(pdS_dataset,
[n_train_examples, n_valid_examples,n_test_examples])
VALID_RATIO = 0.8
n_train_examples = int(d_T.shape[0] * VALID_RATIO)
n_valid_examples = (d_T.shape[0] - n_train_examples)//2
n_test_examples = (d_T.shape[0] - n_train_examples)//2
pdT_train, pdT_valid, pdT_test= data.random_split(pdT_dataset,
[n_train_examples, n_valid_examples,n_test_examples])
pd_train = pdT_train
pd_valid = pdT_valid
pd_test = pdT_test
lamb = 0.1
def train(model, iterator, optimizer, criterion, device):
epoch_loss = 0
model.train()
epoch_rl = 0
epoch_el = 0
epoch_dl = 0
epoch_gl = 0
for (x, y) in iterator:
x = x.to(device)
y = y.to(device)
optimizer.zero_grad()
R, d_hat = model(x)
r_loss = (R).mean()
est_loss = criterion[0](d_hat, y[:,:1])
#dom_loss = criterion[1](dom_cls, y[:,1:])
#grad_loss = 1e6*grad_loss
loss = est_loss#+dom_loss#+grad_loss
loss.backward()
optimizer.step()
if r_loss >1000:
print(r_loss)
epoch_loss += loss.item()
epoch_rl += r_loss.item()
epoch_el += est_loss.item()
#epoch_dl += dom_loss.item()
#epoch_gl += grad_loss.item()
print('train', epoch_rl/len(iterator), epoch_el/len(iterator), epoch_dl/len(iterator),epoch_gl/len(iterator))
return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion, device):
epoch_loss = 0
model.eval()
epoch_rl = 0
epoch_el = 0
epoch_dl = 0
with torch.no_grad():
for (x, y) in iterator:
x = x.to(device)
y = y.to(device)
R, d_hat = model(x)
r_loss = (R).mean()
est_loss = criterion[0](d_hat, y[:,:1])
#dom_loss = criterion[1](dom_cls, y[:,1:])
#loss = -lamb*r_loss+est_loss
#epoch_loss += loss.item()
epoch_rl += r_loss.item()
epoch_el += est_loss.item()
#epoch_dl += dom_loss.item()
print('val', epoch_rl/len(iterator), epoch_el/len(iterator), epoch_dl/len(iterator))
return epoch_loss / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
BATCH_SIZE = 64
train_data, valid_data, test_data = pd_train, pd_valid, pd_test
train_iterator = data.DataLoader(train_data,
shuffle = True,
batch_size = BATCH_SIZE)
valid_iterator = data.DataLoader(valid_data,
batch_size = BATCH_SIZE)
test_iterator = data.DataLoader(test_data,
batch_size = BATCH_SIZE)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
def init_weight(l):
if isinstance(l,nn.Linear):
nn.init.normal_(l.weight,mean=0,std=0.02)
#l.bias.data.fill_(0)
class Hack(nn.Module):
def __init__(self,):
super().__init__()
self.l1 = nn.Linear(15,1,bias=False)
self.l2= nn.Linear(15,1,bias=False)
def forward(self,x):
p=x[:,0].unsqueeze(1)
xx=x[:,1:]
a = self.l2(xx)
b = self.l1(xx)
x = b+a*p
p_opt= -b/(2*a)
r = (p_opt*a+b)*p_opt
return r, x
model=Hack()
#model=Model()
model.apply(init_weight)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'The model has {count_parameters(model):,} trainable parameters')
EPOCHS = 20
optimizer = optim.AdamW(model.parameters(),lr=1e-5,weight_decay=0.01)
#criterion = nn.L1Loss()
model = model.to(device)
criterion = (nn.MSELoss().to(device), nn.BCELoss().to(device))
best_valid_loss = float('inf')
model_name = "baseline.pt"
for epoch in range(EPOCHS):
start_time = time.monotonic()
train_loss = train(model, train_iterator, optimizer, criterion, device)
valid_loss = evaluate(model, valid_iterator, criterion, device)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_name)
end_time = time.monotonic()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} ')
print(f'\t Val. Loss: {valid_loss:.3f} ')
model.load_state_dict(torch.load(model_name))
test_loss= evaluate(model, test_iterator, criterion, device)
print(f'Test Loss: {test_loss:.3f}')
| 25.69281 | 113 | 0.641313 |
7945cc9d79842654f374503cc3b7cc3645e24008 | 6,915 | py | Python | main.py | IsharMhzn/Asuma | d92d427caec378f9c9e5ecea0ee07f2a1b31fae7 | [
"MIT"
] | null | null | null | main.py | IsharMhzn/Asuma | d92d427caec378f9c9e5ecea0ee07f2a1b31fae7 | [
"MIT"
] | null | null | null | main.py | IsharMhzn/Asuma | d92d427caec378f9c9e5ecea0ee07f2a1b31fae7 | [
"MIT"
] | null | null | null | import discord
import requests
import json
from itertools import cycle
from PIL import Image, ImageDraw
from io import BytesIO
from utils.languages import LANGUAGES
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from dotenv import load_dotenv
load_dotenv()
import os
from google_trans_new import google_translator
translator = google_translator()
from keep_alive import keep_alive
from discord.ext import commands, tasks
client = commands.Bot(command_prefix = '#')
statuses = cycle(["Kaun Banega Crorepati", "with life", "chess"])
client.remove_command("help")
#events
@client.event
async def on_ready():
change_status.start()
print("Bot is online")
#tasks
@tasks.loop(seconds = 60)
async def change_status():
await client.change_presence(status=discord.Status.online, activity = discord.Game(next(statuses)))
#commands
@client.command(help="Returns latency")
async def ping(ctx):
await ctx.send(f'**PONG!** {round(client.latency * 1000)}ms')
def get_quote():
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
quote = json_data[0]['q'] + ' -' + json_data[0]['a']
return quote
@client.group(invoke_without_command=True)
async def help(ctx):
embed = discord.Embed(title = "Help", colour = discord.Colour.orange())
embed.add_field(name = "#ping", value = "Returns latency", inline = False)
embed.add_field(name = "#inspire", value = "Returns an inspiring quote", inline = False)
embed.add_field(name = "#clear [number]", value = "Clears latest messages (for moderators only)", inline = False)
embed.add_field(name = "#warn @[username] (reason)", value = "Gives warning to users", inline = False)
embed.add_field(name = "#wanted @[username] (reason)", value = "Displays a wanted poster", inline = False)
embed.add_field(name = "#rip @[username]", value = "Displays a RIP picture", inline = False)
embed.add_field(name = "#whois @[username]", value = "Gives info about a user", inline = False)
embed.add_field(name = "#meme [optional:subreddit]", value = "Displays a meme", inline = False)
embed.add_field(name = "#tictactoe @[username] @[username]", value = "Starts a tictactoe game", inline = False)
embed.add_field(name = "#place [int]", value = "Places a cross or a circle in the tictactoe game", inline = False)
embed.add_field(name = "#translate [from] [to] [text]", value = "Translates text from one language to another", inline = False)
embed.add_field(name = "#help translate", value = "Shows help for translation", inline = False)
embed.add_field(name = "#random <search_text> <amount>", value = "Returns random number of queried image", inline = False)
embed.add_field(name = "#guesser", value = "Starts the guesser game", inline = False)
embed.add_field(name = "#guess <word>", value = "For guessing word in the game", inline = False)
embed.add_field(name = "#scoreboard", value = "Shows scoreboard for the guesser game", inline = False)
await ctx.send(embed=embed)
@client.command()
async def translate(ctx, fromlang, tolang, *, message):
translated = translator.translate(message,lang_src=fromlang,lang_tgt=tolang, pronounce=True)
embed = discord.Embed(colour = discord.Colour.orange())
embed.add_field(name = "Input text", value = message, inline = False)
embed.add_field(name = "Translated text", value = translated[0] + " ( "+ translated[2]+ " )", inline = False)
await ctx.send(embed=embed)
@help.command()
async def translate(ctx):
await ctx.send("Languages Supported for Translation")
await ctx.send(LANGUAGES)
@client.command(aliases =['motivate'])
async def inspire(ctx):
quote = get_quote()
await ctx.send(quote)
@commands.has_role('Moderators')
@client.command()
async def clear(ctx, amount : int):
await ctx.channel.purge(limit = amount+1)
@commands.has_role('Moderators')
@client.command()
async def warn(ctx,userr, *, msg=" "):
await clear(ctx,0)
user = userr
reason = msg
await ctx.send(f"{user} You've been WARNED {reason}.")
@client.command()
async def summon(ctx,userr):
await clear(ctx,0)
user = userr
await ctx.send(f"{user}, You've been SUMMONED.")
@commands.has_role('Moderators')
@client.command()
async def say(ctx, *, msg):
await clear(ctx,0)
await ctx.send(msg)
@client.command()
async def wanted(ctx, user: discord.Member = None, *, msg):
await clear(ctx,0)
if user==None:
await ctx.send("No user included")
wanted = Image.open("assets/wantedBackG.jpg")
asset = user.avatar_url_as(size = 128)
data = BytesIO(await asset.read())
ppic = Image.open(data)
ppic = ppic.resize((165,165))
wanted.paste(ppic,(110,195))
write = ImageDraw.Draw(wanted)
write.text((70,380), msg, (0,0,0))
wanted.save("temp.jpg")
await ctx.send(file = discord.File("temp.jpg"))
@client.command()
async def rip(ctx, user: discord.Member = None):
await clear(ctx,0)
if user==None:
await ctx.send("No user included")
wanted = Image.open("assets/ripBackG.png")
asset = user.avatar_url_as(size = 128)
data = BytesIO(await asset.read())
ppic = Image.open(data)
ppic = ppic.resize((200,200))
wanted.paste(ppic,(304,346))
wanted.save("temp.jpg")
await ctx.send(file = discord.File("temp.jpg"))
@client.command()
async def whois(ctx, member:discord.Member):
embed = discord.Embed(title = member.name, description = member.mention, color = discord.Colour.green())
embed.add_field(name = "ID", value=member.id, inline = True)
embed.set_thumbnail(url = member.avatar_url)
embed.set_footer(icon_url = ctx.author.avatar_url, text = f"Requested by {ctx.author.name}")
await ctx.send(embed = embed)
#error handling
@client.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send("Command not Found!")
@clear.error
async def clear_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Please also include the amount of messages to delete!")
#cogs
@commands.has_role('Moderators')
@client.command()
async def load(ctx, extension):
client.load_extension(f'cogs.{extension}')
await ctx.send("Loaded Successfully")
@commands.has_role('Moderators')
@client.command()
async def unload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
await ctx.send("Unloaded Successfully")
@commands.has_role('Moderators')
@client.command()
async def reload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
client.load_extension(f'cogs.{extension}')
await ctx.send("Reloaded Successfully")
for filename in os.listdir("./cogs"):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
keep_alive()
client.run(os.getenv('TOKEN'))
| 36.587302 | 131 | 0.698337 |
7945cdebf21aa607c17c132051496bfb37aab736 | 211 | py | Python | portfolio/views.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | portfolio/views.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | portfolio/views.py | 31-13/portfolio | 86d69abc05ead28823db5def49622f04af0ebfd2 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Project
# Create your views here.
def home(request):
projects = Project.objects.all()
return render(request, 'home.html', {"project": projects})
| 23.444444 | 62 | 0.729858 |
7945cea94ec6d8b133b943dd1496efdc86bd6cc1 | 1,092 | py | Python | src/poo_functions/dictionaries_functions.py | alfmorais/poo-python | 78d09ee6fe9595b445c152f270c9dcf6f3e1d242 | [
"MIT"
] | null | null | null | src/poo_functions/dictionaries_functions.py | alfmorais/poo-python | 78d09ee6fe9595b445c152f270c9dcf6f3e1d242 | [
"MIT"
] | null | null | null | src/poo_functions/dictionaries_functions.py | alfmorais/poo-python | 78d09ee6fe9595b445c152f270c9dcf6f3e1d242 | [
"MIT"
] | null | null | null | questions = {
'question1': {
'question': 'How much are 4 + 2?',
'answers': {
'a': 6,
'b': 2,
'c': 5,
},
'correct_answear': 'a',
},
'question2': {
'question': 'How much are 4 + 4?',
'answers': {
'a': 6,
'b': 8,
'c': 5,
},
'correct_answear': 'b',
},
'question3': {
'question': 'How much are 4 - 4?',
'answers': {
'a': 6,
'b': 8,
'c': 0,
},
'correct_answear': 'c',
},
}
points = 0
for qk, qv in questions.items():
question = qv['question']
a = qv['answers']['a']
b = qv['answers']['b']
c = qv['answers']['c']
correct_answear = qv['correct_answear']
print(f'{qk}: {question}\n')
print(f'a) {a}, b) {b}, c) {c}\n')
answear = str(input('Choose a, b , c: \n'))
if answear == correct_answear:
print('You are write!\n')
points = points + 1
else:
print('You are wrong!\n')
print(f'SCORE: {points} points')
| 21.84 | 47 | 0.411172 |
7945cfdfb4426b5cb66fd37848aaf29166cdbb31 | 18,128 | py | Python | ns1/__init__.py | fakela/ns1-python | 1e4e125b574f7dec4fd5dfcefaf8f8a5c767e945 | [
"MIT"
] | null | null | null | ns1/__init__.py | fakela/ns1-python | 1e4e125b574f7dec4fd5dfcefaf8f8a5c767e945 | [
"MIT"
] | null | null | null | ns1/__init__.py | fakela/ns1-python | 1e4e125b574f7dec4fd5dfcefaf8f8a5c767e945 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2014 NSONE, Inc.
#
# License under The MIT License (MIT). See LICENSE in project root.
#
from .config import Config
version = "0.16.0"
class NS1:
def __init__(self, apiKey=None, config=None, configFile=None, keyID=None):
"""
Create a new top level NS1 API object
:param str apiKey: if given, initialize config with this API key \
(obtainable via creation in NS1 portal)
:param ns1.config.Config config: if given, uses a separately \
constructed and configured Config object
:param str configFile: if given, load configuration from the given \
json configuration file
:param str keyID: if given, use the specified key config in the \
multi-key configuration file
"""
self.config = config
if self.config is None:
self._loadConfig(apiKey, configFile)
if keyID:
self.config.useKeyID(keyID)
def _loadConfig(self, apiKey, configFile):
self.config = Config()
if apiKey:
self.config.createFromAPIKey(apiKey)
else:
configFile = (
Config.DEFAULT_CONFIG_FILE if not configFile else configFile
)
self.config.loadFromFile(configFile)
# REST INTERFACE
def zones(self):
"""
Return a new raw REST interface to zone resources
:rtype: :py:class:`ns1.rest.zones.Zones`
"""
import ns1.rest.zones
return ns1.rest.zones.Zones(self.config)
def records(self):
"""
Return a new raw REST interface to record resources
:rtype: :py:class:`ns1.rest.records.Records`
"""
import ns1.rest.records
return ns1.rest.records.Records(self.config)
def addresses(self):
"""
Return a new raw REST interface to address resources
:rtype: :py:class:`ns1.rest.ipam.Adresses`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Addresses(self.config)
def networks(self):
"""
Return a new raw REST interface to network resources
:rtype: :py:class:`ns1.rest.ipam.Networks`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Networks(self.config)
def scope_groups(self):
"""
Return a new raw REST interface to scope_group resources
:rtype: :py:class:`ns1.rest.ipam.Scopegroups`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Scopegroups(self.config)
def reservations(self):
"""
Return a new raw REST interface to reservation resources
:rtype: :py:class:`ns1.rest.ipam.Reservations`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Reservations(self.config)
def scopes(self):
"""
Return a new raw REST interface to scope resources
:rtype: :py:class:`ns1.rest.ipam.Scopes`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Scopes(self.config)
def optiondefs(self):
"""
Return a new raw REST interface to optiondefs resources
:rtype: :py:class:`ns1.rest.ipam.Optiondefs`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Optiondefs(self.config)
def stats(self):
"""
Return a new raw REST interface to stats resources
:rtype: :py:class:`ns1.rest.stats.Stats`
"""
import ns1.rest.stats
return ns1.rest.stats.Stats(self.config)
def datasource(self):
"""
Return a new raw REST interface to datasource resources
:rtype: :py:class:`ns1.rest.data.Source`
"""
import ns1.rest.data
return ns1.rest.data.Source(self.config)
def datafeed(self):
"""
Return a new raw REST interface to feed resources
:rtype: :py:class:`ns1.rest.data.Feed`
"""
import ns1.rest.data
return ns1.rest.data.Feed(self.config)
def monitors(self):
"""
Return a new raw REST interface to monitors resources
:rtype: :py:class:`ns1.rest.monitoring.Monitors`
"""
import ns1.rest.monitoring
return ns1.rest.monitoring.Monitors(self.config)
def notifylists(self):
"""
Return a new raw REST interface to notify list resources
:rtype: :py:class:`ns1.rest.monitoring.NotifyLists`
"""
import ns1.rest.monitoring
return ns1.rest.monitoring.NotifyLists(self.config)
def monitoring_jobtypes(self):
"""
Return a new raw REST interface to monitoring jobtypes resources
:rtype: :py:class:`ns1.rest.monitoring.JobTypes`
"""
import ns1.rest.monitoring
return ns1.rest.monitoring.JobTypes(self.config)
def monitoring_regions(self):
"""
Return a new raw REST interface to monitoring regions resources
:rtype: :py:class:`ns1.rest.monitoring.Regions`
"""
import ns1.rest.monitoring
return ns1.rest.monitoring.Regions(self.config)
def plan(self):
"""
Return a new raw REST interface to account plan
:rtype: :py:class:`ns1.rest.account.Plan`
"""
import ns1.rest.account
return ns1.rest.account.Plan(self.config)
def team(self):
"""
Return a new raw REST interface to team resources
:rtype: :py:class:`ns1.rest.team.Team`
"""
import ns1.rest.team
return ns1.rest.team.Team(self.config)
def user(self):
"""
Return a new raw REST interface to user resources
:rtype: :py:class:`ns1.rest.user.User`
"""
import ns1.rest.user
return ns1.rest.user.User(self.config)
def apikey(self):
"""
Return a new raw REST interface to API key resources
:rtype: :py:class:`ns1.rest.apikey.APIKey`
"""
import ns1.rest.apikey
return ns1.rest.apikey.APIKey(self.config)
# HIGH LEVEL INTERFACE
def loadZone(self, zone, callback=None, errback=None):
"""
Load an existing zone into a high level Zone object.
:param str zone: zone name, like 'example.com'
:rtype: :py:class:`ns1.zones.Zone`
"""
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.load(callback=callback, errback=errback)
def searchZone(
self, zone, q=None, has_geo=False, callback=None, errback=None
):
"""
Search a zone for a given search query (e.g., for geological data, etc)
:param zone: NOT a string like loadZone - an already loaded ns1.zones.Zone, like one returned from loadZone
:return:
"""
return zone.search(q, has_geo, callback=callback, errback=errback)
def createZone(
self, zone, zoneFile=None, callback=None, errback=None, **kwargs
):
"""
Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone`
"""
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.create(
zoneFile=zoneFile, callback=callback, errback=errback, **kwargs
)
def loadRecord(
self, domain, type, zone=None, callback=None, errback=None, **kwargs
):
"""
Load an existing record into a high level Record object.
:param str domain: domain name of the record in the zone, for example \
'myrecord'. You may leave off the zone, if it is specified in the \
zone parameter. This is recommended. You can pass a fully \
qualified domain and not pass the zone argument, but this will \
not work as expected if there are any dots in the domain, e.g. \
`foo.example.com` is OK, `foo.bar.example.com` will not work as
expected.
:param str type: record type, such as 'A', 'MX', 'AAAA', etc.
:param str zone: zone name, like 'example.com'
:rtype: :py:class:`ns1.records`
"""
import ns1.zones
if zone is None:
# extract from record string
parts = domain.split(".")
if len(parts) <= 2:
zone = ".".join(parts)
else:
zone = ".".join(parts[1:])
z = ns1.zones.Zone(self.config, zone)
return z.loadRecord(
domain, type, callback=callback, errback=errback, **kwargs
)
def loadMonitors(self, callback=None, errback=None, **kwargs):
"""
Load all monitors
"""
import ns1.monitoring
monitors_list = self.monitors().list(callback, errback)
return [ns1.monitoring.Monitor(self.config, m) for m in monitors_list]
def createMonitor(self, callback=None, errback=None, **kwargs):
"""
Create a monitor
"""
import ns1.monitoring
monitor = ns1.monitoring.Monitor(self.config)
return monitor.create(callback=callback, errback=errback, **kwargs)
def loadNetworkbyID(self, id, callback=None, errback=None):
"""
Load an existing Network by ID into a high level Network object
:param int id: id of an existing Network
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, id=id)
return network.load(callback=callback, errback=errback)
def loadNetworkbyName(self, name, callback=None, errback=None):
"""
Load an existing Network by name into a high level Network object
:param str name: Name of an existing Network
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, name=name)
return network.load(callback=callback, errback=errback)
def createNetwork(
self, name, scope_group_id=None, callback=None, errback=None, **kwargs
):
"""
Create a new Network
For the list of keywords available, see :attr:`ns1.rest.ipam.Networks.INT_FIELDS` and :attr:`ns1.rest.ipam.Networks.PASSTHRU_FIELDS`
:param str name: Name of the Network to be created
:param int scope_group: (Optional) id of an existing scope group to associate with
"""
import ns1.ipam
if scope_group_id is not None:
scope_group = ns1.ipam.Scopegroup(
self.config, id=scope_group_id
).load()
kwargs["scope_group"] = scope_group
network = ns1.ipam.Network(self.config, name=name)
return network.create(callback=callback, errback=errback, **kwargs)
def loadAddressbyID(self, id, callback=None, errback=None):
"""
Load an existing address by ID into a high level Address object
:param int id: id of an existing Address
"""
import ns1.ipam
address = ns1.ipam.Address(self.config, id=id)
return address.load(callback=callback, errback=errback)
def loadAddressbyPrefix(
self, prefix, status, network_id, callback=None, errback=None
):
"""
Load an existing address by prefix, status and network into a high level Address object
:param str prefix: CIDR prefix of an existing Address
:param str status: The status of address assignment (planned or assigned)
:param int network_id: network_id associated with the address
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, id=network_id).load()
address = ns1.ipam.Address(
self.config, prefix=prefix, status=status, network=network
)
return address.load(callback=callback, errback=errback)
def createAddress(
self, prefix, status, network_id, callback=None, errback=None, **kwargs
):
"""
Create a new Address
For the list of keywords available, see :attr:`ns1.rest.ipam.Addresses.INT_FIELDS` and :attr:`ns1.rest.ipam.Addresses.PASSTHRU_FIELDS`
:param str prefix: CIDR prefix of the address to be created
:param str status: The status of address assignment (planned or assigned)
:param int network_id: network_id associated with the address
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, id=network_id).load()
address = ns1.ipam.Address(
self.config, prefix=prefix, status=status, network=network
)
return address.create(callback=callback, errback=errback, **kwargs)
def loadScopeGroup(self, id, callback=None, errback=None):
"""
Load an existing Scope Group into a high level Scope Group object
:param int id: id of an existing ScopeGroup
"""
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(self.config, id=id)
return scope_group.load(callback=callback, errback=errback)
def createScopeGroup(
self,
name,
service_def_id,
dhcp4,
dhcp6,
callback=None,
errback=None,
**kwargs
):
"""
Create a new Scope Group
For the list of keywords available, see :attr:`ns1.rest.ipam.ScopeGroups.INT_FIELDS` and :attr:`ns1.rest.ipam.ScopeGroups.PASSTHRU_FIELDS`
:param str name: Name of the Scope Group to be created
:param int service_group_id: id of the service group the Scope group is associated with
:param ns1.ipam.DHCPIOptions dhcp4: DHCPOptions object that contains the options for dhcpv4
:param ns1.ipam.DHCPIOptions dhcp6: DHCPOptions object that contains the options for dhcpv6
"""
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(
self.config, name=name, service_def_id=service_def_id
)
return scope_group.create(
dhcp4=dhcp4,
dhcp6=dhcp6,
callback=callback,
errback=errback,
**kwargs
)
def createReservation(
self,
scopegroup_id,
address_id,
mac,
dhcp_options=None,
callback=None,
errback=None,
**kwargs
):
"""
Create a new Reservation
For the list of keywords available, see :attr:`ns1.rest.ipam.Reservation.INT_FIELDS` and :attr:`ns1.rest.ipam.Reservation.PASSTHRU_FIELDS`
:param int scopegroup_id: id of the scope group
:param int address_id: id of the address the reservation is associated with
:param str mac: mac address of the reservation
:param list options: dhcp options of the reservation
"""
import ns1.ipam
reservation = ns1.ipam.Reservation(
self.config, scopegroup_id, address_id, dhcp_options, mac
)
return reservation.create(callback=callback, errback=errback, **kwargs)
def loadReservation(
self, scopegroup_id, address_id, callback=None, errback=None
):
import ns1.ipam
reservation = ns1.ipam.Reservation(
self.config, scopegroup_id, address_id
)
return reservation.load(callback=callback, errback=errback)
def createScope(
self,
scopegroup_id,
address_id,
dhcp_options=None,
callback=None,
errback=None,
**kwargs
):
"""
Create a new Scope
For the list of keywords available, see :attr:`ns1.rest.ipam.Scope.INT_FIELDS` and :attr:`ns1.rest.ipam.Scope.PASSTHRU_FIELDS`
:param int scopegroup_id: id of the scope group
:param int address_id: id of the address the scope is associated with
:param DHCPOptions options: DHCPOptions object that contains the settings for the scope
"""
import ns1.ipam
scope = ns1.ipam.Scope(
self.config, scopegroup_id, address_id, dhcp_options
)
return scope.create(callback=callback, errback=errback, **kwargs)
def loadScope(
self, scopegroup_id, address_id, callback=None, errback=None
):
import ns1.ipam
scope = ns1.ipam.Scope(self.config, scopegroup_id, address_id)
return scope.load(callback=callback, errback=errback)
def loadLeases(
self,
scope_group_id=None,
scope_id=None,
limit=None,
offset=None,
callback=None,
errback=None,
):
import ns1.ipam
lease = ns1.ipam.Lease(self.config)
return lease.load(
scope_group_id,
scope_id,
limit,
offset,
callback=callback,
errback=errback,
)
def generateDHCPOptionsTemplate(self, address_family):
"""
Generate boilerplate dictionary to hold dhcp options
:param str address_family: dhcpv4 or dhcpv6
:return: dict containing valid option set for address family
"""
from ns1.ipam import DHCPOptions
options = {}
for option in DHCPOptions.OPTIONS[address_family]:
options[option] = ""
return options
def loadDHCPOptions(self, address_family, options):
"""
Create a high level DHCPOptions object
:param str address_family: Address family of the options. Can be either dhcpv4 or dhcpv6
:param dict options: Dictionary containing the option set to apply for this address family. Note: only those specified will be applied. Allowed options can be found in :attr:`ns1.ipam.DHCPOptions.OPTIONS`
"""
import ns1.ipam
return ns1.ipam.DHCPOptions(address_family, options)
| 30.013245 | 212 | 0.613967 |
7945d01679f8c513592fc61060f3c11ffe79160c | 4,051 | py | Python | cardinal_pythonlib/django/fields/restrictedcontentfile.py | RudolfCardinal/pythonlib | 4c583ad1aae3c1166a4e6f964df87eb6c02a73cb | [
"Apache-2.0"
] | 10 | 2015-09-30T02:46:48.000Z | 2021-07-23T05:03:38.000Z | cardinal_pythonlib/django/fields/restrictedcontentfile.py | RudolfCardinal/pythonlib | 4c583ad1aae3c1166a4e6f964df87eb6c02a73cb | [
"Apache-2.0"
] | 9 | 2019-07-04T11:10:31.000Z | 2021-09-23T21:11:42.000Z | cardinal_pythonlib/django/fields/restrictedcontentfile.py | RudolfCardinal/pythonlib | 4c583ad1aae3c1166a4e6f964df87eb6c02a73cb | [
"Apache-2.0"
] | 4 | 2017-07-17T15:17:44.000Z | 2021-07-23T05:03:41.000Z | #!/usr/bin/env python
# cardinal_pythonlib/django/fields/restrictedcontentfile.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Django field class storing a file (by reference to a disk file, as for
"django.db.models.FileField") but also implementing limits on the maximum
upload size.**
"""
from typing import Any
# noinspection PyUnresolvedReferences
from django import forms
# noinspection PyUnresolvedReferences
from django.core.files.uploadedfile import UploadedFile
# noinspection PyUnresolvedReferences
from django.db import models
# noinspection PyUnresolvedReferences
from django.template.defaultfilters import filesizeformat
# noinspection PyUnresolvedReferences
from django.utils.translation import ugettext_lazy
# =============================================================================
# ContentTypeRestrictedFileField
# =============================================================================
class ContentTypeRestrictedFileField(models.FileField):
"""
Same as ``FileField``, but you can specify:
- ``content_types`` - list containing allowed content_types.
Example: ``['application/pdf', 'image/jpeg']``
- ``max_upload_size`` - a number indicating the maximum file size allowed
for upload.
.. code-block:: none
2.5MB - 2621440
5MB - 5242880
10MB - 10485760
20MB - 20971520
50MB - 5242880
100MB - 104857600
250MB - 214958080
500MB - 429916160
See:
- https://djangosnippets.org/snippets/2206/
- https://docs.djangoproject.com/en/1.8/ref/files/uploads/
- https://stackoverflow.com/questions/2472422/django-file-upload-size-limit
"""
def __init__(self, *args, **kwargs) -> None:
self.content_types = kwargs.pop("content_types", None)
if self.content_types is None:
self.content_types = []
self.max_upload_size = kwargs.pop("max_upload_size", None)
super().__init__(*args, **kwargs)
def clean(self, *args, **kwargs) -> Any:
data = super().clean(*args, **kwargs)
# log.debug("data: {!r}", data)
f = data.file
if not isinstance(f, UploadedFile): # RNC
# no new file uploaded; there won't be a content-type to check
return data
# log.debug("f: {!r}", f)
content_type = f.content_type
if content_type not in self.content_types:
raise forms.ValidationError(ugettext_lazy(
'Filetype not supported.'))
if hasattr(f, "size"): # e.g. Django 2.1.2
uploaded_file_size = f.size
elif hasattr(f, "_size"): # e.g. Django 1.8 ?
# noinspection PyProtectedMember,PyUnresolvedReferences
uploaded_file_size = f._size
else:
raise AssertionError(
f"Don't know how to get file size from {f!r}")
if (self.max_upload_size is not None and
uploaded_file_size > self.max_upload_size):
raise forms.ValidationError(ugettext_lazy(
'Please keep filesize under %s. Current filesize %s')
% (filesizeformat(self.max_upload_size),
filesizeformat(uploaded_file_size)))
return data
| 36.827273 | 79 | 0.61565 |
7945d16591585efe07166c8e50d83b5b7a38af8e | 6,439 | py | Python | sdk/python/pulumi_azure_nextgen/storage/v20200801preview/blob_inventory_policy.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/storage/v20200801preview/blob_inventory_policy.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/storage/v20200801preview/blob_inventory_policy.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BlobInventoryPolicy']
class BlobInventoryPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
blob_inventory_policy_name: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[pulumi.InputType['BlobInventoryPolicySchemaArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The storage account blob inventory policy.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param pulumi.Input[str] blob_inventory_policy_name: The name of the storage account blob inventory policy. It should always be 'default'
:param pulumi.Input[pulumi.InputType['BlobInventoryPolicySchemaArgs']] policy: The storage account blob inventory policy object. It is composed of policy rules.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['blob_inventory_policy_name'] = blob_inventory_policy_name
if policy is None and not opts.urn:
raise TypeError("Missing required property 'policy'")
__props__['policy'] = policy
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['last_modified_time'] = None
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storage:BlobInventoryPolicy"), pulumi.Alias(type_="azure-nextgen:storage/latest:BlobInventoryPolicy"), pulumi.Alias(type_="azure-nextgen:storage/v20190601:BlobInventoryPolicy"), pulumi.Alias(type_="azure-nextgen:storage/v20210101:BlobInventoryPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(BlobInventoryPolicy, __self__).__init__(
'azure-nextgen:storage/v20200801preview:BlobInventoryPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'BlobInventoryPolicy':
"""
Get an existing BlobInventoryPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return BlobInventoryPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> pulumi.Output[str]:
"""
Returns the last modified date and time of the blob inventory policy.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policy(self) -> pulumi.Output['outputs.BlobInventoryPolicySchemaResponse']:
"""
The storage account blob inventory policy object. It is composed of policy rules.
"""
return pulumi.get(self, "policy")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.666667 | 340 | 0.667029 |
7945d1bb47f980e768d1bd4f608e23b6ad11083d | 3,825 | py | Python | tests/unit/bokeh/util/test_deprecation.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/bokeh/util/test_deprecation.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/bokeh/util/test_deprecation.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from mock import MagicMock, patch
# Module under test
import bokeh.util.deprecation as dep # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
def foo(): pass
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@patch('warnings.warn')
def test_message(mock_warn: MagicMock) -> None:
dep.deprecated('test')
assert mock_warn.called
assert mock_warn.call_args[0] == ("test", dep.BokehDeprecationWarning)
assert mock_warn.call_args[1] == {'stacklevel': 2}
def test_message_no_extra_args() -> None:
with pytest.raises(ValueError):
dep.deprecated('test', 'foo')
with pytest.raises(ValueError):
dep.deprecated('test', old='foo')
with pytest.raises(ValueError):
dep.deprecated('test', new='foo')
with pytest.raises(ValueError):
dep.deprecated('test', extra='foo')
def test_since_missing_extra_args() -> None:
with pytest.raises(ValueError):
dep.deprecated((1,2,3))
with pytest.raises(ValueError):
dep.deprecated((1,2,3), old="foo")
with pytest.raises(ValueError):
dep.deprecated((1,2,3), new="foo")
def test_since_bad_tuple() -> None:
with pytest.raises(ValueError):
dep.deprecated((1,), old="foo", new="bar")
with pytest.raises(ValueError):
dep.deprecated((1,2), old="foo", new="bar")
with pytest.raises(ValueError):
dep.deprecated((1,2,3,4), old="foo", new="bar")
with pytest.raises(ValueError):
dep.deprecated((1,2,-4), old="foo", new="bar")
with pytest.raises(ValueError):
dep.deprecated((1,2,"3"), old="foo", new="bar")
@patch('warnings.warn')
def test_since(mock_warn: MagicMock) -> None:
dep.deprecated((1,2,3), old="foo", new="bar")
assert mock_warn.called
assert mock_warn.call_args[0] == ("foo was deprecated in Bokeh 1.2.3 and will be removed, use bar instead.", dep.BokehDeprecationWarning)
assert mock_warn.call_args[1] == {'stacklevel': 2}
@patch('warnings.warn')
def test_since_with_extra(mock_warn: MagicMock) -> None:
dep.deprecated((1,2,3), old="foo", new="bar", extra="baz")
assert mock_warn.called
assert mock_warn.call_args[0] == ("foo was deprecated in Bokeh 1.2.3 and will be removed, use bar instead. baz", dep.BokehDeprecationWarning)
assert mock_warn.call_args[1] == {'stacklevel': 2}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 39.43299 | 145 | 0.453595 |
7945d1cc948b8443d7f83370af19cd433c20e528 | 8,740 | py | Python | static/python/app.py | louis-threedots/louis-threedots.github.io | 89450f5ace80ff380795a80aed4461d06b02ff1a | [
"MIT"
] | 1 | 2020-04-11T16:26:12.000Z | 2020-04-11T16:26:12.000Z | static/python/app.py | louis-threedots/louis-threedots.github.io | 89450f5ace80ff380795a80aed4461d06b02ff1a | [
"MIT"
] | 12 | 2020-03-26T16:03:16.000Z | 2020-04-09T13:14:38.000Z | static/python/app.py | louis-threedots/website | 89450f5ace80ff380795a80aed4461d06b02ff1a | [
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class App(ABC):
def __init__(self, name):
self.name = name
self.instruction = "Welcome to " + self.name + ". This application does not have instructions yet."
self.settings = self.load_settings()
@abstractmethod
def on_start(self):
# Actions that an app wants to perform on app start
pass
def on_quit(self):
# Actions that an app wants to perform when quitting the app
glob.mainApp.audio.speak("The app will now close itself. Goodbye.")
self.save_settings()
self.reset_cells()
# return to main thread
glob.mainApp.main_menu()
def confirm_quit(self):
text = "Would you like to quit this application?"
if self.name == 'main':
text = "Would you like to quit louis?"
glob.mainApp.audio.speak(text)
response = self.await_response(["yes","no"])
# take answer from the user
if response == "yes":
self.on_quit()
elif response == "no":
glob.mainApp.audio.speak("You're returning to the app.")
def reset_cells(self, to='zero'):
for cell in reversed(glob.mainApp.cells):
cell.reset(to=to)
def load_settings(self):
# Rehydrate app settings from local file system
if not(os.path.exists(self.filepath)):
with open(self.filepath, 'w') as f:
settings = {}
f.write(json.dumps(settings, indent=4))
with open(self.filepath, 'r') as f:
return json.loads(f.read())
# module = getattr(__import__('app_states', fromlist=[filename]), filename)
# return module.settings
def save_settings(self):
# Save app settings to local file system
with open(self.filepath, 'w') as f:
f.write(json.dumps(self.settings, indent=4))
def edit_settings(self):
if 'editable' not in self.settings:
glob.mainApp.audio.speak("This app does not have any editable settings.")
return
glob.mainApp.audio.speak("Which setting would you like to change? Say 'home' to exit the settings menu.")
options = [setting['name'] for setting in self.settings['editable'].values()]
settings = [setting for setting in self.settings['editable'].values()]
response = self.await_response(options + ['home'])
if response == 'home':
glob.mainApp.audio.speak("Exiting the settings menu.")
return
setting_idx = options.index(response)
setting = settings[setting_idx]
glob.mainApp.audio.speak(setting['description'])
commands = list(setting['values'].keys())
values = [value for value in setting['values'].values()]
for command in commands:
glob.mainApp.audio.speak("- " + command)
response = self.await_response(commands + ['home'])
if response == 'home':
glob.mainApp.audio.speak("Exiting the settings menu.")
return
value_idx = commands.index(response)
value = values[value_idx]
setting['value'] = value
glob.mainApp.audio.speak("The setting has been updated. Returning to 'home'.")
def app_instruction(self):
glob.mainApp.audio.speak(self.instruction)
def get_pressed_button(self):
# Returns the index of the pressed cell button
return glob.mainApp.arduino.get_pressed_button()
def wait_for_all_cells_finished(self):
# Returns true if all the cells have finished rendering
cells_finished_rotating = [False] * len(glob.mainApp.cells)
while False in cells_finished_rotating:
for cell in glob.mainApp.cells:
cells_finished_rotating[cell.index - 1] = cell.has_finished_rotating()
def print_character_all_cells(self, c):
for cell in reversed(glob.mainApp.cells):
cell.print_character(c)
self.wait_for_all_cells_finished()
self.print_cells_to_terminal()
def print_text(self, text):
prepared_text = []
i = 0
while i < len(text):
letter = text[i]
if i>=1 and letter.isalpha() and text[i-1].isdigit():
prepared_text.append('LETTER')
if letter.isupper():
prepared_text.append('CAPITAL')
letter = letter.lower()
elif letter.isdigit():
insert_letter_ind = True
# indicator not necessary when previous char is digit
if i>=1 and text[i-1].isdigit():
insert_letter_ind = False
# indicator not necessary when part of number (e.g. 1,496.2)
if i>=2 and text[i-2].isdigit() and (text[i-1] == '.' or text[i-1] == ','):
insert_letter_ind = False
if insert_letter_ind:
prepared_text.append('NUMBER')
if glob.mainApp.settings['editable']['contractions']['value'] == True:
found_contraction = False
for contraction in contraction_dict:
if text[i:(i + len(contraction))] == contraction:
prepared_text.append(contraction)
i += len(contraction)
found_contraction = True
break
if found_contraction:
continue
prepared_text.append(letter)
i += 1
to_print = []
for i in range(0,len(prepared_text)):
to_print.append(prepared_text[i])
if len(to_print) == len(glob.mainApp.cells) or i == len(prepared_text)-1 :
# Letters need to be passed in reverse in order to be processed in parallel
padding = len(glob.mainApp.cells) - len(to_print)
to_print = to_print + list(" " * padding)
for j in range(len(to_print)-1,-1,-1):
glob.mainApp.cells[j].print_character(to_print[j])
self.print_cells_to_terminal()
# Wait for pagination. Exiting turns out to be more difficult since wait_for_button_press blocks the execution.
glob.mainApp.cells[-1].wait_for_button_press()
to_print = []
def print_cells_to_terminal(self):
dots_print = ['.', 'o']
top_row, middle_row, bottom_row, character_row = '', '', '', ''
for cell in glob.mainApp.cells:
extra_padding = len(cell.character) - 1
if cell.character not in character_dict:
dots = character_dict['UNKNOWN']['dots']
else:
dots = character_dict[cell.character]['dots']
character_row = character_row + ' ' + str(cell.character) + ' '
top_row = top_row + '|' + dots_print[dots[0]] + ' ' + dots_print[dots[3]] + '|' + (' ' * extra_padding)
middle_row = middle_row + '|' + dots_print[dots[1]] + ' ' + dots_print[dots[4]] + '|' + (' ' * extra_padding)
bottom_row = bottom_row + '|' + dots_print[dots[2]] + ' ' + dots_print[dots[5]] + '|' + (' ' * extra_padding)
glob.cust_print(top_row)
glob.cust_print(middle_row)
glob.cust_print(bottom_row)
glob.cust_print(character_row)
def await_response(self, desired_responses = []):
answer = glob.mainApp.audio.recognize_speech()["transcription"]
answer = answer.lower()
invalid = True
if answer.find("options") != -1:
glob.mainApp.audio.speak("Your options are:")
for option in desired_responses:
glob.mainApp.audio.speak("- "+option)
invalid = False
# quit / exit command listener
elif answer.find('quit') != -1 or answer.find('exit') != -1:
self.confirm_quit()
invalid = False
# help listener
elif answer.find('help') != -1:
self.app_instruction()
invalid = False
# settings listener
elif answer.find('settings') != -1:
self.edit_settings()
invalid = False
if len(desired_responses) == 0:
return answer
else:
for d_r in desired_responses:
if answer.find(d_r) != -1:
response = d_r
print("You said: " + response)
return response
if answer != "" and invalid:
glob.mainApp.audio.speak("Invalid option, please try again.")
response = self.await_response(desired_responses)
return response
| 40.091743 | 127 | 0.569336 |
7945d2387f7989053c137241c942e633d1d8a5e7 | 180 | py | Python | doltcli/misc_mixin.py | sidphbot/doltcli | 059a7c6dd31f036771b3ce94cd5f064696d64229 | [
"Apache-2.0"
] | 6 | 2021-03-23T20:58:23.000Z | 2022-03-31T22:07:11.000Z | doltcli/misc_mixin.py | sidphbot/doltcli | 059a7c6dd31f036771b3ce94cd5f064696d64229 | [
"Apache-2.0"
] | 22 | 2021-03-10T21:06:22.000Z | 2021-11-06T20:07:12.000Z | doltcli/misc_mixin.py | sidphbot/doltcli | 059a7c6dd31f036771b3ce94cd5f064696d64229 | [
"Apache-2.0"
] | 6 | 2021-06-14T19:07:39.000Z | 2021-11-05T18:50:59.000Z | class MiscMixin:
def read_tables(self):
pass
def gc(self):
pass
def filter_branch(self):
pass
def verify_constraints(self):
pass
| 13.846154 | 33 | 0.566667 |
7945d339ce34bec163617da0f742c3a877a5d1ec | 1,878 | py | Python | game/gamesrc/objects/world/combat.py | GhostshipSoftware/avaloria | 3dbbc281c8746afdcc094d87a4fdd414fd240e77 | [
"BSD-3-Clause"
] | null | null | null | game/gamesrc/objects/world/combat.py | GhostshipSoftware/avaloria | 3dbbc281c8746afdcc094d87a4fdd414fd240e77 | [
"BSD-3-Clause"
] | null | null | null | game/gamesrc/objects/world/combat.py | GhostshipSoftware/avaloria | 3dbbc281c8746afdcc094d87a4fdd414fd240e77 | [
"BSD-3-Clause"
] | null | null | null | from ev import Object
class CombatManager(Object):
"""
Main Combat management object.
"""
def at_object_creation(self):
self.db.pc_combatant = None
self.db.npc_combatant = None
self.db.round = 0
def do_round(self):
pc_initiative = self.db.pc_combatant.get_initiative()
npc_initiative = self.db.npc_combatant.get_initiative()
pc = self.db.pc_combatant
npc = self.db.npc_combatant
if pc_initiative > npc_initiative:
print "in attack block pc"
pc.do_attack_phase()
pc.do_skill_phase()
npc.do_attack_phase()
npc.do_skill_phase()
else:
print "in attack block npc"
npc.do_attack_phase()
npc.do_skill_phase()
pc.do_attack_phase()
pc.do_skill_phase()
print "going to stats"
self.check_stats()
def check_stats(self):
print "in stats"
pc = self.db.pc_combatant
npc = self.db.npc_combatant
if pc.db.attributes['temp_health'] <= 0:
pc.msg("{RYou have been slain by %s unmercifully, death awaits..{n" % npc.name)
pc.db.in_combat = False
pc.unconcious()
if npc.db.attributes['temp_health'] <= 0:
pc.db.in_combat = False
pc.msg("{CYou have destroyed %s!{n" % npc.name)
if npc.db.attributes['exp_reward'] > 0:
pc.award_exp(npc.db.attributes['exp_reward'])
pc.award_exp(npc.db.attributes['exp_reward'], archtype='soldier')
if len(npc.db.attributes['currency_reward'].keys()) > 0:
for ct in npc.db.attributes['currency_reward']:
pc.award_currency(npc.db.attributes['currency_reward'][ct], type=ct)
npc.death()
| 36.823529 | 91 | 0.563898 |
7945d3a7f413c6f544ef811edb267a29d60deefc | 100 | py | Python | src/test.py | yangzoudreamer/toolkit_test_gpu | 767debb3cd168f6cb33c9a2cf0b896268f85c586 | [
"MIT"
] | null | null | null | src/test.py | yangzoudreamer/toolkit_test_gpu | 767debb3cd168f6cb33c9a2cf0b896268f85c586 | [
"MIT"
] | null | null | null | src/test.py | yangzoudreamer/toolkit_test_gpu | 767debb3cd168f6cb33c9a2cf0b896268f85c586 | [
"MIT"
] | null | null | null | import os
import torch
#os.environ["CUDA_VISIBLE_DEVICES"] = "11"
print(torch.cuda.is_available())
| 16.666667 | 42 | 0.76 |
7945d8dba3915a3b65a739fe4a26a09364405230 | 61,381 | py | Python | caffe2onnx/src/caffe2onnx.py | vsrad/caffe2onnx | 96576bd9340a89f64e14d52fe11244065a708db1 | [
"BSD-3-Clause"
] | null | null | null | caffe2onnx/src/caffe2onnx.py | vsrad/caffe2onnx | 96576bd9340a89f64e14d52fe11244065a708db1 | [
"BSD-3-Clause"
] | null | null | null | caffe2onnx/src/caffe2onnx.py | vsrad/caffe2onnx | 96576bd9340a89f64e14d52fe11244065a708db1 | [
"BSD-3-Clause"
] | null | null | null | import caffe2onnx.src.OPs as op
from caffe2onnx.src.c2oObject import *
from onnx import helper
import copy
import numpy as np
from caffe2onnx.src.op_layer_info import *
import random
import sys
from typing import *
import onnx
class Caffe2Onnx():
def __init__(self, net, model, onnxname):
# Initialize a c2oGraph object
self.onnxmodel = c2oGraph(onnxname)
# Network and parameters
self.netLayerCaffe = self.GetNetLayerCaffe(net)
self.netModelCaffe = self.GetNetModelCaffe(model)
# Model input name and input dimension
self.model_input_name = []
self.model_input_shape = []
# Node list
self.onnxNodeList = []
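        # Conversion pipeline: register graph inputs, translate each Caffe layer into
        # ONNX node(s), then declare graph outputs and intermediate value_info entries.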
# Get layer list
LayerList = self.AddInputsTVIAndGetLayerList(net)
self.GenerateOnnxNodeList(LayerList)
self.AddOutputsTVIAndValueInfo()
# Get the network layer
def GetNetLayerCaffe(self, net):
if len(net.layer) == 0 and len(net.layers) != 0:
return net.layers
elif len(net.layer) != 0 and len(net.layers) == 0:
return net.layer
else:
print("prototxt layer error")
return -1
# Get parameter layer
def GetNetModelCaffe(self, model):
if len(model.layer) == 0 and len(model.layers) != 0:
return model.layers
elif len(model.layer) != 0 and len(model.layers) == 0:
return model.layer
else:
print("caffemodel layer error")
return -1
# Add model input information to Inputs and get a list of subsequent layers
def AddInputsTVIAndGetLayerList(self, net):
# If the type of the first layer is Input, and no net.input exists
if net.input == [] and self.netLayerCaffe[0].type == "Input":
layer_list = []
# Considering that the entire network will have multiple inputs
for lay in self.netLayerCaffe:
if lay.type == "Input":
if len(lay.top) == 1 and lay.top[0] != lay.name:
input_layer_name = lay.top[0]
else:
input_layer_name = lay.name
in_tvi = helper.make_tensor_value_info(
input_layer_name + "_input", TensorProto.FLOAT,
lay.input_param.shape[0].dim)
self.model_input_name.append(input_layer_name + "_input")
self.model_input_shape.append(lay.input_param.shape[0].dim)
self.onnxmodel.addInputsTVI(in_tvi)
else:
layer_list.append(lay)
return layer_list
# If net.input exists
elif net.input != []:
if bool(net.input_dim):
input_dim = net.input_dim
elif bool(net.input_shape):
input_dim = net.input_shape[0].dim
else:
raise RuntimeError("Input shape missing!")
in_tvi = helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim)
self.model_input_name.append("input")
self.model_input_shape.append(input_dim)
self.onnxmodel.addInputsTVI(in_tvi)
return self.netLayerCaffe
# None of the above situations, then the caffe model has no input, there is a problem
else:
raise ValueError("the caffe model has no input")
# Get the parameter shape of layer
def GetParamsShapeAndData(self, layer):
ParamShape = []
ParamData = []
# According to the layer name, find out the parameters in the corresponding caffemodel
for model_layer in self.netModelCaffe:
if layer.name == model_layer.name:
Params = copy.deepcopy(model_layer.blobs)
ParamShape = [p.shape.dim for p in Params]
ParamData = [p.data for p in Params]
if layer.type == "BatchNorm" or layer.type == "BN":
if len(ParamShape) == 3:
# If it is a bn layer, the sliding coefficient of the last layer is not used
ParamShape = ParamShape[:-1]
ParamData = ParamData[:-1]
elif len(ParamShape) == 2 and len(ParamShape[0]) != 1:
ParamShape = [[ParamShape[0][1]], [ParamShape[1][1]]]
ParamData = ParamData
return ParamShape, ParamData
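    # Unlike GetParamsShapeAndData, this helper also handles old-style caffemodel blobs
    # that store their dimensions in the (num, channels, height, width) fields rather
    # than in shape.dim.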
def get_param_shape(self, params):
shapes = []
for p in params:
if p.shape.dim != []:
shape = p.shape.dim
shapes.append(shape)
else:
shape = [p.num, p.channels, p.height, p.width]
shapes.append(shape)
return shapes
# Add parameters to Inputs and generate tensor storage data
def AddInputsTVIFromParams(self, layer, ParamName, ParamType):
ParamShape = []
ParamData = []
# Find out the parameters in the corresponding caffemodel based on the layer name
for model_layer in self.netModelCaffe:
if layer.name == model_layer.name:
Params = copy.deepcopy(model_layer.blobs)
#ParamShape = [p.shape.dim for p in Params]
ParamShape = self.get_param_shape(Params)
ParamData = [p.data for p in Params]
if layer.type == "BatchNorm" or layer.type == "BN":
if len(ParamShape) == 3:
# If it is bn layer and params is [mean, var, s], you need to divide mean and var by sliding coefficient s
ParamShape = ParamShape[:-1]
ParamData = [
[q / (Params[-1].data[0])
for q in p.data] if i == 0 else
[q / (Params[-1].data[0] + 1e-5) for q in p.data]
for i, p in enumerate(Params[:-1])
] # with s
elif len(ParamShape) == 2 and len(ParamShape[0]) == 4:
ParamShape = [[ParamShape[0][1]], [ParamShape[1][1]]]
ParamData = [[q / 1. for q in p.data] if i == 0 else
[q / (1. + 1e-5) for q in p.data]
for i, p in enumerate(Params)]
if layer.type == "Reshape":
ParamShape = [[len(model_layer.reshape_param.shape.dim)]]
ParamData = [model_layer.reshape_param.shape.dim]
if layer.type == "Convolution" or layer.type == "ConvolutionDepthwise":
if len(ParamShape) == 2:
ParamShape[1] = [ParamShape[0][0]]
if layer.type == "InnerProduct":
if len(ParamShape[0]) > 2:
ParamShape[0] = [ParamShape[0][2], ParamShape[0][3]]
if len(ParamShape) == 2:
if len(ParamShape[1]) > 2:
ParamShape[1] = [ParamShape[1][2], ParamShape[1][3]]
if layer.type == "Normalize":
if len(ParamShape) == 1:
ParamShape[0] = [1, ParamShape[0][0], 1, 1]
# comment it for tvm because tvm use broadcast at prelu layer
# if layer.type == 'PReLU':
# ParamShape = [[ParamShape[0][0], 1, 1]]
break
# Judge whether there is Param
if ParamShape != []:
ParamName = ParamName[0:len(ParamShape)]
ParamType = ParamType[0:len(ParamShape)]
for i in range(len(ParamShape)):
ParamName[i] = layer.name + ParamName[i]
p_tvi = helper.make_tensor_value_info(ParamName[i],
ParamType[i],
ParamShape[i])
p_t = helper.make_tensor(ParamName[i], ParamType[i],
ParamShape[i], ParamData[i])
self.onnxmodel.addInputsTVI(p_tvi)
self.onnxmodel.addInitTensor(p_t)
#print("add parameters " + Param_Name[i] + " input information and tensor data")
if layer.type == "BatchNorm" or layer.type == "BN" or layer.type == "Scale":
return ParamName, ParamShape
return ParamName
# Manually add parameters to the input information and generate tensor storage data
def AddInputsTVIMannul(self, layer, param_names, param_types, param_shapes,
param_data):
node_names = copy.deepcopy(param_names)
for i in range(len(param_shapes)):
node_names[i] = layer.name + param_names[i]
p_tvi = helper.make_tensor_value_info(node_names[i],
param_types[i],
param_shapes[i])
p_t = helper.make_tensor(node_names[i], param_types[i],
param_shapes[i], param_data[i])
self.onnxmodel.addInputsTVI(p_tvi)
self.onnxmodel.addInitTensor(p_t)
return node_names
# # Due to the special input of Slice, special processing is required
# if layer.type == 'Slice':
# for i in range(len(ParamShape)):
# p_tvi = helper.make_tensor_value_info(Param_Name[i], ParamType[i], ParamShape[i])
# p_t = helper.make_tensor(Param_Name[i], ParamType[i], ParamShape[i], ParamData[i])
# self.onnxmodel.addInputsTVI(p_tvi)
# self.onnxmodel.addInitTensor(p_t)
# return Param_Name
# else:
# for i in range(len(ParamShape)):
# Param_Name[i] = layer.name + ParamName[i]
# p_tvi = helper.make_tensor_value_info(Param_Name[i], ParamType[i], ParamShape[i])
# p_t = helper.make_tensor(Param_Name[i], ParamType[i], ParamShape[i], ParamData[i])
# self.onnxmodel.addInputsTVI(p_tvi)
# self.onnxmodel.addInitTensor(p_t)
# return Param_Name
# Get the output name of the previous layer (that is, the input of the current layer)
def GetLastLayerOutNameAndShape(self, layer):
output_name = []
outshape = []
# flag is True: The input of the model is not overwritten
# flag is False: The input of the model has been overwritten
flag = True
# If the node list is empty, or the bottom of the current layer is in input_name, then the input of the previous layer must be Input
if self.onnxNodeList == []:
output_name += self.model_input_name
outshape += self.model_input_shape
else:
for i in range(len(layer.bottom)):
# Because top and bottom have the same name in prototxt, but layer.bottom can only correspond to one node, so for each layer.bottom,
# find the last node with the same name as the upper node
name = None
shape = None
for node in self.onnxNodeList:
for j in range(len(node.top) if node.node.op_type != "MaxPool" else 1):
if layer.bottom[i] == node.top[j]:
name = node.outputs_name[j]
shape = node.outputs_shape[j]
for k in range(len(node.bottom)):
if node.top[j] == node.bottom[k]:
for w in range(len(self.model_input_name)):
if node.top[j] + '_input' == self.model_input_name[w]:
flag = False
for j in range(len(self.model_input_name)):
if layer.bottom[i] + '_input' == self.model_input_name[j] and flag:
output_name.append(self.model_input_name[j])
outshape.append(self.model_input_shape[j])
if name:
output_name.append(name)
outshape.append(shape)
try:
assert output_name, "Failed at layer %s, layer's bottom not detected ..." % (layer.name)
except:
print("Failed at layer %s, layer's bottom not detected ..." % (layer.name))
exit(-1)
return output_name, outshape
# Get the output name of the current layer, that is, layer name
def GetCurrentLayerOutName(self, layer):
# return [layer.name]
# Consider the case of multiple outputs
# # TODO: Why use layer.name instead?
if layer.top == layer.bottom and len(layer.top) == 1:
return [layer.name]
return [out for out in layer.top]
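    # Walk the Caffe layers in order and emit the corresponding ONNX node(s) for every
    # supported layer type, appending them to self.onnxNodeList.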
def GenerateOnnxNodeList(self, Layers):
for i in range(len(Layers)):
print("convert layer: " + Layers[i].name)
# Convolution
            if Layers[i].type == "Convolution" or Layers[i].type == Layer_CONVOLUTION:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
conv_pname = self.AddInputsTVIFromParams(Layers[i], op_pname["Conv"], op_ptype["Conv"])
input_name.extend(conv_pname)
# 3.Build conv_node
conv_node = op.createConv(Layers[i], node_name, input_name, output_name, input_shape)
# 4.Add node to node list
self.onnxNodeList.append(conv_node)
elif Layers[i].type == "ConvolutionDepthwise" or Layers[i].type == Layer_CONVOLUTION:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
conv_pname = self.AddInputsTVIFromParams(Layers[i], op_pname["Conv"], op_ptype["Conv"])
input_name.extend(conv_pname)
# 3.Build conv_node
conv_node = op.createConv(Layers[i], node_name, input_name, output_name, input_shape)
# 4.Add node to node list
self.onnxNodeList.append(conv_node)
# BatchNorm+Scale
elif Layers[i].type == "BatchNorm" or Layers[i].type == "BN":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
if i < len(Layers) - 1 and Layers[i + 1].type == "Scale":
scale_pname, scale_pshape = self.AddInputsTVIFromParams(Layers[i + 1], op_pname["Scale"],
op_ptype["Scale"])
bn_pname, bn_pshape = self.AddInputsTVIFromParams(Layers[i], op_pname["BatchNorm"],
op_ptype["BatchNorm"])
assert bn_pshape == scale_pshape, "BatchNorm and Scale params should share the same shape"
input_name.extend(scale_pname)
input_name.extend(bn_pname)
else:
bn_pshape, _ = self.GetParamsShapeAndData(Layers[i])
custom_params = [np.ones(shape=bn_pshape[0], dtype=np.float),
0.001 + np.zeros(shape=bn_pshape[1], dtype=np.float)]
scale_pname = self.AddInputsTVIMannul(Layers[i], op_pname["Scale"], op_ptype["Scale"], bn_pshape,
custom_params)
bn_pname, bn_pshape = self.AddInputsTVIFromParams(Layers[i], op_pname["BatchNorm"],
op_ptype["BatchNorm"])
input_name.extend(scale_pname)
input_name.extend(bn_pname)
# 3.Build bn_node
bn_node = op.createBN(Layers[i], node_name, input_name, output_name, input_shape)
# 4.Add node to node list
self.onnxNodeList.append(bn_node)
elif Layers[i].type == "Scale":
if i > 0 and (Layers[i - 1].type == "BatchNorm" or Layers[i - 1].type == "BN"):
# bn + scale
continue
# signal scale
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
# node_name = Layers[i].name + random.choice('1234567890abcdefghijklmnopqrst')
node_name = Layers[i].name
has_two_input: bool = False
if len(input_name) > 1:
has_two_input = True
if has_two_input and op.need_add_reshape(input_shape):
reshape_layer = copy.deepcopy(Layers[i])
# add reshape layer
reshape_node_name = input_name[1] + '_reshap_' + random.choice('1234567890abcdefghijklmnopqrst')
reshape_input_name = input_name[1]
reshape_input_shape = input_shape[1]
reshape_shape_data = op.get_param_shape(input_shape)
reshape_shape_shape = np.shape(reshape_shape_data)
reshape_params = self.AddInputsTVIMannul(Layers[i], [reshape_node_name + 'shape'], [TensorProto.INT64],
[reshape_shape_shape], [reshape_shape_data])
reshape_output_name = [reshape_input_name + '_output_name']
reshape_node = op.createReshape(reshape_layer, reshape_node_name, [reshape_input_name, reshape_params[0]],
reshape_output_name, reshape_input_shape, output_shape=[reshape_shape_data])
self.onnxNodeList.append(reshape_node)
# add mul node
input_name[1] = reshape_output_name[0]
input_shape[1] = reshape_shape_data
mul_node = op.create_mul_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(mul_node)
else:
param_shape, param_data = self.GetParamsShapeAndData(Layers[i])
# Scale = Mul + Add
if len(param_shape) == 2:
# create mul
param_scale_shape = [1, param_shape[0][0], 1, 1]
param_scale_data = param_data[0]
param_scale_name = self.AddInputsTVIMannul(Layers[i], ["_scale"], [TensorProto.FLOAT], [param_scale_shape], [param_scale_data])
mul_node_name = node_name + "_mul"
mul_input_name = [input_name[0], param_scale_name[0]]
mul_output_name = [output_name[0] + "_mul"]
mul_input_shape = [input_shape[0], param_scale_shape]
mul_node = op.create_mul_node(Layers[i], mul_node_name, mul_input_name, mul_output_name, mul_input_shape)
self.onnxNodeList.append(mul_node)
param_bias_shape = [1, param_shape[1][0], 1, 1]
param_bias_data = param_data[1]
param_bias_name = self.AddInputsTVIMannul(Layers[i], ["_bias"], [TensorProto.FLOAT], [param_bias_shape], [param_bias_data])
add_node_name = node_name + "_add"
add_input_name = [mul_output_name[0], param_bias_name[0]]
add_output_name = output_name
add_input_shape = [input_shape[0], param_bias_shape]
add_node = op.create_add_node(Layers[i], add_node_name, add_input_name, add_output_name, add_input_shape)
self.onnxNodeList.append(add_node)
# Scale = Mul
if len(param_shape) == 1:
# create mul
param_scale_shape = [1, param_shape[0][0], 1, 1]
param_scale_data = param_data[0]
param_scale_name = self.AddInputsTVIMannul(
Layers[i], ["_scale"], [TensorProto.FLOAT],
[param_scale_shape], [param_scale_data])
mul_input_name = [input_name[0], param_scale_name[0]]
mul_input_shape = [input_shape[0], param_scale_shape]
mul_node = op.create_mul_node(Layers[i], node_name,
mul_input_name,
output_name,
mul_input_shape)
self.onnxNodeList.append(mul_node)
# Pooling
elif Layers[i].type == "Pooling" or Layers[i].type == Layer_POOLING:
# TODO:
# Pooling <= Pad + Pool
# NOTE: Because Caffe and ONNX handle the AveragePool differently, you need to add the Pad node before the pool node
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# create pad node
pads = op.get_pool_pads(Layers[i])
pads_shape = [np.shape(pads)]
pads_name = node_name + "_output"
pads_output_name = [node_name + "_output"]
pad_output_shape = op.calculate_pad_output_shape(input_shape, pads)
pads_param = self.AddInputsTVIMannul(Layers[i], ["_pad"], [TensorProto.INT64], pads_shape, [pads])
input_name.extend(pads_param)
pool_type = op.pooling_type(Layers[i])
if pool_type == "GlobalMaxPool" or pool_type == "MaxPool":
constant_value = [-sys.float_info.max]
constant_shape = [np.shape(constant_value)]
constant_value_param = self.AddInputsTVIMannul(Layers[i], ["_constant_value"], [TensorProto.FLOAT],
constant_shape, [constant_value])
input_name.extend(constant_value_param)
pad_node = op.create_pad_node(Layers[i], pads_name, input_name, pads_output_name, input_shape)
self.onnxNodeList.append(pad_node)
# 2.Build pool_node
pool_node = op.create_pooling_node(Layers[i], node_name, pads_output_name, output_name,
pad_output_shape)
# 3.Add node to node list
self.onnxNodeList.append(pool_node)
# MaxUnPool
elif Layers[i].type == "MaxUnpool":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# 2.Build unpool_node
unpool_node = op.createUnPooling(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(unpool_node)
# Eltwise
elif Layers[i].type == "Eltwise" or Layers[i].type == Layer_ELTWISE:
# 1.Get node input name, input dimension, output name, node name
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
node_name = Layers[i].name
# 2.Build eltwise_node
eltwise_node = op.createEltwise(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(eltwise_node)
# Softmax
elif Layers[i].type == "Softmax" or Layers[i].type == Layer_SOFTMAX:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# 2.Build softmax_node
# HACK: In ONNX, softmax is applied to a 2D tensor along the second axis. N-dimensional inputs
# are coerced to 2D tensors as [T_0 T_1 ... T_{axis-1}], [T_{axis} ... T_{n}]. Therefore, Caffe's
# NCHW format needs to be transposed to HWNC to preserve the behavior of softmax.
nchw_to_hwnc = [2, 3, 0, 1]
hwnc_input_shape = [[input_shape[0][d] for d in nchw_to_hwnc]]
transpose_output = [input_name[0] + "_transpose"]
transpose_node = c2oNode(Layers[i], node_name + "_transpose", "Transpose", input_name, transpose_output, input_shape, hwnc_input_shape, {"perm": nchw_to_hwnc})
softmax_output = [input_name[0] + "_softmax"]
softmax_node = op.createSoftmax(Layers[i], node_name, transpose_output, softmax_output, hwnc_input_shape)
transpose_back_node = c2oNode(Layers[i], node_name + "_transpose_back", "Transpose", softmax_output, output_name, hwnc_input_shape, input_shape, {"perm": nchw_to_hwnc})
# 3.Add node to node list
self.onnxNodeList.append(transpose_node)
self.onnxNodeList.append(softmax_node)
self.onnxNodeList.append(transpose_back_node)
# Relu
elif Layers[i].type == "ReLU" or Layers[i].type == Layer_RELU:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# letters = '1234567890abcdefghijklmnopqrst'
# length = random.randrange(5, 16)
# randstr = ''.join(random.choice(letters) for _ in range(length))
# node_name = node_name
# for i in range(len(output_name)):
# output_name[i] = output_name[i] + random.choice('1234567890abcdef')
#print(output_name)
# 2.Build relu_node
relu_node = op.createRelu(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(relu_node)
# PRelu
elif Layers[i].type == "PReLU":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
pname = self.AddInputsTVIFromParams(Layers[i], op_pname["PRelu"], op_ptype["PRelu"])
input_name.extend(pname)
# 3.Build PRelu_node
PRelu_node = op.createPRelu(Layers[i], node_name, input_name, output_name, input_shape)
# 4.Add node to node list
self.onnxNodeList.append(PRelu_node)
# relu6
elif Layers[i].type == 'ReLU6':
# relu6 = clip(0, 6)
# add relu node
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
min_value = np.float(0)
max_value = np.float(6)
shape = np.shape([min_value])
min_param = self.AddInputsTVIMannul(Layers[i], ["_min"],
[TensorProto.FLOAT], [shape],
[[min_value]])
input_name.extend(min_param)
max_param = self.AddInputsTVIMannul(Layers[i], ['_max'],
[TensorProto.FLOAT], [shape],
[[max_value]])
input_name.extend(max_param)
relu6_node = op.create_clip_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(relu6_node)
elif Layers[i].type == "Sigmoid":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# 2.Build relu_node
sigmoid_node = op.createSigmoid(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(sigmoid_node)
elif Layers[i].type == 'Log':
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
log_node = op.create_log_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(log_node)
# LRN
elif Layers[i].type == "LRN" or Layers[i].type == Layer_LRN:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Build LRN_node
LRN_node = op.createLRN(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(LRN_node)
# Dropout
elif Layers[i].type == "Dropout" or Layers[i].type == Layer_DROPOUT:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Build Dropout_node
Dropout_node = op.createDropout(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(Dropout_node)
# Upsample
elif Layers[i].type == "Upsample" or Layers[i].type == Layer_UPSAMPLE:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
# add roi input
# add scales input
paramshape = [[8, 1],
[4, 1]]
paramdata = [[1, 1, 1, 1, 2, 2, 2, 2],
[1.0, 1.0, Layers[i].upsample_param.scale, Layers[i].upsample_param.scale]]
pname = self.AddInputsTVIMannul(Layers[i], op_pname["Upsample"], op_ptype["Upsample"], paramshape,
paramdata)
input_name.extend(pname)
# 3.Build Upsample_node
Upsample_node = op.create_resize_node(Layers[i], node_name, input_name, output_name, input_shape)
# 4.Add node to node list
self.onnxNodeList.append(Upsample_node)
elif Layers[i].type == 'Interp':
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
interp_node = op.create_interp_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(interp_node)
# Concat
elif Layers[i].type == "Concat" or Layers[i].type == Layer_CONCAT:
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Build Concat_node
Concat_node = op.createConcat(Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(Concat_node)
elif Layers[i].type == 'Slice':
                # 1. Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name_list = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
starts, ends, axes = op.analyzeLayer(Layers[i], input_shape)
SliceLayer = copy.deepcopy(Layers[i])
for i in range(len(output_name_list)):
                    # slice_name is re-copied from input_name on every iteration so that the
                    # starts/ends/axes parameter names appended below do not accumulate across outputs
slice_name = copy.deepcopy(input_name)
# The shape of starts ends axes is the same
shape = [np.shape([1])]
starts_param = self.AddInputsTVIMannul(SliceLayer, ['_starts' + str(i)],
[TensorProto.INT64], shape,
[[starts[i]]])
ends_param = self.AddInputsTVIMannul(SliceLayer, ['_ends' + str(i)],
[TensorProto.INT64], shape,
[[ends[i]]])
axes_param = self.AddInputsTVIMannul(SliceLayer, ['_axes' + str(i)],
[TensorProto.INT64], shape,
[[axes[i]]])
slice_name.extend(starts_param)
slice_name.extend(ends_param)
slice_name.extend(axes_param)
Slice_node = op.createSlice(SliceLayer, output_name_list[i], slice_name, [output_name_list[i]],
input_shape, starts[i], ends[i], axes[i])
# 3. Add node to node list
self.onnxNodeList.append(Slice_node)
# Reshape
elif Layers[i].type == "Reshape":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
reshape_param = op.get_reshape_param(Layers[i], input_shape)
reshape_param_shape = [np.shape(reshape_param)]
pname = self.AddInputsTVIMannul(Layers[i], op_pname["Reshape"], op_ptype["Reshape"], reshape_param_shape,
[reshape_param])
input_name.extend(pname)
                # 3.Build reshape_node
reshape_node = op.createReshape(Layers[i], node_name, input_name, output_name, input_shape)
                # 4.Add node to node list
self.onnxNodeList.append(reshape_node)
# InnerProduct
# Since there is no fully connected layer in onnx, it needs to be split. There are two ways to split (Reshape+Gemm, Reshape+MatMul+Add)
elif Layers[i].type == "InnerProduct" or Layers[i].type == Layer_INNER_PRODUCT:
node_layer = copy.deepcopy(Layers[i]) # Deep copy
node_input_name, node_input_shape = self.GetLastLayerOutNameAndShape(node_layer) # Get a list of input names and input shapes
reshape_outname = ""
reshape_output_shape = op.getReshapeOutShape(Layers[i], node_input_shape)
need_reshape = 0 if reshape_output_shape[0] == node_input_shape[0] else 1
if need_reshape:
#### reshape
# 1.Get node input name, input dimension, output name, node name
reshape_outname = [node_layer.name + "_Reshape"]
reshape_nodename = node_layer.name + "_Reshape"
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
paramshape = [[2]]
reshape_pname = self.AddInputsTVIMannul(node_layer, op_pname["Reshape"], op_ptype["Reshape"],
paramshape, reshape_output_shape)
node_input_name.extend(reshape_pname)
# 3.Build reshape_node
reshape_node = op.createReshape(node_layer, reshape_nodename, node_input_name, reshape_outname,
node_input_shape)
# 4.Add node to node list
self.onnxNodeList.append(reshape_node)
# import ipdb; ipdb.set_trace()
#### Second, Gemm's last node output keeps the original name
gemm_layer = copy.deepcopy(Layers[i]) # Deep copy
# 1.Get node input name, input dimension, output name, node name
gemm_inname = reshape_outname if need_reshape == 1 else node_input_name
gemm_input_shape = reshape_output_shape if need_reshape == 1 else node_input_shape
gemm_outname = [gemm_layer.name]
gemm_nodename = gemm_layer.name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
gemm_pname = self.AddInputsTVIFromParams(gemm_layer, op_pname["InnerProduct"], op_ptype[
"InnerProduct"]) # Get input parameters. For add, the bias stored in blobs[1] is not needed, so get blobs[0] directly
gemm_inname.extend(gemm_pname)
# 3.Build gemm_node
matmul_node = op.createGemm(gemm_layer, gemm_nodename, gemm_inname, gemm_outname, gemm_input_shape,
gemm_layer.inner_product_param.num_output)
# 4.Add node to node list
self.onnxNodeList.append(matmul_node)
elif Layers[i].type == 'ShuffleChannel':
# TODO support ShuffleChannel
# reshape [N, C, H, W] tensor to [N, G, C', H, W]
node_layer = copy.deepcopy(Layers[i]) # Deep copy
node_input_name, node_input_shape = self.GetLastLayerOutNameAndShape(node_layer) # Get a list of input names and input shapes
reshape_outname = ""
reshape_output_shape = op.getReshapeOutShape(Layers[i], node_input_shape)
need_reshape = 0 if reshape_output_shape[0] == node_input_shape[0] else 1
if need_reshape:
                    # 1. reshape the [N, C, H, W] tensor to [N, G, C', H, W]
# 1.Get node input name, input dimension, output name, node name
reshape_outname = [node_layer.name + "_Reshape"]
reshape_nodename = node_layer.name + "_Reshape"
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
param_data = op.getReshapeOutShape(node_layer, node_input_shape)
param_shape = np.array([1, 2, 3, 4, 5], np.int).shape
reshape_pname = self.AddInputsTVIMannul(node_layer, op_pname["Reshape"], op_ptype["Reshape"],
[param_shape], param_data)
node_input_name.extend(reshape_pname)
# There is no need to expand the input here, because the input has not increased
# node_input_name.extend(reshape_pname)
# 3.Build reshape_node
reshape_node = op.createReshape(node_layer,
reshape_nodename,
node_input_name,
reshape_outname,
node_input_shape)
# 4.Add node to node list
self.onnxNodeList.append(reshape_node)
# 2. transpose [N, C', G, H, W]
transpose_layer = copy.deepcopy(Layers[i]) # Deep copy
# 1.Get node input name, input dimension, output name, node name
transpose_input_name = reshape_outname if need_reshape == 1 else node_input_name
transpose_input_shape = reshape_output_shape if need_reshape == 1 else node_input_shape
transpose_output_name = [node_layer.name + "_Transpose"]
transpose_node_name = node_layer.name + "_Transpose"
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
# Get input parameters. For add, the bias stored in blobs[1] is not needed, so get blobs[0] directly
# TODO Why does this place choose to use AddInputsTVIMannul? Depends on what?
# ANSWER: Depends on the type of onnx to be converted
# TODO param_date What is it? Why set this variable
param_data = [[2]]
# transpose_pname = self.AddInputsTVIMannul(transpose_layer,
# op_pname["Transpose"],
# op_ptype['Transpose'],
# param_data,
# transpose_input_shape)
# transpose_input_name.extend(transpose_pname)
# 3.
transpose_node = op.createTranspose(transpose_layer,
transpose_node_name,
transpose_input_name,
transpose_output_name,
transpose_input_shape)
# 4.Add node to node list
self.onnxNodeList.append(transpose_node)
                # 3. Reshape the [N, C', G, H, W] tensor back to [N, C, H, W]
#
end_layer = copy.deepcopy(Layers[i])
end_layer.type = "DeReshape"
                # The final node keeps the original layer name so that later layers that
                # reference this top can still find it.
end_output_name = [end_layer.name]
end_node_name = end_layer.name
# The output of the previous layer is the input of this layer
end_input_name = transpose_node.outputs_name
end_input_shape = transpose_node.outputs_shape
# Finally, keep the shape of the output and input consistent
end_output_shape = [[node_input_shape[0][0], -1, node_input_shape[0][2], node_input_shape[0][3]]]
param_shape = [np.array([1, 2, 3, 4], dtype=np.int).shape]
end_pname = self.AddInputsTVIMannul(node_layer, op_pname["DouReshape"], op_ptype["DouReshape"],
param_shape, end_output_shape)
end_input_name.extend(end_pname)
# Build
end_node = op.createReshape(end_layer,
end_node_name,
end_input_name,
end_output_name,
end_input_shape)
self.onnxNodeList.append(end_node)
# Deconvolution
elif Layers[i].type == "Deconvolution":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
conv_pname = self.AddInputsTVIFromParams(Layers[i], op_pname["ConvTranspose"],
op_ptype["ConvTranspose"])
input_name.extend(conv_pname)
# 3.Build conv_node
conv_node = op.createConvTranspose(Layers[i], node_name, input_name, output_name, input_shape)
# if True:
# self.__print_debug_info(node_name, input_name, output_name, input_shape, conv_node.outputs_shape)
# 4.Add node to node list
self.onnxNodeList.append(conv_node)
# Flatten
elif Layers[i].type == "Flatten":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
                # Later graph optimizations have trouble with Flatten, so convert Flatten into an equivalent Reshape instead
                # flatten_node = op.create_flatten_node(Layers[i], node_name, input_name,
                #                                       output_name, input_shape)
                # self.onnxNodeList.append(flatten_node)
# continue
# Flatten -> Reshape
# import ipdb; ipdb.set_trace()
# # 2.Generate node parameter tensor value info, get the node parameter name, and add the parameter name to the node input name list
paramshape = [[2]]
paramdata = op.getReshapeOutShape(Layers[i], input_shape)
reshape_pname = self.AddInputsTVIMannul(Layers[i], op_pname["Reshape"], op_ptype["Reshape"], paramshape,
paramdata)
input_name.extend(reshape_pname)
# 3.Build reshape_node
reshape_node = op.createReshape(Layers[i], node_name, input_name, output_name, input_shape)
# 4.Add node to node list
self.onnxNodeList.append(reshape_node)
elif Layers[i].type == "Permute":
# Permute -> Transpose
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
transpose_node = op.createTranspose(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(transpose_node)
elif Layers[i].type == "PriorBox":
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
priorbox_node = op.create_priorbox_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(priorbox_node)
elif Layers[i].type == "DetectionOutput":
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
detection_output_node = op.create_detection_output(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(detection_output_node)
elif Layers[i].type == "Axpy":
# axpy = mul + add
# top = bottom[0] * bottom[1] + bottom[2]
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
# create mul node
mul_node = op.create_axpy_mul_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(mul_node)
# create add node
add_node = op.create_axpy_add_node(Layers[i], node_name, input_name, output_name, input_shape)
self.onnxNodeList.append(add_node)
elif Layers[i].type == "Normalize":
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
lp_normalization_output_name = [output_name[0] + "_lp"]
lp_normalization_node = op.create_Lp_Normalization(Layers[i], node_name, input_name,
lp_normalization_output_name, input_shape)
self.onnxNodeList.append(lp_normalization_node)
# get Normalize
scale_shape, scale_data = self.GetParamsShapeAndData(Layers[i])
scale_shape = [1, scale_shape[0][0], 1, 1]
scale_input = self.AddInputsTVIFromParams(Layers[i], ["_scale"], [TensorProto.FLOAT])
mul_input_name = [lp_normalization_output_name[0], node_name + "_scale"]
mul_input_shape = [input_shape[0], scale_shape]
mul_node = op.create_mul_node(Layers[i], node_name + "_mul", mul_input_name, output_name,
mul_input_shape)
self.onnxNodeList.append(mul_node)
elif Layers[i].type == "Power":
# Power: Mul + Add + Pow
# create mul node
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
power, scale, shift = op.get_power_param(Layers[i])
scale_node_name = self.AddInputsTVIMannul(Layers[i], ["_scale"], [TensorProto.FLOAT], [np.shape(scale)], [scale])
mul_input_name = [input_name[0], scale_node_name[0]]
mul_node = op.create_mul_node(Layers[i], node_name + "_mul", mul_input_name, [output_name[0] + "_mul"],
[input_shape[0], np.shape(power)])
self.onnxNodeList.append(mul_node)
# create Add node
shift_param_name = self.AddInputsTVIMannul(Layers[i], ["_shift"], [TensorProto.FLOAT], [np.shape(scale)],
[shift])
add_input_name = [output_name[0] + "_mul", shift_param_name[0]]
add_node = op.create_add_node(Layers[i], node_name + "_add", add_input_name, [output_name[0] + "_add"], [input_shape[0], np.shape(shift)])
self.onnxNodeList.append(add_node)
# create Pow
power_param_name = self.AddInputsTVIMannul(Layers[i], ["_param_power"], [TensorProto.FLOAT], [np.shape(power)],[power])
power_input_name = [output_name[0] + "_add", power_param_name[0]]
power_node = op.create_power_node(Layers[i], node_name + "_power", power_input_name, output_name,
[input_shape[0], np.shape(power)])
self.onnxNodeList.append(power_node)
elif Layers[i].type == "TanH":
# 1.Get node input name, input dimension, output name, node name
input_name, input_shape = self.GetLastLayerOutNameAndShape(
Layers[i]) # Get a list of input names and input shapes
output_name = self.GetCurrentLayerOutName(Layers[i]) # Get a list of output names
node_name = Layers[i].name
# 2.Build tanh_node
tanh_node = op.createTanh(
Layers[i], node_name, input_name, output_name, input_shape)
# 3.Add node to node list
self.onnxNodeList.append(tanh_node)
elif Layers[i].type == "Crop":
# Crop: Slice
# create Slice node
input_name, input_shape = self.GetLastLayerOutNameAndShape(Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
starts, ends, axes = op.get_crop_param(Layers[i],input_shape)
Crop_name=[]
Crop_name.append(input_name[0])
starts_param = self.AddInputsTVIMannul(Layers[i],
['_starts' + str(i)],
[TensorProto.INT64],
[np.shape(starts)],
[starts])
ends_param = self.AddInputsTVIMannul(Layers[i],
['_ends' + str(i)],
[TensorProto.INT64],
[np.shape(ends)], [ends])
axes_param = self.AddInputsTVIMannul(Layers[i],
['_axes' + str(i)],
[TensorProto.INT64],
[np.shape(axes)], [axes])
Crop_name.extend(starts_param)
Crop_name.extend(ends_param)
Crop_name.extend(axes_param)
crop_node = op.create_crop_node(Layers[i], node_name, Crop_name, output_name,
input_shape)
self.onnxNodeList.append(crop_node)
# MVN
elif Layers[i].type == "MVN":
# MVN: InstanceNormalization
# create InstanceNormalization
if Layers[i].mvn_param.normalize_variance == False or Layers[i].mvn_param.across_channels == True:
print("Failed type not support: " + Layers[i].type)
exit(-1)
input_name, input_shape = self.GetLastLayerOutNameAndShape(
Layers[i])
output_name = self.GetCurrentLayerOutName(Layers[i])
node_name = Layers[i].name
MVN_name = []
MVN_name.append(input_name[0])
scale, bias = op.get_InstanceNorm_param(Layers[i],input_shape)
scale_param = self.AddInputsTVIMannul(Layers[i],
['_scale' + str(i)],
[TensorProto.FLOAT],
[np.shape(scale)],
[scale])
bias_param = self.AddInputsTVIMannul(Layers[i],
['_bias' + str(i)],
[TensorProto.FLOAT],
[np.shape(bias)], [bias])
MVN_name.extend(scale_param)
MVN_name.extend(bias_param)
MVN_node = op.create_InstanceNorm_op(Layers[i], node_name,
MVN_name, output_name,
input_shape)
self.onnxNodeList.append(MVN_node)
elif Layers[i].type == "Silence":
print("skip layer: " + Layers[i].name + " (Silence)")
else:
print("Failed type not support: " + Layers[i].type)
exit(-1)
# Determine whether the current node is an output node
def JudgeOutput(self, current_node, nodelist):
for output_name in current_node.outputs_name:
for node in nodelist:
if output_name in node.inputs_name:
return False
return True
# Add model output information and intermediate node information
def AddOutputsTVIAndValueInfo(self):
for i in range(len(self.onnxNodeList)):
            if self.JudgeOutput(self.onnxNodeList[i], self.onnxNodeList):  # Build output node information
lastnode = self.onnxNodeList[i]
for j in range(len(lastnode.outputs_shape)):
output_tvi = helper.make_tensor_value_info(lastnode.outputs_name[j], TensorProto.FLOAT,
lastnode.outputs_shape[j])
self.onnxmodel.addOutputsTVI(output_tvi)
            else:  # Build intermediate node (value_info) information
innernode = self.onnxNodeList[i]
for k in range(len(innernode.outputs_shape)):
hid_out_tvi = helper.make_tensor_value_info(innernode.outputs_name[k], TensorProto.FLOAT,
innernode.outputs_shape[k])
self.onnxmodel.addValueInfoTVI(hid_out_tvi)
#print("add model output information and model intermediate output information")
# Create a model
def createOnnxModel(self):
node_def = [Node.node for Node in self.onnxNodeList]
graph_def = helper.make_graph(
node_def,
self.onnxmodel.name,
self.onnxmodel.in_tvi,
self.onnxmodel.out_tvi,
self.onnxmodel.init_t,
value_info=self.onnxmodel.hidden_out_tvi
)
model_def = helper.make_model(graph_def, producer_name='caffe')
print("converting caffe model to onnx model completed successfully")
return model_def
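# A minimal usage sketch (illustrative only; it assumes the prototxt/caffemodel have already
# been parsed into caffe_pb2.NetParameter messages, e.g. with google.protobuf.text_format for
# the prototxt, as done by this package's loader):
#
#   c2o = Caffe2Onnx(net, model, "converted_model")
#   onnx.save(c2o.createOnnxModel(), "model.onnx")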
| 54.03257 | 184 | 0.541894 |
7945da5be91ec74a20a9997567aaed456f6afe5b | 2,451 | py | Python | build-support/kudu_util.py | ZeweiChen11/kudu | d97316e70b99a155f5677a8c29545b2b65f40a01 | [
"Apache-2.0"
] | 2 | 2016-09-12T06:53:49.000Z | 2016-09-12T15:47:46.000Z | build-support/kudu_util.py | ZeweiChen11/kudu | d97316e70b99a155f5677a8c29545b2b65f40a01 | [
"Apache-2.0"
] | null | null | null | build-support/kudu_util.py | ZeweiChen11/kudu | d97316e70b99a155f5677a8c29545b2b65f40a01 | [
"Apache-2.0"
] | 2 | 2018-04-03T05:49:03.000Z | 2020-05-29T21:18:46.000Z | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script generates a header file which contains definitions
# for the current Kudu build (eg timestamp, git hash, etc)
import os
import subprocess
import sys
class Colors(object):
""" ANSI color codes. """
def __on_tty(x):
if not os.isatty(sys.stdout.fileno()):
return ""
return x
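  # __on_tty is evaluated while the class body executes, so the color codes below are
  # fixed at import time depending on whether stdout is a TTY.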
RED = __on_tty("\x1b[31m")
GREEN = __on_tty("\x1b[32m")
YELLOW = __on_tty("\x1b[33m")
RESET = __on_tty("\x1b[m")
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def confirm_prompt(prompt):
"""
Issue the given prompt, and ask the user to confirm yes/no. Returns true
if the user confirms.
"""
while True:
print prompt, "[Y/n]:",
if not os.isatty(sys.stdout.fileno()):
print "Not running interactively. Assuming 'N'."
return False
pass
r = raw_input().strip().lower()
if r in ['y', 'yes', '']:
return True
elif r in ['n', 'no']:
return False
def get_my_email():
""" Return the email address in the user's git config. """
return check_output(['git', 'config', '--get', 'user.email']).strip()
| 29.53012 | 74 | 0.689514 |
7945da85c627e3e5a7b194d397143c5c0f23eb54 | 6,269 | py | Python | sacred/host_info.py | j3soon-pr/sacred | 106731d9e08bb33ca7b33329fa8fa38a7c398f74 | [
"MIT"
] | 1 | 2022-03-28T14:21:07.000Z | 2022-03-28T14:21:07.000Z | sacred/host_info.py | sthagen/IDSIA-sacred | 98a2d08dc8ac9b6330f81de555635b03a33749a1 | [
"MIT"
] | null | null | null | sacred/host_info.py | sthagen/IDSIA-sacred | 98a2d08dc8ac9b6330f81de555635b03a33749a1 | [
"MIT"
] | null | null | null | """Helps to collect information about the host of an experiment."""
import os
import platform
import re
import subprocess
from xml.etree import ElementTree
import warnings
from typing import List
import cpuinfo
from sacred.utils import optional_kwargs_decorator
from sacred.settings import SETTINGS
__all__ = ("host_info_gatherers", "get_host_info", "host_info_getter")
# Legacy global dict of functions that are used
# to collect the host information.
host_info_gatherers = {}
class IgnoreHostInfo(Exception):
"""Used by host_info_getters to signal that this cannot be gathered."""
class HostInfoGetter:
def __init__(self, getter_function, name):
self.getter_function = getter_function
self.name = name
def __call__(self):
return self.getter_function()
def get_info(self):
return self.getter_function()
def host_info_gatherer(name):
def wrapper(f):
return HostInfoGetter(f, name)
return wrapper
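# Example (illustrative) of a custom gatherer that can be passed to an Experiment via
# the `additional_host_info` argument:
#
#   @host_info_gatherer("cuda_version")
#   def cuda_version():
#       return os.environ.get("CUDA_VERSION", "unknown")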
def check_additional_host_info(additional_host_info: List[HostInfoGetter]):
names_taken = [x.name for x in _host_info_gatherers_list]
for getter in additional_host_info:
if getter.name in names_taken:
error_msg = (
"Key {} used in `additional_host_info` already exists as a "
"default gatherer function. Do not use the following keys: "
"{}"
).format(getter.name, names_taken)
raise KeyError(error_msg)
def get_host_info(additional_host_info: List[HostInfoGetter] = None):
"""Collect some information about the machine this experiment runs on.
Returns
-------
dict
A dictionary with information about the CPU, the OS and the
Python version of this machine.
"""
additional_host_info = additional_host_info or []
# can't use += because we don't want to modify the mutable argument.
additional_host_info = additional_host_info + _host_info_gatherers_list
all_host_info_gatherers = host_info_gatherers.copy()
for getter in additional_host_info:
all_host_info_gatherers[getter.name] = getter
host_info = {}
for k, v in all_host_info_gatherers.items():
try:
host_info[k] = v()
except IgnoreHostInfo:
pass
return host_info
@optional_kwargs_decorator
def host_info_getter(func, name=None):
"""
The decorated function is added to the process of collecting the host_info.
This just adds the decorated function to the global
``sacred.host_info.host_info_gatherers`` dictionary.
The functions from that dictionary are used when collecting the host info
using :py:func:`~sacred.host_info.get_host_info`.
Parameters
----------
func : callable
A function that can be called without arguments and returns some
json-serializable information.
name : str, optional
The name of the corresponding entry in host_info.
Defaults to the name of the function.
Returns
-------
The function itself.
"""
warnings.warn(
"The host_info_getter is deprecated. "
"Please use the `additional_host_info` argument"
" in the Experiment constructor.",
DeprecationWarning,
)
name = name or func.__name__
host_info_gatherers[name] = func
return func
# #################### Default Host Information ###############################
@host_info_gatherer(name="hostname")
def _hostname():
return platform.node()
@host_info_gatherer(name="os")
def _os():
return [platform.system(), platform.platform()]
@host_info_gatherer(name="python_version")
def _python_version():
return platform.python_version()
@host_info_gatherer(name="cpu")
def _cpu():
if platform.system() == "Windows":
return _get_cpu_by_pycpuinfo()
try:
if platform.system() == "Darwin":
return _get_cpu_by_sysctl()
elif platform.system() == "Linux":
return _get_cpu_by_proc_cpuinfo()
except Exception:
# Use pycpuinfo only if other ways fail, since it takes about 1 sec
return _get_cpu_by_pycpuinfo()
@host_info_gatherer(name="gpus")
def _gpus():
if not SETTINGS.HOST_INFO.INCLUDE_GPU_INFO:
return
try:
xml = subprocess.check_output(["nvidia-smi", "-q", "-x"]).decode(
"utf-8", "replace"
)
except (FileNotFoundError, OSError, subprocess.CalledProcessError):
raise IgnoreHostInfo()
gpu_info = {"gpus": []}
for child in ElementTree.fromstring(xml):
if child.tag == "driver_version":
gpu_info["driver_version"] = child.text
if child.tag != "gpu":
continue
fb_memory_usage = child.find("fb_memory_usage").find("total").text
if fb_memory_usage == "Insufficient Permissions":
# for Multi-Instance GPU (MIG) instances
mig = child.find("mig_devices").find("mig_device")
fb_memory_usage = mig.find("fb_memory_usage").find("total").text
gpu = {
"model": child.find("product_name").text,
"total_memory": int(fb_memory_usage.split()[0]),
"persistence_mode": (child.find("persistence_mode").text == "Enabled"),
}
gpu_info["gpus"].append(gpu)
return gpu_info
@host_info_gatherer(name="ENV")
def _environment():
keys_to_capture = SETTINGS.HOST_INFO.CAPTURED_ENV
return {k: os.environ[k] for k in keys_to_capture if k in os.environ}
_host_info_gatherers_list = [_hostname, _os, _python_version, _cpu, _gpus, _environment]
# ################### Get CPU Information ###############################
def _get_cpu_by_sysctl():
os.environ["PATH"] += ":/usr/sbin"
command = ["sysctl", "-n", "machdep.cpu.brand_string"]
return subprocess.check_output(command).decode().strip()
def _get_cpu_by_proc_cpuinfo():
command = ["cat", "/proc/cpuinfo"]
all_info = subprocess.check_output(command).decode()
model_pattern = re.compile(r"^\s*model name\s*:")
for line in all_info.split("\n"):
if model_pattern.match(line):
return model_pattern.sub("", line, 1).strip()
def _get_cpu_by_pycpuinfo():
return cpuinfo.get_cpu_info().get("brand", "Unknown")
| 29.7109 | 88 | 0.661349 |
7945dbd77d1e541b5336bc8290cf833921c0b9b7 | 2,780 | py | Python | bdlb/core/registered.py | uai-paper-839/bdlb | 0530f4cd90c868de7edaeb26b34c4f9a64165650 | [
"Apache-2.0"
] | 1 | 2020-07-15T22:21:52.000Z | 2020-07-15T22:21:52.000Z | bdlb/core/registered.py | uai-paper-839/bdlb | 0530f4cd90c868de7edaeb26b34c4f9a64165650 | [
"Apache-2.0"
] | 4 | 2020-09-25T19:06:35.000Z | 2021-11-10T19:40:34.000Z | bdlb/core/registered.py | icml-paper-2959/bdlb | de746d1911d6f8373435856a551a17773d6f25f7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks registry handlers and definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Optional, Text, Union
from ..core.benchmark import Benchmark
from ..core.levels import Level
from ..diabetic_retinopathy_diagnosis.benchmark import DiabeticRetinopathyDiagnosisBecnhmark
# Internal registry containing <str registered_name, Benchmark subclass>
_BENCHMARK_REGISTRY: Dict[Text, Benchmark] = {
"diabetic_retinopathy_diagnosis": DiabeticRetinopathyDiagnosisBecnhmark
}
def load(
benchmark: Text,
level: Union[Text, Level] = "realworld",
data_dir: Optional[Text] = None,
download_and_prepare: bool = True,
**dtask_kwargs,
) -> Benchmark:
"""Loads the named benchmark into a `bdlb.Benchmark`.
Args:
benchmark: `str`, the registerd name of `bdlb.Benchmark`.
level: `bdlb.Level` or `str`, which level of the benchmark to load.
data_dir: `str` (optional), directory to read/write data.
Defaults to "~/.bdlb/data".
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
dtask_kwargs: key arguments for the benchmark contructor.
Returns:
A registered `bdlb.Benchmark` with `level` at `data_dir`.
Raises:
BenchmarkNotFoundError: if `name` is unrecognised.
"""
if not benchmark in _BENCHMARK_REGISTRY:
raise BenchmarkNotFoundError(benchmark)
# Fetch benchmark object
return _BENCHMARK_REGISTRY.get(benchmark)(
level=level,
data_dir=data_dir,
download_and_prepare=download_and_prepare,
**dtask_kwargs,
)
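# Example (illustrative):
#   benchmark = load("diabetic_retinopathy_diagnosis", level="realworld")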
class BenchmarkNotFoundError(ValueError):
"""The requested `bdlb.Benchmark` was not found."""
def __init__(self, name: Text):
    all_benchmarks_str = "\n\t- ".join([""] + list(_BENCHMARK_REGISTRY.keys()))
    error_str = (
        "Benchmark {name} not found. Available benchmarks: {benchmarks}\n".format(
            name=name, benchmarks=all_benchmarks_str))
super(BenchmarkNotFoundError, self).__init__(error_str) | 36.578947 | 92 | 0.718705 |
7945dc29e0b5b7cb7f15abc847956f6eb56adb3c | 3,042 | py | Python | api/microsoft/tests/test_views.py | suricats/surirobot-api-emotions | 1036f8d9470da016bf8b8d75211382a67116d0a0 | [
"MIT"
] | null | null | null | api/microsoft/tests/test_views.py | suricats/surirobot-api-emotions | 1036f8d9470da016bf8b8d75211382a67116d0a0 | [
"MIT"
] | null | null | null | api/microsoft/tests/test_views.py | suricats/surirobot-api-emotions | 1036f8d9470da016bf8b8d75211382a67116d0a0 | [
"MIT"
] | null | null | null | import json
from unittest.mock import patch
from flask import url_for
from api.exceptions import BadHeaderException, MissingHeaderException, UnknownAPIException, OperationFailedException
from api.microsoft.constants import SUPPORTED_FORMATS
@patch('api.microsoft.views.microsoft_analyse_picture', autospec=True)
def test_analyse_picture_success(mock_microsoft_analyse_picture, client, happy_file, analyse_picture_result):
mock_microsoft_analyse_picture.return_value = analyse_picture_result
res = client.post(
url_for('emo_microsoft.analyse'),
content_type='image/jpeg',
data=happy_file
)
assert res.status_code == 200
assert sorted(json.loads(res.data).items()) == sorted(analyse_picture_result.items())
assert mock_microsoft_analyse_picture.call_count == 1
@patch('api.microsoft.views.microsoft_analyse_picture', autospec=True)
def test_analyse_picture_bad_content_type(mock_microsoft_analyse_picture, client, happy_file):
res = client.post(
url_for('emo_microsoft.analyse'),
content_type='image/xxx',
data=happy_file
)
expected_result = {'errors': [BadHeaderException('Content-Type', valid_values=SUPPORTED_FORMATS).to_dict()]}
assert res.status_code == 400
assert sorted(json.loads(res.data).items()) == sorted(expected_result.items())
assert mock_microsoft_analyse_picture.call_count == 0
@patch('api.microsoft.views.microsoft_analyse_picture', autospec=True)
def test_analyse_picture_missing_content_type(mock_microsoft_analyse_picture, client, happy_file):
res = client.post(
url_for('emo_microsoft.analyse'),
data=happy_file
)
expected_result = {'errors': [MissingHeaderException('Content-Type').to_dict()]}
assert res.status_code == 400
assert sorted(json.loads(res.data).items()) == sorted(expected_result.items())
assert mock_microsoft_analyse_picture.call_count == 0
@patch('api.microsoft.views.microsoft_analyse_picture', autospec=True)
def test_analyse_picture_unknown_exception(mock_microsoft_analyse_picture, client, happy_file):
mock_microsoft_analyse_picture.side_effect = Exception()
res = client.post(
url_for('emo_microsoft.analyse'),
content_type='image/jpeg',
data=happy_file
)
expected_result = {'errors': [UnknownAPIException().to_dict()]}
assert res.status_code == 500
assert sorted(json.loads(res.data).items()) == sorted(expected_result.items())
@patch('api.microsoft.views.microsoft_analyse_picture', autospec=True)
def test_analyse_picture_known_exception(mock_microsoft_analyse_picture, client, happy_file):
mock_microsoft_analyse_picture.side_effect = OperationFailedException()
res = client.post(
url_for('emo_microsoft.analyse'),
content_type='image/jpeg',
data=happy_file
)
expected_result = {'errors': [OperationFailedException().to_dict()]}
assert res.status_code == 422
assert sorted(json.loads(res.data).items()) == sorted(expected_result.items())
| 36.650602 | 116 | 0.753123 |
7945dd047c72d0ffabe57060c631e49310130159 | 7,900 | py | Python | run_xgboost.py | tan1889/gce | 92ad1e6f35c6a3f2df4403b0d3c40af4a66a3133 | [
"MIT"
] | 1 | 2020-10-30T02:39:27.000Z | 2020-10-30T02:39:27.000Z | run_xgboost.py | tan1889/gce | 92ad1e6f35c6a3f2df4403b0d3c40af4a66a3133 | [
"MIT"
] | null | null | null | run_xgboost.py | tan1889/gce | 92ad1e6f35c6a3f2df4403b0d3c40af4a66a3133 | [
"MIT"
] | null | null | null | import argparse
from launcher import launch
parser = argparse.ArgumentParser(description='XGBOOST ALGORITHM LAUNCHER')
parser.add_argument('--desc', type=str, default='', metavar='D',
help='Experiment description, prefix of the output filename, good for grouping output files.')
# Dataset params
parser.add_argument('--dataset', type=str, default='iris', metavar='DS',
help='DS = mnist | mnist_t | cifar10 | cifar10_t | cifar10_f '
'| covertype | kddcup99 | susy | susy_r | pufs | pufs_r | msd | housing | transcode '
'| boston | iris | diabetes | digits | wine | breast_cancer.\n'
'postfixes: '
'"_r": posing a two class classification problem as a regression [-1, 1] problem, '
'"_t": transformation is applied (elastic for MNIST and random crop/flip for CIFAR10), '
'"_f": (only for cifar10) cifar10_t go through features_net cifar10_342f.pt. '
'Note: Datasets are from UCI ML Repository and sklearn')
parser.add_argument('--preprocessing', type=str, default=None, metavar='P',
help='P {None=auto, normalize, squash ...}')
parser.add_argument('--validation-ratio', type=float, default=0.2, metavar='R',
help='Use R * n_train samples from train_data for validation. '
                         'If R = 0: use test_data for validation (to see improvement in test performance).')
parser.add_argument('--expansion', type=int, default=0, metavar='E',
help='This option only works for MNIST and CIFAR10 '
'If E >= 2, expand the dataset using the appropriate transformation (new_size=E*old_size). '
'Note: if transformation is enabled (by using dataset name + _t), and E <= 1, the '
'transformation is on-the-fly, so each request to the same data item will be returned with'
'a different (transformed) item. If E >= 2, the transformation is offline, the dataset is'
'E times larger and static, with the first n items identical to original dataset, and the '
'next items are their transformation in the same ordering. However, if E>MAX_CACHEABLE (10) '
'we simulate online transformation using Ex offline transformations: dataset_size = n, but '
'query to image i is returned with one of its E transformed instances.')
parser.add_argument('--batch-size', type=int, default=128, metavar='B',
help='Batch size for training (default: B = 128)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='BT',
help='Batch size for testing (default: BT = 128)')
parser.add_argument('--n-workers-dl', type=int, default=1, metavar='NW',
help='Number of workers for dataloader. None means 0: run in the same process. '
'5 means using min(n_cores, 5). -5 means using max(1, n_cores-5). '
'-200 means using n_cores//2. -300 means using n_cores//3!')
parser.add_argument('--no-shuffle', action='store_true', default=False,
help='Disable shuffling of training set. Normally not required.')
# XGBoost params. 'objective' is automatically set depending on the dataset (regression or classification
parser.add_argument('--criterion', type=str, default='auto', metavar='C',
                    help='The eval_metric param: evaluation criterion (e.g. for evaluating the validation set for early stopping). '
'None or auto means using rmse for regression, error for 2-class, merror for multi-class. '
'Alternative: mae for regression, logloss for multi-class, auc for 2-class.')
parser.add_argument('--n-estimators', type=int, default=1000, metavar='NE',
help='The num_rounds parameter: Number of rounds (each round adds one estimator): '
'Typical value: 10 - 2000')
parser.add_argument('--early-stopping', type=int, default=20, metavar='NE',
help='Number of early stopping rounds. 0 means disabled')
parser.add_argument('--max-depth', type=int, default=5, metavar='MD',
help='The maximum depth of a tree. Used to control over-fitting as higher depth will allow the '
'model to learn relations very specific to a particular sample.. Typical value: 3 - 10')
parser.add_argument('--eta', type=float, default=0.1, metavar='LR',
help='eta is learning rate. Typical value: 0.01 - 0.2')
parser.add_argument('--min-child-weight', type=int, default=1, metavar='CW',
help='Defines the minimum sum of weights of all observations required in a child. Used to control '
'over-fitting. Higher values prevent a model from learning relations which might be highly '
'specific to the particular sample selected for a tree. Typical value: 0 - 1e3')
parser.add_argument('--gamma', type=float, default=0.1, metavar='LR',
help='Gamma specifies the minimum loss reduction required to make a split. Typical value: 0 - 0.5')
parser.add_argument('--subsample', type=float, default=0.8, metavar='LR',
help='The fraction of observations to be randomly sampled for each tree. Lower values make the '
'algorithm more conservative and prevents overfitting but too small values might lead to '
'under-fitting. Typical value: 0.5 - 1')
parser.add_argument('--colsample-bytree', type=float, default=0.8, metavar='LR',
help='The fraction of columns to be randomly sampled for each tree. Typical value: 0.5 - 1')
parser.add_argument('--reg-lambda', type=float, default=1., metavar='LR',
help='L2 regularization term on weights. Commonly, gamma is used for regularization, but this '
'can be used as well. Typical value: 0 - 1e2.')
parser.add_argument('--reg-alpha', type=float, default=0., metavar='LR',
help='L1 regularization term on weights. Commonly, gamma is used for regularization, but this '
'can be used as well, specially for high dimension problem. Typical value: 0 - 1e2.')
parser.add_argument('--scale-pos-weight', type=float, default=1., metavar='LR',
help='Control the balance of positive and negative weights, useful for unbalanced classes. '
'A typical value to consider: sum(negative instances) / sum(positive instances)')
# Other params
parser.add_argument('--n-workers', type=int, default=4, metavar='NW',
help='Number of workers for the algorithm. None means 1, -k ... means using n_cores-k cores. '
'-200 means using n_cores//2 cores, -300 means n_cores//3.')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disable CUDA training (default: False)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='Random seed, set to make the result reproducible. 0=disabled.')
parser.add_argument('--save-result', type=int, default=1, metavar='SR',
help='SR = 0 | 1 | 2. 0: no saving. 1: save log file. 2: save log and best model.')
parser.add_argument('--verbose', type=int, default=5, metavar='V',
help='Level of verbose: 0 (silent), 1 (warning), 2 (info), '
'3 (model:summary), 4 (model:warning), 5 (model:details), 6 (model:debug).')
args = parser.parse_args()
args.algorithm = 'misc'
args.optimizer = 'xgboost'
result = launch(args)
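# Illustrative invocation (a sketch; the flag values are assumptions, the flags
# themselves are defined above):
#   python run_xgboost.py --dataset iris --n-estimators 500 --max-depth 6 \
#       --eta 0.05 --early-stopping 30 --validation-ratio 0.2
# launch(args) is expected to train the XGBoost model with these settings and
# return the run's results.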
| 77.45098 | 119 | 0.617848 |
7945dd429d585b412fd1f581a4b587a9c9d7b60f | 1,145 | py | Python | LeetCode/0014_longest_common_prefix.py | adityaarakeri/PythonAlgorithms | 4a042abaf1b501cdc4aed46ef5b57d58dadb5625 | [
"MIT"
] | null | null | null | LeetCode/0014_longest_common_prefix.py | adityaarakeri/PythonAlgorithms | 4a042abaf1b501cdc4aed46ef5b57d58dadb5625 | [
"MIT"
] | null | null | null | LeetCode/0014_longest_common_prefix.py | adityaarakeri/PythonAlgorithms | 4a042abaf1b501cdc4aed46ef5b57d58dadb5625 | [
"MIT"
] | null | null | null | class Solution(object):
def bruteforce(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
result = ''
if len(strs) == 0:
return ''
        count = min(len(s) for s in strs)
for i in range(1, count+1):
prefix = strs[0][:i]
for s in strs:
if s[:i] != prefix:
return result
result = prefix
return result
def optimized(self, strs):
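        # zip(*strs) yields tuples of the i-th characters across all strings;
        # the prefix grows while each tuple holds a single distinct character.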
result = ""
for n in zip(*strs):
if (len(set(n))) == 1:
result = result + n[0]
else:
return result
return result
s = Solution()
input_1 = ["flower","flow","flight"]
input_2 = ["dog","racecar","car"]
print(f'Input 1: {input_1}')
print(f'Bruteforce Solution: \n{s.bruteforce(input_1)}')
print(f'Optimized Solution: \n{s.optimized(input_1)}')
print(f'\nInput 2: {input_2}')
print(f'Bruteforce Solution: \n{s.bruteforce(input_2)}')
print(f'Optimized Solution: \n{s.optimized(input_2)}')
| 24.891304 | 56 | 0.494323 |
7945dd4a53885d5823e272681e258dc114c53dbb | 9,261 | py | Python | ResourceBundle/BundleTypes/RawResourceBundle.py | felix-zenk/ResourceBundle | 2ffe464d572b38fe286636c4431d40f2505a9a5a | [
"MIT"
] | null | null | null | ResourceBundle/BundleTypes/RawResourceBundle.py | felix-zenk/ResourceBundle | 2ffe464d572b38fe286636c4431d40f2505a9a5a | [
"MIT"
] | null | null | null | ResourceBundle/BundleTypes/RawResourceBundle.py | felix-zenk/ResourceBundle | 2ffe464d572b38fe286636c4431d40f2505a9a5a | [
"MIT"
] | null | null | null | import re
from typing import List, Type
from os.path import exists, isfile, join
from ..util.Locale import Locale, ROOT_LOCALE, from_iso
from ..exceptions import NotInResourceBundleError, MissingResourceBundleError
_STANDARD_FILE_EXTENSION = "properties"
class RawResourceBundle:
_cached_bundles = {}
def __init__(self, path: str = None, root: str = "."):
"""
Class that handles access to a resource across different locales.
:param path: The path to the resource file
:type path: str
:param root: The resources root directory path
:type root: str
"""
self._root = "." # Initialize root
self._parent = None
self._lookup = {}
self._reader = None
self.set_resources_root(root) # Set correct root
self._name = "INVALID" if path is None else path
def _load(self, path: str) -> None:
"""
Loads keys and values into this BasicResourceBundle instance.
:param path: The path to the resource file
:type path: str
:return: Nothing
:rtype: None
"""
if self._root not in path:
self._reader.load(join(self._root, path))
else:
self._reader.load(path)
self._lookup = self._reader.get()
def _needs_formatting(self, value: str) -> bool:
        return bool(re.findall(r'{[^}]*}', value))
def _format(self, value, *args, **kwargs):
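        # Recursively resolves `{placeholder}` patterns using the caller's
        # arguments and this bundle's own key/value lookup; on a missing key it
        # defers to the parent bundle in the chain.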
if self._needs_formatting(value):
try:
return self._format(value.format(*args, **kwargs, **self._lookup))
except KeyError:
return self._parent._format(value, *args, **kwargs)
else:
return value
def _handle_get_object(self, key, *args, **kwargs) -> object:
"""
Searches the given key in this ResourceBundle and returns its value if found, else None.
        :param key: The key of the desired object
        :type key: str
        :return: The (formatted) value if the key was found, else None
        :rtype: object
"""
try:
return self._format(self._lookup[key], *args, **kwargs) \
if self._needs_formatting(self._lookup[key]) \
else self._lookup[key]
except KeyError:
return None
def _set_parent(self, parent) -> None:
"""
Sets the parent for this bundle.
:param parent: The new parent
:type parent: BasicResourceBundle
:return: Nothing
:rtype: None
"""
self._parent = parent
def set_resources_root(self, path: str) -> None:
"""
Sets the resources root.
:param path: The new path
:type path: str
:return: Nothing
:rtype: None
"""
path = path.replace("\\", "/")
if path.endswith("/"):
path = path[:-1]
if not exists(path):
raise FileNotFoundError("'" + path + "' could not be found")
if isfile(path):
raise NotADirectoryError("'" + path + "' is not a directory")
self._root = path
if self._parent is not None:
self._parent.set_resources_root(path)
def generate_parent_chain(self, base_name: str, locale_: Locale, root: str = None) -> None:
"""
Generates the parent chain for this BasicResourceBundle.
:param base_name: The base name of this bundle
:type base_name: str
:param locale_: The Locale of this ResourceBundle
:type locale_: Locale
:param root: The resources root directory path
:type root: str
:return: Nothing
:rtype: None
"""
top_locale = locale_.get_top_locale()
self._cached_bundles[_to_bundle_name(base_name, locale_)] = self
if top_locale is None:
return
else:
try:
bundle = self._cached_bundles[_to_bundle_name(base_name, top_locale)]
bundle.set_resources_root(root)
except KeyError:
bundle = _new_bundle(base_name, top_locale, self._name.split(".")[-1], root=root, bundle_type=type(self))
self._set_parent(bundle)
def get(self, key: str, *args, **kwargs) -> str:
"""
Gets an object from the BasicResourceBundle.
:param key: The key of the desired object
:type key: str
:return: The object
:rtype: str
"""
obj = self._handle_get_object(key, *args, **kwargs)
if obj is None:
if self._parent is not None:
obj = self._parent.get(key, *args, **kwargs)
if obj is None:
raise NotInResourceBundleError(self._name, key)
return obj
def get_name(self) -> str:
"""
Getter for the name of this BasicResourceBundle.
:return: The name
:rtype: str
"""
return self._name
def get_keys(self) -> List[str]:
"""
Gets the currently loaded keys.
:return: The keys
:rtype: List[str]
"""
return list(self._lookup.keys())
def get_values(self) -> List[str]:
"""
Gets the currently loaded values.
:return: The values
:rtype: List[str]
"""
return list(self._lookup.values())
def get_all_keys(self) -> List[str]:
"""
Gets all keys from this BasicResourceBundle and its parents.
Due to casting to set the order of the keys can vary.
:return: The keys
:rtype: List[str]
"""
if self._parent is not None:
return list(set(self.get_keys() + self._parent.get_all_keys()))
else:
return self.get_keys()
def get_all_values(self) -> List[str]:
"""
Gets all values from this BasicResourceBundle and its parents.
Due to casting to set the order of the values can vary.
Usage of this method is not encouraged.
:return: The keys
:rtype: List[str]
"""
if self._parent is not None:
return list(set(self.get_values() + self._parent.get_all_values()))
else:
return self.get_values()
def __str__(self):
return "<{} - '{}'>".format(self.__class__.__name__, self._name)
def __repr__(self):
return str(self)
def get_bundle(base_name: str, locale_: Locale = None, root: str = ".") -> RawResourceBundle:
"""
Gets a specific ResourceBundle.
:param base_name: The name of the ResourceBundle
:type base_name: str
:param locale_: The locale
:type locale_: ..util.Locale
:param root: The resources root directory path
:type root: str
:return: The ResourceBundle
:rtype: BasicResourceBundle
"""
return _new_bundle(base_name, locale_, _STANDARD_FILE_EXTENSION, root=root)
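# Illustrative usage (a sketch; the bundle name, locale and key are assumptions):
#   bundle = get_bundle("Messages", from_iso("en_US"), root="resources")
#   greeting = bundle.get("greeting")
# Lookups fall back along the generated parent chain
# (e.g. Messages_en_US -> Messages_en -> Messages).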
def _to_resource_name(bundle_name: str, format_: str) -> str:
"""
Converts the BasicResourceBundle name into the corresponding resource path.
:param bundle_name: The specific name of the BasicResourceBundle
:type bundle_name: str
:param format_: The format of this BasicResourceBundle (file extension)
:type format_: str
:return: The resource name
:rtype: str
"""
return bundle_name + "." + format_
def _to_bundle_name(base_name: str, locale_: Locale) -> str:
"""
Generates the bundle name for a BasicResourceBundle.
:param base_name: The base name of the BasicResourceBundle
:type base_name: str
:param locale_: The locale to use for generating the name
:type locale_: ..util.Locale
:return: The name of the BasicResourceBundle
:rtype: str
"""
return base_name + locale_.get_delim() + locale_.to_string() if locale_ != ROOT_LOCALE else base_name
def _new_bundle(base_name: str, locale_: Locale, format_: str, root: str = ".",
bundle_type: Type[RawResourceBundle] = RawResourceBundle
) -> RawResourceBundle:
"""
Creates a new ResourceBundle.
:param base_name: The base name of this ResourceBundle
:type base_name: str
:param locale_: The locale for this ResourceBundle
:type locale_: ..util.Locale
:param format_: The format (file extension)
:type format_: str
:param root: The resources root directory path
:type root: str
:param bundle_type: The type of the ResourceBundle
:type bundle_type: RawResourceBundle
:return: The new ResourceBundle
:rtype: BasicResourceBundle
"""
if locale_ is None:
return _new_bundle(base_name=base_name, locale_=ROOT_LOCALE, format_=format_,
root=root, bundle_type=bundle_type)
if type(locale_) is str:
locale_ = from_iso(str(locale_))
try:
bundle = bundle_type(_to_resource_name(_to_bundle_name(base_name, locale_), format_), root=root)
bundle.generate_parent_chain(base_name, locale_, root=root)
return bundle
except FileNotFoundError:
if locale_ != ROOT_LOCALE:
return _new_bundle(base_name, locale_.get_top_locale(), format_, root=root, bundle_type=bundle_type)
else:
raise MissingResourceBundleError(_to_bundle_name(base_name, locale_))
| 34.047794 | 121 | 0.610733 |
7945dd5732afaef444d5e686503c7c24760a6ea4 | 14,795 | py | Python | test/functional/wallet_address_types.py | derek-mckinney/GrumpyCat | 4f3a54396e55f4bd8b94ec0b59756bceb335d457 | [
"MIT"
] | 431 | 2015-01-21T03:57:18.000Z | 2022-03-30T17:17:18.000Z | test/functional/wallet_address_types.py | derek-mckinney/GrumpyCat | 4f3a54396e55f4bd8b94ec0b59756bceb335d457 | [
"MIT"
] | 140 | 2015-02-04T07:15:14.000Z | 2022-02-07T03:37:28.000Z | test/functional/wallet_address_types.py | derek-mckinney/GrumpyCat | 4f3a54396e55f4bd8b94ec0b59756bceb335d457 | [
"MIT"
] | 249 | 2015-01-03T19:48:55.000Z | 2022-02-23T09:46:33.000Z | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet can send and receive using all combinations of address types.
There are 5 nodes-under-test:
- node0 uses legacy addresses
- node1 uses p2sh/segwit addresses
- node2 uses p2sh/segwit addresses and bech32 addresses for change
- node3 uses bech32 addresses
    - node4 uses a p2sh/segwit address for change
node5 exists to generate new blocks.
## Multisig address test
Test that adding a multisig address with:
- an uncompressed pubkey always gives a legacy address
    - only compressed pubkeys gives an `-addresstype` address
## Sending to address types test
A series of tests, iterating over node0-node4. In each iteration of the test, one node sends:
- 10/101th of its balance to itself (using getrawchangeaddress for single key addresses)
- 20/101th to the next node
- 30/101th to the node after that
- 40/101th to the remaining node
- 1/101th remains as fee+change
Iterate over each node for single key addresses, and then over each node for
multisig addresses.
Repeat test, but with explicit address_type parameters passed to getnewaddress
and getrawchangeaddress:
- node0 and node3 send to p2sh.
- node1 sends to bech32.
- node2 sends to legacy.
As every node sends coins after receiving, this also
verifies that spending coins sent to all these address types works.
## Change type test
Test that the nodes generate the correct change address type:
- node0 always uses a legacy change address.
- node1 uses a bech32 addresses for change if any destination address is bech32.
- node2 always uses a bech32 address for change
- node3 always uses a bech32 address for change
- node4 always uses p2sh/segwit output for change.
"""
from decimal import Decimal
import itertools
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
sync_blocks,
sync_mempools,
)
class AddressTypeTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 6
self.extra_args = [
["-addresstype=legacy"],
["-addresstype=p2sh-segwit"],
["-addresstype=p2sh-segwit", "-changetype=bech32"],
["-addresstype=bech32"],
["-changetype=p2sh-segwit"],
[],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
# Fully mesh-connect nodes for faster mempool sync
for i, j in itertools.product(range(self.num_nodes), repeat=2):
if i > j:
connect_nodes_bi(self.nodes, i, j)
self.sync_all()
def get_balances(self, confirmed=True):
"""Return a list of confirmed or unconfirmed balances."""
if confirmed:
return [self.nodes[i].getbalance() for i in range(4)]
else:
return [self.nodes[i].getunconfirmedbalance() for i in range(4)]
def test_address(self, node, address, multisig, typ):
"""Run sanity checks on an address."""
info = self.nodes[node].getaddressinfo(address)
assert(self.nodes[node].validateaddress(address)['isvalid'])
if not multisig and typ == 'legacy':
# P2PKH
assert(not info['isscript'])
assert(not info['iswitness'])
assert('pubkey' in info)
elif not multisig and typ == 'p2sh-segwit':
# P2SH-P2WPKH
assert(info['isscript'])
assert(not info['iswitness'])
assert_equal(info['script'], 'witness_v0_keyhash')
assert('pubkey' in info)
elif not multisig and typ == 'bech32':
# P2WPKH
assert(not info['isscript'])
assert(info['iswitness'])
assert_equal(info['witness_version'], 0)
assert_equal(len(info['witness_program']), 40)
assert('pubkey' in info)
elif typ == 'legacy':
# P2SH-multisig
assert(info['isscript'])
assert_equal(info['script'], 'multisig')
assert(not info['iswitness'])
assert('pubkeys' in info)
elif typ == 'p2sh-segwit':
# P2SH-P2WSH-multisig
assert(info['isscript'])
assert_equal(info['script'], 'witness_v0_scripthash')
assert(not info['iswitness'])
assert(info['embedded']['isscript'])
assert_equal(info['embedded']['script'], 'multisig')
assert(info['embedded']['iswitness'])
assert_equal(info['embedded']['witness_version'], 0)
assert_equal(len(info['embedded']['witness_program']), 64)
assert('pubkeys' in info['embedded'])
elif typ == 'bech32':
# P2WSH-multisig
assert(info['isscript'])
assert_equal(info['script'], 'multisig')
assert(info['iswitness'])
assert_equal(info['witness_version'], 0)
assert_equal(len(info['witness_program']), 64)
assert('pubkeys' in info)
else:
# Unknown type
assert(False)
def test_change_output_type(self, node_sender, destinations, expected_type):
txid = self.nodes[node_sender].sendmany(fromaccount="", amounts=dict.fromkeys(destinations, 0.001))
raw_tx = self.nodes[node_sender].getrawtransaction(txid)
tx = self.nodes[node_sender].decoderawtransaction(raw_tx)
# Make sure the transaction has change:
assert_equal(len(tx["vout"]), len(destinations) + 1)
# Make sure the destinations are included, and remove them:
output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]]
change_addresses = [d for d in output_addresses if d not in destinations]
assert_equal(len(change_addresses), 1)
self.log.debug("Check if change address " + change_addresses[0] + " is " + expected_type)
self.test_address(node_sender, change_addresses[0], multisig=False, typ=expected_type)
def run_test(self):
# Mine 101 blocks on node5 to bring nodes out of IBD and make sure that
# no coinbases are maturing for the nodes-under-test during the test
self.nodes[5].generate(101)
sync_blocks(self.nodes)
uncompressed_1 = "0496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858ee"
uncompressed_2 = "047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77"
compressed_1 = "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"
compressed_2 = "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"
# addmultisigaddress with at least 1 uncompressed key should return a legacy address.
for node in range(4):
self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, uncompressed_2])['address'], True, 'legacy')
self.test_address(node, self.nodes[node].addmultisigaddress(2, [compressed_1, uncompressed_2])['address'], True, 'legacy')
self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, compressed_2])['address'], True, 'legacy')
# addmultisigaddress with all compressed keys should return the appropriate address type (even when the keys are not ours).
self.test_address(0, self.nodes[0].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'legacy')
self.test_address(1, self.nodes[1].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit')
self.test_address(2, self.nodes[2].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit')
self.test_address(3, self.nodes[3].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'bech32')
for explicit_type, multisig, from_node in itertools.product([False, True], [False, True], range(4)):
address_type = None
if explicit_type and not multisig:
if from_node == 1:
address_type = 'bech32'
elif from_node == 0 or from_node == 3:
address_type = 'p2sh-segwit'
else:
address_type = 'legacy'
self.log.info("Sending from node {} ({}) with{} multisig using {}".format(from_node, self.extra_args[from_node], "" if multisig else "out", "default" if address_type is None else address_type))
old_balances = self.get_balances()
self.log.debug("Old balances are {}".format(old_balances))
to_send = (old_balances[from_node] / 101).quantize(Decimal("0.00000001"))
sends = {}
self.log.debug("Prepare sends")
for n, to_node in enumerate(range(from_node, from_node + 4)):
to_node %= 4
change = False
if not multisig:
if from_node == to_node:
# When sending non-multisig to self, use getrawchangeaddress
address = self.nodes[to_node].getrawchangeaddress(address_type=address_type)
change = True
else:
address = self.nodes[to_node].getnewaddress(address_type=address_type)
else:
addr1 = self.nodes[to_node].getnewaddress()
addr2 = self.nodes[to_node].getnewaddress()
address = self.nodes[to_node].addmultisigaddress(2, [addr1, addr2])['address']
# Do some sanity checking on the created address
if address_type is not None:
typ = address_type
elif to_node == 0:
typ = 'legacy'
elif to_node == 1 or (to_node == 2 and not change):
typ = 'p2sh-segwit'
else:
typ = 'bech32'
self.test_address(to_node, address, multisig, typ)
# Output entry
sends[address] = to_send * 10 * (1 + n)
self.log.debug("Sending: {}".format(sends))
self.nodes[from_node].sendmany("", sends)
sync_mempools(self.nodes)
unconf_balances = self.get_balances(False)
self.log.debug("Check unconfirmed balances: {}".format(unconf_balances))
assert_equal(unconf_balances[from_node], 0)
for n, to_node in enumerate(range(from_node + 1, from_node + 4)):
to_node %= 4
assert_equal(unconf_balances[to_node], to_send * 10 * (2 + n))
# node5 collects fee and block subsidy to keep accounting simple
self.nodes[5].generate(1)
sync_blocks(self.nodes)
new_balances = self.get_balances()
self.log.debug("Check new balances: {}".format(new_balances))
# We don't know what fee was set, so we can only check bounds on the balance of the sending node
assert_greater_than(new_balances[from_node], to_send * 10)
assert_greater_than(to_send * 11, new_balances[from_node])
for n, to_node in enumerate(range(from_node + 1, from_node + 4)):
to_node %= 4
assert_equal(new_balances[to_node], old_balances[to_node] + to_send * 10 * (2 + n))
# Get one p2sh/segwit address from node2 and two bech32 addresses from node3:
to_address_p2sh = self.nodes[2].getnewaddress()
to_address_bech32_1 = self.nodes[3].getnewaddress()
to_address_bech32_2 = self.nodes[3].getnewaddress()
# Fund node 4:
self.nodes[5].sendtoaddress(self.nodes[4].getnewaddress(), Decimal("1"))
self.nodes[5].generate(1)
sync_blocks(self.nodes)
assert_equal(self.nodes[4].getbalance(), 1)
self.log.info("Nodes with addresstype=legacy never use a P2WPKH change output")
self.test_change_output_type(0, [to_address_bech32_1], 'legacy')
self.log.info("Nodes with addresstype=p2sh-segwit only use a P2WPKH change output if any destination address is bech32:")
self.test_change_output_type(1, [to_address_p2sh], 'p2sh-segwit')
self.test_change_output_type(1, [to_address_bech32_1], 'bech32')
self.test_change_output_type(1, [to_address_p2sh, to_address_bech32_1], 'bech32')
self.test_change_output_type(1, [to_address_bech32_1, to_address_bech32_2], 'bech32')
self.log.info("Nodes with change_type=bech32 always use a P2WPKH change output:")
self.test_change_output_type(2, [to_address_bech32_1], 'bech32')
self.test_change_output_type(2, [to_address_p2sh], 'bech32')
self.log.info("Nodes with addresstype=bech32 always use a P2WPKH change output (unless changetype is set otherwise):")
self.test_change_output_type(3, [to_address_bech32_1], 'bech32')
self.test_change_output_type(3, [to_address_p2sh], 'bech32')
self.log.info('getrawchangeaddress defaults to addresstype if -changetype is not set and argument is absent')
self.test_address(3, self.nodes[3].getrawchangeaddress(), multisig=False, typ='bech32')
self.log.info('test invalid address type arguments')
assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].addmultisigaddress, 2, [compressed_1, compressed_2], None, '')
assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].getnewaddress, None, '')
assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].getrawchangeaddress, '')
assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].getrawchangeaddress, 'bech23')
self.log.info("Nodes with changetype=p2sh-segwit never use a P2WPKH change output")
self.test_change_output_type(4, [to_address_bech32_1], 'p2sh-segwit')
self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit')
self.log.info("Except for getrawchangeaddress if specified:")
self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit')
self.test_address(4, self.nodes[4].getrawchangeaddress('bech32'), multisig=False, typ='bech32')
if __name__ == '__main__':
AddressTypeTest().main()
| 48.990066 | 205 | 0.650828 |
7945ddcaab22afb78c8892bd6329049c3c7b8769 | 1,422 | py | Python | webots/controllers/lib/shared/arguments.py | dan-haessler/dezibot | 18ce64fef4314e3aa426e903de7750bf5f71a6b1 | [
"MIT"
] | null | null | null | webots/controllers/lib/shared/arguments.py | dan-haessler/dezibot | 18ce64fef4314e3aa426e903de7750bf5f71a6b1 | [
"MIT"
] | null | null | null | webots/controllers/lib/shared/arguments.py | dan-haessler/dezibot | 18ce64fef4314e3aa426e903de7750bf5f71a6b1 | [
"MIT"
] | null | null | null | from enum import Enum, IntEnum
from typing import List, Tuple
IDENTIFIER = "-"
class ArgType(IntEnum):
Float = 0
Floats = 1
Integer = 2
Integers = 3
String = 4
Strings = 5
def parseArgs(self, arguments):
if self == ArgType.Float:
return float(arguments[0])
elif self == ArgType.Floats:
return [float(arg) for arg in arguments]
elif self == ArgType.Integer:
return int(arguments[0])
elif self == ArgType.Integers:
return [int(arg) for arg in arguments]
elif self == ArgType.String:
return arguments[0]
elif self == ArgType.Strings:
return arguments
class ArgInfo:
def __init__(self, field: str, type: ArgType):
self.field = field
self.type = type
class ArgParse:
    @staticmethod
    def parse(sysargs: List[str], *args: ArgInfo):
arguments = dict()
current = None
for arg in sysargs:
if arg.startswith(IDENTIFIER) and not arg[1:].isdigit():
field = arg[1:]
for argInfo in args:
if field == argInfo.field:
current = field
arguments[current] = []
break
else:
if current is not None:
arguments[current].append(arg)
vals = []
for key in arguments.keys():
for argInfo in args:
if key == argInfo.field:
vals.append(argInfo.type.parseArgs(arguments[key]))
return vals
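# Illustrative usage (a sketch; the field names and values are assumptions):
#   import sys
#   speed, name = ArgParse.parse(sys.argv,
#                                ArgInfo("speed", ArgType.Float),
#                                ArgInfo("name", ArgType.String))
# e.g. "controller.py -speed 0.5 -name bot1" yields [0.5, 'bot1'].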
| 24.517241 | 62 | 0.618847 |
7945dec96f958f705736a99fd3bd3d69bdfc28bf | 1,323 | py | Python | onnx/backend/test/case/node/sum.py | rajeevsrao/onnx | 355a4954ea4e5836a5e943589509951c44feb6b4 | [
"MIT"
] | 4,071 | 2018-12-13T04:17:38.000Z | 2022-03-30T03:29:35.000Z | blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/case/node/sum.py | laozhuang727/x-deeplearning | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | [
"Apache-2.0"
] | 359 | 2018-12-21T01:14:57.000Z | 2022-02-15T07:18:02.000Z | blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/case/node/sum.py | laozhuang727/x-deeplearning | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | [
"Apache-2.0"
] | 1,054 | 2018-12-20T09:57:42.000Z | 2022-03-29T07:16:53.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Sum(Base):
@staticmethod
def export(): # type: () -> None
data_0 = np.array([3, 0, 2]).astype(np.float32)
data_1 = np.array([1, 3, 4]).astype(np.float32)
data_2 = np.array([2, 6, 6]).astype(np.float32)
result = np.array([6, 9, 12]).astype(np.float32)
node = onnx.helper.make_node(
'Sum',
inputs=['data_0', 'data_1', 'data_2'],
outputs=['result'],
)
expect(node, inputs=[data_0, data_1, data_2], outputs=[result],
name='test_sum_example')
node = onnx.helper.make_node(
'Sum',
inputs=['data_0'],
outputs=['result'],
)
expect(node, inputs=[data_0], outputs=[data_0],
name='test_sum_one_input')
result = np.add(data_0, data_1)
node = onnx.helper.make_node(
'Sum',
inputs=['data_0', 'data_1'],
outputs=['result'],
)
expect(node, inputs=[data_0, data_1], outputs=[result],
name='test_sum_two_inputs')
| 29.4 | 71 | 0.566893 |
7945df3859285a8462723c5f49c9767d87ede7a4 | 2,928 | py | Python | tests/test_core_functions.py | ellert/jupyter-packaging | b0304d0c07e06f6f7933300e6c841ac006b89e91 | [
"BSD-3-Clause"
] | 59 | 2017-03-23T14:46:02.000Z | 2022-03-27T10:56:18.000Z | tests/test_core_functions.py | ellert/jupyter-packaging | b0304d0c07e06f6f7933300e6c841ac006b89e91 | [
"BSD-3-Clause"
] | 81 | 2017-03-08T01:18:17.000Z | 2022-03-27T20:10:46.000Z | tests/test_core_functions.py | ellert/jupyter-packaging | b0304d0c07e06f6f7933300e6c841ac006b89e91 | [
"BSD-3-Clause"
] | 47 | 2017-03-08T03:21:48.000Z | 2022-03-25T09:50:58.000Z | import os
from unittest.mock import patch, call
import pytest
from setuptools.dist import Distribution
from jupyter_packaging.setupbase import npm_builder, wrap_installers
def test_wrap_installers():
called = False
def func():
nonlocal called
called = True
cmd_class = wrap_installers(pre_dist=func, pre_develop=func,
post_dist=func, post_develop=func)
for name in ['pre_dist', 'pre_develop', 'post_dist', 'post_develop']:
cmd_class[name](Distribution()).run()
assert called
called = False
def test_npm_builder(mocker):
which = mocker.patch('jupyter_packaging.setupbase.which')
run = mocker.patch('jupyter_packaging.setupbase.run')
builder = npm_builder()
which.return_value = ['foo']
builder()
cwd=os.getcwd()
run.assert_has_calls([
call(['npm', 'install'], cwd=cwd),
call(['npm', 'run', 'build'], cwd=cwd)
])
def test_npm_build_skip(mocker):
which = mocker.patch('jupyter_packaging.setupbase.which')
run = mocker.patch('jupyter_packaging.setupbase.run')
mocker.patch('jupyter_packaging.setupbase.skip_npm', True)
builder = npm_builder()
which.return_value = ['foo']
builder()
run.assert_not_called()
def test_npm_builder_yarn(tmp_path, mocker):
which = mocker.patch('jupyter_packaging.setupbase.which')
run = mocker.patch('jupyter_packaging.setupbase.run')
tmp_path.joinpath('yarn.lock').write_text('hello')
builder = npm_builder(path=tmp_path)
which.return_value = ['foo']
builder()
run.assert_has_calls([
call(['yarn', 'install'], cwd=tmp_path),
call(['yarn', 'run', 'build'], cwd=tmp_path)
])
def test_npm_builder_missing_yarn(tmp_path, mocker):
which = mocker.patch('jupyter_packaging.setupbase.which')
run = mocker.patch('jupyter_packaging.setupbase.run')
tmp_path.joinpath('yarn.lock').write_text('hello')
builder = npm_builder(path=tmp_path)
which.side_effect = ['', 'foo']
builder()
run.assert_has_calls([
call(['npm', 'install'], cwd=tmp_path),
call(['npm', 'run', 'build'], cwd=tmp_path)
])
def test_npm_builder_not_stale(tmp_path, mocker):
which = mocker.patch('jupyter_packaging.setupbase.which')
run = mocker.patch('jupyter_packaging.setupbase.run')
is_stale = mocker.patch('jupyter_packaging.setupbase.is_stale')
is_stale.return_value = False
builder = npm_builder(build_dir=tmp_path, source_dir=tmp_path)
which.return_value = ['foo']
builder()
run.assert_not_called()
def test_npm_builder_no_npm(mocker):
which = mocker.patch('jupyter_packaging.setupbase.which')
run = mocker.patch('jupyter_packaging.setupbase.run')
is_stale = mocker.patch('jupyter_packaging.setupbase.is_stale')
is_stale.return_value = False
builder = npm_builder()
which.return_value = []
builder()
run.assert_not_called()
| 31.148936 | 73 | 0.692281 |
7945e01ab3fe78dc0636bad19388b72c36e0e9a0 | 449 | py | Python | themebase/hooking/post_build_hook.py | pyrustic/themebase | ab21b961b4d97739a9ed09c2d6377e00a7e3cbe2 | [
"MIT"
] | null | null | null | themebase/hooking/post_build_hook.py | pyrustic/themebase | ab21b961b4d97739a9ed09c2d6377e00a7e3cbe2 | [
"MIT"
] | null | null | null | themebase/hooking/post_build_hook.py | pyrustic/themebase | ab21b961b4d97739a9ed09c2d6377e00a7e3cbe2 | [
"MIT"
] | null | null | null | # post_build_hook.py generated by Pyrustic Manager
import sys
def get_data():
"""
Return None or a dict with the keys:
'script', 'target', 'app_pkg' and 'version'
"""
items = ("script", "target", "app_pkg", "version")
data = None
if len(sys.argv) == len(items):
data = {item: sys.argv[i] for i, item in enumerate(items)}
return data
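# Illustrative call (a sketch; the argument values are assumptions): invoking
#   python post_build_hook.py my-target my_app_pkg 0.0.1
# makes get_data() return {'script': 'post_build_hook.py', 'target': 'my-target',
# 'app_pkg': 'my_app_pkg', 'version': '0.0.1'}, since sys.argv[0] is the hook
# script itself; any other argument count returns None.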
def main():
sys.exit(0)
if __name__ == "__main__":
main()
| 19.521739 | 66 | 0.592428 |
7945e0e879ec4420b1dae89dcce525a84a0f3b9b | 244 | py | Python | src/models/subject.py | EOEPCA/um-pdp-engine | 482e6cbe29ef4188eb28c534af9d0a0a3af3004a | [
"Apache-2.0"
] | null | null | null | src/models/subject.py | EOEPCA/um-pdp-engine | 482e6cbe29ef4188eb28c534af9d0a0a3af3004a | [
"Apache-2.0"
] | null | null | null | src/models/subject.py | EOEPCA/um-pdp-engine | 482e6cbe29ef4188eb28c534af9d0a0a3af3004a | [
"Apache-2.0"
] | null | null | null | from models.attribute import Attribute
class Subject(Attribute):
def add_attribute(self, attribute_id, value, issuer, data_type, include_in_result):
super().add_attribute(attribute_id, value, issuer, data_type, include_in_result)
| 34.857143 | 88 | 0.782787 |
7945e13397cbe67e5c1aaf7987180c3073559bd8 | 14,605 | py | Python | openapi-python-client/openapi_client/api/metrics_api.py | yanavasileva/camunda-bpm-examples | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | [
"Apache-2.0"
] | null | null | null | openapi-python-client/openapi_client/api/metrics_api.py | yanavasileva/camunda-bpm-examples | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | [
"Apache-2.0"
] | null | null | null | openapi-python-client/openapi_client/api/metrics_api.py | yanavasileva/camunda-bpm-examples | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class MetricsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_metrics(self, metrics_name, **kwargs): # noqa: E501
"""get_metrics # noqa: E501
Retrieves the `sum` (count) for a given metric. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_metrics(metrics_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str metrics_name: The name of the metric. (required)
:param datetime start_date: The start date (inclusive).
:param datetime end_date: The end date (exclusive).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: MetricsResultDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_metrics_with_http_info(metrics_name, **kwargs) # noqa: E501
def get_metrics_with_http_info(self, metrics_name, **kwargs): # noqa: E501
"""get_metrics # noqa: E501
Retrieves the `sum` (count) for a given metric. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_metrics_with_http_info(metrics_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str metrics_name: The name of the metric. (required)
:param datetime start_date: The start date (inclusive).
:param datetime end_date: The end date (exclusive).
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(MetricsResultDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'metrics_name',
'start_date',
'end_date'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_metrics" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'metrics_name' is set
if self.api_client.client_side_validation and ('metrics_name' not in local_var_params or # noqa: E501
local_var_params['metrics_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `metrics_name` when calling `get_metrics`") # noqa: E501
collection_formats = {}
path_params = {}
if 'metrics_name' in local_var_params:
path_params['metrics-name'] = local_var_params['metrics_name'] # noqa: E501
query_params = []
if 'start_date' in local_var_params and local_var_params['start_date'] is not None: # noqa: E501
query_params.append(('startDate', local_var_params['start_date'])) # noqa: E501
if 'end_date' in local_var_params and local_var_params['end_date'] is not None: # noqa: E501
query_params.append(('endDate', local_var_params['end_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/metrics/{metrics-name}/sum', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MetricsResultDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def interval(self, **kwargs): # noqa: E501
"""interval # noqa: E501
Retrieves a list of metrics, aggregated for a given interval. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.interval(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: The name of the metric.
:param str reporter: The name of the reporter (host), on which the metrics was logged. This will have value provided by the [hostname configuration property](https://docs.camunda.org/manual/7.13/reference/deployment-descriptors/tags/process-engine/#hostname).
:param datetime start_date: The start date (inclusive).
:param datetime end_date: The end date (exclusive).
:param int first_result: Pagination of results. Specifies the index of the first result to return.
:param int max_results: Pagination of results. Specifies the maximum number of results to return. Will return less results if there are no more results left.
:param str interval: The interval for which the metrics should be aggregated. Time unit is seconds. Default: The interval is set to 15 minutes (900 seconds).
:param str aggregate_by_reporter: Aggregate metrics by reporter.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[MetricsIntervalResultDto]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.interval_with_http_info(**kwargs) # noqa: E501
def interval_with_http_info(self, **kwargs): # noqa: E501
"""interval # noqa: E501
Retrieves a list of metrics, aggregated for a given interval. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.interval_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: The name of the metric.
:param str reporter: The name of the reporter (host), on which the metrics was logged. This will have value provided by the [hostname configuration property](https://docs.camunda.org/manual/7.13/reference/deployment-descriptors/tags/process-engine/#hostname).
:param datetime start_date: The start date (inclusive).
:param datetime end_date: The end date (exclusive).
:param int first_result: Pagination of results. Specifies the index of the first result to return.
:param int max_results: Pagination of results. Specifies the maximum number of results to return. Will return less results if there are no more results left.
:param str interval: The interval for which the metrics should be aggregated. Time unit is seconds. Default: The interval is set to 15 minutes (900 seconds).
:param str aggregate_by_reporter: Aggregate metrics by reporter.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[MetricsIntervalResultDto], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'reporter',
'start_date',
'end_date',
'first_result',
'max_results',
'interval',
'aggregate_by_reporter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method interval" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'name' in local_var_params and local_var_params['name'] is not None: # noqa: E501
query_params.append(('name', local_var_params['name'])) # noqa: E501
if 'reporter' in local_var_params and local_var_params['reporter'] is not None: # noqa: E501
query_params.append(('reporter', local_var_params['reporter'])) # noqa: E501
if 'start_date' in local_var_params and local_var_params['start_date'] is not None: # noqa: E501
query_params.append(('startDate', local_var_params['start_date'])) # noqa: E501
if 'end_date' in local_var_params and local_var_params['end_date'] is not None: # noqa: E501
query_params.append(('endDate', local_var_params['end_date'])) # noqa: E501
if 'first_result' in local_var_params and local_var_params['first_result'] is not None: # noqa: E501
query_params.append(('firstResult', local_var_params['first_result'])) # noqa: E501
if 'max_results' in local_var_params and local_var_params['max_results'] is not None: # noqa: E501
query_params.append(('maxResults', local_var_params['max_results'])) # noqa: E501
if 'interval' in local_var_params and local_var_params['interval'] is not None: # noqa: E501
query_params.append(('interval', local_var_params['interval'])) # noqa: E501
if 'aggregate_by_reporter' in local_var_params and local_var_params['aggregate_by_reporter'] is not None: # noqa: E501
query_params.append(('aggregateByReporter', local_var_params['aggregate_by_reporter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/metrics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[MetricsIntervalResultDto]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
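# Illustrative usage (a sketch; the metric name is an assumption taken from the
# Camunda documentation, not from this file):
#   api = MetricsApi(ApiClient())
#   total = api.get_metrics("activity-instance-start")
#   intervals = api.interval(name="activity-instance-start", interval="900")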
| 47.57329 | 267 | 0.619582 |
7945e193daf64fe04fb34389cc8bb9a5a6c7034f | 215 | py | Python | frappe/geo/doctype/currency/test_currency.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 5 | 2017-09-12T15:56:31.000Z | 2022-03-09T13:50:21.000Z | frappe/geo/doctype/currency/test_currency.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 212 | 2017-08-16T13:03:18.000Z | 2020-10-06T12:26:21.000Z | frappe/geo/doctype/currency/test_currency.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 14 | 2020-11-04T11:22:44.000Z | 2022-02-01T20:59:37.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
# pre loaded
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('Currency') | 26.875 | 68 | 0.8 |
7945e3581429581ee9b7615d5aa3fbf559fc1f69 | 5,714 | py | Python | PyEMD/tests/test_checks.py | oshin94/PyEMD | 5859f5ea7e435ffc6e5130e5a1df9cd71784a75d | [
"Apache-2.0"
] | 1 | 2022-03-27T01:37:57.000Z | 2022-03-27T01:37:57.000Z | PyEMD/tests/test_checks.py | oshin94/PyEMD | 5859f5ea7e435ffc6e5130e5a1df9cd71784a75d | [
"Apache-2.0"
] | null | null | null | PyEMD/tests/test_checks.py | oshin94/PyEMD | 5859f5ea7e435ffc6e5130e5a1df9cd71784a75d | [
"Apache-2.0"
] | null | null | null | """Tests for checks.py."""
import unittest
import numpy as np
from PyEMD.checks import energy, mean_period, significance_aposteriori, significance_apriori, whitenoise_check
class TestCase(unittest.TestCase):
"""Test cases."""
def test_mean_period(self):
"""Test to check if mean period output is correct."""
T = np.linspace(0, 2, 100)
S = np.sin(2 * np.pi * T)
res = mean_period(S)
self.assertEqual(type(res), float, "Default data type is float")
self.assertTrue(res > 0, "mean-period cannot be zero")
def test_mean_period_zero_peaks(self):
"""Tect to check if mean period function can handle zero peaks."""
T = np.linspace(0, 2, 100)
res = mean_period(T)
self.assertEqual(res, len(T), "mean-period is same as signal length in case of monotonic curve")
def test_energy(self):
"""Test to check if energy of signal is being computed properly."""
T = np.linspace(0, 2, 200)
S = np.sin(2 * 2 * np.pi * T)
res = energy(S)
self.assertEqual(type(res), np.float64, "Default data type is float")
def test_significance_apriori(self):
"""a priori significance test."""
T = np.linspace(0, 2, 200)
S = np.sin(2 * 2 * np.pi * T)
energy_density = energy(S) / len(S)
res = significance_apriori(energy_density, 2, len(S), 0.9)
self.assertEqual(type(res), bool, "Default data type is bool")
def test_significance_aposteriori(self):
"""a posteriori significance test."""
T = np.linspace(0, 2, 200)
S = np.sin(2 * 2 * np.pi * T)
energy_density = energy(S) / len(S)
res = significance_aposteriori(energy_density, 2, len(S), 0.9)
self.assertEqual(type(res), bool, "Default data type is bool")
def test_whitenoise_check_apriori(self):
"""a priori whitenoise_check."""
T = [np.linspace(0, i, 200) for i in range(5, 0, -1)]
S = np.array([list(np.sin(2 * 2 * np.pi * i)) for i in T])
res = whitenoise_check(S, test_name="apriori")
self.assertEqual(type(res), dict or None, "Default data type is dict")
def test_whitenoise_check_apriori_alpha(self):
"""a priori whitenoise_check with custom alpha."""
T = [np.linspace(0, i, 200) for i in range(5, 0, -1)]
S = np.array([list(np.sin(2 * 2 * np.pi * i)) for i in T])
res = whitenoise_check(S, test_name="apriori", alpha=0.99)
self.assertEqual(type(res), dict or None, "Default data type is dict")
def test_whitenoise_check_alpha(self):
"""a posteriori whitenoise check with custom alpha value."""
T = [np.linspace(0, i, 200) for i in range(5, 0, -1)]
S = np.array([list(np.sin(2 * 2 * np.pi * i)) for i in T])
res = whitenoise_check(S, alpha=0.9)
self.assertEqual(type(res), dict or None, "Default data type is dict")
def test_whitenoise_check_rescaling_imf(self):
"""a posteriori whitenoise check with custom rescaling imf."""
T = [np.linspace(0, i, 200) for i in range(5, 0, -1)]
S = np.array([list(np.sin(2 * 2 * np.pi * i)) for i in T])
res = whitenoise_check(S, rescaling_imf=2)
self.assertEqual(type(res), dict or None, "Default data type is dict")
def test_whitenoise_check_nan_values(self):
"""whitenoise check with nan in IMF."""
S = np.array([np.full(100, np.NaN) for i in range(5, 0, -1)])
res = whitenoise_check(S)
self.assertEqual(res, None, "Input NaN returns None")
def test_invalid_alpha(self):
"""Test if invalid alpha return AssertionError."""
S = np.array([np.full(100, np.NaN) for i in range(5, 0, -1)])
self.assertRaises(AssertionError, whitenoise_check, S, alpha=1)
self.assertRaises(AssertionError, whitenoise_check, S, alpha=0)
self.assertRaises(AssertionError, whitenoise_check, S, alpha=-10)
self.assertRaises(AssertionError, whitenoise_check, S, alpha=2)
self.assertRaises(AssertionError, whitenoise_check, S, alpha="0.5")
def test_invalid_test_name(self):
"""Test if invalid test return AssertionError."""
S = np.random.random((5, 100))
self.assertRaises(AssertionError, whitenoise_check, S, test_name="apri")
self.assertRaises(AssertionError, whitenoise_check, S, test_name="apost")
self.assertRaises(AssertionError, whitenoise_check, S, test_name=None)
def test_invalid_input_type(self):
"""Test if invalid input type return AssertionError."""
S = [np.full(100, np.NaN) for i in range(5, 0, -1)]
self.assertRaises(AssertionError, whitenoise_check, S)
self.assertRaises(AssertionError, whitenoise_check, 1)
self.assertRaises(AssertionError, whitenoise_check, 1.2)
self.assertRaises(AssertionError, whitenoise_check, "[1,2,3,4,5]")
def test_invalid_rescaling_imf(self):
"""Test if invalid rescaling imf return AssertionError."""
T = [np.linspace(0, i, 200) for i in range(5, 0, -1)]
S = np.array([list(np.sin(2 * 2 * np.pi * i)) for i in T])
self.assertRaises(AssertionError, whitenoise_check, S, rescaling_imf=10)
self.assertRaises(AssertionError, whitenoise_check, S, rescaling_imf=1.2)
def test_empty_input_imf(self):
"""Test if empty IMF input return AssertionError."""
T1 = np.array([[], []])
T2 = np.array([])
res1 = whitenoise_check(T1)
res2 = whitenoise_check(T2)
self.assertEqual(res1, None, "Empty input returns None")
self.assertEqual(res2, None, "Empty input returns None")
if __name__ == "__main__":
unittest.main()
| 45.349206 | 110 | 0.636682 |
7945e706ae2dfaa6f77c582567cac81882d0e118 | 2,633 | py | Python | src/markets/tests/fixtures.py | thibault/cryptobalancer | de1db0006514c6b62ec7f1a28b170b97b03d6e65 | [
"MIT"
] | null | null | null | src/markets/tests/fixtures.py | thibault/cryptobalancer | de1db0006514c6b62ec7f1a28b170b97b03d6e65 | [
"MIT"
] | 6 | 2020-06-05T16:52:05.000Z | 2021-09-07T23:48:07.000Z | src/markets/tests/fixtures.py | thibault/cryptobalancer | de1db0006514c6b62ec7f1a28b170b97b03d6e65 | [
"MIT"
] | null | null | null | API_DATA = [
{
"id": "bitcoin",
"name": "Bitcoin",
"symbol": "BTC",
"rank": "1",
"price_usd": "7633.31",
"price_btc": "1.0",
"24h_volume_usd": "4732320000.0",
"market_cap_usd": "129200069231",
"available_supply": "16925825.0",
"total_supply": "16925825.0",
"max_supply": "21000000.0",
"percent_change_1h": "-1.23",
"percent_change_24h": "-7.99",
"percent_change_7d": "-15.11",
"last_updated": "1521370164",
"price_eur": "6207.82752405",
"24h_volume_eur": "3848582901.6",
"market_cap_eur": "105072602302"
},
{
"id": "ethereum",
"name": "Ethereum",
"symbol": "ETH",
"rank": "2",
"price_usd": "501.994",
"price_btc": "0.0662025",
"24h_volume_usd": "1519350000.0",
"market_cap_usd": "49326783233.0",
"available_supply": "98261699.0",
"total_supply": "98261699.0",
"max_supply": None,
"percent_change_1h": "-2.54",
"percent_change_24h": "-16.89",
"percent_change_7d": "-28.49",
"last_updated": "1521370152",
"price_eur": "408.24913047",
"24h_volume_eur": "1235618984.25",
"market_cap_eur": "40115253098.0"
},
{
"id": "ripple",
"name": "Ripple",
"symbol": "XRP",
"rank": "3",
"price_usd": "0.599304",
"price_btc": "0.00007904",
"24h_volume_usd": "469817000.0",
"market_cap_usd": "23427822075.0",
"available_supply": "39091716516.0",
"total_supply": "99992497246.0",
"max_supply": "100000000000",
"percent_change_1h": "-1.84",
"percent_change_24h": "-12.34",
"percent_change_7d": "-25.88",
"last_updated": "1521370141",
"price_eur": "0.4873869745",
"24h_volume_eur": "382081024.335",
"market_cap_eur": "19052793442.0"
},
{
"id": "bitcoin-cash",
"name": "Bitcoin Cash",
"symbol": "BCH",
"rank": "4",
"price_usd": "888.304",
"price_btc": "0.117149",
"24h_volume_usd": "388050000.0",
"market_cap_usd": "15122698268.0",
"available_supply": "17024238.0",
"total_supply": "17024238.0",
"max_supply": "21000000.0",
"percent_change_1h": "-1.69",
"percent_change_24h": "-12.19",
"percent_change_7d": "-14.34",
"last_updated": "1521370152",
"price_eur": "722.41766952",
"24h_volume_eur": "315583602.75",
"market_cap_eur": "12298609980.0"
}
]
| 31.722892 | 44 | 0.51918 |
7945e758e30cfdf868b3da39df741cf25dcd912b | 15,535 | py | Python | fakenet/listeners/FTPListener.py | evil-e/flare-fakenet-ng | de6e00e21eb0dbe37ce741caba4116b809a17487 | [
"Apache-2.0"
] | 1,360 | 2016-06-27T20:54:26.000Z | 2021-09-15T17:20:39.000Z | fakenet/listeners/FTPListener.py | evil-e/flare-fakenet-ng | de6e00e21eb0dbe37ce741caba4116b809a17487 | [
"Apache-2.0"
] | 90 | 2016-08-29T17:25:24.000Z | 2021-07-17T15:19:46.000Z | fakenet/listeners/FTPListener.py | evil-e/flare-fakenet-ng | de6e00e21eb0dbe37ce741caba4116b809a17487 | [
"Apache-2.0"
] | 335 | 2016-07-11T23:25:54.000Z | 2021-09-08T22:27:33.000Z | import logging
import os
import sys
import threading
import SocketServer
import ssl
import socket
from . import *
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler, TLS_FTPHandler
from pyftpdlib.filesystems import AbstractedFS
from pyftpdlib.servers import ThreadedFTPServer
import BannerFactory
FAKEUSER = 'FAKEUSER'
FAKEPWD = 'FAKEPWD'
EXT_FILE_RESPONSE = {
'.html': u'FakeNet.html',
'.png' : u'FakeNet.png',
'.ico' : u'FakeNet.ico',
'.jpeg': u'FakeNet.jpg',
'.exe' : u'FakeNetMini.exe',
'.pdf' : u'FakeNet.pdf',
'.xml' : u'FakeNet.html',
'.txt' : u'FakeNet.txt',
}
# Adapted from various sources including https://github.com/turbo/openftp4
BANNERS = {
'generic': '{servername} FTP Server',
'ncftpd': '{servername} NcFTPD Server (licensed copy) ready.',
'unspec1': lambda hostname: 'FTP server ready',
'unspec2': lambda hostname: 'FTP server ready %s',
'iis': lambda hostname: '%s Microsoft FTP Service',
'iis-3.0': lambda hostname: '%s Microsoft FTP Service (Version 3.0)',
'iis-4.0': lambda hostname: '%s Microsoft FTP Service (Version 4.0)',
'iis-5.0': lambda hostname: '%s Microsoft FTP Service (Version 5.0)',
'iis-6.0': lambda hostname: '%s Microsoft FTP Service (Version 6.0)',
'vs-2.0.7': lambda hostname: '(vsFTPd 2.0.7)',
'vs-2.1.0': lambda hostname: '(vsFTPd 2.1.0)',
'vs-2.1.2': lambda hostname: '(vsFTPd 2.1.2)',
'vs-2.2.0': lambda hostname: '(vsFTPd 2.2.0)',
'vs-2.2.1': lambda hostname: '(vsFTPd 2.2.1)',
'vs-2.2.2': lambda hostname: '(vsFTPd 2.2.2)',
'vs-2.3.0': lambda hostname: '(vsFTPd 2.3.0)',
'vs-2.3.1': lambda hostname: '(vsFTPd 2.3.1)',
'vs-2.3.2': lambda hostname: '(vsFTPd 2.3.2)',
'vs-2.3.4': lambda hostname: '(vsFTPd 2.3.4)',
'vs-2.3.5': lambda hostname: '(vsFTPd 2.3.5)',
'wu-2.4(1)': '{servername} (Version wu-2.4(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4(2)': '{servername} (Version wu-2.4(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4(20)': '{servername} (Version wu-2.4(20) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-academ(1)': '{servername} (Version wu-2.4.2-academ (1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-academ[BETA-15](1)': '{servername} (Version wu-2.4.2-academ[BETA-15](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-academ[BETA-16](1)': '{servername} (Version wu-2.4.2-academ[BETA-16](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-academ[BETA-18](1)': '{servername} (Version wu-2.4.2-academ[BETA-18](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-academ[BETA-9](1)': '{servername} (Version wu-2.4.2-academ[BETA-9](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-VR16(1)': '{servername} (Version wu-2.4.2-VR16(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4.2-VR17(1)': '{servername} (Version wu-2.4.2-VR17(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4(3)': '{servername} (Version wu-2.4(3) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4(4)': '{servername} (Version wu-2.4(4) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.4(6)': '{servername} (Version wu-2.4(6) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.5.0(1)': '{servername} (Version wu-2.5.0(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.0(1)': '{servername} (Version wu-2.6.0(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.0(2)': '{servername} (Version wu-2.6.0(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.0(4)': '{servername} (Version wu-2.6.0(4) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.0(5)': '{servername} (Version wu-2.6.0(5) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.0(7)': '{servername} (Version wu-2.6.0(7) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-0.6x.21': '{servername} (Version wu-2.6.1-0.6x.21 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1(1)': '{servername} (Version wu-2.6.1(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1(12)': '{servername} (Version wu-2.6.1(12) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-16': '{servername} (Version wu-2.6.1-16 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-16.7x.1': '{servername} (Version wu-2.6.1-16.7x.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-18': '{servername} (Version wu-2.6.1-18 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1(2)': '{servername} (Version wu-2.6.1(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-20': '{servername} (Version wu-2.6.1-20 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-21': '{servername} (Version wu-2.6.1-21 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-23.2': '{servername} (Version wu-2.6.1-23.2 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-24': '{servername} (Version wu-2.6.1-24 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1-24.1': '{servername} (Version wu-2.6.1-24.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.1(3)': '{servername} (Version wu-2.6.1(3) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(1)': '{servername} (Version wu-2.6.2(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(11)': '{servername} (Version wu-2.6.2(11) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-11.1204.1ubuntu': '{servername} (Version wu-2.6.2-11.1204.1ubuntu %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-11.71.1': '{servername} (Version wu-2.6.2-11.71.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-11.72.1': '{servername} (Version wu-2.6.2-11.72.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-11.73.1': '{servername} (Version wu-2.6.2-11.73.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-11.73.1mdk': '{servername} (Version wu-2.6.2-11.73.1mdk %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-12': '{servername} (Version wu-2.6.2-12 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-12.1.co5.PROX': '{servername} (Version wu-2.6.2-12.1.co5.PROX %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-12.rhel2': '{servername} (Version wu-2.6.2-12.rhel2 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(13)': '{servername} (Version wu-2.6.2(13) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2.1(5)': '{servername} (Version wu-2.6.2.1(5) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(15)': '{servername} (Version wu-2.6.2(15) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-15.7x.legacy': '{servername} (Version wu-2.6.2-15.7x.legacy %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-15.7x.PROX': '{servername} (Version wu-2.6.2-15.7x.PROX %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(16)': '{servername} (Version wu-2.6.2(16) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(2)': '{servername} (Version wu-2.6.2(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(3)': '{servername} (Version wu-2.6.2(3) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(4)': '{servername} (Version wu-2.6.2(4) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-468': '{servername} (Version wu-2.6.2-468 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(47)': '{servername} (Version wu-2.6.2(47) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(48)': '{servername} (Version wu-2.6.2(48) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2-5': '{servername} (Version wu-2.6.2-5 %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(5)': '{servername} (Version wu-2.6.2(5) %a %b %d %H:%M:%S {tz} %Y) ready.',
'wu-2.6.2(52)': '{servername} (Version wu-2.6.2(52) %a %b %d %H:%M:%S {tz} %Y) ready.',
'ws_ftp-2.0.4': '{servername} V2 WS_FTP Server 2.0.4 (0)',
'ws_ftp-3.1.3': '{servername} V2 WS_FTP Server 3.1.3 (0)',
'ws_ftp-5.0.5': '{servername} V2 WS_FTP Server 5.0.5 (0)',
'ws_ftp-7.5.1': '{servername} V2 WS_FTP Server 7.5.1(0)',
'ws_ftp-7.7': '{servername} V2 WS_FTP Server 7.7(0)',
'ws_ftp-1.0.3 ': '{servername} X2 WS_FTP Server 1.0.3 (0)',
'ws_ftp-1.0.5 ': '{servername} X2 WS_FTP Server 1.0.5 (0)',
'ws_ftp-2.0.0 ': '{servername} X2 WS_FTP Server 2.0.0 (0)',
'ws_ftp-2.0.3 ': '{servername} X2 WS_FTP Server 2.0.3 (0)',
'ws_ftp-3.00 ': '{servername} X2 WS_FTP Server 3.00 (0)',
'ws_ftp-3.1.3 ': '{servername} X2 WS_FTP Server 3.1.3 (0)',
'ws_ftp-4.0.0 ': '{servername} X2 WS_FTP Server 4.0.0 (0)',
'ws_ftp-4.0.2 ': '{servername} X2 WS_FTP Server 4.0.2 (0)',
'ws_ftp-5.0.0 ': '{servername} X2 WS_FTP Server 5.0.0 (0)',
'ws_ftp-5.0.2 ': '{servername} X2 WS_FTP Server 5.0.2 (0)',
'ws_ftp-5.0.4 ': '{servername} X2 WS_FTP Server 5.0.4 (0)',
'ws_ftp-5.0.5 ': '{servername} X2 WS_FTP Server 5.0.5 (0)',
'ws_ftp-6.0': '{servername} X2 WS_FTP Server 6.0(0)',
'ws_ftp-6.1': '{servername} X2 WS_FTP Server 6.1(0)',
'ws_ftp-6.1.1': '{servername} X2 WS_FTP Server 6.1.1(0)',
'ws_ftp-7.0': '{servername} X2 WS_FTP Server 7.0(0)',
'ws_ftp-7.1': '{servername} X2 WS_FTP Server 7.1(0)',
'ws_ftp-7.5': '{servername} X2 WS_FTP Server 7.5(0)',
'ws_ftp-7.5.1': '{servername} X2 WS_FTP Server 7.5.1(0)',
'ws_ftp-7.6': '{servername} X2 WS_FTP Server 7.6(0)',
'ws_ftp-7.6': '{servername} X2 WS_FTP Server 7.6(0) FIPS',
'ws_ftp-7.6.2': '{servername} X2 WS_FTP Server 7.6.2(0)',
'ws_ftp-7.6.2-fips': '{servername} X2 WS_FTP Server 7.6.2(0) FIPS',
'ws_ftp-7.6.3': '{servername} X2 WS_FTP Server 7.6.3(0)',
'ws_ftp-7.7': '{servername} X2 WS_FTP Server 7.7(0)',
}
class FakeFTPHandler(FTPHandler, object):
def ftp_PASS(self, line):
# Dynamically add user to authorizer
if not self.authorizer.has_user(self.username):
self.authorizer.add_user(self.username, line, self.ftproot_path, 'elradfmwM')
return super(FakeFTPHandler, self).ftp_PASS(line)
class TLS_FakeFTPHandler(TLS_FTPHandler, object):
def ftp_PASS(self, line):
# Dynamically add user to authorizer
if not self.authorizer.has_user(self.username):
self.authorizer.add_user(self.username, line, self.ftproot_path, 'elradfmwM')
return super(TLS_FakeFTPHandler, self).ftp_PASS(line)
class FakeFS(AbstractedFS):
def open(self, filename, mode):
        # If virtual filename does not exist return a default file based on extension
if not self.lexists(filename):
file_basename, file_extension = os.path.splitext(filename)
# Calculate absolute path to a fake file
filename = os.path.join(os.path.dirname(filename), EXT_FILE_RESPONSE.get(file_extension.lower(), u'FakeNetMini.exe'))
return super(FakeFS, self).open(filename, mode)
def chdir(self, path):
# If virtual directory does not exist change to the current directory
if not self.lexists(path):
path = u'.'
return super(FakeFS, self).chdir(path)
def remove(self, path):
# Don't remove anything
pass
def rmdir(self, path):
# Don't remove anything
pass
class FTPListener(object):
def taste(self, data, dport):
# See RFC5797 for full command list. Many of these commands are not likely
# to be used but are included in case malware uses FTP in unexpected ways
base_ftp_commands = [
'abor', 'acct', 'allo', 'appe', 'cwd', 'dele', 'help', 'list', 'mode',
'nlst', 'noop', 'pass', 'pasv', 'port', 'quit', 'rein', 'rest', 'retr',
'rnfr', 'rnto', 'site', 'stat', 'stor', 'stru', 'type', 'user'
]
opt_ftp_commands = [
'cdup', 'mkd', 'pwd', 'rmd', 'smnt', 'stou', 'syst'
]
confidence = 1 if dport == 21 else 0
data = data.lstrip().lower()
for command in base_ftp_commands + opt_ftp_commands:
if data.startswith(command):
return confidence + 1
return confidence
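        # Illustrative behaviour of the heuristic above (comment added for clarity, not in
        # the original source): taste("USER anon\r\n", 21) -> 2 (port and command both match),
        # taste("USER anon\r\n", 2121) -> 1, and unrecognised data on port 21 -> 1.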
def __init__(self,
config,
name='FTPListener',
logging_level=logging.INFO,
running_listeners=None,
diverter=None
):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging_level)
self.config = config
self.name = name
self.local_ip = config.get('ipaddr')
self.server = None
self.running_listeners = running_listeners
self.diverter = diverter
self.name = 'FTP'
self.port = self.config.get('port', 21)
self.logger.debug('Starting...')
self.logger.debug('Initialized with config:')
for key, value in config.iteritems():
self.logger.debug(' %10s: %s', key, value)
# Initialize ftproot directory
path = self.config.get('ftproot','defaultFiles')
self.ftproot_path = ListenerBase.abs_config_path(path)
if self.ftproot_path is None:
self.logger.error('Could not locate ftproot directory: %s', path)
sys.exit(1)
def expand_ports(self, ports_list):
ports = []
for i in ports_list.split(','):
if '-' not in i:
ports.append(int(i))
else:
l,h = map(int, i.split('-'))
ports+= range(l,h+1)
return ports
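        # Illustrative behaviour of the helper above (comment added for clarity, not in the
        # original source): expand_ports("60000-60002,70000") -> [60000, 60001, 60002, 70000]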
def start(self):
self.authorizer = DummyAuthorizer()
if self.config.get('usessl') == 'Yes':
self.logger.debug('Using SSL socket.')
keyfile_path = 'listeners/ssl_utils/privkey.pem'
keyfile_path = ListenerBase.abs_config_path(keyfile_path)
if keyfile_path is None:
self.logger.error('Could not locate %s', keyfile_path)
sys.exit(1)
self.handler = TLS_FakeFTPHandler
self.handler.certfile = keyfile_path
else:
self.handler = FakeFTPHandler
self.handler.banner = self.genBanner()
self.handler.ftproot_path = self.ftproot_path
self.handler.abstracted_fs = FakeFS
self.handler.authorizer = self.authorizer
self.handler.passive_ports = self.expand_ports(self.config.get('pasvports', '60000-60010'))
self.server = ThreadedFTPServer((self.local_ip, int(self.config['port'])), self.handler)
# Override pyftpdlib logger name
logging.getLogger('pyftpdlib').name = self.name
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self):
self.logger.debug('Stopping...')
if self.server:
self.server.close_all()
def genBanner(self):
bannerfactory = BannerFactory.BannerFactory()
return bannerfactory.genBanner(self.config, BANNERS)
###############################################################################
# Testing code
def test(config):
import ftplib
client = ftplib.FTP()
client.connect('localhost', int(config.get('port', 21)))
client.login('user', 'password')
client.dir('.')
client.close()
def main():
logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
config = {'port': '21', 'usessl': 'No', 'protocol': 'tcp', 'ftproot': os.path.join('..', 'defaultFiles')}
listener = FTPListener(config)
listener.start()
###########################################################################
# Run processing
import time
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
###########################################################################
# Run tests
test(config)
if __name__ == '__main__':
main()
| 43.393855 | 129 | 0.562086 |
7945e78169be2ced69219126ea18c2e766e87085 | 4,544 | py | Python | pose_estimation/exp/top_down_448x320_global_small/config.py | AK391/UniFormer | 22c6b3b98b68236dda6a8fa7152a32af1af62a20 | [
"MIT"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | pose_estimation/exp/top_down_448x320_global_small/config.py | hadlang/UniFormer | e8024703bffb89cb7c7d09e0d774a0d2a9f96c25 | [
"MIT"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | pose_estimation/exp/top_down_448x320_global_small/config.py | hadlang/UniFormer | e8024703bffb89cb7c7d09e0d774a0d2a9f96c25 | [
"MIT"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=5, create_symlink=False)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='AdamW',
lr=1.0e-3,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={'relative_position_bias_table': dict(decay_mult=0.)}
)
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='TopDown',
#pretrained='/path/to/hrt_small.pth', # Set the path to pretrained backbone here
backbone=dict(
type='UniFormer',
embed_dim=[64, 128, 320, 512],
layers=[3, 4, 8, 3],
head_dim=64,
drop_path_rate=0.2,
use_checkpoint=False,
windows=False,
hybrid=False
),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=512,
out_channels=channel_cfg['num_output_channels'],
norm_cfg=norm_cfg,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_root = 'data/coco/' # Set the data path here
data_cfg = dict(
image_size=[320, 448],
heatmap_size=[80, 112],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file=f'data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=3),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=256),
test_dataloader=dict(samples_per_gpu=256),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
# fp16 settings
fp16 = dict(loss_scale='dynamic') | 28.4 | 96 | 0.630722 |
7945e83218e926ccf4ced98eb59de90b751562f0 | 68 | py | Python | rest-api/flask_app/auth/__init__.py | sinedie/Flask-Svelte-Websockets-Nginx-Docker | 76daeec2c76f9f27ca526f53393ab4363020b92b | [
"WTFPL"
] | 4 | 2021-11-21T14:04:15.000Z | 2022-03-20T15:28:14.000Z | rest-api/flask_app/auth/__init__.py | sinedie/Utimate-flask-websocket-template | 76daeec2c76f9f27ca526f53393ab4363020b92b | [
"WTFPL"
] | null | null | null | rest-api/flask_app/auth/__init__.py | sinedie/Utimate-flask-websocket-template | 76daeec2c76f9f27ca526f53393ab4363020b92b | [
"WTFPL"
] | null | null | null | from flask_app.auth.jwt import *
from flask_app.auth.login import * | 22.666667 | 34 | 0.794118 |
7945e84a74540a2e053f5f7fc21fb5cb96cf7cc4 | 723 | py | Python | src/1_100/0001_Two_Sum/Two_Sum.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | 1 | 2019-12-18T06:08:47.000Z | 2019-12-18T06:08:47.000Z | src/1_100/0001_Two_Sum/Two_Sum.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | 1 | 2019-05-18T09:35:22.000Z | 2019-05-18T09:35:22.000Z | src/1_100/0001_Two_Sum/Two_Sum.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | null | null | null | class Solution(object):
def twoSum(self, arr, target):
        # Use a dictionary to record each element's value and its index
d = {}
for i,v in enumerate(arr):
d[v] = i
for i,v in enumerate(arr):
x = target - v
            # If target - v is already in the dictionary and its index differs from i, that pair is the answer
if(d.has_key(x) and i!=d[x]):
return [d[x],i]
return []
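        # Worked example (illustrative, not part of the original solution): for arr = [2, 7, 11, 15]
        # and target = 9, d becomes {2: 0, 7: 1, 11: 2, 15: 3}; at i = 0, x = 9 - 2 = 7 and
        # d[7] = 1 != 0, so the method returns [1, 0].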
    # One-pass hash implementation: insert values while iterating, so a single loop is enough
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if not nums or len(nums)==0:
return [-1,-1]
d = dict()
for i,v in enumerate(nums):
n = target-v
if d.has_key(n):
return [i,d[n]]
d[v] = i
return [-1,-1] | 22.59375 | 40 | 0.489627 |
7945e8596ac339cf49f9a77940791626d68953a9 | 18,315 | py | Python | applications/FluidDynamicsApplication/python_scripts/navier_stokes_two_fluids_solver.py | Gaoliu19910601/Kratos | 0bac5e132d02061680fc90f1e52d4930b5ed7fa3 | [
"BSD-4-Clause"
] | null | null | null | applications/FluidDynamicsApplication/python_scripts/navier_stokes_two_fluids_solver.py | Gaoliu19910601/Kratos | 0bac5e132d02061680fc90f1e52d4930b5ed7fa3 | [
"BSD-4-Clause"
] | 1 | 2019-10-15T13:11:37.000Z | 2019-10-15T13:11:37.000Z | applications/FluidDynamicsApplication/python_scripts/navier_stokes_two_fluids_solver.py | Gaoliu19910601/Kratos | 0bac5e132d02061680fc90f1e52d4930b5ed7fa3 | [
"BSD-4-Clause"
] | null | null | null | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.kratos_utilities as KratosUtilities
# Import applications
import KratosMultiphysics.FluidDynamicsApplication as KratosCFD
have_conv_diff = KratosUtilities.CheckIfApplicationsAvailable("ConvectionDiffusionApplication")
if have_conv_diff:
import KratosMultiphysics.ConvectionDiffusionApplication as KratosConvDiff
# Import base class file
from KratosMultiphysics.FluidDynamicsApplication.fluid_solver import FluidSolver
from KratosMultiphysics.FluidDynamicsApplication.read_distance_from_file import DistanceImportUtility
import KratosMultiphysics.python_linear_solver_factory as linear_solver_factory
def CreateSolver(model, custom_settings):
return NavierStokesTwoFluidsSolver(model, custom_settings)
class NavierStokesTwoFluidsSolver(FluidSolver):
@classmethod
def GetDefaultSettings(cls):
##settings string in json format
default_settings = KratosMultiphysics.Parameters("""
{
"solver_type": "two_fluids_solver_from_defaults",
"model_part_name": "",
"domain_size": -1,
"model_import_settings": {
"input_type": "mdpa",
"input_filename": "unknown_name",
"reorder": false
},
"material_import_settings": {
"materials_filename": ""
},
"distance_reading_settings" : {
"import_mode" : "from_mdpa",
"distance_file_name" : "no_distance_file"
},
"maximum_iterations": 7,
"echo_level": 0,
"time_order": 2,
"compute_reactions": false,
"reform_dofs_at_each_step": false,
"relative_velocity_tolerance": 1e-3,
"absolute_velocity_tolerance": 1e-5,
"relative_pressure_tolerance": 1e-3,
"absolute_pressure_tolerance": 1e-5,
"linear_solver_settings" : {
"solver_type" : "amgcl"
},
"volume_model_part_name" : "volume_model_part",
"skin_parts": [""],
"assign_neighbour_elements_to_conditions": false,
"no_skin_parts":[""],
"time_stepping" : {
"automatic_time_step" : true,
"CFL_number" : 1,
"minimum_delta_time" : 1e-2,
"maximum_delta_time" : 1.0,
"time_step" : 0.0
},
"periodic": "periodic",
"move_mesh_flag": false,
"formulation": {
"dynamic_tau": 1.0
},
"bfecc_convection" : false,
"bfecc_number_substeps" : 10
}""")
default_settings.AddMissingParameters(super(NavierStokesTwoFluidsSolver, cls).GetDefaultSettings())
return default_settings
def __init__(self, model, custom_settings):
self._validate_settings_in_baseclass=True # To be removed eventually
super(NavierStokesTwoFluidsSolver,self).__init__(model,custom_settings)
self.element_name = "TwoFluidNavierStokes"
self.condition_name = "NavierStokesWallCondition"
self.element_has_nodal_properties = True
self.min_buffer_size = 3
self._bfecc_convection = self.settings["bfecc_convection"].GetBool()
## Set the distance reading filename
# TODO: remove the manual "distance_file_name" set as soon as the problem type one has been tested.
if (self.settings["distance_reading_settings"]["import_mode"].GetString() == "from_GiD_file"):
self.settings["distance_reading_settings"]["distance_file_name"].SetString(self.settings["model_import_settings"]["input_filename"].GetString()+".post.res")
KratosMultiphysics.Logger.PrintInfo("NavierStokesTwoFluidsSolver", "Construction of NavierStokesTwoFluidsSolver finished.")
def AddVariables(self):
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DYNAMIC_VISCOSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MESH_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.IS_STRUCTURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BODY_FORCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION_WATER_PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NORMAL)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.EXTERNAL_PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FLAG_VARIABLE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE) # Distance function nodal values
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE_GRADIENT) # Distance gradient nodal values
KratosMultiphysics.Logger.PrintInfo("NavierStokesTwoFluidsSolver", "Fluid solver variables added correctly.")
def PrepareModelPart(self):
# Initialize the level-set function
if not self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]:
## Setting the nodal distance
self._set_distance_function()
# Call the base solver PrepareModelPart()
super(NavierStokesTwoFluidsSolver, self).PrepareModelPart()
def Initialize(self):
self.computing_model_part = self.GetComputingModelPart()
## Construct the linear solver
self.linear_solver = linear_solver_factory.ConstructSolver(self.settings["linear_solver_settings"])
KratosMultiphysics.NormalCalculationUtils().CalculateOnSimplex(self.computing_model_part, self.computing_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE])
self.neighbour_search = KratosMultiphysics.FindNodalNeighboursProcess(self.computing_model_part)
(self.neighbour_search).Execute()
self.accelerationLimitationUtility = KratosCFD.AccelerationLimitationUtilities( self.computing_model_part, 5.0 )
# If needed, create the estimate time step utility
if (self.settings["time_stepping"]["automatic_time_step"].GetBool()):
self.EstimateDeltaTimeUtility = self._GetAutomaticTimeSteppingUtility()
# Set the time discretization utility to compute the BDF coefficients
time_order = self.settings["time_order"].GetInt()
if time_order == 2:
self.time_discretization = KratosMultiphysics.TimeDiscretization.BDF(time_order)
else:
raise Exception("Only \"time_order\" equal to 2 is supported. Provided \"time_order\": " + str(time_order))
# Creating the solution strategy
self.conv_criteria = KratosCFD.VelPrCriteria(self.settings["relative_velocity_tolerance"].GetDouble(),
self.settings["absolute_velocity_tolerance"].GetDouble(),
self.settings["relative_pressure_tolerance"].GetDouble(),
self.settings["absolute_pressure_tolerance"].GetDouble())
(self.conv_criteria).SetEchoLevel(self.settings["echo_level"].GetInt())
self.level_set_convection_process = self._set_level_set_convection_process()
self.variational_distance_process = self._set_variational_distance_process()
time_scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticSchemeSlip(self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE], # Domain size (2,3)
self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]+1) # DOFs (3,4)
builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(self.linear_solver)
self.solver = KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy(self.computing_model_part,
time_scheme,
self.linear_solver,
self.conv_criteria,
builder_and_solver,
self.settings["maximum_iterations"].GetInt(),
self.settings["compute_reactions"].GetBool(),
self.settings["reform_dofs_at_each_step"].GetBool(),
self.settings["move_mesh_flag"].GetBool())
(self.solver).SetEchoLevel(self.settings["echo_level"].GetInt())
        (self.solver).Initialize() # Initialize the solver. Otherwise the constitutive law is not initialized.
(self.solver).Check()
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DYNAMIC_TAU, self.settings["formulation"]["dynamic_tau"].GetDouble())
KratosMultiphysics.Logger.PrintInfo("NavierStokesTwoFluidsSolver", "Solver initialization finished.")
def InitializeSolutionStep(self):
if self._TimeBufferIsInitialized():
# Recompute the BDF2 coefficients
(self.time_discretization).ComputeAndSaveBDFCoefficients(self.GetComputingModelPart().ProcessInfo)
# Perform the level-set convection according to the previous step velocity
if self._bfecc_convection:
(self.level_set_convection_process).BFECCconvect(
self.main_model_part,
KratosMultiphysics.DISTANCE,
KratosMultiphysics.VELOCITY,
self.settings["bfecc_number_substeps"].GetInt())
else:
(self.level_set_convection_process).Execute()
# Recompute the distance field according to the new level-set position
(self.variational_distance_process).Execute()
# Update the DENSITY and DYNAMIC_VISCOSITY values according to the new level-set
self._SetNodalProperties()
# Initialize the solver current step
(self.solver).InitializeSolutionStep()
def FinalizeSolutionStep(self):
if self._TimeBufferIsInitialized():
(self.solver).FinalizeSolutionStep()
(self.accelerationLimitationUtility).Execute()
# TODO: Remove this method as soon as the subproperties are available
def _SetPhysicalProperties(self):
import os
warn_msg = '\nThe materials import mechanism used in the two fluids solver is DEPRECATED!\n'
warn_msg += 'It will be removed to use the base fluid_solver.py one as soon as the subproperties are available.\n'
KratosMultiphysics.Logger.PrintWarning('\n\x1b[1;31mDEPRECATION-WARNING\x1b[0m', warn_msg)
# Check if the fluid properties are provided using a .json file
materials_filename = self.settings["material_import_settings"]["materials_filename"].GetString()
if (materials_filename != ""):
with open(materials_filename,'r') as materials_file:
materials = KratosMultiphysics.Parameters(materials_file.read())
# Create and read an auxiliary materials file for each one of the fields
for i_material in materials["properties"]:
aux_materials = KratosMultiphysics.Parameters()
aux_materials.AddEmptyArray("properties")
aux_materials["properties"].Append(i_material)
prop_id = i_material["properties_id"].GetInt()
aux_materials_filename = materials_filename + "_" + str(prop_id) + ".json"
with open(aux_materials_filename,'w') as aux_materials_file:
aux_materials_file.write(aux_materials.WriteJsonString())
aux_materials_file.close()
aux_material_settings = KratosMultiphysics.Parameters("""{"Parameters": {"materials_filename": ""}} """)
aux_material_settings["Parameters"]["materials_filename"].SetString(aux_materials_filename)
KratosMultiphysics.ReadMaterialsUtility(aux_material_settings, self.model)
os.remove(aux_materials_filename)
materials_imported = True
else:
materials_imported = False
# If the element uses nodal material properties, transfer them to the nodes
if self.element_has_nodal_properties:
self._SetNodalProperties()
return materials_imported
def _SetNodalProperties(self):
# Get fluid 1 and 2 properties
properties_1 = self.main_model_part.Properties[1]
properties_2 = self.main_model_part.Properties[2]
rho_1 = properties_1.GetValue(KratosMultiphysics.DENSITY)
rho_2 = properties_2.GetValue(KratosMultiphysics.DENSITY)
mu_1 = properties_1.GetValue(KratosMultiphysics.DYNAMIC_VISCOSITY)
mu_2 = properties_2.GetValue(KratosMultiphysics.DYNAMIC_VISCOSITY)
# Check fluid 1 and 2 properties
if rho_1 <= 0.0:
raise Exception("DENSITY set to {0} in Properties {1}, positive number expected.".format(rho_1, properties_1.Id))
if rho_2 <= 0.0:
raise Exception("DENSITY set to {0} in Properties {1}, positive number expected.".format(rho_2, properties_2.Id))
if mu_1 <= 0.0:
raise Exception("DYNAMIC_VISCOSITY set to {0} in Properties {1}, positive number expected.".format(mu_1, properties_1.Id))
if mu_2 <= 0.0:
raise Exception("DYNAMIC_VISCOSITY set to {0} in Properties {1}, positive number expected.".format(mu_2, properties_2.Id))
        # Transfer density and (dynamic) viscosity to the nodes
for node in self.main_model_part.Nodes:
if node.GetSolutionStepValue(KratosMultiphysics.DISTANCE) <= 0.0:
node.SetSolutionStepValue(KratosMultiphysics.DENSITY, rho_1)
node.SetSolutionStepValue(KratosMultiphysics.DYNAMIC_VISCOSITY, mu_1)
else:
node.SetSolutionStepValue(KratosMultiphysics.DENSITY, rho_2)
node.SetSolutionStepValue(KratosMultiphysics.DYNAMIC_VISCOSITY, mu_2)
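        # Note on the convention implied above (added comment): nodes with DISTANCE <= 0 receive
        # the fluid 1 properties, while nodes with positive DISTANCE receive the fluid 2 properties.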
def _set_distance_function(self):
## Set the nodal distance function
if (self.settings["distance_reading_settings"]["import_mode"].GetString() == "from_GiD_file"):
DistanceUtility = DistanceImportUtility(self.main_model_part, self.settings["distance_reading_settings"])
DistanceUtility.ImportDistance()
elif (self.settings["distance_reading_settings"]["import_mode"].GetString() == "from_mdpa"):
KratosMultiphysics.Logger.PrintInfo("Navier Stokes Embedded Solver","Distance function taken from the .mdpa input file.")
def _set_level_set_convection_process(self):
# Construct the level set convection process
if self._bfecc_convection:
if have_conv_diff:
if self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 2:
locator = KratosMultiphysics.BinBasedFastPointLocator2D(self.main_model_part).UpdateSearchDatabase()
level_set_convection_process = KratosConvDiff.BFECCConvection2D(locator)
else:
locator = KratosMultiphysics.BinBasedFastPointLocator3D(self.main_model_part).UpdateSearchDatabase()
level_set_convection_process = KratosConvDiff.BFECCConvection3D(locator)
else:
raise Exception("The BFECC level set convection requires the Kratos ConvectionDiffusionApplication compilation.")
else:
if self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 2:
level_set_convection_process = KratosMultiphysics.LevelSetConvectionProcess2D(
KratosMultiphysics.DISTANCE,
self.main_model_part,
self.linear_solver)
else:
level_set_convection_process = KratosMultiphysics.LevelSetConvectionProcess3D(
KratosMultiphysics.DISTANCE,
self.main_model_part,
self.linear_solver)
return level_set_convection_process
def _set_variational_distance_process(self):
# Construct the variational distance calculation process
maximum_iterations = 2 #TODO: Make this user-definable
if self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 2:
variational_distance_process = KratosMultiphysics.VariationalDistanceCalculationProcess2D(
self.main_model_part,
self.linear_solver,
maximum_iterations,
KratosMultiphysics.VariationalDistanceCalculationProcess2D.CALCULATE_EXACT_DISTANCES_TO_PLANE)
else:
variational_distance_process = KratosMultiphysics.VariationalDistanceCalculationProcess3D(
self.main_model_part,
self.linear_solver,
maximum_iterations,
KratosMultiphysics.VariationalDistanceCalculationProcess3D.CALCULATE_EXACT_DISTANCES_TO_PLANE)
return variational_distance_process
| 54.186391 | 175 | 0.668905 |
7945e87b21cda20054ed16b1bfcf976b11f7fbcd | 988 | py | Python | matplotlib_examples/examples_src/pylab_examples/coords_demo.py | xzlmark/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | [
"Apache-2.0"
] | 3 | 2020-04-09T02:35:26.000Z | 2021-02-27T17:00:21.000Z | matplotlib_examples/examples_src/pylab_examples/coords_demo.py | colorworlds/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | [
"Apache-2.0"
] | null | null | null | matplotlib_examples/examples_src/pylab_examples/coords_demo.py | colorworlds/webspider | 133c620c65aa45abea1718b0dada09618c2115bf | [
"Apache-2.0"
] | 1 | 2020-04-09T02:35:08.000Z | 2020-04-09T02:35:08.000Z | """
An example of how to interact with the plotting canvas by connecting
to move and click events
"""
from __future__ import print_function
import sys
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0.0, 1.0, 0.01)
s = np.sin(2*np.pi*t)
fig, ax = plt.subplots()
ax.plot(t, s)
def on_move(event):
# get the x and y pixel coords
x, y = event.x, event.y
if event.inaxes:
ax = event.inaxes # the axes instance
print('data coords %f %f' % (event.xdata, event.ydata))
def on_click(event):
# get the x and y coords, flip y from top to bottom
x, y = event.x, event.y
if event.button == 1:
if event.inaxes is not None:
print('data coords %f %f' % (event.xdata, event.ydata))
binding_id = plt.connect('motion_notify_event', on_move)
plt.connect('button_press_event', on_click)
if "test_disconnect" in sys.argv:
print("disconnecting console coordinate printout...")
plt.disconnect(binding_id)
plt.show()
| 24.7 | 68 | 0.672065 |
7945ea0a2b211ec764a2e665dff4570e0bb46301 | 4,472 | py | Python | tests/fpga/multibank_reduce_fpga_test.py | jnice-81/dace | 5211794a2d17b7189037ac485ab0b292fb02aa0d | [
"BSD-3-Clause"
] | 227 | 2019-03-15T23:39:06.000Z | 2022-03-30T07:49:08.000Z | tests/fpga/multibank_reduce_fpga_test.py | jnice-81/dace | 5211794a2d17b7189037ac485ab0b292fb02aa0d | [
"BSD-3-Clause"
] | 834 | 2019-07-31T22:49:31.000Z | 2022-03-28T14:01:32.000Z | tests/fpga/multibank_reduce_fpga_test.py | jnice-81/dace | 5211794a2d17b7189037ac485ab0b292fb02aa0d | [
"BSD-3-Clause"
] | 64 | 2019-03-19T05:40:37.000Z | 2022-03-11T15:02:42.000Z | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace import subsets
from dace.fpga_testing import xilinx_test
import numpy as np
# A test checking wcr-reduction with HBM/DDR arrays as inputs and output
def create_multibank_reduce_sdfg(
name,
mem_type,
banks=2,
):
N = dace.symbol("N")
M = dace.symbol("M")
sdfg = dace.SDFG(name + "_" + mem_type)
state = sdfg.add_state('red_' + mem_type, True)
in1 = sdfg.add_array("in1", [banks, N, M], dace.float32)
in2 = sdfg.add_array("in2", [banks, N, M], dace.float32)
out = sdfg.add_array("out", [banks, N], dace.float32)
in1[1].location["memorytype"] = mem_type
in2[1].location["memorytype"] = mem_type
out[1].location["memorytype"] = mem_type
in1[1].location["bank"] = f"0:{banks}"
in2[1].location["bank"] = f"{banks}:{2*banks}"
out[1].location["bank"] = f"{2*banks}:{3*banks}"
read_in1 = state.add_read("in1")
read_in2 = state.add_read("in2")
out_write = state.add_write("out")
tmp_in1_memlet = dace.Memlet(f"in1[k, i, j]")
tmp_in2_memlet = dace.Memlet(f"in2[k, i, j]")
tmp_out_memlet = dace.Memlet(f"out[k, i]", wcr="lambda x,y: x+y")
outer_entry, outer_exit = state.add_map("vadd_outer_map",
dict(k=f'0:{banks}'))
map_entry, map_exit = state.add_map("vadd_inner_map", dict(i="0:N",
j="0:M"))
tasklet = state.add_tasklet("mul", dict(__in1=None, __in2=None),
dict(__out=None), '__out = __in1 * __in2')
outer_entry.map.schedule = dace.ScheduleType.Unrolled
state.add_memlet_path(read_in1,
outer_entry,
map_entry,
tasklet,
memlet=tmp_in1_memlet,
dst_conn="__in1")
state.add_memlet_path(read_in2,
outer_entry,
map_entry,
tasklet,
memlet=tmp_in2_memlet,
dst_conn="__in2")
state.add_memlet_path(tasklet,
map_exit,
outer_exit,
out_write,
memlet=tmp_out_memlet,
src_conn="__out")
sdfg.apply_fpga_transformations()
return sdfg
def create_test_set(N, M, banks):
in1 = np.random.rand(*[banks, N, M]).astype('f')
in2 = np.random.rand(*[banks, N, M]).astype('f')
expected = np.sum(in1 * in2, axis=2, dtype=np.float32)
out = np.zeros((banks, N), dtype=np.float32)
return (in1, in2, expected, out)
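# Shape sketch for the helper above (illustrative comment, not in the original source):
# the inputs are (banks, N, M) arrays and expected/out are (banks, N); each expected[k, i]
# is the dot product sum_j in1[k, i, j] * in2[k, i, j].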
def exec_test(N, M, banks, mem_type, name):
in1, in2, expected, target = create_test_set(N, M, banks)
sdfg = create_multibank_reduce_sdfg(name, mem_type, banks)
sdfg(in1=in1, in2=in2, out=target, N=N, M=M)
assert np.allclose(expected, target, rtol=1e-6)
return sdfg
@xilinx_test()
def test_hbm_reduce_2x3_2b():
return exec_test(2, 3, 2, "hbm", "red_2x3_2b")
@xilinx_test()
def test_hbm_reduce_10x50_4b():
return exec_test(10, 50, 4, "hbm", "red_10x50_4b")
@xilinx_test()
def test_hbm_reduce_red_1x50_1b():
return exec_test(1, 50, 1, "hbm", "red_1x50_1b")
@xilinx_test()
def test_hbm_reduce_red_1x40_8b():
return exec_test(1, 40, 8, "hbm", "red_1x40_8b")
@xilinx_test()
def test_hbm_reduce_red_2x40_6b():
return exec_test(2, 40, 6, "hbm", "red_2x40_6b")
@xilinx_test()
def test_ddr_reduce_2x3_2b():
return exec_test(2, 3, 2, "ddr", "red_2x3_2b")
@xilinx_test()
def test_ddr_reduce_10x50_4b():
return exec_test(10, 50, 4, "ddr", "red_10x50_4b")
@xilinx_test()
def test_ddr_reduce_red_1x50_1b():
return exec_test(1, 50, 1, "ddr", "red_1x50_1b")
@xilinx_test()
def test_ddr_reduce_red_1x40_8b():
return exec_test(1, 40, 8, "ddr", "red_1x40_8b")
@xilinx_test()
def test_ddr_reduce_red_2x40_6b():
return exec_test(2, 40, 6, "ddr", "red_2x40_6b")
if __name__ == "__main__":
test_hbm_reduce_2x3_2b(None)
test_hbm_reduce_10x50_4b(None)
test_hbm_reduce_red_1x50_1b(None)
test_hbm_reduce_red_1x40_8b(None)
test_hbm_reduce_red_2x40_6b(None)
test_ddr_reduce_2x3_2b(None)
test_ddr_reduce_10x50_4b(None)
test_ddr_reduce_red_1x50_1b(None)
test_ddr_reduce_red_1x40_8b(None)
test_ddr_reduce_red_2x40_6b(None)
| 30.421769 | 75 | 0.61203 |
7945ea44c7c420a4bfb7b5ff3fa24dfebd25ba56 | 1,046 | py | Python | setup.py | dceoy/create-pypkg | fae0c69ef63ca8f511339be470de453e74e3a318 | [
"MIT"
] | null | null | null | setup.py | dceoy/create-pypkg | fae0c69ef63ca8f511339be470de453e74e3a318 | [
"MIT"
] | null | null | null | setup.py | dceoy/create-pypkg | fae0c69ef63ca8f511339be470de453e74e3a318 | [
"MIT"
] | 1 | 2019-12-31T13:10:06.000Z | 2019-12-31T13:10:06.000Z | #!/usr/bin/env python
from setuptools import find_packages, setup
from createpypkg import __version__
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='create-pypkg',
version=__version__,
author='Daichi Narushima',
author_email='[email protected]',
description='Python package scaffold builder',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/dceoy/create-pypkg',
packages=find_packages(),
include_package_data=True,
install_requires=['docopt', 'jinja2'],
entry_points={
'console_scripts': ['create-pypkg=createpypkg.cli.main:main']
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
'Topic :: Software Development'
],
python_requires='>=3.6'
)
| 29.055556 | 69 | 0.656788 |
7945ea807c9c8844a2d70064eaa826207d637fa4 | 6,788 | py | Python | lipydomics/identification/characterize_lipid_ccs_pred.py | dylanhross/lipydomics | c7c3b72d4549a1a9937f287f3b314eff8e7ed054 | [
"MIT"
] | 3 | 2020-10-30T02:49:08.000Z | 2021-03-30T22:51:45.000Z | lipydomics/identification/characterize_lipid_ccs_pred.py | dylanhross/lipydomics | c7c3b72d4549a1a9937f287f3b314eff8e7ed054 | [
"MIT"
] | 49 | 2019-09-27T19:17:50.000Z | 2021-04-12T16:44:29.000Z | lipydomics/identification/characterize_lipid_ccs_pred.py | dylanhross/lipydomics | c7c3b72d4549a1a9937f287f3b314eff8e7ed054 | [
"MIT"
] | 2 | 2021-06-22T16:35:50.000Z | 2022-02-15T19:31:10.000Z | """
lipydomics/identification/characterize_lipid_ccs_pred.py
Dylan H. Ross
2019/10/09
description:
Characterizes performance of the predictive model for generating predicted CCS values by generating plots
of predicted vs. measured CCS organized by lipid class (along with FA modifier) and MS adduct
"""
import os
from sqlite3 import connect
from matplotlib import pyplot as plt
from matplotlib import rcParams, gridspec
from ..util import print_and_log
from .build_params import ccs_pred_ref_dsets
from .encoder_params import ccs_lipid_classes, ccs_fa_mods, ccs_ms_adducts
rcParams['font.size'] = 6
def single_class_plot(cursor, lipid_class, adduct, fa_mod=None):
"""
single_class_plot
description:
generates a plot comparing predicted CCS values against experimentally measured ones for a given lipid class
(accounting for fa_mod if present) and MS adduct. Saves the plot as a figure in the ccs_pred_perf/ directory
parameters:
cursor (sqlite3.cursor) -- cursor for querying lipids.db
lipid_class (str) -- lipid class
adduct (str) -- MS adduct
[fa_mod (None or str)] -- fatty acid modifier [optional, default=None]
"""
# predicted and measured m/z and ccs
mz_t, ccs_t = [], []
mz_m, ccs_m = [], []
# use nc and nu to compute residuals
nc_m, nu_m = [], []
nc_t, nu_t = [], []
# fetch the measured data
if fa_mod in ['o', 'p']:
qry = 'SELECT mz, ccs, src_tag, lipid_nc, lipid_nu FROM measured WHERE lipid_class="{}" AND fa_mod=="{}" AND adduct="{}"'
qry = qry.format(lipid_class, fa_mod, adduct)
else:
qry = 'SELECT mz, ccs, src_tag, lipid_nc, lipid_nu FROM measured WHERE lipid_class="{}" AND fa_mod IS NULL AND adduct="{}"'
qry = qry.format(lipid_class, adduct)
for mz, ccs, st, nc, nu in cursor.execute(qry).fetchall():
# only include data from the designated sources
src_ok = st in ccs_pred_ref_dsets
if src_ok:
nc_m.append(int(nc))
nu_m.append(int(nu))
mz_m.append(float(mz))
ccs_m.append(float(ccs))
# if no measured data was found, return None to skip plotting this class
if len(mz_m) < 1:
return None
# set bounds on the predicted data to fetch and display
mz_min, mz_max = int(min(mz_m)), int(max(mz_m))
ccs_min, ccs_max = int(min(ccs_m)), int(max(ccs_m))
# fetch the predicted data
if fa_mod in ['o', 'p']:
qry = 'SELECT mz, ccs, lipid_nc, lipid_nu FROM predicted_mz JOIN predicted_ccs ON '
qry += 'predicted_mz.t_id=predicted_ccs.t_id '
qry += 'WHERE lipid_class="{}" AND fa_mod=="{}" AND adduct="{}" AND (mz BETWEEN {} AND {}) AND '
qry += '(ccs BETWEEN {} AND {})'
qry = qry.format(lipid_class, fa_mod, adduct, mz_min, mz_max, ccs_min, ccs_max)
else:
qry = 'SELECT mz, ccs, lipid_nc, lipid_nu FROM predicted_mz JOIN predicted_ccs ON '
qry += 'predicted_mz.t_id=predicted_ccs.t_id '
qry += 'WHERE lipid_class="{}" AND fa_mod IS NULL AND adduct="{}" AND (mz BETWEEN {} AND {}) AND '
qry += '(ccs BETWEEN {} AND {})'
qry = qry.format(lipid_class, adduct, mz_min, mz_max, ccs_min, ccs_max)
for mz, ccs, nc, nu in cursor.execute(qry).fetchall():
mz_t.append(float(mz))
ccs_t.append(float(ccs))
nc_t.append(int(nc))
nu_t.append(int(nu))
# compute residual CCS
mz_resid, ccs_resid = [], []
for mzm, ccsm, nc, nu in zip(mz_m, ccs_m, nc_m, nu_m):
for mzt, ccst, nct, nut in zip(mz_t, ccs_t, nc_t, nu_t):
if nc == nct and nu == nut:
mz_resid.append(mzm)
ccs_resid.append(100. * (ccst - ccsm) / ccsm)
this_dir = os.path.dirname(__file__)
fig_fname = 'ccs_pred_perf/{}_{}{}_{}.png'.format(len(ccs_m), lipid_class, fa_mod if fa_mod else '', adduct)
fig_path = os.path.join(this_dir, fig_fname)
fig = plt.figure(figsize=(3.45, 3.75))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.scatter(mz_t, ccs_t, marker='.', s=16, c='#ffa600', label='predicted', edgecolor='none')
ax1.scatter(mz_m, ccs_m, marker='.', s=8, c='purple', label='measured\n(n={} in training data)'.format(len(mz_m)), edgecolor='none')
ax1.legend()
ax1.set_xlabel('m/z')
ax1.set_ylabel(r'CCS ($\AA^2$)')
ax1.set_title('{}{} {}'.format(lipid_class, fa_mod if fa_mod else '', adduct), fontweight='bold')
for d in ['top', 'right']:
ax1.spines[d].set_visible(False)
#ax.set_xlim([mz_min - 10, mz_max + 10])
#ax.set_ylim([ccs_min - 2, ccs_max + 2])
# residuals
ax2.scatter(mz_resid, ccs_resid, marker='.', s=4, c='red', edgecolor='none')
ax2.axhline(ls='--', linewidth=0.3, c='grey', zorder=-1)
#ax2.axhline(y=5, ls='-', linewidth=0.3, c='lightgrey', zorder=-1)
#ax2.axhline(y=-5, ls='-', linewidth=0.3, c='lightgrey', zorder=-1)
ax2.axhline(y=3, ls='--', linewidth=0.3, c='lightgrey', zorder=-1)
ax2.axhline(y=-3, ls='--', linewidth=0.3, c='lightgrey', zorder=-1)
ax2.axhline(y=1, ls='-', linewidth=0.3, c='lightgrey', zorder=-1)
ax2.axhline(y=-1, ls='-', linewidth=0.3, c='lightgrey', zorder=-1)
for d in ['top', 'bottom', 'right']:
ax2.spines[d].set_visible(False)
ax2.set_ylim([-5, 5])
ax2.set_ylabel('CCS error (%)')
ax2.set_xticks([])
ax2.set_yticks([-5, -3, -1, 0, 1, 3, 5])
fig.set_tight_layout(True)
plt.savefig(fig_path, dpi=400, bbox_inches='tight')
plt.close()
def main(tstamp):
""" main build function """
# connect to database
db_path = os.path.join(os.path.dirname(__file__), 'lipids.db')
con = connect(db_path)
cur = con.cursor()
build_log = os.path.join(os.path.dirname(__file__), 'builds/build_log_{}.txt'.format(tstamp))
with open(build_log, 'a') as bl:
print_and_log('characterizing CCS prediction performance ...', bl, end=' ')
# automatically generate plots for all data included in the model training
qry = 'SELECT lipid_class, fa_mod, adduct, COUNT(*) as c FROM measured '
qry += 'GROUP BY lipid_class, fa_mod, adduct HAVING c > 9'
for lipid_class, fa_mod, adduct, c in cur.execute(qry).fetchall():
# only use the classes, fa_mods and adducts that are explicitly encoded
lc_ok = lipid_class in ccs_lipid_classes
fam_ok = fa_mod is None or fa_mod in ccs_fa_mods
add_ok = adduct in ccs_ms_adducts
if lc_ok and fam_ok and add_ok:
single_class_plot(cur, lipid_class, adduct, fa_mod=fa_mod)
print_and_log('ok\n', bl)
# close database connection
con.close()
| 40.404762 | 136 | 0.632292 |
7945eab2a85c7d8f7d887ccdbcddf6cb19e5fc6d | 426 | py | Python | deepr/layers/size.py | drohde/deepr | 672772ea3ce9cf391f9f8efc7ae9c9d438957817 | [
"Apache-2.0"
] | 50 | 2020-05-19T17:29:44.000Z | 2022-01-15T20:50:50.000Z | deepr/layers/size.py | drohde/deepr | 672772ea3ce9cf391f9f8efc7ae9c9d438957817 | [
"Apache-2.0"
] | 75 | 2020-05-20T16:53:37.000Z | 2022-01-12T15:53:46.000Z | deepr/layers/size.py | drohde/deepr | 672772ea3ce9cf391f9f8efc7ae9c9d438957817 | [
"Apache-2.0"
] | 17 | 2020-05-25T13:23:03.000Z | 2022-02-21T11:22:08.000Z | """Size Layers"""
import tensorflow as tf
from deepr.layers import base
class IsMinSize(base.Layer):
"""Compare size of inputs to minimum"""
def __init__(self, size: int, **kwargs):
super().__init__(n_in=1, n_out=1, **kwargs)
self.size = size
def forward(self, tensors, mode: str = None):
"""Forward method of the layer"""
return tf.greater_equal(tf.size(tensors), self.size)
| 23.666667 | 60 | 0.640845 |
7945eaddad797b5a254359ed43419eb5c24b4da8 | 2,744 | py | Python | models/tool.py | wang-chen/lgl-feature-matching | 55bd17ee5e8699a06514bca09a6ef834808448a7 | [
"BSD-3-Clause"
] | 17 | 2022-03-06T16:23:44.000Z | 2022-03-14T08:50:11.000Z | models/tool.py | wang-chen/lgl-feature-matching | 55bd17ee5e8699a06514bca09a6ef834808448a7 | [
"BSD-3-Clause"
] | 2 | 2022-03-09T11:05:19.000Z | 2022-03-25T20:54:42.000Z | models/tool.py | wang-chen/lgl-feature-matching | 55bd17ee5e8699a06514bca09a6ef834808448a7 | [
"BSD-3-Clause"
] | 2 | 2022-03-07T01:18:33.000Z | 2022-03-07T08:28:56.000Z | #!/usr/bin/env python3
import time
import torch
class GlobalStepCounter():
def __init__(self, initial_step=0):
self._steps = initial_step
@property
def steps(self):
return self._steps
def step(self, step=1):
        self._steps += step
return self._steps
class Timer:
def __init__(self):
torch.cuda.synchronize()
self.start_time = time.time()
def tic(self):
self.start()
def show(self, prefix="", output=True):
torch.cuda.synchronize()
duration = time.time()-self.start_time
if output:
print(prefix+"%fs" % duration)
return duration
def toc(self, prefix=""):
self.end()
print(prefix+"%fs = %fHz" % (self.duration, 1/self.duration))
return self.duration
def start(self):
torch.cuda.synchronize()
self.start_time = time.time()
def end(self):
torch.cuda.synchronize()
self.duration = time.time()-self.start_time
self.start()
return self.duration
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class EarlyStopScheduler(torch.optim.lr_scheduler.ReduceLROnPlateau):
def __init__(self, optimizer, factor=0.1, patience=10, min_lr=0, verbose=False):
super().__init__(optimizer, factor=factor, patience=patience, min_lr=min_lr, verbose=verbose)
self.no_decrease = 0
def step(self, metrics, epoch=None):
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
if epoch is None:
epoch = self.last_epoch = self.last_epoch + 1
self.last_epoch = epoch
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
if self.num_bad_epochs > self.patience:
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
return self._reduce_lr(epoch)
def _reduce_lr(self, epoch):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: reducing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
return False
else:
return True
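# A usage sketch (assumed, not in the original file). step() returns True only
# once patience is exhausted and the first parameter group's lr can no longer
# be reduced below min_lr, so it can double as an early-stopping signal:
#
#   scheduler = EarlyStopScheduler(optimizer, factor=0.1, patience=5, min_lr=1e-5)
#   for epoch in range(max_epochs):
#       train_one_epoch(model, optimizer)   # placeholder training step
#       val_loss = validate(model)          # placeholder validation metric
#       if scheduler.step(val_loss):
#           break                           # learning rate exhausted -> stop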
| 29.191489 | 101 | 0.590743 |
7945ec8ade35befdcbbcbe4009ba765a6b3cb0d8 | 1,101 | py | Python | nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..featuredetection import DilateImage
def test_DilateImage_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
inputMaskVolume=dict(argstr='--inputMaskVolume %s', ),
inputRadius=dict(argstr='--inputRadius %d', ),
inputVolume=dict(argstr='--inputVolume %s', ),
outputVolume=dict(
argstr='--outputVolume %s',
hash_files=False,
),
)
inputs = DilateImage.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_DilateImage_outputs():
output_map = dict(outputVolume=dict(), )
outputs = DilateImage.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 33.363636 | 67 | 0.627611 |
7945eca81215abce4accacc4dea017ec7d45a49f | 1,871 | py | Python | sqlite2xls/sqlite2xls.py | dhilipsiva/test | 58b87e072eda51c0b744106926d25f1ec8b6aea4 | [
"MIT"
] | 1 | 2017-03-12T22:54:25.000Z | 2017-03-12T22:54:25.000Z | sqlite2xls/sqlite2xls.py | dhilipsiva/test | 58b87e072eda51c0b744106926d25f1ec8b6aea4 | [
"MIT"
] | 40 | 2015-04-08T06:00:07.000Z | 2020-05-11T05:45:10.000Z | sqlite2xls/sqlite2xls.py | dhilipsiva/test | 58b87e072eda51c0b744106926d25f1ec8b6aea4 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright © dhilipsiva
#
'''
File name: sqlite2xls.py
Version: 0.1
Author: dhilipsiva <[email protected]>
Date created: 2015-04-03
'''
__author__ = "dhilipsiva"
__status__ = "development"
"""
Convert an SQLite database to an XLS (Excel) spreadsheet.
pip install xlwt
Originally taken from: https://github.com/flow-phys/sqlite2xls
"""
import sqlite3 as lite
import xlwt
import sys
import math
db = sys.argv[1]
out = sys.argv[2]
con = lite.connect(db)
cur = con.cursor()
# Get all the tables in this database
cmd = "select name from sqlite_master where type = 'table' "
cur.execute(cmd)
tables = cur.fetchall()
workbook = xlwt.Workbook()
for table in tables:
# Get column heads
cur.execute('pragma table_info(%s)' % table[0])
head = cur.fetchall()
# Get row entries
cur.execute('select * from %s' % table[0])
players = cur.fetchall()
Np = len(players)
    cmax = 30000  # Max characters per cell
Rmax = 64000 # Max number of rows per sheet
NS = 1
if (Np > Rmax):
NS = int(math.ceil(float(Np)/float(Rmax)))
for ss in range(NS):
ips = ss*(Rmax)
ipe = min((ss+1)*Rmax, Np)
# Open workbook and save head/body
print table[0]
sheet = workbook.add_sheet(table[0][:30])
for col, item in enumerate(head):
sheet.write(0, col, item[1])
# body
for row, player in enumerate(players[ips:ipe]):
for col, item in enumerate(player):
if (type(item) == type(u'')):
imax = min(cmax, len(item))
sheet.write(row+1, col, item[0:imax])
else:
sheet.write(row+1, col, item)
# done
workbook.save(out)
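# Usage note (assumed invocation; file names are placeholders): the script is
# driven purely by the two positional arguments read from sys.argv above, e.g.
#
#   pip install xlwt
#   python2 sqlite2xls.py input.db output.xls
#
# Each table becomes one worksheet; tables longer than the ~64000-row XLS limit
# are split across several sheets by the Rmax logic above.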
| 23.683544 | 62 | 0.59861 |
7945ecb5a82911c7362c670198896b90663c853d | 6,890 | py | Python | readthedocs/redirects/models.py | phoenixflyinghigh/readthedocs.org | 2dc1615c674b08c8b681ac3543fee913c9d90a11 | [
"MIT"
] | 2 | 2019-01-31T02:20:01.000Z | 2019-02-02T21:47:56.000Z | readthedocs/redirects/models.py | phoenixflyinghigh/readthedocs.org | 2dc1615c674b08c8b681ac3543fee913c9d90a11 | [
"MIT"
] | null | null | null | readthedocs/redirects/models.py | phoenixflyinghigh/readthedocs.org | 2dc1615c674b08c8b681ac3543fee913c9d90a11 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Django models for the redirects app."""
import logging
import re
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from readthedocs.core.resolver import resolve_path
from readthedocs.projects.models import Project
from .managers import RedirectManager
log = logging.getLogger(__name__)
HTTP_STATUS_CHOICES = (
(301, _('301 - Permanent Redirect')),
(302, _('302 - Temporary Redirect')),
)
STATUS_CHOICES = (
(True, _('Active')),
(False, _('Inactive')),
)
TYPE_CHOICES = (
('prefix', _('Prefix Redirect')),
('page', _('Page Redirect')),
('exact', _('Exact Redirect')),
('sphinx_html', _('Sphinx HTMLDir -> HTML')),
('sphinx_htmldir', _('Sphinx HTML -> HTMLDir')),
# ('advanced', _('Advanced')),
)
# FIXME: this help_text message should be dynamic since "Absolute path" doesn't
# make sense for "Prefix Redirects" since the from URL is considered after the
# ``/$lang/$version/`` part. Also, there is a feature for the "Exact
# Redirects" that should be mentioned here: the usage of ``$rest``
from_url_helptext = _(
'Absolute path, excluding the domain. '
'Example: <b>/docs/</b> or <b>/install.html</b>',
)
to_url_helptext = _(
'Absolute or relative URL. Example: '
'<b>/tutorial/install.html</b>',
)
redirect_type_helptext = _('The type of redirect you wish to use.')
@python_2_unicode_compatible
class Redirect(models.Model):
"""A HTTP redirect associated with a Project."""
project = models.ForeignKey(
Project,
verbose_name=_('Project'),
related_name='redirects',
)
redirect_type = models.CharField(
_('Redirect Type'),
max_length=255,
choices=TYPE_CHOICES,
help_text=redirect_type_helptext,
)
from_url = models.CharField(
_('From URL'),
max_length=255,
db_index=True,
help_text=from_url_helptext,
blank=True,
)
to_url = models.CharField(
_('To URL'),
max_length=255,
db_index=True,
help_text=to_url_helptext,
blank=True,
)
http_status = models.SmallIntegerField(
_('HTTP Status'),
choices=HTTP_STATUS_CHOICES,
default=301,
)
status = models.BooleanField(choices=STATUS_CHOICES, default=True)
create_dt = models.DateTimeField(auto_now_add=True)
update_dt = models.DateTimeField(auto_now=True)
objects = RedirectManager()
class Meta:
verbose_name = _('redirect')
verbose_name_plural = _('redirects')
ordering = ('-update_dt',)
def __str__(self):
redirect_text = '{type}: {from_to_url}'
if self.redirect_type in ['prefix', 'page', 'exact']:
return redirect_text.format(
type=self.get_redirect_type_display(),
from_to_url=self.get_from_to_url_display(),
)
return ugettext(
'Redirect: {}'.format(
self.get_redirect_type_display(),
),
)
def get_from_to_url_display(self):
if self.redirect_type in ['prefix', 'page', 'exact']:
from_url = self.from_url
to_url = self.to_url
if self.redirect_type == 'prefix':
to_url = '/{lang}/{version}/'.format(
lang=self.project.language,
version=self.project.default_version,
)
return '{from_url} -> {to_url}'.format(
from_url=from_url,
to_url=to_url,
)
return ''
def get_full_path(self, filename, language=None, version_slug=None):
"""
Return a full path for a given filename.
This will include version and language information. No protocol/domain
is returned.
"""
# Handle explicit http redirects
if re.match('^https?://', filename):
return filename
return resolve_path(
project=self.project,
language=language,
version_slug=version_slug,
filename=filename,
)
def get_redirect_path(self, path, language=None, version_slug=None):
method = getattr(
self,
'redirect_{type}'.format(
type=self.redirect_type,
),
)
return method(path, language=language, version_slug=version_slug)
def redirect_prefix(self, path, language=None, version_slug=None):
if path.startswith(self.from_url):
log.debug('Redirecting %s', self)
cut_path = re.sub('^%s' % self.from_url, '', path)
to = self.get_full_path(
filename=cut_path,
language=language,
version_slug=version_slug,
)
return to
def redirect_page(self, path, language=None, version_slug=None):
if path == self.from_url:
log.debug('Redirecting %s', self)
to = self.get_full_path(
filename=self.to_url.lstrip('/'),
language=language,
version_slug=version_slug,
)
return to
def redirect_exact(self, path, language=None, version_slug=None):
full_path = path
if language and version_slug:
# reconstruct the full path for an exact redirect
full_path = self.get_full_path(path, language, version_slug)
if full_path == self.from_url:
log.debug('Redirecting %s', self)
return self.to_url
# Handle full sub-level redirects
if '$rest' in self.from_url:
match = self.from_url.split('$rest')[0]
if full_path.startswith(match):
cut_path = re.sub('^%s' % match, self.to_url, full_path)
return cut_path
def redirect_sphinx_html(self, path, language=None, version_slug=None):
for ending in ['/', '/index.html']:
if path.endswith(ending):
log.debug('Redirecting %s', self)
path = path[1:] # Strip leading slash.
to = re.sub(ending + '$', '.html', path)
return self.get_full_path(
filename=to,
language=language,
version_slug=version_slug,
)
def redirect_sphinx_htmldir(self, path, language=None, version_slug=None):
if path.endswith('.html'):
log.debug('Redirecting %s', self)
path = path[1:] # Strip leading slash.
to = re.sub('.html$', '/', path)
return self.get_full_path(
filename=to,
language=language,
version_slug=version_slug,
)
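# A hedged illustration (not part of the original module): for a prefix
# redirect with from_url='/dev/', get_redirect_path('/dev/install.html',
# language='en', version_slug='latest') dispatches to redirect_prefix(), strips
# the '/dev/' prefix, and hands 'install.html' to resolve_path(), which builds
# the final URL; the exact result depends on the project's URL configuration.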
| 31.461187 | 79 | 0.585776 |
7945ecdc29aff549e88a93cfe33af08e013190fa | 665 | py | Python | manage.py | Loldozen/cowrywise | 99eeeb167dddce0840f5dd27efc4b058041eb735 | [
"MIT"
] | null | null | null | manage.py | Loldozen/cowrywise | 99eeeb167dddce0840f5dd27efc4b058041eb735 | [
"MIT"
] | null | null | null | manage.py | Loldozen/cowrywise | 99eeeb167dddce0840f5dd27efc4b058041eb735 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cowrywise.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.913043 | 73 | 0.679699 |
7945ed34575f192dbf0ec6340e5896a0731efc73 | 1,441 | py | Python | orange3/Orange/tests/test_basic_stats.py | rgschmitz1/BioDepot-workflow-builder | f74d904eeaf91ec52ec9b703d9fb38e9064e5a66 | [
"MIT"
] | 54 | 2017-01-08T17:21:49.000Z | 2021-11-02T08:46:07.000Z | orange3/Orange/tests/test_basic_stats.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 22 | 2017-03-28T06:03:14.000Z | 2021-07-28T05:43:55.000Z | orange3/Orange/tests/test_basic_stats.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 21 | 2017-01-26T21:12:09.000Z | 2022-01-31T21:34:59.000Z | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
from unittest import TestCase
from Orange.data import Table
from Orange.statistics.basic_stats import DomainBasicStats, BasicStats
class TestDomainBasicStats(TestCase):
def setUp(self):
self.zoo = Table("zoo")
def test_domain_basic_stats(self):
domain = self.zoo.domain
attr_stats = [BasicStats(self.zoo, a) for a in domain.attributes]
class_var_stats = [BasicStats(self.zoo, a) for a in domain.class_vars]
meta_stats = [BasicStats(self.zoo, a) for a in domain.metas]
domain_stats = DomainBasicStats(self.zoo)
self.assertStatsEqual(domain_stats.stats, attr_stats + class_var_stats)
domain_stats = DomainBasicStats(self.zoo, include_metas=True)
self.assertStatsEqual(
domain_stats.stats, attr_stats + class_var_stats + meta_stats
)
def assertStatsEqual(self, stats1, stats2):
self.assertEqual(len(stats1), len(stats2))
for stat1, stat2 in zip(stats1, stats2):
self.assertAlmostEqual(stat1.min, stat2.min)
self.assertAlmostEqual(stat1.max, stat2.max)
self.assertAlmostEqual(stat1.mean, stat2.mean)
self.assertAlmostEqual(stat1.var, stat2.var)
self.assertAlmostEqual(stat1.nans, stat2.nans)
self.assertAlmostEqual(stat1.non_nans, stat2.non_nans)
| 40.027778 | 79 | 0.700208 |
7945edbbe677333b13dfb78a5d282a921fcef25a | 1,037 | py | Python | mmseg/models/backbones/__init__.py | buoyancy99/DenseCLIP | eac7810ca17c93aaf59f2a6b6c86ccbb4cdfcc9b | [
"Apache-2.0"
] | 11 | 2022-02-04T01:09:45.000Z | 2022-03-08T05:49:16.000Z | mmseg/models/backbones/__init__.py | buoyancy99/DenseCLIP | eac7810ca17c93aaf59f2a6b6c86ccbb4cdfcc9b | [
"Apache-2.0"
] | null | null | null | mmseg/models/backbones/__init__.py | buoyancy99/DenseCLIP | eac7810ca17c93aaf59f2a6b6c86ccbb4cdfcc9b | [
"Apache-2.0"
] | 1 | 2022-02-03T10:29:40.000Z | 2022-02-03T10:29:40.000Z | # Copyright (c) OpenMMLab. All rights reserved.
from .bisenetv1 import BiSeNetV1
from .bisenetv2 import BiSeNetV2
from .cgnet import CGNet
from .erfnet import ERFNet
from .fast_scnn import FastSCNN
from .hrnet import HRNet
from .icnet import ICNet
from .mit import MixVisionTransformer
from .mobilenet_v2 import MobileNetV2
from .mobilenet_v3 import MobileNetV3
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1c, ResNetV1d, ResNetClip
from .resnext import ResNeXt
from .stdc import STDCContextPathNet, STDCNet
from .swin import SwinTransformer
from .timm_backbone import TIMMBackbone
from .twins import PCPVT, SVT
from .unet import UNet
from .vit import VisionTransformer
__all__ = [
'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN',
'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3',
'VisionTransformer', 'SwinTransformer', 'MixVisionTransformer',
'BiSeNetV1', 'BiSeNetV2', 'ICNet', 'TIMMBackbone', 'ERFNet', 'PCPVT',
'SVT', 'STDCNet', 'STDCContextPathNet', 'ResNetClip'
]
| 35.758621 | 73 | 0.768563 |
7945eedd3564d189a58d0b888fe7250612ce5dd6 | 5,864 | py | Python | effreq/test/test_treq.py | radix/effreq | 3e217a0d2eafe5eb283e0a7b529ac57ec6090e74 | [
"Unlicense",
"MIT"
] | null | null | null | effreq/test/test_treq.py | radix/effreq | 3e217a0d2eafe5eb283e0a7b529ac57ec6090e74 | [
"Unlicense",
"MIT"
] | null | null | null | effreq/test/test_treq.py | radix/effreq | 3e217a0d2eafe5eb283e0a7b529ac57ec6090e74 | [
"Unlicense",
"MIT"
] | null | null | null | # coding: utf-8
import json
from ..intent import Request, Response
from ..treq import perform_with_treq
from testtools import TestCase
from twisted.internet.defer import succeed
from twisted.trial.unittest import SynchronousTestCase
class TreqTests(SynchronousTestCase):
"""
Tests for the treq performer.
"""
def test_perform(self):
"""
The Request effect dispatches a request to treq, and returns a
Response with all data populated.
"""
reactor = object()
expected_req = ('GET',
'http://google.com/',
{'reactor': reactor,
'headers': None,
'data': None})
response = Response(content='content', code=200, headers={})
treq = StubTreq(reqs=[(expected_req, response)],
contents=[(response, "content")])
req = Request(method="get", url="http://google.com/")
self.assertEqual(
self.successResultOf(perform_with_treq(treq, reactor, req)),
response)
def test_idna(self):
"""
IDNA is supported, despite treq's lack of support. The IDNA-encoded
URL is passed to treq as bytes.
"""
reactor = object()
expected_req = ('GET',
'http://xn--wxa.com/',
{'reactor': reactor,
'headers': None,
'data': None})
response = Response(content='content', code=200, headers={})
treq = StubTreq(reqs=[(expected_req, response)],
contents=[(response, "content")])
req = Request(method="get", url=u'http://λ.com/')
self.assertEqual(
self.successResultOf(perform_with_treq(treq, reactor, req)),
response)
def test_unicode_paths(self):
"""
Unicode paths are supported, despite treq's lack of support. The
UTF8-then-URL-quoted ascii bytes are passed to treq.
"""
reactor = object()
expected_req = ('GET',
'http://lambda.com/%CE%BB',
{'reactor': reactor,
'headers': None,
'data': None})
response = Response(content='content', code=200, headers={})
treq = StubTreq(reqs=[(expected_req, response)],
contents=[(response, "content")])
req = Request(method="get", url=u'http://lambda.com/λ')
self.assertEqual(
self.successResultOf(perform_with_treq(treq, reactor, req)),
response)
def test_unicode_queries(self):
"""
        Unicode query strings are supported, despite treq's lack of support. The
        UTF8-then-URL-quoted ascii bytes are passed to treq.
"""
reactor = object()
expected_req = ('GET',
'http://lambda.com/%CE%BB?k%CE%BB=v%CE%BB',
{'reactor': reactor,
'headers': None,
'data': None})
response = Response(content='content', code=200, headers={})
treq = StubTreq(reqs=[(expected_req, response)],
contents=[(response, "content")])
# Agh we're escaping =
req = Request(method="get", url=u'http://lambda.com/lambda?kλ=vλ')
self.assertEqual(
self.successResultOf(perform_with_treq(treq, reactor, req)),
response)
class StubTreq(object):
"""
    A stub version of otter.utils.logging_treq that returns canned responses
    from association lists of requests and contents.
"""
def __init__(self, reqs=None, contents=None):
"""
        :param reqs: An association list (list of pairs) specifying the values
        that the `request` method should return. Each key is a tuple of
        (method, url, kwargs), where kwargs is the keyword-argument dict
        passed to `request` (usually 'headers' and 'data').
        :param contents: An association list mapping responses (the values in
        `reqs`) to the value that the `content` method should return.
"""
_check_unique_keys(reqs)
_check_unique_keys(contents)
self.reqs = reqs
self.contents = contents
def request(self, method, url, **kwargs):
"""
Return a result by looking up the arguments in the `reqs` dict.
The only kwargs we care about are 'headers' and 'data',
although if other kwargs are passed their keys count as part of the
request.
'log' would also be a useful kwarg to check, but since dictionary keys
should be immutable, and it's hard to get the exact instance of
BoundLog, that's being ignored for now.
"""
key = (method, url, kwargs)
return succeed(alist_get(self.reqs, key))
def content(self, response):
"""Return a result by looking up the response in the `contents` dict."""
return succeed(alist_get(self.contents, response))
def json_content(self, response):
"""Return :meth:`content` after json-decoding"""
return self.content(response).addCallback(json.loads)
def _check_unique_keys(data):
"""Check that all the keys in an association list are unique."""
# O(lol)
for itemindex, item in enumerate(data):
for itemagain in data[itemindex + 1:]:
if item[0] == itemagain[0]:
raise Exception("Duplicate items in EQDict: %r:%r and %r:%r"
% (item[0], item[1], itemagain[0], itemagain[1]))
def alist_get(data, key):
"""Look up a value in an association list."""
for item in data:
if item[0] == key:
return item[1]
raise KeyError(key)
| 37.589744 | 81 | 0.565825 |
7945efc1798f282c867be2e18b40f69235e2b98b | 36 | py | Python | oils/apps.py | ChristianJStarr/sbs-website | db891f0a67f46cc9cdadc95714304b2ea91a162a | [
"MIT"
] | 2 | 2021-12-28T01:53:00.000Z | 2022-01-22T00:42:39.000Z | oils/apps.py | ChristianJStarr/sbs-website | db891f0a67f46cc9cdadc95714304b2ea91a162a | [
"MIT"
] | 15 | 2020-02-12T00:00:38.000Z | 2022-03-11T23:43:44.000Z | oils/apps.py | ChristianJStarr/sbs-website | db891f0a67f46cc9cdadc95714304b2ea91a162a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 9 | 33 | 0.805556 |
7945f0b7b8867d40d1fbf11c7aee277bfb2baf81 | 3,555 | py | Python | TextMagic/models/clear_and_assign_contacts_to_list_input_object.py | textmagic/textmagic-rest-python-v2 | 49055e214a6cf0f7545b85aa03e49e6d92bcef13 | [
"MIT"
] | 2 | 2020-10-21T09:44:33.000Z | 2021-06-29T20:58:57.000Z | TextMagic/models/clear_and_assign_contacts_to_list_input_object.py | textmagic/textmagic-rest-python-v2 | 49055e214a6cf0f7545b85aa03e49e6d92bcef13 | [
"MIT"
] | null | null | null | TextMagic/models/clear_and_assign_contacts_to_list_input_object.py | textmagic/textmagic-rest-python-v2 | 49055e214a6cf0f7545b85aa03e49e6d92bcef13 | [
"MIT"
] | 1 | 2021-12-02T12:15:56.000Z | 2021-12-02T12:15:56.000Z | # coding: utf-8
"""
TextMagic API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ClearAndAssignContactsToListInputObject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'contacts': 'str'
}
attribute_map = {
'contacts': 'contacts'
}
def __init__(self, contacts=None): # noqa: E501
"""ClearAndAssignContactsToListInputObject - a model defined in Swagger""" # noqa: E501
self._contacts = None
self.discriminator = None
if contacts is not None:
self.contacts = contacts
@property
def contacts(self):
"""Gets the contacts of this ClearAndAssignContactsToListInputObject. # noqa: E501
Contact ID(s), separated by a comma or \"all\" to add all contacts belonging to the current user. # noqa: E501
:return: The contacts of this ClearAndAssignContactsToListInputObject. # noqa: E501
:rtype: str
"""
return self._contacts
@contacts.setter
def contacts(self, contacts):
"""Sets the contacts of this ClearAndAssignContactsToListInputObject.
Contact ID(s), separated by a comma or \"all\" to add all contacts belonging to the current user. # noqa: E501
:param contacts: The contacts of this ClearAndAssignContactsToListInputObject. # noqa: E501
:type: str
"""
self._contacts = contacts
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ClearAndAssignContactsToListInputObject, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClearAndAssignContactsToListInputObject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.127119 | 119 | 0.591842 |
7945f1264d53e3a239d702212d578cef1972c690 | 4,241 | py | Python | localstack/plugins.py | wapmesquita/localstack | 0d67b3da8fefa05a077968be41220cf46764722a | [
"Apache-2.0"
] | null | null | null | localstack/plugins.py | wapmesquita/localstack | 0d67b3da8fefa05a077968be41220cf46764722a | [
"Apache-2.0"
] | null | null | null | localstack/plugins.py | wapmesquita/localstack | 0d67b3da8fefa05a077968be41220cf46764722a | [
"Apache-2.0"
] | null | null | null | import sys
from localstack.services.es import es_starter
from localstack.services.s3 import s3_listener, s3_starter
from localstack.services.kms import kms_starter
from localstack.services.sns import sns_listener
from localstack.services.sqs import sqs_listener, sqs_starter
from localstack.services.iam import iam_listener, iam_starter
from localstack.services.infra import (register_plugin, Plugin,
start_sns, start_ses, start_apigateway, start_elasticsearch_service, start_events, start_lambda,
start_redshift, start_firehose, start_cloudwatch, start_dynamodbstreams, start_route53,
start_ssm, start_sts, start_secretsmanager, start_cloudwatch_logs, start_ec2)
from localstack.services.kinesis import kinesis_listener, kinesis_starter
from localstack.services.dynamodb import dynamodb_listener, dynamodb_starter
from localstack.services.apigateway import apigateway_listener
from localstack.services.stepfunctions import stepfunctions_starter, stepfunctions_listener
from localstack.services.cloudformation import cloudformation_listener, cloudformation_starter
from localstack.services.events import events_listener
# register default plugins
def register_localstack_plugins():
try:
register_plugin(Plugin('apigateway',
start=start_apigateway,
listener=apigateway_listener.UPDATE_APIGATEWAY))
register_plugin(Plugin('cloudformation',
start=cloudformation_starter.start_cloudformation,
listener=cloudformation_listener.UPDATE_CLOUDFORMATION))
register_plugin(Plugin('cloudwatch',
start=start_cloudwatch))
register_plugin(Plugin('dynamodb',
start=dynamodb_starter.start_dynamodb,
check=dynamodb_starter.check_dynamodb,
listener=dynamodb_listener.UPDATE_DYNAMODB))
register_plugin(Plugin('dynamodbstreams',
start=start_dynamodbstreams))
register_plugin(Plugin('ec2',
start=start_ec2))
register_plugin(Plugin('elasticsearch',
start=es_starter.start_elasticsearch,
check=es_starter.check_elasticsearch))
register_plugin(Plugin('es',
start=start_elasticsearch_service))
register_plugin(Plugin('events',
start=start_events))
register_plugin(Plugin('firehose',
start=start_firehose))
register_plugin(Plugin('iam',
start=iam_starter.start_iam,
listener=iam_listener.UPDATE_IAM))
register_plugin(Plugin('kinesis',
start=kinesis_starter.start_kinesis,
check=kinesis_starter.check_kinesis,
listener=kinesis_listener.UPDATE_KINESIS))
register_plugin(Plugin('kms',
start=kms_starter.start_kms,
priority=10))
register_plugin(Plugin('lambda',
start=start_lambda))
register_plugin(Plugin('logs',
start=start_cloudwatch_logs))
register_plugin(Plugin('redshift',
start=start_redshift))
register_plugin(Plugin('route53',
start=start_route53))
register_plugin(Plugin('s3',
start=s3_starter.start_s3,
check=s3_starter.check_s3,
listener=s3_listener.UPDATE_S3))
register_plugin(Plugin('secretsmanager',
start=start_secretsmanager))
register_plugin(Plugin('ses',
start=start_ses))
register_plugin(Plugin('sns',
start=start_sns,
listener=sns_listener.UPDATE_SNS))
register_plugin(Plugin('sqs',
start=sqs_starter.start_sqs,
listener=sqs_listener.UPDATE_SQS,
check=sqs_starter.check_sqs))
register_plugin(Plugin('ssm',
start=start_ssm))
register_plugin(Plugin('sts',
start=start_sts))
register_plugin(Plugin('events',
start=start_events, listener=events_listener.UPDATE_EVENTS))
register_plugin(Plugin('stepfunctions',
start=stepfunctions_starter.start_stepfunctions,
listener=stepfunctions_listener.UPDATE_STEPFUNCTIONS))
except Exception as e:
print('Unable to register plugins: %s' % e)
sys.stdout.flush()
raise e
| 44.642105 | 100 | 0.705966 |
7945f15a17e4f9a11062eff10634224542ec3536 | 26,037 | py | Python | yocto/poky/bitbake/lib/toaster/orm/migrations/0014_auto__chg_field_package_summary__chg_field_layer_summary__chg_field_re.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 2 | 2019-03-27T08:11:14.000Z | 2020-02-22T20:40:24.000Z | yocto/poky/bitbake/lib/toaster/orm/migrations/0014_auto__chg_field_package_summary__chg_field_layer_summary__chg_field_re.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 39 | 2016-08-23T11:23:28.000Z | 2017-04-07T08:00:52.000Z | yocto/poky/bitbake/lib/toaster/orm/migrations/0014_auto__chg_field_package_summary__chg_field_layer_summary__chg_field_re.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 1 | 2021-09-10T08:10:12.000Z | 2021-09-10T08:10:12.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Package.summary'
db.alter_column(u'orm_package', 'summary', self.gf('django.db.models.fields.TextField')())
# Changing field 'Layer.summary'
db.alter_column(u'orm_layer', 'summary', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'Recipe.summary'
db.alter_column(u'orm_recipe', 'summary', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'Package.summary'
db.alter_column(u'orm_package', 'summary', self.gf('django.db.models.fields.CharField')(max_length=200))
# Changing field 'Layer.summary'
db.alter_column(u'orm_layer', 'summary', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Changing field 'Recipe.summary'
db.alter_column(u'orm_recipe', 'summary', self.gf('django.db.models.fields.CharField')(max_length=100))
models = {
u'orm.bitbakeversion': {
'Meta': {'object_name': 'BitbakeVersion'},
'branch': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'dirpath': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'giturl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'orm.branch': {
'Meta': {'unique_together': "(('layer_source', 'name'), ('layer_source', 'up_id'))", 'object_name': 'Branch'},
'bitbake_branch': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'True', 'to': u"orm['orm.LayerSource']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'up_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'up_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'})
},
u'orm.build': {
'Meta': {'object_name': 'Build'},
'bitbake_version': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'build_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'completed_on': ('django.db.models.fields.DateTimeField', [], {}),
'cooker_log_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'distro': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'distro_version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'errors_no': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'outcome': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']", 'null': 'True'}),
'started_on': ('django.db.models.fields.DateTimeField', [], {}),
'timespent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'warnings_no': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'orm.helptext': {
'Meta': {'object_name': 'HelpText'},
'area': ('django.db.models.fields.IntegerField', [], {}),
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'helptext_build'", 'to': u"orm['orm.Build']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'orm.layer': {
'Meta': {'unique_together': "(('layer_source', 'up_id'), ('layer_source', 'name'))", 'object_name': 'Layer'},
'description': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_index_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'layer_source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['orm.LayerSource']", 'null': 'True'}),
'local_path': ('django.db.models.fields.FilePathField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'up_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'up_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'vcs_url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}),
'vcs_web_file_base_url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'})
},
u'orm.layer_version': {
'Meta': {'unique_together': "(('layer_source', 'up_id'),)", 'object_name': 'Layer_Version'},
'branch': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'build': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'layer_version_build'", 'null': 'True', 'to': u"orm['orm.Build']"}),
'commit': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'dirpath': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_layer'", 'to': u"orm['orm.Layer']"}),
'layer_source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['orm.LayerSource']", 'null': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'up_branch': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['orm.Branch']", 'null': 'True'}),
'up_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'up_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'})
},
u'orm.layersource': {
'Meta': {'unique_together': "(('sourcetype', 'apiurl'),)", 'object_name': 'LayerSource'},
'apiurl': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
'sourcetype': ('django.db.models.fields.IntegerField', [], {})
},
u'orm.layerversiondependency': {
'Meta': {'unique_together': "(('layer_source', 'up_id'),)", 'object_name': 'LayerVersionDependency'},
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependees'", 'to': u"orm['orm.Layer_Version']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['orm.LayerSource']", 'null': 'True'}),
'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies'", 'to': u"orm['orm.Layer_Version']"}),
'up_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'})
},
u'orm.logmessage': {
'Meta': {'object_name': 'LogMessage'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lineno': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'pathname': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Task']", 'null': 'True', 'blank': 'True'})
},
u'orm.machine': {
'Meta': {'unique_together': "(('layer_source', 'up_id'),)", 'object_name': 'Machine'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['orm.LayerSource']", 'null': 'True'}),
'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Layer_Version']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'up_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'up_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'})
},
u'orm.package': {
'Meta': {'object_name': 'Package'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'installed_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Recipe']", 'null': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'orm.package_dependency': {
'Meta': {'object_name': 'Package_Dependency'},
'dep_type': ('django.db.models.fields.IntegerField', [], {}),
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_target'", 'to': u"orm['orm.Package']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_source'", 'to': u"orm['orm.Package']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']", 'null': 'True'})
},
u'orm.package_file': {
'Meta': {'object_name': 'Package_File'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildfilelist_package'", 'to': u"orm['orm.Package']"}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
u'orm.project': {
'Meta': {'object_name': 'Project'},
'bitbake_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.BitbakeVersion']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Release']"}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'orm.projectlayer': {
'Meta': {'object_name': 'ProjectLayer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layercommit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Layer_Version']", 'null': 'True'}),
'optional': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"})
},
u'orm.projecttarget': {
'Meta': {'object_name': 'ProjectTarget'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'orm.projectvariable': {
'Meta': {'object_name': 'ProjectVariable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'orm.recipe': {
'Meta': {'object_name': 'Recipe'},
'bugtracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['orm.LayerSource']", 'null': 'True'}),
'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recipe_layer_version'", 'to': u"orm['orm.Layer_Version']"}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'up_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'up_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'orm.recipe_dependency': {
'Meta': {'object_name': 'Recipe_Dependency'},
'dep_type': ('django.db.models.fields.IntegerField', [], {}),
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_depends'", 'to': u"orm['orm.Recipe']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_recipe'", 'to': u"orm['orm.Recipe']"})
},
u'orm.release': {
'Meta': {'object_name': 'Release'},
'bitbake_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.BitbakeVersion']"}),
'branch': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'orm.releasedefaultlayer': {
'Meta': {'object_name': 'ReleaseDefaultLayer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Layer']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Release']"})
},
u'orm.target': {
'Meta': {'object_name': 'Target'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_manifest_path': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'orm.target_file': {
'Meta': {'object_name': 'Target_File'},
'directory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'directory_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inodetype': ('django.db.models.fields.IntegerField', [], {}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'sym_target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'symlink_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
},
u'orm.target_image_file': {
'Meta': {'object_name': 'Target_Image_File'},
'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '254'}),
'file_size': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
},
u'orm.target_installed_package': {
'Meta': {'object_name': 'Target_Installed_Package'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildtargetlist_package'", 'to': u"orm['orm.Package']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
},
u'orm.task': {
'Meta': {'ordering': "('order', 'recipe')", 'unique_together': "(('build', 'recipe', 'task_name'),)", 'object_name': 'Task'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_build'", 'to': u"orm['orm.Build']"}),
'cpu_usage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}),
'disk_io': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'elapsed_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'logfile': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'outcome': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'path_to_sstate_obj': ('django.db.models.fields.FilePathField', [], {'max_length': '500', 'blank': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'build_recipe'", 'to': u"orm['orm.Recipe']"}),
'script_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'source_url': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'sstate_checksum': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'sstate_result': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_executed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'work_directory': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'})
},
u'orm.task_dependency': {
'Meta': {'object_name': 'Task_Dependency'},
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_depends'", 'to': u"orm['orm.Task']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_task'", 'to': u"orm['orm.Task']"})
},
u'orm.toastersetting': {
'Meta': {'object_name': 'ToasterSetting'},
'helptext': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'orm.toastersettingdefaultlayer': {
'Meta': {'object_name': 'ToasterSettingDefaultLayer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Layer_Version']"})
},
u'orm.variable': {
'Meta': {'object_name': 'Variable'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variable_build'", 'to': u"orm['orm.Build']"}),
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'human_readable_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variable_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'variable_value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'orm.variablehistory': {
'Meta': {'object_name': 'VariableHistory'},
'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'variable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vhistory'", 'to': u"orm['orm.Variable']"})
}
}
complete_apps = ['orm'] | 77.491071 | 177 | 0.558167 |
7945f23692d266c00a73e1661a6da142d10fd2da | 9,240 | py | Python | insights/parsers/tests/test_postgresql_conf.py | mglantz/insights-core | 6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/tests/test_postgresql_conf.py | mglantz/insights-core | 6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/tests/test_postgresql_conf.py | mglantz/insights-core | 6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | import pytest
from insights.tests import context_wrap
from insights.parsers.postgresql_conf import PostgreSQLConf
postgresql_conf_cnt = """
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '(none)' # write an extra PID file
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
#port = 5432 # (change requires restart)
### next line has been commented out by spacewalk-setup-postgresql ###
##max_connections = 100 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directory = '' # (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
#bonjour_name = '' # defaults to the computer name
# - Security and Authentication -
#authentication_timeout = 1min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
#ssl_renegotiation_limit = 512MB # amount of data between renegotiations
#password_encryption = on
#db_user_namespace = off
# Kerberos and GSSAPI
#krb_server_keyfile = ''
#krb_srvname = 'postgres' # (Kerberos only)
#krb_caseins_users = off
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
### next line has been commented out by spacewalk-setup-postgresql ###
##shared_buffers = 32MB # min 128kB
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
# It is not advisable to set max_prepared_transactions nonzero unless you
# actively intend to use prepared transactions.
#work_mem = 1MB # min 64kB
#maintenance_work_mem = 16MB # min 1MB
#max_stack_depth = 2MB # min 100kB
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0ms # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round
# - Asynchronous Behavior -
#effective_io_concurrency = 1 # 1-1000. 0 disables prefetching
# These are only used if logging_collector is on:
log_directory = 'pg_log' # directory where log files are written,
log_filename = 'postgresql-%a.log' # log file name pattern,
log_truncate_on_rotation = on # If on, an existing log file of the
checkpoint_completion_target = 0.7
checkpoint_segments = 8
effective_cache_size = 1152MB
log_line_prefix = '%m '
maintenance_work_mem = 96MB
max_connections = 600
shared_buffers = 384MB
wal_buffers = 4MB
work_mem = 2560kB
password_encryption on
db_user_namespace = off
bgwriter_delay = 200ms # 10-10000ms between rounds
checkpoint_timeout = 5min
tcp_keepalives_interval 300
max_stack_depth = 2048576 # Test of as_memory_bytes with string of digits
test_strange_quoting '''strange quoting\\''
""".strip()
def test_postgresql_conf():
result = PostgreSQLConf(context_wrap(postgresql_conf_cnt))
assert result.get("checkpoint_segments") == "8"
# The bit before the hash mark should still be treated as valid:
assert result.get("log_filename") == "postgresql-%a.log"
# Quoting allows spaces at beginning or end of value
assert result.get("log_line_prefix") == "%m "
# Equals signs are optional
assert result.get("password_encryption") == "on"
# Values can include a quote with '' or \\' - both work.
assert result.get("test_strange_quoting") == "'strange quoting'"
# Default value tests
# get method from LegacyItemAccess
assert result.get(None) is None
assert result.get('') is None
assert 'listen_addresses' not in result
assert result.get('listen_addresses', 'localhost') == 'localhost'
def test_postgresql_conf_conversions():
result = PostgreSQLConf(context_wrap(postgresql_conf_cnt))
assert result.as_duration('bgwriter_delay') == 0.2
assert result.as_duration('checkpoint_timeout') == 300
assert result.as_duration('tcp_keepalives_interval') == 300
# Default value tests do conversions as well
assert result.as_duration(None) is None
assert 'vacuum_cost_delay' not in result
assert result.as_duration('vacuum_cost_delay', '200ms') == 0.2
assert result.as_duration('tcp_keepalives_idle', '0') == 0
assert result.as_duration('tcp_keepalives_idle', 0) == 0
assert result.as_boolean('password_encryption')
assert not result.as_boolean('db_user_namespace')
# Default value tests do conversions as well
assert result.as_boolean(None) is None
assert result.as_boolean('no_such_property', True)
assert 'krb_caseins_users' not in result
assert not result.as_boolean('krb_caseins_users', 'no')
assert result.as_memory_bytes('work_mem') == 2560 * 1024
assert result.as_memory_bytes('wal_buffers') == 4 * 1048576
# No scaling necessary if no suffix but conversion to int done
assert result.as_memory_bytes('max_stack_depth') == 2048576
# Default value tests do conversions as well
assert result.as_memory_bytes(None) is None
assert 'temp_buffers' not in result
assert result.as_memory_bytes('temp_buffers', '8MB') == 8192 * 1024
assert result.as_memory_bytes('temp_buffers', '8388608') == 8192 * 1024
assert result.as_memory_bytes('temp_buffers', 8388608) == 8192 * 1024
def test_postgresql_conf_conversion_errors():
result = PostgreSQLConf(context_wrap(postgresql_conf_cnt))
# Test that we raise the right errors for bad conversions
# Can't chain them because the raised error aborts further checks.
with pytest.raises(ValueError):
assert result.as_duration('log_filename')
with pytest.raises(ValueError):
assert result.as_duration('db_user_namespace')
with pytest.raises(ValueError):
assert result.as_boolean('log_directory')
with pytest.raises(ValueError):
assert result.as_boolean('checkpoint_segments')
with pytest.raises(ValueError):
assert result.as_memory_bytes('log_line_prefix')
with pytest.raises(ValueError):
assert result.as_memory_bytes('checkpoint_timeout')
| 40 | 79 | 0.686147 |
7945f2dc4c7157ed50d96b2bef1ea26530f0f499 | 25,667 | py | Python | volttron/platform/vip/agent/subsystems/rpc.py | SKalt/volttron | 79f077b5035188becb2f91514ba1ef97758d0083 | [
"Apache-2.0"
] | null | null | null | volttron/platform/vip/agent/subsystems/rpc.py | SKalt/volttron | 79f077b5035188becb2f91514ba1ef97758d0083 | [
"Apache-2.0"
] | null | null | null | volttron/platform/vip/agent/subsystems/rpc.py | SKalt/volttron | 79f077b5035188becb2f91514ba1ef97758d0083 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import inspect
import logging
import os
import sys
import traceback
import weakref
import re
import gevent.local
from gevent.event import AsyncResult
from volttron.platform import jsonapi
from volttron.platform.agent.utils import get_messagebus
from .base import SubsystemBase
from ..errors import VIPError
from ..results import counter, ResultsDictionary
from ..decorators import annotate, annotations, dualmethod, spawn
from .... import jsonrpc
from volttron.platform.vip.socket import Message
from zmq import Frame, NOBLOCK, ZMQError, EINVAL, EHOSTUNREACH
from zmq.green import ENOTSOCK
__all__ = ['RPC']
_ROOT_PACKAGE_PATH = os.path.dirname(
__import__(__name__.split('.', 1)[0]).__path__[-1]) + os.sep
_log = logging.getLogger(__name__)
def _isregex(obj):
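    # True only for strings of the form '/pattern/' (a slash-delimited regex literal),
    # which _add_auth_check below treats as a pattern to match against call arguments.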
return obj is not None and isinstance(obj, str) and len(obj) > 1 and obj[0] == obj[-1] == '/'
class Dispatcher(jsonrpc.Dispatcher):
def __init__(self, methods, local):
super(Dispatcher, self).__init__()
self.methods = methods
self.local = local
self._results = ResultsDictionary()
def serialize(self, json_obj):
return jsonapi.dumps(json_obj)
def deserialize(self, json_string):
return jsonapi.loads(json_string)
def batch_call(self, requests):
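        # Build one JSON-RPC batch from (notify, method, args, kwargs) tuples; plain
        # calls each get an AsyncResult keyed by a fresh ident, notifications get none.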
methods = []
results = []
for notify, method, args, kwargs in requests:
if notify:
ident = None
else:
result = next(self._results)
ident = result.ident
results.append(result)
methods.append((ident, method, args, kwargs))
return super(Dispatcher, self).batch_call(methods), results
def call(self, method, args=None, kwargs=None):
# pylint: disable=arguments-differ
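        # Allocate an AsyncResult for a fresh ident and return it together with the
        # JSON-RPC request; the result is set later by result()/error() when the
        # matching response arrives.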
result = next(self._results)
return super(Dispatcher, self).call(
result.ident, method, args, kwargs), result
def result(self, response, ident, value, context=None):
try:
result = self._results.pop(ident)
except KeyError:
return
result.set(value)
def error(self, response, ident, code, message, data=None, context=None):
try:
result = self._results.pop(ident)
except KeyError:
return
result.set_exception(jsonrpc.exception_from_json(code, message, data))
def exception(self, response, ident, message, context=None):
# XXX: Should probably wrap exception in RPC specific error
# rather than re-raising.
exc_type, exc, exc_tb = sys.exc_info() # pylint: disable=unused-variable
try:
result = self._results.pop(ident)
except KeyError:
return
result.set_exception(exc)
def method(self, request, ident, name, args, kwargs,
batch=None, context=None):
if kwargs:
try:
args, kwargs = kwargs['*args'], kwargs['**kwargs']
except KeyError:
pass
try:
method = self.methods[name]
except KeyError:
if name == 'inspect':
return {'methods': list(self.methods)}
elif name.endswith('.inspect'):
try:
method = self.methods[name[:-8]]
except KeyError:
pass
else:
return self._inspect(method)
raise NotImplementedError(name)
local = self.local
local.vip_message = context
local.request = request
local.batch = batch
try:
return method(*args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
exc_tb = traceback.format_exc()
_log.error('unhandled exception in JSON-RPC method %r: \n%s',
name, exc_tb)
if getattr(method, 'traceback', True):
exc.exc_info = {'exc_tb': exc_tb}
raise
finally:
del local.vip_message
del local.request
del local.batch
def _inspect(self, method):
params = inspect.getargspec(method)
if hasattr(method, 'im_self'):
params.args.pop(0)
response = {'params': params}
doc = inspect.getdoc(method)
if doc:
response['doc'] = doc
try:
source = inspect.getsourcefile(method)
cut = len(os.path.commonprefix([_ROOT_PACKAGE_PATH, source]))
source = source[cut:]
lineno = inspect.getsourcelines(method)[1]
except IOError:
pass
else:
response['source'] = source, lineno
try:
# pylint: disable=protected-access
response['return'] = method._returns
except AttributeError:
pass
return response
class RPC(SubsystemBase):
def __init__(self, core, owner, peerlist_subsys):
self.core = weakref.ref(core)
self._owner = owner
self.context = None
self._exports = {}
self._dispatcher = None
self._counter = counter()
self._outstanding = weakref.WeakValueDictionary()
core.register('RPC', self._handle_subsystem, self._handle_error)
core.register('external_rpc', self._handle_external_rpc_subsystem, self._handle_error)
self._isconnected = True
self._message_bus = self.core().messagebus
self.peerlist_subsystem = peerlist_subsys
self.peer_list = {}
def export(member): # pylint: disable=redefined-outer-name
for name in annotations(member, set, 'rpc.exports'):
self._exports[name] = member
inspect.getmembers(owner, export)
def setup(sender, **kwargs):
# pylint: disable=unused-argument
self.context = gevent.local.local()
self._dispatcher = Dispatcher(self._exports, self.context)
core.onsetup.connect(setup, self)
core.ondisconnected.connect(self._disconnected)
core.onconnected.connect(self._connected)
self._iterate_exports()
def _connected(self, sender, **kwargs):
self._isconnected =True
# Registering to 'onadd' and 'ondrop' signals to get notified whenever new peer is added/removed
self.peerlist_subsystem.onadd.connect(self._add_new_peer)
self.peerlist_subsystem.ondrop.connect(self._drop_new_peer)
def _disconnected(self, sender, **kwargs):
self._isconnected = False
def _add_new_peer(self, sender, **kwargs):
try:
peer = kwargs.pop('peer')
message_bus = kwargs.pop('message_bus')
self.peer_list[peer] = message_bus
except KeyError:
pass
def _drop_new_peer(self, sender, **kwargs):
try:
peer = kwargs.pop('peer')
self.peer_list.pop(peer)
except KeyError:
pass
def _iterate_exports(self):
'''Iterates over exported methods and adds authorization checks
as necessary
'''
for method_name in self._exports:
method = self._exports[method_name]
caps = annotations(method, set, 'rpc.allow_capabilities')
if caps:
self._exports[method_name] = self._add_auth_check(method, caps)
def _add_auth_check(self, method, required_caps):
'''Adds an authorization check to verify the calling agent has the
required capabilities.
'''
def checked_method(*args, **kwargs):
user = str(self.context.vip_message.user)
if self._message_bus == "rmq":
# When we address issue #2107 external platform user should
# have instance name also included in username.
user = user.split(".")[1]
_log.debug("Current user in checked_method is {}".format(user))
user_capabilites = self._owner.vip.auth.get_capabilities(user)
_log.debug("**user caps is: {}".format(user_capabilites))
if user_capabilites:
user_capabilities_names = set(user_capabilites.keys())
else:
user_capabilities_names = set()
_log.debug("Required caps is : {}".format(required_caps))
_log.debug("user capability names: {}".format(user_capabilities_names))
if not required_caps.issubset(user_capabilities_names):
msg = ('method "{}" requires capabilities {}, but capability {} was'
' provided for user {}').format(method.__name__, required_caps, user_capabilites, user)
raise jsonrpc.exception_from_json(jsonrpc.UNAUTHORIZED, msg)
else:
# Now check if args passed to method are the ones allowed.
for cap_name, param_dict in user_capabilites.items():
if param_dict and required_caps and cap_name in required_caps:
# if the method has required capabilities and
# if the user capability has argument restrictions, check if the args passed to method
# match the requirement
_log.debug("args = {} kwargs= {}".format(args, kwargs))
args_dict = inspect.getcallargs(method, *args, **kwargs)
_log.debug("dict = {}".format(args_dict))
_log.debug("name= {} parameters allowed={}".format(cap_name, param_dict))
for name, value in param_dict.items():
_log.debug("name= {} value={}".format(name, value))
if name not in args_dict:
raise jsonrpc.exception_from_json(jsonrpc.UNAUTHORIZED,
"User {} capability is not defined "
"properly. method {} does not have "
"a parameter {}".format(user, method.__name__, name))
if _isregex(value):
regex = re.compile('^' + value[1:-1] + '$')
if not regex.match(args_dict[name]):
raise jsonrpc.exception_from_json(jsonrpc.UNAUTHORIZED,
"User {} can call method {} only "
"with {} matching pattern {} but called with "
"{}={}".format(user, method.__name__, name, value,
name, args_dict[name]))
elif args_dict[name] != value:
raise jsonrpc.exception_from_json(jsonrpc.UNAUTHORIZED,
"User {} can call method {} only "
"with {}={} but called with "
"{}={}".format(user, method.__name__, name, value,
name, args_dict[name]))
return method(*args, **kwargs)
return checked_method
@spawn
def _handle_external_rpc_subsystem(self, message):
ret_msg = dict()
#_log.debug("EXT_RPC subsystem handler IN message {0}".format(message))
op = message.args[0]
rpc_msg = message.args[1] #jsonapi.loads(message.args[1])
try:
#_log.debug("EXT_RPC subsystem handler IN message {0}, {1}".format(message.peer, rpc_msg))
method_args = rpc_msg['args']
#message.args = [method_args]
message.args = method_args
for idx, msg in enumerate(message.args):
if isinstance(msg, str):
message.args[idx] = jsonapi.loads(msg)
dispatch = self._dispatcher.dispatch
#_log.debug("External RPC IN message args {}".format(message))
responses = [response for response in (
dispatch(msg, message) for msg in message.args) if response]
#_log.debug("External RPC Responses {}".format(responses))
if responses:
message.user = ''
try:
message.peer = ''
message.subsystem = 'external_rpc'
frames = []
op = 'send_platform'
frames.append(op)
msg = jsonapi.dumps(dict(to_platform=rpc_msg['from_platform'],
to_peer=rpc_msg['from_peer'],
from_platform=rpc_msg['to_platform'],
from_peer=rpc_msg['to_peer'], args=responses))
frames.append(msg)
except KeyError:
_log.error("External RPC message did not contain proper message format")
message.args = jsonapi.dumps(ret_msg)
#_log.debug("EXT_RPC subsystem handler OUT message {}".format(message))
try:
self.core().connection.send_vip(peer='',
subsystem='external_rpc',
args=frames,
msg_id=message.id,
user=message.user,
copy=False)
except ZMQError as ex:
_log.error("ZMQ error: {}".format(ex))
pass
except KeyError:
pass
@spawn
def _handle_subsystem(self, message):
dispatch = self._dispatcher.dispatch
if self._message_bus == "rmq":
for idx, msg in enumerate(message.args):
if not isinstance(msg, dict):
message.args[idx] = jsonapi.loads(msg)
responses = [response for response in (
dispatch(msg, message) for msg in message.args) if response]
else:
responses = [response for response in (
dispatch(msg, message) for msg in message.args) if response]
if responses:
message.user = ''
message.args = responses
try:
if self._isconnected:
if self._message_bus == 'zmq':
self.core().connection.send_vip_object(message, copy=False)
else:
# Agent is running on RMQ message bus.
# Adding backward compatibility support for ZMQ. Check if the peer
# is running on ZMQ bus. If yes, send RPC message to proxy router
# agent to forward using ZMQ message bus connection
try:
msg_bus = self.peer_list[message.peer]
except KeyError:
msg_bus = self._message_bus
if msg_bus == 'zmq':
# If peer connected to ZMQ bus, send via proxy router agent
self.core().connection.send_vip_object_via_proxy(message)
else:
self.core().connection.send_vip_object(message, copy=False)
except ZMQError as exc:
if exc.errno == ENOTSOCK:
_log.debug("Socket send on non socket {}".format(self.core().identity))
def _handle_error(self, sender, message, error, **kwargs):
result = self._outstanding.pop(message.id, None)
if isinstance(result, AsyncResult):
result.set_exception(error)
elif result:
for result in result:
result.set_exception(error)
@dualmethod
def export(self, method, name=None):
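        # Instance form: register an already-bound callable at runtime, e.g.
        # self.vip.rpc.export(self.handle_foo, 'foo') from inside an agent.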
self._exports[name or method.__name__] = method
return method
@export.classmethod
def export(cls, name=None): # pylint: disable=no-self-argument
if name is not None and not isinstance(name, str):
method, name = name, name.__name__
annotate(method, set, 'rpc.exports', name)
return method
def decorate(method):
annotate(method, set, 'rpc.exports', name)
return method
return decorate
def batch(self, peer, requests):
request, results = self._dispatcher.batch_call(requests)
if results:
items = weakref.WeakSet(results)
ident = '%s.%s' % (next(self._counter), id(items))
for result in results:
result._weak_set = items # pylint: disable=protected-access
self._outstanding[ident] = items
else:
ident = ''
if request:
if self._isconnected:
try:
self.core().connection.send_vip(peer, 'RPC', [request], msg_id=ident)
except ZMQError as exc:
if exc.errno == ENOTSOCK:
_log.debug("Socket send on non socket {}".format(self.core().identity))
return results or None
def call(self, peer, method, *args, **kwargs):
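        # Call `method` on `peer` and return an AsyncResult that is resolved when the
        # peer's response (or error) comes back; pass external_platform='<platform>'
        # to route the request to a peer on another platform.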
platform = kwargs.pop('external_platform', '')
request, result = self._dispatcher.call(method, args, kwargs)
ident = f'{next(self._counter)}.{hash(result)}'
self._outstanding[ident] = result
subsystem = None
frames = []
if not self._isconnected:
return
if self._message_bus == 'zmq':
if platform == '':#local platform
subsystem = 'RPC'
frames.append(request)
else:
frames = []
op = 'send_platform'
subsystem = 'external_rpc'
frames.append(op)
msg = dict(to_platform=platform, to_peer=peer,
from_platform='', from_peer='', args=[request])
frames.append(msg)
peer = ''
try:
# _log.debug("peer: {0}, subsytem: {1}, args:{2}, id: {3}".format(peer, subsystem,
# args, id))
self.core().connection.send_vip(peer,
subsystem,
args=frames,
msg_id=ident)
except ZMQError as exc:
if exc.errno == ENOTSOCK:
_log.debug("Socket send on non socket {}".format(self.core().identity))
# _log.debug("RPC subsystem: External platform RPC msg: {}".format(frames))
else:
# Agent running on RMQ message bus.
# Adding backward compatibility support for ZMQ. Check if peer
# is running on ZMQ bus. If yes, send RPC message to proxy router agent to
# forward over ZMQ message bus connection
try:
peer_msg_bus = self.peer_list[peer]
except KeyError:
peer_msg_bus = self._message_bus
if peer_msg_bus == 'zmq':
# peer connected to ZMQ bus, send via proxy router agent
self.core().connection.send_via_proxy(peer, 'RPC', msg_id=ident, args=[request])
else:
self.core().connection.send_vip(peer,
'RPC',
args=[request],
msg_id=ident,
platform=platform)
return result
__call__ = call
def notify(self, peer, method, *args, **kwargs):
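        # Fire-and-forget variant of call(): sends a JSON-RPC notification, so no
        # ident is allocated and no result is returned.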
platform = kwargs.pop('external_platform', '')
request = self._dispatcher.notify(method, args, kwargs)
frames = []
if not self._isconnected:
return
if self._message_bus == 'zmq':
subsystem = None
if platform == '':
subsystem = 'RPC'
frames.append(request)
else:
op = 'send_platform'
subsystem = 'external_rpc'
frames.append(op)
msg = dict(to_platform=platform, to_peer=peer,
from_platform='', from_peer='', args=[request])
frames.append(msg)
peer = ''
try:
# _log.debug("peer: {0}, subsytem: {1}, args:{2}".format(peer, subsystem,
# frames))
self.core().connection.send_vip(peer,
subsystem,
args=frames)
except ZMQError as exc:
if exc.errno == ENOTSOCK:
_log.debug("Socket send on non socket {}".format(self.core().identity))
else:
# Agent running on RMQ message bus.
# Adding backward compatibility support for ZMQ. Check if peer
# is running on ZMQ bus. If yes, send RPC message to proxy router agent to
# forward over ZMQ message bus connection
try:
peer_msg_bus = self.peer_list[peer]
except KeyError:
#self.peer_list = self.peerlist_subsystem.list_with_messagebus().get(2)
#_log.debug("PEERS: {}".format(self.peer_list))
peer_msg_bus = self._message_bus
if peer_msg_bus == 'zmq':
# peer connected to ZMQ bus, send via proxy router agent
self.core().connection.send_via_proxy(peer,
'RPC',
args=[request])
else:
self.core().connection.send_vip(peer,
'RPC',
args=[request],
platform=platform)
@dualmethod
def allow(self, method, capabilities):
if isinstance(capabilities, str):
cap = set([capabilities])
else:
cap = set(capabilities)
self._exports[method.__name__] = self._add_auth_check(method, cap)
@allow.classmethod
def allow(cls, capabilities):
"""
Decorator specifies required agent capabilities to call a method.
This is designed to be used with the export decorator:
.. code-block:: python
@RPC.export
@RPC.allow('can_read_status')
def get_status():
...
Multiple capabilities can be provided in a list:
.. code-block:: python
@RPC.allow(['can_read_status', 'can_call_my_methods'])
"""
def decorate(method):
if isinstance(capabilities, str):
annotate(method, set, 'rpc.allow_capabilities', capabilities)
else:
for cap in capabilities:
annotate(method, set, 'rpc.allow_capabilities', cap)
return method
return decorate
| 42.707155 | 120 | 0.538084 |
7945f2ec77908a49a4929ea333d7b27c57bb0187 | 3,944 | py | Python | tu_gerente/reservar/models.py | luzmar7/backend_tugerente | 54aaaf5ee2cd4dc24f4cffb06ac533977c8ed288 | [
"MIT"
] | null | null | null | tu_gerente/reservar/models.py | luzmar7/backend_tugerente | 54aaaf5ee2cd4dc24f4cffb06ac533977c8ed288 | [
"MIT"
] | null | null | null | tu_gerente/reservar/models.py | luzmar7/backend_tugerente | 54aaaf5ee2cd4dc24f4cffb06ac533977c8ed288 | [
"MIT"
] | null | null | null | from django.db import models
import datetime
# Create your models here.
class Habitacion(models.Model):
ESTADO = (
('Bueno','Bueno'),
('Regular','Regular')
)
id_habitacion = models.AutoField(
primary_key=True,
verbose_name="Id Habitacion"
)
descripcion = models.CharField(
verbose_name="descripcion",
max_length=200,
null=True,
blank = True,
)
numero = models.PositiveIntegerField(
verbose_name="numero"
)
piso = models.PositiveIntegerField(
verbose_name="piso"
)
precio_dia = models.DecimalField(
max_digits=10,
decimal_places=3,
verbose_name="precio_dia"
)
estado_habitacion = models.CharField(
max_length = 30,
choices = ESTADO,
default = 'Bueno',
verbose_name="estado"
)
def __int__(self):
return self.id_habitacion
class Meta:
verbose_name_plural = ("Habitaciones")
class Cliente(models.Model):
id_cliente = models.AutoField(
primary_key=True,
verbose_name="Id Cliente"
)
nombre = models.CharField(
verbose_name="nombre",
max_length= 30
)
apellido_paterno = models.CharField(
verbose_name="Apellido Paterno",
max_length = 50
)
apellido_materno = models.CharField(
verbose_name="Apellido Materno",
max_length = 50
)
numero_identificacion = models.CharField(
verbose_name="numero de identificacion",
max_length = 15
)
direccion = models.CharField(
verbose_name="direccion",
max_length = 100
)
telefono = models.IntegerField(
verbose_name="telefono",
null = True
)
celular = models.IntegerField(
verbose_name="celular",
null = True
)
def __int__(self):
return self.id_cliente
class Meta:
verbose_name_plural = ("Clientes")
class Reserva(models.Model):
ESTADO = (
('Pendiente','Pendiente'),
('Pagado','Pagado'),
('Eliminado','Eliminado')
)
id_reserva = models.AutoField(
primary_key=True,
verbose_name="Id Reserva"
)
id_habitacion = models.ForeignKey(
Habitacion,
on_delete=models.CASCADE,
verbose_name="Habitacion",
)
id_cliente = models.ForeignKey(
Cliente,
on_delete=models.CASCADE,
verbose_name="Cliente",
)
fecha_entrada= models.DateField(
default=datetime.date.today,
verbose_name="Fecha de Entrada",
)
fecha_salida = models.DateField(
verbose_name="Fecha de Salida",
null=True,
blank = True,
)
dias_estadia = models.IntegerField(
verbose_name="dias estadia",
null = True
)
precio_total = models.DecimalField(
max_digits=10,
decimal_places=3,
verbose_name="precio_total"
)
estado = models.CharField(
max_length = 30,
choices = ESTADO,
default = 'Pendiente',
verbose_name="Estado"
)
class Meta:
verbose_name = ("Receta")
verbose_name_plural = ("Reservas")
def __int__(self):
        return self.id_reserva
class Modo_Pago(models.Model):
id_modo_pago = models.AutoField(
primary_key=True,
verbose_name="Id Modo Pago"
)
nombre = models.CharField(
verbose_name="nombre",
max_length= 30
)
detalles = models.CharField(
verbose_name="detalles",
max_length= 30
)
def __int__(self):
return self.id_modo_pago
class Meta:
verbose_name_plural = ("Modo_Pago")
class Factura(models.Model):
id_factura = models.AutoField(
primary_key=True,
verbose_name="Id Factura"
)
id_cliente = models.ForeignKey(
Cliente,
on_delete=models.CASCADE,
verbose_name="Cliente",
)
id_reserva = models.ForeignKey(
Reserva,
on_delete=models.CASCADE,
verbose_name="Reserva",
)
id_modo_pago = models.ForeignKey(
Modo_Pago,
on_delete=models.CASCADE,
verbose_name="modo_pago",
)
fecha= models.DateField(
default=datetime.date.today,
verbose_name="Fecha de Entrada",
)
def __int__(self):
return self.id_factura
class Meta:
verbose_name_plural = ("Facturas") | 22.157303 | 44 | 0.674189 |
7945f307809734f889d5f001bc83bec7615c5ce0 | 4,232 | py | Python | build/lib/gcn/train.py | jindi-tju/MRFasGCN | 41b94278b80bbcb256097a5a3dfb6a433d9dbdc7 | [
"MIT"
] | 2 | 2021-06-20T03:34:09.000Z | 2022-03-24T07:22:56.000Z | build/lib/gcn/train.py | jindi-tju/MRFasGCN | 41b94278b80bbcb256097a5a3dfb6a433d9dbdc7 | [
"MIT"
] | null | null | null | build/lib/gcn/train.py | jindi-tju/MRFasGCN | 41b94278b80bbcb256097a5a3dfb6a433d9dbdc7 | [
"MIT"
] | 1 | 2022-03-17T01:26:07.000Z | 2022-03-17T01:26:07.000Z | from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from gcn.utils import *
from gcn.models import GCN, MLP
# Set random seed
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'cora', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'gcn', 'Model string.') # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
flags.DEFINE_float('dropout', 0.8, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
# Load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
test_idx_reorder = np.loadtxt("data/ind.{}.test.index".format(FLAGS.dataset), dtype=int)
# Some preprocessing
features = preprocess_features(features)
if FLAGS.model == 'gcn':
support = [preprocess_adj(adj)]
num_supports = 1
model_func = GCN
elif FLAGS.model == 'gcn_cheby':
support = chebyshev_polynomials(adj, FLAGS.max_degree)
num_supports = 1 + FLAGS.max_degree
model_func = GCN
elif FLAGS.model == 'dense':
support = [preprocess_adj(adj)] # Not used
num_supports = 1
model_func = MLP
else:
raise ValueError('Invalid argument for model: ' + str(FLAGS.model))
# Define placeholders
placeholders = {
'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
'labels_mask': tf.placeholder(tf.int32),
'dropout': tf.placeholder_with_default(0., shape=()),
'num_features_nonzero': tf.placeholder(tf.int32) # helper variable for sparse dropout
}
# Create model
model = model_func(placeholders, input_dim=features[2][1], logging=True)
# Initialize session
sess = tf.Session()
# Define model evaluation function
def evaluate(features, support, labels, mask, placeholders):
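    # Evaluate loss and accuracy on the given split; also return the argmax class
    # predictions, which are reused below to compute NMI on the test indices.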
t_test = time.time()
feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
outputs=tf.argmax(model.outputs,1)
outs_val = sess.run([model.loss, model.accuracy, outputs], feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], (time.time() - t_test), outs_val[2]
# Init variables
sess.run(tf.global_variables_initializer())
cost_val = []
# Train model
for epoch in range(FLAGS.epochs):
t = time.time()
# Construct feed dictionary
feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Training step
outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)
# Validation
cost, acc, duration ,outputs= evaluate(features, support, y_val, val_mask, placeholders)
cost_val.append(cost)
# Print results
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
"train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
"val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))
if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
print("Early stopping...")
break
print("Optimization Finished!")
# Testing
test_cost, test_acc, test_duration, outputs = evaluate(features, support, y_test, test_mask, placeholders)
print("Test set results:", "cost=", "{:.5f}".format(test_cost),
"accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))
outputs_nmi = outputs[test_idx_reorder]
y_test_nmi = y_test[test_idx_reorder]
test_nmi = NMI(outputs_nmi, y_test_nmi)
print("NMI=", "{:.5f}".format(test_nmi)) | 36.8 | 106 | 0.708176 |
7945f319904576257eca8c611cc50101c997456c | 22,984 | py | Python | JumpScale9Portal/portal/docgenerator/Confluence2HTML.py | Jumpscale/portal9 | fd4c9718daf51b877a6a98bd0d1ff4bc0b272f67 | [
"Apache-2.0"
] | 1 | 2017-06-07T08:12:09.000Z | 2017-06-07T08:12:09.000Z | JumpScale9Portal/portal/docgenerator/Confluence2HTML.py | Jumpscale/portal9 | fd4c9718daf51b877a6a98bd0d1ff4bc0b272f67 | [
"Apache-2.0"
] | 36 | 2017-05-18T10:54:44.000Z | 2019-03-27T11:24:20.000Z | JumpScale9Portal/portal/docgenerator/Confluence2HTML.py | Jumpscale/portal9 | fd4c9718daf51b877a6a98bd0d1ff4bc0b272f67 | [
"Apache-2.0"
] | 1 | 2018-06-12T05:18:01.000Z | 2018-06-12T05:18:01.000Z | import re
from js9 import j
from JumpScale9Portal.portal.macrolib import div_base
from .PageHTML import PageHTML
class Confluence2HTML():
def defGet(self, name):
name = name.lower().replace("_", "").replace("-", "").replace(" ", "")
if name in j.portal.tools.defmanager.defmanager.aliases:
name = j.portal.tools.defmanager.defmanager.aliases[name]
if name not in j.portal.tools.defmanager.defmanager.defs:
return None
return j.portal.tools.defmanager.defmanager.defs[name]
def processDefs(self, line, doc, page):
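        # Walk the line token by token and replace any bare token that matches a known
        # definition (defGet) with a wiki link [name|pagename], then resolve links.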
if not doc.processDefs:
return line
# print "processdefs:%s"%line
def processToken(token):
if token.find("{") != -1 or token.find("[") != -1 or token.find("]") != -1:
return token
# print "tok1:%s"%token
deff = self.defGet(token)
if deff is not None:
# print "founddef"
token = "[%s|%s]" % (deff.name, deff.pagename)
# print "tok2:%s"%token
return token
token = ""
lineout = ""
for char in line:
if char in [",", ";", ":", " ", ".", "?", "!", "|"]:
token = processToken(token)
lineout += "%s%s" % (token, char)
token = ""
elif char in ["/", "\\", "]"]:
lineout += "%s%s" % (token, char)
token = ""
else:
token += char
lineout += processToken(token)
lineout = self.findLinks(lineout)
return lineout
@staticmethod
def findLinks(line, lower=True):
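        # Convert wiki links such as [descr|link], [descr|link|id=...|class=...] or
        # targets written as "space;pagename" into HTML anchors via PageHTML.getLink.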
# r=r"\[[-:|_@#.?\w\s\\=/&]*\]"
r = r"(\[.+?\|.+?\])"
#r = r"\[[^\[\]]+\]" # TODO: does not seem right to me
if j.data.regex.match(r, line): # find links
# print "match %s"% line
htmlelements = ""
for match in j.data.regex.yieldRegexMatches(r, line):
# print "link: %s" % match.founditem
link_id = link_class = None
match2 = match.founditem.replace("[", "").replace("]", "")
if match2.find("|") != -1:
parts = match2.split("|")
descr = parts[0]
link = parts[1]
if len(parts) >= 3:
if parts[2].strip() != "":
link_id = (parts[2].split('=')[1]).strip()
if len(parts) >= 4:
if parts[2].strip() != "":
link_class = (parts[3].split('=')[1]).strip()
if len(parts) >= 5:
htmlelements = parts[4]
elif match2.find(":") != -1:
descr, link = match2.split(":", 1)[1], match2
else:
link = match2
descr = link
# if link.find(":") != -1: # TODO: what was the reason for this, probly have broken something now
# link=link.replace(":","___")
if link.find(";") != -1:
space, pagename = link.split(";", 1)
if lower:
space = space.lower()
link = "/%s/%s" % (space.strip().strip("/"), pagename.strip().strip("/"))
# print "match:%s"%match.founditem
# print "getlink:%s" %page.getLink(descr,link)
line = line.replace(match.founditem, PageHTML.getLink(descr, link, link_id, link_class, htmlelements))
return line
# This is copied from PageHTML.py
# TODO: use only one copy
@staticmethod
def _format_styles(styles):
"""
Return CSS styles, given a list of CSS attributes
@param styles a list of tuples, of CSS attributes, e.g. [("background-color", "green), ("border", "1px solid green")]
>>> PageHTML._format_styles([("background-color", "green"), ("border", "1px solid green")])
'background-color: green; border: 1px solid green'
"""
try:
return '; '.join('{0}: {1}'.format(*style) for style in styles)
except IndexError:
return ''
def createImage(self, title, imagePath, width=None, height=None, styles=[]):
"""
@param title alt text of the image
@param imagePath can be url or local path
@param width width of the image
@param height height of the image
@param styles a list of tuples, containing CSS attributes for the image, e.g. [("background-color", "green), ("border", "1px solid green")]
"""
width_n_height = ''
if width:
width_n_height += ' width="{0}"'.format(width)
if height:
width_n_height += ' height="{0}"'.format(height)
return "<img src='%s' alt='%s' %s style='clear:both;%s' />" % (
imagePath, title, width_n_height, Confluence2HTML._format_styles(styles))
def escape(self, content):
for char in "`*_+-?^[{(}]":
content = content.replace(char, '\\' + char)
return content
def convert(self, content, page=None, doc=None, requestContext=None, paramsExtra={}):
# Now I should format the wiki text, but keeping the macros untouched because I don't want to
# screw it
#
# Formatting of text inside the macro is the responsibility of the macro itself
# This is a list of formatting tags & I'm going to replace the in HTML, e.g. _word_ will be replaced with
# <em>word</em>
#styled_text = r'([\w\-:_/= *.\.\/\>\<\\{},|`!]+)'
styled_text = r'[^{0}\n]*?'
def limiter(char):
# Limiters can contain RE special chars, so I escape them here
limiter_re = ''.join('\\' + c for c in char)
# This is the RE which is used to replace wiki text formatting with equivalent HTML tag
return re.compile(r'(\W){0}([^ #{0}]{1}[^ \n{0}]?){0}(\W)'.format(
limiter_re, styled_text.format(limiter_re)))
def limiter_replacement(sub):
return r'\1<{0}>\2</{0}>\3'.format(sub)
def substitute_email(match):
return r'<a href="{0}">{1}</a>'.format(match.group(1), match.group(1).replace('mailto:', '', 1))
def escape_char(char):
return '&#{0};'.format(ord(char.group(1)))
substitutions = [
(r'\\([^\n\r\\])', escape_char),
('<', '<'),
('>', '>'),
(r'\@LF\b', '<br>'), # This should come after !=
(limiter('`'), limiter_replacement('code')),
(limiter('*'), limiter_replacement('strong')),
(limiter('_'), limiter_replacement('em')),
(limiter('+'), limiter_replacement('ins')),
(limiter('-'), limiter_replacement('strike')),
(limiter('??'), limiter_replacement('cite')),
(limiter('^'), limiter_replacement('sup')),
(limiter('~'), limiter_replacement('sub')),
# {color: red}text goes here{color}
(re.compile(r'\{{color\:(.*?)\}}({0})\{{color\}}'.format(styled_text.format('{}')),
flags=re.DOTALL | re.MULTILINE | re.IGNORECASE),
r'<span style="color:\1">\2</span>'),
# Links & emails
#(r'\[(.*?)\]', substitute_email),
# blockquote
(r'bq\.\s+(.*?)\n', r'<blockquote>\1</blockquote>\n'),
# Escape characters by putting \ in front of it, e.g. \*
]
# First, divide the text into macros & non-macros
blocks = re.split(r'({{.*?}})', content, flags=re.DOTALL)
for i in range(len(blocks)):
if blocks[i].startswith('{{'): # a macro
continue
for tag_re, sub_re in substitutions:
blocks[i] = re.sub(tag_re, sub_re, blocks[i])
content = ''.join(blocks)
if page is None:
page = j.portal.tools.docgenerator.docgeneratorfactory.pageNewHTML("temp")
# images=j.sal.fs.listFilesInDir(dirpath,False)
        # images3=[]
# for image in images:
# image2=image.lower()
# if image2.find(".jpg") != -1 or image2.find(".png") != -1:
# image2=image2.strip()
# image2=j.sal.fs.getBaseName(image2.replace("\\","/"))
# images3.append(image2)
state = "start"
macro = ""
params = ""
if content.find("@show") != -1:
# process show blocks before converting
state = "start"
result = False
out = ""
for line in content.split("\n"):
# print "%s %s" % (state,line)
if line.find("@show") == 0:
state = "hide"
pythoncode = line.replace("@show", "").strip()
paramsExtra2 = paramsExtra
paramsExtra2["doc"] = doc
paramsExtra2["page"] = page
paramsExtra2["requestContext"] = requestContext
try:
result = eval(pythoncode, paramsExtra)
except Exception:
#out+="***error***: Could not parse & execute %s, error was %s\n" % (pythoncode,e)
continue
if not j.data.types.bool.check(result):
#out+="***error***: Could not parse & execute %s, result needs to be a boolean.\n" % (pythoncode)
continue
if result == True:
state = "show"
continue
else:
state = "hide"
continue
if line.find("@end") == 0:
state = "start"
result = False
continue
if state == "start" or state == "show":
out += line + "\n"
content = out
ulAttributes = ''
for line in content.split("\n"):
self._lastLine = line
if state not in ['macro']:
line = line.strip()
# \\ on their own line will emit <br>
if line == r'\\':
page.addNewLine()
line = ''
continue
# print "#: %s %s" % (state,line)
# END TABLE
if state == "table" and (line[0:1] == "||" or line.find("|") != 0):
state = "start"
if params != "":
page.addList(trows, theader, classparams=params)
else:
page.addList(trows, theader)
params = ""
# PAGEBREAK
if state == "start" and (line.find(" ") != -1): # or line=="":
page.addNewLine()
continue
if state != "macro" and line == "":
page._checkBlock('', '', '')
continue
# SKIP LINES
if state != "macro" and line[0] == "#":
continue
# IMAGE
regex = r"\![\w\-:_/=*.,|?&][\w\-:_/= *.,|?&]*[\w\-:_/=*.,|?&]\!"
if (state == "start" or state == "table")and j.data.regex.match(regex, line):
matches = j.data.regex.findAll(regex, line)
for match in matches:
image = match.replace("!", "")
if '|' in image:
# Image may have attributes, like
# !image.png|border=1px solid black, margin=1px!
# these should be written as CSS properties. The syntax for the macro should follow CSS format
#
# Result: <img src="image.png" style="border: 1px solid black; margin: 1px" />
image, styles = image.split('|', 1)
styles = [attr.split('=') for attr in styles.split(',')]
else:
styles = []
if image.startswith('/') or image.startswith('http://'):
imagePath = image
else:
imagePath = "/images/%s/%s" % (doc.getSpaceName(), image)
if image not in doc.images:
# th=j.data.tags.getObject(tags)
# result=th.getValues(width=800,height=600,border=True)
#page.addImage(image, image, result["width"], result["height"])
#page.addImage(image, imagePath, styles=styles)
width = None
height = None
for item in styles:
if len(item) == 1 and item[0].find(":") != -1: # can be tag
tags = j.data.tags.getObject(item[0])
if tags.tagExists("width"):
width = tags.tagGet("width")
if tags.tagExists("height"):
height = tags.tagGet("height")
line = line.replace(
match,
self.createImage(
image,
imagePath,
width=width,
height=height,
styles=styles))
# continue
else:
imagePath, tags, _ = doc.images[image]
th = j.data.tags.getObject(tags)
result = th.getValues(width=None, height=None, border=True)
imagePath = "/images/%s/%s" % (doc.getSpaceName(), image)
#page.addImage(image, imagePath, result["width"], result["height"], styles)
line = line.replace(
match,
self.createImage(
image,
imagePath,
result["width"],
result["height"],
styles))
# continue
line = self.findLinks(line)
if line.find("{center}") > -1:
continue
if line.startswith("{toc:"):
# line="{{toc}}"
line = ""
continue
# 1 line macros
if (state == "start" or state == "table") and line.find("{{") != -1 and line.find("}}") != -1:
macros = doc.preprocessor.macroexecutorPage.getMacroCandidates(line)
for macro in macros:
# print "## 1linemacro:%s"%macro
# mtayseer: this condition looks wrong!!
if line.find("{{") != 0 or len(macros) > 1:
htmlMacro = doc.preprocessor.macroexecutorPage.executeMacroReturnHTML(macro,
doc=doc, requestContext=requestContext, paramsExtra=paramsExtra, pagemirror4jscss=page)
line = line.replace(macro, htmlMacro)
else:
doc.preprocessor.macroexecutorPage.executeMacroAdd2Page(macro, page, doc=doc,
requestContext=requestContext, paramsExtra=paramsExtra)
line = ""
macro = ""
# print "processed 1 macro line:%s"%line
if line.strip() == "":
continue
# print "after1linemacrostate:%s %s"%(line,state)
if state == "start" and line.find("{{") != -1:
state = "macro"
if state == "macro":
macro += "%s\n" % line
if state == "macro" and line.find("}}") >= 0:
state = "start"
# print "macroend:%s"%line
# macrostr=macro
# macro=macro.strip().lstrip("{{")
# macro=macro.rstrip("}}")
if doc is not None:
doc.preprocessor.macroexecutorPage.executeMacroAdd2Page(
macro, page, doc=doc, requestContext=requestContext, paramsExtra=paramsExtra)
macro = ""
# params=""
continue
macro = ""
continue
if line.strip() == "":
continue
# print "linkcheck: %s" % j.data.regex.match("\[[-\\:|_\w\s/]*\]",line)
# FIND LINKS
line = self.findLinks(line)
# HEADING
header = j.data.regex.getRegexMatch("^h(\d)\. (.+?)$", line)
if header and state == "start":
level, line = header.foundSubitems
level = int(level)
line = self.processDefs(line, doc, page)
page.addHeading(line, level)
continue
unorderedItem = j.data.regex.getRegexMatch("^(\*+) (.+?)$", line)
if state == "start" and unorderedItem:
stars, line = unorderedItem.foundSubitems
level = len(stars)
line = self.processDefs(line, doc, page)
page.addBullet(line, level, attributes=ulAttributes)
ulAttributes = '' # ulAttributes is set in the previous iteration of the for-loop. It should be reset _after_ the list is added
continue
numberedItem = j.data.regex.getRegexMatch("^\*(#+) (.+?)$", line)
if state == "start" and numberedItem:
hashes, line = numberedItem.foundSubitems
level = len(hashes)
line = self.processDefs(line, doc, page)
page.addBullet(line, level, bullet_type='number', tag='ol', attributes=ulAttributes)
ulAttributes = ''
continue
# Read styles for lists
# The syntax will be like this
#
# *- id=main-menu | class=nav nav-list
# * item 1
# * item 2
ulAttributes = j.data.regex.getRegexMatch("^(\*+)- (.+?)$", line)
if ulAttributes:
ulAttributes = div_base.tag_params_to_html_attrs(ulAttributes.foundSubitems[1])
continue
else:
ulAttributes = ''
if state == "start" and j.data.regex.match(".*\|\|.*", line) and len(line.split("||")) == 2:
# DESCRIPTIONS
p1, p2 = line.split("||")
p2 = self.processDefs(line, doc, page)
page.addDescr(p1, p2)
continue
if state == "start" and (line.find("@divend") == 0 or line.find("@rowend") ==
0 or line.find("@colend") == 0 or line.find("@blockend") == 0):
page.addMessage("</div>")
continue
if state == "start" and line.find("@block") == 0:
# divlower(divauto,page,"block")
arg = line.replace("@block", "").strip()
if arg == "":
arg = "container"
page.addMessage("<div class=\"%s\">" % arg)
page.divlevel.append("block")
continue
if state == "start" and line.find("@row") == 0:
# divlower(divauto,page,"row")
arg = line.replace("@row", "").strip()
if arg == "":
arg = "row-fluid page-content"
page.addMessage("<div class=\"%s\">" % arg)
page.divlevel.append("row")
continue
if state == "start" and line.find("@col") == 0:
# divlower(divauto,page,"col")
arg = line.replace("@col", "").strip()
page.addMessage("<div class=\"span%s\">" % arg)
page.divlevel.append("col")
continue
if state == "start" and line.find("@block") == 0:
arg = line.replace("@block", "").strip()
if arg == "":
arg = "container-fluid"
page.addMessage("<div class=\"%s\">" % arg)
page.divlevel += 1
# check params
if state == "start" and line.find("@params") == 0:
params = line.replace("@params", "").strip()
from JumpScale.core.Shell import ipshell
print("DEBUG NOW params, not implemented")
ipshell()
if state == "start" and line.find("||") == 0:
# beginning of table
line = self.processDefs(line, doc, page)
state = "table"
cols = line.split("||")
cols = cols[1:-1]
theader = cols
trows = []
continue
if state == "start" and line.find("|") == 0:
# beginning of table
line = self.processDefs(line, doc, page)
state = "table"
theader = ""
trows = []
if state == "table" and line.find("|") == 0:
# ADD ROW TO TABLE
line = self.processDefs(line, doc, page)
cols = line.split("|")
trows.append(cols[1:-1])
# was a regular line so add
if state != "macro" and state != "table" and line != "":
if line[0] != "@":
line = self.processDefs(line, doc, page)
page.addMessage(line, isElement=False)
if page.body != "":
# work on the special includes with [[]]
includes = j.data.regex.findAll("\[\[[\w :;,\.\*\!\?\^\=\'\-/]*\]\]", page.body)
for item in includes:
item2 = item.replace("[[", "").replace("]]", "")
if doc.preprocessor.docExists(item2):
doc2 = doc.preprocessor.docGet(item2)
else:
page.body = page.body.replace(
item, " ***error*** : COULD NOT FIND DOC %s, could not include." %
item2)
continue
page2 = j.portal.tools.docgenerator.docgeneratorfactory.pageNewHTML("includeInConfluence2Wiki")
page2.liblocation = page.liblocation
page2 = self.convert(doc2.content, page2, doc2, requestContext)
page.body = page.body.replace(item, page2.body)
return page
| 41.562387 | 181 | 0.449748 |
7945f5abd9459ce4da5fd0cfa388877439f1e0ab | 286 | py | Python | lonely/items.py | mouse-reeve/lonely-scraper | 2a344e09777a14b95577dd80b43749a3724dab34 | [
"MIT"
] | null | null | null | lonely/items.py | mouse-reeve/lonely-scraper | 2a344e09777a14b95577dd80b43749a3724dab34 | [
"MIT"
] | null | null | null | lonely/items.py | mouse-reeve/lonely-scraper | 2a344e09777a14b95577dd80b43749a3724dab34 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class LonelyItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| 19.066667 | 52 | 0.685315 |
7945f613dc9a15950235d6dcaf276ec3bb9dbd41 | 1,498 | py | Python | ppapi/generators/idl_visitor.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | ppapi/generators/idl_visitor.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | ppapi/generators/idl_visitor.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Visitor Object for traversing AST """
#
# IDLVisitor
#
# The IDLVisitor class will traverse an AST truncating portions of the tree
# when 'VisitFilter' returns false. After the filter returns true, for each
# node, the visitor will call the 'Arrive' member passing in the node and
# and generic data object from the parent call. The returned value is then
# passed to all children who's results are aggregated into a list. The child
# results along with the original Arrive result are passed to the Depart
# function which returns the final result of the Visit. By default this is
# the exact value that was return from the original arrive.
#
class IDLVisitor(object):
def __init__(self):
pass
# Return TRUE if the node should be visited
def VisitFilter(self, node, data):
return True
def Visit(self, node, data):
if not self.VisitFilter(node, data): return None
childdata = []
newdata = self.Arrive(node, data)
for child in node.GetChildren():
ret = self.Visit(child, newdata)
if ret is not None:
childdata.append(ret)
return self.Depart(node, newdata, childdata)
def Arrive(self, node, data):
__pychecker__ = 'unusednames=node'
return data
def Depart(self, node, data, childdata):
__pychecker__ = 'unusednames=node,childdata'
return data
| 32.565217 | 77 | 0.721629 |
7945f6f4f4622f9590a598974236c7033299db71 | 597 | py | Python | google/cloud/datastore/version.py | freelancing-solutions/python-datastore | 160f7751db6a2a27cdf2a5232eadc538de8a8268 | [
"Apache-2.0"
] | null | null | null | google/cloud/datastore/version.py | freelancing-solutions/python-datastore | 160f7751db6a2a27cdf2a5232eadc538de8a8268 | [
"Apache-2.0"
] | null | null | null | google/cloud/datastore/version.py | freelancing-solutions/python-datastore | 160f7751db6a2a27cdf2a5232eadc538de8a8268 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.1.1"
| 37.3125 | 74 | 0.757119 |
7945f71d97776ba37e1e170fe8aed3f29d1486ab | 4,587 | py | Python | Forecasting System/main.py | yinghonglin/Projects | 5af56a4fc5ebfade3c0afbfd63a6300d92831a2b | [
"MIT"
] | null | null | null | Forecasting System/main.py | yinghonglin/Projects | 5af56a4fc5ebfade3c0afbfd63a6300d92831a2b | [
"MIT"
] | 7 | 2020-03-24T17:58:58.000Z | 2022-02-10T01:16:50.000Z | Forecasting System/main.py | yinghonglin/Projects | 5af56a4fc5ebfade3c0afbfd63a6300d92831a2b | [
"MIT"
] | null | null | null | from Models import NN, DR, TM
from Helper import helper
import pandas as pd
import datetime
import time
import numpy as np
import json
class LoadPred(object):
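    """Wrapper around the candidate forecasters (NN, DR and TM from Models): for a
    given date it runs each model, keeps the one with the lowest validation MAPE and
    uses it to produce the 40-hour forecast evaluated in main() below."""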
def __init__(self, dataframe):
self.data = dataframe.copy()
self.NN = NN.NN(self.data)
self.DR = DR.DR(self.data)
self.TM = TM.TM(self.data)
self.models = [self.NN, self.DR, self.TM]
def create_validation_df(self):
self.validation_df = helper.validation_dataframe_cleaning(self.data)
def model_building(self, date):
self.date = date
self.MAPE = []
self.RMSE = []
exclude_model = [self.NN.name]
# exclude_model = [self.NN.name, self.TM.name]
for model in self.models:
print(f'-----------------------Running {model.name}-----------------------')
print(f'Date is {date}')
if model.name in exclude_model:
self.MAPE.append(float('inf'))
self.RMSE.append(float('inf'))
print(f'-----------------------{model.name} Complete-----------------------')
continue
start = time.time()
model.set_date(date)
model.model_selection_mape_rmse()
model.predict_next_40hours()
self.MAPE.append(model.mape)
self.RMSE.append(model.rmse)
end = time.time()
print(f'-----------------------{model.name} Complete-----------------------')
print(f'Status report: using {end - start} seconds')
print('************************************************************************************')
def ensemble_models(self):
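        # "Ensembling" here is a selection step: keep the candidate with the lowest
        # validation MAPE as the model used for the final forecast.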
index = self.MAPE.index(min(self.MAPE))
self.model = self.models[index]
def return_result(self):
self.forecast = self.model.predict_next_40hours()
return self.forecast
def get_error(self):
start = pd.to_datetime(self.date) + datetime.timedelta(hours=8)
end = pd.to_datetime(self.date) + datetime.timedelta(hours=47)
validation_list = self.validation_df[start:end]['Load'].tolist()
predict = self.forecast
res = predict
print(f'predict result: \n {predict}')
self.this_mape = helper.mape(validation_list, res)
self.this_rmse = helper.rmse(validation_list, res)
        print(f'start time: {start}, end time: {end}')
print(f'future mape: {self.this_mape}')
print(f'future rmse: {self.this_rmse}')
def peakhour(self):
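        # Compare the position of the predicted vs actual daily peak over the last 24
        # hours of the forecast window; sets self.peak_detected to 1 on a match.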
start = pd.to_datetime(self.date) + datetime.timedelta(hours=8)
end = pd.to_datetime(self.date) + datetime.timedelta(hours=47)
validation_list = self.validation_df[start:end]['Load'].tolist()
predict = self.forecast
validation_list = validation_list[-24:]
predict = predict[-24:]
validation_peak_index = validation_list.index(max(validation_list))
predict_peak_index = predict.index(max(predict))
if validation_peak_index == predict_peak_index:
self.peak_detected = 1
return 1
else:
self.peak_detected = 0
return 0
def main(data, date, length):
LP = LoadPred(data)
results_dict = dict()
datelist = list(map(str, pd.date_range(pd.to_datetime(date), periods=length).tolist()))
for date in datelist:
print('####################################################################################################')
print(f'Making prediction for {date}')
results_dict[date] = dict()
results_dict[date]['error'] = dict()
start = time.time()
LP.model_building(date)
LP.create_validation_df()
LP.ensemble_models()
LP.return_result()
LP.get_error()
LP.peakhour()
results_dict[date]['prediction'] = LP.forecast
results_dict[date]['error']['MAPE'] = LP.this_mape
results_dict[date]['error']['RMSE'] = LP.this_rmse
results_dict[date]['peak_detected'] = LP.peak_detected
print(f'peak hour: {LP.peak_detected}')
end = time.time()
print(f'used {end - start}')
results_dict[date]['time'] = end - start
print('####################################################################################################')
with open('predicted_results_2018Q4.json', 'w') as f:
json.dump(results_dict, f)
print('Results file generated')
if __name__ == '__main__':
path = 'Data/Hourly_Temp_Humi_Load-7.csv'
df = pd.read_csv(path)
main(df, '2018-10-01', 92)
| 36.696 | 117 | 0.548725 |
7945f754f398eab5629f3a42e6d86e62076e5810 | 1,179 | py | Python | task1.py | redwin/miscTools | 11779f7130efe58f930870dcde47993b65284aa2 | [
"MIT"
] | null | null | null | task1.py | redwin/miscTools | 11779f7130efe58f930870dcde47993b65284aa2 | [
"MIT"
] | null | null | null | task1.py | redwin/miscTools | 11779f7130efe58f930870dcde47993b65284aa2 | [
"MIT"
] | null | null | null |
maxlen=100000
minval=-1
maxval=200000
def solution(A):
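    """Appears to follow the index chain starting at A[0] (i -> A[i]) and count the
    positions visited until a -1 is reached; returns that count, or 0 for empty or
    over-long input, out-of-range jumps or revisited indices (inferred from run_test)."""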
length=len(A)
passedIndex=set()
if length == 0 or length > maxlen:
return 0
i=A[0]
passedIndex.add(0)
if length == 1:
if i == -1:
return 1
else:
return 0
while True:
if i <minval or i >maxval :
return 0
#loop exists
if i in passedIndex:
return 0
if i == -1:
return len(passedIndex)
#not valid index
if i >= length or i < -1:
return 0
passedIndex.add(i)
i = A[i]
def run_test():
test_suit = (
[],
[1],
[200000],
[1,1],
[1,2],
[2,1],
[200000,200000],
[1,-1],
[2,1,2],
[2,1,3],
[2,1,-1],
[1,4,-1,3,2],
[1,4,5,3,2],
[200000,4,5,3,2],
[1,-5,5,3,2],
[1,2,2000001,3,2],
)
for tc in test_suit:
print "test data:",tc, "solution data:",solution(tc)
if __name__=="__main__":
run_test()
| 15.513158 | 60 | 0.389313 |
7945f773cadbcc8af1af1231ca693ff3de2ed2bf | 4,091 | py | Python | bi4_export_webi2pdf.py | docmalkovich/sap-bi4-python | 3bebe754d84ab26bee889b02953d92d3b54db4d8 | [
"MIT"
] | null | null | null | bi4_export_webi2pdf.py | docmalkovich/sap-bi4-python | 3bebe754d84ab26bee889b02953d92d3b54db4d8 | [
"MIT"
] | null | null | null | bi4_export_webi2pdf.py | docmalkovich/sap-bi4-python | 3bebe754d84ab26bee889b02953d92d3b54db4d8 | [
"MIT"
] | null | null | null | #.Synopsis
#Exports WebI to PDF.
#.Description
#Logs onto BI Platform, retrieves PDF export of a Web Intelligence document and saves it to the specified file.
#Modify this script to enter the logon credentials, URL to the RESTful Web Services, Language, Document SI_ID and folder path.
#.Uses
#Python 3 and SAP BI4.1 SP3+
#Tested with Portable Python 3.2.5.1 on Windows XP and BI4.1 SP3
#.source
#https://blogs.sap.com/2014/10/17/scripting-web-intelligence-the-restful-raylight-web-services-with-python-sample/
import urllib.request
import urllib.parse
import json
import os
############################################################################################################################
# Input: logonInfo, hostUrl, locale, documentId and folderPath to suit your preferences
logonInfo = {
'userName' : 'administrator',
'password' : 'Password1',
'auth' : 'secEnterprise'
}
hostUrl = 'http://10.160.206.89:6405/biprws'
documentId = '7827' # SI_ID for the document
locale = 'en-US' # Format language for the WebI exporter
contentLocale = 'en-US' # Format language for the WebI document contents
folderPath = r'C:\Users\me\Desktop\RESTful' # Folder where PDF file will be saved.
############################################################################################################################
raylightUrl = hostUrl + '/raylight/v1'
documentUrl = raylightUrl + '/documents/' + documentId
# Logon and retrieve the logon token to be used in subsequent RESTful calls.
headers = {
'Content-Type' : 'application/json',
'Accept' : 'application/json',
}
d=str.encode(json.dumps(logonInfo))
result = urllib.request.urlopen(urllib.request.Request(hostUrl + "/logon/long",d,headers))
reponse=json.loads(result.read().decode('utf-8'))
logonToken=reponse['logonToken']
# Get Web Intelligence document information.
headers = {
'X-SAP-LogonToken' : '"'+logonToken+'"' ,
'Accept' : 'application/json',
'Content-Type' : 'application/json',
'Accept-Language' : locale,
'X-SAP-PVL' : contentLocale
}
result = urllib.request.urlopen( urllib.request.Request(documentUrl,None,headers) )
reponse=json.loads(result.read().decode('utf-8'))
document = reponse['document']
# Refresh the document by sending empty prompts (assumes document has no prompts).
headers = {
'X-SAP-LogonToken' : '"'+logonToken+'"' ,
'Accept' : 'application/json' ,
'Content-Type' : 'application/json' ,
'X-SAP-PVL' : contentLocale
}
parametersUrl = documentUrl + '/parameters'
urllib.request.urlopen( urllib.request.Request(parametersUrl,None,headers) )
# Retrieve and save PDF first ensuring the file path is valid.
filePath = os.path.join(folderPath , document['name'] + '.pdf')
if( os.access(os.path.dirname(filePath), os.W_OK) ) :
# Get PDF and save to file
headers = {
'X-SAP-LogonToken' : '"'+logonToken+'"' ,
'Accept' : 'application/pdf' ,
'X-SAP-PVL' : contentLocale
}
result = urllib.request.urlopen( urllib.request.Request(documentUrl + '/pages',None,headers) )
f = open(filePath, 'wb')
f.write(result.read())
f.close()
else :
print ('Invalid file path ' + filePath)
# Unload document from Raylight.
headers = {
'X-SAP-LogonToken' : '"'+logonToken+'"' ,
'Accept' : 'application/json' ,
'Content-Type' : 'application/json' ,
'X-SAP-PVL' : contentLocale
}
data = {
'document' : { 'state' : 'Unused' }
}
d=str.encode(json.dumps(data))
urllib.request.urlopen(urllib.request.Request(documentUrl,d,headers))
# Log off the Session identified by the X-SAP-LogonToken HTTP Header
headers = {
'X-SAP-LogonToken' : '"'+logonToken+'"' ,
'Accept' : 'application/json' ,
'Content-Type' : 'application/json'
}
urllib.request.urlopen(urllib.request.Request(hostUrl + '/logoff',b'',headers))
| 38.233645 | 127 | 0.606453 |
7945f795d1cfa63d2a5bb3a49f9e203d5cc5494d | 4,693 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/_policy_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/_policy_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/_policy_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations, PolicyDefinitionsOperations, PolicySetDefinitionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class PolicyClient:
"""To manage and control access to your resources, you can define customized policies and assign
them at a scope.
:ivar policy_assignments: PolicyAssignmentsOperations operations
:vartype policy_assignments:
azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations
:ivar policy_definitions: PolicyDefinitionsOperations operations
:vartype policy_definitions:
azure.mgmt.resource.policy.v2018_03_01.operations.PolicyDefinitionsOperations
:ivar policy_set_definitions: PolicySetDefinitionsOperations operations
:vartype policy_set_definitions:
azure.mgmt.resource.policy.v2018_03_01.operations.PolicySetDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2018-03-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = PolicyClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.policy_assignments = PolicyAssignmentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.policy_definitions = PolicyDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.policy_set_definitions = PolicySetDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> PolicyClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
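# --- Editor's addition: an illustrative usage sketch, not part of the generated SDK file. ---
# It assumes the separate `azure-identity` package for the credential and a placeholder
# subscription id; `policy_definitions.list()` comes from the operations classes wired up above.
def _example_list_policy_definitions():
    from azure.identity import DefaultAzureCredential  # assumed optional dependency
    with PolicyClient(credential=DefaultAzureCredential(), subscription_id="<subscription-id>") as client:
        for definition in client.policy_definitions.list():
            print(definition.name)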
| 44.273585 | 132 | 0.705732 |
7945f7a34dc91f25dda17927d66a6b477d6fe619 | 15,271 | py | Python | findthetail/ftt.py | fstroth/findthetail | f4525a1393ab362886395bfb3a789446c1ac5143 | [
"Apache-2.0"
] | 3 | 2018-07-26T23:08:39.000Z | 2021-05-03T20:09:02.000Z | findthetail/ftt.py | fstroth/findthetail | f4525a1393ab362886395bfb3a789446c1ac5143 | [
"Apache-2.0"
] | 1 | 2019-04-29T14:17:24.000Z | 2019-05-09T12:08:56.000Z | findthetail/ftt.py | fstroth/findthetail | f4525a1393ab362886395bfb3a789446c1ac5143 | [
"Apache-2.0"
] | 1 | 2019-11-04T12:59:21.000Z | 2019-11-04T12:59:21.000Z | """
.. module:: ftt
:platform: Unix, Windows
:synopsis: Module for Paper XY.
.. moduleauthor:: Frederik Strothmann <[email protected]>
"""
import os
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import genpareto
from jinja2 import Environment, FileSystemLoader, select_autoescape
from .teststatistics import au2, cramer_von_mises, anderson_darling
class Ftt:
def __init__(self, data, data_name, mc_steps=1000, threads=1):
self.data = data
self.data_name = data_name
self.mc_steps = mc_steps
self.mc_steps_run = 0
self.mc_error = None
self.mc_counter_au2 = 0
self.mc_counter_a2 = 0
self.mc_counter_w2 = 0
self.p_value_au2 = None
self.p_value_a2 = None
self.p_value_w2 = None
self.q = None
self.cond = None
self.significant_digit_of_data = None
self.au_2_data = None
self.cramer_data = None
self.anderson_data = None
self.optimal_tail_index = None
self.optimal_tail = None
self.rv_list = []
self.cdf_list = []
self.threads = threads
# setup matplotlib parameters
plt.rcParams['figure.figsize'] = 16, 9
# make sure a directory for the report is generated
if not os.path.exists(os.getcwd() + '/reports'):
os.mkdir(os.getcwd() + '/reports')
if not os.path.exists(os.getcwd() + '/reports/' + self.data_name):
os.mkdir(os.getcwd() + '/reports/' + self.data_name)
# plot the data before it is sorted and prepared
self.plot_data()
self.perpare_data()
@staticmethod
def get_significant_digit(number):
"""Retrurns the first non zero digit after decimal point."""
latter_number_part = str(number).split('.')[1]
if latter_number_part == '0':
return 0
else:
# leading zeros get removed automatically for integers
return len(latter_number_part[::-1])
def perpare_data(self):
"""
This function prepares the data for processing. The given data will be sorted in descending order and
a small amount of noise will be added so that no two values are the same, because equal values would
cause problems in the calculation of the test statistics.
"""
# check if any value is present more than once
if not np.unique(self.data).size == self.data.size:
self.significant_digit_of_data = max(
[Ftt.get_significant_digit(number) for number in self.data.astype('float')])
# add random noise below the significant digit to make sure no two values are the same
while np.unique(self.data).size != self.data.size:
self.data += np.random.normal(size=self.data.size) / 10 ** (self.significant_digit_of_data + 6)
self.data[::-1].sort()
def generate_tails(self, data):
"""
Generates the tails of a given data set and transforms them so that the location of the Pareto distribution
for the returned tail is 0.
Args:
data (numpy.ndarray): Data to generate the tail for.
Yields:
ndarray: The next tail
"""
for i in range(1, data.size - 1):
yield data[:i] - data[i]
@staticmethod
def fit_tail(tail):
"""
Fits the tail using scipy's genpareto and calculates the cdf of the tail for the fitted distribution.
Args:
tail (numpy.ndarray): tail to fit
Returns:
numpy.ndarray, tuple: Cdf of the data for the fitted tail, fit parameters (c, loc, scale).
"""
# floc is set to zero because the data is expected to be transformed, so the location of the pareto distribution
# is 0. Check generate_tails for further information.
fit_out = genpareto.fit(tail, floc=0)
# generate distribution with the fitted parameters
estimated_distribution = genpareto(c=fit_out[0], loc=fit_out[1], scale=fit_out[2])
# calculate the cdf of the estimated distribution in ascending order
cdf_of_tail = estimated_distribution.cdf(tail)
cdf_of_tail.sort()
return cdf_of_tail, fit_out
def find_optimal_tail(self):
"""
The function fits all tails and saves the generated fit information. After all tails have been fitted
the tail with the minimal AU2 test statistic and the index of the tail are saved.
Returns:
None
"""
# make sure all lists are cleaned up
self.cdf_list = []
self.rv_list = []
# fit the tails
for index, tail in enumerate(self.generate_tails(self.data)):
print("\t" + str(index) + "/" + str(self.data.size), end='\r', flush=True)
cdf, fit_out = self.fit_tail(tail)
self.cdf_list.append(cdf)
# save rv's
rv = genpareto(c=fit_out[0],
loc=fit_out[1],
scale=fit_out[2])
self.rv_list.append(rv)
# calculate the test statistics
self.au_2_data = np.array([au2(tail) for tail in self.cdf_list])
self.cramer_data = np.array([cramer_von_mises(tail) for tail in self.cdf_list])
self.anderson_data = np.array([anderson_darling(tail) for tail in self.cdf_list])
self.optimal_tail_index = self.au_2_data.argmin()
self.optimal_tail = self.cdf_list[self.au_2_data.argmin()]
def montecarlo_simulation(self, mc_steps=None):
"""
Runs Monte Carlo simulation for the optimal position.
Args:
mc_steps: number of Monte Carlo steps to run.
Returns:
float: p-value for the AU2 test statistic
float: p-value for the Anderson-Darling test statistic
float: p-value for the Cramér-von Mises test statistic
int: number of montecarlo steps
Raises:
RuntimeError if the function gets called when the fit for the optimal tail start has not been run before.
"""
if (self.optimal_tail_index is None or
self.rv_list is None or
self.cdf_list is None):
raise RuntimeError("Fits have to run before the Monte Carlo simulation")
if mc_steps is None:
mc_steps = self.mc_steps
# generate mc points
mc_counter_au2 = 0
mc_counter_a2 = 0
mc_counter_w2 = 0
# make sure every thread has a different seed
random_state = np.random.RandomState(np.random.seed())
random_variates = self.rv_list[self.optimal_tail_index].rvs(size=(mc_steps, self.optimal_tail.size), random_state=random_state)
for index, random_variate in enumerate(random_variates):
print("\t" + str(index) + "/" + str(mc_steps), end='\r', flush=True)
fit_out = genpareto.fit(np.sort(random_variate)[::-1], floc=0)
my_pareto = genpareto(c=fit_out[0], loc=fit_out[1], scale=fit_out[2])
cdf_of_tail = np.sort(my_pareto.cdf(random_variate))
if au2(cdf_of_tail) > self.au_2_data[self.optimal_tail_index]:
mc_counter_au2 += 1
if anderson_darling(cdf_of_tail) > self.anderson_data[self.optimal_tail_index]:
mc_counter_a2 += 1
if cramer_von_mises(cdf_of_tail) > self.cramer_data[self.optimal_tail_index]:
mc_counter_w2 += 1
return mc_counter_au2, mc_counter_a2, mc_counter_w2, mc_steps
def run_montecarlo_simulation(self, mc_steps=None, threads=None):
"""
Runs the montecarlo simulation and saves the results in class variables.
Args:
mc_steps: Number of montecarlo steps per thread.
threads: Number of threads to use.
Returns:
None
"""
if mc_steps is None:
mc_steps = self.mc_steps
if threads is None:
threads = self.threads
with Pool(threads) as p:
results = p.map(self.montecarlo_simulation, [mc_steps] * threads)
for result in results:
self.save_montecarlo_information(*result)
def save_montecarlo_information(self, mc_counter_au2, mc_counter_a2, mc_counter_w2, mc_steps):
self.mc_steps_run += mc_steps
self.mc_counter_au2 += mc_counter_au2
self.mc_counter_a2 += mc_counter_a2
self.mc_counter_w2 += mc_counter_w2
self.p_value_au2 = self.mc_counter_au2 / self.mc_steps_run
self.p_value_a2 = self.mc_counter_a2 / self.mc_steps_run
self.p_value_w2 = self.mc_counter_w2 / self.mc_steps_run
self.mc_error = 1 / self.mc_steps_run ** 0.5
def quantil_and_cvar(self, p_values=np.array([0.95, 0.97, 0.99, 0.999])):
"""
Calculates the quantiles for given p-values.
Args:
p_values (np.ndarray): p-values to calculate the quantiles and the conditional value at risk for.
Defaults to [0.95, 0.97, 0.99, 0.999]
Returns:
None
Raises:
RuntimeError if the Monte Carlo simulation has not been run before.
"""
if self.mc_steps_run == 0:
raise RuntimeError('Quantile and CVaR can only be calculated after the Monte Carlo simulation')
else:
sigma = self.rv_list[self.optimal_tail_index].kwds['scale']
xi = self.rv_list[self.optimal_tail_index].kwds['c']
quantile = self.data[self.optimal_tail_index] + sigma / xi * (
(self.data.size / (self.optimal_tail_index + 1) * (1 - p_values)) ** -xi - 1)
self.q = [(p, round(q, self.significant_digit_of_data)) for p, q in zip(p_values, quantile)]
cond = quantile + (sigma + xi * quantile) / (1 - xi)
self.cond = [(p, round(c, self.significant_digit_of_data)) for p, c in zip(p_values, cond)]
def plot_statistics(self):
"""Plots the three test statistics and saves the plot"""
fig, ax = plt.subplots(1, 1, figsize=(16, 9))
ax.plot(self.au_2_data, label='AU2')
ax.plot(self.anderson_data, label='Anderson-Darling')
ax.plot(self.cramer_data, label='Cramér-von Mises')
ax.grid()
ax.set_xlabel(r"Sorted Data Index $k$", fontsize=24)
ax.set_ylabel("Statistics", fontsize=24)
ax.tick_params(labelsize=20)
ax.set_yscale('log')
ax.legend(loc='best', fontsize=24)
fig.savefig(os.getcwd() + '/reports/' + self.data_name + '/test_statistics.png')
plt.close(fig)
def plot_data(self):
"""Plots the data and saves the plot"""
fig, ax = plt.subplots(1, 1, figsize=(16, 9))
ax.plot(np.arange(self.data.size), self.data / self.data.size, label=self.data_name)
ax.set_xlabel(r"Data Index $i$", fontsize=24)
ax.set_ylabel(r"$X_i$", fontsize=24)
ax.tick_params(labelsize=20)
ax.grid()
ax.legend(loc='best', fontsize=24)
fig.savefig(os.getcwd() + '/reports/' + self.data_name + '/data.png')
plt.close(fig)
def plot_empirical_distribution(self, closeup=True, save=True):
"""
Plots the empirical distribution and saves the plot.
Args:
closeup (bool): If True the p-value range is set, so that the values of p > 0.95 are shown. This parameter
is used to have a closeup in the plot of the empirical distribution.
save (bool): If True the plot is saved, else the function just returns None. This is used for the picture in
picture of the close up.
Returns:
None
"""
fig, ax = plt.subplots(1, 1, figsize=(16, 9))
ax.plot(self.data[::-1], np.arange(1, self.data.size + 1) / self.data.size, '+r', label='Data')
x = np.arange(self.data[self.optimal_tail_index],
self.data[0] * 1.5,
(self.data[0] * 1.5 - self.data[self.optimal_tail_index]) / 100)
tw = (self.optimal_tail_index + 1) / self.data.size
ax.plot(x, (1 - tw) + self.rv_list[self.optimal_tail_index].cdf(x - x.min()) * tw,
label="Generalized Pareto distribution")
ax.legend(loc='upper center', bbox_to_anchor=(0.7, 0.9), fontsize=16)
if closeup:
axins = ax.inset_axes([.35, .1, .6, .6])
axins.plot(x, (1 - tw) + self.rv_list[self.optimal_tail_index].cdf(x - x.min()) * tw,
label="Generalized Pareto distribution")
axins.plot(self.data[::-1], np.arange(1, self.data.size + 1) / self.data.size, '+r', label='Data')
axins.hlines((0.95, 0.97, 0.99, 0.999), self.data.min(), self.data.max() * 2, alpha=0.3)
axins.set_yticks([0.95, 0.97, 0.99, 0.999])
axins.set_xlim([self.data[self.optimal_tail_index] - self.data.std(), self.data[0] * 1.5])
axins.set_ylim((0.94, 1.005))
fig.savefig(os.getcwd() + '/reports/' + self.data_name + '/data_empirical.png')
plt.close(fig)
def report_html(self):
"""
Generates an HTML report with the analysis results.
Returns:
None
"""
# TODO: only push the class dict to the stream function and change the html form
env = Environment(
loader=FileSystemLoader(os.path.dirname(os.path.realpath(__file__)) + '/templates'),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template('report_base.html')
template.stream(
{'data_name': self.data_name,
'data': self.data,
'p_value_au2': self.p_value_au2,
'au2_value': self.au_2_data[self.optimal_tail_index],
'optimal_point_position': self.optimal_tail_index + 1,
'montecarlo_steps': self.mc_steps_run,
'fit_parameter': self.rv_list[self.optimal_tail_index].kwds,
'quantile': self.q,
'cond': self.cond,
'optimal_point': round(self.data[self.optimal_tail_index], self.significant_digit_of_data),
'w2_value': self.cramer_data[self.optimal_tail_index],
'a2_value': self.anderson_data[self.optimal_tail_index],
'data_size': self.data.size,
'p_value_a2': self.p_value_a2,
'p_value_w2': self.p_value_w2}
).dump(os.getcwd() + '/reports/' + self.data_name + '/' + self.data_name + '.html')
def run_analysis(self, p_values=np.array([0.95, 0.97, 0.99, 0.999])):
"""
Runs a complete analysis.
Args:
p_values: p values to calculate the quantiles and cvar for.
Returns:
None
"""
print('Running fit')
self.find_optimal_tail()
print('Running Montecarlo simulation')
self.run_montecarlo_simulation()
print('Calculating q and cvar')
self.quantil_and_cvar(p_values=p_values)
print('Generating plots')
self.plot_statistics()
self.plot_empirical_distribution(save='save')
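# --- Editor's addition: an illustrative usage sketch, not part of the original module. ---
# The sample data below is made up; it simply exercises the public workflow of the Ftt class
# (fit all tails, Monte Carlo p-values, then tail quantiles and conditional value at risk).
def _ftt_example(seed=0):
    rng = np.random.RandomState(seed)
    sample = rng.standard_t(df=3, size=2000)  # hypothetical heavy-tailed data
    analysis = Ftt(sample, data_name='example_data', mc_steps=100, threads=1)
    analysis.run_analysis()
    # analysis.q holds (p, quantile) pairs and analysis.cond the matching (p, CVaR) pairs,
    # both derived from the fitted GPD scale (sigma) and shape (xi) as in quantil_and_cvar above.
    return analysis.optimal_tail_index, analysis.p_value_au2, analysis.q, analysis.cond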
| 41.953297 | 135 | 0.607229 |
7945f7d49bcb4909c6deb9cd059c16f8be88d4fb | 1,821 | py | Python | python/project/pandas_data.py | Jai-Doshi/Python-Project | 40a77ae1eb2c66444d94f40aef4dbda2bc8d957a | [
"MIT"
] | null | null | null | python/project/pandas_data.py | Jai-Doshi/Python-Project | 40a77ae1eb2c66444d94f40aef4dbda2bc8d957a | [
"MIT"
] | null | null | null | python/project/pandas_data.py | Jai-Doshi/Python-Project | 40a77ae1eb2c66444d94f40aef4dbda2bc8d957a | [
"MIT"
] | null | null | null | # IMPORTING LIBRARIES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import warnings
# IMPORTING DATASET
df = pd.read_csv('..//assets/data.csv')
# FEATURES
# Basic
df.head()
df.tail()
df.info()
df.describe()
df.columns
# Intermediate
df['name'].unique()
df['name'].value_counts()
df['age'].unique()
df['age'].value_counts()
df['location'].unique()
df['location'].value_counts()
df['category'].unique()
df['category'].value_counts()
df['subcategory'].unique()
df['subcategory'].value_counts()
df['year'].unique()
df['year'].value_counts()
# Advance
df[df['location'] == 'GUJARAT'].groupby('location')['category'].value_counts()
# Expert
df[(df['year'] == 2021) & (df['location'] == 'GUJARAT')].groupby(['year','location'])['category'].value_counts()
# Expert ++
col = []
d = {}
c = []
def exp():
for i in df:
col.append(i)
for k in col:
d.setdefault(k)
length = len(i)
val = []
for j in df[i]:
val.append(j)
d[k] = val
return func()
def func():
for i in range(len(d)):
for j in range(len(d[col[i]])):
c.append(df[(df[col[i]] == d[col[i]][j]) & (df[col[i-1]] == d[col[i-1]][j])].groupby([col[i],col[i-1]])[col[i-2]].value_counts())
return c
print(exp())
# Particular User
def user(user_name):
print(df[df['name'] == user_name.capitalize()].groupby('category')['subcategory'].describe())
print('\n \n')
print(df[df['name'] == user_name.capitalize()].groupby('subcategory')['category'].describe())
print('\n \n')
print(df[df['name'] == user_name.capitalize()].value_counts())
un = input('Enter : ')
user(un)
| 18.773196 | 142 | 0.555739 |
7945f8d06af88b7c18f1120fbabb335540045599 | 7,220 | py | Python | tcapy_examples/gen/non_db_tca_example.py | Ahrvo-Trading-Systems/tcapy | df8439aa5c754fc9a7fde463c44c489b27112f76 | [
"Apache-2.0"
] | 189 | 2020-03-20T17:03:04.000Z | 2022-03-30T13:33:27.000Z | tcapy_examples/gen/non_db_tca_example.py | Ahrvo-Trading-Systems/tcapy | df8439aa5c754fc9a7fde463c44c489b27112f76 | [
"Apache-2.0"
] | 4 | 2020-06-06T14:58:21.000Z | 2022-03-10T22:31:15.000Z | tcapy_examples/gen/non_db_tca_example.py | Ahrvo-Trading-Systems/tcapy | df8439aa5c754fc9a7fde463c44c489b27112f76 | [
"Apache-2.0"
] | 60 | 2020-03-20T17:06:56.000Z | 2022-03-26T02:48:58.000Z | from __future__ import division, print_function
__author__ = 'saeedamen' # Saeed Amen / [email protected]
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import os
from collections import OrderedDict
import time
from tcapy.util.mediator import Mediator
from tcapy.conf.constants import Constants
constants = Constants()
folder = constants.test_data_harness_folder
volatile_cache = Mediator.get_volatile_cache()
def tca_example_csv_trade_data_dukascopy():
"""Loads up trade/order data from CSV files and market data externally from Dukascopy. Does not use any databases, if
you rarely use TCA, this is fine. However, for heavy use of TCA, we strongly recommend maintaining an internal tick
database, as external downloading of data can be very slow.
In this case we are simply calculating the slippage of every trade and orders above them.
"""
from tcapy.analysis.tcaengine import TCAEngineImpl
from tcapy.analysis.tcarequest import TCARequest
from tcapy.analysis.algos.benchmark import BenchmarkArrival, BenchmarkMarketSpreadToMid
from tcapy.analysis.algos.metric import MetricSlippage
from tcapy.analysis.algos.resultsform import TimelineResultsForm
tca_version = constants.tcapy_version
tca_engine = TCAEngineImpl(version=tca_version)
# The test trade/order data is populated between 25 Apr 2017-05 Jun 2017
# with trades/orders for 'EURUSD', 'USDJPY' and 'EURJPY'
csv_trade_order_mapping = OrderedDict([('trade_df', os.path.join(folder, 'small_test_trade_df.csv')),
('order_df', os.path.join(folder, 'small_test_order_df.csv'))])
# Specify the TCA request (note: by specifying use_multithreading=False, we avoid dependencies like Celery)
# Depending on how the caching is setup, tcapy may try to download market data in monthly/weekly chunks and cache them,
# To force deletion of the cache you can run the below
# volatile_cache.clear_cache()
# However if you run TCA for the same period, it will load the market data from Redis/in-memory, rather than
# downloading it externally from Dukascopy
tca_request = TCARequest(start_date='05 May 2017', finish_date='10 May 2017', ticker=['EURUSD'],
tca_type='detailed',
trade_data_store='csv', market_data_store='dukascopy',
trade_order_mapping=csv_trade_order_mapping,
metric_calcs=[MetricSlippage()],
results_form=[TimelineResultsForm(metric_name='slippage', by_date='datehour', scalar=10000.0)],
benchmark_calcs=[BenchmarkArrival(), BenchmarkMarketSpreadToMid()],
use_multithreading=False)
# Dictionary of dataframes as output from TCA calculation
dict_of_df = tca_engine.calculate_tca(tca_request)
print(dict_of_df.keys())
def tca_example_csv_trade_data_dukascopy_no_redis():
"""Running TCA calculation but without any Redis caching at all. In practice, this should be avoided, since it will
likely be much slower, given we'll end up accessing market data/trade data a lot more often from a slow source.
This is particularly an issue when we're downloading large samples of market data from an external source. For very small
time periods this might be fine.
"""
from tcapy.analysis.tcaengine import TCAEngineImpl
from tcapy.analysis.tcarequest import TCARequest
from tcapy.analysis.algos.benchmark import BenchmarkArrival, BenchmarkMarketSpreadToMid
from tcapy.analysis.algos.metric import MetricSlippage
from tcapy.analysis.algos.resultsform import TimelineResultsForm
tca_version = constants.tcapy_version
tca_engine = TCAEngineImpl(version=tca_version)
# The test trade/order data is populated between 25 Apr 2017-05 Jun 2017
# with trades/orders for 'EURUSD', 'USDJPY' and 'EURJPY'
csv_trade_order_mapping = OrderedDict([('trade_df', os.path.join(folder, 'small_test_trade_df.csv')),
('order_df', os.path.join(folder, 'small_test_order_df.csv'))])
# Specify the TCA request (note: by specifying use_multithreading=False, we avoid dependencies like Celery)
# Depending on how the caching is setup, tcapy may try to download market data in monthly/weekly chunks and cache them,
# To force deletion of the cache you can run the below
# volatile_cache.clear_cache()
# However if you run TCA for the same period, it will load the market data from Redis/in-memory, rather than
# downloading it externally from Dukascopy
tca_request = TCARequest(start_date='05 May 2017', finish_date='06 May 2017', ticker=['EURUSD'],
tca_type='detailed',
trade_data_store='csv', market_data_store='dukascopy',
trade_order_mapping=csv_trade_order_mapping,
metric_calcs=[MetricSlippage()],
results_form=[
TimelineResultsForm(metric_name='slippage', by_date='datehour', scalar=10000.0)],
benchmark_calcs=[BenchmarkArrival(), BenchmarkMarketSpreadToMid()],
use_multithreading=False)
tca_request.multithreading_params = {'splice_request_by_dates': False, # True or False
'cache_period': 'month', # month or week
# Cache trade data in monthly/periodic chunks in Redis (reduces database calls a lot)
'cache_period_trade_data': False,
# Cache market data in monthly/periodic chunks in Redis (reduces database calls a lot)
'cache_period_market_data': False,
# Return trade data internally as handles (usually necessary for Celery)
'return_cache_handles_trade_data': False,
# Return market data internally as handles (usually necessary for Celery)
'return_cache_handles_market_data': False,
# Recommend using Celery, which allows us to reuse Python processes
'parallel_library': 'single'
}
# Dictionary of dataframes as output from TCA calculation
dict_of_df = tca_engine.calculate_tca(tca_request)
print(dict_of_df.keys())
market_df = dict_of_df['market_df']
market_df_minute = market_df.resample('1min').last()
print(market_df_minute)
if __name__ == '__main__':
start = time.time()
# tca_example_csv_trade_data_dukascopy()
tca_example_csv_trade_data_dukascopy_no_redis()
finish = time.time()
print('Status: calculated ' + str(round(finish - start, 3)) + "s")
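# --- Editor's addition: an illustrative variant, not part of the original example file. ---
# It reuses the multithreading_params keys shown above but switches the periodic caching flags on,
# to contrast with tca_example_csv_trade_data_dukascopy_no_redis(); it assumes Redis is available.
def tca_example_with_periodic_caching_sketch(tca_request):
    """Given a TCARequest like the ones built above, enable monthly Redis caching of trade/market data."""
    tca_request.multithreading_params = {'splice_request_by_dates': False,
                                         'cache_period': 'month',
                                         'cache_period_trade_data': True,   # cache trade data in monthly chunks
                                         'cache_period_market_data': True,  # cache market data in monthly chunks
                                         'return_cache_handles_trade_data': False,
                                         'return_cache_handles_market_data': False,
                                         'parallel_library': 'single'}
    return tca_request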
| 47.5 | 127 | 0.662188 |
7945fa9b1fa77ddbcf7c4474b9d72452d036364c | 4,536 | py | Python | CONFIG/FollowAPP/API/views.py | Brktrlw/Instagram-Clone-Django-and-React | 6390db2133d3beae2097a680097e170bd4fbcabe | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | CONFIG/FollowAPP/API/views.py | Brktrlw/Instagram-Clone-Django-and-React | 6390db2133d3beae2097a680097e170bd4fbcabe | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | CONFIG/FollowAPP/API/views.py | Brktrlw/Instagram-Clone-Django-and-React | 6390db2133d3beae2097a680097e170bd4fbcabe | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | from rest_framework.generics import CreateAPIView,DestroyAPIView
from .serializers import SerializerFollow,SerializerCreateRequest,SerializerDENEME
from UserAPP.models import ModelFollower,ModelUser
from django.shortcuts import get_object_or_404
from NotificationAPP.models import ModelNotification,ModelRequest
class UnfollowerUserAPIView(DestroyAPIView):
# Unfollow a user
lookup_field = "follower__username"
serializer_class = SerializerFollow
queryset = ModelFollower.objects.all()
def get_object(self):
follower = get_object_or_404(ModelUser,username=self.kwargs.get("follower__username"))
following=self.request.user
return get_object_or_404(ModelFollower,follower=follower,following=following)
def perform_destroy(self, instance):
ModelNotification.objects.filter(receiver_user=instance.follower,sender_user=self.request.user,post=None).delete()
instance.delete()
class FollowUserAPIView(CreateAPIView):
# Follow a public account
serializer_class = SerializerFollow
queryset = ModelFollower.objects.all()
def perform_create(self, serializer):
# When following, also send a notification to the other user
receiver_user = get_object_or_404(ModelUser,username=serializer.validated_data["follower"].get("username"))
sender_user = self.request.user
ModelNotification.objects.create(receiver_user=receiver_user,sender_user=sender_user,notificationType=2)
serializer.save(follower=receiver_user,following=sender_user) # Adds to the followers
class CreateRequestFollowAPIView(CreateAPIView):
# Send a follow request to a private account
serializer_class = SerializerCreateRequest
queryset = ModelRequest.objects.all()
def perform_create(self, serializer):
receiver_user = get_object_or_404(ModelUser,username=serializer.validated_data["receiver_user"].get("username"))
sender_user = self.request.user
ModelNotification.objects.create(receiver_user=receiver_user, sender_user=sender_user, notificationType=3)
serializer.save(receiver_user=receiver_user, sender_user=sender_user) # Adds to the followers
class UnRequestFollowAPIView(DestroyAPIView):
# Delete the follow request
lookup_field = "follower__username"
serializer_class = SerializerCreateRequest
queryset = ModelRequest.objects.all()
def get_object(self):
follower = get_object_or_404(ModelUser,username=self.kwargs.get("follower__username"))
following = self.request.user
return get_object_or_404(ModelRequest,receiver_user=follower,sender_user=following)
def perform_destroy(self, instance):
ModelNotification.objects.filter(receiver_user=instance.receiver_user,sender_user=self.request.user,post=None).delete()
instance.delete()
class AllFollowProccessAPIView(CreateAPIView):
serializer_class = SerializerDENEME
queryset = ModelFollower.objects.all()
def perform_create(self, serializer):
senderUser = self.request.user
receiverUser = get_object_or_404(ModelUser,username=serializer["receiver"].value)
followOBJ = ModelFollower.objects.filter(follower=receiverUser,following=senderUser)
if followOBJ.exists():
# unfollow
followOBJ.delete()
ModelNotification.objects.filter(receiver_user=receiverUser, sender_user=senderUser, notificationType="2").delete()
else:
# follow
if receiverUser.private:
# if the account is private
requestOBJ=ModelRequest.objects.filter(receiver_user=receiverUser,sender_user=senderUser)
if requestOBJ:
# a request was already sent, so withdraw it
requestOBJ.delete()
ModelNotification.objects.filter(receiver_user=receiverUser,sender_user=senderUser,notificationType="3").delete()
else:
# no request sent yet, so create one
ModelRequest.objects.create(receiver_user=receiverUser, sender_user=senderUser)
ModelNotification.objects.create(receiver_user=receiverUser,sender_user=senderUser,notificationType="3")
else:
# if the account is not private, follow directly
ModelFollower.objects.create(follower=receiverUser,following=senderUser)
ModelNotification.objects.create(receiver_user=receiverUser,sender_user=senderUser,notificationType="2")
| 47.747368 | 133 | 0.731481 |
7945faadcf0a1202da759d7ad0994b0953636f2b | 657 | py | Python | capitalrelacional/views.py | saulm/firedeptmanagement | 06548bf872fc76ac214ec25cc536f34aa8145305 | [
"Unlicense"
] | 2 | 2019-09-24T19:12:04.000Z | 2019-09-28T19:07:57.000Z | capitalrelacional/views.py | saulm/firedeptmanagement | 06548bf872fc76ac214ec25cc536f34aa8145305 | [
"Unlicense"
] | 1 | 2020-08-16T02:34:28.000Z | 2021-03-16T14:15:47.000Z | capitalrelacional/views.py | saulm/firedeptmanagement | 06548bf872fc76ac214ec25cc536f34aa8145305 | [
"Unlicense"
] | 2 | 2017-01-18T21:10:18.000Z | 2020-03-12T20:25:08.000Z | #coding=utf-8
#from haystack.query import SearchQuerySet
from firedeptmanagement.capitalrelacional.models import RelationalCompany, RelationalPerson
from django.shortcuts import render_to_response
from firedeptmanagement.personal.models import Firefighter
from django.contrib.auth.decorators import login_required
from django.template.context import RequestContext
@login_required
def search_related(request):
query = request.GET.get('query', '')
data = {"Firefighter": Firefighter.search(query), "RelationalCompany":RelationalCompany.search(query), "query":query}
return render_to_response("directorio.html", RequestContext(request, data))
| 46.928571 | 121 | 0.820396 |
7945fc2666721373a9c47789603627d5d9969c43 | 3,752 | py | Python | back/education/migrations/0001_initial.py | yeezy-na-izi/.shSkill | 54608cd89ddc90377d190104115debc702d9aa1b | [
"Apache-2.0"
] | null | null | null | back/education/migrations/0001_initial.py | yeezy-na-izi/.shSkill | 54608cd89ddc90377d190104115debc702d9aa1b | [
"Apache-2.0"
] | null | null | null | back/education/migrations/0001_initial.py | yeezy-na-izi/.shSkill | 54608cd89ddc90377d190104115debc702d9aa1b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-07 22:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Название')),
('description', models.TextField(verbose_name='Описание')),
('price', models.FloatField(verbose_name='Цена')),
],
options={
'verbose_name': 'Курс',
'verbose_name_plural': 'Курсы',
},
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Название')),
('description', models.TextField(verbose_name='Условие задачи')),
],
options={
'verbose_name': 'Задача',
'verbose_name_plural': 'Задачи',
},
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Название')),
('tasks', models.ManyToManyField(blank=True, to='education.Task', verbose_name='Задачи')),
],
options={
'verbose_name': 'Урок',
'verbose_name_plural': 'Уроки',
},
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chat', models.URLField(verbose_name='Ссылка на чат')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='education.course', verbose_name='Курс')),
('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.teacher', verbose_name='Учитель')),
('users', models.ManyToManyField(to='user.Student', verbose_name='Ученики')),
],
options={
'verbose_name': 'Группа',
'verbose_name_plural': 'Группы',
},
),
migrations.CreateModel(
name='Date',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField(verbose_name='Время и дата')),
('description', models.TextField(verbose_name='Описание')),
('lesson_link', models.URLField(verbose_name='Ссылка на подключение')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='education.group', verbose_name='Группа')),
('lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='education.lesson', verbose_name='Урок')),
],
options={
'verbose_name': 'Дата',
'verbose_name_plural': 'Даты',
},
),
migrations.AddField(
model_name='course',
name='lessons',
field=models.ManyToManyField(to='education.Lesson', verbose_name='Уроки'),
),
]
| 42.636364 | 135 | 0.554904 |
7945fd3cc9d522c6614ba19e5e1aa55ef3a04204 | 1,333 | py | Python | agenda.py | Mateus-Toni/workstation-backend | 209f32604eaf07261679ac95c4dc8dcab8380a90 | [
"MIT"
] | null | null | null | agenda.py | Mateus-Toni/workstation-backend | 209f32604eaf07261679ac95c4dc8dcab8380a90 | [
"MIT"
] | null | null | null | agenda.py | Mateus-Toni/workstation-backend | 209f32604eaf07261679ac95c4dc8dcab8380a90 | [
"MIT"
] | null | null | null | import dao as Bank
# agenda workstation
from datetime import datetime
from datetime import timedelta
from flask import request  # assumed import: `request.form` is used below, which implies a Flask request context
# validating and saving bookings
start_date = request.form['data_inicio'] # aaaa/mm/dd
start_hour = request.form["hora_inicio"] # hh:mm:ss
final_hour = request.form["hora_fim"] # hh:mm:ss
final_date = request.form["data_final"] # aaaa/mm/dd
id_user = request.form["id_user"]
id_box = request.form["id_box"]
if Bank.verify_scheduling(start_date, start_hour, final_hour, final_date, id_box):
Bank.save_scheduling(start_date, start_hour, final_hour,
final_date, id_user, id_box)
else:
print('Invalid booking')
# sending all used dates to the front end
list_locations = Bank.show_all_scheduling()
if list_locations:
for dict_locations in list_locations:
start_date = dict_locations['datainicio']
final_date = dict_locations['datafim']
start_day = start_date.day
start_month = start_date.month
start_year = start_date.year
final_day = final_date.day
final_month = final_date.month
final_year = final_date.year
list_days = [days for days in range(start_day, final_day+1)]
list_dates = [f'{days}/{start_month}/{start_year}' for days in list_days]
print(list_dates)
| 32.512195 | 82 | 0.696174 |
7945fd4337c44e6aa40bb51bc63a9f47ea0b91b9 | 7,127 | py | Python | Demo/InversionHorizontalReflector2D_FrequencyDomain_VariableDensity_Parallel.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | Demo/InversionHorizontalReflector2D_FrequencyDomain_VariableDensity_Parallel.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | null | null | null | Demo/InversionHorizontalReflector2D_FrequencyDomain_VariableDensity_Parallel.py | zfang-slim/PysitForPython3 | dc60537b26018e28d92b7a956a2cf96775f0bdf9 | [
"BSD-3-Clause"
] | 1 | 2020-06-13T07:13:07.000Z | 2020-06-13T07:13:07.000Z | import time
import copy
import numpy as np
import matplotlib.pyplot as plt
import math
import os
from shutil import copy2
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpi4py import MPI
import sys
import scipy.io as sio
from pysit import *
from pysit.gallery import horizontal_reflector
from pysit.util.io import *
from pysit.util.compute_tools import *
from pysit.util.parallel import *
if __name__ == '__main__':
# Set up parallel computing environment
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
pwrap = ParallelWrapShot()
# Set up domain, mesh and velocity model
pmlx = PML(0.1, 1000)
pmlz = PML(0.1, 1000)
x_config = (0.0, 2.0, pmlx, pmlx)
z_config = (0.0, 1.0, pmlz, pmlz)
d = RectangularDomain(x_config, z_config)
nx = 201
nz = 101
m = CartesianMesh(d, nx, nz)
C, C0, m, d = horizontal_reflector(m)
rho0 = np.ones((nx, nz))
rho = rho0 + np.reshape(C-C0, (nx,nz))
rho0 = rho0.reshape((nx*nz,1))
rho = rho.reshape((nx*nz,1))
kappa0 = rho0 * C0**2.0
kappa = rho * C**2.0
model_param = {'kappa': kappa, 'rho': rho}
model_init = {'kappa' : kappa0, 'rho': rho0}
# Set up shots
zmin = d.z.lbound
zmax = d.z.rbound
zpos = zmin + (1./10.)*zmax
Nshots = 3
shots = equispaced_acquisition(m,
RickerWavelet(10.0),
sources=Nshots,
source_depth=zpos,
source_kwargs={},
receivers='max',
receiver_depth=zpos,
receiver_kwargs={}
)
shots_freq = copy.deepcopy(shots)
# Define and configure the wave solver
trange = (0.0,2.0)
# Define the time-domain wave-equation solver and generate the time-domain data
solver = VariableDensityHelmholtz(m,
spatial_accuracy_order=4,
trange=trange,
)
base_model = solver.ModelParameters(m,model_param)
frequencies = [2.0,3.0]
print('Generating data...')
tt = time.time()
generate_seismic_data(shots, solver, base_model, frequencies=frequencies)
print('Data generation: {0}s'.format(time.time()-tt))
# Check the result and plot the result
if rank == 0:
clim = rho.min(),rho.max()
plt.figure(figsize=(20,8))
plt.subplot(2,2,1)
vis.plot(rho0, m, clim=clim)
plt.title(r'Initial Model of $\rho$')
plt.colorbar()
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.subplot(2,2,2)
vis.plot(rho, m, clim=clim)
plt.title(r"True Model of $\rho$")
plt.colorbar()
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
clim = kappa.min(),kappa.max()
plt.subplot(2,2,3)
vis.plot(kappa0, m, clim=clim)
plt.title(r'Initial Model of $\kappa$')
plt.colorbar()
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.subplot(2,2,4)
vis.plot(kappa, m, clim=clim)
plt.title(r"True Model of $\kappa$")
plt.colorbar()
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.show()
comm.Barrier()
data = shots[0].receivers.data_dft
xrec = np.linspace(0.0,2.0,201)
data1 = shots[0].receivers.data_dft[2.0]
data2 = shots[0].receivers.data_dft[3.0]
plt.figure(figsize=(12,12))
plt.subplot(2,2,1)
plt.plot(xrec, np.real(data1.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Real part of data at f = 2.0Hz')
plt.subplot(2,2,2)
plt.plot(xrec, np.real(data2.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Real part of data at f = 3.0Hz')
plt.subplot(2,2,3)
plt.plot(xrec, np.imag(data1.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Imaginary part of data at f = 2.0Hz')
plt.subplot(2,2,4)
plt.plot(xrec, np.imag(data2.flatten()))
plt.xlabel('Receivers [km]')
plt.title('Imaginary part of data at f = 3.0Hz')
plt.show()
# Set up the inversion
objective = FrequencyLeastSquares(solver)
invalg = PQN(objective, proj_op=None, memory_length=10)
nsteps = 5
loop_configuration = [(nsteps, {'frequencies': [2.0]}), (nsteps, {'frequencies': [3.0]})]
status_configuration = {'value_frequency' : 1,
'residual_frequency' : 1,
'residual_length_frequency' : 1,
'objective_frequency' : 1,
'step_frequency' : 1,
'step_length_frequency' : 1,
'gradient_frequency' : 1,
'gradient_length_frequency' : 1,
'run_time_frequency' : 1,
'alpha_frequency' : 1,
}
initial_value = solver.ModelParameters(m,model_init)
line_search = 'backtrack'
result = invalg(shots, initial_value, loop_configuration,
line_search=line_search,
status_configuration=status_configuration, verbose=True)
# Check result
if rank == 0:
obj_vals = np.array([v for k,v in list(invalg.objective_history.items())])
plt.figure()
plt.semilogy(obj_vals)
plt.xlabel('Iteration')
plt.ylabel('Objective value')
plt.show()
clim = C.min(),C.max()
# Do something to visualize the results
plt.figure(figsize=(20,16))
clim = rho.min(),rho.max()
plt.subplot(3,2,1)
vis.plot(rho0, m, clim=clim)
plt.title(r'Initial Model of $\rho$')
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.colorbar()
plt.subplot(3,2,3)
vis.plot(rho, m, clim=clim)
plt.title(r'True Model of $\rho$')
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.colorbar()
plt.subplot(3,2,5)
vis.plot(result.rho, m, clim=clim)
plt.title(r'Reconstruction or $\rho$')
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.colorbar()
clim = kappa.min(),kappa.max()
plt.subplot(3,2,2)
vis.plot(kappa0, m, clim=clim)
plt.title(r'Initial Model of $\kappa$')
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.colorbar()
plt.subplot(3,2,4)
vis.plot(kappa, m, clim=clim)
plt.title(r'True Model of $\kappa$')
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.colorbar()
plt.subplot(3,2,6)
vis.plot(result.kappa, m, clim=clim)
plt.title(r'Reconstruction or $\kappa$')
plt.xlabel('X [km]')
plt.ylabel('Z [km]')
plt.colorbar()
plt.show()
comm.Barrier() | 29.572614 | 93 | 0.530237 |
7945fd721995bc73387fcfcc5eecbb1c42bdb564 | 3,515 | py | Python | maxent_irl.py | ufgtb24/IRL | 893377ac1f703be04af91e8923b4907045a1678c | [
"MIT"
] | null | null | null | maxent_irl.py | ufgtb24/IRL | 893377ac1f703be04af91e8923b4907045a1678c | [
"MIT"
] | null | null | null | maxent_irl.py | ufgtb24/IRL | 893377ac1f703be04af91e8923b4907045a1678c | [
"MIT"
] | null | null | null | '''
Implementation of maximum entropy inverse reinforcement learning in
Ziebart et al. 2008 paper: Maximum Entropy Inverse Reinforcement Learning
https://www.aaai.org/Papers/AAAI/2008/AAAI08-227.pdf
Acknowledgement:
This implementation is largely influenced by Matthew Alger's maxent implementation here:
https://github.com/MatthewJA/Inverse-Reinforcement-Learning/blob/master/irl/maxent.py
By Yiren Lu ([email protected]), May 2017
'''
import numpy as np
import mdp.gridworld as gridworld
import mdp.value_iteration as value_iteration
import img_utils
from utils import *
def compute_state_visition_freq(P_a, gamma, trajs, policy, deterministic=True):
"""compute the expected states visition frequency p(s| theta, T)
using dynamic programming
inputs:
P_a NxNxN_ACTIONS matrix - transition dynamics
gamma float - discount factor
trajs list of list of Steps - collected from expert
policy Nx1 vector (or NxN_ACTIONS if deterministic=False) - policy
returns:
p Nx1 vector - state visitation frequencies
"""
N_STATES, _, N_ACTIONS = np.shape(P_a)
T = len(trajs[0])
# mu[s, t] is the prob of visiting state s at time t
mu = np.zeros([N_STATES, T])
for traj in trajs:
mu[traj[0].cur_state, 0] += 1
mu[:,0] = mu[:,0]/len(trajs)
# Within a single call to compute_state_visition_freq, the last state s is the most accurate, because it builds
# on the previous N_STATES-1 states; since training calls compute_state_visition_freq many times, this bias averages out.
for s in range(N_STATES):
for t in range(T-1):
if deterministic:
mu[s, t+1] = sum([mu[pre_s, t]*P_a[pre_s, s, int(policy[pre_s])] for pre_s in range(N_STATES)])
else:
mu[s, t+1] = sum([sum([mu[pre_s, t]*P_a[pre_s, s, a1]*policy[pre_s, a1] for a1 in range(N_ACTIONS)]) for pre_s in range(N_STATES)])
p = np.sum(mu, 1)
return p
def maxent_irl(feat_map, P_a, gamma, trajs, lr, n_iters):
"""
Maximum Entropy Inverse Reinforcement Learning (Maxent IRL)
Uses two MDP solves as inner loops: one for the policy and one for the state visitation frequencies.
inputs:
feat_map NxD matrix - the features for each state
P_a NxNxN_ACTIONS matrix - P_a[s0, s1, a] is the transition prob of
landing at state s1 when taking action
a at state s0
gamma float - RL discount factor
trajs a list of demonstrations
lr float - learning rate
n_iters int - number of optimization steps
returns
rewards Nx1 vector - recovered state rewards
"""
N_STATES, _, N_ACTIONS = np.shape(P_a)
# init parameters
theta = np.random.uniform(size=(feat_map.shape[1],))
# calc feature expectations
feat_exp = np.zeros([feat_map.shape[1]])
for episode in trajs:
for step in episode:
feat_exp += feat_map[step.cur_state,:]
feat_exp = feat_exp/len(trajs)
# training
for iteration in range(n_iters):
if iteration % max(1, n_iters // 20) == 0:
print ('iteration: {}/{}'.format(iteration, n_iters))
# compute reward function
rewards = np.dot(feat_map, theta)
# compute policy
_, policy = value_iteration.value_iteration(P_a, rewards, gamma, error=0.01, deterministic=False)
# compute state visitation frequencies
svf = compute_state_visition_freq(P_a, gamma, trajs, policy, deterministic=False)
# compute gradients
grad = feat_exp - feat_map.T.dot(svf)
# update params
theta += lr * grad
rewards = np.dot(feat_map, theta)
# return sigmoid(normalize(rewards))
return normalize(rewards)
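# --- Editor's addition: an illustrative usage sketch, not part of the original script. ---
# The tiny 2-state/2-action MDP, feature map and trajectories below are made up purely to show
# how maxent_irl() is called; shapes follow the docstrings above.
def _maxent_irl_example():
    n_states, n_actions = 2, 2
    P_a = np.zeros((n_states, n_states, n_actions))
    P_a[0, 1, 0] = P_a[1, 0, 0] = 1.0  # action 0 deterministically swaps the state
    P_a[0, 0, 1] = P_a[1, 1, 1] = 1.0  # action 1 stays in the same state
    feat_map = np.eye(n_states)        # one-hot state features (N x D)
    class _Step(object):
        # minimal stand-in for the Step records expected in trajs
        def __init__(self, cur_state):
            self.cur_state = cur_state
    trajs = [[_Step(0), _Step(1), _Step(1)] for _ in range(5)]
    return maxent_irl(feat_map, P_a, gamma=0.9, trajs=trajs, lr=0.1, n_iters=20)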
| 31.383929 | 139 | 0.678236 |
7945fdbdd393c872859ae1613f19feca9b11ca26 | 839 | py | Python | JSGridDjangoSample/urls.py | cuauhtemoc-amdg/jsgrid-django | b0cf13ebc32d74be53e21090920f870e85418b26 | [
"MIT"
] | null | null | null | JSGridDjangoSample/urls.py | cuauhtemoc-amdg/jsgrid-django | b0cf13ebc32d74be53e21090920f870e85418b26 | [
"MIT"
] | null | null | null | JSGridDjangoSample/urls.py | cuauhtemoc-amdg/jsgrid-django | b0cf13ebc32d74be53e21090920f870e85418b26 | [
"MIT"
] | null | null | null | """JSGridDjangoSample URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('clients/', include('clients.urls')),
]
| 34.958333 | 77 | 0.712753 |
7945fe5fbcfb072b0327288ea0464e6eafc059fb | 982 | py | Python | handlers/travel_result.py | icoxfog417/enigma_travel | 0b4ab4f2ea0dfbc4110a5f9e97c36245c2cb3ed9 | [
"MIT"
] | 1 | 2015-10-09T01:26:23.000Z | 2015-10-09T01:26:23.000Z | handlers/travel_result.py | icoxfog417/enigma_travel | 0b4ab4f2ea0dfbc4110a5f9e97c36245c2cb3ed9 | [
"MIT"
] | null | null | null | handlers/travel_result.py | icoxfog417/enigma_travel | 0b4ab4f2ea0dfbc4110a5f9e97c36245c2cb3ed9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tornado.web
from models.travel_model import Travel
from service.ubic_api import UBICApi
class TravelResultHandler(tornado.web.RequestHandler):
"""
Presents the travel plan derived from the learned results to the group.
"""
def get(self, group_id):
budget = int(self.get_query_argument("budget", "10000"))
filter = {
"budget": budget
}
result = self.__decide_travel(group_id, filter)
self.render("travel_result.html", result=result)
def __decide_travel(self, group_id, filter):
"""
Proposes a travel plan based on the learning results
:return:
"""
api = UBICApi()
# Check whether training has already been done
is_trained = False
if not is_trained:
train_data = [] # Fetch the training data stored in the DB
api.train(group_id, train_data)
candidates = api.predict(group_id)
# Narrow down by the conditions passed as filter
filtered = candidates
# Take the top result
result = filtered[0]
return result
| 22.837209 | 64 | 0.59776 |
7945fe7760276ca0200b1f313584835122927696 | 96 | py | Python | apps/app/main.py | acronhub/sls-python-fastapi | a997b83ac8b28412b7932993f1f70fa03f9e28f2 | [
"MIT"
] | null | null | null | apps/app/main.py | acronhub/sls-python-fastapi | a997b83ac8b28412b7932993f1f70fa03f9e28f2 | [
"MIT"
] | null | null | null | apps/app/main.py | acronhub/sls-python-fastapi | a997b83ac8b28412b7932993f1f70fa03f9e28f2 | [
"MIT"
] | null | null | null | from init import create_app
from mangum import Mangum
app = create_app()
handler = Mangum(app)
| 16 | 27 | 0.78125 |
7945ffcebeab90c6e1ab98d7e9302c7c91202352 | 4,935 | py | Python | src/system/sfc.py | ethanejohnsons/Ignition | 2d06c5ac896ba2ff673aa8a522978ffe40eb0471 | [
"MIT"
] | null | null | null | src/system/sfc.py | ethanejohnsons/Ignition | 2d06c5ac896ba2ff673aa8a522978ffe40eb0471 | [
"MIT"
] | null | null | null | src/system/sfc.py | ethanejohnsons/Ignition | 2d06c5ac896ba2ff673aa8a522978ffe40eb0471 | [
"MIT"
] | null | null | null | # Copyright (C) 2018-2021
# Author: Cesar Roman
# Contact: [email protected]
"""SFC Functions.
The following functions give you access to interact with the SFCs in the
Gateway.
"""
from __future__ import print_function
__all__ = [
"cancelChart",
"getRunningCharts",
"getVariables",
"pauseChart",
"redundantCheckpoint",
"resumeChart",
"setVariable",
"setVariables",
"startChart",
]
from system.dataset import Dataset
class PyChartScope(object):
"""This class represents any "scope" in the SFC system, and is
fundamentally just an observable dictionary.
Despite its name, it is not limited to chart scope. This class
notifies listeners when values are changed, and wraps any
dictionaries assigned to it as PyChartScopes as well.
"""
pass
def cancelChart(instanceId):
"""Cancels the execution of a running chart instance.
Any running steps will be told to stop, and the chart will enter
Canceling state.
Args:
instanceId (str): The ID of the chart instance to cancel.
"""
print(instanceId)
def getRunningCharts(charPath=None):
"""Retrieves information about running charts.
Can search all running charts, or be filtered charts at a specific
path. This function will return charts that are in a Paused state.
Args:
charPath (str): The path to a chart to filter on: i.e.,
"folder/chartName". If specified, only charts at the path
will be included in the returned dataset. If omitted, the
function will return data for all active charts.
Returns:
Dataset: A dataset with information on the active chart.
"""
print(charPath)
return Dataset()
def getVariables(instanceId):
"""Get the variables in a chart instance's scope.
Commonly used to check the value of a Chart Parameter, or determine
how long the chart has been running for.
Args:
instanceId (str): The instance identifier of the chart.
Returns:
PyChartScope: A python dictionary of variables. Step scopes for
active steps are found under the "activeSteps" key.
"""
print(instanceId)
return PyChartScope()
def pauseChart(instanceId):
"""Pauses a running chart instance.
Any running steps will be told to pause, and the chart will enter
Pausing state.
Args:
instanceId (str): The ID of the chart instance to pause.
"""
print(instanceId)
def redundantCheckpoint(instanceId):
"""Synchronizes chart and step variables of the specified chart
instance across a redundant cluster, allowing the chart instance to
continue where it left off if a redundant failover occurs.
Args:
instanceId (str): The instance identifier of the chart.
"""
print(instanceId)
def resumeChart(instanceId):
"""Resumes a chart that was paused.
Steps which were previously paused will be resumed, and chart will
enter Resuming state.
Args:
instanceId (str): The ID of the chart instance to resume.
Raises:
KeyError: If the ID does not match any running chart instance.
"""
if not instanceId:
raise KeyError("Invalid UUID string: {}".format(instanceId))
def setVariable(instanceId, stepId, variableName, variableValue):
"""Sets a variable inside a currently running chart.
Args:
instanceId (str): The instance identifier of the chart.
stepId (str): The id for a step inside of a chart. If omitted
the function will target a chart scoped variable.
variableName (str): The name of the variable to set.
variableValue (object): The value for the variable to be set to.
"""
print(instanceId, stepId, variableName, variableValue)
def setVariables(instanceId, stepId, variableMap):
"""Sets any number of variables inside a currently running chart.
Args:
instanceId (str): The instance identifier of the chart.
stepId (str): The id for a step inside of a chart. If omitted
the function will target a chart scoped variable.
variableMap (dict): A dictionary containing the name:value pairs
of the variables to set.
"""
print(instanceId, stepId, variableMap)
def startChart(path, chartPath, arguments):
"""Starts a new instance of a chart.
The chart must be set to "Callable" execution mode.
Args:
path (str): The path to the chart, for example:
"ChartFolder/ChartName".
chartPath (str): The path to the chart, for example
"ChartFolder/ChartName"
arguments (dict): A dictionary of arguments. Each key-value pair
in the dictionary becomes a variable in the chart scope and
will override any default.
Returns:
str: The unique ID of this chart.
"""
print(path, chartPath, arguments)
return "UUID"
| 28.69186 | 72 | 0.678825 |
7946007bedaf13d4058fd67635c89032d0c7199b | 1,732 | py | Python | elastalert/alerters/sns.py | perceptron01/elastalert2 | bb91ecdb03dedda207237ca83d628fd5d40d29c6 | [
"Apache-2.0"
] | 250 | 2021-04-24T18:06:30.000Z | 2022-03-31T04:37:47.000Z | elastalert/alerters/sns.py | perceptron01/elastalert2 | bb91ecdb03dedda207237ca83d628fd5d40d29c6 | [
"Apache-2.0"
] | 129 | 2021-04-24T17:09:50.000Z | 2022-03-29T08:52:14.000Z | elastalert/alerters/sns.py | perceptron01/elastalert2 | bb91ecdb03dedda207237ca83d628fd5d40d29c6 | [
"Apache-2.0"
] | 128 | 2021-04-25T15:20:34.000Z | 2022-03-31T04:37:49.000Z | import boto3
from elastalert.alerts import Alerter
from elastalert.util import elastalert_logger, EAException
class SnsAlerter(Alerter):
""" Send alert using AWS SNS service """
required_options = frozenset(['sns_topic_arn'])
def __init__(self, *args):
super(SnsAlerter, self).__init__(*args)
self.sns_topic_arn = self.rule.get('sns_topic_arn', None)
self.sns_aws_access_key_id = self.rule.get('sns_aws_access_key_id')
self.sns_aws_secret_access_key = self.rule.get('sns_aws_secret_access_key')
self.sns_aws_region = self.rule.get('sns_aws_region', 'us-east-1')
self.profile = self.rule.get('sns_aws_profile', None)
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
return subject
def alert(self, matches):
body = self.create_alert_body(matches)
try:
if self.profile is None:
session = boto3.Session(
aws_access_key_id=self.sns_aws_access_key_id,
aws_secret_access_key=self.sns_aws_access_key_id,
region_name=self.sns_aws_region
)
else:
session = boto3.Session(profile_name=self.profile)
sns_client = session.client('sns')
sns_client.publish(
TopicArn=self.sns_topic_arn,
Message=body,
Subject=self.create_title(matches)
)
except Exception as e:
raise EAException("Error sending Amazon SNS: %s" % e)
elastalert_logger.info("Sent Amazon SNS notification to %s" % (self.sns_topic_arn))
def get_info(self):
return {'type': 'sns'}
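# Illustrative rule snippet (editor's sketch, not from the original file): the
# option keys below are exactly the ones read in __init__ above; the ARN,
# profile and region values are placeholders.
#
# rule = {
#     "name": "example-rule",
#     "sns_topic_arn": "arn:aws:sns:us-east-1:123456789012:my-topic",
#     "sns_aws_profile": "default",   # or set sns_aws_access_key_id / sns_aws_secret_access_key
#     "sns_aws_region": "us-east-1",
# }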
| 36.083333 | 91 | 0.625289 |
7946012c98d27b6de6d23476b1b447e633831fa0 | 348 | py | Python | tests/test_storc.py | cdbethune/d3m-primitives | 5530da1b8efba7de8cec6890401c5d4091acd45a | [
"MIT"
] | null | null | null | tests/test_storc.py | cdbethune/d3m-primitives | 5530da1b8efba7de8cec6890401c5d4091acd45a | [
"MIT"
] | null | null | null | tests/test_storc.py | cdbethune/d3m-primitives | 5530da1b8efba7de8cec6890401c5d4091acd45a | [
"MIT"
] | null | null | null | from kf_d3m_primitives.clustering.k_means.storc_pipeline import StorcPipeline
def _test_fit_produce(dataset):
pipeline = StorcPipeline()
pipeline.write_pipeline()
pipeline.fit_produce(dataset)
pipeline.delete_pipeline()
def test_fit_produce_dataset_chlorine():
_test_fit_produce('66_chlorineConcentration_MIN_METADATA')
| 26.769231 | 77 | 0.804598 |
794601e9665c61f862ce6856c2f2fbfdc14db675 | 422 | py | Python | tests/functional/s3/test_fileformat.py | datavaluepeople/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 12 | 2019-04-30T16:07:42.000Z | 2021-12-08T08:02:09.000Z | tests/functional/s3/test_fileformat.py | octoenergy/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 74 | 2019-04-25T11:18:22.000Z | 2022-01-18T11:31:14.000Z | tests/functional/s3/test_fileformat.py | datavaluepeople/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 4 | 2019-05-05T13:13:21.000Z | 2022-01-14T00:33:07.000Z | import pickle
import tentaclio
def test_pickle(fixture_client):
expected = """
This is a highly convoluted test,
with multiple output...
encountered.
"""
with tentaclio.open("s3://hostname/data.pickle", mode="wb") as f:
pickle.dump(expected, f)
with tentaclio.open("s3://hostname/data.pickle", mode="rb") as f:
retrieved = pickle.load(f)
assert expected == retrieved
| 20.095238 | 69 | 0.646919 |
79460435b15592e5297c2fa1437be47f0f19bc59 | 980 | py | Python | src/second_month/task_2_3.py | NareTorosyan/Python_Introduction_to_Data_Science | 5912ab8ddb147c85f3a798aa9a1ee01aa8a97c40 | [
"Apache-2.0"
] | null | null | null | src/second_month/task_2_3.py | NareTorosyan/Python_Introduction_to_Data_Science | 5912ab8ddb147c85f3a798aa9a1ee01aa8a97c40 | [
"Apache-2.0"
] | null | null | null | src/second_month/task_2_3.py | NareTorosyan/Python_Introduction_to_Data_Science | 5912ab8ddb147c85f3a798aa9a1ee01aa8a97c40 | [
"Apache-2.0"
] | null | null | null | import numpy as np
# 1. Write a program to find the maximum and minimum values of a multidimensional NumPy array
def max_min(array):
return np.max(array), np.min(array)
# 2. Write a program to find the maximum and minimum values of the second column of a multidimensional NumPy array
def max_min_by_2nd_column(array):
    return np.max(array[:, 1]), np.min(array[:, 1])
# 3. Write a program to find the median of a multidimensional NumPy array
arr = np.arange(1, 12, 2).reshape(2, 3)
def median(array):
return np.median(array)
# 4. Create one-dimensional and multidimensional NumPy arrays and multiply them with each other
def mul(array1,array2):
arr1_reshaped = array1.reshape((3, 2))
return np.dot(arr1_reshaped,array2)
def main():
arr = np.arange(1, 12, 2).reshape(2, 3)
arr1 = np.array([1, 2, 3, 4, 5, 6])
arr2 = np.array([[1, 2, 3], [8, 9, 10]])
print(max_min(arr))
print(max_min_by_2nd_column(arr))
print(median(arr))
print(mul(arr1,arr2))
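# Worked expectations for the example arrays above (editor's note, computed by hand):
#   max_min(arr)    -> (11, 1)      because arr == [[1, 3, 5], [7, 9, 11]]
#   median(arr)     -> 6.0
#   mul(arr1, arr2) -> [[17 20 23]
#                       [35 42 49]
#                       [53 64 75]]  (arr1 reshaped to 3x2, dotted with the 2x3 arr2)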
main() | 30.625 | 107 | 0.704082 |
794604577aedacd9ff97518decdbc9f421ef7e0c | 204 | py | Python | Student.py | Waine2ge/example | 8d210b4d387025a193626961c4b2e018c988ba1d | [
"Apache-2.0"
] | null | null | null | Student.py | Waine2ge/example | 8d210b4d387025a193626961c4b2e018c988ba1d | [
"Apache-2.0"
] | null | null | null | Student.py | Waine2ge/example | 8d210b4d387025a193626961c4b2e018c988ba1d | [
"Apache-2.0"
] | null | null | null | class Student:
def __init__(self, name, major, gpa, is_on_probation):
self.name = name
self.major = major
self.gpa = gpa
self.is_on_probation = is_on_probation
| 25.5 | 59 | 0.607843 |
794606ff07ee44c194b4226bd3facc03c552150b | 28,862 | py | Python | lib-python/conftest.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | [
"MIT"
] | null | null | null | lib-python/conftest.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | [
"MIT"
] | null | null | null | lib-python/conftest.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | [
"MIT"
] | 1 | 2022-03-30T11:42:37.000Z | 2022-03-30T11:42:37.000Z | """
test configuration(s) for running CPython's regression
test suite on top of PyPy
"""
import py
import pytest
import sys
import re
import pypy
from pypy.interpreter.gateway import ApplevelClass
from pypy.interpreter.error import OperationError
from pypy.interpreter.module import Module as PyPyModule
from pypy.interpreter.main import run_string, run_file
# the following adds command line options as a side effect!
from pypy.conftest import option as pypy_option
from pypy.tool.pytest import appsupport
from pypy.tool.pytest.confpath import pypydir, testdir, testresultdir
from rpython.config.parse import parse_info
pytest_plugins = "resultlog",
rsyncdirs = ['.', '../pypy/']
#
# Interfacing/Integrating with py.test's collection process
#
def pytest_addoption(parser):
group = parser.getgroup("complicance testing options")
group.addoption('-T', '--timeout', action="store", type="string",
default="1000", dest="timeout",
help="fail a test module after the given timeout. "
"specify in seconds or 'NUMmp' aka Mega-Pystones")
group.addoption('--pypy', action="store", type="string", dest="pypy",
help="use given pypy executable to run lib-python tests. "
"This will run the tests directly (i.e. not through py.py)")
group.addoption('--filter', action="store", type="string", default=None,
dest="unittest_filter", help="Similar to -k, XXX")
def gettimeout(timeout):
from rpython.translator.test import rpystone
if timeout.endswith('mp'):
megapystone = float(timeout[:-2])
        t, stone = rpystone.Proc0(10000)
pystonetime = t/stone
seconds = megapystone * 1000000 * pystonetime
return seconds
return float(timeout)
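# Editor's illustration of the two accepted -T/--timeout forms:
#   gettimeout("1000")  -> 1000.0 seconds
#   gettimeout("5mp")   -> the wall-clock time this machine needs for roughly
#                          5 million pystones, measured via rpystone.Proc0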
# ________________________________________________________________________
#
# classification of all tests files (this is ongoing work)
#
class RegrTest:
""" Regression Test Declaration."""
def __init__(self, basename, core=False, compiler=None, usemodules='',
skip=None):
self.basename = basename
self._usemodules = usemodules.split() + [
'_socket', 'binascii', 'time',
'select', 'signal', ]
if not sys.platform == 'win32':
self._usemodules.extend(['_posixsubprocess', 'fcntl'])
self._compiler = compiler
self.core = core
self.skip = skip
assert self.getfspath().check(), "%r not found!" % (basename,)
def usemodules(self):
return self._usemodules # + pypy_option.usemodules
usemodules = property(usemodules)
def compiler(self):
return self._compiler # or pypy_option.compiler
compiler = property(compiler)
def ismodified(self):
#XXX: ask hg
return None
def getfspath(self):
return testdir.join(self.basename)
def run_file(self, space):
fspath = self.getfspath()
assert fspath.check()
modname = fspath.purebasename
space.appexec([], '''():
from test import %(modname)s
m = %(modname)s
if hasattr(m, 'test_main'):
m.test_main()
''' % locals())
testmap = [
RegrTest('test___all__.py', core=True),
RegrTest('test___future__.py', core=True),
RegrTest('test__locale.py', usemodules='_locale'),
RegrTest('test__opcode.py'),
RegrTest('test__osx_support.py'),
RegrTest('test__xxsubinterpreters.py'),
RegrTest('test_abc.py'),
RegrTest('test_abstract_numbers.py'),
RegrTest('test_aifc.py'),
RegrTest('test_argparse.py', usemodules='binascii'),
RegrTest('test_array.py', core=True, usemodules='struct array binascii'),
RegrTest('test_asdl_parser.py'),
RegrTest('test_ast.py', core=True, usemodules='struct'),
RegrTest('test_asyncgen.py'),
RegrTest('test_asynchat.py', usemodules='select fcntl'),
RegrTest('test_asyncio'),
RegrTest('test_asyncore.py', usemodules='select fcntl'),
RegrTest('test_atexit.py', core=True),
RegrTest('test_audioop.py'),
RegrTest('test_audit.py'),
RegrTest('test_augassign.py', core=True),
RegrTest('test_base64.py', usemodules='struct'),
RegrTest('test_baseexception.py'),
RegrTest('test_bdb.py'),
RegrTest('test_bigaddrspace.py'),
RegrTest('test_bigmem.py'),
RegrTest('test_binascii.py', usemodules='binascii'),
RegrTest('test_binhex.py'),
RegrTest('test_binop.py', core=True),
RegrTest('test_bisect.py', core=True, usemodules='_bisect'),
RegrTest('test_bool.py', core=True),
RegrTest('test_buffer.py', core=True),
RegrTest('test_bufio.py', core=True),
RegrTest('test_builtin.py', core=True, usemodules='binascii'),
RegrTest('test_bytes.py', usemodules='struct binascii'),
RegrTest('test_bz2.py', usemodules='bz2'),
RegrTest('test_c_locale_coercion.py'),
RegrTest('test_calendar.py'),
RegrTest('test_call.py', core=True),
RegrTest('test_capi.py', usemodules='cpyext'),
RegrTest('test_cgi.py'),
RegrTest('test_cgitb.py'),
RegrTest('test_charmapcodec.py', core=True),
RegrTest('test_check_c_globals.py'),
RegrTest('test_class.py', core=True),
RegrTest('test_clinic.py'),
RegrTest('test_cmath.py', core=True),
RegrTest('test_cmd.py'),
RegrTest('test_cmd_line.py'),
RegrTest('test_cmd_line_script.py'),
RegrTest('test_code.py', core=True),
RegrTest('test_code_module.py'),
RegrTest('test_codeccallbacks.py', core=True),
RegrTest('test_codecencodings_cn.py', usemodules='_multibytecodec'),
RegrTest('test_codecencodings_hk.py', usemodules='_multibytecodec'),
RegrTest('test_codecencodings_iso2022.py', usemodules='_multibytecodec'),
RegrTest('test_codecencodings_jp.py', usemodules='_multibytecodec'),
RegrTest('test_codecencodings_kr.py', usemodules='_multibytecodec'),
RegrTest('test_codecencodings_tw.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_cn.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_hk.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_jp.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_kr.py', usemodules='_multibytecodec'),
RegrTest('test_codecmaps_tw.py', usemodules='_multibytecodec'),
RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec struct unicodedata array'),
RegrTest('test_codeop.py', core=True),
RegrTest('test_collections.py', usemodules='binascii struct'),
RegrTest('test_colorsys.py'),
RegrTest('test_compare.py', core=True),
RegrTest('test_compile.py', core=True),
RegrTest('test_compileall.py'),
RegrTest('test_complex.py', core=True),
RegrTest('test_concurrent_futures.py', skip="XXX: deadlocks" if sys.platform == 'win32' else False),
RegrTest('test_configparser.py'),
RegrTest('test_contains.py', core=True),
RegrTest('test_context.py'),
RegrTest('test_contextlib.py', usemodules="thread"),
RegrTest('test_contextlib_async.py'),
RegrTest('test_copy.py', core=True),
RegrTest('test_copyreg.py', core=True),
RegrTest('test_coroutines.py'),
RegrTest('test_cprofile.py'),
RegrTest('test_crashers.py'),
RegrTest('test_crypt.py'),
RegrTest('test_csv.py', usemodules='_csv'),
RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"),
RegrTest('test_curses.py'),
RegrTest('test_dataclasses.py'),
RegrTest('test_datetime.py', usemodules='binascii struct'),
RegrTest('test_dbm.py'),
RegrTest('test_dbm_dumb.py'),
RegrTest('test_dbm_gnu.py'),
RegrTest('test_dbm_ndbm.py'),
RegrTest('test_decimal.py'),
RegrTest('test_decorators.py', core=True),
RegrTest('test_defaultdict.py', usemodules='_collections'),
RegrTest('test_deque.py', core=True, usemodules='_collections struct'),
RegrTest('test_descr.py', core=True, usemodules='_weakref'),
RegrTest('test_descrtut.py', core=True),
RegrTest('test_devpoll.py'),
RegrTest('test_dict.py', core=True),
RegrTest('test_dict_version.py', skip="implementation detail"),
RegrTest('test_dictcomps.py', core=True),
RegrTest('test_dictviews.py', core=True),
RegrTest('test_difflib.py'),
RegrTest('test_dis.py'),
RegrTest('test_distutils.py'),
RegrTest('test_doctest.py', usemodules="thread"),
RegrTest('test_doctest2.py'),
RegrTest('test_docxmlrpc.py'),
RegrTest('test_dtrace.py'),
RegrTest('test_dynamic.py'),
RegrTest('test_dynamicclassattribute.py'),
RegrTest('test_eintr.py'),
RegrTest('test_email'),
RegrTest('test_embed.py'),
RegrTest('test_ensurepip.py'),
RegrTest('test_enum.py'),
RegrTest('test_enumerate.py', core=True),
RegrTest('test_eof.py', core=True),
RegrTest('test_epoll.py'),
RegrTest('test_errno.py', usemodules="errno"),
RegrTest('test_exception_hierarchy.py'),
RegrTest('test_exception_variations.py'),
RegrTest('test_exceptions.py', core=True),
RegrTest('test_extcall.py', core=True),
RegrTest('test_faulthandler.py'),
RegrTest('test_fcntl.py', usemodules='fcntl'),
RegrTest('test_file.py', usemodules="posix", core=True),
RegrTest('test_file_eintr.py'),
RegrTest('test_filecmp.py', core=True),
RegrTest('test_fileinput.py', core=True),
RegrTest('test_fileio.py'),
RegrTest('test_finalization.py'),
RegrTest('test_float.py', core=True),
RegrTest('test_flufl.py'),
RegrTest('test_fnmatch.py', core=True),
RegrTest('test_fork1.py', usemodules="thread"),
RegrTest('test_format.py', core=True),
RegrTest('test_fractions.py'),
RegrTest('test_frame.py'),
RegrTest('test_frozen.py'),
RegrTest('test_fstring.py'),
RegrTest('test_ftplib.py'),
RegrTest('test_funcattrs.py', core=True),
RegrTest('test_functools.py'),
RegrTest('test_future.py', core=True),
RegrTest('test_future3.py', core=True),
RegrTest('test_future4.py', core=True),
RegrTest('test_future5.py', core=True),
RegrTest('test_gc.py', usemodules='_weakref', skip="implementation detail"),
RegrTest('test_gdb.py', skip="not applicable"),
RegrTest('test_generator_stop.py'),
RegrTest('test_generators.py', core=True, usemodules='thread _weakref'),
RegrTest('test_genericalias.py'),
RegrTest('test_genericclass.py'),
RegrTest('test_genericpath.py'),
RegrTest('test_genexps.py', core=True, usemodules='_weakref'),
RegrTest('test_getargs2.py', usemodules='binascii', skip=True),
RegrTest('test_getopt.py', core=True),
RegrTest('test_getpass.py'),
RegrTest('test_gettext.py'),
RegrTest('test_glob.py', core=True),
RegrTest('test_global.py', core=True),
RegrTest('test_grammar.py', core=True),
RegrTest('test_graphlib.py'),
RegrTest('test_grp.py'),
RegrTest('test_gzip.py', usemodules='zlib'),
RegrTest('test_hash.py', core=True),
RegrTest('test_hashlib.py', core=True),
RegrTest('test_heapq.py', core=True),
RegrTest('test_hmac.py'),
RegrTest('test_html.py'),
RegrTest('test_htmlparser.py'),
RegrTest('test_http_cookiejar.py'),
RegrTest('test_http_cookies.py'),
RegrTest('test_httplib.py'),
RegrTest('test_httpservers.py'),
RegrTest('test_idle.py'),
RegrTest('test_imaplib.py'),
RegrTest('test_imghdr.py'),
RegrTest('test_imp.py', core=True, usemodules='thread'),
RegrTest('test_import'),
RegrTest('test_importlib'),
RegrTest('test_index.py'),
RegrTest('test_inspect.py', usemodules="struct unicodedata"),
RegrTest('test_int.py', core=True),
RegrTest('test_int_literal.py', core=True),
RegrTest('test_io.py', core=True, usemodules='array binascii'),
RegrTest('test_ioctl.py'),
RegrTest('test_ipaddress.py'),
RegrTest('test_isinstance.py', core=True),
RegrTest('test_iter.py', core=True),
RegrTest('test_iterlen.py', core=True, usemodules="_collections itertools"),
RegrTest('test_itertools.py', core=True, usemodules="itertools struct"),
RegrTest('test_json'),
RegrTest('test_keyword.py'),
RegrTest('test_keywordonlyarg.py'),
RegrTest('test_kqueue.py'),
RegrTest('test_largefile.py'),
RegrTest('test_lib2to3.py'),
RegrTest('test_linecache.py'),
RegrTest('test_list.py', core=True),
RegrTest('test_listcomps.py', core=True),
RegrTest('test_lltrace.py'),
RegrTest('test_locale.py', usemodules="_locale"),
RegrTest('test_logging.py', usemodules='thread'),
RegrTest('test_long.py', core=True),
RegrTest('test_longexp.py', core=True),
RegrTest('test_lzma.py'),
RegrTest('test_macurl2path.py'),
RegrTest('test_mailbox.py'),
RegrTest('test_mailcap.py'),
RegrTest('test_marshal.py', core=True),
RegrTest('test_math.py', core=True, usemodules='math'),
RegrTest('test_memoryio.py'),
RegrTest('test_memoryview.py'),
RegrTest('test_metaclass.py', core=True),
RegrTest('test_mimetypes.py'),
RegrTest('test_minidom.py'),
RegrTest('test_mmap.py', usemodules="mmap"),
RegrTest('test_module.py', core=True),
RegrTest('test_modulefinder.py'),
RegrTest('test_msilib.py'),
RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'),
RegrTest('test_multiprocessing_fork.py'),
RegrTest('test_multiprocessing_forkserver.py'),
RegrTest('test_multiprocessing_main_handling.py'),
RegrTest('test_multiprocessing_spawn.py'),
RegrTest('test_named_expressions.py'),
RegrTest('test_netrc.py'),
RegrTest('test_nis.py'),
RegrTest('test_nntplib.py'),
RegrTest('test_ntpath.py'),
RegrTest('test_numeric_tower.py'),
RegrTest('test_opcodes.py', core=True),
RegrTest('test_openpty.py'),
RegrTest('test_operator.py', core=True),
RegrTest('test_optparse.py'),
RegrTest('test_ordered_dict.py'),
RegrTest('test_os.py', core=True),
RegrTest('test_ossaudiodev.py'),
RegrTest('test_osx_env.py'),
RegrTest('test_parser.py', skip="slowly deprecating compiler"),
RegrTest('test_pathlib.py'),
RegrTest('test_pdb.py'),
RegrTest('test_peepholer.py'),
RegrTest('test_peg_parser.py'),
RegrTest('test_pickle.py', core=True),
RegrTest('test_picklebuffer.py'),
RegrTest('test_pickletools.py', core=False),
RegrTest('test_pipes.py'),
RegrTest('test_pkg.py', core=True),
RegrTest('test_pkgutil.py'),
RegrTest('test_platform.py'),
RegrTest('test_plistlib.py'),
RegrTest('test_poll.py'),
RegrTest('test_popen.py'),
RegrTest('test_poplib.py'),
RegrTest('test_positional_only_arg.py'),
RegrTest('test_posix.py', usemodules="_rawffi"),
RegrTest('test_posixpath.py'),
RegrTest('test_pow.py', core=True),
RegrTest('test_pprint.py', core=True),
RegrTest('test_print.py', core=True),
RegrTest('test_profile.py'),
RegrTest('test_property.py', core=True),
RegrTest('test_pstats.py'),
RegrTest('test_pty.py', usemodules='fcntl termios select'),
RegrTest('test_pulldom.py'),
RegrTest('test_pwd.py', usemodules="pwd"),
RegrTest('test_py_compile.py'),
RegrTest('test_pyclbr.py'),
RegrTest('test_pydoc.py'),
RegrTest('test_pyexpat.py'),
RegrTest('test_queue.py', usemodules='thread'),
RegrTest('test_quopri.py'),
RegrTest('test_raise.py', core=True),
RegrTest('test_random.py'),
RegrTest('test_range.py', core=True),
RegrTest('test_re.py', core=True),
RegrTest('test_readline.py'),
RegrTest('test_regrtest.py'),
RegrTest('test_repl.py'),
RegrTest('test_reprlib.py', core=True),
RegrTest('test_resource.py'),
RegrTest('test_richcmp.py', core=True),
RegrTest('test_rlcompleter.py'),
RegrTest('test_robotparser.py'),
RegrTest('test_runpy.py'),
RegrTest('test_sax.py'),
RegrTest('test_sched.py'),
RegrTest('test_scope.py', core=True),
RegrTest('test_script_helper.py'),
RegrTest('test_secrets.py'),
RegrTest('test_select.py'),
RegrTest('test_selectors.py'),
RegrTest('test_set.py', core=True),
RegrTest('test_setcomps.py', core=True),
RegrTest('test_shelve.py'),
RegrTest('test_shlex.py'),
RegrTest('test_shutil.py'),
RegrTest('test_signal.py'),
RegrTest('test_site.py', core=False),
RegrTest('test_slice.py', core=True),
RegrTest('test_smtpd.py'),
RegrTest('test_smtplib.py'),
RegrTest('test_smtpnet.py'),
RegrTest('test_sndhdr.py'),
RegrTest('test_socket.py', usemodules='thread _weakref'),
RegrTest('test_socketserver.py', usemodules='thread'),
RegrTest('test_sort.py', core=True),
RegrTest('test_source_encoding.py'),
RegrTest('test_spwd.py'),
RegrTest('test_sqlite.py', usemodules="thread _rawffi zlib"),
RegrTest('test_ssl.py', usemodules='_socket select'),
RegrTest('test_startfile.py'),
RegrTest('test_stat.py'),
RegrTest('test_statistics.py'),
RegrTest('test_strftime.py'),
RegrTest('test_string.py', core=True),
RegrTest('test_string_literals.py'),
RegrTest('test_stringprep.py'),
RegrTest('test_strptime.py'),
RegrTest('test_strtod.py'),
RegrTest('test_struct.py', usemodules='struct'),
RegrTest('test_structmembers.py', skip="CPython specific"),
RegrTest('test_structseq.py'),
RegrTest('test_subclassinit.py'),
RegrTest('test_subprocess.py', usemodules='signal'),
RegrTest('test_sunau.py'),
RegrTest('test_sundry.py'),
RegrTest('test_super.py', core=True),
RegrTest('test_support.py'),
RegrTest('test_symbol.py'),
RegrTest('test_symtable.py', skip="implementation detail"),
RegrTest('test_syntax.py', core=True),
RegrTest('test_sys.py', core=True, usemodules='struct'),
RegrTest('test_sys_setprofile.py', core=True),
RegrTest('test_sys_settrace.py', core=True),
RegrTest('test_sysconfig.py'),
RegrTest('test_sysconfig_pypy.py'),
RegrTest('test_syslog.py'),
RegrTest('test_tabnanny.py'),
RegrTest('test_tarfile.py'),
RegrTest('test_tcl.py'),
RegrTest('test_telnetlib.py'),
RegrTest('test_tempfile.py'),
RegrTest('test_textwrap.py'),
RegrTest('test_thread.py', usemodules="thread", core=True),
RegrTest('test_threadedtempfile.py', usemodules="thread", core=False),
RegrTest('test_threading.py', usemodules="thread", core=True),
RegrTest('test_threading_local.py', usemodules="thread", core=True),
RegrTest('test_threadsignals.py', usemodules="thread"),
RegrTest('test_time.py', core=True, usemodules="struct thread _rawffi"),
RegrTest('test_timeit.py'),
RegrTest('test_timeout.py'),
RegrTest('test_tix.py'),
RegrTest('test_tk.py'),
RegrTest('test_tokenize.py'),
RegrTest('test_tools', skip="CPython internal details"),
RegrTest('test_trace.py'),
RegrTest('test_traceback.py', core=True),
RegrTest('test_tracemalloc.py'),
RegrTest('test_ttk_guionly.py'),
RegrTest('test_ttk_textonly.py'),
RegrTest('test_tuple.py', core=True),
RegrTest('test_turtle.py'),
RegrTest('test_type_comments.py'),
RegrTest('test_typechecks.py'),
RegrTest('test_types.py', core=True),
RegrTest('test_typing.py'),
RegrTest('test_ucn.py'),
RegrTest('test_unary.py', core=True),
RegrTest('test_unicode.py', core=True),
RegrTest('test_unicode_file.py'),
RegrTest('test_unicode_file_functions.py'),
RegrTest('test_unicode_identifiers.py'),
RegrTest('test_unicodedata.py'),
RegrTest('test_unittest.py', core=True),
RegrTest('test_univnewlines.py'),
RegrTest('test_unpack.py', core=True),
RegrTest('test_unpack_ex.py', core=True),
RegrTest('test_unparse.py'),
RegrTest('test_urllib.py'),
RegrTest('test_urllib2.py'),
RegrTest('test_urllib2_localnet.py', usemodules="thread"),
RegrTest('test_urllib2net.py'),
RegrTest('test_urllib_response.py'),
RegrTest('test_urllibnet.py'),
RegrTest('test_urlparse.py'),
RegrTest('test_userdict.py', core=True),
RegrTest('test_userlist.py', core=True),
RegrTest('test_userstring.py', core=True),
RegrTest('test_utf8_mode.py'),
RegrTest('test_utf8source.py'),
RegrTest('test_uu.py'),
RegrTest('test_uuid.py'),
RegrTest('test_venv.py', usemodules="struct"),
RegrTest('test_wait3.py', usemodules="thread"),
RegrTest('test_wait4.py', usemodules="thread"),
RegrTest('test_warnings'),
RegrTest('test_wave.py'),
RegrTest('test_weakref.py', core=True, usemodules='_weakref'),
RegrTest('test_weakset.py'),
RegrTest('test_webbrowser.py'),
RegrTest('test_winconsoleio.py'),
RegrTest('test_winreg.py'),
RegrTest('test_winsound.py'),
RegrTest('test_with.py'),
RegrTest('test_wsgiref.py'),
RegrTest('test_xdrlib.py'),
RegrTest('test_xml_dom_minicompat.py'),
RegrTest('test_xml_etree.py'),
RegrTest('test_xml_etree_c.py'),
RegrTest('test_xmlrpc.py'),
RegrTest('test_xmlrpc_net.py'),
RegrTest('test_xxtestfuzz.py', skip="CPython internal details"),
RegrTest('test_yield_from.py'),
RegrTest('test_zipapp.py'),
RegrTest('test_zipfile.py'),
RegrTest('test_zipfile64.py'),
RegrTest('test_zipimport.py', usemodules='zlib zipimport'),
RegrTest('test_zipimport_support.py', usemodules='zlib zipimport'),
RegrTest('test_zlib.py', usemodules='zlib'),
]
def check_testmap_complete():
listed_names = dict.fromkeys([regrtest.basename for regrtest in testmap])
assert len(listed_names) == len(testmap)
# names to ignore
listed_names['test_support.py'] = True
listed_names['test_multibytecodec_support.py'] = True
missing = []
for path in testdir.listdir(fil='test_*.py'):
name = path.basename
if name not in listed_names:
missing.append(' RegrTest(%r),' % (name,))
missing.sort()
assert not missing, "non-listed tests:\n%s" % ('\n'.join(missing),)
check_testmap_complete()
def pytest_configure(config):
config._basename2spec = cache = {}
for x in testmap:
cache[x.basename] = x
def pytest_ignore_collect(path, config):
if path.basename == '__init__.py':
return False
if path.isfile():
regrtest = config._basename2spec.get(path.basename, None)
if regrtest is None or path.dirpath() != testdir:
return True
def pytest_collect_file(path, parent):
if path.basename == '__init__.py':
# handle the RegrTest for the whole subpackage here
pkg_path = path.dirpath()
regrtest = parent.config._basename2spec.get(pkg_path.basename, None)
if pkg_path.dirpath() == testdir and regrtest:
return RunFileExternal(
pkg_path.basename, parent=parent, regrtest=regrtest)
@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makemodule(path, parent):
config = parent.config
regrtest = config._basename2spec[path.basename]
return RunFileExternal(path.basename, parent=parent, regrtest=regrtest)
class RunFileExternal(py.test.collect.File):
def __init__(self, name, parent, regrtest):
super(RunFileExternal, self).__init__(name, parent)
self.regrtest = regrtest
self.fspath = regrtest.getfspath()
def collect(self):
if self.regrtest.ismodified():
name = 'modified'
else:
name = 'unmodified'
return [ReallyRunFileExternal(name, parent=self)]
#
# testmethod:
# invoking in a separate process: py.py TESTFILE
#
import os
class ReallyRunFileExternal(py.test.collect.Item):
class ExternalFailure(Exception):
"""Failure in running subprocess"""
def getinvocation(self, regrtest):
fspath = regrtest.getfspath()
python = sys.executable
pypy_script = pypydir.join('bin', 'pyinteractive.py')
alarm_script = pypydir.join('tool', 'alarm.py')
if sys.platform == 'win32':
watchdog_name = 'watchdog_nt.py'
else:
watchdog_name = 'watchdog.py'
watchdog_script = pypydir.join('tool', watchdog_name)
regr_script = pypydir.join('tool', 'pytest',
'run-script', 'regrverbose.py')
regrrun = str(regr_script)
option = self.config.option
TIMEOUT = gettimeout(option.timeout.lower())
if option.pypy:
execpath = py.path.local(option.pypy)
if not execpath.check():
execpath = py.path.local.sysfind(option.pypy)
if not execpath:
raise LookupError("could not find executable %r" % option.pypy)
# check modules
info = py.process.cmdexec("%s --info" % execpath)
info = parse_info(info)
for mod in regrtest.usemodules:
if info.get('objspace.usemodules.%s' % mod) is not True:
py.test.skip("%s module not included in %s" % (mod,
execpath))
cmd = "%s -m test -v %s" % (execpath, fspath.purebasename)
# add watchdog for timing out
cmd = "%s %s %s %s" % (python, watchdog_script, TIMEOUT, cmd)
else:
pypy_options = []
pypy_options.extend(
['--withmod-%s' % mod for mod in regrtest.usemodules])
sopt = " ".join(pypy_options)
cmd = "%s %s %d %s -S %s %s %s -v" % (
python, alarm_script, TIMEOUT,
pypy_script, sopt,
regrrun, fspath.purebasename)
return cmd
def runtest(self):
""" invoke a subprocess running the test file via PyPy.
record its output into the 'result/user@host' subdirectory.
(we might want to create subdirectories for
each user, because we will probably all produce
such result runs and they will not be the same
        I am afraid.)
"""
regrtest = self.parent.regrtest
if regrtest.skip:
if regrtest.skip is True:
msg = "obsolete or unsupported platform"
else:
msg = regrtest.skip
py.test.skip(msg)
(skipped, exit_status, test_stdout, test_stderr) = \
self.getresult(regrtest)
if skipped:
py.test.skip(test_stderr.splitlines()[-1])
if exit_status:
raise self.ExternalFailure(test_stdout, test_stderr)
def repr_failure(self, excinfo):
if not excinfo.errisinstance(self.ExternalFailure):
return super(ReallyRunFileExternal, self).repr_failure(excinfo)
out, err = excinfo.value.args
return out + err
def getstatusouterr(self, cmd):
tempdir = py.test.ensuretemp(self.fspath.basename)
stdout = tempdir.join(self.fspath.basename) + '.out'
stderr = tempdir.join(self.fspath.basename) + '.err'
if sys.platform == 'win32':
status = os.system("%s >%s 2>%s" % (cmd, stdout, stderr))
if status >= 0:
status = status
else:
status = 'abnormal termination 0x%x' % status
else:
if self.config.option.unittest_filter is not None:
cmd += ' --filter %s' % self.config.option.unittest_filter
if self.config.option.usepdb:
cmd += ' --pdb'
if self.config.option.capture == 'no':
status = os.system(cmd)
stdout.write('')
stderr.write('')
else:
status = os.system("%s >>%s 2>>%s" % (cmd, stdout, stderr))
if os.WIFEXITED(status):
status = os.WEXITSTATUS(status)
else:
status = 'abnormal termination 0x%x' % status
return status, stdout.read(mode='rU'), stderr.read(mode='rU')
def getresult(self, regrtest):
cmd = self.getinvocation(regrtest)
tempdir = py.test.ensuretemp(self.fspath.basename)
oldcwd = tempdir.chdir()
exit_status, test_stdout, test_stderr = self.getstatusouterr(cmd)
oldcwd.chdir()
skipped = False
timedout = test_stderr.rfind(26*"=" + "timedout" + 26*"=") != -1
if not timedout:
timedout = test_stderr.rfind("KeyboardInterrupt") != -1
if test_stderr.rfind(26*"=" + "skipped" + 26*"=") != -1:
skipped = True
if not exit_status:
# match "FAIL" but not e.g. "FAILURE", which is in the output of a
# test in test_zipimport_support.py
if re.search(r'\bFAIL\b', test_stdout) or re.search('[^:]ERROR', test_stderr):
exit_status = 2
return skipped, exit_status, test_stdout, test_stderr
def _keywords(self):
lst = list(py.test.collect.Item._keywords(self))
regrtest = self.parent.regrtest
if regrtest.core:
lst.append('core')
return lst
| 40.030513 | 104 | 0.65983 |
7946073a8994e0c05f717aaac9ef034a00662aff | 1,911 | py | Python | old-approach/vgg19.py | Addi-11/Neural_Style_Transfer | 7570eb7deaaea5c18f58e908ac94319d87b3934d | [
"MIT"
] | null | null | null | old-approach/vgg19.py | Addi-11/Neural_Style_Transfer | 7570eb7deaaea5c18f58e908ac94319d87b3934d | [
"MIT"
] | 1 | 2020-07-17T22:59:06.000Z | 2020-07-17T22:59:06.000Z | old-approach/vgg19.py | Addi-11/Neural_Style_Transfer | 7570eb7deaaea5c18f58e908ac94319d87b3934d | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import scipy.io
import scipy.misc
from utils import CONFIG
class VGG19:
vgg = scipy.io.loadmat(CONFIG.PRE_TRAINED_PATH)
vgg_layers = vgg['layers']
def _weights(self, layer, name):
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
wb = self.vgg_layers[0][layer][0][0][2]
W = wb[0][0]
b = wb[0][1]
return W, b
def _relu(self, conv2d_layer):
return tf.nn.relu(conv2d_layer)
def _conv2d(self, prev_layer, layer, layer_name):
W, b = self._weights(layer,layer_name)
W = tf.constant(W)
b = tf.constant(np.reshape(b, (b.size)))
stride = [1,1,1,1]
padding = 'SAME'
return tf.nn.conv2d(prev_layer, filters=W, strides=stride, padding=padding) + b
def _conv2d_relu(self, prev_layer, layer, layer_name):
return self._relu(self._conv2d(prev_layer, layer, layer_name))
def _avgpool(self, prev_layer):
padding = 'SAME'
stride = [1,2,2,1]
return tf.nn.avg_pool(prev_layer, ksize=[1,2,2,1], strides=stride, padding=padding)
def load_vgg_model(self):
# Constructing a graph model
# we are doing this to replace the maxpool layers of VGG19 with avg pool layers
graph = {}
graph['input'] = tf.Variable(np.zeros((1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)), dtype = 'float32')
prev_layer = 'input'
        # layers to be added in our model
for layer_num in range(self.vgg_layers.shape[1] - 6):
layer_name = self.vgg_layers[0][layer_num][0][0][0][0]
layer_type = layer_name[:4]
if layer_type == 'relu':
continue
elif layer_type == 'conv':
graph[layer_name] = self._conv2d_relu(graph[prev_layer], layer_num, layer_name)
elif layer_type == 'pool':
graph['avg'+layer_name] = self._avgpool(graph[prev_layer])
layer_name = 'avg'+layer_name
prev_layer = layer_name
return graph
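# Minimal usage sketch (editor's addition): how the graph might be driven for
# style transfer. The preprocessed `image` tensor and the choice of layers are
# assumptions, not part of this file.
#
# model = VGG19().load_vgg_model()
# model['input'].assign(image)              # image: (1, H, W, 3) float32
# content_activations = model['conv4_2']    # conv layer names follow the
# style_activations = [model['conv1_1'],    # matconvnet VGG-19 naming
#                      model['conv2_1']]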
| 27.695652 | 128 | 0.701727 |
794607ae03df301f89cd93bb71468bbe14414272 | 5,685 | py | Python | Sudoku/sudoku.py | ancientHacker/sudoku-generator | a1b395cef2cc699951e17e56fc699df83309d5e1 | [
"MIT"
] | 4 | 2020-03-29T20:10:52.000Z | 2021-01-04T07:46:21.000Z | Sudoku/sudoku.py | ancientHacker/sudoku-generator | a1b395cef2cc699951e17e56fc699df83309d5e1 | [
"MIT"
] | null | null | null | Sudoku/sudoku.py | ancientHacker/sudoku-generator | a1b395cef2cc699951e17e56fc699df83309d5e1 | [
"MIT"
] | 6 | 2019-07-05T14:54:42.000Z | 2022-01-28T10:34:05.000Z | # MIT License
#
# Copyright (c) 2018-2019 Daniel Brotsky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import sys
import time
import click
from .board import Board
from .generator import Generator
from .norvig_solver import Solver
@click.group()
@click.pass_context
@click.option('-v', '--verbose', count=True,
help="print puzzle details then content to stderr")
@click.option('-o', '--output', type=click.Choice(['json', 'html', 'ascii']),
default='json', show_default=True,
help="format for writing results to stdout")
def sudoku(ctx: click.Context, verbose: int, output: str):
ctx.ensure_object(dict)
ctx.obj['verbose'] = verbose
ctx.obj['output'] = output
@sudoku.command()
@click.option('-s', '--sidelen', type=click.Choice(["4", "9", "16"]),
required=True,
help="Desired puzzle side length (4, 9, or 16).")
@click.option('-d', '--difficulty',
type=click.Choice(Generator.thresholds.keys()),
default='easy', show_default=True,
help="Desired difficulty of the generated puzzle.")
@click.option('-c', '--count', type=click.IntRange(min=1),
default=1, show_default=True,
help="How many puzzles to generate.")
@click.pass_context
def generate(ctx: click.Context, sidelen, difficulty, count):
"""Generate one or more Sudoku puzzles.
You can specify the size and difficulty of the puzzles."""
verbose = ctx.obj['verbose']
output = ctx.obj['output']
outputs = []
gen = Generator(sidelen, difficulty)
start = time.time()
for iteration in range(1, count + 1):
result = gen.generate_one()
outputs.append(result)
end = time.time()
if output == 'json':
print(json.dumps([dict(puzzle=result['puzzle'].values(),
solution=result['solution'].values())
for result in outputs]))
else:
for iteration, result in enumerate(outputs, 1):
if output == 'html':
print("Puzzle #{0}:\n{1}\nSolution #{0}:\n{2}\n"
.format(iteration, result['puzzle'].html(), result['solution'].html()), file=sys.stderr)
elif output == 'ascii':
print("Puzzle #{0}:\n{1}\nSolution #{0}:\n{2}\n"
.format(iteration, result['puzzle'].ascii(), result['solution'].ascii()), file=sys.stderr)
if verbose:
print("Summary statistics:", file=sys.stderr)
puzzle_str = "puzzles that are" if count > 1 else "puzzle that is"
print("Generated {3} '{0}' {2} {1}x{1}."
.format(difficulty, sidelen, puzzle_str, count),
file=sys.stderr)
for index, result in enumerate(outputs, 1):
board = result['puzzle']
empty = len(board.get_empty_cells())
filled = len(board.get_filled_cells())
total = board.size
print("Puzzle {}: Empty={} ({:.0%}), Filled={} ({:.0%})."
.format(index,
empty, empty / total,
filled, filled / total),
file=sys.stderr)
print("Generation time: {:.1f} seconds total ({:.1f} secs/puzzle)."
.format(end - start, (end - start) / count),
file=sys.stderr)
@sudoku.command()
@click.argument('infile', type=click.File(), required=True)
@click.pass_context
def solve(ctx: click.Context, infile):
"""Solve the puzzle whose values are specified in INFILE (or - for stdin).
The file format is whitespace-separated cell values, filled left-to-right top-to-bottom.
You can use multiple lines in the file, one row per line, but linebreaks are not required.
    Each value is a digit 1-9/A-G. The puzzle size is inferred from the number of cells.
"""
verbose = ctx.obj['verbose']
output = ctx.obj['output']
if infile is None:
raise ValueError("You must specify a puzzle to solve.")
board = Board.from_file(infile)
if verbose:
print("Puzzle before solving is:\n\n{}".format(board), file=sys.stderr)
solver = Solver(board)
if solver.can_solve():
solution = solver.solution
if verbose:
print("Puzzle after solving is:\n\n{}".format(solution), file=sys.stderr)
if output == 'html':
print(solution.html())
elif output == 'ascii':
print(solution)
else:
print(json.dumps(dict(puzzle=board.values(), solution=solution.values())))
else:
raise ValueError("Puzzle cannot be solved")
if __name__ == '__main__':
sudoku(obj={})
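# Example invocations (editor's sketch; the exact module path depends on how the
# package is installed, `python -m Sudoku.sudoku` is assumed here):
#
#   python -m Sudoku.sudoku -o ascii generate -s 9 -d easy -c 2
#   python -m Sudoku.sudoku -v solve puzzle.txt
#   python -m Sudoku.sudoku solve -      # read the puzzle from stdin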
| 41.801471 | 112 | 0.625154 |
794608b7f3d6009a330d04a38e8055c191b3981d | 3,706 | py | Python | p2ptracker/tests/test_torrents.py | TMG-nl/p2ptracker | 0e6e77eac77de3fa4f15443920bc6f6886b129b4 | [
"MIT"
] | 5 | 2015-04-29T04:55:21.000Z | 2017-10-27T08:51:56.000Z | p2ptracker/tests/test_torrents.py | TMG-nl/p2ptracker | 0e6e77eac77de3fa4f15443920bc6f6886b129b4 | [
"MIT"
] | null | null | null | p2ptracker/tests/test_torrents.py | TMG-nl/p2ptracker | 0e6e77eac77de3fa4f15443920bc6f6886b129b4 | [
"MIT"
] | null | null | null | """
https://github.com/hyves-org/p2ptracker
Copyright (c) 2011, Ramon van Alteren
MIT license: http://www.opensource.org/licenses/MIT
"""
from __future__ import with_statement
from flaskext.testing import TestCase
from p2ptracker import create_app
from p2ptracker.tests.helpers import utils
import os
import logging
import redis
import shutil
REMOVE_LOG=False
SCRIPTDIR = os.path.dirname(__file__)
log = logging.getLogger('hyves.%s' % __name__)
class TestTorrents(TestCase):
'''So test the torrent methods'''
def create_app(self):
return create_app()
def setUp(self):
r = redis.Redis(host=self.app.config['REDISHOST'], port=self.app.config['REDISPORT'])
r.ping()
r.flushdb()
def tearDown(self):
if os.path.exists('p2ptracker.log') and REMOVE_LOG:
os.remove('p2ptracker.log')
if os.path.exists(self.app.config['UPLOAD_PATH']):
shutil.rmtree(self.app.config['UPLOAD_PATH'])
r = redis.Redis(host=self.app.config['REDISHOST'], port=self.app.config['REDISPORT'])
r.ping()
r.flushdb()
# Actual test methods
def test_empty_torrents(self):
'''Test that we get an empty dict if no torrent files have been registered'''
url = "/torrents/"
resp = self.client.get(url)
self.assert200(resp)
self.assertEquals(resp.json, dict())
def test_post_torrent_falsefilename(self):
resp = utils.post_torrentfile(self.client, 'ramon.txt', None)
self.assert_status(resp, 501)
def test_post_torrent_correctfilename(self):
filename = '%s/test.torrent' % SCRIPTDIR
try:
file = open(filename, 'r')
resp = utils.post_torrentfile(self.client, filename, file)
self.assert200(resp)
finally:
file.close()
def test_nonempty_torrents_after_post(self):
'''test if we get a non-empty list back after we posted a torrent file'''
try:
filename = '%s/test.torrent' % SCRIPTDIR
file = open(filename, 'r')
utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
resp = self.client.get('torrents/')
self.assert200(resp)
assert(len(resp.json.keys()) > 0)
self.assertEquals(resp.json.keys(), ['test.torrent'])
def test_retrieve_torrentfile(self):
try:
filename = '%s/test.torrent' % SCRIPTDIR
file = open(filename, 'r')
utils.post_torrentfile(self.client, filename, file)
finally:
file.close()
resp = utils.get_torrentfile(self.client, '%s/test.torrent' % SCRIPTDIR)
self.assert200(resp)
self.assertEquals(resp.data, open(filename, 'r').read())
def test_delete_nonexistant_torrentfile(self):
'''Make sure a torrentfile is actually deleted'''
resp = utils.get_torrentfile(self.client, 'nonexistant.torrent')
self.assert404(resp)
def test_delete_existing_torrentfile(self):
'''Delete a torrent file from the tracker'''
try:
filename = '%s/test.torrent' % SCRIPTDIR
file = open(filename, 'r')
resp = utils.post_torrentfile(self.client, filename, file)
log.debug('resp on post: %s' % resp)
finally:
file.close()
with self.app.test_client() as c:
resp = c.delete('torrents/test.torrent')
log.debug('delete resp: %s' % resp)
log.debug('delet resp data: %s' % resp.data)
self.assert200(resp)
resp = utils.get_torrentfile(self.client, filename)
self.assert404(resp)
| 33.690909 | 93 | 0.621425 |
79460988779aab163b4ca21f92f2794eb5abe65d | 676 | py | Python | MTG/combat.py | wanqizhu/mtg-python-engine | fb68edd9f7a99643c30f70042caf9513cf71493d | [
"MIT"
] | 25 | 2017-11-15T18:08:24.000Z | 2021-11-20T07:11:25.000Z | MTG/combat.py | wanqizhu/mtg-python-engine | fb68edd9f7a99643c30f70042caf9513cf71493d | [
"MIT"
] | 1 | 2020-08-03T03:04:27.000Z | 2020-08-19T14:53:49.000Z | MTG/combat.py | wanqizhu/mtg-python-engine | fb68edd9f7a99643c30f70042caf9513cf71493d | [
"MIT"
] | 7 | 2018-03-04T06:56:53.000Z | 2022-02-03T20:11:33.000Z | # def fight(creature1, creature2):
# creature1.deals_damage(creature2, creature1.power)
# creature2.deals_damage(creature1, creature2.power)
def check_valid_attack(attacker):
# TODO: can't attack alone
return True
def check_valid_block(attacker, defender):
for creature in defender.battlefield:
if creature.is_creature and creature.status.is_blocking:
if not creature.can_block(creature.status.is_blocking):
return False
# TODO: can't block alone
# for creature in attacker.battlefield:
# if creature.is_creature:
# target = creature.status.is_attacking
# if target
return True
| 30.727273 | 67 | 0.692308 |
794609ed83551b7f89023dd3f787f6b99c885912 | 1,967 | py | Python | regreg/affine/tests/test_fused_lasso.py | matthew-brett/regreg | 8a10a79cbaf771c2a6d70e8094ab753ec075aab7 | [
"BSD-3-Clause"
] | null | null | null | regreg/affine/tests/test_fused_lasso.py | matthew-brett/regreg | 8a10a79cbaf771c2a6d70e8094ab753ec075aab7 | [
"BSD-3-Clause"
] | null | null | null | regreg/affine/tests/test_fused_lasso.py | matthew-brett/regreg | 8a10a79cbaf771c2a6d70e8094ab753ec075aab7 | [
"BSD-3-Clause"
] | null | null | null | from itertools import product
import numpy as np
import scipy.sparse
import regreg.api as rr
import regreg.affine.fused_lasso as FL
from regreg.identity_quadratic import identity_quadratic as sq
import nose.tools as nt
def test_class():
p = 50
for order in range(1,3):
fused = FL.trend_filter.grid(p, order=order)
fused2 = FL.trend_filter(np.arange(p), order=order)
V = np.random.standard_normal(p)
U = np.random.standard_normal(p - order)
np.testing.assert_allclose(fused.linear_map(V), fused2.linear_map(V))
np.testing.assert_allclose(fused.affine_map(V), fused2.affine_map(V))
np.testing.assert_allclose(fused.adjoint_map(U), fused2.adjoint_map(U))
V2 = np.random.standard_normal((p, 3))
U2 = np.random.standard_normal((p - order, 3))
np.testing.assert_allclose(fused.linear_map(V2), fused2.linear_map(V2))
np.testing.assert_allclose(fused.affine_map(V2), fused2.affine_map(V2))
np.testing.assert_allclose(fused.adjoint_map(U2), fused2.adjoint_map(U2))
if order == 1:
fusedI = FL.trend_filter_inverse.grid(p, order=order)
fusedI2 = FL.trend_filter_inverse(np.arange(p), order=order)
np.testing.assert_allclose(fusedI.linear_map(U), fusedI2.linear_map(U))
np.testing.assert_allclose(fusedI.affine_map(U), fusedI2.affine_map(U))
np.testing.assert_allclose(fusedI.adjoint_map(V), fusedI2.adjoint_map(V))
np.testing.assert_allclose(fusedI.linear_map(U2), fusedI2.linear_map(U2))
np.testing.assert_allclose(fusedI.affine_map(U2), fusedI2.affine_map(U2))
np.testing.assert_allclose(fusedI.adjoint_map(V2), fusedI2.adjoint_map(V2))
def test_difference_transform():
p = 50
for order in range(1,3):
FL.difference_transform(np.arange(p), order=order, sorted=False)
FL.difference_transform(np.arange(p), order=order, transform=False)
| 42.76087 | 87 | 0.699034 |
79460a3c372aeb32fb8e23c48769ce0619bf540f | 972 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/network_interface_paged.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/network_interface_paged.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/network_interface_paged.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class NetworkInterfacePaged(Paged):
"""
A paging container for iterating over a list of :class:`NetworkInterface <azure.mgmt.network.v2018_02_01.models.NetworkInterface>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[NetworkInterface]'}
}
def __init__(self, *args, **kwargs):
super(NetworkInterfacePaged, self).__init__(*args, **kwargs)
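# Illustrative iteration (editor's sketch): msrest's Paged base class makes this
# container iterable and follows `nextLink` transparently, so client code
# typically just loops over it. `nic_pages` is a hypothetical instance.
#
# for nic in nic_pages:
#     print(nic.name)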
| 34.714286 | 141 | 0.589506 |
79460ad4bd0b71bae5136db4cdd0637bb2a1dbdf | 7,201 | py | Python | vendor/bundle/ruby/2.0.0/gems/pygments.rb-1.1.2/vendor/pygments-main/pygments/lexers/algebra.py | apaigesh/apaigesh.github.io | c79e576c8fb620c9b5cb3cc812e76d1f897a1c37 | [
"MIT"
] | null | null | null | vendor/bundle/ruby/2.0.0/gems/pygments.rb-1.1.2/vendor/pygments-main/pygments/lexers/algebra.py | apaigesh/apaigesh.github.io | c79e576c8fb620c9b5cb3cc812e76d1f897a1c37 | [
"MIT"
] | null | null | null | vendor/bundle/ruby/2.0.0/gems/pygments.rb-1.1.2/vendor/pygments-main/pygments/lexers/algebra.py | apaigesh/apaigesh.github.io | c79e576c8fb620c9b5cb3cc812e76d1f897a1c37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pygments.lexers.algebra
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer algebra systems.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer', 'BCLexer']
class GAPLexer(RegexLexer):
"""
For `GAP <http://www.gap-system.org>`_ source code.
.. versionadded:: 2.0
"""
name = 'GAP'
aliases = ['gap']
filenames = ['*.g', '*.gd', '*.gi', '*.gap']
tokens = {
'root': [
(r'#.*$', Comment.Single),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
if|then|elif|else|fi|
for|while|do|od|
repeat|until|
break|continue|
function|local|return|end|
rec|
quit|QUIT|
IsBound|Unbind|
TryNextMethod|
Info|Assert
)\b''', Keyword),
(r'''(?x)\b(?:
true|false|fail|infinity
)\b''',
Name.Constant),
(r'''(?x)\b(?:
(Declare|Install)([A-Z][A-Za-z]+)|
BindGlobal|BIND_GLOBAL
)\b''',
Name.Builtin),
(r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
(r'''(?x)\b(?:
and|or|not|mod|in
)\b''',
Operator.Word),
(r'''(?x)
(?:\w+|`[^`]*`)
(?:::\w+|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
}
class MathematicaLexer(RegexLexer):
"""
Lexer for `Mathematica <http://www.wolfram.com/mathematica/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Mathematica'
aliases = ['mathematica', 'mma', 'nb']
filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
mimetypes = ['application/mathematica',
'application/vnd.wolfram.mathematica',
'application/vnd.wolfram.mathematica.package',
'application/vnd.wolfram.cdf']
# http://reference.wolfram.com/mathematica/guide/Syntax.html
operators = (
";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
"^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
"@@@", "~~", "===", "&", "<", ">", "<=", ">=",
)
punctuation = (",", ";", "(", ")", "[", "]", "{", "}")
def _multi_escape(entries):
return '(%s)' % ('|'.join(re.escape(entry) for entry in entries))
tokens = {
'root': [
(r'(?s)\(\*.*?\*\)', Comment),
(r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
(r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
(r'#\d*', Name.Variable),
(r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
(r'-?\d+\.\d*', Number.Float),
(r'-?\d*\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(words(operators), Operator),
(words(punctuation), Punctuation),
(r'".*?"', String),
(r'\s+', Text.Whitespace),
],
}
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <[email protected]>.
.. versionadded:: 0.8
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
# (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
class BCLexer(RegexLexer):
"""
A `BC <https://www.gnu.org/software/bc/>`_ lexer.
.. versionadded:: 2.1
"""
name = 'BC'
aliases = ['bc']
filenames = ['*.bc']
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'[{}();,]', Punctuation),
(words(('if', 'else', 'while', 'for', 'break', 'continue',
'halt', 'return', 'define', 'auto', 'print', 'read',
'length', 'scale', 'sqrt', 'limits', 'quit',
'warranty'), suffix=r'\b'), Keyword),
(r'\+\+|--|\|\||&&|'
r'([-<>+*%\^/!=])=?', Operator),
# bc doesn't support exponential
(r'[0-9]+(\.[0-9]*)?', Number),
(r'\.[0-9]+', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
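# Example use of one of these lexers with the public pygments API (editor's
# sketch; the GAP snippet is made up):
#
# from pygments import highlight
# from pygments.formatters import HtmlFormatter
#
# code = 'f := function(x) return x^2; end;'
# print(highlight(code, GAPLexer(), HtmlFormatter()))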
| 32.436937 | 81 | 0.404527 |
79460d13d383fc3be1b7522d870376052d109664 | 4,618 | py | Python | api/spotify.py | SivertUtne/novatorem | d2f66a1821553082cf6c15e2cfc46d23383e9492 | [
"Unlicense"
] | null | null | null | api/spotify.py | SivertUtne/novatorem | d2f66a1821553082cf6c15e2cfc46d23383e9492 | [
"Unlicense"
] | null | null | null | api/spotify.py | SivertUtne/novatorem | d2f66a1821553082cf6c15e2cfc46d23383e9492 | [
"Unlicense"
] | null | null | null | import os
import random
import requests
from base64 import b64encode
from dotenv import load_dotenv, find_dotenv
from flask import Flask, Response, render_template
load_dotenv(find_dotenv())
SPOTIFY_CLIENT_ID = os.getenv("SPOTIFY_CLIENT_ID")
SPOTIFY_SECRET_ID = os.getenv("SPOTIFY_SECRET_ID")
SPOTIFY_REFRESH_TOKEN = os.getenv("SPOTIFY_REFRESH_TOKEN")
REFRESH_TOKEN_URL = "https://accounts.spotify.com/api/token"
NOW_PLAYING_URL = "https://api.spotify.com/v1/me/player/currently-playing"
RECENTLY_PLAYING_URL = "https://api.spotify.com/v1/me/player/recently-played?limit=10"
app = Flask(__name__)
def getAuth():
return b64encode(f"{SPOTIFY_CLIENT_ID}:{SPOTIFY_SECRET_ID}".encode()).decode("ascii")
def refreshToken():
data = {
"grant_type": "refresh_token",
"refresh_token": SPOTIFY_REFRESH_TOKEN,
}
headers = {"Authorization": "Basic {}".format(getAuth())}
response = requests.post(REFRESH_TOKEN_URL, data=data, headers=headers)
return response.json()["access_token"]
def nowPlaying():
headers = {"Authorization": f"Bearer {refreshToken()}"}
response = requests.get(NOW_PLAYING_URL, headers=headers)
if response.status_code == 204:
return {}
return response.json()
def recentlyPlayed():
headers = {"Authorization": f"Bearer {refreshToken()}"}
response = requests.get(RECENTLY_PLAYING_URL, headers=headers)
if response.status_code == 204:
return {}
return response.json()
def addBar(barNr, startPixel, bartype, lowSpeed, highSpeed):
bar = "<div class='" + bartype + "Bar'></div>"
animationSpeed = random.randint(lowSpeed, highSpeed)
barCSS = (
"." + bartype + "Bar:nth-child({}) {{ left: {}px; animation-duration: {}ms; }}"
.format(barNr, startPixel, animationSpeed)
)
return bar, barCSS
def getRandomBarType(barNr):
    # Distributes base frequencies to the right, and high frequencies to the left.
if barNr < 15:
bartype = random.randint(0, 1)
elif barNr < 75:
bartype = random.randint(0, 2)
else:
bartype = random.randint(1, 2)
return bartype
def generateBars():
barCount, startPixel = 90, 1 # barCount has to be a multiple of 3
bars, barsCSS = "", ""
barLayout = "position: absolute;" \
"width: 4px;" \
"bottom: 1px;" \
"height: 15px;" \
"background: #21AF43;" \
"border-radius: 1px 1px 0px 0px;"
bartypes = [("high", 500, 1000),
("medium", 650, 810),
("base", 349, 351)]
for i in range(1, barCount):
bartype = getRandomBarType(i)
newBar, newBarCSS = addBar(i, startPixel, bartypes[bartype][0], bartypes[bartype][1], bartypes[bartype][2])
bars += newBar
barsCSS += newBarCSS
startPixel += 4
return barsCSS, barLayout, bars
def loadImageB64(url):
return b64encode(requests.get(url).content).decode("ascii")
def makeSVG(data):
    currentlyPlaying = bool(data) and data.get("item") not in (None, "None") and (data["item"]["is_local"] is False)
if currentlyPlaying:
currentStatus = "🎧 Vibing to"
item = data["item"]
# Create the animated bars
barCSS, barLayout, animatedBars = generateBars()
else:
currentStatus = "🎧 Recently vibed to"
# get random track from recently played, filter away local tracks
recentPlays = [item for item in recentlyPlayed()["items"] if item["track"]["is_local"] is not True]
itemIndex = random.randint(0, len(recentPlays) - 1)
item = recentPlays[itemIndex]["track"]
animatedBars, barLayout, barCSS = "", "", ""
# Data that is sent to html
dataDict = {
"status": currentStatus,
"image": loadImageB64(item["album"]["images"][1]["url"]),
"songName": item["name"].replace("&", "&"),
"artistName": item["artists"][0]["name"].replace("&", "&"),
"explicit": item["explicit"],
# "previewLink": item["preview_url"],
"trackLink": item["external_urls"]["spotify"],
# "popularity": item["popularity"],
"animatedBars": animatedBars,
"barLayout": barLayout,
"barCSS": barCSS,
}
return render_template("spotify.html.j2", **dataDict)
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def catch_all(path):
data = nowPlaying()
svg = makeSVG(data)
resp = Response(svg, mimetype="image/svg+xml")
resp.headers["Cache-Control"] = "s-maxage=1"
return resp
if __name__ == "__main__":
app.run(debug=True)
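# Deployment note (added for illustration, not from the original file): the catch-all
# route returns the rendered SVG with an image/svg+xml mimetype, so a deployment of this
# app can be embedded directly in a README, e.g. ![spotify](https://<your-deployment>/api/spotify).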
| 32.293706 | 115 | 0.628844 |
79460db734241f76873ff2d11851a2927027fe9e | 4,470 | py | Python | src/webapp/highscore.py | NullVoxPopuli/html5tetris | b2cea0e189128a441af1249f3a2cc5e9918c270e | ["Apache-2.0"] | null | null | null | src/webapp/highscore.py | NullVoxPopuli/html5tetris | b2cea0e189128a441af1249f3a2cc5e9918c270e | ["Apache-2.0"] | null | null | null | src/webapp/highscore.py | NullVoxPopuli/html5tetris | b2cea0e189128a441af1249f3a2cc5e9918c270e | ["Apache-2.0"] | 1 | 2021-03-23T18:06:32.000Z | 2021-03-23T18:06:32.000Z |
import cgi
import datetime
import urllib
import wsgiref.handlers
import random
import datetime
from django.utils import simplejson
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class Score(db.Model):
name=db.StringProperty()
score=db.IntegerProperty()
date=db.StringProperty()
tempRef=db.IntegerProperty()
class HSPostGameHandler(webapp.RequestHandler):
def post(self):
myScoreQ = db.GqlQuery("SELECT * FROM Score " +
"WHERE tempRef = %s" % (self.request.get('tempRef')))
myScore = myScoreQ[0]
dailyRankQ = db.GqlQuery("SELECT * FROM Score WHERE date = '%s' AND score > %s"
% (myScore.date, myScore.score))
dailyRank = len(dailyRankQ.fetch(1000)) + 1
if dailyRank > 100:
dailyRank = -1
self.response.out.write(simplejson.dumps({
'userScore': myScore.score,
'tempRef': myScore.tempRef,
'dailyRank': dailyRank
}) + '\n');
class HSReportScoreHandler(webapp.RequestHandler):
def post(self):
# TODO: make a real anti-fake-score-abuse system
# don't care about abuse, blowing away long-term score storage
score = int(self.request.get('gthbyu'))/17
tempRef = int(random.random() * 100000000)
# TODO: CGI clean name input
record = Score(score=score,
name=cgi.escape(self.request.get('name') or 'Unnamed'),
tempRef=tempRef,
date=datetime.date.today().isoformat())
record.put()
self.response.out.write(str(tempRef) + '\n')
class HSApplyNameHandler(webapp.RequestHandler):
def post(self):
tempRef = self.request.get('tempRef')
name = self.request.get('name')
scoreQ = db.GqlQuery("SELECT * FROM Score WHERE tempRef = %s" % tempRef)
score = scoreQ[0]
score.name = name
score.tempRef = 0
score.put()
class HSTablesHandler(webapp.RequestHandler):
def post(self):
# get both lists
topScoreQ = db.GqlQuery("SELECT * FROM Score ORDER BY score DESC")
topScores = topScoreQ.fetch(100)
todayString = datetime.date.today().isoformat()
dailyScoreQ = db.GqlQuery("SELECT * FROM Score WHERE date = '%s' ORDER By score DESC" % (todayString))
dailyScores = dailyScoreQ.fetch(100)
topScoreList = []
dailyScoreList = []
# remove the unneeded values from the lists
for curScore in topScores:
topScoreList.append({
'score': curScore.score,
'date': curScore.date,
'name': curScore.name
})
for curScore in dailyScores:
dailyScoreList.append({
'score': curScore.score,
'name': curScore.name
})
self.response.out.write(simplejson.dumps({
'topScores': topScoreList,
'dailyScores': dailyScoreList
}));
class HSPurgeHandler(webapp.RequestHandler):
def get(self):
topScoreQ = db.GqlQuery("SELECT * FROM Score ORDER BY score DESC")
topScores = topScoreQ.fetch(100)
todayString = datetime.date.today().isoformat()
dailyScoreQ = db.GqlQuery("SELECT * FROM Score WHERE date = '%s' ORDER By score DESC" % (todayString))
dailyScores = dailyScoreQ.fetch(100)
keepSet = set()
for score in topScores:
keepSet.add(score.key())
for score in dailyScores:
keepSet.add(score.key())
allScores = db.GqlQuery("SELECT * FROM Score")
# remove the values that are not in the top 100 or daily 100
for score in allScores:
if score.key() not in keepSet:
score.delete()
application = webapp.WSGIApplication([
('/score/postGame', HSPostGameHandler),
('/score/reportScore', HSReportScoreHandler),
('/score/apply', HSApplyNameHandler),
('/score/tables', HSTablesHandler),
('/score/purge', HSPurgeHandler)
], debug=True)
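# Example request flow (added for illustration, derived from the handlers above):
#   POST /score/reportScore  with 'gthbyu' (obfuscated score, i.e. score * 17) and 'name'
#                            -> stores a Score row and returns its tempRef
#   POST /score/postGame     with 'tempRef'                  -> JSON with userScore and dailyRank
#   POST /score/apply        with 'tempRef' and final 'name' -> attaches the player's name
#   POST /score/tables       -> JSON with the all-time top-100 and today's top-100 tables
#   GET  /score/purge        -> deletes rows that appear in neither table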
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| 32.867647 | 110 | 0.575839 |
79460e2a91d222779ba4f1b7255c78da291c05d3 | 1,548 | py | Python | Python/pyworkout/comprehensions/ex29_mod2.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | ["MIT"] | null | null | null | Python/pyworkout/comprehensions/ex29_mod2.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | ["MIT"] | 3 | 2020-03-24T16:26:35.000Z | 2020-04-15T19:40:41.000Z | Python/pyworkout/comprehensions/ex29_mod2.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | ["MIT"] | null | null | null |
from typing import List
from string import digits
def number_is_valid(
number: str
) -> bool:
separator = '-'
area_code, telephone_prefix, line_number = \
number.split(sep=separator, maxsplit=3)
length_validators = [
len(area_code) != 3,
len(telephone_prefix) != 3,
len(line_number) != 4
]
if any(length_validators):
return False
number_validators = [
not all([char in digits for char in number_part])
for number_part in (area_code, telephone_prefix, line_number)
]
if any(number_validators):
return False
return True
def transform_number(
number: str
) -> str:
area_code_last_char_idx = 3
return '{0}{1}'.format(
int(number[:area_code_last_char_idx]) + 1,
number[area_code_last_char_idx:]
)
def number_is_outdated(
number: str
) -> bool:
telephone_prefix_first_char_idx = 5
allowed_chars = range(6)
return int(number[telephone_prefix_first_char_idx]) in allowed_chars
def update_phone_numbers(
numbers: List[str]
) -> List[str]:
if any([not number_is_valid(number) for number in numbers]):
error_msg = "Number format is invalid"
raise ValueError(error_msg)
return [
transform_number(number) if number_is_outdated(number) else number
for number in numbers
]
def main():
numbers = ['123-456-7890', '123-333-4444', '123-777-8888']
result = update_phone_numbers(numbers)
print(result)
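    # With the sample numbers above this prints
    # ['124-456-7890', '124-333-4444', '123-777-8888']: number_is_outdated() is True for
    # the first two, so their area codes are bumped by one (illustrative trace, not part
    # of the original exercise file).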
if __name__ == '__main__':
main()
| 22.434783 | 74 | 0.653747 |
79460f3dd7c9d09746dbac8430597aaa1ec4b32e | 2,586 | py | Python | crazy_crawler/crazy_crawler/spiders/crazy_spider.py | Tabuyos/crazy-crawler | 2cc2c86855d32d682b218414764402b89abcafdf | ["MIT"] | null | null | null | crazy_crawler/crazy_crawler/spiders/crazy_spider.py | Tabuyos/crazy-crawler | 2cc2c86855d32d682b218414764402b89abcafdf | ["MIT"] | null | null | null | crazy_crawler/crazy_crawler/spiders/crazy_spider.py | Tabuyos/crazy-crawler | 2cc2c86855d32d682b218414764402b89abcafdf | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@Author Tabuyos
@Time 2020/10/9 10:10
@Site www.tabuyos.com
@Email [email protected]
@Description
"""
import os
from os.path import abspath
from scrapy import Spider, Request
from op_json.op_json import OperateJson
class CrazySpider(Spider):
project_name = "crazy-crawler"
# get spider config.
root_path = os.path.join(abspath(__file__).split(project_name)[0], project_name)
dataset = OperateJson(os.path.join(root_path, "config/WebSiteListing.json")).load_json()
site_list = dataset.get("Listing")
name = "crazy-crawler"
def parse(self, response, **kwargs):
current_url = response.url
body = response.body
site_info = response.request.meta.get("site_info")
if body == b'get_next_url':
ls = self.site_list
pending_index = ls.index(site_info.get("BaseInfo").get("Name")) + 1
if len(ls) != pending_index:
name = ls[pending_index]
else:
return None
config_path = os.path.join(self.root_path, "config/{name}_Config.json".format(name=name))
site_info = OperateJson(config_path).load_json()
pending_url = site_info.get("BaseInfo").get("Url")
meta = {"first": True, "site_info": site_info}
yield Request(url=pending_url, callback=self.parse, meta=meta, dont_filter=True)
article = site_info.get("CrawlerInfo").get("Article")
for art in response.selector.xpath(article.get("XPath")):
result1 = art.xpath(article.get("ContentElements")[0].get("XPath")).extract()
result2 = art.xpath(article.get("ContentElements")[1].get("XPath")).extract()
result3 = art.xpath(article.get("ContentElements")[2].get("XPath")).extract()
print("".join(result1).strip(), "|", "".join(result2).strip(), "|", "".join(result3).strip())
        if body != b'end_url':
yield Request(url=current_url, callback=self.parse, meta={"site_info": site_info}, dont_filter=True)
else:
pass
def start_requests(self):
print("==============================", "Tabuyos-start_requests", "==============================")
config_path = os.path.join(self.root_path, "config/{name}_Config.json".format(name=self.site_list[0]))
site_info = OperateJson(config_path).load_json()
init_url = site_info.get("BaseInfo").get("Url")
yield Request(url=init_url, callback=self.parse, meta={"first": True, "site_info": site_info}, dont_filter=True)
| 42.393443 | 120 | 0.621036 |
79461082acd20c805edfa051ba755789194a6e6d | 11,000 | py | Python | homeassistant/components/bmw_connected_drive/__init__.py | DavidDeSloovere/core | 909a20b36d4df6724c955c2ae28cb82fe6d50c2e | ["Apache-2.0"] | 4 | 2020-08-10T20:02:24.000Z | 2022-01-31T02:14:22.000Z | homeassistant/components/bmw_connected_drive/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | ["Apache-2.0"] | 78 | 2020-07-23T07:13:08.000Z | 2022-03-31T06:02:04.000Z | homeassistant/components/bmw_connected_drive/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | ["Apache-2.0"] | 3 | 2022-01-17T20:10:54.000Z | 2022-01-17T20:17:22.000Z |
"""Reads vehicle status from BMW connected drive portal."""
from __future__ import annotations
import logging
from bimmer_connected.account import ConnectedDriveAccount
from bimmer_connected.country_selector import get_region_from_name
import voluptuous as vol
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_NAME,
CONF_PASSWORD,
CONF_REGION,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from .const import (
ATTRIBUTION,
CONF_ACCOUNT,
CONF_ALLOWED_REGIONS,
CONF_READ_ONLY,
CONF_USE_LOCATION,
DATA_ENTRIES,
DATA_HASS_CONFIG,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "bmw_connected_drive"
ATTR_VIN = "vin"
ACCOUNT_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_REGION): vol.In(CONF_ALLOWED_REGIONS),
vol.Optional(CONF_READ_ONLY): cv.boolean,
}
)
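# Illustrative configuration.yaml shape accepted by the schema above (keys under the
# domain are free-form account names, values follow ACCOUNT_SCHEMA; the region value
# shown is an assumed example and must be one of CONF_ALLOWED_REGIONS):
#
#   bmw_connected_drive:
#     my_account:
#       username: [email protected]
#       password: !secret bmw_password
#       region: rest_of_world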
CONFIG_SCHEMA = vol.Schema({DOMAIN: {cv.string: ACCOUNT_SCHEMA}}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
DEFAULT_OPTIONS = {
CONF_READ_ONLY: False,
CONF_USE_LOCATION: False,
}
PLATFORMS = ["binary_sensor", "device_tracker", "lock", "notify", "sensor"]
UPDATE_INTERVAL = 5 # in minutes
SERVICE_UPDATE_STATE = "update_state"
_SERVICE_MAP = {
"light_flash": "trigger_remote_light_flash",
"sound_horn": "trigger_remote_horn",
"activate_air_conditioning": "trigger_remote_air_conditioning",
"find_vehicle": "trigger_remote_vehicle_finder",
}
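# Example (added for illustration): a call to the bmw_connected_drive.light_flash service
# with service data {"vin": "<vehicle vin>"} is routed through _SERVICE_MAP to the
# vehicle's trigger_remote_light_flash remote service in execute_service() below.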
UNDO_UPDATE_LISTENER = "undo_update_listener"
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the BMW Connected Drive component from configuration.yaml."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][DATA_HASS_CONFIG] = config
if DOMAIN in config:
for entry_config in config[DOMAIN].values():
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=entry_config
)
)
return True
@callback
def _async_migrate_options_from_data_if_missing(hass, entry):
data = dict(entry.data)
options = dict(entry.options)
if CONF_READ_ONLY in data or list(options) != list(DEFAULT_OPTIONS):
options = dict(DEFAULT_OPTIONS, **options)
options[CONF_READ_ONLY] = data.pop(CONF_READ_ONLY, False)
hass.config_entries.async_update_entry(entry, data=data, options=options)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up BMW Connected Drive from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(DATA_ENTRIES, {})
_async_migrate_options_from_data_if_missing(hass, entry)
try:
account = await hass.async_add_executor_job(
setup_account, entry, hass, entry.data[CONF_USERNAME]
)
except OSError as ex:
raise ConfigEntryNotReady from ex
async def _async_update_all(service_call=None):
"""Update all BMW accounts."""
await hass.async_add_executor_job(_update_all)
def _update_all() -> None:
"""Update all BMW accounts."""
for entry in hass.data[DOMAIN][DATA_ENTRIES].copy().values():
entry[CONF_ACCOUNT].update()
# Add update listener for config entry changes (options)
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id] = {
CONF_ACCOUNT: account,
UNDO_UPDATE_LISTENER: undo_listener,
}
# Service to manually trigger updates for all accounts.
hass.services.async_register(DOMAIN, SERVICE_UPDATE_STATE, _async_update_all)
await _async_update_all()
hass.config_entries.async_setup_platforms(
entry, [platform for platform in PLATFORMS if platform != NOTIFY_DOMAIN]
)
# set up notify platform, no entry support for notify platform yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
NOTIFY_DOMAIN,
DOMAIN,
{CONF_NAME: DOMAIN},
hass.data[DOMAIN][DATA_HASS_CONFIG],
)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
entry, [platform for platform in PLATFORMS if platform != NOTIFY_DOMAIN]
)
# Only remove services if it is the last account and not read only
if (
len(hass.data[DOMAIN][DATA_ENTRIES]) == 1
and not hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][CONF_ACCOUNT].read_only
):
services = list(_SERVICE_MAP) + [SERVICE_UPDATE_STATE]
for service in services:
hass.services.async_remove(DOMAIN, service)
for vehicle in hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][
CONF_ACCOUNT
].account.vehicles:
hass.services.async_remove(NOTIFY_DOMAIN, slugify(f"{DOMAIN}_{vehicle.name}"))
if unload_ok:
hass.data[DOMAIN][DATA_ENTRIES][entry.entry_id][UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN][DATA_ENTRIES].pop(entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
def setup_account(entry: ConfigEntry, hass, name: str) -> BMWConnectedDriveAccount:
"""Set up a new BMWConnectedDriveAccount based on the config."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
region = entry.data[CONF_REGION]
read_only = entry.options[CONF_READ_ONLY]
use_location = entry.options[CONF_USE_LOCATION]
_LOGGER.debug("Adding new account %s", name)
pos = (
(hass.config.latitude, hass.config.longitude) if use_location else (None, None)
)
cd_account = BMWConnectedDriveAccount(
username, password, region, name, read_only, *pos
)
def execute_service(call):
"""Execute a service for a vehicle."""
vin = call.data[ATTR_VIN]
vehicle = None
# Double check for read_only accounts as another account could create the services
for entry_data in [
e
for e in hass.data[DOMAIN][DATA_ENTRIES].values()
if not e[CONF_ACCOUNT].read_only
]:
vehicle = entry_data[CONF_ACCOUNT].account.get_vehicle(vin)
if vehicle:
break
if not vehicle:
_LOGGER.error("Could not find a vehicle for VIN %s", vin)
return
function_name = _SERVICE_MAP[call.service]
function_call = getattr(vehicle.remote_services, function_name)
function_call()
if not read_only:
# register the remote services
for service in _SERVICE_MAP:
hass.services.register(
DOMAIN, service, execute_service, schema=SERVICE_SCHEMA
)
# update every UPDATE_INTERVAL minutes, starting now
# this should even out the load on the servers
now = dt_util.utcnow()
track_utc_time_change(
hass,
cd_account.update,
minute=range(now.minute % UPDATE_INTERVAL, 60, UPDATE_INTERVAL),
second=now.second,
)
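    # Worked example (added for illustration): with UPDATE_INTERVAL = 5 and now.minute
    # == 17, range(17 % 5, 60, 5) fires at minutes 2, 7, 12, ..., 57, so each account
    # keeps its own phase instead of every account polling the API at the same instant.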
# Initialize
cd_account.update()
return cd_account
class BMWConnectedDriveAccount:
"""Representation of a BMW vehicle."""
def __init__(
self,
username: str,
password: str,
region_str: str,
name: str,
read_only: bool,
lat=None,
lon=None,
) -> None:
"""Initialize account."""
region = get_region_from_name(region_str)
self.read_only = read_only
self.account = ConnectedDriveAccount(username, password, region)
self.name = name
self._update_listeners = []
# Set observer position once for older cars to be in range for
# GPS position (pre-7/2014, <2km) and get new data from API
if lat and lon:
self.account.set_observer_position(lat, lon)
self.account.update_vehicle_states()
def update(self, *_):
"""Update the state of all vehicles.
Notify all listeners about the update.
"""
_LOGGER.debug(
"Updating vehicle state for account %s, notifying %d listeners",
self.name,
len(self._update_listeners),
)
try:
self.account.update_vehicle_states()
for listener in self._update_listeners:
listener()
except OSError as exception:
_LOGGER.error(
"Could not connect to the BMW Connected Drive portal. "
"The vehicle state could not be updated"
)
_LOGGER.exception(exception)
def add_update_listener(self, listener):
"""Add a listener for update notifications."""
self._update_listeners.append(listener)
class BMWConnectedDriveBaseEntity(Entity):
"""Common base for BMW entities."""
def __init__(self, account, vehicle):
"""Initialize sensor."""
self._account = account
self._vehicle = vehicle
self._attrs = {
"car": self._vehicle.name,
"vin": self._vehicle.vin,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@property
def device_info(self) -> DeviceInfo:
"""Return info for device registry."""
return {
"identifiers": {(DOMAIN, self._vehicle.vin)},
"name": f'{self._vehicle.attributes.get("brand")} {self._vehicle.name}',
"model": self._vehicle.name,
"manufacturer": self._vehicle.attributes.get("brand"),
}
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return self._attrs
@property
def should_poll(self):
"""Do not poll this class.
Updates are triggered from BMWConnectedDriveAccount.
"""
return False
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
| 31.428571 | 90 | 0.668818 |
794610ad8a46ae35b3bc2c63fae2e1c2b60d96b6 | 1,849 | py | Python | python/tree/fenwick_tree.py | MacSimmy/algo-and-data-structure | 424ce425ed2205522de17cfddefcdb90fc9fb777 | ["Apache-2.0"] | 11,393 | 2015-01-23T20:01:53.000Z | 2022-03-31T18:50:32.000Z | python/tree/fenwick_tree.py | BiswajitMahato/interview | 94be5deb0c0df30ade2a569cf3056b7cc1e012f4 | ["Apache-2.0"] | 128 | 2015-06-28T03:01:12.000Z | 2022-01-04T17:27:36.000Z | python/tree/fenwick_tree.py | BiswajitMahato/interview | 94be5deb0c0df30ade2a569cf3056b7cc1e012f4 | ["Apache-2.0"] | 5,893 | 2015-01-07T19:48:31.000Z | 2022-03-31T02:41:32.000Z |
#################################################################################################################################
#Implementation of Binary Indexed Tree OR Fenwick Tree
#Time Complexities:
# Construction of Tree: O (n.log (n))
# Updating an element: O (log (n))
#    Prefix Query (sum of elements 0 to i) or Range Sum Query (sum of elements x to y): O (log (n))
#Space Complexity: O (n)
#################################################################################################################################
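# Worked example (added for illustration): the parent/next helpers below use
# (index & -index), the lowest set bit of index. For index 6 (0b110) that bit is 2, so
# get_parent(6) -> 4 and get_next(6) -> 8: prefix-sum queries walk downward by clearing
# the lowest set bit, while point updates walk upward by adding it.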
class FenTree (object):
def __init__ (self, array):
self.array, self.tree = [0] * len (array), [0] * (len (array) + 1);
for i in range (len (array)):
self.update (i, array [i]);
def get_parent (self, child):
return (child - (child & -child));
def get_next (self, index):
return (index + (index & -index));
def update (self, index, item):
current, self.array [index] = self.array [index], item;
item -= current;
index += 1;
while (index <= len (self.array)):
self.tree [index] += item;
index = self.get_next (index);
def prefix_sum (self, index):
index += 1;
total = 0;
while (index > 0):
total += self.tree [index];
index = self.get_parent (index);
return (total);
def range_sum (self, x, y):
return (self.prefix_sum (max (x, y)) - self.prefix_sum (min (x, y) - 1));
def describe (self):
print ('ARRAY =>\t', self.array);
print ('Binary Indexed Tree =>\t', self.tree);
if (__name__ == '__main__'):
tree = FenTree ([3,2,-1,6,5,4]);
# tree = FenTree ([int (i) for i in input ('Enter the array (space-separated integers): ').split ()]);
tree.describe ();
tree.update (4, 8); #replaces 5 with 8 in the list given to the fenwick tree
tree.describe ();
print (tree.range_sum (1, 5)); #returns 2-1+6+5+4
print (tree.prefix_sum (5)); #returns 3+2-1+6+5+4
| 33.618182 | 129 | 0.540833 |
794610b6887174483993c9b87b34aaabd33d61f2 | 1,251 | py | Python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/port.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/port.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/port.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Port(Model):
"""Properties of a network port.
:param transport_protocol: Protocol type of the port. Possible values
include: 'Tcp', 'Udp'
:type transport_protocol: str or
~azure.mgmt.devtestlabs.models.TransportProtocol
:param backend_port: Backend port of the target virtual machine.
:type backend_port: int
"""
_attribute_map = {
'transport_protocol': {'key': 'transportProtocol', 'type': 'str'},
'backend_port': {'key': 'backendPort', 'type': 'int'},
}
def __init__(self, **kwargs):
super(Port, self).__init__(**kwargs)
self.transport_protocol = kwargs.get('transport_protocol', None)
self.backend_port = kwargs.get('backend_port', None)
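# Minimal usage sketch (added for illustration, not part of the generated SDK file):
#
#     port = Port(transport_protocol='Tcp', backend_port=22)
#
# Both properties are optional keyword arguments, mirroring the kwargs.get() calls above.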
| 35.742857 | 76 | 0.611511 |
794610f5c813d2603fadeacc557ddd7d12e88795 | 1,191 | py | Python | Python/robot-return-to-origin.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | ["MIT"] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/robot-return-to-origin.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | ["MIT"] | null | null | null | Python/robot-return-to-origin.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | ["MIT"] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z |
# Time: O(n)
# Space: O(1)
# Initially, there is a Robot at position (0, 0). Given a sequence of its moves,
# judge if this robot makes a circle, which means it moves back to the original place.
#
# The move sequence is represented by a string. And each move is represented by a character.
# The valid robot moves are R (Right), L (Left), U (Up) and D (down).
# The output should be true or false representing whether the robot makes a circle.
#
# Example 1:
# Input: "UD"
# Output: true
# Example 2:
# Input: "LL"
# Output: false
import collections
class Solution(object):
def judgeCircle(self, moves):
"""
:type moves: str
:rtype: bool
"""
count = collections.Counter(moves)
return count['L'] == count['R'] and count['U'] == count['D']
def judgeCircle2(self, moves):
"""
:type moves: str
:rtype: bool
"""
v, h = 0, 0
for move in moves:
if move == 'U':
v += 1
elif move == 'D':
v -= 1
elif move == 'R':
h += 1
elif move == 'L':
h -= 1
return v == 0 and h == 0
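# Quick self-check (added for illustration; not part of the original solution file).
if __name__ == '__main__':
    sol = Solution()
    print(sol.judgeCircle("UD"))    # True: the up move cancels the down move
    print(sol.judgeCircle2("LL"))   # False: the robot ends two steps left of the origin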
| 25.340426 | 90 | 0.528128 |
7946117131373ba2ba5e193685e65b97feec2d93 | 12,716 | py | Python | gordon/resources/s3.py | patgoley/gordon | 4c1cd0c4dea2499d98115672095714592f80f7aa | ["BSD-3-Clause"] | 2,204 | 2016-06-02T07:47:45.000Z | 2022-03-29T15:57:28.000Z | gordon/resources/s3.py | patgoley/gordon | 4c1cd0c4dea2499d98115672095714592f80f7aa | ["BSD-3-Clause"] | 159 | 2016-06-02T11:04:20.000Z | 2020-08-26T09:07:36.000Z | gordon/resources/s3.py | ericblade/gordon | 0cf326db916b3528eae80ad3c732d8732079a2a6 | ["BSD-3-Clause"] | 188 | 2016-06-02T10:58:00.000Z | 2021-11-01T08:15:14.000Z |
import re
from collections import defaultdict, Counter
import six
import troposphere
from troposphere import sqs, sns, awslambda
from . import base
from gordon import exceptions
from gordon import utils
from gordon.actions import Ref
from gordon.contrib.s3.resources import (
S3BucketNotificationConfiguration,
NotificationConfiguration, KeyFilter
)
class BaseNotification(object):
def __init__(self, bucket_notification_configuration, **kwargs):
self.settings = kwargs
self.bucket_notification_configuration = bucket_notification_configuration
self.events = []
        # Validate all notifications have an id. This is important because
        # we'll rely on this id to create/modify/delete notifications
if 'id' in self.settings:
self.id = self.settings['id']
else:
raise exceptions.ResourceValidationError(
(
"You need to define an id which identifies the "
"notification {}").format(self.settings)
)
# Validate that events is present, and that it contains valid values
if 'events' in self.settings and self.settings['events']:
for event in self.settings['events']:
event_match = re.match(r's3\:(\w+|\*)(?:\:(\w+|\*))?', event)
if event_match:
self.events.append([event] + list(event_match.groups()))
else:
raise exceptions.ResourceValidationError(
"Invalid event {}".format(event)
)
else:
raise exceptions.ResourceValidationError(
("You need to define a list of events for the "
"notification {}").format(self.name)
)
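        # For example (added for illustration), 's3:ObjectCreated:*' matches the pattern
        # above and is stored as ['s3:ObjectCreated:*', 'ObjectCreated', '*'].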
# Validate that filters are a subset of (prefix, suffix) and keys
# are not duplicated.
_filters = self.settings.get('key_filters', {})
        if set(_filters) - set(('prefix', 'suffix')):
raise exceptions.ResourceValidationError(
"""You can't create filters for '{}'.""".format(
', '.join(_filters)
)
)
else:
self.filters = [(k, v) for k, v in six.iteritems(_filters)]
@classmethod
def from_dict(cls, data, id, bucket_notification_configuration):
notification_type = set(('lambda', 'topic', 'queue')) & set(data.keys())
if len(notification_type) != 1:
raise exceptions.ResourceValidationError(
(
"You need to define either a lamda, a queue or a topic "
"as destination of your notification {}"
).format(bucket_notification_configuration)
)
return {'lambda': LambdaFunctionNotification,
'queue': QueueNotification,
'topic': TopicNotification}.get(
list(notification_type)[0])(
id=id,
bucket_notification_configuration=bucket_notification_configuration,
**data
)
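    # Illustrative input for from_dict (names are placeholders, not from the original
    # source): {'lambda': 'app.myfunction', 'events': ['s3:ObjectCreated:*'],
    # 'key_filters': {'prefix': 'images/'}} selects LambdaFunctionNotification and is
    # passed through unchanged as the notification's settings.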
def get_destination_arn(self):
pass
def register_destination_publish_permission(self, template):
pass
class LambdaFunctionNotification(BaseNotification):
api_property = 'LambdaFunctionConfigurations'
def register_destination_publish_permission(self, template):
template.add_resource(
awslambda.Permission(
utils.valid_cloudformation_name(
self.bucket_notification_configuration.name,
self.id,
'permission'
),
Action="lambda:InvokeFunction",
FunctionName=self.get_destination_arn(),
Principal="s3.amazonaws.com",
SourceAccount=troposphere.Ref(troposphere.AWS_ACCOUNT_ID),
SourceArn=self.bucket_notification_configuration.get_bucket_arn()
)
)
def get_destination_arn(self):
return troposphere.Ref(
self.bucket_notification_configuration.project.reference(
utils.lambda_friendly_name_to_grn(
self.settings['lambda']
)
)
)
class QueueNotification(BaseNotification):
api_property = 'QueueConfigurations'
def get_destination_arn(self):
destination = self.settings['queue']
region = troposphere.Ref(troposphere.AWS_REGION)
if isinstance(destination, six.string_types):
if destination.startswith('arn:aws:'):
return destination
account = troposphere.Ref(troposphere.AWS_ACCOUNT_ID)
elif isinstance(destination, dict):
account = destination['account_id']
destination = destination['name']
else:
return destination
return troposphere.Join(":", [
"arn:aws:sqs",
region,
account,
destination
])
def get_destination_url(self):
destination = self.settings['queue']
region = troposphere.Ref(troposphere.AWS_REGION)
if isinstance(destination, six.string_types):
account = troposphere.Ref(troposphere.AWS_ACCOUNT_ID)
elif isinstance(destination, dict):
account = destination['account_id']
destination = destination['name']
else:
return destination
return troposphere.Join("", [
"https://sqs.",
region,
".amazonaws.com/",
account,
"/",
destination
])
def register_destination_publish_permission(self, template):
template.add_resource(
sqs.QueuePolicy(
utils.valid_cloudformation_name(
self.bucket_notification_configuration.name,
self.id,
'permission'
),
Queues=[self.get_destination_url()],
PolicyDocument={
"Version": "2008-10-17",
"Id": "PublicationPolicy",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": ["sqs:SendMessage"],
"Resource": self.get_destination_arn(),
"Condition": {
"ArnEquals": {"aws:SourceArn": self.bucket_notification_configuration.get_bucket_arn()}
}
}]
}
)
)
class TopicNotification(BaseNotification):
api_property = 'TopicConfigurations'
def get_destination_arn(self):
destination = self.settings['topic']
region = troposphere.Ref(troposphere.AWS_REGION)
if isinstance(destination, six.string_types):
if destination.startswith('arn:aws:'):
return destination
account = troposphere.Ref(troposphere.AWS_ACCOUNT_ID)
elif isinstance(destination, dict):
account = destination['account_id']
destination = destination['name']
else:
return destination
return troposphere.Join(":", [
"arn:aws:sns",
region,
account,
destination
])
def register_destination_publish_permission(self, template):
template.add_resource(
sns.TopicPolicy(
utils.valid_cloudformation_name(
self.bucket_notification_configuration.name,
self.id,
'permission'
),
Topics=[self.get_destination_arn()],
PolicyDocument={
"Version": "2008-10-17",
"Id": "PublicationPolicy",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": ["sns:Publish"],
"Resource": self.get_destination_arn(),
"Condition": {
"ArnEquals": {"aws:SourceArn": self.bucket_notification_configuration.get_bucket_arn()}
}
}]
}
)
)
class BucketNotificationConfiguration(base.BaseResource):
grn_type = 's3-bucket-notification'
required_settings = (
'bucket',
'notifications',
)
def __init__(self, *args, **kwargs):
super(BucketNotificationConfiguration, self).__init__(*args, **kwargs)
self._notifications = {}
for notification_id, notification_data in six.iteritems(self.settings.get('notifications', {})):
self._notifications[notification_id] = BaseNotification.from_dict(
id=notification_id,
data=notification_data,
bucket_notification_configuration=self
)
self._validate_notifications()
def get_bucket_arn(self):
bucket_name = self.get_bucket_name()
return troposphere.Join("", ["arn:aws:s3:::", bucket_name])
def get_bucket_name(self):
bucket = self.settings.get('bucket')
if isinstance(bucket, troposphere.Ref):
return bucket
return bucket
def _validate_notifications(self):
# Validate that all key prefix/suffix filters for a bucket
        # don't overlap with each other.
all_filters = defaultdict(list)
for notification_id, notification in six.iteritems(self._notifications):
for name, value in notification.filters:
all_filters[name].append(value)
overlap_checks = {'prefix': 'startswith', 'suffix': 'endswith'}
for filter_type, values in six.iteritems(all_filters):
check = overlap_checks.get(filter_type)
            # Don't check filters whose values are Ref instances,
            # since Refs aren't bound until apply
            if any(isinstance(v, Ref) for v in values):
                continue
overlaps = [sum([int(getattr(v, check)(z)) for z in values]) for v in values]
if sum(overlaps) > len(values):
raise exceptions.ResourceValidationError(
"One or more {} filters overlap one to each other {}.".format(
filter_type,
', '.join(values)
)
)
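        # Illustrative failure case (not from the original source): two notifications on
        # the same bucket with prefix filters "logs/" and "logs/2016/" overlap, because
        # the second value startswith the first, so the check above raises.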
def register_resources_template(self, template):
extra = defaultdict(list)
for notification_id, notification in six.iteritems(self._notifications):
notification.register_destination_publish_permission(template)
extra[notification.api_property].append(
NotificationConfiguration(
Id=troposphere.Join('-', ['gordon', notification.id]),
DestinationArn=notification.get_destination_arn(),
Events=[e for e, _, _ in notification.events],
KeyFilters=[KeyFilter(Name=name, Value=value) for name, value in notification.filters]
)
)
bucket_notification_configuration_lambda = 'lambda:contrib_s3:bucket_notification_configuration:current'
template.add_resource(
S3BucketNotificationConfiguration.create_with(
utils.valid_cloudformation_name(self.name),
DependsOn=[self.project.reference(bucket_notification_configuration_lambda)],
lambda_arn=troposphere.Ref(self.project.reference(bucket_notification_configuration_lambda)),
Bucket=self.get_bucket_name(),
**dict([[k, v] for k, v in six.iteritems(extra) if v])
)
)
def validate(self):
"""Validate that there are no any other resources in the project which
try to register notifications for the same bucket than this resource"""
for resource in \
(r for r in self.project.get_resources() if isinstance(r, self.__class__) and r.bucket == self.bucket):
raise exceptions.ResourceValidationError(
("Both resources '{}' and '{}', registers notifications for "
"the bucket '{}'. Because AWS API limitations we need you to "
"register all notifications of one bucket in the same "
"resource.").format(self, resource, self.bucket)
)
| 37.510324 | 119 | 0.562126 |
794614446a56a2a63403c9b59790bed66db50444 | 872 | py | Python | setup.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | ["BSD-3-Clause"] | 11 | 2019-10-14T02:05:38.000Z | 2022-03-10T14:10:22.000Z | setup.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | ["BSD-3-Clause"] | 29 | 2019-09-02T05:49:40.000Z | 2022-02-26T00:57:54.000Z | setup.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | ["BSD-3-Clause"] | 1 | 2021-04-16T20:26:13.000Z | 2021-04-16T20:26:13.000Z |
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='lake-aha',
version="0.0.4",
author='Maxwell Strange',
author_email='[email protected]',
description='Memory Generator based on Kratos: The God of War.',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/StanfordAHA/lake",
python_requires=">=3.5",
packages=[
"lake",
"lake.attributes",
"lake.dsl",
"lake.dsl.dsl_examples",
"lake.models",
"lake.modules",
"lake.modules.spec",
"lake.passes",
"lake.spec",
"lake.top",
"lake.utils"
],
install_requires=[
"kratos",
"fault",
"magma-lang",
"pytest"
]
)
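# Once published to PyPI under the name declared above, the package would be installed
# with `pip install lake-aha` (illustrative note, not part of the original setup.py).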
| 24.222222 | 68 | 0.580275 |