Dataset schema (one record per source file):
seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)

Each record below shows the text field (the source file) followed by its remaining fields on one line.

seq_id: 7366582703
# Code to roll random values and sort them with itemgetter from the operator library, building a ranking of winners
from time import sleep
from random import randint
from operator import itemgetter
classificação = ()
jogos = {'jogador1': randint(1,6),
'jogador2': randint(1,6),
'jogador3': randint(1,6),
'jogador4': randint(1,6)}
print(f'== Resultados: ==')
for k, v in jogos.items():
print(f'{k} tirou {v}')
sleep(1)
classificação = sorted(jogos.items(), key=itemgetter(1), reverse=True)
print(f'-='*20)
print(f'\* Classificação: */')
for k, v in enumerate(classificação):
print(f'{k+1}ª posição: {v[0]} com {v[1]}')
sleep(1)
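
As a point of comparison (not part of the original file), a minimal sketch showing the same ranking built with a lambda key instead of operator.itemgetter:

from random import randint

jogos = {f'jogador{n}': randint(1, 6) for n in range(1, 5)}
# key=lambda item: item[1] sorts the (name, value) pairs by value, just like itemgetter(1)
ranking = sorted(jogos.items(), key=lambda item: item[1], reverse=True)
for pos, (nome, valor) in enumerate(ranking, start=1):
    print(f'{pos}ª posição: {nome} com {valor}')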
repo_name: mateuzh/Python | sub_path: desafio091.py | file_name: desafio091.py | file_ext: py | file_size_in_byte: 694 | program_lang: python | lang: pt | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 21215598425
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.utils.translation import gettext as _
from django.db.models.manager import BaseManager
import plotly.offline as plotly
import plotly.graph_objs as go
from reports import utils
def weight_change(
actual_weights: BaseManager, percentile_weights: BaseManager, birthday: datetime
):
"""
Create a graph showing weight over time.
:param actual_weights: a QuerySet of Weight instances.
:param percentile_weights: a QuerySet of Weight Percentile instances.
:param birthday: a datetime of the child's birthday
    :returns: a tuple of the graph's html and javascript.
"""
actual_weights = actual_weights.order_by("-date")
weighing_dates: list[datetime] = list(actual_weights.values_list("date", flat=True))
measured_weights = list(actual_weights.values_list("weight", flat=True))
actual_weights_trace = go.Scatter(
name=_("Weight"),
x=weighing_dates,
y=measured_weights,
fill="tozeroy",
mode="lines+markers",
)
if percentile_weights:
dates = list(
map(
lambda timedelta: birthday + timedelta,
percentile_weights.values_list("age_in_days", flat=True),
)
)
# reduce percentile data xrange to end 1 day after last weigh in for formatting purposes
# https://github.com/babybuddy/babybuddy/pull/708#discussion_r1332335789
last_date_for_percentiles = max(weighing_dates) + timedelta(days=2)
dates = dates[: dates.index(last_date_for_percentiles)]
percentile_weight_3_trace = go.Scatter(
name=_("P3"),
x=dates,
y=list(percentile_weights.values_list("p3_weight", flat=True)),
line={"color": "red"},
)
percentile_weight_15_trace = go.Scatter(
name=_("P15"),
x=dates,
y=list(percentile_weights.values_list("p15_weight", flat=True)),
line={"color": "orange"},
)
percentile_weight_50_trace = go.Scatter(
name=_("P50"),
x=dates,
y=list(percentile_weights.values_list("p50_weight", flat=True)),
line={"color": "green"},
)
percentile_weight_85_trace = go.Scatter(
name=_("P85"),
x=dates,
y=list(percentile_weights.values_list("p85_weight", flat=True)),
line={"color": "orange"},
)
percentile_weight_97_trace = go.Scatter(
name=_("P97"),
x=dates,
y=list(percentile_weights.values_list("p97_weight", flat=True)),
line={"color": "red"},
)
data = [
actual_weights_trace,
]
layout_args = utils.default_graph_layout_options()
layout_args["barmode"] = "stack"
layout_args["title"] = _("<b>Weight</b>")
layout_args["xaxis"]["title"] = _("Date")
layout_args["xaxis"]["rangeselector"] = utils.rangeselector_date()
layout_args["yaxis"]["title"] = _("Weight")
if percentile_weights:
# zoom in on the relevant dates
layout_args["xaxis"]["range"] = [
birthday,
max(weighing_dates) + timedelta(days=1),
]
layout_args["yaxis"]["range"] = [0, max(measured_weights) * 1.5]
data.extend(
[
percentile_weight_97_trace,
percentile_weight_85_trace,
percentile_weight_50_trace,
percentile_weight_15_trace,
percentile_weight_3_trace,
]
)
fig = go.Figure({"data": data, "layout": go.Layout(**layout_args)})
output = plotly.plot(fig, output_type="div", include_plotlyjs=False)
return utils.split_graph_output(output)
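
A self-contained sketch (hypothetical data, independent of the Django models above) of the same plotly.offline.plot call used here to obtain an embeddable <div> without bundling plotly.js:

import plotly.offline as plotly
import plotly.graph_objs as go

fig = go.Figure(
    data=[go.Scatter(x=[1, 2, 3], y=[3.2, 3.6, 4.1], mode="lines+markers", name="Weight")],
    layout=go.Layout(title="<b>Weight</b>"),
)
html_div = plotly.plot(fig, output_type="div", include_plotlyjs=False)  # returns an HTML <div> string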
repo_name: babybuddy/babybuddy | sub_path: reports/graphs/weight_change.py | file_name: weight_change.py | file_ext: py | file_size_in_byte: 3,806 | program_lang: python | lang: en | doc_type: code | stars: 1,766 | dataset: github-code | pt: 6

seq_id: 72782974907
import _pickle as pickle
import numpy as np
from mtqt_source import MTQTSource
from pathlib import Path
from pyrocko import model, orthodrome  # model is needed by find_event() below
import tensorflow as tf  # tf is used by posterior_mean_field() and prior_trainable() below
import tensorflow_probability as tfp
import waveform_processing as wp
tfd = tfp.distributions
pi = np.pi
def find_closest_grid_point(lat_ev, lon_ev, depth_ev, path_models=None,
gf_store_id=None, min_dist=7000.,
min_dist_depth=10000.):
pathlist = Path(path_models).glob('model_%s_*' % gf_store_id)
k = 0
for path in sorted(pathlist):
path = str(path)
model_coordinates = path.split("_")
lat = float(model_coordinates[3])
lon = float(model_coordinates[4])
depth = float(model_coordinates[5])
dist = orthodrome.distance_accurate50m(lat_ev, lon_ev, lat, lon)
if dist < min_dist:
min_dist = dist
dist_depth = abs(depth-depth_ev)
if dist_depth < min_dist_depth:
min_dist_depth = dist_depth
best_model = path
k = k+1
return best_model
def grid_points_in_error_ellipses(lat_ev, lon_ev, depth_ev, error_h, error_z,
path_models=None, gf_store_id=None):
pathlist = Path(path_models).glob('model_%s_*' % gf_store_id)
region = orthodrome.radius_to_region(lat_ev, lon_ev, error_h)
grid_points = []
for path in sorted(pathlist):
path = str(path)
model_coordinates = path.split("_")
lat = float(model_coordinates[3])
lon = float(model_coordinates[4])
depth = float(model_coordinates[5])
dists = orthodrome.distance_accurate50m_numpy(lat_ev, lon_ev, lat, lon)
if dists < error_h:
if depth_ev-error_z < depth and depth_ev+error_z > depth:
grid_points.append(path)
return grid_points
def find_event(path_events, time):
pathlist = Path(path_events).glob('ev_*')
for path in sorted(pathlist):
path = str(path)+"/"
event = model.load_events(path+"event.txt")[0]
if time-10 < event.time and time+10 > event.time:
return event, path
def loss_function_negative_log_likelihood():
neg_log_likelihood = lambda x, rv_x: -rv_x.log_prob(x)
return neg_log_likelihood
def posterior_mean_field(kernel_size, bias_size=0, dtype=None):
n = kernel_size + bias_size
c = np.log(np.expm1(1.))
return tf.keras.Sequential([
tfp.layers.VariableLayer(2 * n, dtype=dtype),
tfp.layers.DistributionLambda(lambda t: tfd.Independent(
tfd.Normal(loc=t[..., :n],
scale=1e-5 + tf.nn.softplus(c + t[..., n:])),
reinterpreted_batch_ndims=1)),
])
# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.
def prior_trainable(kernel_size, bias_size=0, dtype=None):
n = kernel_size + bias_size
return tf.keras.Sequential([
tfp.layers.VariableLayer(n, dtype=dtype),
tfp.layers.DistributionLambda(lambda t: tfd.Independent(
tfd.Normal(loc=t, scale=1),
reinterpreted_batch_ndims=1)),
])
def lambda_dist(scale=1e-3):
return lambda t: tfd.Normal(loc=t, scale=scale)
def getitem__all_values(filenames, idx, batch_size=72):
batch_x = filenames[idx]
data = []
labels = []
for i in range(len(filenames)):
batch_x = filenames[i]
f = open(batch_x, 'rb')
data_events, labels_events, nsamples,\
events = pickle.load(f)
f.close()
for d, l in zip(data_events, labels_events):
labels.append(l[0])
d = d[0]
d = np.asarray(d)
d = d.reshape(d.shape+(1,))
data.append(d)
return np.array(data), np.array(labels), events
def waveform_2dGenerator_from_files(filenames, batchsize=72):
batchsize = batchsize
while 1:
data = []
labels = []
for i in range(len(filenames)):
batch_x = filenames[i]
f = open(batch_x, 'rb')
data_events, labels_events, nsamples,\
events = pickle.load(f)
f.close()
for d, l in zip(data_events, labels_events):
labels.append(l[0])
d = d[0]
d = np.asarray(d)
d = d.reshape(d.shape+(1,))
data.append(d)
if len(labels) == batchsize:
yield np.array(data), np.array(labels)
data = []
labels = []
def convert_norm2real(values):
true_mts = []
true_values = []
for p in values:
p = p[0]
v, w, kappa, sigma, h = p[3], p[4], p[0], p[1], p[2]
v = (1/3)-(((1/3)*2)*v)
w = ((3/8)*pi)-((((3/8)*pi)*2)*w)
kappa = kappa*2.*pi
sigma = (pi/2)-(2*(pi/2)*sigma)
h = h
if h > 1.:
h = 1.
if v > 1.:
v = 1.
mtqt_source = MTQTSource(v=v, w=w, kappa=kappa, sigma=sigma,
h=h)
mt = mtqt_source.pyrocko_moment_tensor()
M = mtqt_source.m6
true_mts.append(mt)
true_values.append(M)
return true_mts, true_values
def convert_waveforms_to_input(waveforms):
waveforms_events = [waveforms[:]]
data_events, nsamples = wp.prepare_waveforms(waveforms_events)
data_events = np.asarray(data_events)
data_events = data_events.reshape((data_events.shape[0],)+data_events.shape[1:]+(1,))
data_events = np.float32(data_events)
return data_events
def convert_data_events_to_input(data_events):
data_events = np.asarray(data_events)
data_events = data_events.reshape((data_events.shape[0],)+data_events.shape[1:]+(1,))
data_events = np.float32(data_events)
return data_events
def getitem_values(filenames, batch_size, idx):
batch_x = filenames[idx]
f = open(batch_x, 'rb')
data_events, labels_events, nsamples, events = pickle.load(f)
f.close()
return np.array(data_events), np.array(labels_events), events
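
A minimal sketch (not from this repository) of how posterior_mean_field and prior_trainable defined above are typically plugged into a variational Keras layer; the layer width and kl_weight are placeholder assumptions:

import tensorflow as tf
import tensorflow_probability as tfp

# Hypothetical Bayesian dense layer built from the two factory functions above.
bayesian_dense = tfp.layers.DenseVariational(
    units=64,
    make_posterior_fn=posterior_mean_field,
    make_prior_fn=prior_trainable,
    kl_weight=1.0 / 1000.0,  # assumed: 1 / number_of_training_examples
    activation='relu',
)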
repo_name: braunfuss/BNN-MT | sub_path: cnn_util.py | file_name: cnn_util.py | file_ext: py | file_size_in_byte: 6,023 | program_lang: python | lang: en | doc_type: code | stars: 9 | dataset: github-code | pt: 6

seq_id: 10819501559
import yaml
import librosa
import numpy as np
import os
sr = 22050
namesong = 'LizNelson_Rainfall'
def merge_stems(namesong):
# Merge all instrumental stems into 1 mix and all vocal stems into 1 mix
stream = open("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_METADATA.yaml", "r")
docs = yaml.load_all(stream)
list_vocal = []
list_instru = []
for doc in docs:
for k, v in doc.items():
if k == 'stems':
for cle, valeur in v.items():
for items in valeur.items():
if items[0] == 'instrument':
if "singer" in items[1]:
y, sr = librosa.load("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_STEMS/" + namesong + "_STEM_" + cle[1:3] + ".wav")
if max(abs(y)) != 0:
y = y / max(abs(y))
list_vocal.append(y)
else:
y, sr = librosa.load("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_STEMS/" + namesong + "_STEM_" + cle[1:3] + ".wav")
if max(abs(y)) != 0:
y = y / max(abs(y))
list_instru.append(y)
vocal_sum = np.zeros(len(y))
instru_sum = np.zeros(len(y))
for i in range(len(list_vocal)):
vocal_sum += list_vocal[i]
for j in range(len(list_instru)):
instru_sum += list_instru[j]
if max(abs(vocal_sum)) != 0:
vocal_sum = vocal_sum / max(abs(vocal_sum))
if max(abs(instru_sum)) != 0:
instru_sum = instru_sum / max(abs(instru_sum))
mix_sum = np.zeros(len(y))
for k in range(len(y)):
mix_sum[k] = vocal_sum[k] + instru_sum[k]
librosa.output.write_wav("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_VOCALMIX.wav", vocal_sum, sr)
librosa.output.write_wav("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_INSTRUMIX.wav", instru_sum, sr)
librosa.output.write_wav("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_MIX_SUM.wav", mix_sum, sr)
return 0
def show_intrumental():
a = 0
for namesong in os.listdir("./MedleyDB_sample/Audio/"):
stream = open("./MedleyDB_sample/Audio/" + namesong + "/" + namesong + "_METADATA.yaml", "r")
docs = yaml.load_all(stream)
for doc in docs:
for k, v in doc.items():
if k == "instrumental":
if v == "yes":
print(namesong)
#if k == "genre":
#if v == "Singer/Songwriter":
#print(namesong, v)
#show_intrumental()
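
Note that librosa.output.write_wav was removed in librosa 0.8, so this script requires an older librosa. A minimal sketch of the equivalent write with the soundfile package (an assumption, not used in the original file):

import numpy as np
import soundfile as sf

audio = np.zeros(22050)  # placeholder; in merge_stems() this would be vocal_sum, instru_sum or mix_sum
sf.write("output.wav", audio, 22050)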
repo_name: moulinleo/Voice-Isolation | sub_path: merge_stems.py | file_name: merge_stems.py | file_ext: py | file_size_in_byte: 2,850 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 2277882687
import random
import time
import discord
from discord.ext import commands
import utils
class Misc(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def profile(self, ctx, user: discord.Member = None):
""" Get information about a Discord user.
Argument 'user', if specified, is the user to get information about.
'user' can be submitted using the users name, nickname, id or a mention.
If 'user' isn't entered it will display your profile.
Included information:
Nickname
Name
ID
Account creation date
Date of joining the server
Current status (online, offline, away, etc)
The top role in the server
"""
user = ctx.author if user is None else user
# Create the embed object for the message
em = utils.embed(title=f"{user.display_name} #{user.discriminator}", thumbnail=user.avatar_url,
colour=user.colour)
# Add fields containing all the information
em.add_field(name="Name", value=user.name)
em.add_field(name="Id", value=user.id)
em.add_field(name="Created", value=utils.format_time(user.created_at))
em.add_field(name="Joined", value=utils.format_time(user.joined_at))
em.add_field(name="Status", value=user.status)
em.add_field(name="Top role", value=user.top_role)
# Adding user activity information
if user.activity is not None:
activity = user.activity.type.name.title()
activity_name = user.activity.name
# Formatting for if the activity is listening to make grammar correct
activity = activity + ' to' if activity == 'Listening' else activity
# Add support for Spotify by displaying the song title and the artist
if activity_name == 'Spotify':
activity_name += f': {user.activity.title} by {user.activity.artist}'
em.add_field(name=activity, value=activity_name)
await ctx.send(embed=em)
@commands.command()
async def ping(self, ctx):
""" Test the latency to the bot and see how fast it responds """
# Create embed for message
em = utils.embed(title=f"Ping", description="Pinging")
start = time.perf_counter()
message = await ctx.send(embed=em)
end = time.perf_counter()
# Work out Time difference and convert to milliseconds
duration = (end - start) * 1000
em.description = f'Pong! {round(duration, 2)}ms'
await message.edit(embed=em)
@commands.command(aliases=["hi", "sup", "hey", "yo", "howdy"])
async def hello(self, ctx):
""" Say hello and get a random greeting from the bot."""
# Pick a random greeting to reply with from the aliases
greeting = random.choice([x.aliases for x in self.bot.commands if x.name == 'hello'][0]).title()
# Send greeting message
await ctx.send(embed=utils.embed(title="Hello", description=f"{greeting} {ctx.author.mention}!",
thumbnail='https://static.tumblr.com/gwp7jk3/QXAma9845/k-on_wave.gif'))
@commands.command(aliases=["calc"])
async def math(self, ctx, equation: str):
""" Get the result to basic arithmetic. """
try:
# Send result
await ctx.send(embed=utils.embed(title="Math", description=f"{equation.strip()} = {eval(equation)}"))
except SyntaxError:
            # If a syntax error occurred, print the result as "SyntaxError"
await ctx.send(embed=utils.embed(title="Math", description=f"{equation.strip()} = SyntaxError"))
@commands.command(aliases=["inv"])
async def invite(self, ctx):
""" Get an invite link for the server.
If a server doesn't have an invite link then a new one will be generated.
Otherwise an existing one will be displayed.
"""
# Check for invite links
if len(await ctx.guild.invites()) < 1:
await ctx.guild.channels[0].create_invite()
# Send invite link
await ctx.send((await ctx.guild.invites())[0].url)
def setup(bot):
bot.add_cog(Misc(bot))
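
The math command above passes user input straight to eval(), which will execute arbitrary Python. A minimal sketch (not part of the original cog) of an arithmetic-only evaluator built on the ast module that could replace it:

import ast
import operator

_OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.Div: operator.truediv,
        ast.Pow: operator.pow, ast.USub: operator.neg}

def safe_eval(expression: str):
    """Evaluate a basic arithmetic expression without executing arbitrary code."""
    def _eval(node):
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError("unsupported expression")
    return _eval(ast.parse(expression, mode="eval").body)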
repo_name: SkippyTheSnake/Discord-bot | sub_path: cogs/misc.py | file_name: misc.py | file_ext: py | file_size_in_byte: 3,962 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 16800280781
class Book:
def __init__(self,id,name,isbn,page_count,issued,author,year):
self.id = id
self.name = name
self.isbn = isbn
self.page_count = page_count
self.issued = issued
self.author = author
self.year = year
def doIssue(self):
self.issued = True
def doReturn(self):
self.issued = False
def to_dict(self):
return {
"id" : self.id,
"name" : self.name,
"isbn" : self.isbn,
"page_count" : self.page_count,
"issued" : self.issued,
"author" : self.author,
"year" : self.year
}
#book = Book(1,"Book Name", 12345,200,2003,"John Doe", 2021)
#print(book.to_dict())
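
A minimal usage sketch consistent with the repository's MongoDB focus; the connection URI, database and collection names are assumptions, not part of book.py:

from pymongo import MongoClient

book = Book(1, "Book Name", 12345, 200, False, "John Doe", 2021)
book.doIssue()
collection = MongoClient("mongodb://localhost:27017")["library"]["books"]  # hypothetical connection
collection.insert_one(book.to_dict())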
repo_name: ale90bsas/library-python-mongodb | sub_path: book.py | file_name: book.py | file_ext: py | file_size_in_byte: 775 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 28475246683
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
import math
import numpy as np
from tensorflow.keras.regularizers import l2
tf.keras.backend.set_learning_phase(1)
conv_init = tf.keras.initializers.VarianceScaling(scale=2.0, mode='fan_out', distribution='truncated_normal')
dense_init = tf.keras.initializers.VarianceScaling(scale=1.0 / 3.0, mode='fan_out', distribution='uniform')
# Compute the number of filters scaled by the width coefficient
def round_filters(filters, width_coefficient):
multiplier = width_coefficient
divisor = 8
min_depth = None
min_depth = min_depth or divisor
filters *= multiplier
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
# Compute the number of layer repeats scaled by the depth coefficient
def round_repeats(repeats, depth_coefficient):
multiplier = depth_coefficient
return int(math.ceil(multiplier * repeats))
def drop_connect(inputs, survival_prob):
    '''
    Following "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
    randomly drop some blocks
    '''
    # Outside the training phase, return the input unchanged (handled by in_train_phase below)
    # Randomly drop a block according to the survival probability
random_tensor = survival_prob
batch_size = tf.shape(inputs)[0]
random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
output = tf.math.divide(inputs, survival_prob) * binary_tensor
#tf.print('learn phase:', tf.keras.backend.learning_phase())
return tf.keras.backend.in_train_phase(output, inputs)
class act_layer(tf.keras.layers.Layer):
def __init__(self):
super(act_layer, self).__init__()
def call(self, inputs):
return tf.nn.swish(inputs)
#return tf.nn.relu(inputs)
class SENet(tf.keras.layers.Layer):
def __init__(self, num_filter, input_channels, se_ratio=0.25, **kwargs):
super(SENet, self).__init__(**kwargs)
self.reduce_filters = max(1, int(input_channels * se_ratio))
self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
self.reduce_conv = tf.keras.layers.Conv2D(filters=self.reduce_filters,
kernel_size=(1,1),
strides=(1,1),
padding='same',
kernel_initializer=conv_init,
kernel_regularizer=l2(1e-5))
self.exapnd_conv = tf.keras.layers.Conv2D(filters=num_filter,
kernel_size=(1,1),
strides=(1,1),
padding='same',
kernel_initializer=conv_init,
kernel_regularizer=l2(1e-5))
self.act = act_layer()
def call(self, inputs):
x = self.avgpool(inputs)
x = tf.expand_dims(input=x, axis=1)
x = tf.expand_dims(input=x, axis=1)
x = self.reduce_conv(x)
x = self.act(x)
x = self.exapnd_conv(x)
x = tf.nn.sigmoid(x)
return inputs * x
class MBConv(tf.keras.layers.Layer):
def __init__(self, input_channels, output_channels, expand_ratio,
kernel_size, strides, se_ratio = 0.25, drop_ratio=0.2):
'''
Args:
        input_channels: number of input channels
        output_channels: number of output channels
        expand_ratio: the number that follows "MBConv" in the paper
        kernel_size: kernel size (3*3) or (5*5)
        strides: Conv stride 1 or 2
        se_ratio: SENet parameter
        drop_ratio: drop connect probability from the paper
'''
super(MBConv, self).__init__()
self.strides = strides
self.input_channels = input_channels
self.output_channels = output_channels
self.survival_prob = 1 - drop_ratio
self.drop_ratio = drop_ratio
self.expand_ration = expand_ratio
        # Increase the number of filters according to expand_ratio
self.filters = input_channels * expand_ratio
self.conv_0 = tf.keras.layers.Conv2D(filters=self.filters,
kernel_size=(1,1),
strides=(1,1),
padding='same',
use_bias=False,
kernel_initializer=conv_init,
kernel_regularizer=l2(1e-5))
self.bn_0 = tf.keras.layers.BatchNormalization()
#depthwise convolution
self.depth_conv_0 = tf.keras.layers.DepthwiseConv2D(kernel_size=kernel_size,
strides=(self.strides, self.strides),
padding='same',
use_bias=False,
kernel_initializer=conv_init,
kernel_regularizer=l2(1e-5))
self.bn_1 = tf.keras.layers.BatchNormalization()
#SENet
self.SENet = SENet(self.filters, self.input_channels, se_ratio=se_ratio)
#project convolution
self.conv_1 = tf.keras.layers.Conv2D(filters=output_channels,
kernel_size=(1,1),
strides=(1,1),
padding='same',
use_bias=False,
kernel_initializer=conv_init,
kernel_regularizer=l2(1e-5))
self.bn_2 = tf.keras.layers.BatchNormalization()
self.act_1 = act_layer()
self.act_2 = act_layer()
def call(self, inputs):
#expand dim
if self.expand_ration != 1:
x = self.conv_0(inputs)
x = self.bn_0(x)
x = self.act_1(x)
else:
x = inputs
#depthwise conv
x = self.depth_conv_0(x)
x = self.bn_1(x)
x = self.act_2(x)
#SENet
x = self.SENet(x)
x = self.conv_1(x)
x = self.bn_2(x)
if self.strides == 1 and self.input_channels == self.output_channels:
if self.drop_ratio:
x = drop_connect(x, self.survival_prob)
x = tf.add(x, inputs)
return x
def creat_mbconv_block(input_tensor, input_channels, output_channels,
layer_repeat, expand_ratio, kernel_size, strides,
se_ratio = 0.25, drop_ratio=0.2):
    '''
    Configure MBConv1, MBConv6 ... according to the given parameters
    '''
    # If layer_repeat > 1, the following MBConv blocks
    # use output_channels as input_channels and strides = 1
x = MBConv(input_channels = input_channels, output_channels = output_channels,
expand_ratio = expand_ratio, kernel_size = kernel_size,
strides = strides, se_ratio = se_ratio, drop_ratio=drop_ratio)(input_tensor)
for i in range(layer_repeat - 1):
x = MBConv(input_channels = output_channels, output_channels = output_channels,
expand_ratio = expand_ratio, kernel_size = kernel_size,
strides = 1, se_ratio = se_ratio, drop_ratio=drop_ratio)(x)
return x
def creat_efficient_net(width_coefficient, depth_coefficient, resolution, dropout_rate, num_classes=1000):
    '''
    Nine parts in total:
    1. Downsampling stem
    2~8. The different MBConv stages
    9. Conv & Pooling & FC
    '''
img_input = tf.keras.Input(shape=(resolution, resolution, 3))
    # Part 1
x = tf.keras.layers.Conv2D(filters=round_filters(32, width_coefficient),
kernel_size=(3,3), strides=(2,2), padding='same',
use_bias=False, kernel_initializer=conv_init,
kernel_regularizer=l2(1e-5))(img_input)
x = tf.keras.layers.BatchNormalization()(x)
    # Part 2
#2
x = creat_mbconv_block(x,
input_channels=round_filters(32, width_coefficient),
output_channels=round_filters(16, width_coefficient),
layer_repeat=round_repeats(1, depth_coefficient),
expand_ratio=1, kernel_size=(3,3), strides=1,
drop_ratio=dropout_rate)
#3
x = creat_mbconv_block(x,
input_channels=round_filters(16, width_coefficient),
output_channels=round_filters(24, width_coefficient),
layer_repeat=round_repeats(2, depth_coefficient),
expand_ratio=6, kernel_size=(3,3), strides=2,
drop_ratio=dropout_rate)
#4
x = creat_mbconv_block(x,
input_channels=round_filters(24, width_coefficient),
output_channels=round_filters(40, width_coefficient),
layer_repeat=round_repeats(2, depth_coefficient),
expand_ratio=6, kernel_size=(5,5), strides=2,
drop_ratio=dropout_rate)
#5
x = creat_mbconv_block(x,
input_channels=round_filters(40, width_coefficient),
output_channels=round_filters(80, width_coefficient),
layer_repeat=round_repeats(3, depth_coefficient),
expand_ratio=6, kernel_size=(3,3), strides=2,
drop_ratio=dropout_rate)
#6
x = creat_mbconv_block(x,
input_channels=round_filters(80, width_coefficient),
output_channels=round_filters(112, width_coefficient),
layer_repeat=round_repeats(3, depth_coefficient),
expand_ratio=6, kernel_size=(5,5), strides=1,
drop_ratio=dropout_rate)
#7
x = creat_mbconv_block(x,
input_channels=round_filters(112, width_coefficient),
output_channels=round_filters(192, width_coefficient),
layer_repeat=round_repeats(4, depth_coefficient),
expand_ratio=6, kernel_size=(5,5), strides=2,
drop_ratio=dropout_rate)
#8
x = creat_mbconv_block(x,
input_channels=round_filters(192, width_coefficient),
output_channels=round_filters(320, width_coefficient),
layer_repeat=round_repeats(1, depth_coefficient),
expand_ratio=6, kernel_size=(3,3), strides=1,
drop_ratio=dropout_rate)
#9
x = tf.keras.layers.Conv2D(filters=round_filters(1280, width_coefficient),
kernel_size=(1,1), strides=(1,1), padding='same',
use_bias=False, kernel_regularizer=l2(1e-5))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = act_layer()(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.Dense(units=num_classes, kernel_initializer=dense_init, kernel_regularizer=l2(1e-5))(x)
x = tf.keras.layers.Activation('softmax', dtype='float32')(x)
model = tf.keras.Model(inputs=img_input, outputs=x)
return model
if __name__ == '__main__':
model = creat_efficient_net(1.0, 1.0, 224, 0.3)
model.compile(optimizer = 'adam', loss = tf.keras.losses.SparseCategoricalCrossentropy())
model.summary()
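
A short sketch of building other variants; the width/depth/resolution/dropout values below are the published EfficientNet-B0 to B3 coefficients, not something defined in this file:

# (width_coefficient, depth_coefficient, resolution, dropout_rate) from the EfficientNet paper
EFFICIENTNET_PARAMS = {
    'b0': (1.0, 1.0, 224, 0.2),
    'b1': (1.0, 1.1, 240, 0.2),
    'b2': (1.1, 1.2, 260, 0.3),
    'b3': (1.2, 1.4, 300, 0.3),
}
model_b3 = creat_efficient_net(*EFFICIENTNET_PARAMS['b3'])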
repo_name: qwerasdf887/Keras-Efficientnet | sub_path: MBConv.py | file_name: MBConv.py | file_ext: py | file_size_in_byte: 12,100 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 10230251745
import json
import numpy as np
from eval_list import eval_list
import evals.data
from evals.registry import registry
np.random.seed(42)
min_samples_per_dataset = 50
n_test_samples = 10
seen = set()
datarows = []
for eval in registry.get_evals("*"):
if eval.key not in eval_list or eval.key in seen:
continue
seen.add(eval.key)
if eval.args and "samples_jsonl" in eval.args:
samples = evals.data.get_jsonl(eval.args["samples_jsonl"])
        # Construct our tasks dataset
instruction_input_output = []
for sample in samples:
if "input" in sample and "ideal" in sample:
# We only want single-system single-user samples:
if isinstance(sample["input"], list) and len(sample["input"]) == 2:
if (
sample["input"][0]["role"] == "system"
and sample["input"][1]["role"] == "user"
):
# Skip if output is a list
if isinstance(sample["ideal"], list):
continue
dp_instruction = sample["input"][0]["content"]
dp_in = sample["input"][1]["content"]
dp_out = sample["ideal"]
instruction_input_output.append((dp_instruction, dp_in, dp_out))
# Skip if there are not enough samples
if len(instruction_input_output) < min_samples_per_dataset:
continue
# Check that all dp_instruction are the same
instruction_input_output = sorted(instruction_input_output, key=lambda x: x[0])
if instruction_input_output[0][0] != instruction_input_output[-1][0]:
continue
# Shuffle samples
np.random.shuffle(instruction_input_output)
test_samples = [
{
"input": i,
"output": o,
}
for _, i, o in instruction_input_output[:n_test_samples]
]
train_samples = [
{
"input": i,
"output": o,
}
for _, i, o in instruction_input_output[n_test_samples:]
]
row = {
"eval": eval.key,
"instruction": instruction_input_output[0][0],
"test_samples": test_samples,
"train_samples": train_samples,
}
datarows.append(row)
assert len(datarows) == len(
eval_list
), f"Unexpected number of evals: {len(datarows)} != {len(eval_list)}"
assert set([r["eval"] for r in datarows]) == set(
eval_list
), f"Missing evals: {set(eval_list) - set([r['eval'] for r in datarows])}"
# Shuffle rows
np.random.shuffle(datarows)
# Save jsonl to file
with open("samples.jsonl", "w") as f:
for row in datarows:
f.write(json.dumps(row) + "\n")
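
A minimal sketch (not part of the script) of reading samples.jsonl back in, mirroring the write loop above:

import json

with open("samples.jsonl") as f:
    rows = [json.loads(line) for line in f]
print(rows[0]["eval"], len(rows[0]["train_samples"]), len(rows[0]["test_samples"]))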
repo_name: openai/evals | sub_path: evals/elsuite/self_prompting/scripts/dataset/compile_data.py | file_name: compile_data.py | file_ext: py | file_size_in_byte: 2,868 | program_lang: python | lang: en | doc_type: code | stars: 12,495 | dataset: github-code | pt: 6

seq_id: 43105564820
import random
def flip_coin():
"""
returns a coin flip- random integer between 0 and 1
if 1 - the coin lands on head
if 0 - the coin lands on tail
"""
return random.randint(0,1) #equal chance of being on head or tails
def monte_carlo(n):
"""
performs a monte_carlo simulation of a coin flip
[Param]\t n (int)- number of samples
[Return]\t None- prints out the results of the simulation
"""
head_count=0
tail_count=0
exp_count=0
while exp_count < n:
result = flip_coin()
if result == 1:
head_count +=1
else:
tail_count +=1
exp_count+=1
print(f"There were {n} simulations performed.")
msg=f"There were {(head_count/n) * 100} % heads"
print(msg)
msg=f"There were {(tail_count/n)* 100} % tails"
print(msg)
# import the random module
help(random.choice)
monte_carlo(1000000)
'''if result ==1:
print("head")
else:
print("tail")'''
repo_name: jzhanay001/Python-Bootcamp | sub_path: week_two/coin.py | file_name: coin.py | file_ext: py | file_size_in_byte: 977 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 14149764666
class LinkedList:
def __init__(self):
self.length = 0
self.head = None
def print_backward(self):
print("[", end="")
if self.head is not None:
self.head.print_backward()
print("]")
def add_first(self, cargo):
node = Node(cargo)
node.next = self.head
self.head = node
self.length += 1
    def print_list(self):
        print("[", end="")
        node = self.head  # walk with a local cursor so printing does not empty the list
        while node is not None:
            print(node, end="")
            node = node.next
            if node is not None:
                print(", ", end="")
        print("]", end="")
        print()
class Node:
def __init__(self, cargo=None, next=None):
self.cargo = cargo
self.next = next
def __str__(self):
if self.cargo == None:
return ""
return str(self.cargo)
def print_backward(self):
first_node = self
if self.next is not None: #if not last item which does not have next defined
tail = self.next #move to the next item, print nothing
            tail.print_backward()  # recurse on the tail until we reach the last node
print(self.cargo, end=" ") #print item with no next defined
if self is not first_node:
print(",", end=" ")
linklist = LinkedList()
node1 = Node(1)
node1.print_backward()
linklist.add_first(3)
linklist.add_first(2)
linklist.add_first(1)
linklist.print_backward()
linklist.print_list()
#
# def remove_second(list):
# if list is None: return
# if list.next is None: return
# second = list.next
# first = list
# # this makes the first node refer to the third as the
# # second one refers to the third and now is assigned as next for the first
# first.next = second.next
# #separates second node from the rest (removes its link)
# second.next = None
# return second
#
#
# node1 = Node(1)
# node2 = Node(2)
# node3 = Node(3)
#
# #
# node1.next = node2
# node2.next = node3
# # print_list(node1)
# node1.print_backward()
#
# removed = remove_second(node1)
# print_list(removed)
#
# print_list(node1)
repo_name: Tomasz-Kluczkowski/Education-Beginner-Level | sub_path: THINK LIKE A COMPUTER SCIENTIST FOR PYTHON 3/CHAPTER 24 LINKED LISTS/linked_list.py | file_name: linked_list.py | file_ext: py | file_size_in_byte: 2,148 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 18003897185
import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.distributions import Normal
from algo.pn_utils.maniskill_learn.utils.torch import ExtendedModule
from ..builder import DENSEHEADS
class GaussianHeadBase(ExtendedModule):
def __init__(self, scale_prior=1, bias_prior=0, dim_action=None, epsilon=1E-6):
super(GaussianHeadBase, self).__init__()
self.scale_prior = Parameter(torch.tensor(scale_prior, dtype=torch.float32), requires_grad=False)
self.bias_prior = Parameter(torch.tensor(bias_prior, dtype=torch.float32), requires_grad=False)
        if dim_action is None:
            assert self.scale_prior.ndim == 1
            self.dim_action = self.scale_prior.shape[0]
        else:
            # without this branch self.dim_action is never set, breaking uniform() and SharedGaussianHead
            self.dim_action = dim_action
self.epsilon = epsilon
self.log_unif_prob = torch.log(1.0 / (2 * self.scale_prior.data)).sum().item()
def uniform(self, sample_shape):
return ((torch.rand(sample_shape, self.dim_action, device=self.device) * 2 - 1)
* self.scale_prior + self.bias_prior), torch.ones(sample_shape, device=self.device) * self.log_unif_prob
def sample(self, mean, log_std, num_actions):
log_std = log_std.expand_as(mean)
mean = torch.repeat_interleave(mean, num_actions, dim=0)
log_std = torch.repeat_interleave(log_std, num_actions, dim=0)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample()
y_t = torch.tanh(x_t)
action = y_t * self.scale_prior + self.bias_prior
log_prob = normal.log_prob(x_t)
log_prob -= torch.log(self.scale_prior * (1 - y_t.pow(2)) + self.epsilon)
log_prob = log_prob.sum(1, keepdim=True)
mean = torch.tanh(mean) * self.scale_prior + self.bias_prior
return action, log_prob, mean, log_std, std
@DENSEHEADS.register_module()
class GaussianHead(GaussianHeadBase):
def __init__(self, scale_prior=1, bias_prior=0, dim_action=None, log_sig_min=-20, log_sig_max=2, epsilon=1e-6):
super(GaussianHead, self).__init__(scale_prior, bias_prior, dim_action, epsilon)
self.log_sig_min = log_sig_min
self.log_sig_max = log_sig_max
def forward(self, feature, num_actions=1):
assert feature.shape[-1] % 2 == 0
mean, log_std = feature.split(feature.shape[-1] // 2, dim=-1)
log_std = torch.clamp(log_std, min=self.log_sig_min, max=self.log_sig_max)
return self.sample(mean, log_std, num_actions)
@DENSEHEADS.register_module()
class SharedGaussianHead(GaussianHeadBase):
def __init__(self, scale_prior=1, bias_prior=0, dim_action=None, epsilon=1e-6):
super(SharedGaussianHead, self).__init__(scale_prior, bias_prior, dim_action, epsilon)
self.log_std = nn.Parameter(torch.zeros(1, self.dim_action).float())
def forward(self, mean, num_actions=1):
return self.sample(mean, self.log_std, num_actions)
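
A minimal usage sketch (tensor shapes are assumptions, and it presumes ExtendedModule behaves like a regular nn.Module): the feature dimension must be twice the action dimension because forward() splits it into mean and log_std:

import torch

head = GaussianHead(scale_prior=1.0, bias_prior=0.0, dim_action=4)
feature = torch.randn(8, 2 * 4)  # batch of 8, [mean | log_std] concatenated
action, log_prob, mean, log_std, std = head(feature, num_actions=1)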
repo_name: PKU-EPIC/UniDexGrasp | sub_path: dexgrasp_policy/dexgrasp/algo/pn_utils/maniskill_learn/networks/dense_heads/gaussian.py | file_name: gaussian.py | file_ext: py | file_size_in_byte: 2,881 | program_lang: python | lang: en | doc_type: code | stars: 63 | dataset: github-code | pt: 6

seq_id: 38793898315
# import tensorflow libraries
import tensorflow as tf
import numpy as np
# import opencv and find webcam
import cv2
cap = cv2.VideoCapture(0)
if not(cap.isOpened()):
print("Can't find webcam, shutting down...")
quit()
# set resolution of camera capture
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960.0)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 960.0)
# import system libraries
import os
import sys
sys.path.append("..")
# import tf and custom tf libraries
from utils import label_map_util
import custom_utils
# load prediction model
print("\nLoading Saved Model...")
PATH_TO_MODEL = "new_model" # point to folder containing 'model' folder
detect_fn = tf.saved_model.load(os.path.join(PATH_TO_MODEL, "saved_model"))
# load category index for prediction model
print("\nLoading Category Index...")
category_index = label_map_util.create_category_index_from_labelmap(os.path.join(PATH_TO_MODEL, "mscoco_label_map.pbtxt"), use_display_name=True)
# begin main loop
print("\nBegin Live Image Predicting")
while True:
# capture image from webcam
ret, image_np = cap.read()
if ret == False:
print("Error Reading Frame, skipping...")
continue
# convert image to tensor
input_tensor = tf.convert_to_tensor(image_np)
input_tensor = input_tensor[tf.newaxis, ...]
# perform prediction/detection on image tensor
detections = detect_fn(input_tensor)
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()}
detections['num_detections'] = num_detections
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
# draw detection boxes on image using modified visualization function
custom_utils.visualize_boxes_and_labels_on_image_array(
image_np,
detections['detection_boxes'],
detections['detection_classes'],
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=200,
min_score_thresh=0.65,
agnostic_mode=False,
line_thickness=8)
# display captured image with detection boxes
cv2.imshow('object detection', cv2.resize(image_np, (800,600)))
# exit program when 'q' key is pressed
if cv2.waitKey(25) == ord('q'):
cv2.destroyAllWindows()
break
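
One small addition worth making here (a sketch, not in the original file): release the webcam handle once the capture loop exits.

# Free the webcam after the capture loop ends.
cap.release()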
repo_name: OSUrobotics/object_detection | sub_path: mainfile.py | file_name: mainfile.py | file_ext: py | file_size_in_byte: 2,379 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 30881965405
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
version = '0.9.4'
long_description = (
open('README.rst').read()
+ '\n' +
open(os.path.join('docs', 'HISTORY.rst')).read()
+ '\n')
setup(name='plone.jsonapi.routes',
version=version,
description="Plone JSON API -- Routes",
long_description=long_description,
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Framework :: Plone",
"Framework :: Plone :: 4.3",
"Framework :: Plone :: 5.0",
"Framework :: Plone :: 5.1",
"Framework :: Zope2",
],
keywords='',
author='Ramon Bartl',
author_email='[email protected]',
url='https://github.com/collective/plone.jsonapi.routes',
license='MIT',
packages=find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['plone', 'plone.jsonapi'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.api',
'plone.jsonapi.core>=0.6',
# -*- Extra requirements: -*-
],
extras_require={
'test': [
'plone.app.testing',
'unittest2',
'robotsuite',
'robotframework-selenium2library',
'plone.app.robotframework',
'robotframework-debuglibrary',
]
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
repo_name: collective/plone.jsonapi.routes | sub_path: setup.py | file_name: setup.py | file_ext: py | file_size_in_byte: 1,641 | program_lang: python | lang: en | doc_type: code | stars: 12 | dataset: github-code | pt: 6

seq_id: 70986505148
#!/usr/bin/env python3
"""
Created on Thu Mar 13 9:31:11 2020
@author: Hanrui Wu
"""
import pysam
import sys
import gzip
def read_cell_names1(pathseq_bam_file, write_bac):
seqbam = pysam.AlignmentFile(pathseq_bam_file, "rb",threads=36)
read_name_pathseq = open(write_bac,'w')
total_pathseq_reads=0
total_YP_reads=0
for each_line in seqbam:
total_pathseq_reads+=1
if each_line.has_tag('YP'):
total_YP_reads+=1
#outline = each_line.query_name + '\t' + each_line.get_tag('YP') + '\t' + str(each_line.mapping_quality) + '\n'#update 0520: added mapping quality column
#022422:use AS tag instead of mapping_quality
outline = each_line.query_name + '\t' + each_line.get_tag('YP') + '\t' + str(each_line.get_tag('AS')) + '\n'
read_name_pathseq.write(outline)
print('Total reads in pathseq bam = ',total_pathseq_reads)
print('Total reads in pathseq bam with YP tag = ',total_YP_reads)
return
def read_readnames(readname_file):
set_for_readnames = set()
dict_name = {}
with open (readname_file,'r') as r:
for each_line in r:
#remove \n
each_line = each_line.rstrip('\n')
each_line_list = each_line.split('\t')
set_for_readnames.add(each_line_list[0])
dict_name[each_line_list[0]] = {}
dict_name[each_line_list[0]]["pathogen"] = each_line_list[1]
dict_name[each_line_list[0]]["mapping_quality"] = each_line_list[2]
#print('Number of Pathseq Reads (with YP TAG) = ',len(set_for_readnames))
return set_for_readnames, dict_name
def read_pathseq_report_and_create_dict(pathseq_report_csv):
pathseq_report = open(pathseq_report_csv,'r')
#three dicts: 1. cell-> cell_UMI 2. UMI-> id_sting 3. id -> genus
#id -> genus:
dict_for_genus = {}
set_for_genera = set()
#print('lens of pathseq report: ',len(pathseq_report))
for each_line in pathseq_report:
each_line = each_line.rstrip('\n')
each_line_list = each_line.split('\t')
level = each_line_list[2]
tax = each_line_list[3]
if level == 'genus':
set_for_genera.add(tax)
if '|' in each_line_list[1]:
name_string_list = each_line_list[1].split('|')
for n in range(len(name_string_list)):
pointer = -n-1
if not '_' in name_string_list[pointer]:
name = name_string_list[pointer]
break
if 'unclassified' in name_string_list[pointer]:
name = name_string_list[pointer]
break
id = each_line_list[0]
dict_for_genus[id] = name
print ("len(dict_for_genus) = ",len(dict_for_genus))
# 070421:added a veriation function to make sure only annotate genera
#exclude_set = set()
#for each_key in dict_for_genus:
# if not dict_for_genus[each_key] in set_for_genera:
# exclude_set.add(each_key)
#for each_key in list(dict_for_genus):
# if each_key in exclude_set:
# del dict_for_genus[each_key]
# #print("deleting: ", each_key)
#print ("len(dict_for_genus) 2 = ",len(dict_for_genus))
return dict_for_genus
def read_cell_names2(set_of_readnames, dict_name, dict_for_genus,original_bam_file,unmap_cbub_bam_file,unmap_cbub_fasta_file, out_cell_list,out_readname_cell_path):
seqbam = pysam.AlignmentFile(original_bam_file, "rb",threads=36)
readname_cell_path = open(out_readname_cell_path,'w')
# Also output fasta and bam for pathseq/kk2
unmap_cbub_fasta = open(unmap_cbub_fasta_file,'w')
unmap_cbub_bam = pysam.AlignmentFile(unmap_cbub_bam_file, "wb", seqbam)
set_for_infect_cells=set()
total_cellranger_bam_reads = 0
total_cellranger_reads_UB_CB_tags = 0
total_cellranger_reads_UB_CB_unmap = 0
total_cellranger_reads_UB_CB_unmap_Aligned_to_Pathseq_YP_reads = 0
total_potential_UMI_including_ambigious_reads = set()
#added 102621: output a bam file with only UBCB unmap reads
for each_line in seqbam:
total_cellranger_bam_reads+=1
if each_line.has_tag('CB') and each_line.has_tag('UB'):
total_cellranger_reads_UB_CB_tags+=1
if each_line.has_tag('CB') and each_line.has_tag('UB') and each_line.is_unmapped:#updated 0520: only extract unmapped reads from cellranger
total_cellranger_reads_UB_CB_unmap+=1
# added 102721: output a fasta file for kraken
query_name_in_cellranger_bam = each_line.query_name
seq_in_cellranger_bam = each_line.query_sequence
unmap_cbub_fasta.write('>')
unmap_cbub_fasta.write(query_name_in_cellranger_bam)
unmap_cbub_fasta.write('\n')
unmap_cbub_fasta.write(seq_in_cellranger_bam)
unmap_cbub_fasta.write('\n')
unmap_cbub_bam.write(each_line)
if each_line.query_name in set_of_readnames:
set_for_infect_cells.add(each_line.get_tag('CB'))
readname = each_line.query_name
cellname = each_line.get_tag('CB')
umi = each_line.get_tag('UB')
path = dict_name[readname]["pathogen"]
#print(readname)
#print(path)
#translate id to genus
id_string_list = path.split(',')
genus_list = []
for each_id in id_string_list:
#070421: add function to ignore unfound genus
if each_id in dict_for_genus:
genus = dict_for_genus[each_id]
genus_list.append(genus)
else:
print(each_id," not found!")
genus_list = list(set(genus_list))
genus_list.sort()
genus_list_string = ','.join(genus_list)
#barcode_UMI_dict[barcode_UMI]["genus_string"] = genus_list
mapping_quality = dict_name[readname]["mapping_quality"]
outline = readname+'\t'+cellname+'\t'+umi+'\t'+path+'\t'+mapping_quality+'\t'+genus_list_string+'\n'
readname_cell_path.write(outline)
total_potential_UMI_including_ambigious_reads.add(umi)
total_cellranger_reads_UB_CB_unmap_Aligned_to_Pathseq_YP_reads+=1
print('total cellranger bam reads = ',total_cellranger_bam_reads)
print('total cellranger bam reads with UB CB tags = ',total_cellranger_reads_UB_CB_tags)
print('total UNMAPPED cellranger bam reads with UB CB tags = ',total_cellranger_reads_UB_CB_unmap)
print('total cellranger reads with UB_CB_unmap Aligned to Pathseq reads with YP tags = ',total_cellranger_reads_UB_CB_unmap_Aligned_to_Pathseq_YP_reads)
print('total potential UMI including ambigious reads = ',len(total_potential_UMI_including_ambigious_reads))
cell_list = open(out_cell_list,'w')
for each_cell in set_for_infect_cells:
cell_list.write(each_cell)
cell_list.write('\n')
return
def generate_barcode_UMI_dict(out_readname_cell_path):#,pathseq_report_csv,out_genus_file,sample_ident):
cell_path_file = open(out_readname_cell_path,'r')
# pathseq_report = open(pathseq_report_csv,'r')
#genus_file = open(out_genus_file,'w')
barcode_UMI_dict = {}
for each_line in cell_path_file:
each_line = each_line.rstrip('\n')
each_line_list = each_line.split('\t')
read_name = each_line_list[0]
cell_barcode = each_line_list[1]
UMI = each_line_list[2]
id_string = each_line_list[3]
id_string_list = id_string.split(',')
barcode_UMI = cell_barcode+'+'+UMI
mapping_quality = each_line_list[4]
genus_string = each_line_list[5]
#then, pick the read with highest mapping quality for each UMI, it's a different strategy then previous idea
if not barcode_UMI in barcode_UMI_dict:
#if not int(mapping_quality) == 0:#052721 tried to test 0 mapping quality
barcode_UMI_dict[barcode_UMI]={}
barcode_UMI_dict[barcode_UMI]["id_string"] = id_string_list
barcode_UMI_dict[barcode_UMI]["mapping_quality"] = int(mapping_quality)
barcode_UMI_dict[barcode_UMI]["genus_string"] = genus_string
'''
#also add a translated genus name column (052221)
genus_list = []
for each_id in id_string_list:
genus = dict_for_genus[each_id]
genus_list.append(genus)
genus_list = list(set(genus_list))
barcode_UMI_dict[barcode_UMI]["genus_string"] = genus_list
'''
elif int(mapping_quality) > barcode_UMI_dict[barcode_UMI]["mapping_quality"]:
barcode_UMI_dict[barcode_UMI]["id_string"] = id_string_list
barcode_UMI_dict[barcode_UMI]["mapping_quality"] = int(mapping_quality)
barcode_UMI_dict[barcode_UMI]["genus_string"] = genus_string
print('Total UMI in barcode_UMI_dict = ',len(barcode_UMI_dict))
return barcode_UMI_dict #fast, no need to generate another temp file
def output_cells_genus_list(barcode_UMI_dict,dict_for_genus):
#genus_file = open(out_genus_file,'w')
#three dicts: 1. cell-> cell_UMI 2. UMI-> id_sting 3. id -> genus
cells_dict = {}
for barcode_UMI in barcode_UMI_dict:
cell = barcode_UMI.split('+')[0]
if not cell in cells_dict:
cells_dict[cell]=[]
cells_dict[cell].append(barcode_UMI)
else:
cells_dict[cell].append(barcode_UMI)
#UMI_id_dict is to store UMI<- ids string
UMI_id_dict = {}
for barcode_UMI in barcode_UMI_dict:
#0523:
if not ',' in barcode_UMI_dict[barcode_UMI]["genus_string"]:
UMI_id_dict[barcode_UMI] = barcode_UMI_dict[barcode_UMI]["id_string"]
#then update UMI_id_dict with genus name
unambigious_UMI = {}
for barcode_UMI in UMI_id_dict:
#id_string = UMI_id_dict[barcode_UMI]
id_list = UMI_id_dict[barcode_UMI]
genus_list = []
for each_id in id_list:
#070421: add function to ignore unfound genus!!
if each_id in dict_for_genus:
genus = dict_for_genus[each_id]
genus_list.append(genus)
genus_list = list(set(genus_list))
if len(genus_list) == 1:#only keep unambigious UMI
unambigious_UMI[barcode_UMI] = genus_list[0]
#next, construct cell_metadata using unambigious_UMI dict,also count the number of UMI in cells
print('Total unambigious UMI = ',len(unambigious_UMI))
cell_metadata_dict = {}
for barcode_UMI in unambigious_UMI:
barcode = barcode_UMI.split('+')[0]
UMI = barcode_UMI.split('+')[1]
genus = unambigious_UMI[barcode_UMI]
if not barcode in cell_metadata_dict:
cell_metadata_dict[barcode] = {} #0527: changed the structure of cell_metadata_dict#0531: update the structure again
cell_metadata_dict[barcode]['genus'] = []
cell_metadata_dict[barcode]['genus'].append(genus)
cell_metadata_dict[barcode]['barcode_UMI']={}
cell_metadata_dict[barcode]['barcode_UMI'][barcode_UMI] = genus
cell_metadata_dict[barcode]['pathogen_count']={}
#cell_metadata_dict[barcode]['pathogen_count'][genus] = 1
else:
cell_metadata_dict[barcode]['genus'].append(genus)
cell_metadata_dict[barcode]['barcode_UMI'][barcode_UMI] = genus
if not genus in cell_metadata_dict[barcode]['pathogen_count']:
cell_metadata_dict[barcode]['pathogen_count'][genus] = 1
else:
cell_metadata_dict[barcode]['pathogen_count'][genus] += 1
#updated 0531 to count pathogen UMI for each cell
'''
if not barcode in cell_metadata_dict:
cell_metadata_dict[barcode] = {}
cell_metadata_dict[barcode]['genus'] = []
cell_metadata_dict[barcode]['genus'].append(genus)
cell_metadata_dict[barcode]['UMI_count'] = 1
else:
cell_metadata_dict[barcode]['genus'].append(genus)
cell_metadata_dict[barcode]['UMI_count'] += 1
'''
#then create output
# 052621: put the output part into an individual function
# cell_metadata_dict = update_cell_metadata_list(unambigious_UMI,cell_metadata_dict,threshold_for_min_cell, threshold_for_min_umi)#comment for MULTI
return cell_metadata_dict
def output_cell_metadata(cell_metadata_dict,out_genus_file,sample_ident,barcode_whitelist_file):
#Strategy 0601:
#1. if periority, then -> periority
#2. elif single highest number -> highest genus
#3. else (multiple highest) -> "Multi"
print('total pathogen-associated gems = ', len(cell_metadata_dict))
white_list_set = set()
white_list_dict = {}
white_list = gzip.open(barcode_whitelist_file, 'rt')
for each_line in white_list:
each_line = each_line.rstrip('\n')
white_list_set.add(each_line)
#print("total number of cells = ", len(white_list_set))
for barcode in cell_metadata_dict:
if barcode in white_list_set:
white_list_dict[barcode]= cell_metadata_dict[barcode]
cell_metadata_dict = white_list_dict
print("total filtered pathogen-associated cells = ", len(cell_metadata_dict))
#dict_updated with white list
genus_file = open(out_genus_file,'w')
header = 'cell_name,pathogen,UMI_count,pathogen_count\n'
genus_file.write(header)
for barcode in cell_metadata_dict:
if not sample_ident == '':
cell_name = sample_ident+'_'+barcode
else:
cell_name = barcode
genus_list = []
for barcode_UMI in cell_metadata_dict[barcode]['barcode_UMI']:
genus_list.append(cell_metadata_dict[barcode]['barcode_UMI'][barcode_UMI])
sorted_genus_list = list(set(genus_list))
sorted_genus_list.sort()
genus = '+'.join(sorted_genus_list)
UMI_count = len(cell_metadata_dict[barcode]['barcode_UMI'])
#then we need a new item for pathogen count
pathogen_count_list = []
for each_pathogen in cell_metadata_dict[barcode]['pathogen_count']:
pathogen_count=each_pathogen
pathogen_count+=':'
pathogen_count+=str(cell_metadata_dict[barcode]['pathogen_count'][each_pathogen])
pathogen_count_list.append(pathogen_count)
pathogen_count_list.sort()
pathogen_count_str = ';'.join(pathogen_count_list)
#insert new rule to RE-ASSIGN genus and UMI-count
Periority_pathogen = 'Fusobacterium'
pathogen_count_mini_dict = cell_metadata_dict[barcode]['pathogen_count']
temp_max_list = []
UMI_count_sum = 0
#max_UMI = 0
#for each_pathogen in pathogen_count_mini_dict:
max_count = max(pathogen_count_mini_dict.values())
for key,value in pathogen_count_mini_dict.items():
if value == max_count:
temp_max_list.append(key)
max_UMI = value
UMI_count_sum += value
UMI_count = UMI_count_sum
if len(set(temp_max_list)) > 1:
genus = 'MULTI'
UMI_count = UMI_count_sum
else:
genus = temp_max_list[0]
UMI_count = max_UMI
#muted 061821
#if Periority_pathogen in pathogen_count_mini_dict:
# genus = Periority_pathogen
# UMI_count = pathogen_count_mini_dict[Periority_pathogen]
#End of new rule
'''
# 060121 new plotting rule:
plotting_list = [
"Treponema",
"Tannerella",
"Porphyromonas",
"Capnocytophaga",
"Campylobacter",
"Capnocytophaga+Treponema",
"Mycoplasma",
"Fusobacterium",
"Solobacterium",
"Leptotrichia",
"Gelidibacter"
]
if genus in plotting_list:
output_line = ','.join([cell_name,genus,str(UMI_count),pathogen_count_str])+'\n'
else:
genus = "Other_bacteria"
output_line = ','.join([cell_name,genus,str(UMI_count),pathogen_count_str])+'\n'
'''
#1. cell_name 2. genus 3. UMI_count 4. pathogen_count_str
output_line = ','.join([cell_name,genus,str(UMI_count),pathogen_count_str])+'\n'
if UMI_count >= 1:
genus_file.write(output_line)
return
def UMI_table_output(cell_metadata_dict,barcode_whitelist_file,sample_ident,output_UMI_table_csv,output_UMI_validate_table_csv):
#0719: added white list
white_list_set = set()
white_list_dict = {}
white_list = gzip.open(barcode_whitelist_file, 'rt')
for each_line in white_list:
each_line = each_line.rstrip('\n')
white_list_set.add(each_line)
print("total number of cells = ", len(white_list_set))
for barcode in cell_metadata_dict:
if barcode in white_list_set:
white_list_dict[barcode]= cell_metadata_dict[barcode]
cell_metadata_dict = white_list_dict
#print(white_list_dict)
# added 101521: print a UMI table for cross validation (cell+UMI and pathogen)
output_UMI_validate_table = open(output_UMI_validate_table_csv,'w')
for each_cell in cell_metadata_dict:
for each_UMI in cell_metadata_dict[each_cell]['barcode_UMI']:
UMI = each_UMI
pathogen = cell_metadata_dict[each_cell]['barcode_UMI'][UMI]
output_UMI_validate_table.write(UMI+','+pathogen+'\n')
####################
output_UMI_table = open(output_UMI_table_csv,'w')
#added0714 to output everything for metadata
#cell_metadata_dict[barcode]['pathogen_count'][each_pathogen]
#first get a complete list of all genera in this sample:
genera_list_set = set()
for barcode in cell_metadata_dict:
for pathogen in cell_metadata_dict[barcode]['pathogen_count']:
#cell_metadata_dict[barcode]['pathogen_count'][pathogen]
genera_list_set.add(pathogen)
genera_list = sorted(list(genera_list_set))
header = ['barcode']+genera_list
header_out = ','.join(header)
output_UMI_table.write(header_out)
output_UMI_table.write('\n')
#then start each_line
for barcode in cell_metadata_dict:
if not sample_ident == '':
cell_name = sample_ident+'_'+barcode
else:
cell_name = barcode
genera_count_list = []
for each_genus in genera_list:
if each_genus in cell_metadata_dict[barcode]['pathogen_count']:
genus_count = cell_metadata_dict[barcode]['pathogen_count'][each_genus]
else:
genus_count = 0
genera_count_list.append(str(genus_count))
output_line = [cell_name]+genera_count_list
output_line_out = ','.join(output_line)
output_UMI_table.write(output_line_out)
output_UMI_table.write('\n')
return
if __name__ == "__main__":
cellranger_bam_file,sample_ident,barcode_whitelist_file,pathseq_bam_file,pathseq_report_csv,read_name_pathseq,unmap_cbub_bam_file,unmap_cbub_fasta_file,out_cell_list,out_readname_cell_path,out_genus_file,output_UMI_table_csv,output_UMI_validate_table_csv=sys.argv[1:]
dict_for_genus = read_pathseq_report_and_create_dict(pathseq_report_csv)
step1 = read_cell_names1(pathseq_bam_file, read_name_pathseq)
step2 = read_readnames(read_name_pathseq)
step3 = read_cell_names2(step2[0], step2[1], dict_for_genus,cellranger_bam_file,unmap_cbub_bam_file,unmap_cbub_fasta_file, out_cell_list,out_readname_cell_path)
step4 = generate_barcode_UMI_dict(out_readname_cell_path)
step5 = output_cells_genus_list(step4,dict_for_genus)
output_cell_metadata(step5,out_genus_file,sample_ident,barcode_whitelist_file)
cell_metadata_dict = step5
UMI_table_output(cell_metadata_dict,barcode_whitelist_file,sample_ident,output_UMI_table_csv,output_UMI_validate_table_csv)
#0714:added output_UMI_table_csv
# cellranger_bam_file,
# sample_ident,
# barcode_whitelist_file,
# pathseq_bam_file,
# pathseq_report_csv,
# read_name_pathseq,
# unmap_cbub_bam_file,
# unmap_cbub_fasta_file,
# out_cell_list,
# out_readname_cell_path,
# out_genus_file,
# output_UMI_table_csv,
# output_UMI_validate_table_csv=sys.argv[1:]
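
An illustrative invocation (all file names are placeholders) showing the order of the thirteen positional arguments unpacked from sys.argv[1:] in __main__:

# python UMI_matrix.py \
#     possorted_genome_bam.bam sample1 barcodes.tsv.gz pathseq.bam pathseq_report.csv \
#     readnames.tsv unmap_cbub.bam unmap_cbub.fasta cell_list.txt readname_cell_path.tsv \
#     genus_per_cell.csv UMI_table.csv UMI_validate_table.csv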
repo_name: FredHutch/invadeseq | sub_path: bin/UMI_matrix.py | file_name: UMI_matrix.py | file_ext: py | file_size_in_byte: 20,709 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 33126934610
import pandas as pd
from tools.readFile import read_excl
# Read an entire specified column from an Excel file and return the values as a list
def readExcelData(filePath, column):
    df = pd.read_excel(filePath, usecols=[column - 1])  # select the column to read
    df_list = df.values.tolist()
    backList = []
    for i in df_list:
        backList.append(i[0])
    if len(backList) == 0:
        # Fall back to the raw reader when pandas returns no rows
        data = read_excl(filePath)  # file location
        feature1 = data[:, column - 1:column]
        m = 0
        for i in feature1:
            tmpKey = str(feature1[m][0])
            backList.append(tmpKey)
            m += 1
    return backList
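
A brief usage sketch (the file name and column index are hypothetical):

ids = readExcelData("sync_source.xlsx", 2)  # values of the second column as a list
print(len(ids))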
repo_name: linhe-demo/sync_dataTable | sub_path: tools/readExcel.py | file_name: readExcel.py | file_ext: py | file_size_in_byte: 632 | program_lang: python | lang: en | doc_type: code | stars: 5 | dataset: github-code | pt: 6

seq_id: 20946824389
import datetime
from pysolar import solar
# Calculate the altitude and azimuth of the sun given the location and the time
def sun_pos(payload):
# Input variables
lat = payload["lat"] # Lattitude (deg)
lon = payload["lon"] # Longitude (deg)
epoch = payload["epoch"] # time (Linux epoch in seconds)
# Calculate UTC time
date = datetime.datetime.fromtimestamp(epoch, tz=datetime.timezone.utc)
# Calculate the azimuth and altitude of the sun using [Pysolar](https://pysolar.org/)
altitude_sun = solar.get_altitude(lat, lon, date) # Sun's altitude (deg)
azimuth_sun = solar.get_azimuth(lat, lon, date) # Sun's azimuth (deg)
return {"altitude": altitude_sun, "azimuth": azimuth_sun}
repo_name: bsamadi/metadata-processor | sub_path: app/sun_pos.py | file_name: sun_pos.py | file_ext: py | file_size_in_byte: 727 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 6422355002
from django.urls import path
from . import views
urlpatterns = [
path('account', views.account, name="account"),
path('profile', views.prifile, name="profile"),
path('signup', views.sign_up, name="signup"),
path('signin', views.sign_in, name="signin"),
path('signout', views.sign_out, name="signout"),
]
repo_name: aposgial/Project_E3 | sub_path: happy_traveller/register/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 315 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 39747449584
"""Training and Predicting Cifar10 with Mutant Networks.
The networks mutate their architecture using genetic algorithms.
Author: Lucas David -- <[email protected]>
Licence: MIT License 2016 (c)
"""
import logging
import artificial as art
import numpy as np
import tensorflow as tf
from artificial.utils.experiments import arg_parser, ExperimentSet, Experiment
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
import mutant
class Cifar10MutantEnvironment(mutant.Environment):
def build(self):
tf.logging.info('building environment...')
tf.logging.info('|-loading data...')
(X, y), (X_test, y_test) = cifar10.load_data()
X = X.astype('float32') / 255
X_test = X_test.astype('float32') / 255
g = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
tf.logging.info('|-fitting image generator...')
g.fit(X)
tf.logging.info('|-defining data sets...')
self.dataset_ = g.flow(X, y, batch_size=self.consts.batch_size,
shuffle=self.consts.shuffle_train)
self.test_dataset_ = self.val_dataset_ = (X_test, y_test)
tf.logging.info('building complete')
return self
class ClimbOverCifar10Experiment(Experiment):
env_ = None
def setup(self):
consts = self.consts
# Settings for logging.
verbosity_level = logging.INFO if consts.verbose else logging.WARNING
for m in ('artificial', 'tensorflow', 'connoisseur'):
logger = logging.getLogger(m)
logger.setLevel(verbosity_level)
logger.addHandler(logging.FileHandler(consts.log_file))
np.random.seed(consts.seed)
# Create mutation environment.
e = Cifar10MutantEnvironment(optimizer='adam', consts=consts)
e.agents = [
mutant.Agent(search=art.searches.local.HillClimbing,
environment=e,
**consts.agent_params)
]
initial_architecture = e.architect_.validate({
mutant.Codes.Conv2D: [
e.architect_.random_layer(mutant.Codes.Conv2D)
for _ in range(4)
],
mutant.Codes.Dense: [
e.architect_.random_layer(mutant.Codes.Dense)
for _ in range(2)
],
})
initial_state = mutant.MutantNetwork(initial_architecture)
e.current_state = e.initial_state = initial_state
self.env_ = e
def run(self):
try:
self.env_.live(n_cycles=1)
finally:
answer = self.env_.current_state
if answer:
tf.logging.info('train and validation loss after %i epochs: '
'(%s, %s)', self.consts.n_epochs,
answer.loss_, answer.validation_loss_)
if __name__ == '__main__':
print(__doc__, flush=True)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('tensorflow').propagate = False
(ExperimentSet(ClimbOverCifar10Experiment)
.load_from_json(arg_parser.parse_args().constants)
.run())
|
lucasdavid/unicamp-ia004-neural-networks-2
|
mutant-networks/experiments/cifar-hill-climbing/experiment.py
|
experiment.py
|
py
| 3,372 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19342659029
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
import torchvision.models as models
from torch.autograd import Variable, grad
from copy import deepcopy
from tqdm import tqdm
import torch.nn.init as init
from .DevNet import SemiADNet
import sys
sys.path.append("..")
from data import ALDataset
class waal_clf(nn.Module):
def __init__(self, input_dim, embSize=320, num_classes=1):
super(waal_clf, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(input_dim, embSize),
nn.ReLU(),
nn.Dropout(p=0.01)
)
self.layer2 = nn.Sequential(
nn.Linear(embSize, 50),
nn.ReLU(),
nn.Dropout(p=0.01)
)
self.layer3 = nn.Linear(50, num_classes)
self.embSize = embSize
def forward(self, X, return_embedding=False):
emb = self.layer1(X)
X = self.layer2(emb)
X = self.layer3(X)
if return_embedding:
return X, emb
else:
return X
def get_embedding_dim(self):
return self.embSize
class Net_WAAL:
def __init__(self, model_name='WAAL', config=None, net_clf=None, net_dis=None):
self.config = config
self.batch_size = config['model_batch_size']
self.nb_batch = 20
self.device = torch.device("cuda" if config['use_cuda'] else "cpu")
self.net_clf = net_clf if isinstance(net_clf, nn.Module) else waal_clf
self.net_dis = net_dis if isinstance(net_dis, nn.Module) else Discriminator
self.use_imbalence_train = False
def fit(self, X_train, y_train, X_unlabeled=None, ratio=None, X_valid=None, y_valid=None, alpha=1e-3):
n_epoch = self.config['n_epoch']
dim = X_train.shape[1]
outlier_indices = np.where(y_train == 1)[0]
inlier_indices = np.where(y_train == 0)[0]
self.clf = self.net_clf(input_dim=dim, num_classes=2).to(self.device)
self.dis = self.net_dis(dim = self.clf.get_embedding_dim()).to(self.device)
# setting three optimizers
self.opt_clf = optim.Adam(self.clf.parameters(), lr=0.001, weight_decay=1e-5)
self.opt_dis = optim.Adam(self.dis.parameters(), lr=0.001, weight_decay=1e-5)
        # computing the unbalancing ratio, a value between [0, 1]
#gamma_ratio = X_labeled.shape[0]/X_unlabeled.shape[0]
gamma_ratio = X_train.shape[0]/X_unlabeled.shape[0]
print(gamma_ratio)
self.clf.train()
self.dis.train()
if not self.use_imbalence_train:
data = ALDataset(X_train, y_train)
loader = DataLoader(data, self.batch_size , shuffle=True)
for epoch in range(n_epoch):
if self.use_imbalence_train:
for i in range(self.nb_batch):
label_x, label_y = self.input_batch_generation_sup(X_train, outlier_indices, inlier_indices, self.batch_size)
label_x, label_y = label_x.to(self.device), label_y.to(self.device)
unlabel_x = self.sample_unlabeled(X_unlabeled, self.batch_size).to(self.device)
self.train(label_x, label_y , unlabel_x, gamma_ratio=gamma_ratio,alpha=alpha)
else:
for batch_idx, (label_x, label_y) in enumerate(loader):
label_y = label_y.unsqueeze(1)
label_x, label_y = label_x.to(self.device), label_y.to(self.device)
unlabel_x = self.sample_unlabeled(X_unlabeled, len(label_x) ).to(self.device)
self.train(label_x, label_y , unlabel_x, gamma_ratio=gamma_ratio,alpha=alpha)
def train(self, label_x, label_y , unlabel_x, gamma_ratio=None,alpha=1e-3):
# training feature extractor and predictor
self.set_requires_grad(self.clf,requires_grad=True)
self.set_requires_grad(self.dis,requires_grad=False)
self.opt_clf.zero_grad()
lb_out, lb_z = self.clf(label_x, return_embedding=True)
_, unlb_z = self.clf(unlabel_x, return_embedding=True)
        # prediction loss (by default we use F.cross_entropy)
zero_prob = 1-label_y
gt_probs = torch.cat((zero_prob,label_y),1)
pred_loss = torch.mean(F.cross_entropy(lb_out,gt_probs))
# pred_loss = self.deviation_loss(label_y, lb_out)
        # Wasserstein loss (unbalanced variant, using the redundant trick)
wassertein_distance = self.dis(unlb_z).mean() - gamma_ratio * self.dis(lb_z).mean()
with torch.no_grad():
_, lb_z = self.clf(label_x, return_embedding=True)
_, unlb_z = self.clf(unlabel_x, return_embedding=True)
gp = self.gradient_penalty(self.dis, unlb_z, lb_z)
loss = pred_loss + alpha * wassertein_distance + alpha * gp * 5
        # for CIFAR10 the gradient penalty is 5
        # for SVHN the gradient penalty is 2
loss.backward()
# clip_grad_norm_(self.clf.parameters(), 1.0)
self.opt_clf.step()
# Then the second step, training discriminator
self.set_requires_grad(self.clf, requires_grad=False)
self.set_requires_grad(self.dis, requires_grad=True)
with torch.no_grad():
_, lb_z = self.clf(label_x, return_embedding=True)
_, unlb_z = self.clf(unlabel_x, return_embedding=True)
for _ in range(1):
            # run gradient ascent multiple times, as in GAN training
gp = self.gradient_penalty(self.dis, unlb_z, lb_z)
wassertein_distance = self.dis(unlb_z).mean() - gamma_ratio * self.dis(lb_z).mean()
dis_loss = -1 * alpha * wassertein_distance - alpha * gp * 2
self.opt_dis.zero_grad()
dis_loss.backward()
self.opt_dis.step()
def input_batch_generation_sup(self, X_train, outlier_indices, inlier_indices, batch_size):
        '''
        Generate a balanced batch of samples (for tabular/CSV data):
        half sampled from inliers, half from outliers.
        '''
n_inliers = len(inlier_indices)
n_outliers = len(outlier_indices)
sample_num = batch_size//2
inlier_idx = np.random.choice([i for i in range(n_inliers)], sample_num, replace=True)
outlier_idx = np.random.choice([i for i in range(n_outliers)], sample_num, replace=True)
sampled_X = np.concatenate((X_train[inlier_indices[inlier_idx]], X_train[outlier_indices[outlier_idx]]), axis=0)
sampled_y = np.concatenate((np.expand_dims(np.zeros(sample_num), axis=1), np.expand_dims(np.ones(sample_num), axis=1)), axis=0)
# print(sampled_X.shape)
return torch.from_numpy(sampled_X).float(), torch.from_numpy(sampled_y).float()
def sample_unlabeled(self, X_unlabeled, batch_size):
# is_replace = True if len(X_unlabeled)<batch_size else False
is_replace = True
idx = np.random.choice([i for i in range(len(X_unlabeled))], batch_size, replace=is_replace)
sampled = X_unlabeled[idx]
return torch.from_numpy(sampled).float()
def deviation_loss(self, y_true, y_pred):
'''
z-score-based deviation loss
'''
confidence_margin = 5.
        self.ref = torch.normal(mean=0., std=torch.full([5000], 1.)).to(self.device)
dev = (y_pred - torch.mean(self.ref)) / torch.std(self.ref)
inlier_loss = torch.abs(dev)
outlier_loss = torch.abs((confidence_margin - dev).clamp_(min=0.))
return torch.mean((1 - y_true) * inlier_loss + y_true * outlier_loss)
def predict_prob(self, X, y=None, method="linear", threshold_method="quantile", num=0.95):
self.clf.eval()
with torch.no_grad():
X = torch.from_numpy(X).to(self.device)
out = self.clf(X.float())
prob = F.softmax(out, dim=1).cpu().detach()
return prob
def predict_score(self, X, y=None, return_threshold=False, quantile_num=0.95):
prob = self.predict_prob(X).numpy()
score = prob[:, 1]
if return_threshold:
print('quanitile:')
print(np.quantile(score,[i/10 for i in range(0,11)]))
threshold = np.quantile(score, quantile_num)
return score, threshold
else:
return score
def predict(self, X, y=None, threshold=0.5):
prob = self.predict_prob(X)
label = prob.max(1)[1]
return label
def single_worst(self, probas):
"""
The single worst will return the max_{k} -log(proba[k]) for each sample
:param probas:
:return: # unlabeled \times 1 (tensor float)
"""
value,_ = torch.max(-1*torch.log(probas),1)
return value
# setting gradient values
def set_requires_grad(self, model, requires_grad=True):
"""
Used in training adversarial approach
:param model:
:param requires_grad:
:return:
"""
for param in model.parameters():
param.requires_grad = requires_grad
# setting gradient penalty for sure the lipschitiz property
def gradient_penalty(self, critic, h_s, h_t):
        '''Gradient penalty approach'''
alpha = torch.rand(h_s.size(0), 1).to(self.device)
differences = h_t - h_s
interpolates = h_s + (alpha * differences)
interpolates = torch.cat([interpolates, h_s, h_t]).requires_grad_()
# interpolates.requires_grad_()
preds = critic(interpolates)
gradients = grad(preds, interpolates,
grad_outputs=torch.ones_like(preds),
retain_graph=True, create_graph=True)[0]
gradient_norm = gradients.norm(2, dim=1)
gradient_penalty = ((gradient_norm - 1)**2).mean()
return gradient_penalty
def get_model(self):
return self.clf
def get_embeddings(self, data):
self.clf.eval()
embeddings = torch.zeros([len(data), self.clf.get_embedding_dim()])
        loader = DataLoader(data, shuffle=False, batch_size=self.batch_size)
with torch.no_grad():
for x, y, idxs in loader:
x, y = x.to(self.device), y.to(self.device)
                out, e1 = self.clf(x, return_embedding=True)
embeddings[idxs] = e1.cpu()
return embeddings
class Discriminator(nn.Module):
"""Adversary architecture(Discriminator) for WAE-GAN."""
def __init__(self, dim=20):
super(Discriminator, self).__init__()
self.dim = np.prod(dim)
self.net = nn.Sequential(
nn.Linear(self.dim, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512,1),
nn.Sigmoid(),
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, z):
return self.net(z).reshape(-1)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
def normal_init(m, mean, std):
if isinstance(m, (nn.Linear, nn.Conv2d)):
m.weight.data.normal_(mean, std)
if m.bias.data is not None:
m.bias.data.zero_()
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.weight.data.fill_(1)
if m.bias.data is not None:
m.bias.data.zero_()
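# Illustrative wiring (sketch, not executed here): the config keys mirror those read
# in Net_WAAL.__init__ and fit above; the variable names are hypothetical.
#   cfg = {"model_batch_size": 16, "use_cuda": False, "n_epoch": 10}
#   learner = Net_WAAL(config=cfg)
#   learner.fit(X_labeled, y_labeled, X_unlabeled=X_pool)
#   scores = learner.predict_score(X_test)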
|
clarenceluo78/ActiveAD
|
models/nets_waal.py
|
nets_waal.py
|
py
| 10,208 |
python
|
en
|
code
| 0 |
github-code
|
6
|
858377764
|
from __future__ import division
from vistrails.core.bundles.pyimport import py_import
import vistrails.core.requirements
from vistrails.gui.modules.source_configure import SourceConfigurationWidget
from PyQt4 import QtCore, QtGui
from vistrails.gui.theme import CurrentTheme
def TextEditor(parent=None):
try:
py_import('PyQt4.Qsci', {'linux-debian': 'python-qscintilla2',
'linux-ubuntu': 'python-qscintilla2'}, True)
except ImportError:
return OldTextEditor(parent)
else:
return NewTextEditor(parent)
def NewTextEditor(parent):
vistrails.core.requirements.require_python_module('PyQt4.Qsci')
from PyQt4.Qsci import QsciScintilla
class _TextEditor(QsciScintilla):
def __init__(self, parent=None):
QsciScintilla.__init__(self, parent)
## set the default font of the editor
## and take the same font for line numbers
font = CurrentTheme.PYTHON_SOURCE_EDITOR_FONT
self.setFont(font)
fm = QtGui.QFontMetrics(font)
## Line numbers
# conventionally, margin 0 is for line numbers
self.setMarginWidth(0, fm.width( "0000" ) + 4)
self.setMarginLineNumbers(0, True)
self.setAutoIndent(True)
            ## Edge Mode shows a vertical bar at 80 chars
self.setEdgeMode(QsciScintilla.EdgeLine)
self.setEdgeColumn(80)
self.setEdgeColor(QtGui.QColor("#CCCCCC"))
## Folding visual : we will use boxes
self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
## Braces matching
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
## Editing line color
# self.setCaretLineVisible(True)
# self.setCaretLineBackgroundColor(QtGui.QColor("#CDA869"))
## Margins colors
# line numbers margin
self.setMarginsBackgroundColor(QtGui.QColor("#FFFFFF"))
self.setMarginsForegroundColor(QtGui.QColor("#000000"))
# folding margin colors (foreground,background)
self.setFoldMarginColors(QtGui.QColor("#DDDDDD"),QtGui.QColor("#DDDDDD"))
# do not use tabs
self.setIndentationsUseTabs(False)
self.setTabWidth(4)
self.setTabIndents(True)
# set autocompletion
self.setAutoCompletionThreshold(2)
self.setAutoCompletionSource(QsciScintilla.AcsDocument)
self.setAutoCompletionCaseSensitivity(True)
self.setAutoCompletionReplaceWord(True)
self.setAutoCompletionFillupsEnabled(True)
def setPlainText(self, text):
""" setPlainText(text: str) -> None
redirect to setText
"""
self.setText(text)
def toPlainText(self):
""" setPlainText(text: str) -> None
redirect to self.text()
"""
text = self.text()
return text.replace('\r\n', '\n').replace('\r', '\n')
# def focusOutEvent(self, event):
# if self.parent():
# QtCore.QCoreApplication.sendEvent(self.parent(), event)
# QsciScintilla.focusOutEvent(self, event)
return _TextEditor(parent)
class OldTextEditor(QtGui.QTextEdit):
def __init__(self, parent=None):
QtGui.QTextEdit.__init__(self, parent)
self.setAcceptRichText(False)
self.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.formatChanged(None)
self.setCursorWidth(8)
self.connect(self,
QtCore.SIGNAL('currentCharFormatChanged(QTextCharFormat)'),
self.formatChanged)
def formatChanged(self, f):
self.setFont(CurrentTheme.PYTHON_SOURCE_EDITOR_FONT)
def keyPressEvent(self, event):
""" keyPressEvent(event: QKeyEvent) -> Nont
Handle tab with 4 spaces
"""
if event.key()==QtCore.Qt.Key_Tab:
self.insertPlainText(' ')
else:
# super(PythonEditor, self).keyPressEvent(event)
QtGui.QTextEdit.keyPressEvent(self, event)
class TextConfigurationWidget(SourceConfigurationWidget):
def __init__(self, module, controller, parent=None):
SourceConfigurationWidget.__init__(self, module, controller,
TextEditor, False, False, parent, False, portName='value')
|
VisTrails/VisTrails
|
vistrails/gui/modules/string_configure.py
|
string_configure.py
|
py
| 4,614 |
python
|
en
|
code
| 100 |
github-code
|
6
|
39911776352
|
import os
import re
import json
import pickle
import kss
import pandas as pd
from tqdm import tqdm
from elasticsearch import Elasticsearch
from torch.utils.data import DataLoader, TensorDataset
from datasets import load_metric, load_from_disk, load_dataset, Features, Value, Sequence, DatasetDict, Dataset
from sentence_transformers import SentenceTransformer, util
from data_processing import *
from mask import mask_to_tokens
def save_pickle(save_path, data_set):
file = open(save_path, "wb")
pickle.dump(data_set, file)
file.close()
return None
def get_pickle(pickle_path):
f = open(pickle_path, "rb")
dataset = pickle.load(f)
f.close()
return dataset
def save_data(data_path, new_wiki):
with open(data_path, 'w', encoding='utf-8') as make_file:
json.dump(new_wiki, make_file, indent="\t", ensure_ascii=False)
def passage_split_400(text):
num = len(text) // 400
count = 1
split_datas = kss.split_sentences(text)
data_list = []
data = ""
for split_data in split_datas:
if abs(len(data) - 400) > abs(len(data) + len(split_data) - 400) and count < num:
if len(data) == 0:
data += split_data
else:
data += (" " + split_data)
elif count < num:
data_list.append(data)
count += 1
data = ""
data += split_data
else:
data += split_data
data_list.append(data)
return data_list, len(data_list)
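# Illustrative behaviour (sketch): real inputs are Korean wiki passages well over
# 400 characters; the toy sentence below is hypothetical.
#   chunks, n = passage_split_400("짧은 예시 문장입니다. 두 번째 문장입니다.")
#   # inputs shorter than 400 characters come back as a single chunk, so n == 1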
def passage_split(text):
length = len(text) // 2
split_datas = kss.split_sentences(text)
data_1 = ""
data_2 = ""
for split_data in split_datas:
if abs(len(data_1) - length) > abs(len(data_1) + len(split_data) - length):
if len(data_1) == 0:
data_1 += split_data
else:
data_1 += (" " + split_data)
else:
if len(data_2) == 0:
data_2 += split_data
else:
data_2 += (" " + split_data)
return data_1, data_2
def preprocess(text):
text = re.sub(r'\n', ' ', text)
text = re.sub(r"\\n", " ", text)
text = re.sub(r"\s+", " ", text)
text = re.sub(r'#', ' ', text)
text = re.sub(r"[^a-zA-Z0-9가-힣ㄱ-ㅎㅏ-ㅣぁ-ゔァ-ヴー々〆〤一-龥<>()\s\.\?!》《≪≫\'<>〈〉:‘’%,『』「」<>・\"-“”∧]", "", text)
return text
def run_preprocess(data_dict):
context = data_dict["context"]
start_ids = data_dict["answers"]["answer_start"][0]
before = data_dict["context"][:start_ids]
after = data_dict["context"][start_ids:]
process_before = preprocess(before)
process_after = preprocess(after)
process_data = process_before + process_after
ids_move = len(before) - len(process_before)
data_dict["context"] = process_data
data_dict["answers"]["answer_start"][0] = start_ids - ids_move
return data_dict
def run_preprocess_to_wiki(data_dict):
context = data_dict["text"]
process_data = preprocess(context)
data_dict["text"] = process_data
return data_dict
def search_es(es_obj, index_name, question_text, n_results):
query = {
'query': {
'match': {
'document_text': question_text
}
}
}
res = es_obj.search(index=index_name, body=query, size=n_results)
return res
def make_custom_dataset(dataset_path):
    if not (os.path.isdir("../data/train_dataset") and
            os.path.isfile("../data/wikipedia_documents.json")):
        raise Exception("Set the original data path to '../data'")
train_f = Features({'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)})
if not os.path.isfile("../data/preprocess_wiki.json") :
with open("../data/wikipedia_documents.json", "r") as f:
wiki = json.load(f)
new_wiki = dict()
for ids in range(len(wiki)):
            new_wiki[str(ids)] = run_preprocess_to_wiki(wiki[str(ids)])
with open('../data/preprocess_wiki.json', 'w', encoding='utf-8') as make_file:
json.dump(new_wiki, make_file, indent="\t", ensure_ascii=False)
if not os.path.isfile("/opt/ml/input/data/preprocess_train.pkl"):
train_dataset = load_from_disk("../data/train_dataset")['train']
val_dataset = load_from_disk("../data/train_dataset")['validation']
new_train_data, new_val_data = [], []
for data in train_dataset:
new_data = run_preprocess(data)
new_train_data.append(new_data)
for data in val_dataset:
new_data = run_preprocess(data)
new_val_data.append(new_data)
train_df = pd.DataFrame(new_train_data)
val_df = pd.DataFrame(new_val_data)
dataset = DatasetDict({'train': Dataset.from_pandas(train_df, features=train_f),
'validation': Dataset.from_pandas(val_df, features=train_f)})
save_pickle(dataset_path, dataset)
if 'preprocess' in dataset_path:
return dataset
if 'squad' in dataset_path :
train_data = get_pickle("../data/preprocess_train.pkl")["train"]
val_data = get_pickle("../data/preprocess_train.pkl")["validation"]
korquad_data = load_dataset("squad_kor_v1")["train"]
df_train_data = pd.DataFrame(train_data)
df_val_data = pd.DataFrame(val_data)
df_korquad_data = pd.DataFrame(korquad_data, columns=['answers', 'context', 'id', 'question'])
df_total_train = pd.concat([df_train_data, df_korquad_data])
dataset = DatasetDict({'train': Dataset.from_pandas(df_total_train, features=train_f),
'validation': Dataset.from_pandas(df_val_data, features=train_f)})
save_pickle("../data/korquad_train.pkl", dataset)
        return dataset
if 'concat' in dataset_path :
base_dataset = get_pickle("../data/preprocess_train.pkl")
train_dataset, val_dataset = base_dataset["train"], base_dataset["validation"]
train_data = [{"id" : train_dataset[i]["id"], "question" : train_dataset[i]["question"],
"answers" : train_dataset[i]["answers"], "context" : train_dataset[i]["context"]}
for i in range(len(train_dataset))]
val_data = [{"id" : val_dataset[i]["id"], "question" : val_dataset[i]["question"],
"answers" : val_dataset[i]["answers"], "context" : val_dataset[i]["context"]}
for i in range(len(val_dataset))]
config = {'host':'localhost', 'port':9200}
es = Elasticsearch([config])
k = 5 # k : how many contexts to concatenate
for idx, train in enumerate(train_data):
            res = search_es(es, "wiki-index", train["question"], k)
context_list = [(hit['_source']['document_text'], hit['_score']) for hit in res['hits']['hits']]
contexts = train["context"]
count = 0
for context in context_list:
# if same context already exists, don't concatenate
if train["context"] == context[0]:
continue
contexts += " " + context[0]
count += 1
if count == (k-1):
break
train_data[idx]["context"] = contexts
for idx, val in enumerate(val_data):
            res = search_es(es, "wiki-index", val["question"], k)
context_list = [(hit['_source']['document_text'], hit['_score']) for hit in res['hits']['hits']]
contexts = val["context"]
count = 0
for context in context_list:
if val["context"] == context[0]:
continue
contexts += " " + context[0]
count += 1
if count == (k-1):
break
val_data[idx]["context"] = contexts
train_df = pd.DataFrame(train_data)
val_df = pd.DataFrame(val_data)
dataset = DatasetDict({'train': Dataset.from_pandas(train_df, features=train_f),
'validation': Dataset.from_pandas(val_df, features=train_f)})
save_pickle(dataset_path, dataset)
return dataset
if "split_wiki_400" in dataset_path:
with open("/opt/ml/input/data/preprocess_wiki.json", "r") as f:
wiki = json.load(f)
new_wiki = dict()
for i in tqdm(range(len(wiki))):
if len(wiki[str(i)]["text"]) < 800:
new_wiki[str(i)] = wiki[str(i)]
continue
data_list, count = passage_split_400(wiki[str(i)]["text"])
for j in range(count):
new_wiki[str(i) + f"_{j}"] = {"text" : data_list[j], "corpus_source" : wiki[str(i)]["corpus_source"],
"url" : wiki[str(i)]["url"], "domain" : wiki[str(i)]["domain"],
"title" : wiki[str(i)]["title"], "author" : wiki[str(i)]["author"],
"html" : wiki[str(i)]["html"],"document_id" : wiki[str(i)]["document_id"]}
save_data("../data/wiki-index-split-400.json", new_wiki)
if "split_wiki" in dataset_path and dataset_path != "split_wiki_400":
with open("/opt/ml/input/data/preprocess_wiki.json", "r") as f:
wiki = json.load(f)
limit = 0
if "800" in dataset_path:
limit = 800
if "1000" in dataset_path:
limit = 1000
new_wiki = dict()
for i in tqdm(range(len(wiki))):
if len(wiki[str(i)]["text"]) < limit:
new_wiki[str(i)] = wiki[str(i)]
continue
data_1, data_2 = passage_split(wiki[str(i)]["text"])
new_wiki[str(i) + f"_1"] = {"text" : data_1, "corpus_source" : wiki[str(i)]["corpus_source"], "url" : wiki[str(i)]["url"],
"domain" : wiki[str(i)]["domain"], "title" : wiki[str(i)]["title"], "author" : wiki[str(i)]["author"],
"html" : wiki[str(i)]["html"], "document_id" : wiki[str(i)]["document_id"]}
new_wiki[str(i) + f"_2"] = {"text" : data_2, "corpus_source" : wiki[str(i)]["corpus_source"], "url" : wiki[str(i)]["url"],
"domain" : wiki[str(i)]["domain"], "title" : wiki[str(i)]["title"],
"author" : wiki[str(i)]["author"], "html" : wiki[str(i)]["html"], "document_id" : wiki[str(i)]["document_id"]}
save_data(f"../data/split_wiki_{limit}.json")
def make_mask_dataset(dataset_path, k, tokenizer):
base_dataset, opt = None, None
if 'default' in dataset_path:
base_dataset = get_pickle("../data/preprocess_train.pkl")
if 'concat' in dataset_path:
base_dataset = get_pickle("../data/concat_train.pkl")
        k = int(re.findall(r"\d", dataset_path)[0])
data_processor = DataProcessor(tokenizer)
    train_dataset, val_dataset = base_dataset['train'], base_dataset['validation']
column_names = train_dataset.column_names
train_dataset = data_processor.train_tokenizer(train_dataset, column_names)
val_dataset = data_processor.val_tokenizer(val_dataset, column_names)
model = SentenceTransformer('sentence-transformers/xlm-r-100langs-bert-base-nli-stsb-mean-tokens')
mask_dataset = mask_to_tokens(train_dataset, tokenizer, k, model)
dataset = DatasetDict({'train': mask_dataset,
'validation': val_dataset})
save_pickle(dataset_path, dataset)
return dataset
|
TEAM-IKYO/Open-Domain-Question-Answering
|
code/prepare_dataset.py
|
prepare_dataset.py
|
py
| 12,138 |
python
|
en
|
code
| 24 |
github-code
|
6
|
18164621881
|
import json
# Read the network.json file
with open("network.json", "r") as f:
network = json.load(f)
# Create a set to store all pairs (a, b) such that a follows b but b doesn't follow back a
pairs = set()
# Iterate over all users in the network
for user, data in network.items():
# Get the list of users that the current user follows
following = data.get("Connections", [])
# Iterate over all users that the current user follows
for follower in following:
# Check if the follower doesn't follow back the current user
if user not in network.get(str(follower), {}).get("Connections", []):
pairs.add((user, str(follower)))
# Remove mutual follows from the set
mutual_follows = {(b, a) for (a, b) in pairs if (b, a) in pairs}
pairs -= mutual_follows
# Print all pairs (a, b) such that a follows b but b doesn't follow back a
if len(pairs) > 0:
for pair in pairs:
print(pair)
# Print the number of such pairs
print(f"Number of pairs where 'a' follows 'b' but 'b' doesn't follow back 'a': {len(pairs)}")
else:
# If there are no such pairs, display that the relationship is completely bidirectional
print("The relationship is completely bidirectional.")
|
GOVINDFROMINDIA/Twitter-Scam-Victims
|
GraphEvaluation.py
|
GraphEvaluation.py
|
py
| 1,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11464862154
|
import speech_recognition as sr
from requests import get
from bs4 import BeautifulSoup
from gtts import gTTS
from paho.mqtt import publish
import os
##### SETTINGS #####
with open('arquivoConfiguraGoogleSpeech.json') as credenciais_google:
credenciais_google = credenciais_google.read()
executaAcao = False
serverMQTT = 'iot.eclipse.org'
portaMQTT = 1883
topicoLuz = 'iluminacao/status'
hotword = 'verônica'
hotwordNoticias = 'notícias'
hotwordTemperatura = 'temperatura'
hotwordLigarLuz = 'ligar a luz'
hotwordDesligarLuz = 'desativar a luz'
def monitorarAudio():
microfone = sr.Recognizer()
with sr.Microphone() as source:
print("Aguardando o Comando: ")
audio = microfone.listen(source)
try:
trigger = microfone.recognize_google_cloud(audio, credentials_json=credenciais_google, language='pt-BR')
trigger = trigger.lower()
if hotword in trigger and not getStatusTrigger():
print('Comando reconhecido!')
respoder('feedback')
setStatusTrigger(True)
elif getStatusTrigger():
setStatusTrigger(False)
return trigger
except sr.UnknownValueError:
print("Google not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Cloud Speech service; {0}".format(e))
return None
def setStatusTrigger(status):
global executaAcao
executaAcao = status
def getStatusTrigger():
return executaAcao
def analisarAcao(comando):
if hotwordNoticias in comando:
retornarUltimasNoticias()
elif hotwordTemperatura in comando:
retornarPrevisaoTempo()
elif hotwordLigarLuz in comando:
publicarNoTopico(topicoLuz, 1)
retornarIluminacao(1)
elif hotwordDesligarLuz in comando:
publicarNoTopico(topicoLuz, 0)
retornarIluminacao(0)
else:
        criarAudio(comando.replace(hotword, '').strip(), 'comando')
respoder('comando')
respoder('notfound')
def retornarUltimasNoticias():
site = get('https://news.google.com/news/rss?ned=pt_br&gl=BR&hl=pt')
noticias = BeautifulSoup(site.text, 'html.parser')
for item in noticias.findAll('item')[:2]:
noticia = item.title.text
criarAudio(noticia, 'noticia')
respoder('noticia')
respoder('thau')
def retornarPrevisaoTempo():
site = get('http://api.openweathermap.org/data/2.5/weather?id=3462377&q=goiania,br&APPID=1d20fd1ca254ea2797f60e64520675a8&units=metric&lang=pt')
clima = site.json()
temperatura = clima['main']['temp']
#minima = clima['main']['temp_min']
#maxima = clima['main']['temp_max']
descricao = clima['weather'][0]['description']
mensagem = f'No momento a temperatura é de {temperatura} graus com {descricao}'
criarAudio(mensagem, 'clima')
respoder('clima')
respoder('thau')
def retornarIluminacao(status):
if status == 1:
mensagem = 'A luz foi ligada'
else:
mensagem = 'A luz foi desligada'
criarAudio(mensagem, 'iluminacao')
respoder('iluminacao')
respoder('thau')
def publicarNoTopico(topico, payload):
publish.single(topico, payload=payload, qos=1, retain=True, hostname=serverMQTT,
port=portaMQTT, client_id="veronica")
def criarAudio(texto, nome_arquivo):
tts = gTTS(texto, lang='pt-br')
path = 'audios/' + nome_arquivo + '.mp3'
with open(path, 'wb') as file:
tts.write_to_fp(file)
def respoder(nome_arquivo):
path = 'audios/' + nome_arquivo + '.mp3'
os.system('mpg321 ' + path)
def __main__():
while True:
comando = monitorarAudio()
if comando is not None:
analisarAcao(comando)
__main__()
|
cicerojmm/assistentePessoalIoT
|
veronica/veronica.py
|
veronica.py
|
py
| 3,705 |
python
|
pt
|
code
| 2 |
github-code
|
6
|
12300846904
|
"""
This example illustrates how to display the tree of a single TreeGrower for
debugging purpose.
"""
from sklearn.datasets import make_classification
import numpy as np
from pygbm.binning import BinMapper
from pygbm.grower import TreeGrower
from pygbm import plotting
rng = np.random.RandomState(0)
n_samples = int(1e7)
n_leaf_nodes = 5
X, y = make_classification(n_samples=n_samples, n_classes=2, n_features=5,
n_informative=3, n_redundant=0, random_state=rng)
bin_mapper_ = BinMapper(random_state=rng)
X_binned = bin_mapper_.fit_transform(X)
gradients = np.asarray(y, dtype=np.float32).copy()
hessians = np.ones(1, dtype=np.float32)
# First run to trigger the compilation of numba jit methods to avoid recording
# the compiler overhead in the profile report.
TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=n_leaf_nodes).grow()
# New run to collect timing statistics that will be included in the plot.
grower = TreeGrower(X_binned, gradients, hessians, max_leaf_nodes=n_leaf_nodes)
grower.grow()
plotting.plot_tree(grower)
|
ogrisel/pygbm
|
examples/plot_performance_profile_single_small_tree.py
|
plot_performance_profile_single_small_tree.py
|
py
| 1,076 |
python
|
en
|
code
| 175 |
github-code
|
6
|
24494832097
|
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
def get_spy():
url = 'https://www.slickcharts.com/sp500'
request = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
soup = bs(request.text, "lxml")
stats = soup.find('table', class_='table table-hover table-borderless table-sm')
df = pd.read_html(str(stats))[0]
df['% Chg'] = df['% Chg'].str.strip('()-%')
df['% Chg'] = pd.to_numeric(df['% Chg'])
df['Chg'] = pd.to_numeric(df['Chg'])
return df
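if __name__ == "__main__":
    # Quick illustrative check (sketch): requires network access to slickcharts.com.
    spy = get_spy()
    print(spy.head())  # first rows of the scraped S&P 500 constituents table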
|
reesecake/td_api
|
util/IndexInfo.py
|
IndexInfo.py
|
py
| 520 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9790495928
|
from django.core.files.storage import FileSystemStorage
from django.http import FileResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.http import JsonResponse
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from journeys.errors import ConflictNotResolvedError
from journeys.errors import JourneysError
from journeys.modifier.conflict.plugins import load_plugins
from ..validators.checks_for_cli import default_checks
from . import forms
from . import logic
from . import models
from . import serializers
def get_supported_features(request):
plugins = load_plugins()
return JsonResponse({"items": [plugin.ID for plugin in plugins]})
def get_supported_validators(request):
return JsonResponse(
{
"validators": {
check.name: {
"require_source": check.require_source,
"require_root": check.require_root,
"description": check.description,
}
for check in default_checks.values()
}
}
)
class SessionsViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
def get_serializer_class(self):
if self.action == "retrieve":
return serializers.SessionDetailsSerializer
return serializers.SessionSerializer
queryset = models.Session.objects.all() # pylint: disable=E1101
def perform_create(self, serializer):
session = serializer.save()
logic.get_controller(session=session, allow_empty=True)
@action(detail=True, methods=["post"])
def source(self, request, pk):
session = models.Session.objects.get(pk=pk) # pylint: disable=E1101
system_credentials = None
form = forms.SourceForm(request.POST, request.FILES)
form.is_valid()
if form.errors:
# TODO: Fill response content
return HttpResponseBadRequest()
fs = FileSystemStorage(location=session.working_directory)
as3_file = (
fs.save(form.cleaned_data["as3_file"].name, form.cleaned_data["as3_file"])
if "as3_file" in form.cleaned_data and form.cleaned_data["as3_file"]
else None
)
if "ucs_file" in form.cleaned_data and form.cleaned_data["ucs_file"]:
ucs_file = fs.save(
form.cleaned_data["ucs_file"].name, form.cleaned_data["ucs_file"]
)
ucs_passphrase = form.cleaned_data.get("ucs_passphrase", None)
else:
system_credentials = models.SystemCredentials(
username=form.cleaned_data["username"],
password=form.cleaned_data["password"],
host=form.cleaned_data["host"],
)
try:
ucs_file, ucs_passphrase = logic.download_ucs(
session=session, system_credentials=system_credentials
)
system_credentials.save()
except JourneysError:
# TODO: Fill response content
return HttpResponseBadRequest()
clear = True # request.GET.get("clear", False)
logic.initialize(
session=session,
ucs_file=ucs_file,
ucs_passphrase=ucs_passphrase,
as3_file=as3_file,
clear=clear,
credentials=system_credentials,
)
return Response()
@action(detail=True, methods=["post"])
def current_conflict(self, request, pk):
session = models.Session.objects.get(pk=pk) # pylint: disable=E1101
conflict_id = request.data["conflict_id"]
logic.set_current_conflict(session=session, conflict_id=conflict_id)
return Response()
@current_conflict.mapping.delete
def delete_current_conflict(self, request, pk):
session = models.Session.objects.get(pk=pk) # pylint: disable=E1101
logic.reset_current_conflict(session=session)
return Response()
class SessionFilesViewSet(viewsets.GenericViewSet):
lookup_value_regex = r".+"
lookup_url_kwarg = "file_path"
def retrieve(self, request, session_pk, file_path, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
fs = FileSystemStorage(location=controller.repo_path)
try:
f = fs.open(file_path)
except FileNotFoundError:
return HttpResponseNotFound()
return FileResponse(f, content_type="application/octet-stream")
def update(self, request, session_pk, file_path, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
form = forms.FileUploadFrom(data=request.POST, files=request.FILES)
form.full_clean()
if form.errors:
# TODO: Fill response content
return HttpResponseBadRequest()
fs = FileSystemStorage(location=controller.repo_path)
if fs.exists(file_path):
fs.delete(file_path)
fs.save(file_path, form.cleaned_data["file"])
return Response(status=202)
def delete(self, request, session_pk, file_path, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
fs = FileSystemStorage(location=controller.repo_path)
try:
fs.delete(file_path)
except FileNotFoundError:
return HttpResponseNotFound()
return Response()
class SessionBranchesViewSet(viewsets.GenericViewSet):
lookup_value_regex = r".+"
class SessionBranchesFilesViewSet(viewsets.GenericViewSet):
lookup_value_regex = r".+"
lookup_url_kwarg = "file_path"
def retrieve(
self, request, session_pk, session_branch_pk, file_path, *args, **kwargs
):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
controller = logic.get_controller(session=session)
git = controller.repo.git
content = git.show(f"{session_branch_pk}:{file_path}")
return FileResponse(content, content_type="application/octet-stream")
class SessionConflictsViewSet(
mixins.ListModelMixin, viewsets.GenericViewSet,
):
def get_queryset(self):
return models.Conflict.objects.filter( # pylint: disable=E1101
session=self.kwargs["session_pk"]
)
def get_serializer_class(self):
if self.action == "retrieve":
return serializers.ConflictDetailsSerializer
return serializers.ConflictSerializer
class SessionChangesViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
def get_queryset(self):
return models.Change.objects.filter( # pylint: disable=E1101
session=self.kwargs["session_pk"]
).order_by("id")
def get_serializer_class(self):
if self.action == "retrieve":
return serializers.ChangeDetailsSerializer
return serializers.ChangeSerializer
def create(self, request, session_pk, *args, **kwargs):
session = models.Session.objects.get(pk=session_pk) # pylint: disable=E1101
message = request.data.get("message", None)
try:
logic.process(session=session, commit_name=message)
except ConflictNotResolvedError:
# TODO: Fill response content
return HttpResponseBadRequest()
return Response()
class SessionDeploymentsViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet,
):
def get_queryset(self):
return models.Deployment.objects.filter( # pylint: disable=E1101
session=self.kwargs["session_pk"]
)
def get_serializer_class(self):
if self.action == "retrieve":
return serializers.DeploymentDetailsSerializer
return serializers.DeploymentSerializer
@action(detail=True, methods=["get"])
def log(self, request, session_pk=None, pk=None):
# TODO: implement
return Response({})
@action(detail=True, methods=["get"])
def report(self, request, session_pk=None, pk=None):
# TODO: implement
return Response({})
|
wiksla/f5-bigip-journeys-app
|
journeys/backend/views.py
|
views.py
|
py
| 8,720 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36152977435
|
import os, sys, logging
from flask import Blueprint, current_app
from flask import request, jsonify
ml_model_bp = Blueprint('ml_model_bp', __name__) # create a Blueprint object
# create 'index' view for testing purposes
@ml_model_bp.route('/', methods=["GET", "POST"])
def index():
return "ML model service is running!"
# helper method for predict/ endpoint
def get_pred(data):
"""Predict from in-memory data on the fly.
"""
try:
nn_model = current_app.model
pred = nn_model.predict(data)
pred = pred.tolist()
except Exception as e:
print(e)
pred = []
return pred
# create route for prediction
@ml_model_bp.route("/predict", methods=["GET", "POST"])
def predict():
"""Performs an inference
"""
if request.method == "POST":
data = request.get_json() # sentences come in through JSON
current_app.logger.debug(f"Input to \"predict\" endpoint: {data['sentences']}")
pred = get_pred( data=data["sentences"])
current_app.logger.debug(f"Sentiment predictions = {pred}")
return jsonify({"input": data, "pred": pred})
if request.method == "GET":
msg = "Please compose your request in POST type with data."
current_app.logger.error(f"Wrong request type {request}.")
return jsonify({"msg": msg})
|
bhavenp/docker_sentiment_analysis
|
ml_service/ml_model_api/ml_model_blueprint.py
|
ml_model_blueprint.py
|
py
| 1,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72067619709
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 9 13:31:37 2022
@author: basile
"""
import pandas as pd
df = pd.DataFrame(columns=['index', 'prenom', 'nom', 'email', 'groupe', 'mystere'])
df.to_csv('priants.csv', index=False)
import streamlit as st
input = st.text_input("text", key="text")
but = st.button("clear text input")
if but:
st.session_state["text"] = ""
st.write(input)
|
BasileR29/chapelet_tournant
|
test.py
|
test.py
|
py
| 422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13913894709
|
from __future__ import annotations as _annotations
from typing import TYPE_CHECKING
import pytest
from watchfiles import watch
from watchfiles.main import _default_force_polling
if TYPE_CHECKING:
from .conftest import SetEnv
class MockRustNotify:
@staticmethod
def watch(*args):
return 'stop'
def __enter__(self):
return self
def __exit__(self, *args):
pass
def test_watch_polling_not_env(mocker):
m = mocker.patch('watchfiles.main.RustNotify', return_value=MockRustNotify())
for _ in watch('.'):
pass
m.assert_called_once_with(['.'], False, False, 300, True, False)
def test_watch_polling_env(mocker, env: SetEnv):
env('WATCHFILES_FORCE_POLLING', '1')
m = mocker.patch('watchfiles.main.RustNotify', return_value=MockRustNotify())
for _ in watch('.'):
pass
m.assert_called_once_with(['.'], False, True, 300, True, False)
@pytest.mark.parametrize(
'env_var,arg,expected',
[
(None, True, True),
(None, False, False),
(None, None, False),
('', True, True),
('', False, False),
('', None, False),
('1', True, True),
('1', False, False),
('1', None, True),
('disable', True, True),
('disable', False, False),
('disable', None, False),
],
)
def test_default_force_polling(mocker, env: SetEnv, env_var, arg, expected):
uname = type('Uname', (), {'system': 'Linux', 'release': '1'})
mocker.patch('platform.uname', return_value=uname())
if env_var is not None:
env('WATCHFILES_FORCE_POLLING', env_var)
assert _default_force_polling(arg) == expected
@pytest.mark.parametrize(
'env_var,arg,expected,call_count',
[
(None, True, True, 0),
(None, False, False, 0),
(None, None, True, 1),
('', True, True, 0),
('', False, False, 0),
('', None, True, 1),
('1', True, True, 0),
('1', False, False, 0),
('1', None, True, 0),
('disable', True, True, 0),
('disable', False, False, 0),
('disable', None, False, 0),
],
)
def test_default_force_polling_wsl(mocker, env: SetEnv, env_var, arg, expected, call_count):
uname = type('Uname', (), {'system': 'Linux', 'release': 'Microsoft-Standard'})
m = mocker.patch('platform.uname', return_value=uname())
if env_var is not None:
env('WATCHFILES_FORCE_POLLING', env_var)
assert _default_force_polling(arg) == expected
assert m.call_count == call_count
|
samuelcolvin/watchfiles
|
tests/test_force_polling.py
|
test_force_polling.py
|
py
| 2,551 |
python
|
en
|
code
| 1,336 |
github-code
|
6
|
29125164138
|
from django.conf.urls import include, patterns, url
view_location = 'customProfile.views'
urlpatterns = patterns(view_location,
# Views
url(r'^(?P<username>[-\w]+)/', include(patterns(view_location,
url(r'^$', 'profileRedirect', {'redirect_url': 'artist:about'}, name='home'),
url(r'^news/$', 'artist_news', name='news'),
url(r'^shows/$','shows', name='shows'),
url(r'^shows/(?P<year>\d{4})/$','shows',name='shows'),
url(r'^photos/$', 'artist_photos', name='photos'),
))),
)
view_location = 'social_links.views'
urlpatterns += patterns(view_location,
# Forms
url(r'^about/links/edit/$', 'artist_social_links', name='socialLinksForm'),
url(r'^photos/links/edit/$', 'artist_photo_links', name='photoLinksForm'),
url(r'^music/links/edit/$', 'music_links', name='musicLinksForm'),
url(r'^videos/links/edit/$', 'artist_video_links', name='videoLinksForm'),
)
view_location = 'artist.views'
urlpatterns += patterns(view_location,
# Views
url(r'^(?P<username>[-\w]+)/', include(patterns(view_location,
url(r'^about/$', 'about', name='about'),
url(r'^music/$', 'music', name='music'),
url(r'^videos/$', 'videos', name='videos'),
))),
#Forms
url(r'^about/', include(patterns(view_location,
url(r'^biography/edit/$', 'biography', name='biographyForm'),
url(r'^contact/edit/$', 'contact_info', name='userContactForm'),
url(r'^member/add/$', 'members', name='memberForm'),
url(r'^member/edit/(?P<member_id>[\.\w-]+)$', 'members', name='memberForm'),
url(r'^(?P<contact_type>[\.\w-]+)/edit/$', 'contacts', name='contactForm'),
))),
url(r'^music/', include(patterns(view_location,
url(r'^album/add/$', 'AddEditAlbum', name='albumForm'),
url(r'^album/(?P<album_id>[\.\w-]+)/edit/$', 'AddEditAlbum', name='editAlbumForm'),
url(r'^album/(?P<album_id>[\.\w-]+)/tracks/add/$', 'AddEditTracks', name='tracksForm'),
#url(r'^interview/add/$', 'interview_form', name='interview_form'),
#url(r'^interview/add/(?P<trackID>[\.\w-]+)$', 'interview_form', name='interview_form'),
))),
url(r'^videos/', include(patterns(view_location,
url(r'^add/$', 'video_form', name='video_form'),
url(r'^album/add/$', 'AddEditAlbum', {'success_url': 'artist:video_tracks_form'}, name='video_album_form'),
url(r'^album/(?P<album_id>[\.\w-]+)/edit/$', 'AddEditAlbum', {'success_url': 'artist:video_tracks_form'}, name='video_edit_album'),
url(r'^album/(?P<album_id>[\.\w-]+)/tracks/add/$', 'add_video_to_album', name='video_tracks_form'),
#url(r'^interview-video/add/$', 'interview_video_form', name='interview_video_form'),
#url(r'^interview-video/add/(?P<trackID>[\.\w-]+)$', 'interview_video_form', name='interview_video_form'),
))),
)
|
TimBest/ComposersCouch
|
artist/urls.py
|
urls.py
|
py
| 2,864 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20232075182
|
from tradingview.client import TradingViewWebSocketClient, fetch_japan_symbols
from datetime import datetime
client = TradingViewWebSocketClient()
symbols = fetch_japan_symbols()
client.add_symbols(symbols[:100])
# client.add_symbols(['TSE:4689'])
for x in client.fetch_ohlc(past_bar=302):
print(datetime.fromtimestamp(x.bar_time), x)
|
otomarukanta/tradingview
|
example_ohlc.py
|
example_ohlc.py
|
py
| 341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41664319378
|
import random
import tkinter as tk
import tkinter.messagebox
import pandas as pd
from tkinter import ttk
from tkinter import *
from unidecode import unidecode  # strips diacritics when comparing answers
from tkinter import PhotoImage
LARGE_FONT = ("Courier", 20, "bold")
MEDIUM_FONT = ("Courier", 15)
BACKGROUND_COLOR = "#6699CC"
# BACKGROUND_COLOR_ALT = "#B1DDC6"
class LanguageWorkout(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
# creating a container
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
tk.Tk.title(self, "Teddy spricht")
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
# initializing frames to an empty dictionary
self.frames = {}
# iterating through a tuple consisting
# of the different page layouts
        for F in (HomePage, WorkoutPage, Vocabulary):
frame = F(container, self)
self.frames[F] = frame
frame.config(background=BACKGROUND_COLOR)
frame.grid(row=0, column=0, padx=0, pady=0, sticky="nsew")
# frame.grid(row=0, column=0, sticky="nsew")
# initializing frame of that object from
# home page, Spending, Profit etc. respectively with
# for loop
self.show_frame(HomePage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
# first window frame home page
class HomePage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = ttk.Label(self, text="Hlavná stránka", font=LARGE_FONT,
background=BACKGROUND_COLOR,
foreground="#FFFFFF")
label.grid(row=0, column=0, pady=20, padx=40)
# WORKOUT
pic_flag = PhotoImage(file="figs/de_to_svk_small.png")
pic_flag_resize = pic_flag.subsample(3, 3)
button1 = tk.Button(self, text="Prekladat ",
font=MEDIUM_FONT,
background=BACKGROUND_COLOR,
highlightbackground=BACKGROUND_COLOR,
foreground="#FFFFFF",
image=pic_flag_resize,
compound=RIGHT,
command=lambda: controller.show_frame(WorkoutPage))
button1.image = pic_flag_resize
button1.grid(row=1, column=0, pady=20, padx=40)
button2 = tk.Button(self, text="Slovník DE->SVK", width=15, height=1,
font=MEDIUM_FONT,
background=BACKGROUND_COLOR,
highlightbackground=BACKGROUND_COLOR,
foreground="#FFFFFF",
command=lambda: controller.show_frame(Vocabulary))
button2.grid(row=2, column=0, pady=20, padx=40, ipadx=30, ipady=5)
class WorkoutPage(tk.Frame):
words = pd.read_csv("data/german_words_1500_SK.csv")
dict_words = words.to_dict(orient="records")
current_word = dict((random.choice(dict_words)))
print(current_word)
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.question = ttk.Label(self, text=f"Prelož '{self.current_word['German']}'",
font=LARGE_FONT,
background=BACKGROUND_COLOR,
foreground="#FFFFFF")
self.question.grid(row=0, column=0, pady=20, padx=40, sticky="w")
# USER ENTRY
self.translation_entry = ttk.Entry(self, width=15, font=MEDIUM_FONT)
self.translation_entry.bind('<Return>', self.checking_answer)
self.translation_entry.grid(row=1, column=0, pady=20, padx=40)
# CHECK BUTTON
self.check_button = Button(self, text="Skontroluj", width=12, height=1,
font=MEDIUM_FONT,
background=BACKGROUND_COLOR,
highlightbackground=BACKGROUND_COLOR,
foreground="#FFFFFF",
command=self.checking_answer)
self.check_button.grid(row=2, column=0, pady=20, padx=40)
# BACK HOME BUTTON
button2 = ttk.Button(self, text="Spät",
command=lambda: controller.show_frame(HomePage))
button2.grid(row=3, column=0, pady=20, padx=40)
# BRAIN TEDDY
# self.canvas_brain1 = Canvas(self, width=320, height=300)
# self.brain1 = PhotoImage(file="images/image843.png")
# self.canvas_brain1.create_image(10, 40, anchor=NW, image=self.brain1)
# #self.canvas_brain1.config(bg=BACKGROUND_COLOR, highlightthickness=0)
# self.canvas_brain1.grid(row=0, column=1, rowspan=3)
def random_word(self):
self.current_word = (random.choice(self.dict_words))
print(self.current_word["German"].lower())
def checking_answer(self, *args):
user_input = unidecode(self.translation_entry.get().lower()) # unidecode removes diacritics
answer_split = self.current_word['Slovak'].split('/') # split into possible answers if more than one
answer_adjusted = [unidecode(word.lower())
for word
in answer_split] # removing diacritics and making lowercase letters
print(answer_adjusted)
if user_input in answer_adjusted:
self.question.config(text=f"Správne!\n{self.current_word['German']} znamená {', '.join(answer_adjusted)}")
self.check_button.config(text="Dalšie slovo", command=self.next_word)
self.translation_entry.bind('<Shift_R>', self.next_word)
else:
self.question.config(text=f"'{self.current_word['German']}' znamená {', '.join(answer_adjusted)}")
self.check_button.config(text="Dalšie slovo", command=self.next_word)
self.translation_entry.bind('<Shift_R>', self.next_word)
def next_word(self, *args):
self.random_word()
self.translation_entry.delete(0, "end")
self.check_button.config(text="Skontroluj", command=self.checking_answer)
self.question.config(text=f"Prelož '{self.current_word['German']}'")
class Vocabulary(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
def save_txt():
with open('data/german_words_1500_SK.csv', 'w', encoding="utf8") as f:
f.write(self.text_box.get(1.0, END))
tkinter.messagebox.showinfo(title='Slovník', message='Zmeny boli úspešne uložené.')
with open('data/german_words_1500_SK.csv', 'r', encoding="utf8") as f:
text = f.read()
# scrollbar
self.text_scroll = Scrollbar(self)
self.text_scroll.grid(row=0, column=2, sticky='ns')
# text box
self.text_box = Text(self, width=40, height=10, font=MEDIUM_FONT, undo=True, yscrollcommand=self.text_scroll.set)
self.text_box.insert(END, text)
self.text_box.grid(row=0, column=0, pady=10, padx=0, columnspan=2)
# config scrollbar
self.text_scroll.config(command=self.text_box.yview)
# save button
self.save = ttk.Button(self, text='Uložit zmeny', command=save_txt)
self.save.grid(row=1, column=0, pady=20, padx=10)
# BACK HOME BUTTON
button2 = ttk.Button(self, text="Spät",
command=lambda: controller.show_frame(HomePage))
button2.grid(row=1, column=1, pady=20, padx=40)
app = LanguageWorkout()
app.mainloop()
|
wRajter/teddy_spricht_vSK
|
main.py
|
main.py
|
py
| 7,699 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41675138600
|
# Baekjoon 2775: 부녀회장이 될 테야 ("I Will Become the Residents' Association President")
"""
5 1 7 28 84 210
4 1 6 21 56 126
3 1 5 15 35 70
2 1 4 10 20 35
1 1 3 6 10 15
0 1 2 3 4 5 (0층)
1 2 3 4 5
"""
import sys
input = sys.stdin.readline
T = int(input())
for _ in range(T):
floor = int(input())
room = int(input())
info = [[0 for _ in range(room + 1)] for _ in range(floor + 1)]
for i in range(room + 1):
info[0][i] = i
for f in range(1, floor + 1):
for r in range(1, room + 1):
info[f][r] = info[f - 1][r] + info[f][r - 1]
print(info[floor][room])
|
jisupark123/Python-Coding-Test
|
baekjoon/2022-6/bronze/2775.py
|
2775.py
|
py
| 589 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72743743228
|
import os
import boto3
import json
from app.shared.clients.secret_manager import SecretManager
def _map_to_auth_response(principal, resource, effect):
statement = {
"Action": "execute-api:Invoke",
"Effect": effect,
"Resource": resource
}
policy_document = {
"Version": "2012-10-17",
"Statement": [statement]
}
response = {
"principalId": principal,
"policyDocument": policy_document
}
print('Response: ', response)
return response
def authorize(resource: str, principal: str, basic_string: str) -> dict:
expected_basic_string = SecretManager().secret(secret_name='API_AUTH_KEY', key_name='BASIC_AUTH_KEY')
print(f'AUTH KEY in Secrets Manager: {expected_basic_string}')
print(f'AUTH KEY in Request: {basic_string}')
if basic_string == expected_basic_string:
effect = 'Allow'
else:
effect = 'Deny'
return _map_to_auth_response(principal, resource, effect)
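# Illustrative outcome (sketch, not executed here): the ARN, principal, and key are
# hypothetical placeholders; SecretManager must be reachable for a real call.
#   authorize(
#       resource="arn:aws:execute-api:us-east-1:123456789012:abc123/prod/GET/graphs",
#       principal="api-client",
#       basic_string="Basic c2VjcmV0",
#   )
#   # -> {"principalId": "api-client", "policyDocument": {..., "Effect": "Allow"}}
#   #    when basic_string matches the stored BASIC_AUTH_KEY, otherwise "Deny"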
|
ishwar2303/graphidot-serverless-backend
|
app/functions/basic_authorizer/basic_authorizer.py
|
basic_authorizer.py
|
py
| 989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24512005131
|
import datetime as dt
import warnings
import numpy as np
import pandas as pd
from asgiref.sync import async_to_sync
from dateutil.parser import parse
from django.core.cache import cache
from django.utils import timezone
from apps.integration.tasks.sockets.get_kws_object import get_kws_object
warnings.filterwarnings("ignore")
def get_instrument(underlying: str) -> pd.DataFrame:
df = cache.get("OPTION_INSTRUMENTS")
return df[df["underlying"] == underlying].reset_index(drop=True)
def set_initial_fields_for_instruments(instrument, instruments):
tz = timezone.get_current_timezone()
instruments["last_price"] = np.nan
instruments["exchange_timestamp"] = np.nan
instruments["last_trade_time"] = np.nan
instruments["oi"] = np.nan
instruments["expiry"] = instruments["expiry"].apply(
lambda x: parse(f"{x} 15:30:00").replace(tzinfo=tz)
)
instruments["str_expiry"] = instruments["expiry"].apply(
lambda y: y.strftime("%d-%b-%Y").upper()
)
cache.set(f"{instrument}_OPTION_INSTRUMENTS", instruments)
def set_instrument_cache(df, instruments, instrument):
df = df[
[
"instrument_token",
"last_price",
"exchange_timestamp",
"last_trade_time",
"oi",
]
].copy()
df.rename(columns={"instrument_token": "kite_instrument_token"}, inplace=True)
instruments = instruments.merge(df, how="left", on="kite_instrument_token")
instruments["last_price"] = instruments["last_price_y"].fillna(
instruments["last_price_x"]
)
instruments["exchange_timestamp"] = instruments["exchange_timestamp_y"].fillna(
instruments["exchange_timestamp_x"]
)
instruments["last_trade_time"] = instruments["last_trade_time_y"].fillna(
instruments["last_trade_time_x"]
)
instruments["oi"] = instruments["oi_y"].fillna(instruments["oi_x"])
instruments.drop(
columns=[
"last_price_x",
"last_price_y",
"exchange_timestamp_x",
"exchange_timestamp_y",
"last_trade_time_x",
"last_trade_time_y",
"oi_x",
"oi_y",
],
inplace=True,
)
cache.set(f"{instrument}_OPTION_INSTRUMENTS", instruments)
for websocket_id, instruments_buffer in instruments.groupby("websocket_id"):
cache.set(
f"{instrument}_{websocket_id}_OPTION_INSTRUMENTS",
instruments_buffer.sort_values(
["strike", "option_type"], ignore_index=True
),
)
def on_connect(ws, response):
ws.subscribe(ws.instrument_tokens)
ws.set_mode(ws.MODE_FULL, ws.instrument_tokens)
def on_ticks(ws, ticks):
instruments = cache.get(f"{ws.instrument}_OPTION_INSTRUMENTS")
df = pd.DataFrame(ticks)
if not df.empty:
set_instrument_cache(df, instruments, ws.instrument)
if timezone.localtime().time() > dt.time(15, 30):
ws.unsubscribe(ws.instrument_tokens)
ws.close()
def on_close(ws, code, reason):
if not code and not reason:
ws.stop()
def run_option_websocket(instrument: str) -> None:
instruments = get_instrument(instrument)
if instruments.empty:
return
set_initial_fields_for_instruments(instrument, instruments)
kws = get_kws_object()
kws.instrument = instrument
kws.instrument_tokens = instruments["kite_instrument_token"].to_list()
kws.on_ticks = on_ticks
kws.on_connect = on_connect
kws.on_close = on_close
kws.connect(threaded=True)
|
finbyz/trading_child
|
apps/integration/tasks/sockets/option_websocket.py
|
option_websocket.py
|
py
| 3,575 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3642629983
|
from utils.transformers_utils import SiameseRobertaModel,TrainerLogger,get_preds,compute_metrics,update_metrics
import numpy as np
import pandas as pd
from transformers import RobertaTokenizerFast,RobertaConfig,TrainingArguments
from datasets import Dataset,DatasetDict #!pip install datasets
import evaluate #!pip install evaluate
from sklearn.metrics import roc_auc_score,recall_score,f1_score,precision_score,accuracy_score
DATASETS_PATH = '../data/datasets'
TOKENIZER_PATH = f'../data/tokenizers/tokenizer_opcode_bpe'
MODEL_PATH = f'../data/models/roberta_lm'
RESULTADOS_PATH = f'../data/resultados'
def get_dataset(target,tokenizer=None,remove_columns=None):
dataset = pd.read_csv(f'{DATASETS_PATH}/{target}_balanced.csv')[['opcode_nocds',target,'is_valid']]#.sample(300)
#dataset['arithmetic'] = np.where(dataset['arithmetic']==1,'Danger','Safe')
dataset[target] = np.where(dataset[target]==1,0,1)
dataset.columns = ['text','labels','is_valid']
ds = DatasetDict()
ds['train'] = Dataset.from_pandas(dataset[~dataset.is_valid].drop('is_valid',axis=1), preserve_index=False)
ds['valid'] = Dataset.from_pandas(dataset[dataset.is_valid].drop('is_valid',axis=1), preserve_index=False)
    if tokenizer is not None:
ds = ds.map(lambda x:tokenizer(x["text"],truncation=True,padding='max_length'), batched=True,remove_columns=remove_columns)
return ds
def get_trainer(ds,model):
training_args = TrainingArguments("test_trainer",
num_train_epochs=6,
no_cuda=False,
evaluation_strategy="epoch",#steps
#logging_strategy
learning_rate= 5e-05,
lr_scheduler_type= 'linear',
#'linear',#cosine_with_restarts
fp16=True
)
train_dataset = ds['train']
eval_dataset = ds['valid']
return TrainerLogger(
model=model, args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
max_size_sentence = 4048
metric = evaluate.load("accuracy")
tokenizer = RobertaTokenizerFast.from_pretrained(TOKENIZER_PATH, max_len=max_size_sentence)
config = RobertaConfig(
vocab_size=1000,
max_position_embeddings=512 + 2, # 514
hidden_size=216,
num_attention_heads=6,
num_hidden_layers=4,
type_vocab_size=1
# id2label={0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
)
resultados = {
'fold': ['f1', 'auc-roc', 'auc-roc-th', 'precision', 'recall', 'accuracy', 'size'],
}
vulnerabilities = ['access_control', 'arithmetic', 'denial_service',
'front_running', 'reentrancy', 'time_manipulation',
'unchecked_low_calls']
for target in vulnerabilities:
print(target)
ds = get_dataset(target, tokenizer, ['text'])
model = SiameseRobertaModel(config, n_childs=8, pretrained='')
trainer = get_trainer(ds, model)
trainer.train()
y_real_train, y_pred_train = get_preds(model, ds['train'])
y_real, y_pred = get_preds(model, ds['valid'])
update_metrics(target, resultados, (y_real_train, y_pred_train, y_real, y_pred))
pd.DataFrame(resultados).to_csv(f'{RESULTADOS_PATH}/transfomers_no_lm.csv', index=False)
max_size_sentence = 4048
metric = evaluate.load("accuracy")
tokenizer = RobertaTokenizerFast.from_pretrained(TOKENIZER_PATH, max_len=max_size_sentence)
config = RobertaConfig(
vocab_size=1000,
max_position_embeddings=512 + 2, # 514
hidden_size=216,
num_attention_heads=6,
num_hidden_layers=4,
type_vocab_size=1
# id2label={0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
)
resultados = {
'fold': ['f1', 'auc-roc', 'auc-roc-th', 'precision', 'recall', 'accuracy', 'size'],
}
vulnerabilities = ['access_control', 'arithmetic', 'denial_service',
'front_running', 'reentrancy', 'time_manipulation',
'unchecked_low_calls']
for target in vulnerabilities:
print(target)
ds = get_dataset(target, tokenizer, ['text'])
model = SiameseRobertaModel(config, n_childs=8, pretrained=MODEL_PATH)
trainer = get_trainer(ds, model)
trainer.train()
y_real_train, y_pred_train = get_preds(model, ds['train'])
y_real, y_pred = get_preds(model, ds['valid'])
update_metrics(target, resultados, (y_real_train, y_pred_train, y_real, y_pred))
pd.DataFrame(resultados).to_csv(f'{RESULTADOS_PATH}/transfomers_yes_lm.csv', index=False)
|
matisyo/vulnerability_detection
|
Notebooks/8. Transformers Classifier.py
|
8. Transformers Classifier.py
|
py
| 4,686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8845375190
|
from fastapi import APIRouter, Depends
from fastapi_pagination import Page, Params
from src.admins.dependencies import get_groups_service, get_valid_group
from src.admins.models import Group
from src.admins.schemas import CreateGroupSchema, GroupOut
from src.admins.services import GroupService
router = APIRouter()
@router.get("/")
async def get_groups(
params: Params = Depends(),
groups_service: GroupService = Depends(get_groups_service)
) -> Page[GroupOut]:
return await groups_service.get_all(params=params)
@router.post("/")
async def create_group(
data: CreateGroupSchema,
groups_service: GroupService = Depends(get_groups_service)
) -> GroupOut:
return await groups_service.create(**data.dict())
@router.get("/{group_id}")
async def get_group(
group: Group = Depends(get_valid_group)
) -> GroupOut:
return group
@router.delete("/{group_id}")
async def delete_group(
group: Group = Depends(get_valid_group)
) -> GroupOut:
await group.delete()
return group
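# Hedged usage sketch (not part of groups.py): the router above would normally be
# mounted on the application; the import path below is an assumption based on this
# repo's layout (src/admins/views/groups.py).
#
# from fastapi import FastAPI
# from src.admins.views.groups import router as groups_router
#
# app = FastAPI()
# app.include_router(groups_router, prefix="/groups", tags=["groups"])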
|
Qwizi/fastapi-sourcemod
|
sourcemod_api/src/admins/views/groups.py
|
groups.py
|
py
| 1,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36994780654
|
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import os
import sys
import shutil
import time
from utils import get_logger
import network
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
sys.path.append('../..')
from data_helper import to_categorical
from evaluator import cail_evaluator
flags = tf.flags
flags.DEFINE_bool('is_retrain', False, 'if is_retrain is true, not rebuild the summary')
flags.DEFINE_integer('max_epoch', 1, 'update the embedding after max_epoch, default: 1')
flags.DEFINE_integer('max_max_epoch', 1000, 'all training epoches, default: 1000')
flags.DEFINE_float('lr', 1e-3, 'initial learning rate, default: 1e-3')
flags.DEFINE_float('decay_rate', 0.6, 'decay rate, default: 0.6')
flags.DEFINE_float('keep_prob', 0.5, 'keep_prob for training, default: 0.5')
flags.DEFINE_string("log_file_train", "train.log", "File for log")
flags.DEFINE_integer('decay_step', 5000, 'decay_step, default: 5000')
flags.DEFINE_integer('valid_step', 2500, 'valid_step, default: 2500')
flags.DEFINE_float('last_score12', 0.0, 'if valid_score12 > last_score12, save new model. default: 0.0')
FLAGS = flags.FLAGS
lr = FLAGS.lr
last_score12 = FLAGS.last_score12
settings = network.Settings()
summary_path = settings.summary_path
ckpt_path = settings.ckpt_path
model_path = ckpt_path + 'model.ckpt'
log_path = settings.log_path
embedding_path = '../../data/word_embedding_256.npy'
data_train_path = '../../data/wd_pdQS200/train/'
data_valid_path = '../../data/wd_pdQS200/valid/'
tr_batches = os.listdir(data_train_path)
va_batches = os.listdir(data_valid_path)
n_tr_batches = len(tr_batches)
n_va_batches = len(va_batches)
def get_batch(data_path, batch_id):
new_batch = np.load(data_path + str(batch_id) + '.npz')
X_batch = new_batch['X']
y_batch = new_batch['y']
return [X_batch, y_batch]
def valid_epoch(data_path, sess, model):
va_batches = os.listdir(data_path)
n_va_batches = len(va_batches)
_costs = 0.0
predict_labels_list = list()
marked_labels_list = list()
for i in range(n_va_batches):
[X_batch, y_batch] = get_batch(data_path, i)
marked_labels_list.extend(y_batch)
y_batch = to_categorical(y_batch)
_batch_size = len(y_batch)
fetches = [model.loss, model.y_pred]
feed_dict = {model.X_inputs: X_batch,
model.y_inputs: y_batch, model.batch_size: _batch_size,
model.tst: True, model.keep_prob: 1.0}
_cost, predict_labels = sess.run(fetches, feed_dict)
_costs += _cost
predict_labels_list.extend(predict_labels)
f1_micro, f1_macro, score12 = cail_evaluator(predict_labels_list, marked_labels_list)
return f1_micro, f1_macro, score12
def train_epoch(data_path, sess, model, train_fetches,
valid_fetches, train_writer, test_writer, logger):
global last_score12
global lr
time0 = time.time()
batch_indexs = np.random.permutation(n_tr_batches)
for batch in tqdm(range(n_tr_batches)):
global_step = sess.run(model.global_step)
if 0 == (global_step + 1) % FLAGS.valid_step:
f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
print('Global_step=%d: f1_micro=%g, f1_macro=%g, score12=%g, time=%g s' % (
global_step, f1_micro, f1_macro, score12, time.time() - time0))
logger.info('END:Global_step={}: f1_micro={}, f1_macro={}, score12={}'.
format(sess.run(model.global_step), f1_micro, f1_macro, score12))
time0 = time.time()
if score12 > last_score12:
last_score12 = score12
saving_path = model.saver.save(sess, model_path, global_step+1)
print('saved new model to %s ' % saving_path)
# training
batch_id = batch_indexs[batch]
[X_batch, y_batch] = get_batch(data_path, batch_id)
y_batch = to_categorical(y_batch)
_batch_size = len(y_batch)
feed_dict = {model.X_inputs: X_batch,
model.y_inputs: y_batch, model.batch_size: _batch_size,
model.tst: False, model.keep_prob: FLAGS.keep_prob}
summary, _cost, _, _ = sess.run(train_fetches, feed_dict) # the cost is the mean cost of one batch
# valid per 500 steps
if 0 == (global_step + 1) % 500:
train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # randomly pick one validation batch
[X_batch, y_batch] = get_batch(data_valid_path, batch_id)
y_batch = to_categorical(y_batch)
_batch_size = len(y_batch)
feed_dict = {model.X_inputs: X_batch,
model.y_inputs: y_batch, model.batch_size: _batch_size,
model.tst: True, model.keep_prob: 1.0}
summary, _cost = sess.run(valid_fetches, feed_dict)
test_writer.add_summary(summary, global_step)
def main(_):
global ckpt_path
global last_score12
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path)
if not os.path.exists(summary_path):
os.makedirs(summary_path)
elif not FLAGS.is_retrain:
shutil.rmtree(summary_path)
os.makedirs(summary_path)
if not os.path.exists(summary_path):
os.makedirs(summary_path)
if not os.path.exists(log_path):
os.makedirs(log_path)
print('1.Loading data...')
W_embedding = np.load(embedding_path)
print('training sample_num = %d' % n_tr_batches)
print('valid sample_num = %d' % n_va_batches)
logger = get_logger(log_path + FLAGS.log_file_train)
print('2.Building model...')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = network.Atten_TextCNN(W_embedding, settings)
with tf.variable_scope('training_ops') as vs:
learning_rate = tf.train.exponential_decay(FLAGS.lr, model.global_step,
FLAGS.decay_step,
FLAGS.decay_rate, staircase=True)
with tf.variable_scope('Optimizer1'):
tvars1 = tf.trainable_variables()
grads1 = tf.gradients(model.loss, tvars1)
optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op1 = optimizer1.apply_gradients(zip(grads1, tvars1),
global_step=model.global_step)
with tf.variable_scope('Optimizer2'):
tvars2 = [tvar for tvar in tvars1 if 'embedding' not in tvar.name]
grads2 = tf.gradients(model.loss, tvars2)
optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op2 = optimizer2.apply_gradients(zip(grads2, tvars2),
global_step=model.global_step)
update_op = tf.group(*model.update_emas)
merged = tf.summary.merge_all() # summary
train_writer = tf.summary.FileWriter(summary_path + 'train', sess.graph)
test_writer = tf.summary.FileWriter(summary_path + 'test')
training_ops = [v for v in tf.global_variables() if v.name.startswith(vs.name+'/')]
if os.path.exists(ckpt_path + "checkpoint"):
print("Restoring Variables from Checkpoint...")
model.saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
print('f1_micro=%g, f1_macro=%g, score12=%g' % (f1_micro, f1_macro, score12))
sess.run(tf.variables_initializer(training_ops))
train_op2 = train_op1
else:
print('Initializing Variables...')
sess.run(tf.global_variables_initializer())
print('3.Begin training...')
print('max_epoch=%d, max_max_epoch=%d' % (FLAGS.max_epoch, FLAGS.max_max_epoch))
logger.info('max_epoch={}, max_max_epoch={}'.format(FLAGS.max_epoch, FLAGS.max_max_epoch))
train_op = train_op2
for epoch in range(FLAGS.max_max_epoch):
print('\nepoch: ', epoch)
logger.info('epoch:{}'.format(epoch))
global_step = sess.run(model.global_step)
print('Global step %d, lr=%g' % (global_step, sess.run(learning_rate)))
if epoch == FLAGS.max_epoch:
train_op = train_op1
train_fetches = [merged, model.loss, train_op, update_op]
valid_fetches = [merged, model.loss]
train_epoch(data_train_path, sess, model, train_fetches,
valid_fetches, train_writer, test_writer, logger)
        # run one final validation pass after training
f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
print('END:Global_step=%d: f1_micro=%g, f1_macro=%g, score12=%g' % (
sess.run(model.global_step), f1_micro, f1_macro, score12))
logger.info('END:Global_step={}: f1_micro={}, f1_macro={}, score12={}'.
format(sess.run(model.global_step), f1_micro, f1_macro, score12))
if score12 > last_score12:
saving_path = model.saver.save(sess, model_path, sess.run(model.global_step)+1)
print('saved new model to %s ' % saving_path)
logger.info('saved new model to {}'.format(saving_path))
if __name__ == '__main__':
tf.app.run()
|
shelleyHLX/cail
|
models/Attention_Textcnn/train.py
|
train.py
|
py
| 9,638 |
python
|
en
|
code
| 77 |
github-code
|
6
|
26244830544
|
from __future__ import absolute_import, print_function
import unittest
import poker
testdata = ["5H 5C 6S 7S KD 2C 3S 8S 8D TD",
"5D 8C 9S JS AC 2C 5C 7D 8S QH",
"2D 9C AS AH AC 3D 6D 7D TD QD",
"4D 6S 9H QH QC 3D 6D 7H QD QS",
"2H 2D 4C 4D 4S 3C 3D 3S 9S 9D"]
testwinner = [1, 0, 1, 0, 0]
class TestPoker(unittest.TestCase):
def testpoker(self):
games = []
for line in testdata:
hand0, hand1 = line[:14], line[15:]
player0 = poker.player(hand0)
player1 = poker.player(hand1)
game = poker.game()
game.set_player(player0, player1)
games.append(game)
for i in range(5):
game = games[i]
winner = testwinner[i]
self.assertEqual(game.winner(), winner)
if __name__ == "__main__":
unittest.main()
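# Hedged usage note: besides the __main__ guard above, this module can also be picked
# up by the standard discovery runner, e.g. `python -m unittest discover tests`
# (directory name assumed from the repo layout).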
|
tak0kada/procon
|
project euler/python/50/54/poker/tests/test_poker.py
|
test_poker.py
|
py
| 882 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42810587556
|
import numpy as np
# Color palette
palette = {
0: (0, 0, 0), # Undefined (black)
1: (255, 255, 255), # Impervious surfaces (white)
2: (0, 0, 255), # Buildings (dark blue)
3: (0, 128, 0), # Vegetation (light green)
4: (255, 0, 0), # Water (red)
}
invert_palette = {v: k for k, v in palette.items()}
def convert_from_color(arr_3d, palette=invert_palette):
""" RGB-color encoding to grayscale labels """
arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)
for c, i in palette.items():
m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)
arr_2d[m] = i
return arr_2d
def convert_to_color(arr_2d, palette=palette):
""" grayscale labels to RGB-color encoding """
arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)
for c, i in palette.items():
m = arr_2d == c
arr_3d[m] = i
return arr_3d
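# Hedged usage sketch (not part of the original file): a tiny round trip through the
# two converters above, guarded so it only runs when the module is executed directly.
if __name__ == "__main__":
    _labels = np.array([[0, 2], [3, 4]], dtype=np.uint8)
    _rgb = convert_to_color(_labels)              # (2, 2, 3) uint8 RGB image
    assert np.array_equal(convert_from_color(_rgb), _labels)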
|
nshaud/DeepNetsForEO
|
OSM/labels.py
|
labels.py
|
py
| 987 |
python
|
en
|
code
| 468 |
github-code
|
6
|
28726581501
|
import logging
import feedparser
import requests
from .. import read_list
log = logging.getLogger(__name__)
class VideoFeed:
def __init__(self, known_path, url):
self.url = url
self.read_list = read_list.ReadList(known_path, url)
def is_new(self, item):
return self.read_list.is_new(item.id)
def fetch(self):
try:
response = requests.get(self.url)
response.raise_for_status()
feed = feedparser.parse(response.content)
items = feed.entries
if len(items) == 0:
raise Exception("Empty feed. Is site down?")
new_items = list(filter(self.is_new, items))
log.info("Feed contains %d items, %d are new." % (len(items), len(new_items)))
# iterate first, then save new read list
for i in new_items:
yield i
self.read_list.save()
except Exception:
log.exception("Unexpected error with %s", self.url, exc_info=True)
def fetch_video_codes(self):
raise NotImplementedError
def append_to_queue(self, queue_path):
codes = self.fetch_video_codes()
links = ["\nhttps://www.youtube.com/watch?v=" + v for v in codes]
f = open(queue_path, "a")
f.writelines(links)
f.close()
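# Hedged sketch (not in the original): a concrete feed is expected to subclass
# VideoFeed and implement fetch_video_codes(). The yt_videoid attribute below is an
# assumption about YouTube RSS entries as exposed by feedparser, not something this
# base class guarantees.
#
# class ExampleChannelFeed(VideoFeed):
#     def fetch_video_codes(self):
#         return [entry.yt_videoid for entry in self.fetch()]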
|
EliseAv/tubeforme
|
video_feeds/_base.py
|
_base.py
|
py
| 1,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29644952837
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""DESCRIPTION
This module is a list of coding exercises. The intention
is to emulate some functions from other languages.
"""
#==================================================================
# Generate a list of divisors
#==================================================================
def divisors_of(number_to_divide):
"""Find the integer divisors of <number_to_divide>
param: number_to_divide."""
number_to_divide = number_to_divide + 1
possible_divisors_list = list(range(number_to_divide))
if 0 in possible_divisors_list:
possible_divisors_list.remove(0)
if 1 in possible_divisors_list:
possible_divisors_list.remove(1)
divisors_list = [1]
number_to_divide = number_to_divide - 1
for each_possible_divisor in list(possible_divisors_list):
if number_to_divide % each_possible_divisor == 0:
divisors_list.append(each_possible_divisor)
elif number_to_divide % each_possible_divisor != 0:
possible_divisors_list.remove(each_possible_divisor)
return divisors_list
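# Worked example for the function above: divisors_of(12) -> [1, 2, 3, 4, 6, 12]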
#==================================================================
# Generate a range of prime numbers
#==================================================================
def prime_range(how_many_primes_you_need):
"""Return a range of prime numbers.
param: how_many_primes_you_need"""
primes = []
for each_number in range(how_many_primes_you_need):
if len(divisors_of(each_number)) == 2:
primes.append(each_number)
return primes
#====================================================
# Partition a lists into sublists of size x
#====================================================
def partition_list(working_list, size_of_partition):
""" Return a list of lists of the size of <size_of_partition>."""
final_list = []
for each_elem in range(0, len(working_list) + 1, size_of_partition):
elem = working_list[each_elem: each_elem + size_of_partition]
final_list.append(elem)
for each_sublist in final_list:
if len(each_sublist) != size_of_partition:
final_list.remove(each_sublist)
return list(final_list)
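# Worked example for the function above: partition_list([1, 2, 3, 4, 5], 2) drops the
# short trailing chunk and returns [[1, 2], [3, 4]]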
#==================================================================
# Factorial function
#==================================================================
def factorial_increment(limit_of_factorial_increment):
"""Return the factorial product of the sequence
with limit <limit_of_factorial_increment> """
limit_of_factorial_increment = limit_of_factorial_increment + 1
limit_of_factorial_increment = int(limit_of_factorial_increment)
terms_of_factorial_sequence_list = \
list(range(1, limit_of_factorial_increment, 1))
limit_of_factorial_increment = limit_of_factorial_increment - 1
factorial_product = 1
if limit_of_factorial_increment == 1:
return 1
elif limit_of_factorial_increment == 0:
return 1
while len(terms_of_factorial_sequence_list) != 0:
for each_term in terms_of_factorial_sequence_list:
if each_term == 1:
term, *rest = terms_of_factorial_sequence_list
next_term, *next_rest = rest
factorial_product = term * next_term
terms_of_factorial_sequence_list.remove(term)
terms_of_factorial_sequence_list.remove(next_term)
elif each_term != 1:
term, *rest = terms_of_factorial_sequence_list
factorial_product = factorial_product * term
terms_of_factorial_sequence_list.remove(term)
return factorial_product
#==================================================================
# Riffle elements of two lists
#==================================================================
def interleave_lists(first_list, second_list):
"""Returns a single list with elements of first_list interlaced
with elements of second_list."""
first_list = first_list[:]
second_list = second_list[:]
len_of_first_list = int(len(first_list))
riffled_list = []
if len_of_first_list == 0:
return riffled_list
else:
while len_of_first_list != 0:
len_of_first_list -= 1
for each_element in first_list[:]:
element_of_first_list, *rest_of_first_list = first_list
element_of_second_list, *rest_of_second_list = second_list
riffled_list.append(element_of_first_list)
riffled_list.append(element_of_second_list)
first_list.remove(element_of_first_list)
second_list.remove(element_of_second_list)
first_list = rest_of_first_list
second_list = rest_of_second_list
return riffled_list
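# Worked example for the function above: interleave_lists([1, 2], ['a', 'b']) returns
# [1, 'a', 2, 'b']; both arguments are copied first, so the caller's lists are untouched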
#==================================================================
# Prime Factorization
#==================================================================
def prime_factors(input_number):
"""
Return a list with prime factors of argument <int_number>.
param: input_number
Arguments:
Integer type input. -> List
"""
prime_factors_list = []
first_prime = 2
while first_prime**2 <= input_number:
while (input_number % first_prime) == 0:
prime_factors_list.append(first_prime)
input_number //= first_prime
first_prime += 1
if input_number > 1:
prime_factors_list.append(input_number)
return prime_factors_list
#==================================================================
# Fibonacci Series
#==================================================================
def fibonacci_range(size_of_fibonacci_range):
"""Fibonacci series:
Returns a list with a sequnce of numbers
in which the next is the sum of the las two.
Arguments:
limit of the series.
"""
fibonacci_number_list = [1]
term, next_term = 0, 1
while True:
term, next_term = next_term, term + next_term
fibonacci_number_list.append(next_term)
if len(fibonacci_number_list) == int(size_of_fibonacci_range):
break
return fibonacci_number_list
#==================================================================
# Module runs as script
#==================================================================
if __name__ == '__main__':
    # Illustrative arguments for a quick smoke test (any positive integers work).
    print(fibonacci_range(10))
    print(prime_factors(84))
    print(factorial_increment(5))
|
arthur-schopenhauer/Mathematica_Notebooks
|
trivial_functions.py
|
trivial_functions.py
|
py
| 6,732 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7775796221
|
class Node:
def __init__(self, item):
self.data = item
self.next = None
class LinkedList:
def __init__(self):
self.nodeCount = 0
self.head = None
self.tail = None
def __repr__(self):
if self.nodeCount == 0:
return 'LinkedList: empty'
s = ''
curr = self.head
while curr is not None:
s += repr(curr.data)
if curr.next is not None:
s += ' -> '
curr = curr.next
return s
def getAt(self, pos):
if pos < 1 or pos > self.nodeCount:
return None
i = 1
curr = self.head
while i < pos:
curr = curr.next
i += 1
return curr
def traverse(self):
if not self.head:
return []
answer = []
curr = self.head
while curr is not None:
answer.append(curr.data)
curr = curr.next
return answer
    def insertAt(self, pos, newNode): # insert an element at position pos
if pos < 1 or pos > self.nodeCount +1:
return False
if pos == 1:
newNode.next = self.head
self.head = newNode
else:
if pos == self.nodeCount + 1:
prev = self.tail
else:
prev = self.getAt(pos-1)
newNode.next = prev.next
prev.next = newNode
        if pos == self.nodeCount + 1: # the very first (empty list) or the very last position
self.tail = newNode
self.nodeCount +=1
return True
def popAt(self, pos):
"""
pos가 1일때 self.head = self.head.next
pos가 마지막일때 self.tail = prev
유일한 노드를 삭제할 때 유일한 노드 = self.head = 1 , self.tail = 1
:param pos: 위치
:return:
"""
if pos < 1 or pos > self.nodeCount:
raise IndexError
result = 0
if pos == 1:
if pos == self.nodeCount: # 유일한 노드
self.tail = None
result = self.head.data
self.head = self.head.next
else:
prev = self.getAt(pos-1)
curr = prev.next
prev.next = curr.next
result = curr.data
if pos == self.nodeCount:
self.tail = prev
        self.nodeCount -= 1
return result
a = Node(10)
b = Node(20)
c = Node(30)
L = LinkedList()
L2 = LinkedList()
L.insertAt(1,a)
L.insertAt(2,b)
L2.insertAt(1,c)
L2.insertAt(2,b)
print(L)
print(L2)
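# Hedged follow-up example (not in the original): at this point L is "10 -> 20", so
# L.popAt(1)    would return 10, leaving L as "20" with nodeCount == 1
# L.traverse()  would then return [20]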
|
lowelllll/DataStructure
|
LinkedList/linked_list.py
|
linked_list.py
|
py
| 2,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71493772028
|
'''
2D Multiple Circles Problem
'''
from lsdo_genie import Genie2D
from lsdo_genie.utils.geometric_shapes import Multi_circle
import numpy as np
num_surface_pts = 25
centers = [[-13.,-0.5],[-7.,2.],[2.,0.],[10.,-4.]]
radii = [2.,2.,4.,3.]
geom_shape = Multi_circle(centers,radii)
surface_points = geom_shape.surface_points(num_surface_pts)
surface_normals = geom_shape.unit_normals(num_surface_pts)
custom_domain = np.array([
[-18.,18.],
[-8.,8.]
])
genie = Genie2D(verbose=True)
genie.input_point_cloud(
surface_points=surface_points,
surface_normals=surface_normals,
)
genie.config(
domain=custom_domain,
max_control_points=30,
min_ratio=0.75,
)
genie.solve_energy_minimization(
Ln=1e0,
Lr=1e-4,
)
genie.visualize()
|
LSDOlab/lsdo_genie
|
examples/2D_examples/ex_circles.py
|
ex_circles.py
|
py
| 758 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14098850969
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Few more things we might wanna work out here.
Our lack of the module :mod:`inspect` is pretty surprising.
Refer to either `IPython.core.oinspect` or `xonsh.inspectors`
for some good uses of the std lib module.
"""
from pprint import pformat
from pygments import highlight
from pygments.formatters.terminal256 import TerminalTrueColorFormatter
try:
from gruvbox.gruvbox import GruvboxStyle
except ImportError:
from pygments.styles.inkpot import InkPotStyle
style = InkPotStyle()
else:
style = GruvboxStyle()
from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.lib.lexers import IPyLexer
from IPython.lib.pretty import pprint
@magics_class
class PrettyColorfulInspector(Magics):
"""Implementation for a magic function that inspects a given python object.
The extension then prints a syntax-highlighted and pretty-printed
version of the provided object.
"""
    # Use Pygments to do syntax highlighting
    lexer = IPyLexer()
    try:
        style = GruvboxStyle
    except NameError:  # gruvbox isn't installed; only InkPotStyle was imported above
        style = InkPotStyle
    formatter = TerminalTrueColorFormatter(style=style)
def shell(self):
        # get_ipython() can return None when we're not running inside an IPython shell
return get_ipython()
def __repr__(self):
return f"<{self.__class__.__name__}>:"
@line_magic
def ins(self, line=None):
"""Alias for the `%inspect_obj magic defined here."""
self.inspect(line=line)
@line_magic
def inspect_obj(self, line=None):
"""Deviate from the original implementation a bit.
In this version, we'll use the IPython lexer used at IPython.lib
instead of pygments.
Parameters
----------
line : str
Line to be evaluated by the IPython shell.
Note that this invokes the get_ipython().ev() method.
So we might wanna wrap this in a try/except but idk what it'll raise.
"""
if not line:
return
# evaluate the line to get a python object
python_object = self.shell.ev(line)
# Pretty Print/Format the object
# Print the output, but don't return anything (otherwise, we'd
# potentially get a wall of color-coded text.
formatted_dict = pformat(python_object.__dict__)
        print(highlight(formatted_dict, self.lexer, self.formatter).strip())
pprint(python_object)
def load_ipython_extension(shell=None):
"""Add to the list of extensions used by IPython."""
if shell is None:
shell = get_ipython()
if shell is None:
return
shell.register_magics(PrettyColorfulInspector)
if __name__ == "__main__":
load_ipython_extension(get_ipython())
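# Hedged usage note: inside an IPython session this extension would be loaded with
#   %load_ext default_profile.extensions.extension_inspect
# (module path taken from the repo layout), after which `%ins some_object`
# pretty-prints and highlights some_object.__dict__.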
|
farisachugthai/dynamic_ipython
|
default_profile/extensions/extension_inspect.py
|
extension_inspect.py
|
py
| 2,946 |
python
|
en
|
code
| 7 |
github-code
|
6
|
17178723426
|
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import plotly.express as px
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
import pickle
st.set_page_config(page_title="Air Quality", page_icon="💨", layout='wide', initial_sidebar_state='auto')
@st.cache_data
def load_data():
df = pd.read_csv("data/model_input.csv")
df.drop(['location'],axis=1,inplace=True)
df = df[df['lceq_avg'] != 0]
airquality = pd.read_csv("data/Air_Quality.csv", delimiter=",")
with open('data/xgb_airquality.pkl', 'rb') as f:
model = pickle.load(f)
return df, airquality, model
df, airquality, model = load_data()
airquality['time_stamp'] = pd.to_datetime(airquality['time_stamp'])
airquality['month'] = airquality['time_stamp'].dt.month
airquality['day_month'] = airquality['time_stamp'].dt.day
airquality['day_week'] = airquality['time_stamp'].dt.dayofweek.apply(lambda x: 7 if x == 6 else x + 1)
airquality['hour'] = airquality['time_stamp'].dt.hour
airquality['minute'] = airquality['time_stamp'].dt.minute
merged_df = pd.merge(df, airquality, how='left', on=['month', 'day_month', 'day_week', 'hour', 'minute'])
new_df = merged_df.drop(['lcpeak_avg', 'lceq_avg', 'v85', 'Telraam data', 'avg_pedestrians', 'avg_bikes', 'avg_cars', 'avg_trucks' ], axis=1)
st.title("Air Quality analysis 💨")
st.markdown("""In this section, we will analyse the air quality data found in the PurpleAir API.
We will start by looking at the data and then we will try to find some correlations between the different variables.""")
# Group the data by month and calculate the mean of '2.5_um_count'
grouped_df = new_df.groupby('month')['2.5_um_count'].mean().reset_index()
expander_corr = st.expander("Correlation heatmap explanation")
expander_corr.markdown("We will start by looking at the correlation heatmap of the different variables. This will give us a first idea of the variables that are somewhat correlated with the count of 2.5um particles.")
columns_of_interest = ['LC_TEMP', 'LC_DAILYRAIN', 'LC_RAD', 'LC_WINDDIR', 'month', '2.5_um_count']
corr_matrix = new_df[columns_of_interest].corr()
# Create the correlation heatmap using Plotly
fig = go.Figure(data=go.Heatmap(
z=corr_matrix.values,
x=corr_matrix.columns,
y=corr_matrix.columns,
colorscale='RdBu',
zmin=-1,
zmax=1,
colorbar=dict(title="Correlation")
))
# Add custom annotations for the correlation values inside each square
annotations = []
for i, row in enumerate(corr_matrix.values):
for j, value in enumerate(row):
annotations.append(
dict(
x=corr_matrix.columns[j],
y=corr_matrix.columns[i],
text=str(round(value, 2)),
font=dict(color='white' if abs(value) > 0.5 else 'black'),
showarrow=False
)
)
fig.update_layout(
title='Correlation Heatmap',
xaxis_title='Variables',
yaxis_title='Variables',
width=800,
height=600,
annotations=annotations
)
expander_corr.plotly_chart(fig)
monthly_avg = new_df.groupby('month')['2.5_um_count'].mean().reset_index()
expander_mon = st.expander("Average PM2.5 particles count per Month")
expander_mon.markdown("We will now look at the average PM2.5 particles count per Month. We can see that there is a negative correlation between the 2.5_um_count and the month. This shows that the air quality is better during the summer months.")
fig = px.line(monthly_avg, x='month', y='2.5_um_count', color_discrete_sequence=['#3366cc'])
fig.update_layout(title='Average 2.5_um_count per Month',
xaxis_title='Month', yaxis_title='Average 2.5_um_count')
expander_mon.plotly_chart(fig)
expander_temp = st.expander("Average PM2.5 particles count per Temperature")
expander_temp.markdown("We will now look at the average PM2.5 particles count per Temperature. We can see that there is a negative correlation between the 2.5_um_count and the LC_TEMP. This means that when the temperature is higher, the air quality is better.")
fig = px.scatter(new_df, x="LC_TEMP", y="2.5_um_count", trendline="ols",
animation_frame="month", animation_group="day_month", color="day_month",
hover_name="day_month", range_x=[-5, 25], range_y=[0, 40])
fig.update_layout(title='2.5_um_count by LC_TEMP', xaxis_title='LC_TEMP', yaxis_title='2.5_um_count')
expander_temp.plotly_chart(fig)
merged_df['2.5_um_count'] = merged_df['2.5_um_count'].fillna(method='ffill').rolling(window=10, min_periods=1).mean()
merged_df = merged_df.drop(['time_stamp'], axis=1)
x = merged_df.drop(['2.5_um_count'], axis=1)
y = merged_df['2.5_um_count']
xgb = model
expander_imp = st.expander("Feature importance")
expander_imp.markdown("We will now look at the feature importance of the different variables. The used model is a XGBoost model, with the target variable being the 2.5_um_count. By looking at the feature importance, we can see which variables are the most important in predicting the 2.5_um_count. We can see that the most important variables are the temporal data and weather conditions.")
importance_sorted = sorted(zip(xgb.feature_importances_, x.columns), reverse=True)
importance_values_sorted = [imp for imp, _ in importance_sorted]
variable_names_sorted = [var for _, var in importance_sorted]
fig = px.bar(x=importance_values_sorted, y=variable_names_sorted, orientation='h')
fig.update_layout(
title='Feature importance',
xaxis_title='Importance',
yaxis_title='Variables',
yaxis=dict(
tickmode='array',
ticktext=variable_names_sorted,
tickvals=variable_names_sorted,
showticklabels=True,
automargin=True
)
)
expander_imp.plotly_chart(fig)
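# Hedged usage note: as a page of a Streamlit multipage app, this file is not run
# directly; the app is started with `streamlit run <entry script>` and Streamlit picks
# this module up automatically because it lives in the pages/ directory.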
|
vavald/MDA_streamlit
|
pages/6_💨_AirQuality.py
|
6_💨_AirQuality.py
|
py
| 5,860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11704116245
|
import platform
from tkinter import filedialog
from web3 import Web3
from web3.contract import Contract
from web3.providers.rpc import HTTPProvider
from solcx import install_solc
install_solc(version='latest')
from solcx import compile_source
import subprocess
import os
import tkinter as tk
from PIL import Image, ImageTk
import threading
import json
from dotenv import load_dotenv
load_dotenv()
PRIVATE_KEY = os.getenv('PRIVATE_KEY')
onChainSmartContract = None
web3_1 = None
web3_2 = None
web3_3 = None
web3_4 = None
def get_sc(filename):
current_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_dir, filename), 'r') as file:
text = file.read()
return text
def update_storage(map):
current_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(current_dir, 'onchaindata.json')
try:
with open(filename, 'r') as file:
maps = json.load(file)
except FileNotFoundError:
maps = []
maps.append(map)
with open(filename, 'w') as file:
json.dump(maps, file)
def read_storage(name: str):
current_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(current_dir, 'onchaindata.json')
with open(filename, 'r') as file:
maps = json.load(file)
for map_data in maps:
if map_data['name'] == name:
return map_data
return None
def deploy(w3: Web3, contract: Contract, name: str):
sm_transaction = {
"from": w3.eth.accounts[0],
"maxFeePerGas": w3.to_hex(1000000000000),
"maxPriorityFeePerGas": w3.to_hex(0),
"gas": w3.to_hex(1000000000000),
"nonce": w3.eth.get_transaction_count(w3.eth.accounts[0]),
"data": contract.bytecode,
'chainId': w3.eth.chain_id
}
signedTransaction = w3.eth.account.sign_transaction(sm_transaction, PRIVATE_KEY)
transaction_hash = w3.eth.send_raw_transaction(signedTransaction.rawTransaction)
tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash)
contract = w3.eth.contract(address=tx_receipt.contractAddress, abi=contract.abi, bytecode=contract.bytecode)
new_map = {
'name': name,
'address': str(contract.address),
'abi': str(contract.abi)
}
update_storage(new_map)
return contract
def read(contract: Contract, function_name: str, args: list):
if len(args) == 0:
# result = contract.functions.askForDeploySmartContract().call()
result = contract.functions[function_name]().call()
elif len(args) == 1:
result = contract.functions[function_name](args[0]).call()
elif len(args) == 2:
result = contract.functions[function_name](args[0], args[1]).call()
else:
result = contract.functions[function_name](args[0], args[1], args[2]).call()
return result
def write(w3: Web3, contract: Contract, function_name: str, args: any):
new_transaction = {
"from": w3.eth.accounts[0],
"to": contract.address,
"data": contract.encodeABI(fn_name=function_name, args=args),
"gas": w3.to_hex(1000000000000),
# "gasPrice": w3.to_wei('0', 'gwei'),
"maxFeePerGas": w3.to_hex(1000000000000),
"maxPriorityFeePerGas": w3.to_wei(0, 'gwei'),
"nonce": w3.eth.get_transaction_count(w3.eth.accounts[0]),
'chainId': w3.eth.chain_id
}
#"gas": w3.to_hex(6721975),
signedTransaction = w3.eth.account.sign_transaction(new_transaction, PRIVATE_KEY)
transaction_hash = w3.eth.send_raw_transaction(signedTransaction.rawTransaction)
receipt = w3.eth.wait_for_transaction_receipt(transaction_hash)
return receipt
def init_web3():
global web3_1
global web3_2
global web3_3
global web3_4
web3_1 = Web3(HTTPProvider("http://127.0.0.1:8545"))
if web3_1.is_connected():
print("Connected to http://127.0.0.1:8545")
else:
print("Not connected to http://127.0.0.1:8545")
web3_2 = Web3(HTTPProvider("http://127.0.0.1:8546"))
if web3_2.is_connected():
print("Connected to http://127.0.0.1:8546")
else:
print("Not connected to http://127.0.0.1:8546")
web3_3 = Web3(HTTPProvider("http://127.0.0.1:8547"))
if web3_3.is_connected():
print("Connected to http://127.0.0.1:8547")
else:
print("Not connected to http://127.0.0.1:8547")
web3_4 = Web3(HTTPProvider("http://127.0.0.1:8548"))
if web3_4.is_connected():
print("Connected to http://127.0.0.1:8548")
else:
print("Not connected to http://127.0.0.1:8548")
def loadOnChainManager():
compiledSmartContract = compile_source(get_sc("onchainmanager.sol"), output_values=['abi', 'bin'])
_, smartContractInterface = compiledSmartContract.popitem()
smartContractBytecode = smartContractInterface['bin']
smartContractAbi = smartContractInterface['abi']
global onChainSmartContract
onChainSmartContract = web3_1.eth.contract(abi=smartContractAbi, bytecode=smartContractBytecode)
count = web3_1.eth.get_transaction_count(web3_1.eth.accounts[0])
sc = read_storage("onchainsc")
if sc is None:
onChainSmartContract = deploy(web3_1, onChainSmartContract, 'onchainsc')
# my_contract = web3_1.eth.contract(address=onChainSmartContract.address, abi=onChainSmartContract.abi)
else:
onChainSmartContract = web3_1.eth.contract(address=sc["address"], abi=smartContractAbi, bytecode=smartContractBytecode)
write(web3_1, onChainSmartContract, 'setAddress1', ["http://127.0.0.1:8546"])
write(web3_1, onChainSmartContract, 'setAddress2', ["http://127.0.0.1:8547"])
write(web3_1, onChainSmartContract, 'setAddress3', ["http://127.0.0.1:8548"])
class Loader(tk.Frame):
def __init__(self, parent):
super().__init__(parent)
parent.title("Progetto Software Security & Blockchain")
self.result_text = tk.Text(self)
tk.Button(self, text="Avvia le Blockchain", command=self.run_script_threaded).grid(row=1, column=0)
tk.Button(self, text="Avvia il Programma", command=self.start_app).grid(row=1, column=1)
self.result_text.grid(row=2, column=0, columnspan=2)
def run_script_threaded(self):
threading.Thread(target=self.run_script).start()
# threading.Thread(target=self.init_web3_thread).start()
def start_app(self):
# self.result_text.insert(tk.END, "Tutte le Blockchain sono state inizializzate correttamente!")
# self.result_text.insert(tk.END, "Caricamento del programma... (impiega circa 15 secondi)")
homepage = HomePage(self.master)
init_web3()
loadOnChainManager()
def run_script(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
system = platform.system()
try:
if system == 'Windows':
dir = os.path.join(current_dir, "init.sh")
process = subprocess.Popen([dir], stdout=subprocess.PIPE, shell=True, stderr=subprocess.PIPE, universal_newlines=True)
elif system == 'Linux':
command = ['bash', os.path.join(current_dir, "init.sh")]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
elif system == 'Darwin':
command = ['bash', os.path.join(current_dir, "init.sh")]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
else:
print(f"Error: Unsupported system '{system}'")
except Exception as e:
print("Errore", "File non trovato al percorso " + e)
while True:
output = process.stdout.readline()
if not output and process.poll() is not None:
break
self.result_text.insert(tk.END, output)
self.result_text.see(tk.END)
class HomePage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Home Page")
current_dir = os.path.dirname(os.path.abspath(__file__))
button1_image = Image.open(os.path.join(current_dir, "receipt_long.png"))
button2_image = Image.open(os.path.join(current_dir, "assignment.png"))
button3_image = Image.open(os.path.join(current_dir, "tty.png"))
button4_image = Image.open(os.path.join(current_dir, "delete.png"))
button1_photo = ImageTk.PhotoImage(button1_image)
button2_photo = ImageTk.PhotoImage(button2_image)
button3_photo = ImageTk.PhotoImage(button3_image)
button4_photo = ImageTk.PhotoImage(button4_image)
title_label = tk.Label(self, text="Scegli come deployare il tuo smart contract, oppure se richiamare un metodo di uno smart contract esistente.", font=("Arial", 13))
title_label.grid(row=0, column=0, columnspan=3, pady=20)
button1 = tk.Button(self, image=button1_photo, text="Deploy il file .sol", compound=tk.TOP, font=("Arial", 12), command=self.button1_clicked)
button1.image = button1_photo
button1.grid(row=1, column=0, padx=0, pady=10)
frame1 = tk.Frame(self, height=100, width=100)
frame1.pack_propagate(0)
frame1.grid(row=2, column=0, padx=0, pady=10)
label1 = tk.Label(frame1, text="Carica il tuo\nfile in solidity\nin cui è scritto\nlo Smart Contract", font=("Arial", 13))
label1.pack(fill=tk.BOTH, expand=1)
button2 = tk.Button(self, image=button2_photo, text="Deploy da ABI e Bytecode", compound=tk.TOP, font=("Arial", 12), command=self.button2_clicked)
button2.image = button2_photo
button2.grid(row=1, column=1, padx=0, pady=10)
frame2 = tk.Frame(self, height=100, width=100)
frame2.pack_propagate(0)
frame2.grid(row=2, column=1, padx=0, pady=10)
label2 = tk.Label(frame2, text="Carica il tuo\nSmart Contract\nscrivendo l'ABI\ne il Bytecode", font=("Arial", 13))
label2.pack(fill=tk.BOTH, expand=1)
button3 = tk.Button(self, image=button3_photo, text="Chiama metodo", compound=tk.TOP, font=("Arial", 12), command=self.button3_clicked)
button3.image = button3_photo
button3.grid(row=1, column=2, padx=0, pady=10)
frame3 = tk.Frame(self, height=100, width=100)
frame3.pack_propagate(0)
frame3.grid(row=2, column=2, padx=0, pady=10)
label3 = tk.Label(frame3, text="Chiama un\nmetodo di uno\nSmart Contract\nesistente", font=("Arial", 13))
label3.pack(fill=tk.BOTH, expand=1)
button4 = tk.Button(self, image=button4_photo, text="Elimina Smart Contract", compound=tk.TOP, font=("Arial", 12), command=self.button4_clicked)
button4.image = button4_photo
button4.grid(row=1, column=3, padx=0, pady=10)
frame4 = tk.Frame(self, height=100, width=100)
frame4.pack_propagate(0)
frame4.grid(row=2, column=3, padx=0, pady=10)
label4 = tk.Label(frame4, text="Elimina uno\nSmart Contract", font=("Arial", 13))
label4.pack(fill=tk.BOTH, expand=1)
    def get_folder_path(self):
while True:
folder_path = input("Please enter the path of the folder: ")
if os.path.isdir(folder_path):
return folder_path
else:
print("Invalid folder path. Please try again.")
def button1_clicked(self):
soliditypage = SolidityPage(self.master)
def button2_clicked(self):
abibytecodepage = ABIBytecodePage(self.master)
def button3_clicked(self):
callmethodpage = MethodsPage(self.master)
def button4_clicked(self):
deletepage = DeletePage(self.master)
class SolidityPage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Deploy file .sol")
self.name_label = tk.Label(self, text="Nome:")
self.name_label.pack()
self.name_entry = tk.Entry(self)
self.name_entry.pack()
self.file_label = tk.Label(self, text="File:")
self.file_label.pack()
self.file_entry = tk.Entry(self, state="readonly")
self.file_entry.pack()
self.browse_button = tk.Button(self, text="Cerca", command=self.browse_file)
self.browse_button.pack()
frame = tk.Frame(self)
self.ok_button = tk.Button(frame, text="Deploy", command=self.ok_button_click)
self.ok_button.pack(side=tk.LEFT)
self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
self.cancel_button.pack(side=tk.LEFT)
frame.pack()
def browse_file(self):
filetypes = (("Solidity files", "*.sol"), ("All files", "*.*"))
filename = filedialog.askopenfilename(filetypes=filetypes)
if filename:
self.file_entry.config(state="normal")
self.file_entry.delete(0, tk.END)
self.file_entry.insert(0, filename)
self.file_entry.config(state="readonly")
def ok_button_click(self):
name = self.name_entry.get()
filename = self.file_entry.get()
if not filename.endswith(".sol"):
tk.messagebox.showerror("Error", "Invalid file format. Please select a .sol file.")
else:
compiledSmartContract = compile_source(get_sc(os.path.basename(filename).split('/')[-1]), output_values=['abi', 'bin'])
_, smartContractInterface = compiledSmartContract.popitem()
smartContractBytecode = smartContractInterface['bin']
smartContractAbi = smartContractInterface['abi']
receipt = write(web3_1, onChainSmartContract, "getNextAddress", [])
logs = onChainSmartContract.events.NextAddressReturned().process_receipt(receipt)
nextAddress = logs[0]['args']['nextAddress']
web3_c = Web3(HTTPProvider(nextAddress))
if web3_c.is_connected():
print("Connected to " + nextAddress)
customSmartContract = web3_c.eth.contract(abi=smartContractAbi, bytecode=smartContractBytecode)
customSmartContract = deploy(web3_c, customSmartContract, name)
write(web3_1, onChainSmartContract, 'addContract', [name, str(nextAddress), str(customSmartContract.address), str(smartContractAbi)])
result = read(onChainSmartContract, 'getContract', [name])
print("Result: " + str(result))
show_toast('Lo Smart Contract è stato deployato con successo', '')
else:
print("Not connected to " + nextAddress)
self.destroy()
def cancel_button_click(self):
self.destroy()
class ABIBytecodePage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Deploy da ABI e Bytecode")
self.name_label = tk.Label(self, text="Nome:")
self.name_label.pack()
self.name_entry = tk.Entry(self)
self.name_entry.pack()
self.abi_label = tk.Label(self, text="ABI:")
self.abi_label.pack()
self.abi_label = tk.Entry(self)
self.abi_label.pack()
self.bytecode_label = tk.Label(self, text="Bytecode:")
self.bytecode_label.pack()
self.bytecode_label = tk.Entry(self)
self.bytecode_label.pack()
frame = tk.Frame(self)
self.ok_button = tk.Button(frame, text="Deploy", command=self.ok_button_click)
self.ok_button.pack(side=tk.LEFT)
self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
self.cancel_button.pack(side=tk.LEFT)
frame.pack()
def ok_button_click(self):
name = self.name_entry.get()
smartContractAbi = self.abi_label.get()
smartContractBytecode = self.bytecode_label.get()
receipt = write(web3_1, onChainSmartContract, "getNextAddress", [])
logs = onChainSmartContract.events.NextAddressReturned().process_receipt(receipt)
nextAddress = logs[0]['args']['nextAddress']
web3_c = Web3(HTTPProvider(nextAddress))
if web3_c.is_connected():
print("Connected to " + nextAddress)
customSmartContract = web3_c.eth.contract(abi=smartContractAbi, bytecode=smartContractBytecode)
customSmartContract = deploy(web3_c, customSmartContract, name)
write(web3_1, onChainSmartContract, 'addContract', [name, str(nextAddress), str(customSmartContract.address), str(smartContractAbi)])
result = read(onChainSmartContract, 'getContract', [name])
print("Result: " + str(result))
show_toast('Lo Smart Contract è stato deployato con successo', '')
else:
print("Not connected to " + nextAddress)
self.destroy()
def cancel_button_click(self):
self.destroy()
class MethodsPage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Chiama metodo")
self.name_label = tk.Label(self, text="Nome dello Smart Contract:")
self.name_label.pack()
self.name_entry = tk.Entry(self)
self.name_entry.pack()
self.function_label = tk.Label(self, text="Nome della Funzione:")
self.function_label.pack()
self.function_label = tk.Entry(self)
self.function_label.pack()
self.selected_option = tk.IntVar()
self.selected_option.set(0)
self.option1 = tk.Radiobutton(self, text='Lettura', variable=self.selected_option, value=0)
self.option1.pack(padx=10)
self.option2 = tk.Radiobutton(self, text='Scrittura', variable=self.selected_option, value=1)
self.option2.pack(padx=10)
self.arg1_label = tk.Label(self, text="Arg 1:")
self.arg1_label.pack()
self.arg1_label = tk.Entry(self)
self.arg1_label.pack()
self.arg2_label = tk.Label(self, text="Arg 2:")
self.arg2_label.pack()
self.arg2_label = tk.Entry(self)
self.arg2_label.pack()
self.arg3_label = tk.Label(self, text="Arg 3:")
self.arg3_label.pack()
self.arg3_label = tk.Entry(self)
self.arg3_label.pack()
frame = tk.Frame(self)
self.ok_button = tk.Button(frame, text="OK", command=self.ok_button_click)
self.ok_button.pack(side=tk.LEFT)
self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
self.cancel_button.pack(side=tk.LEFT)
frame.pack()
def ok_button_click(self):
selected_option = self.selected_option.get()
name = self.name_entry.get()
function = self.function_label.get()
arg1 = self.arg1_label.get()
arg2 = self.arg2_label.get()
arg3 = self.arg3_label.get()
data = read(onChainSmartContract, "getContract", [name])
blockChainAddress = data[0]
address = data[1]
abi = data[2].replace("'", '"').replace('False', 'false').replace('True', 'true')
web3_c = Web3(HTTPProvider(blockChainAddress))
if web3_c.is_connected():
print("Connected to " + blockChainAddress)
customSmartContract = web3_c.eth.contract(address=address, abi=abi)
if arg1 and arg2 and arg3:
if selected_option == 0:
result = read(customSmartContract, function, [arg1, arg2, arg3])
else:
write(web3_c, customSmartContract, function, [arg1, arg2, arg3])
elif arg1 and arg2:
if selected_option == 0:
result = read(customSmartContract, function, [arg1, arg2])
else:
write(web3_c, customSmartContract, function, [arg1, arg2])
elif arg1:
if selected_option == 0:
result = read(customSmartContract, function, [arg1])
else:
write(web3_c, customSmartContract, function, [arg1])
else:
if selected_option == 0:
result = read(customSmartContract, function, [])
else:
write(web3_c, customSmartContract, function, [])
if selected_option == 0:
show_toast('Dati ottenuti correttamente', str(result))
else:
show_toast('Dati scritti correttamente', '')
self.destroy()
else:
show_toast('Lo Smart Contract non esiste', 'Deployalo prima di eseguire un suo metodo')
print("Not connected to " + blockChainAddress)
def cancel_button_click(self):
self.destroy()
def show_toast(title, description):
window = tk.Toplevel()
window.overrideredirect(True)
window.attributes("-topmost", True)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# window.geometry(f"{500}x{100}+{screen_width//2-50}+{screen_height//2-50}")
# create a frame for the toast message
frame = tk.Frame(window, bg='white', bd=1, relief=tk.RAISED)
frame.pack(side=tk.BOTTOM, padx=10, pady=10)
# create a label for the title and add it to the frame
title_label = tk.Label(frame, text=title, font=('Arial', 14, 'bold'), fg='black', bg='white')
title_label.pack(padx=10, pady=5)
# create a label for the description and add it to the frame
desc_label = tk.Label(frame, text=description, font=('Arial', 12), fg='gray', bg='white')
desc_label.pack(padx=10, pady=5)
# function to destroy the window after a short delay
def destroy_window():
window.after(3000, window.destroy)
window.after(3000, destroy_window)
class DeletePage(tk.Toplevel):
def __init__(self, parent):
super().__init__(parent)
# screen_width = root.winfo_screenwidth()
# screen_height = root.winfo_screenheight()
# self.geometry(f"{screen_width//2}x{screen_height//2}+{screen_width//4}+{screen_height//4}")
self.title("Elimina Smart Contract")
self.name_label = tk.Label(self, text="Nome:")
self.name_label.pack()
self.name_entry = tk.Entry(self)
self.name_entry.pack()
frame = tk.Frame(self)
self.ok_button = tk.Button(frame, text="Elimina", command=self.ok_button_click)
self.ok_button.pack(side=tk.LEFT)
self.cancel_button = tk.Button(frame, text="Annulla", command=self.cancel_button_click)
self.cancel_button.pack(side=tk.LEFT)
frame.pack()
def ok_button_click(self):
name = self.name_entry.get()
write(web3_1, onChainSmartContract, "deleteContract", [name])
show_toast('Smart Contract eliminato correttamente', '')
self.destroy()
def cancel_button_click(self):
self.destroy()
root = tk.Tk()
app = Loader(root)
app.pack()
root.mainloop()
|
MassimilianoPiccinini/SoftwareSecurity-Blockchain
|
src/offchain.py
|
offchain.py
|
py
| 23,961 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19707336879
|
# # # -*- coding: utf-8 -*-
# # # @Time :2021/3/22 20:56
# # # @Author :huangzg28153
# # # @File :test.py
# # # @Software :PyCharm
# # import numpy as np
# # import pandas as pd
# # # type = [0,1,1,1,2,0,1,0,1,2,2,0]
# # # ser = [0,1,2,3,4,5,6,0,1,2,3,4]
# # # layer = [0,0,0,0,0,1,1,0,0,0,0,1]
# # # sample = [0,0,0,0,0,0,0,1,1,1,1,1]
# # #
# # # df = pd.DataFrame({"type":type,"ser":ser,"layer":layer,"sample":sample})
# # #
# # #
# # # df.sort_values(by=["ser",'type',"sample","layer"],axis=0)
# # # df.sort_values(by=["layer","sample","type","ser"],axis=0)
# # # df.sort_values(by=["type","layer","sample","ser"],axis=0)
# # # df['order'] = [0,2,4,5,6,9,11,1,3,7,8,10]
# # # df = df.sort_values(by=['order'],axis=0)
# # # df.sort_values(by=['layer','ser','type','sample'],axis=0)
# # # df.sort_values(by=["sample","type",'ser',"layer"],axis=0)
# # #
# # # ########################################################
# # # df.sort_values(by=['layer',"type","sample","ser"],axis=0).reset_index().index
# # # #######################################################
# # # from multiprocess import Process,Manager
# # # from pyHGT.data import Graph, renamed_load
# # # from pyHGT.data import renamed_load
# # # import os
# # # import ray
# # # Manager().register("Graph", Graph)
# # # dir(Manager())
# # # ABSULUTE_DIR = '/data1/huangzg/research/pyHGT_OAG'
# # # graph = renamed_load(open(os.path.join(ABSULUTE_DIR + '/data/oag_output', 'graph_CS.pk'), 'rb'))
# # # func = lambda graph,inp: print(graph.__dir__())
# # #
# # # # graph = Manager().Graph(graph)
# # # ray_graph = ray.put(graph)
# # ###########################
# #
# # import oaglog
# # from pyHGT.data import renamed_load
# # from pyHGT.model import *
# # from pyHGT.SubgraphToTorch import SubgraphToTorch
# # from warnings import filterwarnings
# # filterwarnings("ignore")
# # import ray
# # import os
# # import numpy as np
# # import dill
# # from collections import defaultdict
# # import sys
# # import argparse
# # oaglog.logger.info("流程开始。。。")
# # parser = argparse.ArgumentParser(description='Training GNN on Paper-Venue (Journal) classification task')
# #
# # '''
# # Dataset arguments
# # '''
# # parser.add_argument('--data_dir', type=str, default='/data/oag_output/',
# # help='The address of preprocessed graph.')
# # parser.add_argument('--subgraphs_dir',type=str,default='/data/sampled_subgraphs/',
# # help='The adress of sampled subgraph.')
# # parser.add_argument('--model_dir', type=str, default='/model_save/',
# # help='The address for storing the models and optimization results.')
# # parser.add_argument('--task_name', type=str, default='PV',
# # help='The name of the stored models and optimization results.')
# # parser.add_argument('--cuda', type=int, default=0,
# # help='Avaiable GPU ID')
# # parser.add_argument('--domain', type=str, default='_CS',
# # help='CS, Medicion or All: _CS or _Med or (empty)')
# # '''
# # Model arguments
# # '''
# # parser.add_argument('--conv_name', type=str, default='hgt',
# # choices=['hgt', 'gcn', 'gat', 'rgcn', 'han', 'hetgnn'],
# # help='The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)')
# # parser.add_argument('--n_hid', type=int, default=400,
# # help='Number of hidden dimension')
# # parser.add_argument('--n_heads', type=int, default=8,
# # help='Number of attention head')
# # parser.add_argument('--n_layers', type=int, default=4,
# # help='Number of GNN layers')
# # parser.add_argument('--dropout', type=float, default=0.2,
# # help='Dropout ratio')
# # parser.add_argument('--sample_depth', type=int, default=6,
# # help='How many numbers to sample the graph')
# # parser.add_argument('--sample_width', type=int, default=128,
# # help='How many nodes to be sampled per layer per type')
# # parser.add_argument('--feature_flags', type=tuple, default=('rw','sp'),
# # help='which kind of distance feature to use,"random walk","shortest path" or both')
# # parser.add_argument('--max_sprw', type=tuple, default=(4, 4),
# # help='parameters of distance feature')
# # parser.add_argument('--if_sample_mp',type=bool, default=True,
# # help="whether sample subgraph with multiprocessing or not")
# # parser.add_argument('--sample_n_pool',type=int,default=16,
# # help="how many pools to sample subgraph")
# # '''
# # Optimization arguments
# # '''
# # parser.add_argument('--optimizer', type=str, default='adamw',
# # choices=['adamw', 'adam', 'sgd', 'adagrad'],
# # help='optimizer to use.')
# # parser.add_argument('--data_percentage', type=float, default=1.0,
# # help='Percentage of training and validation data to use')
# # parser.add_argument('--n_epoch', type=int, default=200,
# # help='Number of epoch to run')
# # parser.add_argument('--n_pool', type=int, default=4,
# # help='Number of process to sample subgraph')
# # parser.add_argument('--n_batch', type=int, default=32,
# # help='Number of batch (sampled graphs) for each epoch')
# # parser.add_argument('--repeat', type=int, default=2,
# # help='How many time to train over a singe batch (reuse data)')
# # parser.add_argument('--batch_size', type=int, default=256,
# # help='Number of output nodes for training')
# # parser.add_argument('--clip', type=float, default=0.25,
# # help='Gradient Norm Clipping')
# #
# # args = parser.parse_args()
# #
# # if args.cuda != -1:
# # device = torch.device("cuda:" + str(args.cuda))
# # else:
# # device = torch.device("cpu")
# #
# # ABSULUTE_DIR = '/data1/huangzg/research/pyHGT_OAG'
# #
# # ###############################################data_preparing#########################################################
# # # oaglog.logger.info("读取数据开始。。。")
# # # graph = renamed_load(open(os.path.join(ABSULUTE_DIR + args.data_dir, 'graph%s.pk' % args.domain), 'rb'))
# # # oaglog.logger.info("读取数据完毕。")
# # #
# # # from ReadData import read_data, graph_to_dict
# # #
# # # dict_graph = graph_to_dict(graph)
# #
# # from multiprocess import Manager, Pool, SharedMemoryManager
# # # manager = Manager()
# # # graph_temp = manager.dict(dict_graph)
# #
# # graph = [np.ones(10**8) for i in range(20)]
# #
# # def mp_test(graph):
# # print(id(graph))
# # return 1
# #
# # p = Pool(6)
# #
# # result = p.apply_async(mp_test,graph_temp)
# # # @ray.remote
# # # class Counter(object):
# # # def __init__(self,a):
# # # self.n = 0
# # # self.a = a
# # # def increment(self):
# # # self.n += 1
# # #
# # # def read(self,b,m_graph):
# # # print("a")
# # # self.increment()
# # # print(id(m_graph))
# # # del m_graph
# # # return self.n * b
# # #
# # # counters = [Counter.remote(a=0) for i in range(8)]
# # # futures = [c.read.remote(2, ray_graph) for c in counters]
# # #
# # # print('******************************')
# # # print(ray.get(futures))
# #
# # ray.init()
# # @ray.remote
# # def func(array, param):
# # # print(array.job_id)
# # # print(array.task_id)
# # # print(array.size)
# # # print(type(array))
# # print(id(array))
# # return 1
# #
# # # array = np.ones(10**6)
# # # Store the array in the shared memory object store once
# # # so it is not copied multiple times.
# # # graph = {i: np.ones(10**8) for i in range(20)}
# # graph = [np.ones(10**8) for i in range(20)]
# # array_id = ray.put(graph)
# #
# # result_ids = [func.remote(array_id, i) for i in range(40)]
# # output = ray.get(result_ids)
# # #################################################################
# # #
# # # ray.get(ray_graph)
# # # import ray
# # # import asyncio
# # # ray.init()
# # #
# # # import asyncio
# # #
# # # @ray.remote
# # # class AsyncActor:
# # # async def run_task(self):
# # # print("started")
# # # await asyncio.sleep(1) # Network, I/O task here
# # # print("ended")
# # #
# # # actor = AsyncActor.remote()
# # # # All 50 tasks should start at once. After 1 second they should all finish.
# # # # they should finish at the same time
# # # ray.get([actor.run_task.remote() for _ in range(50)])
# # ###################################################################
# # # import ray
# # # import asyncio
# # # ray.init()
# # #
# # # @ray.remote(num_cpus=40)
# # # class AsyncActor:
# # # # multiple invocation of this method can be running in
# # # # the event loop at the same time
# # # async def run_concurrent(self):
# # # print("started")
# # # await asyncio.sleep(2) # concurrent workload here
# # # print("finished")
# # #
# # # actor = AsyncActor.remote()
# # #
# # # # regular ray.get
# # # ray.get([actor.run_concurrent.remote() for _ in range(80)])
# #
# # # # async ray.get
# # # await actor.run_concurrent.remote()
# #
# # ########################################################################
# from multiprocessing import Pool,Manager,shared_memory
# from multiprocessing.managers import SharedMemoryManager
# import numpy as np
#
#
# a = np.array([np.ones(10**8) for i in range(20)])
#
# shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
# b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
# b[:] = a[:]
#
# def mp_test(graph):
# print(id(graph))
# return 1
#
# p = Pool(6)
# results = []
# for i in range(3):
# result = p.apply_async(mp_test, args=(b,))
# results.append(result)
#
# re = [job.get() for job in results]
# ############################################################################
from multiprocessing import Pool
import multiprocessing as mp
from collections import defaultdict
import pandas as pd
import os
class NewClass(object):
def __init__(self,
a):
self.a = a
self.b = {"a":a}
self.c = pd.DataFrame(self.b)
self.d = {"c":self.c, "b":self.b, "a":a}
def my_method(self,e):
print(id(self.a))
print(id(self.b))
print(id(self.c))
print(id(self.d))
print(id(e))
defaultdict(lambda :[])
return 1
graph = NewClass([1, 3, 6])  # module-level object that the pool workers below read
def my_fun(param,graph):
print(os.getpid(), id(graph))
return 1
def my_mp(param):
    return my_fun(param, graph)  # return the result so that job.get() below yields 1 instead of None
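# Note: the pool workers are assumed to inherit `graph` from the parent process (fork on POSIX), so
# printing id(graph) in each worker shows whether the object is shared rather than re-pickled per task.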
if __name__ == '__main__':
p = Pool(5)
jobs = []
for i in range(mp.cpu_count()-1):
job = p.apply_async(my_mp, args=(['a','b'],))
jobs.append(job)
result = [job.get() for job in jobs]
print(result)
|
hzg0601/cn-annotation
|
HGT_OAG_cn-annotation/codes/test.py
|
test.py
|
py
| 10,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72621252988
|
hargaBarang = int(input("Masukkan Harga Barang: "))
uang = int(input("Masukkan Uang Anda: "))
kembalian = uang - hargaBarang
pecahan_uang = [100000, 50000, 20000, 10000, 5000, 2000, 1000]
if hargaBarang > uang:
print("uang anda tidak cukup, dilarang utang disini!")
exit()
for pecahan in pecahan_uang:
jumlah_uang = kembalian // pecahan
kembalian %= pecahan
print(f"{jumlah_uang} uang sejumlah Rp.{pecahan} ")
# jumlah_uang is the number of bills of this denomination that will be returned as change
|
ArdiansyahAsrifah/LAB_AP_09
|
H071231016/Praktikum-3/nomor02.py
|
nomor02.py
|
py
| 502 |
python
|
id
|
code
| 0 |
github-code
|
6
|
18790221797
|
import torch
import decord
from decord import cpu, gpu
from tqdm import tqdm
import json
import os
import random
import numpy as np
import pickle
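# Split the video into `num_frames` equal intervals and pick one frame per interval
# (randomly, at a fixed offset, or at the interval midpoint).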
def sample_frames(num_frames, vlen, sample='rand', fix_start=None):
acc_samples = min(num_frames, vlen)
intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
ranges = []
for idx, interv in enumerate(intervals[:-1]):
ranges.append((interv, intervals[idx + 1] - 1))
if sample == 'rand':
        frame_idxs = [random.choice(range(x[0], x[1])) if x[1] > x[0] else x[0] for x in ranges]
elif fix_start is not None:
frame_idxs = [x[0] + fix_start for x in ranges]
elif sample == 'uniform':
frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
else:
raise NotImplementedError
return frame_idxs
video_list_train = json.load(open("video_train.json"))
video_list_val = json.load(open("video_val.json"))
video_list_test = json.load(open("video_test.json"))
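# For each training video, draw 9 independent random 3-frame clips (i = 1..9) and cache each one as a
# pickled uint8 tensor with frames first and channels second.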
frame_ids = {}  # maps "<video_id>_<i>" to the frame indices sampled for that clip
for video_id in tqdm(video_list_train):
if os.path.exists(f"videos/{video_id}.mp4"):
video_path = f"videos/{video_id}.mp4"
else:
video_path = f"videos/{video_id}.mkv"
video_reader = decord.VideoReader(video_path, width=512, height=512)
decord.bridge.set_bridge('torch')
vlen=len(video_reader)
for i in range(1,10):
frame_idxs = sample_frames(3, vlen, sample="rand")
frames = video_reader.get_batch(frame_idxs).byte()
frames = frames.permute(0, 3, 1, 2)
frame_ids[f"{video_id}_{i}"]=frame_idxs
pickle.dump(frames,open(f"allinone_data/images/rand_new/{video_id}_{i}","wb"))
for video_id in tqdm(video_list_val+video_list_test):
if os.path.exists(f"videos/{video_id}.mp4"):
video_path = f"videos/{video_id}.mp4"
else:
video_path = f"videos/{video_id}.mkv"
video_reader = decord.VideoReader(video_path, width=512, height=512, num_threads=1)
decord.bridge.set_bridge('torch')
vlen=len(video_reader)
frame_idxs = sample_frames(3, vlen, sample="uniform")
frames = video_reader.get_batch(frame_idxs).byte()
frames = frames.permute(0, 3, 1, 2)
pickle.dump(frames,open(f"clipbert/images/uniform/{video_id}","wb"))
|
MILVLG/anetqa-code
|
clipbert/sample_imgs_clipbert.py
|
sample_imgs_clipbert.py
|
py
| 2,196 |
python
|
en
|
code
| 6 |
github-code
|
6
|
690172059
|
'''Redoing the arithmetic progression exercise, this time with a while loop'''
print('=-='*20)
print('Progressão Aritmética')
print('=-='*20)
primeiro = int(input('Digite o primeiro termo da progressão: '))
razao = int(input('Digite a razão para a progressão: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print('{} → '.format(termo),end='')
termo += razao
cont += 1
print('Pausa')
mais = int(input('Quantos termos você quer mostrar a mais? '))
print('Progressão finalizada com {} termos mostrados.'.format(total))
|
thaisouza30/Exercicios-Python3-Curso-em-Video
|
ex062.py
|
ex062.py
|
py
| 617 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
39503714070
|
# This file defines the object class for VICON. It is an effective way of storing and
# accessing information about the VICON objects.
import numpy as np
import os
from VICONMath import transformations as transferOp
from VICONFileOperations import rwOperations
class ObjectVicon:
"""The class is object is base class for VICONTrackingObjects and VICONCameraObjects"""
def __init__(self, rotation = [0, 0, 0, 1], translation = [0,0,0], name = "temp"):
"""
Initialize the class, the position orientation and name of object
        :param rotation: list of rotation parameters
:param translation: list of translation parameters
:param name: name of the object
"""
self.rotation = rotation # list of parameters
self.translation = translation # list of parameters
self.name = name
self.featureMat = np.zeros((3,1))
self.featurePoints = []
self.featureList = []
self.featureDict = {}
def clearFeatures(self):
"""
Clear all features
:return: True
"""
self.featureDict = {}
def setTransformationParameters(self, rotation, translation ):
self.rotation = rotation
self.translation = translation
def transferFeaturesToViconSpace(self) :
"""
Transfer the features from Object space to target space
:return: dict (3D features)
"""
        assert (len(self.featureDict) != 0), "Dictionary empty!! No object features to transfer"
featureList = list(self.featureDict.values())
targetPoints = transferOp.transformPoint3D(featureList, self.rotation, self.translation)
targetPoints = targetPoints.tolist()
transferedFeatureDict = {}
for feature,index in zip(self.featureDict,range(len(targetPoints)) ):
transferedFeatureDict[feature] = targetPoints[index]
return transferedFeatureDict
def transferFeaturesToObjectSpace(self, featureDictViconSpace):
"""
Transfer feature points from given coordinate system to object space
:return: dictionary of features
"""
assert (len(featureDictViconSpace) != 0), "Given featurelist is empty"
featureList = list(featureDictViconSpace.values())
targetPoints = transferOp.transformPoint3D(featureList, self.rotation, self.translation, True)
targetPoints = targetPoints.tolist()
transferedFeatureDict = {}
for feature, index in zip(featureDictViconSpace, range(len(targetPoints)) ):
transferedFeatureDict[feature] = targetPoints[index]
return transferedFeatureDict
def transferPointsToObjectSpace(self, pointList):
"""
        Transfers the given list of points from the target (VICON) space to object space using the stored R and T
:return:
"""
invertedPoints = transferOp.transformPoint3D(pointList, self.rotation , self.translation, True) # R_inv . (P-T)
return invertedPoints
def transferPointsFromObjectSpace(self, pointList):
"""
Transfers given list of points from object space to target space using given R and T
:return:
"""
targetPoints = transferOp.transformPoint3D(pointList,self.rotation,self.translation)
return targetPoints
def setFeatures(self, featuresDict):
"""
        Set the given features in the object's feature dict
        :param featuresDict: dict of feature name -> 3D point
        :return: True/False
"""
for feature in featuresDict:
self.featureDict[feature] = featuresDict[feature]
return True
def removeSelectedFeatures(self, features):
"""
Remove the given features
        :param features: iterable of feature names to remove
:return: bool
"""
for feature in features:
if feature in self.featureDict:
del self.featureDict[feature]
return True
    def filterFeaturesBasedOnObject(self, featureDict):
        """
        Filter the feature dict to keep only the features belonging to this object
        (the object name is expected to appear as a substring of the feature label)
        :param featureDict: dict of all features
        :return: dict
        """
        modifiedDict = {}
        for feature in featureDict:
            if self.name in feature:
                modifiedDict[feature] = featureDict[feature]
        return modifiedDict
def readFeaturePointsFromFile(self, path, objectFilter = True):
"""
Reads given *.mp (VICON) file to create feature points for Object
        :param path: Storage path of the feature file (*.mp or *.txt)
:return: True if success
"""
if os.path.exists(path) and ".mp" in path:
featureDict = rwOperations.readFeaturePointsFromViconFile(path)
self.setFeatures(featureDict)
return True
elif os.path.exists(path) and ".txt" in path:
featureDict = rwOperations.readFeaturePointsFromTextFile(path)
if objectFilter:
objectSpecificDict = self.filterFeaturesBasedOnObject(featureDict)
self.setFeatures(objectSpecificDict)
return True
else:
raise ValueError("Given file does not exist ", path)
if __name__ == '__main__':
rotation = [0 , 0 , 0 , 1] # Identity
translation = [0 , 0 , 0 ] # Translation
# Test creating points from file
filePath = "D:/BirdTrackingProject/VICON_DataRead/VICONDrawingOperations\\9mm_02.mp"
objectName = "9mm_02"
viconObject = ObjectVicon(rotation, translation, objectName)
viconObject.readFeaturePointsFromFile(filePath)
viconObject.name = "point"
viconObject.readFeaturePointsFromFile("D:\BirdTrackingProject\VICON_DataRead\VICONFileOperations\\temp.txt")
print("Print features: ", viconObject.featureDict)
# Test creating points without file
pointlist = list(viconObject.featureDict.values())
rotationCam1 = [-0.79697172135980876, 0.009835241003934278, 0.056076936965809787, 0.60132746530298287]
translationCam1 = [-701.064504485933, -6173.2248621199, 1830.24808693825]
cam1Points = [[1415.76135949, - 570.12878565, 7724.00698054],
[1427.61481598, - 588.07764951, 7708.65018457],
[1433.03420332, - 579.43874105, 7728.08209005],
[1439.77318296, - 598.69765411, 7698.82965911]]
viconObject.setTransformationParameters(rotationCam1,translationCam1)
# transfer points to object space
transferedPoints = viconObject.transferPointsFromObjectSpace(cam1Points)
print("TF Points:", transferedPoints)
reversedPoints = viconObject.transferPointsToObjectSpace(transferedPoints)
print("Reversed points:", reversedPoints)
transferedDict = viconObject.transferFeaturesToViconSpace()
print("TF Points:", transferedDict)
reversedDict = viconObject.transferFeaturesToObjectSpace(transferedDict)
print("Reversed points:", reversedDict)
# Test adding feature and clear the features
p = {'1': [0, 0, 0]}
viconObject.setFeatures(p)
print("Print added features: ", viconObject.featureDict)
viconObject.removeSelectedFeatures(p)
print("Print removed features: ", viconObject.featureDict)
viconObject.clearFeatures()
print("Print features after clearance: ", viconObject.featureDict)
|
hmnaik/smartbarn-mocap
|
VICONSystem/objectVicon.py
|
objectVicon.py
|
py
| 7,219 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14132002645
|
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime, Float, UniqueConstraint, Index
from sqlalchemy.orm import relationship
from src.models import Base
class GpsRecord(Base):
__tablename__ = "gps_record"
id = Column(Integer, primary_key=True, index=True)
datetime = Column(DateTime(timezone=True))
latitude = Column(Float())
longitude = Column(Float())
altitude = Column(Float(), nullable=True)
accuracy = Column(Float(), nullable=True)
vertical_accuracy = Column(Float(), nullable=True)
description = Column(String, nullable=True)
device = Column(String(length=128))
app = Column(String(length=128))
user = Column(String(length=32), default='castel')
distance = Column(Float())
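# Secondary indexes, defined after the class so the mapped columns exist; they cover the common
# per-device / per-app lookups ordered by newest datetime first.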
Index('device_records', GpsRecord.datetime, GpsRecord.device, GpsRecord.app, GpsRecord.user, unique=True)
Index('desc_date_per_app', GpsRecord.datetime.desc(), GpsRecord.device, GpsRecord.app)
Index('per_app', GpsRecord.device, GpsRecord.app)
Index('desc_date_per_device', GpsRecord.datetime.desc(), GpsRecord.device)
Index('desc_date', GpsRecord.datetime.desc())
|
jmcastellote/whereabouts
|
src/gps_record/model.py
|
model.py
|
py
| 1,126 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15912701941
|
"""
Class to deal with the pooling problem (differing amounts of tweets for various days)
"""
import torch
from torch import nn, tensor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#device = torch.device("cpu")
# TODO: should the device be configurable here, so the data is guaranteed to be processed on the
# right device? (selecting the device at module level is questionable practice)
class pooling(nn.Module):
"""A pooling class, so that the forward pass in this variation of the teanet model is lest complex
Also, perhaps it can be trained separately?
"""
def __init__(self, dim, lag):
super(pooling, self).__init__()
self.dim = dim
self.lag = lag
# multiple pooling layers? A feed forward neural network?
self.adaptive_pooling = nn.AdaptiveMaxPool2d((1, dim))
def forward(self, input):
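        # input[0] is assumed to hold, for each sample in the batch, `lag` days of tweet embeddings with a
        # variable number of tweets per day; each day is max-pooled down to a single `dim`-sized vector.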
batch_of_tweets = None
for x_val in input[0]:
processed_tweets = None
# iterate through the days in the x_value
for day in x_val:
processed = self.adaptive_pooling(day.view(1, day.shape[0], day.shape[1]))
                if processed_tweets is None:
processed_tweets = processed
else:
processed_tweets = torch.cat((processed_tweets, processed), 1)
            if batch_of_tweets is None:
batch_of_tweets = processed_tweets.view(1, self.lag, self.dim)
else:
batch_of_tweets = torch.cat((batch_of_tweets, processed_tweets.view(1, self.lag, self.dim)), 0)
return batch_of_tweets.to(device)
|
biirving/michinaga
|
src/utils/pooling.py
|
pooling.py
|
py
| 1,583 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32898453966
|
from __future__ import print_function
import unittest
import numpy
import irbasis
from irbasis_util.two_point_basis import *
from irbasis_util.four_point import *
from irbasis_util.internal import *
from irbasis_util.regression import *
from atomic_limit import *
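# Single-pole Green's functions evaluated at fermionic ((2n+1)*pi/beta) and bosonic (2n*pi/beta)
# Matsubara frequencies.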
def G1_iw_pole_f(n, pole, beta):
return 1/(1J * (2 * n + 1) * numpy.pi / beta - pole)
def G1_iw_pole_b(n, pole, beta):
return 1/(1J * (2 * n) * numpy.pi / beta - pole)
def _compute_G4pt_iw(beta, pole, r, nvec):
if r == 0:
return G1_iw_pole_f(nvec[0], pole, beta) * G1_iw_pole_f(nvec[1], pole, beta) * G1_iw_pole_f(nvec[2], pole, beta)
elif r == 1:
return G1_iw_pole_f(nvec[0], pole, beta) * G1_iw_pole_f(nvec[1], pole, beta) * G1_iw_pole_f(nvec[3], pole, beta)
elif r == 4:
return G1_iw_pole_f(nvec[0], pole, beta) * G1_iw_pole_b(nvec[0] + nvec[1] + 1, pole, beta)\
* G1_iw_pole_f(-nvec[3] - 1, pole, beta)
elif r == 5:
return G1_iw_pole_f(nvec[0], pole, beta) * G1_iw_pole_b(nvec[0] + nvec[1] + 1, pole, beta) \
* G1_iw_pole_f(-nvec[2] - 1, pole, beta)
else:
raise RuntimeError("Not supported")
def _outer_product(Gl1, Gl2, Gl3):
tensor12 = Gl1[:, numpy.newaxis] * Gl2[numpy.newaxis, :]
return tensor12[:, :, numpy.newaxis] * Gl3[numpy.newaxis, numpy.newaxis, :]
def _compute_G4pt_l(b4pt, pole, r):
Nl = b4pt.Nl
Gl = numpy.zeros((16, Nl, Nl, Nl))
if r >= 0 and r <= 3:
Gl[r, :, :, :] = _outer_product(
Gl_pole(b4pt.basis_beta_f, pole)[:Nl],
Gl_pole(b4pt.basis_beta_f, pole)[:Nl],
Gl_pole(b4pt.basis_beta_f, pole)[:Nl])
elif r <= 15:
Gl[r, :, :, :] = _outer_product(
Gl_pole(b4pt.basis_beta_f, pole)[:Nl],
Gl_pole(b4pt.basis_beta_b, pole)[:Nl],
Gl_pole(b4pt.basis_beta_f, pole)[:Nl])
else:
raise RuntimeError("Not supported")
return Gl
class TestMethods(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMethods, self).__init__(*args, **kwargs)
def test_matsubara(self):
Lambda = 10.0
beta = 0.2
wmax = Lambda/beta
b4pt = FourPoint(Lambda, beta, 1e-12)
Nl = b4pt.Nl
pole = 0.2 * wmax
n1, n2, n3 = 1, 2, 4
n4 = - n1 - n2 - n3 - 2
prj = b4pt.projector_to_matsubara(n1, n2, n3, n4)
# Test No. 1, 2, 5, 6
for r in [0, 1, 4, 5]:
giw_4pt = _compute_G4pt_iw(beta, pole, r, numpy.array([n1, n2, n3, n4]))
gl_4pt = _compute_G4pt_l(b4pt, pole, r)
diff = numpy.abs(numpy.sum(prj * gl_4pt) - giw_4pt)
self.assertLess(diff, 1e-10)
def test_sampling_points_matsubara(self):
Lambda = 10.0
beta = 0.2
alpha = 1e-15
augmented = True
wmax = Lambda / beta
b4pt = FourPoint(Lambda, beta, 1e-2, augmented)
Nl = b4pt.Nl
whichl = Nl - 1
pole = 0.2 * wmax
# build the sampling frequency structure
sp = b4pt.sampling_points_matsubara(whichl)
S = b4pt.normalized_S()
n_sp = len(sp)
prj = numpy.array(b4pt.projector_to_matsubara_vec(sp))[:, :, :, :]
prj_mat = prj[:, :, :, :].reshape((n_sp, 16 * Nl**3))
print(Nl, prj_mat.shape)
# Build the check frequency structure
n1234_check = []
niw = 100
for i, j, k in product(range(-niw, niw, 10), repeat=3):
n1234_check.append((i, j, k, - i - j - k - 2))
prj_check = numpy.array(b4pt.projector_to_matsubara_vec(n1234_check))[:, :, :, :]
# Test No. 1, 2, 5, 6
for r in [0, 1, 4, 5]:
Giwn = numpy.array([ _compute_G4pt_iw(beta, pole, r, n1234) for n1234 in sp])
Giwn_check_ref = numpy.array([_compute_G4pt_iw(beta, pole, r, n1234) for n1234 in n1234_check])
coeffs = ridge_complex(prj_mat, Giwn, alpha).reshape((16, Nl, Nl, Nl))
Giwn_check = numpy.dot(prj_check.reshape((len(n1234_check), 16 * Nl**3)), (coeffs).reshape((16 * Nl**3)))
self.assertLessEqual(numpy.amax(numpy.abs(Giwn_check - Giwn_check_ref)), 1e-2)
def test_transformation_to_PH(self):
self.assertEqual(to_PH_convention(from_PH_convention( (0,1,2) )), (0,1,2))
if __name__ == '__main__':
unittest.main()
|
shinaoka/irbasis_utility
|
test/python/four_point_test.py
|
four_point_test.py
|
py
| 4,357 |
python
|
en
|
code
| 6 |
github-code
|
6
|
74884399227
|
# -*- coding: utf-8 -*-
import re
import sqlite3
from collections import defaultdict
import requests
import gnupg
class OTCDB(object):
gpg_file = 'GPG.db'
rating_file = 'RatingSystem.db'
def __init__(self, path):
self.path = path
self.trusted = {}
def open_db(self):
gpg_path = '{0}/{1}'.format(self.path, self.gpg_file)
self.gdb = sqlite3.connect(gpg_path, check_same_thread=False)
self.gdb.row_factory = sqlite3.Row
rating_path = '{0}/{1}'.format(self.path, self.rating_file)
self.rdb = sqlite3.connect(rating_path, check_same_thread=False)
self.rdb.row_factory = sqlite3.Row
def close_db(self):
self.gdb.close()
self.rdb.close()
def update_db(self):
g = self.update_rating_db()
r = self.update_gpg_db()
def update_rating_db(self):
filename = '{0}/{1}'.format(self.path, self.rating_file)
url = 'http://bitcoin-otc.com/otc/RatingSystem.db'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r.iter_content(10*1024):
f.write(chunk)
return True
else:
return False
def update_gpg_db(self):
filename = '{0}/{1}'.format(self.path, self.gpg_file)
url = 'http://bitcoin-otc.com/otc/GPG.db'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r.iter_content(10*1024):
f.write(chunk)
return True
else:
return False
def update_trust(self):
self.update_db()
self.open_db()
new_trust = self.assbot_trust()
self.close_db()
return new_trust
def trust_diff(self, old, new):
new_keys = set(new.keys())
old_keys = set(old.keys())
added_keys = list(new_keys - old_keys)
removed_keys = list(old_keys - new_keys)
# add metadata in removed list before we throw it away
removed_keys = [(rk, old[rk][0]) for rk in removed_keys]
return (added_keys, removed_keys)
def assbot_trust(self):
assbot_ratings = defaultdict(int)
trusted = {}
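        # Ratings given by assbot (user id 5506) or by anyone assbot has rated, i.e. a one-hop web of trust.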
sel = """SELECT nick, rated_user_id, rater_user_id, rating FROM ratings
JOIN users ON ratings.rated_user_id = users.id
WHERE rater_user_id = 5506 OR rater_user_id IN (SELECT rated_user_id FROM ratings WHERE rater_user_id = 5506)
"""
cursor = self.rdb.cursor()
cursor.execute(sel)
results = cursor.fetchall()
for row in results:
add = 1 if row['rating'] > 0 else -1
assbot_ratings[ row['nick'] ] += add
selkey = 'SELECT fingerprint FROM users WHERE lower(nick) = ? AND fingerprint IS NOT NULL'
gcursor = self.gdb.cursor()
for nick in assbot_ratings:
if assbot_ratings[nick] > 0:
row = gcursor.execute(selkey, (nick.lower(),)).fetchone()
if row:
trusted[ row['fingerprint'] ] = (nick, assbot_ratings[nick])
return trusted
## make gpg pubring
class GPGManager(object):
def __init__(self, gpghome, keyserver=None):
self.gpghome = gpghome
self.keyserver = keyserver if keyserver else 'hkp://pool.sks-keyservers.net'
self.notfound = re.compile('key ([^ ]+) not found on keyserver')
self.gpg = gnupg.GPG(homedir=gpghome)
self.gpg.keyserver = self.keyserver
def recv_keys(self, fingerprints, batch=10):
not_found = []
imported = 0
for chunk in chunks(fingerprints, batch):
r = self.gpg.recv_keys(*chunk)
missing = self.notfound.findall(r.data)
for k in missing:
if k in chunk: not_found.append(k)
imported += r.counts['imported']
return (imported, not_found)
def verify(self, data):
return self.gpg.verify(data)
def delete_keys(self, fingerprints):
return self.gpg.delete_keys(fingerprints)
def chunks(l, n):
""" Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
yield l[i:i+n]
|
extempore/deedbundler
|
deedbundler/otc.py
|
otc.py
|
py
| 3,668 |
python
|
en
|
code
| 8 |
github-code
|
6
|
31460291272
|
'''
Color feature vector
        ||
(weighted color histogram vector  concatenated with  weighted color moment vector)
        //                              \\
normalized color histogram vector   normalized color moment vector
        //                              \\
color histogram vector (this file)   color moment vector

This module computes the color histogram vector.
'''
import tensorflow as tf
'''
The 3600 mentioned below is only an assumed batch size; the number of images can vary.
'''
def getColorStraight(rawImagesList):
    picsStraightR = []  # R components of the color histogram vectors for the 3600 images, 3600x86
    picsStraightG = []  # G components of the color histogram vectors for the 3600 images, 3600x86
    picsStraightB = []  # B components of the color histogram vectors for the 3600 images, 3600x86
    colorStraightList = []  # color histogram vectors, 3600x258: 3600 images with 258 histogram features each
    picsStraightRGBList = [picsStraightR, picsStraightG, picsStraightB]  # holds the R/G/B results filled in below
for i in range(3):
        # Take the R/G/B channel of all 3600 images at once; following the patent, the central 112x112 crop is used as the main image
majorImagesList = rawImagesList[:, 56:168, 56:168, i]
for j in range(86):
            # tempListForRGB = []  # would hold the per-channel data while computing each of R/G/B, 3600x86
if j == 0:
                # First count how many zeros there are
                totoalZero = 0
                majorImagesListCmpWithZero = tf.math.equal(majorImagesList, 0) # True wherever the value is 0
                out = tf.cast(majorImagesListCmpWithZero, dtype=tf.float32) # True becomes 1, False becomes 0
                # Count the zeros
                '''
                What the next two lines do: `out` has shape (3600, 112, 112), i.e. 3600 images of 112x112,
                while a single image has shape (1, 112, 112), so the zeros have to be counted per image.
                The resulting totoalZero has shape (3600,): one number per image, the count of zeros in it.
                It is then appended to the per-channel list, so picsStraightR/G/B are first built as
                86x3600 matrices and transposed later.
                '''
                totoalZero = tf.reduce_sum(out, axis = 1)
                totoalZero = tf.reduce_sum(totoalZero, axis = 1)
                picsStraightRGBList[i].append(totoalZero) # put the zero count in the first slot of the list
            else: # count the non-zero bins
                '''
                Following the patent, 0-255 is split into 85 bins, i.e. every 3 values form one bin, and a value
                sitting on a bin boundary is assigned to the bin on its left. Every value must satisfy
                j < x <= j+2, e.g. 1 < x <= 3. When j is 1, however, 0 lies to the left of 1 so there is no
                left bin, and 1 is therefore counted into the bin on its right.
                '''
if j == 1:
                    # When j is 1, count it into the bin on its right, i.e. the [1-3] bin
                    majorImagesListGT = tf.math.greater_equal(majorImagesList, (j * 3) - 2)
                else:
                    # When j is not 1, use an open lower bound
                    majorImagesListGT = tf.math.greater(majorImagesList, (j * 3) - 2)
                # Boundary values go to the bin on their left: with bins [1-3], [4-6], a boundary value of 4 is counted into [1-3], and so on
                majorImagesListLE = tf.math.less(majorImagesList, (j * 3) + 1)
                # No single expression like x > i && x <= i*3 is used; both comparisons are computed separately and then combined
                # majorImagesListGT and majorImagesListLE are bool tensors; a value is in the bin only when both are True
                # (tf.math.equal acts as a logical AND here because the two comparisons can never both be False at once)
                majorImagesGTAndLE = tf.math.equal(majorImagesListGT, majorImagesListLE)
                # Convert to float
                out = tf.cast(majorImagesGTAndLE, dtype=tf.float32)
                # Count how many values satisfy (j*3)-2 < x < (j*3)+1
                sumMajorImagesGTAndLE = tf.reduce_sum(out, axis=1)
                sumMajorImagesGTAndLE = tf.reduce_sum(sumMajorImagesGTAndLE, axis=1)
                picsStraightRGBList[i].append(sumMajorImagesGTAndLE) # store the counts for this bin
    picsStraightR = tf.convert_to_tensor(picsStraightRGBList[0])  # gives an 86x3600 matrix
    picsStraightR = tf.transpose(picsStraightR)  # transpose to 3600x86
picsStraightG = tf.convert_to_tensor(picsStraightRGBList[1])
picsStraightG = tf.transpose(picsStraightG)
picsStraightB = tf.convert_to_tensor(picsStraightRGBList[2])
picsStraightB = tf.transpose(picsStraightB)
    # Concatenate the three 3600x86 matrices into 3600x258: this is the color histogram vector
colorStraightList = tf.concat([picsStraightR,picsStraightG,picsStraightB],axis=1)
return colorStraightList
|
heqisen199966/pythonProject2
|
work2/colorStraight.py
|
colorStraight.py
|
py
| 5,049 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
3035513185
|
# given two strings A and B, write a function to return a list of
# all start indices within A where the substring of A is an
# anagram of B. for example, if A = "abcdcbac" and B = "abc" then
# you want to return [0,4,5] since those are the starting indices
# of substrings of A that are anagrams of B
A = 'abcdcbac'
B = 'abc'
B_hash = {}
for n in B:
B_hash.update({n:True})
indices = []
for i,m in enumerate(A):
if i<=(len(A)-len(B)):
curr_window = A[i:i+len(B)]
A_count = {}
for n in curr_window:
if B_hash.get(n)==True:
if A_count.get(n)==None:
A_count.update({n:1})
else:
A_count.update({n:A_count[n]+1})
else:
A_count = {}
        if len(A_count)==len(B_hash):  # assumes B has no repeated characters (true for B = 'abc')
indices.append(i)
print(indices)
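
# A minimal alternative sketch (using the same A and B defined above): compare multiset counts with
# collections.Counter so that strings with repeated characters are also handled correctly.
from collections import Counter

target = Counter(B)
counter_indices = [i for i in range(len(A) - len(B) + 1) if Counter(A[i:i + len(B)]) == target]
print(counter_indices)  # expected to match `indices` above for this example: [0, 4, 5]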
|
estimatrixPipiatrix/decision-scientist
|
key_algos/anagram_substring.py
|
anagram_substring.py
|
py
| 838 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17286729620
|
import cv2
import imutils
import numpy as np
cv2.namedWindow("MyImage")
img = cv2.imread("img.jpg")
# translated = imutils.translate(img, 25, -75)
# rotated = imutils.rotate(img, 45)
img = imutils.resize(img, width=600)
# url_images = imutils.url_to_image(
# "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_92x30dp.png")
kernel = np.ones((4, 4))
# conv = cv2.filter2D(img, cv2.CV_16U, kernel)
blur = cv2.GaussianBlur(img, (5, 5), 1)
cv2.imshow("MyImage", blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
osoulim/Computer-Vision
|
Python/Week3/pre_proccess.py
|
pre_proccess.py
|
py
| 532 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17573160892
|
from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import connectToMySQL
import json
app = Flask(__name__)
@app.route("/")
def index():
mysql = connectToMySQL("leads_and_clients_db")
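    # Count the number of leads per client by joining leads to clients and grouping on client_id.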
query = "SELECT concat(clients.first_name, ' ', clients.last_name) as name, count(leads.client_id) as leads FROM clients JOIN leads ON leads.client_id = clients.id GROUP BY leads.client_id"
all_leads = mysql.query_db(query)
print(all_leads)
total_leads = 0
for each in all_leads:
total_leads += int(each['leads'])
print("*************************total number of leads**********************",total_leads)
percentage = int(all_leads[0]["leads"])/total_leads
print(round(percentage, 3))
return render_template("index.html", template_all_leads = all_leads, template_total_leads = total_leads, json_leads = map(json.dumps, all_leads))
if __name__ == "__main__":
app.run(debug = True)
|
aaronfennig/pythonDjango
|
flask/flask_mysql/leads_and_clients/server.py
|
server.py
|
py
| 963 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40510581064
|
# The point counts through which the algorithm detects the object's shape are hard-coded here
myVar = {
'1': {'Points': 3, 'Form': 'Triunghi'},
'2': {'Points': 4, 'Form': ['Patrat', 'Dreptunghi']},
'3': {'Points': 5, 'Form': 'Pentagon'},
'4': {'Points': 6, 'Form': 'Hexagon'},
'5': {'Points': 7, 'Form': 'Heptagon'},
'6': {'Points': 8, 'Form': 'Cerc'},
'7': {'Points': 12, 'Form': 'Patrat'},
'8': {'Points': 10, 'Form': 'Stea'}
}
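# These counts are presumably the vertex counts of an approximated contour (e.g. from cv2.approxPolyDP);
# note that 8 vertices map to 'Cerc' (circle), 10 to 'Stea' (star) and 12 to 'Patrat' (square) here.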
# Declare a function that returns the name of the object
def getNameOfShape(pointNumber, w, h):
for var in myVar:
        # Check whether the pointNumber variable equals 4
if pointNumber == myVar[var]['Points'] and pointNumber == 4:
            # If it does, check the aspect ratio to decide whether the object is a square or a rectangle
if ((float(w)/h) >= 0.95 and (float(w)/h) <= 1.05):
return myVar[var]['Form'][0]
else:
return myVar[var]['Form'][1]
        # If pointNumber is not 4 but equals another shape's point count in the dict, return that shape's name
elif pointNumber == myVar[var]['Points']:
return myVar[var]['Form']
return 0
|
ConstantinescuAndrei/ShapeDetection
|
shapeList.py
|
shapeList.py
|
py
| 1,233 |
python
|
ro
|
code
| 0 |
github-code
|
6
|
14852199031
|
# coding: utf-8
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion, label_smoothed_nll_loss
def root_mean_squared_difference(v1, v2, clip_max=0.3):
diff = v1 - v2
# rmsd = diff
mean_squared = torch.mean(torch.mul(diff, diff), -1)
rmsd = torch.sqrt(mean_squared + 1e-9) # To avoid NaN caused by sqrt(0).
if clip_max > 0:
rmsd = torch.clamp(rmsd, 0, clip_max)
return rmsd
@register_criterion('tcvae_spacefusion_loss')
class TCVAESpaceFusionCriterion(LabelSmoothedCrossEntropyCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.eps = args.label_smoothing
self.bow_loss_weight = args.bow_loss_weight
self.interp_loss_weight = args.interp_loss_weight
self.fuse_loss_weight = args.fuse_loss_weight
self.euclidean_distance_clip = args.euclidean_distance_clip
if args.bow_loss_weight > 0:
tgt_dict = task.tgt_dict
class_weight = torch.ones(len(tgt_dict))
class_weight[tgt_dict.pad_index] = 0
class_weight[tgt_dict.eos_index] = 0
self.bow_loss_fc = torch.nn.CrossEntropyLoss(
weight=class_weight, ignore_index=tgt_dict.pad_index)
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--interp-loss-weight',
default=1, type=float, help='alpha')
parser.add_argument('--fuse-loss-weight',
default=30, type=float, help='beta')
parser.add_argument('--euclidean-distance-clip',
default=0.3, type=float)
def compute_ce_loss(self, model, net_output, sample, reduce=True):
nto = net_output[0]
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = target.reshape(-1, 1)
loss, nll_loss, = label_smoothed_nll_loss(
lprobs, target, self.eps, ignore_index=self.padding_idx,
reduce=reduce
)
return loss, nll_loss
def compute_fuse_loss(self, encoder_out, reduce=True):
'''
*NOTE*
The fuse_loss is not divided by the batch size to make the scale equal to other losses.
The reduction method used in Fairseq is summation over examples in a batch and the averaged fuse_loss over batch is computed only in aggregate_logging_outputs().
'''
prior_out = encoder_out['prior_out']
post_out = encoder_out['post_out']
batch_size = prior_out.shape[0]
# Make z_s2s[i] and z_AE[i] close.
fuse1 = root_mean_squared_difference(
prior_out, post_out,
clip_max=self.euclidean_distance_clip
)
# Make z_s2s[i] and z_s2s[j] distant.
fuse2 = torch.sum(root_mean_squared_difference(
prior_out.unsqueeze(1),
prior_out.unsqueeze(0),
clip_max=self.euclidean_distance_clip
), -1) / (batch_size - 1)
# Make z_AE[i] and z_AE[j] distant.
fuse3 = torch.sum(root_mean_squared_difference(
post_out.unsqueeze(1),
post_out.unsqueeze(0),
clip_max=self.euclidean_distance_clip
), -1) / (batch_size - 1)
fuse_loss = fuse1 - (fuse2 + fuse3)
if reduce is True:
fuse_loss = fuse_loss.sum()
return fuse_loss
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
encoder_out = model.encoder(**sample['net_input'])
prev_output_tokens = sample['net_input']['prev_output_tokens']
# z_x -> y
prior_decoder_out = model.decoder(prev_output_tokens,
encoder_out=encoder_out,
latent_variable_type='prior_out')
# z_y -> y
post_decoder_out = model.decoder(prev_output_tokens,
encoder_out=encoder_out,
latent_variable_type='post_out')
# u*z_x + (1-u)*z_y -> y
interp_decoder_out = model.decoder(prev_output_tokens,
encoder_out=encoder_out,
latent_variable_type='interp_out')
prior_loss, prior_nll_loss = self.compute_ce_loss(model, prior_decoder_out, sample, reduce=reduce)
post_loss, post_nll_loss = self.compute_ce_loss(model, post_decoder_out, sample, reduce=reduce)
interp_loss, interp_nll_loss = self.compute_ce_loss(model, interp_decoder_out, sample, reduce=reduce)
# d(x_i, y_i) - d(x_i, x_j) - d(y_i, y_j)
fuse_loss = self.compute_fuse_loss(encoder_out, reduce=reduce)
# As T-CVAE optimizes cross-entropy and KLD, cross-entropy loss should be computed at sentence level but not at token level to make the scale of the losses compatible.
assert self.args.sentence_avg == True
if self.args.sentence_avg:
# When args.sentence_avg == True, all losses directly used for optimization are the sum of losses computed at sentence level. This is for a case where other loss is added to the cross-entropy.
sample_size = sample['target'].size(0)
ntokens_per_sent = sample['ntokens'] / sample['target'].size(0)
# The losses are divided by the avg. length of the outputs to make the scales of NLL_loss and other losses equal. They are computed at sentence level.
prior_loss /= ntokens_per_sent
prior_nll_loss /= ntokens_per_sent
post_loss /= ntokens_per_sent
post_nll_loss /= ntokens_per_sent
interp_loss /= ntokens_per_sent
interp_nll_loss /= ntokens_per_sent
else:
sample_size = sample['ntokens']
loss = prior_loss + post_loss + self.interp_loss_weight * interp_loss + self.fuse_loss_weight * fuse_loss
# sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
prior_mean_norm = torch.sum(torch.norm(encoder_out['prior_mean'], dim=-1))
prior_std_norm = torch.sum(torch.norm(encoder_out['prior_std'], dim=-1))
post_mean_norm = torch.sum(torch.norm(encoder_out['post_mean'], dim=-1))
post_std_norm = torch.sum(torch.norm(encoder_out['post_std'], dim=-1))
loss_log = utils.item(loss.data) if reduce else loss.data
logging_output = {
'loss': loss_log,
'nll_loss': utils.item(post_nll_loss.data) if reduce else post_nll_loss.data,
'prior_nll_loss': utils.item(prior_nll_loss.data) if reduce else prior_nll_loss.data,
'post_nll_loss': utils.item(post_nll_loss.data) if reduce else post_nll_loss.data,
'interp_nll_loss': utils.item(interp_nll_loss.data) if reduce else interp_nll_loss.data,
'fuse_loss': utils.item(fuse_loss.data) if reduce else fuse_loss,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
logging_output_latent = {
'prior_mean_norm': utils.item(prior_mean_norm.data) if reduce else prior_mean_norm.data,
'prior_std_norm': utils.item(prior_std_norm.data) if reduce else prior_std_norm.data,
'post_mean_norm': utils.item(post_mean_norm.data) if reduce else post_mean_norm.data,
'post_std_norm': utils.item(post_std_norm.data) if reduce else post_std_norm.data,
}
logging_output.update(logging_output_latent)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
aggregated = {
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'prior_nll_loss': sum(log.get('prior_nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'post_nll_loss': sum(log.get('post_nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'interp_nll_loss': sum(log.get('interp_nll_loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,
'fuse_loss': sum(log.get('fuse_loss', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'prior_mu': sum(log.get('prior_mean_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'post_mu': sum(log.get('post_mean_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'prior_std': sum(log.get('prior_std_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
'post_std': sum(log.get('post_std_norm', 0) for log in logging_outputs) / nsentences if nsentences > 0 else 0.,
}
return aggregated
|
jack-and-rozz/speculative_sampling
|
fairseq/extensions/criterions/spacefusion_loss.py
|
spacefusion_loss.py
|
py
| 10,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15067404413
|
from django.shortcuts import render, redirect
from .forms import Registration, Login
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from .models import *
from .custom_manager import CustomUserManager
import uuid
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.auth.decorators import login_required
# Create your views here.
def registration(request):
if request.user.is_authenticated:
return redirect('home')
form = Registration()
if request.method == "POST":
email = request.POST.get('email')
if CustomUser.objects.filter(email=email).first():
messages.success(request, 'Email is taken.')
return redirect('registration')
try:
auth_token = str(uuid.uuid4())
form = Registration(request.POST)
if form.is_valid():
                subject = 'Your account needs to be verified'
message = f'Welcome to Online Tiffin Service. Thanks for registering on our website. Follow this link to verify your account http://localhost:8000/user/verify/{auth_token}'
email_from = settings.EMAIL_HOST_USER
recipient_list = [email]
res = send_mail(subject, message, email_from, recipient_list)
new_form = form.save(commit=False)
new_form.auth_token = auth_token
new_form.save()
# messages.success(request, 'Registration Successful.')
return redirect('token_send')
except Exception as e:
print(e)
return render(request, 'customuser/registration.html', {'form': form})
def login_view(request):
if request.user.is_authenticated:
return redirect('home')
if request.method == "POST":
form = Login(request.POST)
if request.POST['email'] and request.POST['password']:
email = request.POST['email']
try:
verify_user = CustomUser.objects.get(email=email)
except:
messages.error(request, "User with this email address doesn't exits.")
return redirect('login')
if not verify_user.is_verified:
messages.error(
request, 'Profile is not verified check your mail.')
return redirect('token_send')
if form.is_valid():
password = request.POST['password']
user = authenticate(email=email, password=password)
if user:
login(request, user)
messages.success(request, 'Login Successfully.')
print("Login Successfully.")
return redirect('home')
else:
messages.error(request, 'Invalid username and password.')
return redirect('login')
else:
messages.error(request, 'Fill both field')
return render(request, 'customuser/login.html', {'form': form})
else:
form = Login()
return render(request, 'customuser/login.html', {'form': form})
def userlogout(request):
if request.user.is_authenticated:
logout(request)
messages.success(request, 'Logout Successfully.')
return redirect('home')
return redirect('login')
def success(request):
return render(request, 'customuser/success.html')
def token_send(request):
return render(request, 'customuser/token_send.html')
def verify(request, auth_token):
try:
profile_obj = CustomUser.objects.filter(auth_token=auth_token).first()
if profile_obj:
if profile_obj.is_verified:
messages.success(request, 'Your account is already verified.')
return redirect('login')
profile_obj.is_verified = True
profile_obj.save()
messages.success(request, 'Your account has been verified.')
return redirect('login')
else:
return redirect('/error')
except Exception as e:
print(e)
return redirect('/')
def error_page(request):
return render(request, 'error.html')
|
leenabadgujar/Online_Tiffin_Service
|
CustomUser/views.py
|
views.py
|
py
| 4,317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
86572452967
|
#!/usr/bin/env python3
"""
Check for and replace aliases with their new names from vk.xml
"""
import argparse
import pathlib
import subprocess
import sys
import xml.etree.ElementTree as et
THIS_FILE = pathlib.Path(__file__)
CWD = pathlib.Path.cwd()
VK_XML = THIS_FILE.parent / 'vk.xml'
EXCLUDE_PATHS = [
VK_XML.relative_to(CWD).as_posix(),
# These files come from other repos, there's no point checking and
# fixing them here as that would be overwritten in the next sync.
'src/amd/vulkan/radix_sort/',
'src/virtio/venus-protocol/',
]
def get_aliases(xml_file: pathlib.Path):
"""
Get all the aliases defined in vk.xml
"""
xml = et.parse(xml_file)
for node in ([]
+ xml.findall('.//enum[@alias]')
+ xml.findall('.//type[@alias]')
+ xml.findall('.//command[@alias]')
):
# Some renames only apply to some APIs
if 'api' in node.attrib and 'vulkan' not in node.attrib['api'].split(','):
continue
yield node.attrib['name'], node.attrib['alias']
def remove_prefix(string: str, prefix: str):
"""
Remove prefix if string starts with it, and return the full string
otherwise.
"""
if not string.startswith(prefix):
return string
return string[len(prefix):]
# Function from https://stackoverflow.com/a/312464
def chunks(lst: list, n: int):
"""
Yield successive n-sized chunks from lst.
"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def main(paths: list[str]):
"""
Entrypoint; perform the search for all the aliases and replace them.
"""
def prepare_identifier(identifier: str) -> str:
prefixes_seen = []
for prefix in [
# Various macros prepend these, so they will not appear in the code using them.
# List generated using this command:
# $ prefixes=$(git grep -woiE 'VK_\w+_' -- src/ ':!src/vulkan/registry/' | cut -d: -f2 | sort -u)
# $ for prefix in $prefixes; do grep -q $prefix src/vulkan/registry/vk.xml && echo "'$prefix',"; done
# (the second part eliminates prefixes used only in mesa code and not upstream)
'VK_BLEND_FACTOR_',
'VK_BLEND_OP_',
'VK_BORDER_COLOR_',
'VK_COMMAND_BUFFER_RESET_',
'VK_COMMAND_POOL_RESET_',
'VK_COMPARE_OP_',
'VK_COMPONENT_SWIZZLE_',
'VK_DESCRIPTOR_TYPE_',
'VK_DRIVER_ID_',
'VK_DYNAMIC_STATE_',
'VK_FORMAT_',
'VK_IMAGE_ASPECT_MEMORY_PLANE_',
'VK_IMAGE_ASPECT_PLANE_',
'VK_IMAGE_USAGE_',
'VK_NV_',
'VK_PERFORMANCE_COUNTER_UNIT_',
'VK_PIPELINE_BIND_POINT_',
'VK_SAMPLER_ADDRESS_MODE_',
'VK_SHADER_STAGE_TESSELLATION_',
'VK_SHADER_STAGE_',
'VK_STENCIL_OP_',
'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_',
'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_',
'VK_STRUCTURE_TYPE_',
'VK_USE_PLATFORM_',
'VK_VERSION_',
# Many places use the identifier without the `vk` prefix
# (eg. with the driver name as a prefix instead)
'VK_',
'Vk',
'vk',
]:
# The order matters! A shorter substring will match before a longer
# one, hiding its matches.
for prefix_seen in prefixes_seen:
assert not prefix.startswith(prefix_seen), f'{prefix_seen} must come before {prefix}'
prefixes_seen.append(prefix)
identifier = remove_prefix(identifier, prefix)
return identifier
aliases = {}
for old_name, alias_for in get_aliases(VK_XML):
old_name = prepare_identifier(old_name)
alias_for = prepare_identifier(alias_for)
aliases[old_name] = alias_for
print(f'Found {len(aliases)} aliases in {VK_XML.name}')
# Some aliases have aliases
recursion_needs_checking = True
while recursion_needs_checking:
recursion_needs_checking = False
for old, new in aliases.items():
if new in aliases:
aliases[old] = aliases[new]
recursion_needs_checking = True
# Doing the whole search in a single command breaks grep, so only
# look for 500 aliases at a time. Searching them one at a time would
# be extremely slow.
files_with_aliases = set()
for aliases_chunk in chunks([*aliases], 500):
grep_cmd = [
'git',
'grep',
'-rlP',
'|'.join(aliases_chunk),
] + paths
search_output = subprocess.run(
grep_cmd,
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
).stdout.decode()
files_with_aliases.update(search_output.splitlines())
def file_matches_path(file: str, path: str) -> bool:
# if path is a folder; match any file within
if path.endswith('/') and file.startswith(path):
return True
return file == path
for excluded_path in EXCLUDE_PATHS:
files_with_aliases = {
file for file in files_with_aliases
if not file_matches_path(file, excluded_path)
}
if not files_with_aliases:
print('No alias found in any file.')
sys.exit(0)
print(f'{len(files_with_aliases)} files contain aliases:')
print('\n'.join(f'- {file}' for file in sorted(files_with_aliases)))
command = [
'sed',
'-i',
";".join([f's/{old}/{new}/g' for old, new in aliases.items()]),
]
command += files_with_aliases
subprocess.check_call(command, stderr=subprocess.DEVNULL)
print('All aliases have been replaced')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('paths',
nargs=argparse.ZERO_OR_MORE,
default=['src/'],
help='Limit script to these paths (default: `src/`)')
args = parser.parse_args()
main(**vars(args))
|
sailfishos-mirror/mesa
|
src/vulkan/registry/update-aliases.py
|
update-aliases.py
|
py
| 6,150 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72331237629
|
import datacube
from datacube.api import *
# basic stuff.
from collections import defaultdict
import time
from datetime import datetime
import json
# dc data comes out as xray arrays
import xarray as xr
import xarray.ufuncs
# gdal related stuff.
import gdal
from gdalconst import *
# np for arrays
import numpy as np
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date: 2016-08-05
class DataAccessApi:
"""
Class that provides wrapper functionality for the DataCube.
"""
dc = None
api = None
# defaults for all the required fields.
product_default = 'ls7_ledaps'
platform_default = 'LANDSAT_7'
def __init__(self):
# using both the datacube object and the api.
# dc is useful for all data access, api is only really used for metadata
# fetching.
# hardcoded config location. could parameterize.
self.dc = datacube.Datacube(config='/home/localuser/Datacube/data_cube_ui/config/.datacube.conf')
#self.dc = datacube.Datacube()
self.api = datacube.api.API(datacube=self.dc)
"""
query params are defined in datacube.api.query
"""
def get_dataset_by_extent(self, product, product_type=None, platform=None, time=None,
longitude=None, latitude=None, measurements=None, output_crs=None, resolution=None):
"""
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.)
Args:
product (string): The name of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platform (string): The platform associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
            latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before its returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
Returns:
data (xarray): dataset with the desired data.
"""
# there is probably a better way to do this but I'm not aware of it.
query = {}
if product_type is not None:
query['product_type'] = product_type
if platform is not None:
query['platform'] = platform
if time is not None:
query['time'] = time
if longitude is not None and latitude is not None:
query['longitude'] = longitude
query['latitude'] = latitude
data = self.dc.load(product=product, measurements=measurements,
output_crs=output_crs, resolution=resolution, **query)
# data = self.dc.load(product=product, product_type=product_type, platform=platform, time=time, longitude=longitude,
# latitude=latitude, measurements=measurements, output_crs=output_crs,
# resolution=resolution)
return data
def get_dataset_tiles(self, product, product_type=None, platform=None, time=None,
longitude=None, latitude=None, measurements=None, output_crs=None, resolution=None):
"""
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.)
Args:
product (string): The name of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platform (string): The platform associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
            latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before its returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
Returns:
data (xarray): dataset with the desired data in tiled sections.
"""
# there is probably a better way to do this but I'm not aware of it.
query = {}
if product_type is not None:
query['product_type'] = product_type
if platform is not None:
query['platform'] = platform
if time is not None:
query['time'] = time
if longitude is not None and latitude is not None:
query['longitude'] = longitude
query['latitude'] = latitude
#set up the grid workflow
gw = GridWorkflow(self.dc.index, product=product)
#dict of tiles.
request_tiles = gw.list_cells(product=product, measurements=measurements,
output_crs=output_crs, resolution=resolution, **query)
"""
tile_def = defaultdict(dict)
for cell, tiles in request_tiles.items():
for time, tile in tiles.items():
tile_def[cell, time]['request'] = tile
keys = list(tile_def)
data_tiles = {}
for key in keys:
tile = tile_def[key]['request']
data_tiles[key[0]] = gw.load(key[0], tile)
"""
#cells now return stacked xarrays of data.
data_tiles = {}
for tile_key in request_tiles:
tile = request_tiles[tile_key]
data_tiles[tile_key] = gw.load(tile, measurements=measurements)
return data_tiles
def get_scene_metadata(self, platform, product, longitude=None, latitude=None, crs=None, time=None):
"""
Gets a descriptor based on a request.
Args:
platform (string): Platform for which data is requested
            product (string): Product for which data is requested
            longitude (tuple): Tuple of min,max floats for longitude
            latitude (tuple): Tuple of min,max floats for latitude
crs (string): Describes the coordinate system of params lat and long
time (tuple): Tuple of start and end datetimes for requested data
Returns:
scene_metadata (dict): Dictionary containing a variety of data that can later be
accessed.
"""
descriptor_request = {}
if platform is not None:
descriptor_request['platform'] = platform
if longitude is not None and latitude is not None:
dimensions = {}
longitude_dict = {}
latitude_dict = {}
time_dict = {}
longitude_dict['range'] = longitude
latitude_dict['range'] = latitude
if crs is not None:
longitude_dict['crs'] = crs
latitude_dict['crs'] = crs
dimensions['longitude'] = longitude_dict
dimensions['latitude'] = latitude_dict
if time is not None:
time_dict['range'] = time
dimensions['time'] = time_dict
descriptor_request['dimensions'] = dimensions
descriptor = self.api.get_descriptor(descriptor_request=descriptor_request)
scene_metadata = {}
if product in descriptor and len(descriptor[product]['result_min']) > 2:
scene_metadata['lat_extents'] = (descriptor[product]['result_min'][1], descriptor[product]['result_max'][1])
scene_metadata['lon_extents'] = (descriptor[product]['result_min'][2], descriptor[product]['result_max'][2])
scene_metadata['time_extents'] = (descriptor[product]['result_min'][0], descriptor[product]['result_max'][0])
scene_metadata['tile_count'] = len(descriptor[product]['storage_units'])
scene_metadata['scene_count'] = descriptor[product]['result_shape'][0]
scene_metadata['pixel_count'] = descriptor[product]['result_shape'][1] * descriptor[product]['result_shape'][2]
scene_metadata['storage_units'] = descriptor[product]['storage_units']
else:
scene_metadata = {'lat_extents': (0,0), 'lon_extents': (0,0), 'time_extents': (0,0), 'tile_count': 0, 'scene_count': 0, 'pixel_count': 0, 'storage_units': {}}
return scene_metadata
def list_acquisition_dates(self, platform, product, longitude=None, latitude=None, crs=None, time=None):
"""
Get a list of all acquisition dates for a query.
Args:
platform (string): Platform for which data is requested
            product (string): Product for which data is requested
            longitude (tuple): Tuple of min,max floats for longitude
            latitude (tuple): Tuple of min,max floats for latitude
crs (string): Describes the coordinate system of params lat and long
time (tuple): Tuple of start and end datetimes for requested data
Returns:
times (list): Python list of dates that can be used to query the dc for single time
sliced data.
"""
metadata = self.get_scene_metadata(platform, product, longitude=longitude, latitude=latitude, crs=crs, time=time)
#gets a list of times, corrected for utc offset.
# (unit[0] + unit[0].utcoffset()) if unit[0].utcoffset() else
times = set([unit[0] for unit in metadata['storage_units'].keys()])
return sorted(times)
def get_datacube_metadata(self, platform, product):
"""
Gets some details on the cube and its contents.
Args:
platform (string): Desired platform for requested data.
product (string): Desired product for requested data.
Returns:
datacube_metadata (dict): a dict with multiple keys containing relevant metadata.
"""
descriptor = self.api.get_descriptor({'platform': platform})
datacube_metadata = {}
if product in descriptor:
datacube_metadata['lat_extents'] = (descriptor[product]['result_min'][1], descriptor[product]['result_max'][1])
datacube_metadata['lon_extents'] = (descriptor[product]['result_min'][2], descriptor[product]['result_max'][2])
datacube_metadata['time_extents'] = (descriptor[product]['result_min'][0], descriptor[product]['result_max'][0])
datacube_metadata['tile_count'] = len(descriptor[product]['storage_units'])
datacube_metadata['scene_count'] = descriptor[product]['result_shape'][0]
datacube_metadata['pixel_count'] = descriptor[product]['result_shape'][1] * descriptor[product]['result_shape'][2]
else:
datacube_metadata = {'lat_extents': (0,0), 'lon_extents': (0,0), 'time_extents': (0,0), 'tile_count': 0, 'scene_count': 0, 'pixel_count': 0}
return datacube_metadata
|
ceos-seo/Data_Cube_v2
|
ui/django_site_v2/data_cube_ui/utils/data_access_api.py
|
data_access_api.py
|
py
| 11,474 |
python
|
en
|
code
| 26 |
github-code
|
6
|
34953476775
|
from django.shortcuts import render_to_response
from read_num.models import get_seven_read_data, get_today_hot_data, get_yesterday_hot_data
from django.contrib.contenttypes.models import ContentType
from blog.models import Blog
from django.utils import timezone
from django.db.models import Sum
from django.core.cache import cache
import datetime
def home(request):
blog_content_type = ContentType.objects.get_for_model(Blog)
dates, read_nums = get_seven_read_data(blog_content_type)
    # Fetch the cached seven-day hot-blog data
seven_hot_data = cache.get('seven_hot_data')
if seven_hot_data is None:
seven_hot_data = get_seven_hot_data()
cache.set('seven_hot_data', seven_hot_data, 3600)
context = {}
context['dates'] = dates
context['read_nums'] = read_nums
context['today_hot_data'] = get_today_hot_data(blog_content_type)
context['yesterday_hot_data'] = get_yesterday_hot_data(blog_content_type)
context['seven_hot_data'] = seven_hot_data
return render_to_response('home.html', context)
def get_seven_hot_data():
today = timezone.now().date()
date = today - datetime.timedelta(days=6)
blogs = Blog.objects \
.filter(read_details__date__lte=today, read_details__date__gt=date) \
.values('id', 'title') \
.annotate(read_num_sum=Sum('read_details__read_num')) \
.order_by('-read_num_sum')
return blogs[:7]
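# --- Illustrative helper (not part of the original views): the cache-aside
# pattern used in home() factored into a reusable function. The helper name and
# default timeout are assumptions; the 3600-second TTL mirrors the view above.
def get_or_set_cache(key, producer, timeout=3600):
    """Return the cached value for `key`, computing and storing it on a miss."""
    value = cache.get(key)
    if value is None:
        value = producer()
        cache.set(key, value, timeout)
    return value
# e.g. seven_hot_data = get_or_set_cache('seven_hot_data', get_seven_hot_data)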
|
shane-constantine/mysite
|
mysite/views.py
|
views.py
|
py
| 1,499 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21355104515
|
#
# @lc app=leetcode.cn id=762 lang=python3
#
# [762] Prime Number of Set Bits in Binary Representation
#
# @lc code=start
class Solution:
def countPrimeSetBits(self, left: int, right: int) -> int:
primes = {2,3,5,7,11,13,17,19,23,29,31}
def get_bits(n):
cnt = 0
while n!=0:
n &= (n-1)
cnt += 1
return cnt
return len([n for n in range(left, right+1) if get_bits(n) in primes])
# @lc code=end
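# --- Illustrative sanity check (not part of the submission). Brian Kernighan's
# n &= n - 1 trick above clears the lowest set bit on every pass, so the loop
# runs once per set bit; the expected values follow the LeetCode examples.
if __name__ == "__main__":
    s = Solution()
    assert s.countPrimeSetBits(6, 10) == 4    # 6, 7, 9 and 10 have a prime bit count
    assert s.countPrimeSetBits(10, 15) == 5   # every value except 15 qualifies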
|
Alex-Beng/ojs
|
FuckLeetcode/762.二进制表示中质数个计算置位.py
|
762.二进制表示中质数个计算置位.py
|
py
| 478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3602691667
|
"""
Description:
使用 Bokeh,基于各国家创建一个 CPI 和童工数据的散点图。
拓展:
Bokeh(http://bokeh.pydata.org/)是一个 Python 绘图库,能够用相当简单的命令来绘制更
复杂的图表类型。如果想要创建一个条形图、散点图或时间序列图,尝试Bokeh,看看是
否合适。使用 Bokeh,基于各国家创建一个 CPI 和童工数据的散点图。
"""
from bokeh.plotting import figure, show, output_file
# NOTE: You'll need to have 'africa_cpi_cl' table from Chapter 9 to use this
# code.
def scatter_point(chart, x, y, marker_type):  # Define a function, scatter_point, that takes a chart, x and y values and a marker type (circle, square, rectangle) and adds the points to the chart.
    chart.scatter(x, y, marker=marker_type, line_color="#6666ee",
                  fill_color="#ee6666", fill_alpha=0.7, size=10)  # The chart's scatter method needs two required arguments (the x and y values) plus keyword arguments that style the points (colour, alpha, size). This call passes the edge and fill colours as well as the size and transparency.
chart = figure(title="Perceived Corruption and Child Labor in Africa")  # Create the chart with the figure function, passing in a title.
output_file("scatter_plot.html")  # Define the output file with output_file; this creates scatter_plot.html in the folder where the code is run.
for row in africa_cpi_cl.rows:
    scatter_point(chart, float(row['CPI 2013 Score']),
                  float(row['Total (%)']), 'circle')  # For each row of data, add a point with the CPI score on the x axis and the child-labour rate on the y axis.
show(chart)  # Show the chart in a browser window.
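# --- Illustrative sketch (not from the book): the same helper with made-up
# points, handy for checking the Bokeh setup when the Chapter 9 table
# 'africa_cpi_cl' is unavailable. The numbers below are placeholders.
# demo = figure(title="Scatter demo (dummy data)")
# for cpi_score, child_labor_pct in [(2.5, 40.0), (4.0, 25.0), (6.5, 10.0)]:
#     scatter_point(demo, cpi_score, child_labor_pct, 'circle')
# show(demo)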
|
lafitehhq/PythonBook
|
Python-03数据处理/Reference/code/chp10-presentation-数据展示/chart_bokeh_使用Bokeh绘图1.0.py
|
chart_bokeh_使用Bokeh绘图1.0.py
|
py
| 1,776 |
python
|
zh
|
code
| 2 |
github-code
|
6
|
2990307783
|
import time
import turtle
from drawing.draw import draw_from_function
from drawing.util import (
write_function_name,
clear_screen,
fill_background,
set_up_screen,
)
from breed.babies import BabyMaker
from plants.plants import (
tree,
daisy,
cyclamen,
foxglove,
generated_flower,
good_flower,
func__14_23,
)
if __name__ == "__main__":
f1 = func__14_23
set_up_screen()
draw_from_function(f1, start=(400, 400), incremental=True)
while True:
pass
|
SimonCarryer/mutant_flowers
|
draw_flower.py
|
draw_flower.py
|
py
| 514 |
python
|
en
|
code
| 6 |
github-code
|
6
|
74363301949
|
#!/usr/bin/python3
'''
A Simple addition module
'''
def add_integer(a, b=98):
    ''' a function that adds two integers or floats. '''
if type(a) is not int and type(a) is not float:
raise TypeError('a must be an integer')
if type(b) is not int and type(b) is not float:
raise TypeError('b must be an integer')
result = int(a) + int(b)
return result
if __name__ == '__main__':
from doctest import testfile
testfile('test/0-add_integer.txt')
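# --- Illustrative doctest sketch (the real cases live in test/0-add_integer.txt,
# which is not shown here; these examples only mirror the function above) ---
# >>> add_integer(1, 2)
# 3
# >>> add_integer(100.3, -2)
# 98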
|
ugwujustine/alx-higher_level_programming
|
0x07-python-test_driven_development/0-add_integer.py
|
0-add_integer.py
|
py
| 484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16786521432
|
from flask import render_template, flash, redirect, url_for, request, jsonify, current_app, g, send_from_directory
from flask_login import login_required, login_user, logout_user, current_user
from app import db
from app.helper import clean_list, normalize, convertArrayToString, convertStringToArray, prepFullAddressSearch, Marker, Error, tryconvert, allowed_file
from app.project import bp
from app.models import User, Post, Project, ProjectImage, Address, Link, Tag, UserRole
from app.main.forms import PostForm, SearchForm, EditAddressForm, TaggingForm, DemoForm, ReviewForm, HiddenDataForm
from app.project.forms import ProjectCreateInitialForm, EditProjectForm, ProjectFilterForm, PhotoForm
from app.service import GoogleMaps_api, AWS_api
from datetime import datetime
from guess_language import guess_language
from flask_googlemaps import GoogleMaps, Map
import geocoder
import os
import flask_s3
import boto3
@bp.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
g.search_form = SearchForm()
def save_tags(asset, func):
# form = ProjectCreateInitialForm(request.form)
form = func(request.form)
tags_data_array = list(map(lambda v: tryconvert(v, v, int), convertStringToArray(form.tags.data)))
for element in tags_data_array:
exists = db.session.query(db.exists().where(Tag.id == element)).scalar()
if not exists:
# TODO: Defaulting category_id to 0; Either create a logic that can self categorize itself or create a process so that tags are created are automatically in a "bucket" category.
# BUG: Newly created tags' name property is not visible from admin panel. Name is however viewable from app view.
tag = Tag(category_id=0, name=element)
db.session.add(tag)
else:
tag = Tag.query.get(element)
asset.add_tag(tag)
db.session.commit()
@bp.route('/explore', methods=['GET', 'POST'])
@login_required
def explore():
geo = geocoder.ip('me')
page = request.args.get('page', 1, type=int)
projects = Project.query.order_by(Project.timestamp.desc()).paginate(page, current_app.config['POSTS_PER_PAGE'], False)
filtered = list(filter(lambda x: x.site.lat != None, projects.items))
# avgLat = sum(project.site.lat for project in filtered)/len(filtered)
avgLat = geo.latlng[0]
# avgLng = sum(project.site.lng for project in filtered)/len(filtered)
avgLng = geo.latlng[1]
#TODO: Preset center and markers according to filter
mymap = Map(
identifier="view-map",
lat=avgLat,
lng=avgLng,
markers=[(project.site.lat, project.site.lng) for project in filtered],
style="height:400px;width:100%;margin:0;"
)
next_url = url_for('project.explore', page=projects.next_num) \
if projects.has_next else None
prev_url = url_for('project.explore', page=projects.prev_num) \
if projects.has_prev else None
return render_template('index.html', title='Explore', projects=projects.items, next_url=next_url, prev_url=prev_url, mymap=mymap)
@bp.route('/create', methods=['GET', 'POST'])
@login_required
def create():
if not current_user.is_authenticated:
flash('You need to be a registered user to create projects.')
return redirect(url_for('main.index'))
form = ProjectCreateInitialForm()
if form.validate_on_submit():
g = GoogleMaps_api()
citystate = form.city.data + ' ' + form.state.data
full_address = prepFullAddressSearch(form.address1.data, form.address2.data, citystate, form.zipcode.data)
exists = db.session.query(db.exists().where(Address.full_address == full_address)).scalar()
if not exists:
print('form address1: {}, form city: {}, form state: {}'.format(form.address1.data, form.city.data, form.state.data))
geocode = g.getGeocode(form.address1.data, form.city.data, form.state.data)
address = Address(address1=form.address1.data, address2=form.address2.data, city=form.city.data, state = form.state.data, zipcode=form.zipcode.data, country=form.country.data, full_address=full_address, lat=geocode['lat'], lng=geocode['lng'])
db.session.add(address)
else:
address = Address.query.filter_by(full_address=full_address).first()
project = Project(name=form.name.data, creator=current_user, site=address)
db.session.add(project)
save_tags(project, ProjectCreateInitialForm)
db.session.commit()
flash('Congratulations, you just created a project and address!')
return redirect(url_for('project.upload', project_id=project.id))
return render_template('project/create.html', title='Create', form=form)
@bp.route('/upload', methods=['GET', 'POST'])
def upload(*args, **kwargs):
form = HiddenDataForm()
form.data.data = request.args.get('project_id') or args
if request.method == 'POST':
for key, f in request.files.items():
if key.startswith('file'):
f.save(os.path.join(current_app.config['UPLOADED_PATH'], 'project{}-{}'.format(form.data.data, f.filename)))
#TODO: Give user opportunity to add more image related data here
if form.validate_on_submit():
s3 = boto3.client(
"s3",
aws_access_key_id=current_app.config['S3_ACCESS_KEY'],
aws_secret_access_key=current_app.config['S3_SECRET_ACCESS_KEY']
)
project = Project.query.filter_by(id=form.data.data).first()
uploadFileNames = []
sourceDir = os.path.join(current_app.config['APP_ROOT'], 'app/static/uploads/')
for (sourceDir, dirname, filename) in os.walk(sourceDir):
uploadFileNames.extend(filename)
break
for filename in uploadFileNames:
sourcepath = sourceDir + filename
print('########### SOURCEPATH: {}'.format(sourcepath))
with open(sourcepath, 'rb') as data:
s3.upload_fileobj(
data,
current_app.config['S3_BUCKET_NAME'],
filename,
ExtraArgs={
"ACL": 'public-read',
"ContentType": filename.rsplit('.', 1)[1].lower()
}
)
object_url = "https://s3-us-west-2.amazonaws.com/{}/{}".format(current_app.config['S3_BUCKET_NAME'], filename)
project_image = ProjectImage(description='this is a static description placeholder... will need to refactor', image_url=object_url, image_project=project, photo_uploader=current_user)
db.session.add(project_image)
db.session.commit()
return redirect(url_for('project.project', project_id=form.data.data))
return render_template('upload.html', form=form)
# @bp.route('/upload-old', methods=['GET', 'POST'])
# def upload(project_id):
# if form.validate_on_submit():
# f = form.photo.data
# filename = secure_filename(f.filename)
# f.save(os.path.join(
# app.instance_path, 'photos', filename
# ))
# return redirect(url_for('project.project', project_id=project_id))
# return render_template('upload.html', form=form)
@bp.route('/timeline/<address_id>')
def view_timeline(address_id):
address = Address.query.filter_by(id=address_id).first()
projects = Project.query.filter_by(address_id=address_id)
mymap = Map(
identifier="view-map",
lat=address.lat,
lng=address.lng,
markers=[(address.lat, address.lng)],
style="height:400px;width:100%;margin:0;"
)
##TODO: Add functionality that allows user to start the process of creating a review
return render_template('project/timeline.html', title='Timeline', mymap=mymap, projects=projects)
@bp.route('/<project_id>', methods=['GET', 'POST'])
def project(project_id):
form = PostForm()
if form.validate_on_submit():
language = guess_language(form.body.data)
if language == 'UNKNOWN' or len(language) > 5:
language = ''
project = Project.query.filter_by(id=project_id).first_or_404()
post = Post(body=form.body.data, author=current_user, commented_project=project, language=language)
post.save()
flash('Your post is now live!')
return redirect(url_for('project.project', project_id=project_id))
page = request.args.get('page', 1, type=int)
project = Project.query.filter_by(id=project_id).first_or_404()
user = User.query.filter_by(username=project.creator.username).first()
mymap = Map(
identifier="view-map",
lat=project.site.lat,
lng=project.site.lng,
markers=[(project.site.lat, project.site.lng)],
style="height:400px;width:100%;margin:0;",
fit_markers_to_bounds = True
)
posts = project.posts.order_by(Post.timestamp.desc()).paginate(page, current_app.config['POSTS_PER_PAGE'], False)
images = project.images.all()
next_url = url_for('project.project', username=user.username, page=posts.next_num) \
if posts.has_next else None
prev_url = url_for('project.project', username=user.username, page=posts.prev_num) \
if posts.has_prev else None
return render_template('project/project.html', user=user, project=project, form=form, posts=posts.items, next_url=next_url, prev_url=prev_url, mymap=mymap, images=images)
@bp.route('/image/<id>', methods=['GET', 'POST'])
def viewProjectImage(id):
project_image = ProjectImage.query.filter_by(id=id).first()
return project_image.image_url
@bp.route('/photo-form', methods=['POST'])
@login_required
def review_form():
form = PhotoForm()
form.project_id.data = request.form['project_id']
return render_template('_comment.html', form=form)
#TODO: Include Photo submission and refactor this. There has to be a better way to do this.
@bp.route('/edit_project/<project_id>', methods=['GET', 'POST'])
@login_required
def edit_project(project_id):
project = Project.query.filter_by(id=project_id).first_or_404()
form = EditProjectForm()
if form.validate_on_submit():
project.name = form.name.data
project.headline = form.headline.data
project.description = form.description.data
project.completion_date = form.completion_date.data
address = Address.query.get(project.address_id)
address.address1 = form.address1.data
address.address2 = form.address2.data
address.city = form.city.data
address.state = form.state.data
address.zipcode = form.zipcode.data
address.country = form.country.data
save_tags(project, ProjectCreateInitialForm)
flash('Your changes have been saved.')
return redirect(url_for('project.project', project_id=project.id))
elif request.method == 'GET':
form.name.data = project.name
form.headline.data = project.headline
form.description.data = project.description
form.completion_date.data = project.completion_date
form.address1.data = project.site.address1
form.address2.data = project.site.address2
form.city.data = project.site.city
form.state.data = project.site.state
form.zipcode.data = project.site.zipcode
form.country.data = project.site.country
#BUG: tags not populating
form.tags.data = convertArrayToString(project.tags.all())
return render_template('project/edit_project.html', title='Edit Project',
form=form)
@bp.route('/favorite/<project_id>')
@login_required
def favorite(project_id):
project = Project.query.filter_by(id=project_id).first()
if project is None:
flash('Project not found.')
return redirect(url_for('project.project', project_id=project_id))
if current_user.is_favorited(project):
flash('You already favorited this project.')
return redirect(url_for('project.project', project_id=project_id))
current_user.favorite(project)
db.session.commit()
flash('You favorited this project!')
return redirect(url_for('project.project', project_id=project_id))
@bp.route('/unfavorite/<project_id>')
@login_required
def unfavorite(project_id):
project = Project.query.filter_by(id=project_id).first()
if project is None:
flash('Project not found.')
return redirect(url_for('project.project', project_id=project_id))
if not current_user.is_favorited(project):
flash('You already unfavorited this project.')
return redirect(url_for('project.project', project_id=project_id))
current_user.unfavorite(project)
db.session.commit()
flash('You unfavorited this project!')
return redirect(url_for('project.project', project_id=project_id))
##TODO: The contribution feature will need to be refactored; Feature will need the following: 1) contribution request form will need to allow users to indicate which project they are trying to contribute to and attach proof of contribution, 2) send email to platform support for verification, 3) support to send email back to approve or decline contribution request, 4) verified contributors will be identified as verified
@bp.route('/contribute/<project_id>')
@login_required
def contribute(project_id):
project = Project.query.filter_by(id=project_id).first()
if project is None:
flash('Project not found.')
return redirect(url_for('project.project', project_id=project_id))
if current_user.has_contributed(project):
        flash('You already contributed to this project.')
return redirect(url_for('project.project', project_id=project_id))
current_user.contribute(project)
db.session.commit()
flash('You contributed to this project!')
return redirect(url_for('project.project', project_id=project_id))
@bp.route('/uncontribute/<project_id>')
@login_required
def uncontribute(project_id):
project = Project.query.filter_by(id=project_id).first()
if project is None:
flash('Project not found.')
return redirect(url_for('project.project', project_id=project_id))
if not current_user.has_contributed(project):
flash('You already uncontributed this project.')
return redirect(url_for('project.project', project_id=project_id))
current_user.uncontribute(project)
db.session.commit()
flash('You uncontributed to this project!')
return redirect(url_for('project.project', project_id=project_id))
|
iamjasonkuo/househunt
|
app/project/routes.py
|
routes.py
|
py
| 14,693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41685965854
|
from utils import Position
import copy
import pickle
def load_cache(file: str) -> dict:
try:
with open(file, 'rb') as f:
cache = pickle.load(f)
except FileNotFoundError:
cache = {}
return cache
def save_cache(cache, file: str):
with open(file, 'wb') as f:
pickle.dump(cache, f)
def minimax(position: Position, depth=0, alpha=float('-inf'), beta=float('inf'),
maximizing_player: int = 1, cache=None) -> int:
if cache is None:
cache = {}
if position.key in cache:
return cache[position.key]
outcome = position.result
if type(outcome) == int:
return outcome * (10 - depth)
if maximizing_player == 1:
maxEval = float('-inf')
for child in position.get_children():
eval_ = minimax(position=child, depth=depth + 1, alpha=alpha, beta=beta, maximizing_player=-1, cache=cache)
maxEval = max(maxEval, eval_)
alpha = max(alpha, eval_)
if alpha >= beta:
break
cache[position.key] = maxEval
return maxEval
elif maximizing_player == -1:
minEval = float('inf')
for child in position.get_children():
eval_ = minimax(position=child, depth=depth + 1, alpha=alpha, beta=beta, maximizing_player=1, cache=cache)
minEval = min(minEval, eval_)
beta = min(beta, eval_)
if alpha >= beta:
break
cache[position.key] = minEval
return minEval
def get_best_move(position: Position, maximizing_player: int = 1, cache: dict = None):
best_value = float('-inf') if maximizing_player == 1 else float('inf')
best_move = (None, None)
for i in range(3):
for j in range(3):
if position.position[i][j] == 0:
new_position = copy.deepcopy(position)
new_position.make_move((i, j))
value = minimax(position=new_position, depth=0, maximizing_player=-maximizing_player, cache=cache)
if maximizing_player == 1 and value > best_value:
best_value = value
best_move = (i, j)
elif maximizing_player == -1 and value < best_value:
best_value = value
best_move = (i, j)
return best_move
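# --- Illustrative usage sketch (not part of the original module). It wires
# load_cache/save_cache to get_best_move; constructing an empty board with
# Position() is an assumption about utils.Position.
if __name__ == "__main__":
    cache = load_cache("minimax_cache.pickle")
    board = Position()  # assumed empty-board constructor
    best = get_best_move(board, maximizing_player=1, cache=cache)
    print("best opening move:", best)
    save_cache(cache, "minimax_cache.pickle")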
|
Epico-Coder/TicTacToe
|
ai.py
|
ai.py
|
py
| 2,357 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33230034244
|
# EKF SLAM - adding one landmark.
#
# slam_09_b_slam_add_landmark
# Claus Brenner, 20 JAN 13
from lego_robot import *
from math import sin, cos, pi, atan2, sqrt
from numpy import *
from slam_f_library import write_cylinders, write_error_ellipses
class ExtendedKalmanFilterSLAM:
def __init__(self, state, covariance,
robot_width, scanner_displacement,
control_motion_factor, control_turn_factor):
# The state. This is the core data of the Kalman filter.
self.state = state
self.covariance = covariance
# Some constants.
self.robot_width = robot_width
self.scanner_displacement = scanner_displacement
self.control_motion_factor = control_motion_factor
self.control_turn_factor = control_turn_factor
# Currently, the number of landmarks is zero.
self.number_of_landmarks = 0
@staticmethod
def g(state, control, w):
x, y, theta = state
l, r = control
if r != l:
alpha = (r - l) / w
rad = l/alpha
g1 = x + (rad + w/2.)*(sin(theta+alpha) - sin(theta))
g2 = y + (rad + w/2.)*(-cos(theta+alpha) + cos(theta))
g3 = (theta + alpha + pi) % (2*pi) - pi
else:
g1 = x + l * cos(theta)
g2 = y + l * sin(theta)
g3 = theta
return array([g1, g2, g3])
@staticmethod
def dg_dstate(state, control, w):
theta = state[2]
l, r = control
if r != l:
alpha = (r-l)/w
theta_ = theta + alpha
rpw2 = l/alpha + w/2.0
m = array([[1.0, 0.0, rpw2*(cos(theta_) - cos(theta))],
[0.0, 1.0, rpw2*(sin(theta_) - sin(theta))],
[0.0, 0.0, 1.0]])
else:
m = array([[1.0, 0.0, -l*sin(theta)],
[0.0, 1.0, l*cos(theta)],
[0.0, 0.0, 1.0]])
return m
@staticmethod
def dg_dcontrol(state, control, w):
theta = state[2]
l, r = tuple(control)
if r != l:
rml = r - l
rml2 = rml * rml
theta_ = theta + rml/w
dg1dl = w*r/rml2*(sin(theta_)-sin(theta)) - (r+l)/(2*rml)*cos(theta_)
dg2dl = w*r/rml2*(-cos(theta_)+cos(theta)) - (r+l)/(2*rml)*sin(theta_)
dg1dr = (-w*l)/rml2*(sin(theta_)-sin(theta)) + (r+l)/(2*rml)*cos(theta_)
dg2dr = (-w*l)/rml2*(-cos(theta_)+cos(theta)) + (r+l)/(2*rml)*sin(theta_)
else:
dg1dl = 0.5*(cos(theta) + l/w*sin(theta))
dg2dl = 0.5*(sin(theta) - l/w*cos(theta))
dg1dr = 0.5*(-l/w*sin(theta) + cos(theta))
dg2dr = 0.5*(l/w*cos(theta) + sin(theta))
dg3dl = -1.0/w
dg3dr = 1.0/w
m = array([[dg1dl, dg1dr], [dg2dl, dg2dr], [dg3dl, dg3dr]])
return m
def predict(self, control):
"""The prediction step of the Kalman filter."""
# covariance' = G * covariance * GT + R
# where R = V * (covariance in control space) * VT.
# Covariance in control space depends on move distance.
# --->>> Put your code here.
# Hints:
# - The number of landmarks is self.number_of_landmarks.
# - eye(n) is the numpy function which returns a n x n identity matrix.
# - zeros((n,n)) returns a n x n matrix which is all zero.
# - If M is a matrix, M[0:2,1:5] returns the submatrix which consists
# of the rows 0 and 1 (but not 2) and the columns 1, 2, 3, 4.
# This submatrix operator can be used on either side of an assignment.
# - Similarly for vectors: v[1:3] returns the vector consisting of the
# elements 1 and 2, but not 3.
# - All matrix and vector indices start at 0.
G3 = self.dg_dstate(self.state, control, self.robot_width)
left, right = control
left_var = (self.control_motion_factor * left)**2 +\
(self.control_turn_factor * (left-right))**2
right_var = (self.control_motion_factor * right)**2 +\
(self.control_turn_factor * (left-right))**2
control_covariance = diag([left_var, right_var])
V = self.dg_dcontrol(self.state, control, self.robot_width)
R3 = dot(V, dot(control_covariance, V.T))
        # Now enlarge G3 and R3 to accommodate all landmarks. Then, compute the
# new covariance matrix self.covariance.
new_offset = 2*self.number_of_landmarks
G = zeros((3 + new_offset, 3 + new_offset))
G[0:3, 0:3] = G3
G[3:, 3:] = eye(new_offset, new_offset)
R = zeros((3 + new_offset, 3 + new_offset))
R[0:3, 0:3] = R3
self.covariance = dot(G, dot(self.covariance, G.T)) + R
# state' = g(state, control)
self.state[0:3] = self.g(self.state[0:3], control, self.robot_width)
def add_landmark_to_state(self, initial_coords):
"""Enlarge the current state and covariance matrix to include one more
landmark, which is given by its initial_coords (an (x, y) tuple).
Returns the index of the newly added landmark."""
# --->>> Put here your new code to augment the robot's state and
# covariance matrix.
# Initialize the state with the given initial_coords and the
        # covariance with 1e10 (as an approximation for "infinity").
# Hints:
# - If M is a matrix, use M[i:j,k:l] to obtain the submatrix of
# rows i to j-1 and colums k to l-1. This can be used on the left and
# right side of the assignment operator.
# - zeros(n) gives a zero vector of length n, eye(n) an n x n identity
# matrix.
# - Do not forget to increment self.number_of_landmarks.
# - Do not forget to return the index of the newly added landmark. I.e.,
# the first call should return 0, the second should return 1.
landmark_index = self.number_of_landmarks
old_offset = 2*self.number_of_landmarks
self.number_of_landmarks += 1
new_offset = 2*self.number_of_landmarks
covarianceN = zeros((3 + new_offset, 3 + new_offset))
covarianceN[0:3+old_offset, 0:3+old_offset] = self.covariance
covarianceN[-2,-2] = covarianceN[-1,-1] = 1e10
self.covariance = covarianceN
state_prime = zeros(3 + new_offset)
state_prime[0:3+old_offset] = self.state
state_prime[-2] = initial_coords[0]
state_prime[-1] = initial_coords[1]
self.state = state_prime
return landmark_index
def get_landmarks(self):
"""Returns a list of (x, y) tuples of all landmark positions."""
return ([(self.state[3+2*j], self.state[3+2*j+1])
for j in xrange(self.number_of_landmarks)])
def get_landmark_error_ellipses(self):
"""Returns a list of all error ellipses, one for each landmark."""
ellipses = []
for i in xrange(self.number_of_landmarks):
j = 3 + 2 * i
ellipses.append(self.get_error_ellipse(
self.covariance[j:j+2, j:j+2]))
return ellipses
@staticmethod
def get_error_ellipse(covariance):
"""Return the position covariance (which is the upper 2x2 submatrix)
as a triple: (main_axis_angle, stddev_1, stddev_2), where
main_axis_angle is the angle (pointing direction) of the main axis,
along which the standard deviation is stddev_1, and stddev_2 is the
standard deviation along the other (orthogonal) axis."""
eigenvals, eigenvects = linalg.eig(covariance[0:2,0:2])
angle = atan2(eigenvects[1,0], eigenvects[0,0])
return (angle, sqrt(eigenvals[0]), sqrt(eigenvals[1]))
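# Note (illustrative): for a diagonal covariance diag([9.0, 4.0]) the ellipse is
# axis-aligned, so get_error_ellipse returns (0.0, 3.0, 2.0), i.e. the main-axis
# angle and the two standard deviations sqrt(9) and sqrt(4).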
if __name__ == '__main__':
# Robot constants.
scanner_displacement = 30.0
ticks_to_mm = 0.349
robot_width = 155.0
# Filter constants.
control_motion_factor = 0.35 # Error in motor control.
control_turn_factor = 0.6 # Additional error due to slip when turning.
# Arbitrary start position.
initial_state = array([500.0, 0.0, 45.0 / 180.0 * pi])
# Covariance at start position.
initial_covariance = zeros((3,3))
# Setup filter.
kf = ExtendedKalmanFilterSLAM(initial_state, initial_covariance,
robot_width, scanner_displacement,
control_motion_factor, control_turn_factor)
# Just to test the algorithm, add one landmark.
kf.add_landmark_to_state((400.0, 700.0))
# To make the error ellipse visible, set a smaller variance.
if kf.number_of_landmarks > 0:
kf.covariance[-2,-2] = 300.0**2 # 300 mm in x.
kf.covariance[-1,-1] = 500.0**2 # 500 mm in y.
# Read data.
logfile = LegoLogfile()
logfile.read("robot4_motors.txt")
# Loop over all motor tick records and all measurements and generate
# filtered positions and covariances.
# This is the EKF SLAM loop.
f = open("ekf_slam_add_landmarks.txt", "w")
for i in xrange(len(logfile.motor_ticks)):
# Prediction.
control = array(logfile.motor_ticks[i]) * ticks_to_mm
kf.predict(control)
# End of EKF SLAM - from here on, data is written.
# Output the center of the scanner, not the center of the robot.
print >> f, "F %f %f %f" % \
tuple(kf.state[0:3] + [scanner_displacement * cos(kf.state[2]),
scanner_displacement * sin(kf.state[2]),
0.0])
# Write covariance matrix in angle stddev1 stddev2 stddev-heading form
e = ExtendedKalmanFilterSLAM.get_error_ellipse(kf.covariance)
print >> f, "E %f %f %f %f" % (e + (sqrt(kf.covariance[2,2]),))
# Write estimates of landmarks.
write_cylinders(f, "W C", kf.get_landmarks())
# Write error ellipses of landmarks.
write_error_ellipses(f, "W E", kf.get_landmark_error_ellipses())
f.close()
|
jfrascon/SLAM_AND_PATH_PLANNING_ALGORITHMS
|
06-SLAM/CODE/slam_09_b_slam_add_landmark_question.py
|
slam_09_b_slam_add_landmark_question.py
|
py
| 10,329 |
python
|
en
|
code
| 129 |
github-code
|
6
|
4135669430
|
#!/usr/bin/env python3
# Covariance Calculation from doc2vec model
import numpy as np
import gensim.models
import gensim
import sys
import pickle
from helpers import get_name
def compute_covariance_matrix(model_name, to_json=True):
model = gensim.models.Doc2Vec.load(model_name)
doctags = list(model.docvecs.doctags)
N = len(doctags)
X = []
for x in doctags:
X.append(model.docvecs[x])
X = np.array(X)
# R[i, j] = R[j, i] = dot(vi, vj) / (norm(vi) * norm(vj))
R = np.corrcoef(X)
if to_json:
RR = {}
for x, dx in enumerate(doctags):
for y, dy in enumerate(doctags):
RR[get_name(dx), get_name(dy)] = R[x,y]
return doctags, RR
else:
return doctags, R
if __name__ == '__main__':
model_name = sys.argv[1]
doctags, R = compute_covariance_matrix(model_name)
pickle.dump(R, open('corrcoef.pickle', 'wb+'))
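    # --- Illustrative read-back sketch (not part of the original script); the
    # module names are placeholders for whatever get_name() produces:
    # corr = pickle.load(open('corrcoef.pickle', 'rb'))
    # corr[('module_a', 'module_b')]  # correlation of the two document vectors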
|
papachristoumarios/sade
|
sade/corrcoef.py
|
corrcoef.py
|
py
| 930 |
python
|
en
|
code
| 8 |
github-code
|
6
|
3549042262
|
from django import forms
from catalog.models import Category, Product
class ProductAdminForm(forms.ModelForm):
class Meta:
model = Product
fields = ['name', 'slug','brand','sku','price','old_price',\
'is_active','is_bestseller','is_featured','quantity',\
'description','meta_keywords','meta_description', \
'categories','image','thumbnail','image_caption']
def clean_price(self):
if self.cleaned_data['price'] <= 0:
raise forms.ValidationError('Price must be greater than zero.')
return self.cleaned_data['price']
|
Hamfri/shopping
|
catalog/forms.py
|
forms.py
|
py
| 626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10190974986
|
import numpy as np
import neural
import random
#my_seed = 95
#random.seed(my_seed)
#np.random.seed(my_seed)
# Load data
f = open('seeds_dataset.csv', 'r')
features = []
labels = []
rows = f.readlines()
for row in rows:
values = [float(x) for x in row.split(',')]
features.append(values[:-1]) # Ignore last column
label = int(values[-1])
if label == 1:
labels.append([1, 0, 0])
elif label == 2:
labels.append([0, 1, 0])
else:
labels.append([0, 0, 1])
# Split data in training and testing
X_train, X_test, y_train, y_test = [], [], [], []
for i in range(len(features)):
if random.random() > 0.25:
X_train.append(features[i])
y_train.append(labels[i])
else:
X_test.append(features[i])
y_test.append(labels[i])
X_train = np.array(X_train, dtype=np.float128).T
y_train = np.array(y_train, dtype=np.float128).T
X_test = np.array(X_test, dtype=np.float128).T
y_test = np.array(y_test, dtype=np.float128).T
print(X_train.shape)
print(y_train.shape)
# First train
nn = neural.NeuralNetwork([7, 5, 3],activations=['sigmoid', 'sigmoid'])
nn.train(X_train, y_train, epochs=1000, batch_size=64, lr = 0.1)
# Evaluate: compare predicted class indices against the true ones.
# (This assumes feed_forward returns the output-layer activations with shape
# (n_classes, n_samples), matching the transposed data above.)
print(y_test)
_, output = nn.feed_forward(X_test)
predictions = np.argmax(np.array(output), axis=0)
true_labels = np.argmax(y_test, axis=0)
misclassified = int(np.sum(predictions != true_labels))
accuracy = 1.0 - misclassified / len(true_labels)
print("Misclassified samples: {}".format(misclassified))
print("Accuracy: {0:.2f}%".format(accuracy * 100))
|
peterapps/NumpyNeural
|
seeds2.py
|
seeds2.py
|
py
| 1,713 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30357262081
|
from threading import Thread
from time import sleep
from traits.api import HasTraits, Int, Button
from traitsui.api import View, Item, VGroup
class ThreadDemo(HasTraits):
# The thread specific counters:
thread_0 = Int()
thread_1 = Int()
thread_2 = Int()
# The button used to start the threads running:
start = Button('Start Threads')
    # The count of how many threads are currently running:
running = Int()
view = View(
VGroup(
Item('thread_0', style='readonly'),
Item('thread_1', style='readonly'),
Item('thread_2', style='readonly'),
),
'_',
Item('start', show_label=False, enabled_when='running == 0'),
resizable=True,
width=250,
title='Monitoring threads',
)
def _start_changed(self):
for i in range(3):
Thread(
target=self.counter,
args=('thread_%d' % i, (i * 10 + 10) / 1000.0),
).start()
def counter(self, name, interval):
self.running += 1
count = 0
for i in range(200):
setattr(self, name, count)
count += 1
sleep(interval)
self.running -= 1
# Create the demo:
demo = ThreadDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
|
enthought/traitsui
|
traitsui/examples/demo/Advanced/Multi_thread_demo.py
|
Multi_thread_demo.py
|
py
| 1,376 |
python
|
en
|
code
| 290 |
github-code
|
6
|
5469207228
|
# Import libraries and modules
import pandas as pd
import openpyxl as px
from openpyxl.formatting.rule import CellIsRule
from openpyxl.styles import Color, PatternFill
# Prompt for the workbook names
tdname=input('testdataName?')
edname=input('editordataName?')
# Load the same test's data from both workbooks into DataFrames
td=pd.read_excel(tdname,header=1,sheet_name=0)
ed=pd.read_excel(edname,header=1,sheet_name=0)
# Convert the test-data label row to a list
tdlabel=td.columns.values
tdlabel=tdlabel.tolist()
# Store the number of test-data labels (used later as the range argument of the loop)
l=len(tdlabel)
# Define the DataFrames
# (these definitions may be unnecessary)
add=pd.DataFrame()
result=pd.DataFrame()
# Reshape the editor data
# Select the editor-data columns whose names match the test-data label names (regex match) and assemble them
for i in range(l):
add = ed.loc[:,ed.columns.str.match(tdlabel[i])]
result = pd.concat([result,add],axis=1)
# True/False comparison
tf=pd.DataFrame()
tf=td==result
# Prompt for the output workbook name
outname=input('outputName?')
# Write each DataFrame to the output workbook as a separate sheet
with pd.ExcelWriter(outname) as writer:
tf.to_excel(writer,sheet_name='TrueFalse')
td.to_excel(writer,sheet_name='TestData')
result.to_excel(writer,sheet_name='EditorData')
# Conditional formatting that highlights FALSE cells
wb=px.load_workbook(outname)
ws=wb['TrueFalse']
ws.conditional_formatting.add('A1:AZ100',CellIsRule(operator='equal',formula=['FALSE'],
fill=PatternFill(start_color='FF0000', end_color='FF0000',
fill_type='solid')))
white=px.styles.PatternFill(patternType='solid',
fgColor='000000', bgColor='000000')
ws['A1'].fill=white
ws.conditional_formatting.add('A1:AZ100',CellIsRule(operator='equal',formula=[''],
fill=PatternFill(start_color='000000', end_color='000000',
fill_type='solid')))
wb.save(outname)
|
kobayu0902art/work_snippets
|
reshape/reshape_v1.4_1.py
|
reshape_v1.4_1.py
|
py
| 2,175 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
70929712188
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 17:20:25 2023
@author: Gilberto
"""
""
# streamlit_app.py
import io
import base64
import streamlit as st
import pandas as pd
from datetime import datetime
from mortgagestyle_v2 import MortgageStyle
from straightline_v2 import StraightLineAmortization
from SOFRDataExtractor import SOFRDataExtractor # Assuming the previous code is saved in this file
def homepage():
st.title("Amortization Calculator Home")
# Background Information
st.header("Background Information")
st.markdown("""
This application helps to generate an amortization schedule, which is a table detailing each periodic payment on an
amortizing loan.
**Types of Amortization:**
- **Mortgage Style:**
- **Hybrid Style:**
- **Straight Line:**
""")
# Application Features
st.header("Application Features")
st.markdown("""
- Calculate Mortgage Style, Hybrid Style, and Straight Line Amortizations.
- Supports both fixed and floating interest rates.
- Downloadable amortization schedule in Excel format.
""")
# How to Use
st.header("How to Use")
st.markdown("""
1. Enter the required details such as Settlement Date, Maturity Date, Notional Amount, etc.
2. Choose the type of Amortization: Mortgage Style, Hybrid Style, or Straight Line.
3. For floating rate, upload the SOFR data file and select the reset frequency.
4. Click on the "Generate Amortization" button to view the amortization table.
5. You can also download the table in Excel format using the provided link.
""")
# Details about SOFR_Data file
st.header("SOFR Data Formatting and Source")
st.markdown("""
**Formatting Requirements for the SOFR Data File:**
- The file should be in `.xls` or `.xlsx` format.
- Ensure the file contains columns labeled 'Date' and 'Rate'.
- Data should be sorted chronologically.
- Rates should be in decimal form (e.g., 0.03 for 3%).
-A sample format for the data can be found in github
If you don't have the SOFR data file, you can obtain the required data from:
[Pensford Resources - Forward Curve](https://www.pensford.com/resources/forward-curve)
""")
# Details about contacting
st.header("Contact info")
st.markdown("""
**If you find any errors or have any question please feel free to reach out via LinkedIn:**
https://www.linkedin.com/in/gil-de-la-cruz-vazquez-62049b125/""")
def apply_floating_rate(df, f, spread):
for i in range(len(df)):
if i == 0:
continue # No change for the first period
df.at[i, 'Period Interest'] = round(df.at[i, 'Outstanding Balance'] * (f(i) + spread) / 12,2) # Applying spread to the SOFR rate
df.at[i, 'Period Payment'] = round(df.at[i, 'Period Interest'] + df.at[i, 'Principal Payment'],2)
df.at[i, 'Outstanding Balance'] = df.at[i-1, 'Outstanding Balance'] - df.at[i, 'Principal Payment']
return df
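# Worked example (illustrative): with a 1,000,000 outstanding balance, a 4.00%
# SOFR fix and a 1.50% spread, the period interest computed above would be
# 1,000,000 * (0.04 + 0.015) / 12 = 4,583.33.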
def main():
st.title("Amortization Calculator")
# Input parameters
settlement_date = st.date_input("Settlement Date", datetime(2022, 8, 1))
maturity_date = st.date_input("Maturity Date", datetime(2032, 8, 1))
first_payment_date = st.date_input("First Payment Date", datetime(2022, 9, 1))
notional_amount = st.number_input("Notional Amount", value=600000.0, step=5000.0)
rate = st.number_input("Rate (%)", value=7.03, step=0.01)
basis_numerator = st.selectbox("Basis Numerator", ["ACT", "30"])
basis_denominator = st.selectbox("Basis Denominator", [360, 365])
payment_frequency = st.selectbox("Frequency", ["1M", "3M", "6M"])
amortization_years = st.number_input("Amortization Years", value=25, step=1)
# Output format selection
output_format = st.selectbox("Output Format", ["Simple Amortization", "P+I"])
# Choose amortization type
amortization_type = st.selectbox("Choose Amortization Type", ["Mortgage Style", "Hybrid Style", "Straight Line"])
rate_type = st.selectbox("Rate Type", ["Fixed", "Floating"])
if rate_type == "Floating":
sofr_file = st.file_uploader("Upload SOFR Data File", type=['xls', 'xlsx'])
spread = st.number_input("Enter Spread (%)", min_value=0.0, max_value=10.0, value=0.0, step=0.1) / 100.0 # Spread in percentage
if sofr_file:
data_extractor = SOFRDataExtractor(sofr_file)
months_duration = st.selectbox("Reset Frequency", ["1M", "3M", "6M"])
if months_duration == "1M":
f = data_extractor.interpolate_curve(data_extractor.data_1m)
elif months_duration == "3M":
f = data_extractor.interpolate_curve(data_extractor.data_3m)
else: # For 6M, using 3M data for simplicity. Ideally, you'd have 6M data
f = data_extractor.interpolate_curve(data_extractor.data_3m)
if st.button("Generate Amortization"):
if amortization_type == "Mortgage Style":
mortgage = MortgageStyle(
settlement_date, maturity_date, first_payment_date, notional_amount,
rate, basis_numerator, basis_denominator, amortization_years, payment_frequency
)
df = mortgage.create_mortgage_style_amort()
elif amortization_type == "Hybrid Style":
mortgage = MortgageStyle(
settlement_date, maturity_date, first_payment_date, notional_amount,
rate, basis_numerator, basis_denominator, amortization_years, payment_frequency
)
df = mortgage.create_hybrid_style_amort()
else:
sla = StraightLineAmortization(
settlement_date, maturity_date, first_payment_date, notional_amount,
rate, basis_numerator, basis_denominator, amortization_years, payment_frequency
)
df = sla.generate_schedule()
if rate_type == "Floating":
df = apply_floating_rate(df, f, spread)
df['Interest Rate (%)'] = (df['Period Interest'] / df['Outstanding Balance'].shift(1)) * 12 * 100
df['Interest Rate (%)'] = df['Interest Rate (%)'].round(2)
# Calculate additional columns for P+I
if 'Period Payment' in df.columns and 'Outstanding Balance' in df.columns:
df['Remaining Notional Balance'] = df['Outstanding Balance'] - df['Principal Payment']
if 'Period Payment' in df.columns and 'Principal Payment' in df.columns:
df['Period Interest'] = df['Period Payment'] - df['Principal Payment']
# Customize output format
if output_format == "Simple Amortization":
columns = ['Period Start Date', 'Period End Date', 'Outstanding Balance']
else: # "P+I"
columns = ['Payment Number', 'Period Start Date', 'Period End Date', 'Outstanding Balance',
'Period Payment', 'Principal Payment', 'Period Interest', 'Remaining Notional Balance']
if rate_type == "Floating":
df = apply_floating_rate(df, f, spread)
df['Interest Rate (%)'] = (df['Period Interest'] / df['Outstanding Balance'].shift(1)) * 12 * 100
df['Interest Rate (%)'] = df['Interest Rate (%)'].round(2)
# Set the first period's interest rate to the SOFR rate for the first period plus spread
first_period_rate = f(0) + spread
df.at[0, 'Interest Rate (%)'] = round(first_period_rate*100,2) # Convert to annual rate in percentage
columns.append('Interest Rate (%)') # Only add this column if rate_type is Floating
df = df[columns]
# Display the dataframe
st.write(df)
# Download link for Excel
towrite = io.BytesIO()
downloaded_file = df.to_excel(towrite, encoding='utf-8', index=False, engine='openpyxl')
towrite.seek(0)
b64 = base64.b64encode(towrite.read()).decode()
st.markdown(f'<a href="data:application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;base64,{b64}" download="amortization.xlsx">Download Excel File</a>', unsafe_allow_html=True)
if __name__ == "__main__":
page = st.sidebar.radio("Select Page", ["Home", "Amortization Calculator"])
if page == "Home":
homepage()
else:
main()
|
gdelacruzv/Amortization_calculator
|
Amortization_app_V4.py
|
Amortization_app_V4.py
|
py
| 8,584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70940682109
|
#this is how to get code output in py
print(
"This is my first code in python. Hello python")
#this is how to declare a variable
# variable has a name then after an equal sign the value
subject_name = "Tom"
another_name = "Jerry"
there_shows_name = subject_name + " & " + another_name
print("This is a cartoon show name:", there_shows_name)
#that was a string type variable
#there are 3 types of variable
# variable types = numeric, string, boolean
# now I'll write the numeric type of variable.
zero = 0
one = 1
first_digit = 2
second_digit = 4
third_digit = 6
fourth_digit = 8
# now I'm going to create some math operations with these variables
plus_result = first_digit + zero
minus_result = fourth_digit - third_digit
multiply_result = first_digit * one
divided_result = second_digit / first_digit
print(plus_result)
print(minus_result)
print(multiply_result)
print(divided_result)
# all operations seem good
# now I'm going to write the boolean type of variable
is_cold = False
is_hot = True
print(is_cold , "&" , is_hot)
# all three variable types have now been shown
# input
# now I'm writing input code
input("this is an input. you can write here")
user_name = input("enter your user name")
user_password = input("enter your password")
user_age = input("Enter your age")
age_int = int(user_age)
# now I'm going to learn decision/condition making
# in programming it's called if-else
#now i'm going to write some if else condition
mobilePrice = 5000
haveMoney = 4999
if mobilePrice < haveMoney:
print("I will buy a mobile")
else:
print("calculator is better")
# this condition-making code works
# now we should play with if/else...
|
ahmadbinraees/basicConcepts
|
src/basic.py
|
basic.py
|
py
| 1,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39614840706
|
#!/usr/bin/env python3
import sys
import os
import time
from datetime import datetime
import textwrap
import argparse
import platform
try:
import influxdb
except ImportError:
print("Trying to Install required module: influxdb\n")
os.system('python3 -m pip install influxdb')
time.sleep(5)
def fioinput(ip, port, database, hostname):
client = influxdb.InfluxDBClient(host=ip, port=8086)
try:
client.ping()
client.create_database(database)
client.switch_database(database)
except:
print("!!Was unable to connect to the Influxdb!!\
\nPlease check that the IP address and port information is correct.\
\nKilling the fio session as well.\
\n")
os.system('pkill fio')
quit()
# minimal format found here: https://www.andypeace.com/fio_minimal.html
for line in sys.stdin:
fullfio_data = line.split(",")
fullfio_data = fullfio_data[0].split(";")
# Run info
terseversion = fullfio_data[0]
fioversion = fullfio_data[1]
jobname = fullfio_data[2]
# Read IO info
readtotalio = (int(fullfio_data[5]) / 1024)
readbandwidthio = (int(fullfio_data[6]) / 1024)
readiopsio = fullfio_data[7]
readpercent = float(fullfio_data[43].strip('%'))
# Read Submission Latency info
rdsubmissionmin = int(fullfio_data[9])
rdsubmissionmax = int(fullfio_data[10])
rdsubmissionmean = int(float(fullfio_data[11]))
rdsubmissiondeviation = int(float(fullfio_data[12]))
# Read Completion Latency info
rdcompletionmin = int(fullfio_data[13])
rdcompletionmax = int(fullfio_data[14])
rdcompletionmean = int(float(fullfio_data[15]))
rdcompletiondeviation = int(float(fullfio_data[16]))
# Read Total Latency info
rdtotalmin = int(fullfio_data[37])
rdtotalmax = int(fullfio_data[38])
rdtotalmean = int(float(fullfio_data[39]))
rdtotaldeviation = int(float(fullfio_data[40]))
# Write IO info
writetotalio = (int(fullfio_data[46]) / 1024)
writebandwidthio = (int(fullfio_data[47]) / 1024)
writeiopsio = fullfio_data[48]
writepercent = float(fullfio_data[84].strip('%'))
# Write Submission Latency info
wrsubmissionmin = int(fullfio_data[50])
wrsubmissionmax = int(fullfio_data[51])
wrsubmissionmean = int(float(fullfio_data[52]))
wrsubmissiondeviation = int(float(fullfio_data[53]))
# Write Completion Latency info
wrcompletionmin = int(fullfio_data[54])
wrcompletionmax = int(fullfio_data[55])
wrcompletionmean = int(float(fullfio_data[56]))
wrcompletiondeviation = int(float(fullfio_data[57]))
# Write Total Latency info
wrtotalmin = int(fullfio_data[78])
wrtotalmax = int(fullfio_data[79])
wrtotalmean = int(float(fullfio_data[80]))
wrtotaldeviation = int(float(fullfio_data[81]))
# IO depth distribution
iodepth01 = float(fullfio_data[92].strip('%'))
iodepth02 = float(fullfio_data[93].strip('%'))
iodepth04 = float(fullfio_data[94].strip('%'))
iodepth08 = float(fullfio_data[95].strip('%'))
iodepth16 = float(fullfio_data[96].strip('%'))
iodepth32 = float(fullfio_data[97].strip('%'))
iodepth64 = float(fullfio_data[98].strip('%'))
# Block size
# Bandwidth / IOPS
if readiopsio == "0":
readblocksize = float(0)
else:
readblocksize = round((int(readbandwidthio) / int(readiopsio)) * 1024, 1)
if writeiopsio == "0":
writeblocksize = float(0)
else:
writeblocksize = round((int(writebandwidthio) / int(writeiopsio)) * 1024, 1)
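        # Worked example (illustrative): 400 MB/s of read bandwidth at 102,400
        # read IOPS gives (400 / 102400) * 1024 = 4.0 KB per I/O, i.e. a 4K workload.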
# Calculate percentage of read vs write IOPS
totaliops = int(readiopsio) + int(writeiopsio)
readiopspercentage = int(readiopsio) / int(totaliops)
writeiopspercentage = int(writeiopsio) / int(totaliops)
# CPU Usage
cpuuser = float(fullfio_data[87].strip('%'))
cpusystem = float(fullfio_data[88].strip('%'))
# print("Read IOPS % : "+str(readiopspercentage))
# print("Write IOPS % : "+str(writeiopspercentage))
current_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
print(current_time+" | Job Name: "+jobname+" | Read IOPS: "+readiopsio+" | Write IOPS: "+writeiopsio+" | Block(read/write): "+str(readblocksize)+" / "+str(writeblocksize), end='\r')
sys.stdout.flush()
json_body = [
{
"measurement": "FIO",
"tags": {
"runId": jobname,
"hostname": hostname
},
"time": current_time,
"fields": {
"Read_IOPS": int(readiopsio),
"Read_Percentage": readpercent,
"Read_Total_I/O_(MB)": readtotalio,
"Read_bandwidth_(MB/s)": readbandwidthio,
"Read_Latency_Submission_min": rdsubmissionmin,
"Read_Latency_Submission_max": rdsubmissionmax,
"Read_Latency_Submission_mean": rdsubmissionmean,
"Read_Latency_Submission_deviation": rdsubmissiondeviation,
"Read_Latency_Completion_min": rdcompletionmin,
"Read_Latency_Completion_max": rdcompletionmax,
"Read_Latency_Completion_mean": rdcompletionmean,
"Read_Latency_Completion_deviation": rdcompletiondeviation,
"Read_Latency_Total_min": rdtotalmin,
"Read_Latency_Total_max": rdtotalmax,
"Read_Latency_Total_mean": rdtotalmean,
"Read_Latency_Total_deviation": rdtotaldeviation,
"Write_IOPS": int(writeiopsio),
"Write_Percentage": writepercent,
"Write_Latency_Submission_min": wrsubmissionmin,
"Write_Latency_Submission_max": wrsubmissionmax,
"Write_Latency_Submission_mean": wrsubmissionmean,
"Write_Latency_Submission_deviation": wrsubmissiondeviation,
"Write_Latency_Completion_min": wrcompletionmin,
"Write_Latency_Completion_max": wrcompletionmax,
"Write_Latency_Completion_mean": wrcompletionmean,
"Write_Latency_Completion_deviation": wrcompletiondeviation,
"Write_Latency_Total_min": wrtotalmin,
"Write_Latency_Total_max": wrtotalmax,
"Write_Latency_Total_mean": wrtotalmean,
"Write_Latency_Total_deviation": wrtotaldeviation,
"Write_Total_I/O_(MB)": writetotalio,
"Write_bandwidth_(MB/s)": writebandwidthio,
"Read Block Size (KB)": readblocksize,
"Write Block Size (KB)": writeblocksize,
"CPU User": cpuuser,
"CPU System": cpusystem,
"IOdepthdist01": iodepth01,
"IOdepthdist02": iodepth02,
"IOdepthdist04": iodepth04,
"IOdepthdist08": iodepth08,
"IOdepthdist16": iodepth16,
"IOdepthdist32": iodepth32,
"IOdepthdist64": iodepth64,
"Read_IOPS_Percentage": readiopspercentage,
"Write_IOPS_Percentage": writeiopspercentage
}
}
]
client.write_points(json_body)
def main():
parser = argparse.ArgumentParser(
prog='fio_to_influxdb',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
The following options must be added to the fio command for this script to function
--status-interval=1
--minimal
Example usage:
fio instructionfile.fio --status-interval=1 --minimal | fio_to_influxdb.py
--
'''))
parser.add_argument("-ip", default='localhost',help="IP or DNS name of host running influxdb. Default is localhost", type=str)
parser.add_argument("-port", default='8086',help="Port used to connect to influxdb. Default is 8086", type=int)
parser.add_argument("-database", default='fio',help="Name of database created in influxdb. Default is fio", type=str)
parser.parse_args()
args = parser.parse_args()
print(\
"\nConnecting to influx database with the following parameters\n\
\tIP/DNS: "+args.ip+"\n\
\tPort: "+str(args.port)+"\n\
\tDatabase: "+args.database+"\n\
"
)
# Get OS host name
hostname = platform.uname()[1]
fioinput(args.ip, args.port, args.database, hostname)
print("\n\nJob complete\n")
main()
|
philcanman/fio-to-influxdb
|
fio_to_influxdb.py
|
fio_to_influxdb.py
|
py
| 9,043 |
python
|
en
|
code
| 1 |
github-code
|
6
|
75131855228
|
from datetime import datetime
import pymysql
import json as j
import flask as f
from flask import Flask, redirect
from flask import request
from flask import send_from_directory
from flaskext.mysql import MySQL
app=Flask(__name__, static_url_path="")
# Connect to the database (parameters)
mysql=MySQL(cursorclass=pymysql.cursors.DictCursor)
app.config["MYSQL_DATABASE_USER"] = "root"
app.config["MYSQL_DATABASE_PASSWORD"] = "root"
app.config["MYSQL_DATABASE_DB"] = "racunarske_komponente_shop"
app.config["MYSQL_DATABASE_HOST"] = "localhost"
mysql.init_app(app)
# Users
# Check user credentials (authentication)
@app.route("/korisnici", methods=["POST"])
def dobaviKorisnike():
prijava = request.get_json()
cursor = mysql.get_db().cursor()
cursor.execute("SELECT * FROM korisnici")
korisnici = cursor.fetchall()
for korisnik in korisnici:
if korisnik["korisnicko_ime"] == prijava["korisnicko_ime"]:
if korisnik["lozinka"] == prijava["lozinka"]:
return f.jsonify(korisnik)
return "greska"
# User registration
@app.route("/dodajKorisnika", methods=["POST"])
def dodajKorisnika():
data = request.get_json()
db = mysql.get_db()
cursor = db.cursor()
cursor.execute("SELECT * FROM korisnici")
korisnici = cursor.fetchall()
for korisnik in korisnici:
if korisnik["korisnicko_ime"] == data["korisnicko_ime"]:
return "greska"
upit = '''INSERT INTO
korisnici(korisnicko_ime, lozinka, ime, prezime, adresa, email)
VALUES(%s, %s, %s, %s, %s, %s)'''
cursor.execute(upit, (data["korisnicko_ime"], data["lozinka"], data["ime"], data["prezime"],
data["adresa"], data["email"]))
db.commit()
return "uspesno"
# Edit a user
@app.route("/izmeniKorisnika/<int:id>", methods=["POST"])
def izmeniKorisnika(id):
data = request.json
print(data)
db = mysql.get_db()
cursor = db.cursor()
upit = '''UPDATE korisnici SET korisnicko_ime=%s, lozinka=%s, ime=%s, prezime=%s, adresa=%s, email=%s WHERE id=%s'''
cursor.execute(upit, (data["korisnicko_ime"], data["lozinka"], data["ime"], data["prezime"], data["adresa"], data["email"], id))
db.commit()
return "uspesno"
# Components
# Fetch all components
@app.route("/komponente", methods=["GET"])
def dobaviKomponente():
cursor = mysql.get_db().cursor()
cursor.execute("SELECT * FROM komponente")
rows = cursor.fetchall()
return f.jsonify(rows)
# Receipts
# Record a purchase (receipt)
@app.route("/dodavanjeKupovine", methods=["POST"])
def dodajKupovinu():
data = request.get_json()
if(data["kolicinaKomponente"] == 0):
return "nemanastanju"
db = mysql.get_db()
cursor = db.cursor()
upit = '''UPDATE komponente SET kolicina = kolicina - 1 WHERE id=%s and kolicina > 0'''
cursor.execute(upit, (data["id_komponente"]))
db.commit()
upit = '''INSERT INTO kupovine(datum_vreme, id_korisnika, naziv_proizvoda, cena)
VALUES(%s, %s, %s, %s)'''
now = datetime.now()
formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')
data["datum_vreme"] = formatted_date
cursor.execute(upit, (data["datum_vreme"], data["id_korisnika"], data["naziv_proizvoda"], data["cena"]))
db.commit()
return "uspesno"
# Start the application
app.run("192.168.0.13",port=5000, debug=True)
|
haribate98/Android
|
FlaskZaProjekat/main.py
|
main.py
|
py
| 3,497 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43956557150
|
from flask import flash
from db import db
# Feedback function for registered users
def feedback(user_id, message):
sql = "INSERT INTO messages (user_id, message) VALUES (:user_id, :message)"
db.session.execute(sql, {"user_id":user_id, "message":message})
db.session.commit()
flash("Kiitos palautteestasi")
# Feedback function for non-registered users
def anonymous_feedback(email, message):
sql = "INSERT INTO messages (email, message) VALUES (:email, :message)"
db.session.execute(sql, {"email":email, "message":message})
db.session.commit()
flash("Kiitos palautteestasi")
# returns all messages (feedback)
def get_messages():
sql = "SELECT COALESCE(U.username, '-'), M.message, COALESCE(M.email, '-') FROM messages M LEFT JOIN users U ON M.user_id=U.id"
result = db.session.execute(sql)
messages = result.fetchall()
db.session.commit()
return messages
|
asianomainen/tsoha-s2020-tuntivarausjarjestelma
|
messages.py
|
messages.py
|
py
| 909 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30353743561
|
# Author: Varun Hiremath <[email protected]>
# Enthought library imports.
from traits.api import Instance, Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.filters.filter_base import FilterBase
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `ExtractVectorComponents` class.
######################################################################
class ExtractVectorComponents(FilterBase):
""" This wraps the TVTK ExtractVectorComponents filter and allows
one to select any of the three components of an input vector data
attribute."""
# The version of this class. Used for persistence.
__version__ = 0
# The actual TVTK filter that this class manages.
filter = Instance(tvtk.ExtractVectorComponents, args=(), allow_none=False)
# The Vector Component to be extracted
component = Enum('x-component', 'y-component', 'z-component',
desc='component of the vector to be extracted')
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['vectors'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
view = View(Group(Item(name='component')),
resizable=True
)
######################################################################
# `Filter` interface.
######################################################################
def update_pipeline(self):
# Do nothing if there is no input.
inputs = self.inputs
if len(inputs) == 0 or len(inputs[0].outputs) == 0:
return
fil = self.filter
self.configure_connection(fil, inputs[0])
fil.update()
self._component_changed(self.component)
######################################################################
# Non-public interface.
######################################################################
def _component_changed(self, value):
# Obtain output from the TVTK ExtractVectorComponents filter
# corresponding to the selected vector component
if len(self.inputs) == 0 or len(self.inputs[0].outputs) == 0:
return
if value == 'x-component':
self._set_outputs([self.filter.vx_component])
elif value == 'y-component':
self._set_outputs([self.filter.vy_component])
elif value == 'z-component':
self._set_outputs([self.filter.vz_component])
self.render()
|
enthought/mayavi
|
mayavi/filters/extract_vector_components.py
|
extract_vector_components.py
|
py
| 2,728 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
30791829002
|
from scipy.fftpack import dct, idct
# implement 2D DCT
def dct2(a):
return dct(dct(a.T, norm='ortho').T, norm='ortho')
# implement 2D IDCT
def idct2(a):
return idct(idct(a.T, norm='ortho').T, norm='ortho')
import cv2
import numpy as np
import matplotlib.pylab as plt
# read the image in grayscale
im = cv2.imread("G:/Classical Object Detection/1.jpg", 0)
imF = dct2(im)
rows, cols = imF.shape
imF2 = np.zeros_like(imF)  # float array for the retained DCT coefficients
print(imF2.shape)
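# Keep only the lowest 10% of frequencies in each dimension (top-left DCT block)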
for i in range(imF.shape[0]//10):
for j in range(imF.shape[1]//10):
imF2[i][j]=imF[i][j]
im1 = idct2(imF2)
# check if the reconstructed image is nearly equal to the original image
# (generally False here, since only the low-frequency coefficients are kept)
print(np.allclose(im, im1))
# plot original and reconstructed images with matplotlib.pylab
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original image', size=10)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('reconstructed image (DCT+IDCT)', size=10)
plt.show()
|
NadiaFaramarzi/ClassicalObjectDetection
|
Codes/DCT(discrete cosine transform).py
|
DCT(discrete cosine transform).py
|
py
| 1,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44593105346
|
# script to create scatter plot for mean intensity ranking in three emotion
# categories. refer to readme for more information about survey and ranking
# task.
# 18 November 2018, Pulkit Singh
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#----------------------------------------------------------------------------#
# Sadness Category
sad_labels = ['displeased', 'unhappy', 'sad', 'dejected', 'miserable',
'heartbroken', 'depressed']
sad_means = [1.38, 1.94, 2.56, 3.44, 4.22, 4.33, 4.38]
sad_x = list(range(1, len(sad_means) + 1))
# plotting means
sad_plt, sad_ax = plt.subplots(figsize=(9, 9))
sad_ax = sns.scatterplot(sad_x, sad_means, marker="")
# annotating data points
for i in range(len(sad_means)):
sad_ax.annotate(sad_labels[i], (sad_x[i], sad_means[i]))
plt.xlabel("Words in 'sadness' category")
plt.ylabel("Average intensity ranking \n")
plt.title("Average intensity ranking of 'sadness' category words")
plt.xlim((0, 10))
plt.ylim((1, 5))
plt.xticks([], [])
plt.savefig("IR_mean_sadness.png")
#----------------------------------------------------------------------------#
# Anger Category
anger_labels = ['irked', 'annoyed', 'irritated', 'mad', 'incensed',
'angry', 'infuriated', 'enraged']
anger_means = [1.83, 1.94, 2.44, 2.88, 3.22, 3.38, 4.55, 4.72]
anger_x = list(range(1, len(anger_means) + 1))
# plotting means
anger_plt, anger_ax = plt.subplots(figsize=(9, 9))
anger_ax = sns.scatterplot(anger_x, anger_means, marker="")
# annotating data points
for i in range(len(anger_means)):
anger_ax.annotate(anger_labels[i], (anger_x[i], anger_means[i]))
plt.xlabel("Words in 'anger' category")
plt.ylabel("Average intensity ranking \n")
plt.title("Average intensity ranking of 'anger' category words")
plt.xlim((0, 10))
plt.ylim((1, 5))
plt.xticks([], [])
plt.savefig("IR_mean_anger.png")
#----------------------------------------------------------------------------#
# Fear Category
fear_labels = ['afraid', 'scared', 'intimidated', 'alarmed', 'distressed',
'frightened', 'horrified', 'terrified']
fear_means = [2.27, 2.27, 2.33, 2.38, 2.66, 3.05, 4.38, 4.55]
fear_x = list(range(1, len(fear_means) + 1))
# plotting means
fear_plt, fear_ax = plt.subplots(figsize=(9, 9))
fear_ax = sns.scatterplot(fear_x, fear_means, marker="")
# annotating data points
for i in range(len(fear_means)):
fear_ax.annotate(fear_labels[i], (fear_x[i], fear_means[i]))
plt.xlabel("Words in 'fear' category")
plt.ylabel("Average intensity ranking \n")
plt.title("Average intensity ranking of 'fear' category words")
plt.xlim((0, 10))
plt.ylim((1, 5))
plt.xticks([], [])
plt.savefig("IR_mean_fear.png")
#----------------------------------------------------------------------------#
|
pulkitsingh/IW-Emoji-Intensity
|
Emotion Word Survey/Word Intensity Ranking/Scatter plots mean/intensityRanking_mean.py
|
intensityRanking_mean.py
|
py
| 2,743 |
python
|
en
|
code
| 0 |
github-code
|
6
|
654986927
|
#!/usr/bin/env python
import argparse
from .. import view_container
def tobool(inp):
return inp.lower() in ('y', '1', 'ok', 't')
parser = argparse.ArgumentParser(description='Display datasets in h5 or n5/zarr container.')
parser.add_argument('path', type=str, help='path to container')
parser.add_argument('--ndim', type=int, default=3,
help='expected number of dimensions')
parser.add_argument('--exclude_names', type=str, nargs='+', default=None,
help='names of datasets that will not be loaded')
parser.add_argument('--include_names', type=str, nargs='+', default=None,
help='names of datasets that will ONLY be loaded')
parser.add_argument('--load_into_memory', type=tobool, default='n',
help='whether to load all data into memory')
parser.add_argument('--n_threads', type=int, default=1,
help='number of threads used by z5py')
def main():
args = parser.parse_args()
view_container(args.path, args.ndim,
args.exclude_names, args.include_names,
args.load_into_memory, args.n_threads)
if __name__ == '__main__':
main()
|
constantinpape/heimdall
|
heimdall/scripts/view_container.py
|
view_container.py
|
py
| 1,184 |
python
|
en
|
code
| 20 |
github-code
|
6
|
21940354299
|
r"""Format notebooks using the TensorFlow docs style.
Install the tensorflow-docs package:
$ python3 -m pip install -U [--user] git+https://github.com/tensorflow/docs
Usage:
$ python3 -m tensorflow_docs.tools.nbfmt [options] notebook.ipynb [...]
See the TensorFlow notebook template:
https://github.com/tensorflow/docs/blob/master/tools/templates/notebook.ipynb
And the TensorFlow docs contributor guide:
https://www.tensorflow.org/community/contribute/docs
"""
import enum
import json
import os
import pathlib
import re
import sys
import textwrap
from typing import Any, Dict, List
from absl import app
from absl import flags
from tensorflow_docs.tools.nbfmt import notebook_utils
OSS = True
flags.DEFINE_integer(
"indent", 2, "Indention level for pretty-printed JSON.", lower_bound=0)
flags.DEFINE_bool("oss", None, "Use OSS formatting.")
flags.DEFINE_bool("remove_outputs", False,
"Remove output cells from the notebook")
flags.DEFINE_bool("test", False,
"Test if the notebook is formatted (useful for CI).")
FLAGS = flags.FLAGS
def clean_notebook(data: Dict[str, Any], nb_source: str, filepath: pathlib.Path,
remove_outputs: bool, indent: int) -> bytes:
"""The main notebook formatting logic.
Args:
data: object representing a parsed JSON notebook.
nb_source: JSON string of entire notebook contents.
filepath: String of notebook filepath passed to the command-line.
remove_outputs: Boolean to clear cell output.
indent: Integer indicating the number of spaces to indent the JSON.
Returns:
A byte string for the JSON formatted notebook.
"""
clean_root(data, filepath) # Top-level notebook fields.
clean_cells(data, nb_source, remove_outputs)
update_license_cells(data)
nbjson = json.dumps(data, sort_keys=True, ensure_ascii=False, indent=indent)
if not OSS:
# Serialization differences in environments.
str_replaces = {"<": r"\u003c", ">": r"\u003e", "&": r"\u0026"}
for str_from, str_to in str_replaces.items():
nbjson = nbjson.replace(str_from, str_to)
return (nbjson + "\n").encode("utf-8")
def clean_root(data: Dict[str, Any], filepath: pathlib.Path) -> None:
"""Deletes extra top-level notebook fields and metadata.
Jupyter format spec:
https://nbformat.readthedocs.io/en/latest/format_description.html
Args:
data: object representing a parsed JSON notebook.
filepath: String of notebook filepath passed to the command-line.
"""
# These top-level fields are required:
notebook_utils.del_entries_except(
data, keep=["cells", "metadata", "nbformat_minor", "nbformat"])
# All metadata is optional according to spec, but we use some of it.
notebook_utils.del_entries_except(
data["metadata"], keep=["accelerator", "colab", "kernelspec"])
metadata = data.get("metadata", {})
colab = metadata.get("colab", {})
# Set top-level notebook defaults.
data["nbformat"] = 4
data["nbformat_minor"] = 0
# Colab metadata
notebook_utils.del_entries_except(
colab, keep=["collapsed_sections", "name", "toc_visible"])
colab["name"] = os.path.basename(filepath)
colab["toc_visible"] = True
metadata["colab"] = colab
# Kernelspec metadata
kernelspec = metadata.get("kernelspec", {})
notebook_utils.del_entries_except(kernelspec, keep=["display_name", "name"])
supported_kernels = {"python3": "Python 3", "swift": "Swift"}
kernel_name = kernelspec.get("name")
if kernel_name not in supported_kernels:
kernel_name = "python3" # Notebook defaults to Python3 (same as Colab).
kernelspec["name"] = kernel_name
kernelspec["display_name"] = supported_kernels[kernel_name]
metadata["kernelspec"] = kernelspec
data["metadata"] = metadata
def _clean_code_cell(cell_data: Dict[str, Any], remove_outputs: bool) -> None:
"""Clean an individual code cell and optionally remove outputs.
Args:
cell_data: object representing a parsed JSON cell.
remove_outputs: Boolean to clear cell output.
"""
if remove_outputs:
cell_data["outputs"] = []
cell_data["execution_count"] = None
# Ensure outputs field exists since part of the nbformat spec.
if cell_data.get("outputs", None) is None:
cell_data["outputs"] = []
# Spec allows null or int (null is Colab default).
if cell_data.get("execution_count") == 0:
cell_data["execution_count"] = None
def _clean_metadata_colab(cell_metadata: Dict[str, Any],
remove_outputs: bool) -> None:
"""Clean up a cell's `metadata.colab` field.
Remove all `metadata.colab` contents except for `metadata.colab.resources`, if
present. The Colab resources are used to embed data within the notebook and
can be treated like output cells (kept unless explicitly removed).
Args:
cell_metadata: object representing the parsed JSON metadata from a cell.
remove_outputs: Boolean to clear cell output.
"""
colab = cell_metadata.pop("colab", {})
# If no outputs, just clear out `metadata.colab`.
if remove_outputs:
return
  # Keep only `resources` if not empty; otherwise `metadata.colab` stays cleared.
if colab.get("resources"):
notebook_utils.del_entries_except(colab, keep=["resources"])
cell_metadata["colab"] = colab
def clean_cells(data: Dict[str, Any], nb_source: str,
remove_outputs: bool) -> None:
"""Remove empty cells and clean code cells.
Args:
data: Object representing a parsed JSON notebook.
nb_source: JSON string of entire notebook contents.
remove_outputs: Boolean True to remove code cell outputs, False to keep.
"""
# Clear leading and trailing newlines.
for cell in data["cells"]:
cell_source = cell["source"]
while cell_source and cell_source[0] == "\n":
cell_source.pop(0)
while cell_source and cell_source[-1] == "\n":
cell_source.pop()
cell["source"] = cell_source
# Remove empty cells.
data["cells"] = [cell for cell in data["cells"] if any(cell["source"])]
# Clean cell metadata.
cell_count = 0
for cell in data["cells"]:
cell_count += 1
cell_metadata = cell.get("metadata", {})
if "id" not in cell_metadata:
cell_metadata["id"] = notebook_utils.generate_cell_id(
cell["source"], cell_count)
notebook_utils.del_entries_except(
cell_metadata, keep=["id", "cellView", "colab"])
_clean_metadata_colab(cell_metadata, remove_outputs)
cell["metadata"] = cell_metadata
  # The presence of this field indicates that outputs are already saved.
  has_outputs = '"output_type"' in nb_source
for cell in data["cells"]:
if cell["cell_type"] == "code":
_clean_code_cell(cell, remove_outputs)
if has_outputs and remove_outputs:
notebook_utils.warn("Removed the existing output cells.")
def update_license_cells(data: Dict[str, Any]) -> None:
"""Format license cell to hide code pane from the Colab form.
Args:
data: object representing a parsed JSON notebook.
"""
# This pattern in Apache and MIT license boilerplate.
license_re = re.compile(r"#@title.*License")
for idx, cell in enumerate(data["cells"]):
src_text = "".join(cell["source"])
if license_re.search(src_text):
# Hide code pane from license form
metadata = cell.get("metadata", {})
metadata["cellView"] = "form"
data["cells"][idx]["metadata"] = metadata
class Status(enum.Enum):
PASS = 0
FAIL = 1
def format_nb(
*,
notebooks: List[str],
remove_outputs: bool = False,
indent: int = 2,
test: bool = False,
) -> Status:
"""Formats a notebook."""
found_error = False # Track errors for final return code.
test_fail_notebooks = []
paths, err_paths = notebook_utils.collect_notebook_paths(notebooks)
if err_paths:
found_error = True
test_fail_notebooks.extend(err_paths)
for path in paths:
print(f"Format notebook: {path}", file=sys.stderr)
data, source = notebook_utils.load_notebook(path)
if not data:
found_error = True
test_fail_notebooks.append(path)
continue
# Returns formatted JSON byte string.
expected_output = clean_notebook(data, source, path, remove_outputs, indent)
if test:
# Compare formatted contents with original file contents.
src_bytes = path.read_bytes()
if expected_output != src_bytes:
test_fail_notebooks.append(path)
else:
path.write_bytes(expected_output)
if test:
if test_fail_notebooks:
error_template = textwrap.dedent("""
[test] The following notebooks are not formatted:
{notebooks}
Please install `nbfmt` and format:
$ python3 -m pip install -U --user git+https://github.com/tensorflow/docs
$ python3 -m tensorflow_docs.tools.nbfmt notebook.ipynb
""")
notebooks = "\n".join([f"- {str(fp)}" for fp in test_fail_notebooks])
print(error_template.format(notebooks=notebooks), file=sys.stderr)
return Status.FAIL
else:
print("[test] Notebooks are formatted", file=sys.stderr)
return Status.PASS
if found_error:
return Status.FAIL
return Status.PASS
def main(argv):
if len(argv) <= 1:
raise app.UsageError("Missing arguments.")
if FLAGS.oss is not None:
global OSS
OSS = FLAGS.oss
exit_code = format_nb(
notebooks=argv[1:],
remove_outputs=FLAGS.remove_outputs,
indent=FLAGS.indent,
test=FLAGS.test)
if exit_code == Status.FAIL:
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
app.run(main)
|
tensorflow/docs
|
tools/tensorflow_docs/tools/nbfmt/__main__.py
|
__main__.py
|
py
| 9,543 |
python
|
en
|
code
| 5,917 |
github-code
|
6
|
71245917627
|
import time
import numpy as np
from testing import Ptot
import matplotlib.pyplot as plt
if __name__ == "__main__":
div = int(1e4)
# number of tests
Q = int(8e6)//div
# population size
N = int(40e6)//div
R = 0
Ip_arr = np.arange(500, 3000)
# FNR variations
Ptot_arr_save = []
Ptot_arr_save.append(Ip_arr)
FNR = 0.2
start_time = time.time()
Ptot_arr = [Ptot(Q, I = Ip, S = N - Ip - R, R = R, Q = Q, \
b = 2, FNR = FNR, FPR = 0.05, replacement = True)
for Ip in Ip_arr]
Ptot_arr_save.append(Ptot_arr)
np.savetxt("testing_data_replacement_true_CA.dat", np.c_[Ptot_arr_save].T)
print("--- %s seconds ---" % (time.time() - start_time))
print(Ptot_arr_save)
plt.figure()
plt.plot(Ptot_arr_save[0], Ptot_arr_save[1])
plt.show()
|
lubo93/disease-testing
|
sampling/testing_analytical_replacement_true_CA.py
|
testing_analytical_replacement_true_CA.py
|
py
| 881 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21313993250
|
class CryptoTool:
def DecypherFromTranslation(self, Message, CryptedKey, UncryptedKey):
Message = Message.upper()
CryptedKey = CryptedKey.upper()
UncryptedKey = UncryptedKey.upper()
Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
translatedAlphabet = ""
result = ""
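        # Build the substitution alphabet implied by the crypted/uncrypted key pair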
for letter in Alphabet:
letterIndex = UncryptedKey.find(letter)
if (letterIndex != -1):
translatedAlphabet += CryptedKey[letterIndex]
else:
translatedAlphabet += letter
for letter in Message:
letterIndex = translatedAlphabet.find(letter)
if (letterIndex != -1):
result += Alphabet[letterIndex]
else:
result += letter
print(result)
#print result
if __name__ == "__main__":
# To be able to execute binary version in Windows correctly freeze_support
# function is called before anything else in the main program
# http://docs.python.org/library/multiprocessing.html#multiprocessing.freeze_support
crypto = CryptoTool()
crypto.DecypherFromTranslation("NSTK KD KTTKSTK TUTD VRAK AK KKTK","KEXKNATNK RD-L I","SYREADIAE JX-F C")
crypto.DecypherFromTranslation("DUADK RN BKXTDKX UG NBXTR WDN","KEXKNATNK RD-L I","SYREADIAE JX-F C")
crypto.DecypherFromTranslation("HKKUTT AK BUDXK KUADTKT NAS KD XT","KEXKNATNK RD-L I","SYREADIAE JX-F C")
crypto.DecypherFromTranslation("XKBKTKF BKXK KUR. KKIXKD TNXAK","KEXKNATNK RD-L I","SYREADIAE JX-F C")
crypto.DecypherFromTranslation("UA VRUTTKF ANTK RPUHKIAX.","KEXKNATNK RD-L I","SYREADIAE JX-F C")
crypto.DecypherFromTranslation("AKIUABXKF RPNUXXKAX","KEXKNATNK RD-L I","SYREADIAE JX-F C")
crypto.DecypherFromTranslation("KEXKNATNK RD-L I","KEXKNATNK RD-L I","SYREADIAE JX-F C")
|
yenshin/test-proto-tuto
|
CryptoTool/cryptotool.py
|
cryptotool.py
|
py
| 1,847 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74147618108
|
# coding:utf-8
"""
Django administration setup
@author: Sébastien Renard <[email protected]>
@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)
"""
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from billing.models import ClientBill, SupplierBill
from billing.forms import ClientBillForm, SupplierBillForm
from core.admin import ReturnToAppAdmin
class BillAdmin(ReturnToAppAdmin):
list_display = ["id", "bill_id", "lead", "state", "amount", "creation_date", "due_date", "payment_date", "comment"]
ordering = ("-creation_date",)
actions = None
list_filter = ["state", "creation_date", "due_date", "payment_date"]
search_fields = ["lead__name", "lead__client__organisation__name", "comment",
"lead__paying_authority__contact__name", "lead__paying_authority__company__name",
"lead__client__contact__name", "lead__client__organisation__company__name"]
class ClientBillAdmin(BillAdmin):
fieldsets = [
(_("Description"), {"fields": ["lead", "bill_id", "bill_file"]}),
(_("Amounts"), {"fields": ["amount", "vat", "amount_with_vat", ]}),
(_("Dates"), {"fields": ["creation_date", "due_date", "payment_date", ]}),
(_("State"), {"fields": ["state", "comment", ]}),
(_("Link with expenses"), {"fields": ["expenses", "expenses_with_vat", ]}),
]
class SupplierBillAdmin(BillAdmin):
search_fields = BillAdmin.search_fields + ["supplier__contact__name", "supplier__company__name"]
list_display = list(BillAdmin.list_display) # Copy list before changing it
list_display.insert(2, "supplier")
fieldsets = [
(_("Description"), {"fields": ["supplier", "lead", "bill_id", "supplier_bill_id", "bill_file"]}),
(_("Amounts"), {"fields": ["amount", "vat", "amount_with_vat", ]}),
(_("Dates"), {"fields": ["creation_date", "due_date", "payment_date", ]}),
(_("State"), {"fields": ["state", "comment", ]}),
(_("Link with expenses"), {"fields": ["expenses", "expenses_with_vat", ]}),
]
admin.site.register(ClientBill, ClientBillAdmin)
admin.site.register(SupplierBill, SupplierBillAdmin)
|
digitalfox/pydici
|
billing/admin.py
|
admin.py
|
py
| 2,334 |
python
|
en
|
code
| 122 |
github-code
|
6
|
16906648825
|
'''Example of using the filter function.'''
from random import randint
lst = [randint(1, 10) for el in range(1, 10)]
print(f"Начальный список: {lst}")
# Вариант решения без функции filter
# Создаем новый список только с четными элементами списка lst
new_lst = []
for el in lst:
if el % 2 == 0:
new_lst.append(el)
print(f"Список четных элеметов без применения filter: {new_lst}")
# Новое решение
lst = list(filter(lambda x: not x % 2, lst))
print(f'Список четных элеметов c применением filter: {lst}')
|
AlexLep1n/Python
|
lesson-6/app_3.py
|
app_3.py
|
py
| 705 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
1544319758
|
from fastapi import FastAPI, UploadFile, Form, File, HTTPException
import cloudinary
import cloudinary.uploader
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Configure Cloudinary credentials
cloudinary.config(
cloud_name='moibit',
api_key='368643671927417',
api_secret='q2l37dcCA701JQlidDMoJaFtOY4'
)
@app.post('/upload')
async def upload_file(file: UploadFile = File(...), upload_name: str = Form(...)):
try:
# Upload the file to Cloudinary
result = cloudinary.uploader.upload(file.file, public_id=upload_name)
return f"File uploaded successfully. Public ID: {result['public_id']}"
except Exception as e:
return f"Error uploading file: {str(e)}", 500
if __name__ == '__main__':
import uvicorn
uvicorn.run(app, host='0.0.0.0', port=8000)
|
naman360/courtledger
|
scripts/main.py
|
main.py
|
py
| 961 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15206133090
|
"""Tests for utils."""
import unittest
import torch
from chemicalx.utils import segment_softmax
class TestPipeline(unittest.TestCase):
"""Test the utils."""
def test_segment_softmax(self):
"""Set up the test case with some data."""
logit = torch.FloatTensor([-0.5, -2.5, 0.5, 1.5])
number_of_segments = torch.LongTensor([2])
segmentation_index = torch.LongTensor([0, 0, 1, 1])
index = torch.LongTensor([0, 1, 2, 3])
temperature = torch.LongTensor([2, 2, 2, 2])
truth = torch.FloatTensor([0.7311, 0.2689, 0.3775, 0.6225])
segment_s = segment_softmax(logit, number_of_segments, segmentation_index, index, temperature)
difference = torch.sum(torch.abs(truth - segment_s))
assert difference < 0.001
|
AstraZeneca/chemicalx
|
tests/unit/test_utils.py
|
test_utils.py
|
py
| 789 |
python
|
en
|
code
| 672 |
github-code
|
6
|
1976029717
|
from django.db import models
from equipas.models import Equipa
# Create your models here.
class Campeonato(models.Model):
campeonato_id = models.AutoField(primary_key=True)
modalidade = models.ForeignKey('Modalidade', models.DO_NOTHING)
nome = models.CharField(max_length=100)
epoca = models.CharField(max_length=20)
datainicio = models.DateField(blank=True, null=True)
datafim = models.DateField(blank=True, null=True)
class Meta:
managed = False
db_table = 'campeonato'
class Modalidade(models.Model):
modalidade_id = models.AutoField(primary_key=True)
designacao = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'modalidade'
class Participante(models.Model):
equipa = models.OneToOneField(Equipa, models.DO_NOTHING, primary_key=True)
campeonato = models.ForeignKey(Campeonato, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'participante'
unique_together = (('equipa', 'campeonato'),)
|
OliveiraRP/django-webapp
|
src/webapp/campeonatos/models.py
|
models.py
|
py
| 1,039 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
31131850381
|
import jwt
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import requests
import time
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
# GLOBALS
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
last_token_expiration = 0
# send request to apple for token revoke
# MODEL
revokeURL = "https://appleid.apple.com/auth/revoke"
teamID = "5NMJ2A479W"
filename = "AuthKey_73YATAJ963.p8"
keyID = "73YATAJ963"
tokenType = "access_token"
class RevokeTokenModel(BaseModel):
clientID: str
token: str
tokenTypeGint: str
def generate_tokenv2(bundleID):
with open(filename, "r") as f:
private_key = f.read()
team_id = teamID
client_id = bundleID
key_id = keyID
validity_minutes = 20
timestamp_now = int(time.time())
timestamp_exp = timestamp_now + (60 * validity_minutes)
# Assuming `last_token_expiration` is a class variable defined somewhere else
# cls.last_token_expiration = timestamp_exp
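    # JWT claims Apple expects for the client secret (ES256-signed, valid for 20 minutes)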
data = {
"iss": team_id,
"iat": timestamp_now,
"exp": timestamp_exp,
"aud": "https://appleid.apple.com",
"sub": client_id
}
token = jwt.encode(
payload=data,
key=private_key.encode('utf-8'),
algorithm="ES256",
headers={"kid": key_id}
)
return token
def generate_token(bundleID):
with open(filename, "r") as f:
private_key = f.read()
team_id = teamID
client_id = bundleID
key_id = keyID
validity_minutes = 20
timestamp_now = int(time.time())
timestamp_exp = timestamp_now + (60 * validity_minutes)
data = {
"iss": team_id,
"iat": timestamp_now,
"exp": timestamp_exp,
"aud": "https://appleid.apple.com",
"sub": client_id
}
token = jwt.encode(payload=data, key=private_key, algorithm="ES256", headers={"kid": key_id})
return token
def revoke_token_request(client_secret: str, clientID: str, tokenTypeGint: str, token: str):
data = {
"client_id": clientID,
"client_secret": client_secret,
"token": token,
"token_type_hint": tokenTypeGint
}
response = requests.post(revokeURL, data=data)
print(response)
if response.status_code == 200:
return True
else:
# You can raise an HTTPException here if you want to handle the error differently
print(f"\n\nRESPONSE -> {response.text}\n\n")
with open("logs.txt", "w+") as f:
f.write(response.text)
print("ERROR")
raise HTTPException(status_code=response.status_code, detail=response.text)
@app.post("/revoke")
def revokeToken(token: str, clientID: str):
    client_secret = generate_token(bundleID=clientID)
with open("log1.txt", "w+") as f:
f.write(client_secret)
revoked = revoke_token_request(token=token, clientID=clientID, tokenTypeGint=tokenType, client_secret=client_secret)
return {"token_revoked": revoked}
apple_token_revocation_url = 'https://appleid.apple.com/auth/token'
@app.post('/revoke-token')
async def revoke_token(token: str):
if not token:
raise HTTPException(status_code=400, detail='Token is missing.')
try:
response = requests.post(
apple_token_revocation_url,
data={
'token': token,
'client_id': 'your_client_id',
'client_secret': 'your_client_secret',
}
)
print(f"STATUS CODE: {response.status_code}")
if response.status_code == 200:
return {'success': True}
else:
raise HTTPException(status_code=400, detail='Token revocation failed.')
except requests.RequestException as e:
raise HTTPException(status_code=500, detail='Internal server error.')
# if __name__ == "__main__":
# uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
# token = "eyJraWQiOiJZdXlYb1kiLCJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL2FwcGxlaWQuYXBwbGUuY29tIiwiYXVkIjoiY29tLmZvb3RiYWxsdGFya293LmNvbSIsImV4cCI6MTY5MTI2MTI0OCwiaWF0IjoxNjkxMTc0ODQ4LCJzdWIiOiIwMDE3NTguNmQ4ODZlMzQwNDkyNDA1ZThmODU0ZDkxZDRjZGMwNTguMTUyNyIsImNfaGFzaCI6InI2NjNWbTYxbTR0VkJfckxyRkZhSnciLCJlbWFpbCI6IjhmMnBza2t2dGhAcHJpdmF0ZXJlbGF5LmFwcGxlaWQuY29tIiwiZW1haWxfdmVyaWZpZWQiOiJ0cnVlIiwiaXNfcHJpdmF0ZV9lbWFpbCI6InRydWUiLCJhdXRoX3RpbWUiOjE2OTExNzQ4NDgsIm5vbmNlX3N1cHBvcnRlZCI6dHJ1ZX0.TkZtkIgljXOhCc1bp4YAx77yfZOBYz6iHDE3fxIi_l4oSjwOjM1xxUr9rkKEnOriJgHBChTop-CmGlM3zvh4taXwP_ZTb-xzQL9UeQtElM53f9l4w2j-PWfGrjxiX8Dyuyor-vbcxlKtUIcsVFIcDikHWQsI1iLYU40mn7x-399MdSFqyKqIarfk1P6TuBK3Fwf9EBYvPWrizXfFV1v5Kc-7p1mEbV3OChrMXEgLAvmhUWcFg95GKzhglbnHg2NOSWijeDfDFTuZC8EEPDplEhV86RzLi47jrksGReGQteVl8-LobLusceFrvRB-xAIWstEDl6al9SJ4dIAanGnBVA"
# bundleID = "com.footballtarkow.com"
# keyID = "73YATAJ963"
# client_secret = generate_tokenv2(bundleID)
# revoke_token_request(client_secret,bundleID,tokenType, token)
|
BoogieManN8/FootballTrainerHelper
|
main.py
|
main.py
|
py
| 5,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18132737377
|
# In this project, I'm going to desgin a user infterface for translating difference languages in real time
# Library Used: google trans, textblob
import googletrans
import textblob
import customtkinter
from tkinter import END
# Adding languages
language = googletrans.LANGUAGES
translator = googletrans.Translator()
lang_value = list(language.values())
lang_short = language.keys()
def Translate():
# Get the language to translate
to_language = to_language_menu.get()
# Perform the translation
from_text = from_language_input_box.get(1.0,END)
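    # Look up the googletrans language code that matches the selected language name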
for idx, val in language.items():
if val == to_language:
lan_ = idx
words = translator.translate(from_text, dest=lan_)
# Show the translation
to_language_input_box.delete(0.0, END)
to_language_input_box.insert(0.0,words.text)
# User interface
# Appearance settings
customtkinter.set_appearance_mode("System")
customtkinter.set_default_color_theme("green")
# Main application window
app = customtkinter.CTk()
app.title("Jay's Translator")
app.geometry("750x500")
# From-language label
from_language_menu = customtkinter.CTkLabel(master=app,text="Please Enter any language:")
from_language_menu.grid(row=0, column=0,
padx=50, pady=20,
)
# To-language selector
to_language_menu = customtkinter.CTkOptionMenu(master=app,values=lang_value)
to_language_menu.grid(row=0, column=1,
padx=50, pady=20,
)
# to-language input box(Inputbox)
to_language_input_box = customtkinter.CTkTextbox(app, width=150, height=150)
to_language_input_box.grid(row=1, column=1,
padx=50, pady=20,
)
# from-language input box(Inputbox)
from_language_input_box = customtkinter.CTkTextbox(app, width=150, height=150)
from_language_input_box.grid(row=1, column=0,
padx=50, pady=20,
)
# translate button
translate_button = customtkinter.CTkButton(app, text='Translate',command=Translate)
translate_button.grid(row=2, column=0,
padx=(180,0), pady=20, sticky='w',columnspan=3)
app.mainloop()
|
JayChen1060920909/Projects
|
Real Time Translation.py
|
Real Time Translation.py
|
py
| 2,191 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3844969159
|
#! /usr/bin/python2
import pefile
import os
import array
import math
import pickle
import time
# from sklearn.externals import joblib
import joblib
import sys
from .ModulePredict import data_extraction
from .XKendworld import pure_import
import pymongo
import hashlib
myclient = pymongo.MongoClient('DATABASE_URL')
mydb = myclient["DATABASE"]
mycol = mydb["COLLECTION"]
def checkpre(filepath):
clf = joblib.load(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'classifier/classifier.pkl'
))
with open('classifier/features.pkl', 'rb') as f:
features = pickle.load(f)
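    # Select the same features used at training time and classify the sample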
data = data_extraction(repathfile(filepath))
pe_features = list(map(lambda x:data[x], features))
res= clf.predict([pe_features])[0]
return (['Malicious', 'Legitimate'][res])
def hashcheck(filepath):
pe = pefile.PE(repathfile(filepath))
fp = open(repathfile(filepath),'rb')
data = fp.read()
return hashlib.md5(data).hexdigest()
def procedureXK001(filepath):
clf = joblib.load(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'classifierxk/classifierxk.pkl'
))
with open('classifierxk/featuresxk.pkl', 'rb') as f:
features = pickle.load(f)
data = pure_import(repathfile(filepath))
pe_features = list(map(lambda x:data[x], features))
res= clf.predict([pe_features])[0]
return (['Adware','Backdoor','Keylogger','Ransomware','Rootkit','Spyware','Trojan','Virus','Worm'][res])
def repathfile(filepath):
return os.path.join(os.path.dirname(os.path.realpath(__file__)) + filepath)
def savestorage(filepath):
return os.remove(repathfile(filepath))
def sample_extraction(filepath):
pe = pefile.PE(repathfile(filepath))
fp = open(repathfile(filepath),'rb')
data = fp.read()
y = []
y.append(len(data))
if pe.FILE_HEADER.Machine == 0x14C:
y.append("Architecture : 32 Bits Binary")
elif pe.FILE_HEADER.Machine == 0x8664:
y.append("Architecture : 64 Bits Binary")
y.append(hashlib.md5(data).hexdigest())
y.append(hashlib.sha1(data).hexdigest())
y.append(hashlib.sha256(data).hexdigest())
val = pe.FILE_HEADER.TimeDateStamp
y.append(time.asctime(time.gmtime(val)))
return y
def db_saving(filepath):
pe = pefile.PE(repathfile(filepath))
fp = open(repathfile(filepath),'rb')
data = fp.read()
dbstr= {}
dbstr["dataSize"] = len(data)
if pe.FILE_HEADER.Machine == 0x14C:
dbstr["arch"] = "32 Bits Binary"
elif pe.FILE_HEADER.Machine == 0x8664:
dbstr["arch"] = "64 Bits Binary"
dbstr["md5"] = hashlib.md5(data).hexdigest()
dbstr["sha1"] = hashlib.sha1(data).hexdigest()
dbstr["sha256"] = hashlib.sha256(data).hexdigest()
val = pe.FILE_HEADER.TimeDateStamp
dbstr["timestamp"] =time.asctime(time.gmtime(val))
if checkpre(filepath) == "Legitimate":
dbstr['status'] = "Legitimate"
else :
dbstr['status'] = "Malicious"
dbstr['type'] = procedureXK001(filepath)
x = mycol.insert_one(dbstr)
return dbstr
|
fadzniaidil/imawa
|
malwr/CheckingFile.py
|
CheckingFile.py
|
py
| 3,102 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28900645911
|
from keras.models import *
from keras.layers import *
import keras
from dlblocks.keras_utils import allow_growth , showKerasModel
allow_growth()
from dlblocks.pyutils import env_arg
import tensorflow as tf
from Utils import Trainer
class GiretTwoCell(keras.layers.Layer):
def __init__(self, cell_1 , cell_2 , nHidden , **kwargs):
self.cell_1 = cell_1
self.cell_2 = cell_2
self.nHidden = nHidden
self.state_size = [nHidden,nHidden]
super(GiretTwoCell, self).__init__(**kwargs)
def build(self, input_shape):
nHidden = self.nHidden
input_shape_n = ( input_shape[0] , input_shape[1]- 2 )
# print "pp", input_shape_n
# self.cell_1.build(input_shape_n)
# self.cell_2.build(input_shape_n)
self._trainable_weights += ( self.cell_1.trainable_weights )
self._trainable_weights += ( self.cell_2.trainable_weights )
self._non_trainable_weights += ( self.cell_1.non_trainable_weights )
self._non_trainable_weights += ( self.cell_2.non_trainable_weights )
self.built = True
def call(self, inputs, states):
nHidden = self.nHidden
gate_val_1 = inputs[ : , 0:1]
gate_val_2 = inputs[ : , 1:2]
inputs = inputs[ : , 2: ]
gate_val_1 = K.repeat_elements(gate_val_1 , nHidden , -1 ) # shape # bs , hidden
gate_val_2 = K.repeat_elements(gate_val_2 , nHidden , -1 ) # shape # bs , hidden
_ , [h1 , c1 ] = self.cell_1.call( inputs , states )
_ , [h2 , c2 ] = self.cell_2.call( inputs , states )
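        # Gated mixture of the two cells' states; the remaining weight keeps the previous state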
h = gate_val_1*h1 + gate_val_2*h2 + (1 - gate_val_1 - gate_val_2 )*states[0]
c = gate_val_1*c1 + gate_val_2*c2 + (1 - gate_val_1 - gate_val_2 )*states[1]
return h, [h , c ]
class GIRNet_SeqLab(Trainer):
def build_model(self):
config = self.config
embed = Embedding( self.config['vocab_size'] , self.config['embed_dim'] , mask_zero=True)
rnn_hi = LSTM( self.config['nHidden'] , return_sequences=True )
rnn_en = LSTM( self.config['nHidden'] , return_sequences=True )
# en
inp_en = Input(( self.config['sent_len'] , ))
x = embed(inp_en)
x = rnn_en( x )
out_en = TimeDistributed(Dense( config['n_class_en'] , activation='softmax'))(x)
# hi
inp_hi = Input(( self.config['sent_len'] , ))
x = embed(inp_hi)
x = rnn_hi( x )
out_hi = TimeDistributed(Dense( config['n_class_hi'] , activation='softmax'))(x)
cell_combined = GiretTwoCell( rnn_hi.cell , rnn_en.cell , self.config['nHidden'] )
inp_enhi = Input(( self.config['sent_len'] , ))
x = embed(inp_enhi )
x_att = x
x_att = Bidirectional(LSTM(32 , return_sequences=True))( x )
bider_h = x_att
x_att = TimeDistributed( Dense(3, activation='softmax') )(x_att)
x_att = Lambda(lambda x : x[... , 1: ])(x_att)
x = Concatenate(-1)([x_att , x ])
x = RNN(cell_combined , return_sequences=True )( x )
out_enhi = TimeDistributed(Dense( self.config['n_class_enhi'] , activation='softmax'))(x)
self.model = Model( [inp_hi , inp_en , inp_enhi ] , [ out_hi , out_en , out_enhi ] )
Trainer.build_model( self )
"""
config = {}
config['epochs'] = 4
config['dataset'] = "/tmp/postag_prepped.h5"
config['exp_name'] = 'pos_girnet_1l'
config['embed_dim'] = 50
config['vocab_size'] = 30003
config['nHidden'] = 100
config['sent_len'] = 150
config['n_class_en'] = 45
config['n_class_hi'] = 25
config['n_class_enhi'] = 19
model = GIRNet_SeqLab( exp_location="./ttt" , config_args = config )
model.train()
"""
|
divamgupta/mtl_girnet
|
sequence_labeling/girnet.py
|
girnet.py
|
py
| 3,916 |
python
|
en
|
code
| 6 |
github-code
|
6
|
17396933652
|
from django.http import HttpResponse
def hello(req):
return HttpResponse('Hello, World !!')
def hello_html(req):
src = []
src.append('<!doctype html>')
src.append('<html>')
src.append('<head>')
src.append('<meta charset="utf-8">')
src.append('<title>Hello, World</title>')
src.append('</head>')
src.append('<body>')
src.append('<h1 style="color:#F4A346;">Hello, World!!</h1>')
src.append('</body>')
src.append('</html>')
return HttpResponse('\n'.join(src))
from django.shortcuts import render
from django.views.generic import TemplateView
class HelloTemplateView(TemplateView):
template_name = 'hello.html'
def get(self, request, *args, **kwargs):
context = super(TemplateView, self).get_context_data(**kwargs)
return render(self.request, self.template_name, context)
|
RyoJ/hellopython
|
hello/views.py
|
views.py
|
py
| 853 |
python
|
en
|
code
| 0 |
github-code
|
6
|