seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|
39430188225
|
import json
import pandas as pd
import yfinance as yf
import pytz
from datetime import datetime, timedelta
import time
# Read forex pairs from JSON file
with open('forex_pairs.json', 'r') as file:
forex_pairs = json.load(file)
# Define the time frame and time zone
timeframe = '1h'
timezone = 'Africa/Nairobi'
# Define the number of periods for Moving Average calculation
period = 500
# Get the current time in Kenya time zone
kenya_tz = pytz.timezone(timezone)
current_time = datetime.now(kenya_tz)
# Define the start and end times for data retrieval
end_time = current_time
start_time = current_time - timedelta(hours=period)
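# With a 1h interval, a 500-hour lookback requests roughly 500 hourly candles (fewer over weekends/market closures).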
# Save the signals to a file
filename = 'MA_signals.txt'
with open(filename, 'w') as file:
file.write(f"Today: {end_time}\n\n")
# Generate MA signals for each forex pair
for pair in forex_pairs:
symbol = pair + '=X'
retry_count = 0
max_retries = 5
data = None
while data is None and retry_count < max_retries:
try:
# Download historical price data using yfinance
data = yf.download(symbol, start=start_time, end=end_time, interval=timeframe)
except Exception as e:
print("Error occurred:", e)
print("Retrying...")
retry_count += 1
time.sleep(1)
if data is not None and not data.empty:
# Extract the 'Close' prices from the downloaded data
close_prices = data['Close'].tolist()
# Define the window sizes for the moving averages
window_short = 20 # Short-term moving average window size
window_long = 50 # Long-term moving average window size
# Calculate the moving averages
moving_avg_short = sum(close_prices[-window_short:]) / window_short
moving_avg_long = sum(close_prices[-window_long:]) / window_long
# Get the latest closing price
latest_price = close_prices[-1]
entry_price = latest_price # Replace with your actual entry price
lot_size = 0.01 # Replace with your actual lot size
# Define risk-reward ratios
take_profit_ratio = 0.002 # 0.2%
stop_loss_ratio = 0.001 # 0.1%
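# Note: only the take-profit level is computed and written below; stop_loss_ratio, entry_price and lot_size are defined but not used.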
# Determine the buy, sell, or hold signal based on moving average crossovers and price relationship
if moving_avg_short > moving_avg_long and latest_price > moving_avg_short:
signal = "Buy"
elif moving_avg_short < moving_avg_long and latest_price < moving_avg_short:
signal = "Sell"
else:
signal = "Hold"
if signal == "Sell":
current_price = latest_price
take_profit = current_price - (current_price * take_profit_ratio)
elif signal == "Buy":
current_price = latest_price
take_profit = current_price + (current_price * take_profit_ratio)
else:
take_profit = 0.0
rounded_number = round(take_profit, 6)
file.write(f"MA Signals for {pair} ({timeframe} timeframe): {signal} take_profit: {rounded_number}\n")
else:
print("No data available to generate a signal.")
|
Nurain313/N1l8w5f9s2g5
|
Trash/ma.py
|
ma.py
|
py
| 3,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27577951391
|
from django.shortcuts import get_object_or_404, render, redirect, HttpResponseRedirect
from django.views.generic import TemplateView, UpdateView
from django.contrib.auth import get_user_model
from .models import Message
from django.urls import reverse
from django.contrib import messages
from review.models import Review
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .forms import EditProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
from django.template import RequestContext
def handler404(request, *args, **argv):
response = render(request, '404.html', {})
response.status_code = 404
return response
class HomePageView(TemplateView):
template_name = 'home.html'
def MessageView(request, username):
User = get_user_model()
message_user = get_object_or_404(User, username=username)
all_messages = message_user.messages.all()
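# Assumes Message.customuser (the ForeignKey to the user model) declares related_name='messages', which this reverse lookup relies on.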
if request.method == 'POST':
message = request.POST['message']
user_new_message = Message.objects.create(
customuser=message_user, text=message)
user_new_message.save()
messages.success(
request, 'Compliments dropped successfully, Create your own Account below to receive and give Anonymous compliments')
return redirect('account_signup')
return render(request, 'message.html', {'all_messages': all_messages,
'message_user': message_user,
})
class UserProfile(LoginRequiredMixin, TemplateView):
template_name = 'user_profile.html'
CustomUser = get_user_model()
@login_required(login_url='account_login')
def EditProfile(request, username):
user = get_object_or_404(get_user_model(), username=username)
if request.method == 'POST':
form = EditProfileForm(request.POST, instance=user)
if form.is_valid():
form.save()
# user_profile = user.get_absolute_url()
user_profile = reverse('user_profile')
return HttpResponseRedirect(user_profile)
form = EditProfileForm(instance=user)
return render(request, 'edit_profile.html', {'form': form})
@login_required(login_url='account_login')
def delete_message(request, m_id):
message_instance = get_object_or_404(Message, id=m_id)
message_instance_user = message_instance.customuser
if message_instance_user == request.user:
message_instance.delete()
return redirect(message_instance_user.get_absolute_url())
else:
return redirect('home')
@login_required(login_url='account_login')
def spam_message(request, m_id):
# get the Message filtering by the id provided in the url as *args
message_instance = get_object_or_404(Message, id=m_id)
# the person who created the message
message_instance_user = message_instance.customuser
if message_instance_user == request.user:
message_instance.text = 'The owner has marked this message as a spam.'
message_instance.save()
return redirect(message_instance_user.get_absolute_url())
else:
if request.user.is_authenticated:
url = 'home'
else:
url = 'account_login'
return redirect(url)
class AbouUs(TemplateView):
template_name = 'about.html'
class ContactUs(TemplateView):
template_name = 'contact.html'
def ReviewView(request):
reviews = Review.objects.all()
template_name = "review.html"
paginator = Paginator(reviews, 5)
page_number = request.GET.get('page')
try:
page_obj = paginator.page(page_number)
except PageNotAnInteger:
page_obj = paginator.page(1)
except EmptyPage:
page_obj = paginator.page(paginator.num_pages)
return render(request, template_name, {'page_obj': page_obj})
@login_required(login_url='account_login')
def SettingsView(request):
return render(request, 'settings.html', {})
def AddReview(request):
if request.method == 'POST':
name = request.POST['name']
review = request.POST['review']
occupation = request.POST['occupation']
new_review = Review.objects.create(
name=name, review=review, occupation=occupation)
new_review.save()
messages.success(request, 'Review submitted successfully')
return redirect('review')
return render(request, 'add_review.html', {})
|
Afeez1131/Anonymous-v1
|
anonymous/views.py
|
views.py
|
py
| 4,560 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23019499020
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import tensorflow as tf
TOWER_NAME = 'tower'
def sparsity_hook_forward(x_list):
"""Helper to create summaries of sparsity.
Creates a summary that measures the sparsity of a tensor.
Args:
x_list: a list of tensor
Returns:
a list of (tensor, sparsity) tuples
"""
retrieve_list = []
for x in x_list:
tensor_name = x.op.name
sparsity = tf.nn.zero_fraction(x)
tf.summary.scalar(tensor_name + '/sparsity', sparsity)
retrieve_list.append((x, sparsity))
return retrieve_list
def sparsity_hook_backward(loss, x_list):
"""Helper to create summaries for gradients of intermediate results in
backward pass.
Creates a summary that measures the sparsity of gradients of intermediate
results in backward pass.
Args:
loss: the loss
x_list: a list of Tensors
Returns:
a list of (gradient, sparsity) tuples
"""
gradient_list = tf.gradients(loss, x_list)
grad_retrieve_list = []
for g in gradient_list:
tensor_name = g.op.name
sparsity = tf.nn.zero_fraction(g)
tf.summary.scalar(tensor_name + '/sparsity', sparsity)
grad_retrieve_list.append((g, sparsity))
return grad_retrieve_list
def get_non_zero_index(a, shape):
raw_index = np.where(a != 0)
n_dim = len(raw_index)
assert n_dim == 4 or n_dim == 2
n_data = len(raw_index[0])
index_list = []
if n_dim == 4:
size_chw = shape[1].value * shape[2].value * shape[3].value
size_hw = shape[2].value * shape[3].value
size_w = shape[3].value
elif n_dim == 2:
size_c = shape[1].value
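# size_chw/size_hw/size_w (or size_c in the 2-D case) are row-major strides used to flatten each coordinate into a single linear index.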
for i in range(n_data):
if n_dim == 4:
index = raw_index[0][i] * size_chw + raw_index[1][i] * size_hw + raw_index[2][i] * size_w + raw_index[3][i]
elif n_dim == 2:
index = raw_index[0][i] * size_c + raw_index[1][i]
index_list.append(index)
return index_list
def calc_index_diff_percentage(index_list, ref_index_list, sparsity, all_counts):
percentage = 1.0
n_idx = float(len(index_list))
n_ref_idx = float(len(ref_index_list))
#print("Current non-zero data size: ", len(index_list))
#print("Previous non-zero data size: ", len(ref_index_list))
all_index = np.concatenate((index_list, ref_index_list), axis=0)
#print("Merged non-zero data size: ", len(all_index))
#print("Unique non-zero data size: ", len(np.unique(all_index, axis=0)))
unchanged_counts = len(all_index) - len(np.unique(all_index, axis=0))
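# Indices present in both lists appear twice in the concatenation, so (total length - number of unique values) counts the unchanged positions.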
diff_counts = (n_idx - unchanged_counts) + (n_ref_idx - unchanged_counts)
#print("Differenct counts: ", diff_counts)
percentage = float(diff_counts) / all_counts
return percentage
def feature_map_extraction(tensor, data_format, batch_index, channel_index):
# The feature map returned will be represented in a context of matrix
# sparsity (1 or 0), in which 1 means non-zero value, 0 means zero
n_dim = len(tensor.shape)
if n_dim == 4:
if data_format == "NCHW":
extracted_subarray = tensor[batch_index,channel_index,:,:]
elif data_format == "NHWC":
extracted_subarray = tensor[batch_index,:,:,channel_index]
if n_dim == 2:
extracted_subarray = tensor
extracted_subarray[np.nonzero(extracted_subarray)] = 1
return extracted_subarray
def zero_block_ratio_matrix(a, shape, block_size):
'''
Args:
a: a numpy n-d array (tensor)
shape: the tensor shape
Return:
The count of zero blocks
'''
n_dim = len(shape)
if n_dim == 2:
n_row = shape[0].value
n_col = shape[1].value
matrix = a
elif n_dim == 4:
n_row = shape[0].value
n_col = shape[1].value * shape[2].value * shape[3].value
matrix = a.reshape((n_row, n_col))
# Ceiling division: how many block rows/columns are needed to tile the matrix
n_block_row = (n_row + block_size - 1) // block_size
n_block_col = (n_col + block_size - 1) // block_size
n_blocks = n_block_row * n_block_col
if n_row % block_size != 0:
n_padded_zeros_in_row = block_size - n_row % block_size
else:
n_padded_zeros_in_row = 0
if n_col % block_size != 0:
n_padded_zeros_in_col = block_size - n_col % block_size
else:
n_padded_zeros_in_col = 0
if n_padded_zeros_in_row != 0 or n_padded_zeros_in_col != 0:
padded_zeros_in_row = np.zeros((n_padded_zeros_in_row, n_col))
padded_zeros_in_col = np.zeros((n_row+n_padded_zeros_in_row, n_padded_zeros_in_col))
padded_a = np.concatenate((np.concatenate((matrix, padded_zeros_in_row), axis=0),\
padded_zeros_in_col), axis=1)
else:
padded_a = matrix
# Reshape the tensor column-wise first
reshaped_a = padded_a.reshape((n_block_row, block_size, n_col + n_padded_zeros_in_col))
# Sum the elements within each block column-wise
summed_a_row = np.sum(reshaped_a, axis=1)
# Reshape the summed array to a new tensor row-wise
reshaped_a = summed_a_row.reshape((n_block_row, n_block_col, block_size))
# Sum the elements within each block row-wise
summed_a = np.sum(reshaped_a, axis=2)
zero_element_indices = np.where(summed_a == 0)
zero_counts = len(zero_element_indices[0])
return float(zero_counts)/float(summed_a.size)
|
shidong-ai/sparsity_analysis
|
imagenet/sparsity_util.py
|
sparsity_util.py
|
py
| 5,059 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71483369148
|
"""
Created by kevin-desktop, on the 18/02/2023
ADD sentiment columns to a sample Excel sheet.
"""
import numpy as np
import pandas as pd
import tqdm
from asba_model import run_models
path = "data/marco_sample.pkl"
df = pd.read_pickle(path)
dic = {}
max_nb_terms = 0
for row in tqdm.tqdm(df.itertuples(name=None)):
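# Positional access into the pickled frame (assumed layout): row[1] = title, row[4] = abstract, row[13] = comma-separated descriptor keywords.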
ind = row[0]
ti = row[1]
ab = row[4]
dtkey = row[13].split(", ")
sentiments = run_models(ab, dtkey)
if max_nb_terms < len(sentiments):
max_nb_terms = len(sentiments)
dic[ind] = sentiments
df_sent = pd.DataFrame.from_dict(dic, orient="index")
df_sent.rename(columns={i: f'Term{i + 1}' for i in range(0, max_nb_terms)}, inplace=True)
df = pd.concat([df, df_sent], axis=1)
lst_columns_w_term = [col for col in df.columns if 'Term' in col]
df.fillna('0', inplace=True)
df.to_excel("data/sample_marco.xlsx")
|
KnuxV/SentA
|
add_sentiment_to_dataframe.py
|
add_sentiment_to_dataframe.py
|
py
| 868 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71236766909
|
import struct
from enum import Enum
import typing as ty
from mate.net.nao_data import Data, DebugValue, DebugImage
NO_SUBSCRIBE_KEY = "none"
K = ty.TypeVar('K')
def split(predicate: ty.Callable[[K], bool], dictionary: ty.Dict[K, dict]):
dict1 = {}
dict2 = {}
for key in dictionary:
if predicate(key):
dict1[key] = dictionary[key]
else:
dict2[key] = dictionary[key]
return dict1, dict2
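# Illustrative example: split(lambda key: key.startswith("Motion"), some_dict) -> (entries whose key matches, all other entries).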
class DebugMsgType(Enum):
subscribe = 0
unsubscribe = 1
update = 2
request_list = 3
list = 4
subscribe_bulk = 5
image = 6
class ConfigMsgType(Enum):
set = 0 # Sets a given key to a given value (at runtime)
get_mounts = 1 # ask for send_mounts, containing all mounts
get_keys = 2 # ask for send_keys of a given key
save = 3 # saves the current config
send_keys = 4 # containing key, value
send_mounts = 5 # containing filename, key
class ConnectionStatusType(Enum):
disconnected = 0
connected = 1
connection_lost = 2
connection_refused = 3
class Message:
def __init__(self,
type: DebugMsgType,
body: str = "",
length: int = None,
version: int = 1):
self.type = type
self.body = body
self.length = length if length is not None else max(0, len(body))
self.version = version
def __str__(self):
return "{}|v{}|{}|{}|{}".format(
type(self).__name__, self.version, self.type.name, self.length,
self.body)
class ConfigMessage(Message):
def __init__(self,
type: DebugMsgType,
body: str = "",
length: int = None,
version: int = 1):
super(ConfigMessage, self).__init__(type, body, length, version)
@staticmethod
def header_from_bytes(msg):
if len(msg) >= 8:
fmt = "<4sBBH"
(msg_head, raw_version, raw_type, msg_size) = struct.unpack(
fmt, msg[:8])
return (msg_head, raw_version, ConfigMsgType(raw_type), msg_size)
def toBytes(self):
msg_format = "<4sBBH{}s".format(len(self.body))
return struct.pack(msg_format, b'CONF', self.version, self.type.value,
self.length, self.body.encode())
class DebugMessage(Message):
def __init__(self,
type: DebugMsgType,
body: str = "",
length: int = None,
version: int = 1):
super(DebugMessage, self).__init__(type, body, length, version)
def toBytes(self):
fmt = "<4sbbxxIxxxx{}s".format(self.length)
return struct.pack(fmt, b'DMSG', self.version, self.type.value,
self.length, self.body.encode())
@staticmethod
def header_from_bytes(msg):
if len(msg) >= 16:
fmt = "<4sbbxxIxxxx"
(msg_head, raw_version, raw_type, msg_size) = struct.unpack(
fmt, msg[:16])
return (msg_head, raw_version, DebugMsgType(raw_type), msg_size)
@staticmethod
def to_body(type, msg):
if type == DebugMsgType.image:
return msg
else:
return msg.decode(errors='ignore')
@staticmethod
def get_image(body):
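# Image body layout: three little-endian uint16 values (width, height, key_length), then the key string, then the raw image bytes.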
fmt = "<HHH"
(width, height, key_length) = struct.unpack(fmt, body[:6])
return body[6:6 + key_length].decode(), width, height, body[
6 + key_length:]
@staticmethod
def parse_data(d: dict) -> Data:
if d.get("isImage", False):
return DebugImage(
d["key"],
d.get("width", 0),
d.get("height", 0),
d.get("value", b'')
)
else:
return DebugValue(
d["key"],
d.get("timestamp", 0),
d.get("value", 0)
)
|
humanoid-robotics-htl-leonding/robo-ducks-core
|
tools/mate/mate/net/utils.py
|
utils.py
|
py
| 3,976 |
python
|
en
|
code
| 5 |
github-code
|
6
|
71396397949
|
# Import packages
import gpxpy
import numpy as np
# Read gpx-file
gpxFile = "yourfile.gpx"
gpx_file = open(gpxFile, 'r')
gpx = gpxpy.parse(gpx_file)
# Calculate speeds between points
speed = []
for track in gpx.tracks:
for segment in track.segments:
for point_no, point in enumerate(segment.points):
speed.append(point.speed_between(segment.points[point_no - 1]))
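# Note: for the first point (point_no == 0) this compares against segment.points[-1], i.e. the last point of the segment.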
# Upper limit is defined as 3x the 75% quantile, this can be tweaked according to the GPS errors encountered
upperLimit = 3*(np.quantile(speed, q = 0.75))
# Find elements above the threshold
indices = [
index for index, item in enumerate(speed)
if item > upperLimit
]
pointsRemoved = 0
while len(indices) > 0:
gpxpy.gpx.GPXTrackSegment.remove_point(gpx,indices[0])
pointsRemoved = pointsRemoved + 1
# Calculate speeds between points
speed = []
for track in gpx.tracks:
for segment in track.segments:
for point_no, point in enumerate(segment.points):
speed.append(point.speed_between(segment.points[point_no - 1]))
indices = [
index for index, item in enumerate(speed)
if item > upperLimit
]
print(pointsRemoved)
# Write the corrected GPX file
outputFile = gpxFile[:-4] + "_corrected.gpx"
with open(outputFile, "w") as f:
f.write( gpx.to_xml())
|
Haukiii/simpleGpxRunCorrector
|
simpleGPXrunCorrector.py
|
simpleGPXrunCorrector.py
|
py
| 1,329 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18485867132
|
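# NOTE: this snippets file relies on names imported elsewhere; under the Pimoroni Enviro+ example stack that would roughly be (assumption):
# from pms5003 import PMS5003, ReadTimeoutError
# from bme280 import BME280
# from enviroplus import gas
# import ltr559, ST7735
# from PIL import Image, ImageDraw
# plus a noise object and a get_cpu_temperature() helper defined in the examples.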
# Read values PMS5003 and return as dict
def read_pms5003(pms5003):
values = {}
try:
pm_values = pms5003.read() # int
values["pm1"] = pm_values.pm_ug_per_m3(1)
values["pm25"] = pm_values.pm_ug_per_m3(2.5)
values["pm10"] = pm_values.pm_ug_per_m3(10)
except ReadTimeoutError:
pms5003.reset()
pm_values = pms5003.read()
values["pm1"] = pm_values.pm_ug_per_m3(1)
values["pm25"] = pm_values.pm_ug_per_m3(2.5)
values["pm10"] = pm_values.pm_ug_per_m3(10)
return values
# Read values from BME280 and return as dict
def read_bme280(bme280):
# Compensation factor for temperature
comp_factor = 2.25
values = {}
cpu_temp = get_cpu_temperature()
raw_temp = bme280.get_temperature() # float
comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)
values["temperature"] = int(comp_temp)
values["pressure"] = round(
int(bme280.get_pressure() * 100), -1
) # round to nearest 10
values["humidity"] = int(bme280.get_humidity())
data = gas.read_all()
values["oxidised"] = int(data.oxidising / 1000)
values["reduced"] = int(data.reducing / 1000)
values["nh3"] = int(data.nh3 / 1000)
values["lux"] = int(ltr559.get_lux())
return values
disp = ST7735.ST7735(
port=0,
cs=ST7735.BG_SPI_CS_FRONT,
dc=9,
backlight=12,
rotation=90)
disp.begin()
img = Image.new('RGB', (disp.width, disp.height), color=(0, 0, 0))
draw = ImageDraw.Draw(img)
while True:
low, mid, high, amp = noise.get_noise_profile()
low *= 128
mid *= 128
high *= 128
amp *= 64
img2 = img.copy()
draw.rectangle((0, 0, disp.width, disp.height), (0, 0, 0))
img.paste(img2, (1, 0))
draw.line((0, 0, 0, amp), fill=(int(low), int(mid), int(high)))
disp.display(img)
|
BurnoutDV/AirWatcher
|
snippets.py
|
snippets.py
|
py
| 1,836 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30478183030
|
# Flatten a Linked List
class Node:
def __init__(self, head, bottom): # child and down are same
self.head = head
self.next = None
self.bottom = bottom
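# Flattening strategy: walk the top-level 'next' chain; whenever a node has a 'bottom' list,
# append that whole bottom chain after the current tail and advance the tail to its end.
# Runs in O(total nodes) time with O(1) extra space; the resulting order is not sorted by value.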
def flatten(head):
if head is None:
return None
temp = None
tail = head
while tail.next is not None:
tail = tail.next
curr = head
while curr is not tail:
if curr.bottom is not None:
tail.next = curr.bottom
temp = curr.bottom
while temp.next is not None:
temp = temp.next
tail = temp
curr = curr.next
return head
|
prabhat-gp/GFG
|
Linked List/Love Babbar/23_flatten_ll.py
|
23_flatten_ll.py
|
py
| 628 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5560963127
|
""" this is a mixture of the best #free twitter sentimentanalysis modules on github.
i took the most usable codes and mixed them into one because all of them
where for a linguistical search not usable and did not show a retweet or a full tweet
no output as csv, only few informations of a tweet, switching language
or even to compare linguistic features in tweets of two different langauges and etc. etc ...
special and many many thanks to https://github.com/vprusso/youtube_tutorials who showed on his
page a tutorial on how to do a sentimentanalysis with python
i did this for users with not much skills and linguistical background to help them to get a corpus of twitterdata
and to show them how to do a comparison between sentence based vs document based sentimentanalysis
credits to all AVAILABLE FREE AND SIMPLE sentimentanalysis programms (dec. 2019) on github.
many thanks to everybody and of course to github for making this exchange and usage possible!
cemre koc (Goethe University, Frankfurt) Python3.7
"""
from textblob import TextBlob #Sentimentlexikon FOR GERMAN (TEXTBLOB_DE import textblob_de
import re #modul for regular expressions
from tweepy import API #Twitter API modul for more info: look tweepy doc please!
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy  # used for different features of the program
import sys #only if wanted
import csv ##only if wanted (see rest of programm)
import pandas as pd #pandas for illustration
import authentification #access to twitter
import numpy as np #collection of tweets via numpy
import matplotlib.pyplot as plt #if needed (see below for ploting)
import numpy
#output screen (if you use pycharm for full screen view)
#only if needed
pd.set_option('display.max_rows', 1000000000000)
pd.set_option('display.max_columns', 1000000000)
pd.set_option('display.width', 100000000000)
pd.set_option('display.float_format', '{:20,.2f}'.format)
#for maximal OUTPUT!
#pd.set_option('display.max_colwidth', -1)
#TWITTER AUTHENTIFICTION (Twitter development)
#please fill for that the identification.py with your credentials!
#you need a twitter developer account for getting these informations
class TwitterAuthenticator():
def authenticate_twitter_app(self):
auth = OAuthHandler(authentification.CONSUMER_KEY, authentification.CONSUMER_SECRET)
auth.set_access_token(authentification.ACCESS_TOKEN, authentification.ACCESS_TOKEN_SECRET)
return auth
#TWITTER CLIENT SERVER
class TwitterClient():
def __init__(self, twitter_user=None):
self.auth = TwitterAuthenticator().authenticate_twitter_app()
self.twitter_client = API(self.auth)
self.twitter_user = twitter_user
def get_twitter_client_api(self):
return self.twitter_client
def get_user_timeline_tweets(self, num_tweets):
tweets = []
for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):
tweets.append(tweet)
return tweets
def get_friend_list(self, num_friends):
friend_list = []
for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):
friend_list.append(friend)
return friend_list
def get_home_timeline_tweets(self, num_tweets):
home_timeline_tweets = []
for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):
home_timeline_tweets.append(tweet)
return home_timeline_tweets
#TWITTER STREAMER FOR STREAMING AND LIVE TWEETS
class TwitterStreamer():
def __init__(self):
self.twitter_autenticator = TwitterAuthenticator()
def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
# AUTHENTIFICATION AND CONNECTION TO API
listener = TwitterListener(fetched_tweets_filename)
auth = self.twitter_autenticator.authenticate_twitter_app()
stream = Stream(auth, listener)
#you can use the stream.filter for defining the search for words/hasthags!!!!!!
#same sentimentanalysis works for words or hashtags!!!
stream.filter(track=hash_tag_list)
#TWITTER STREAM LISTENER FOR PRINTING TWEETS
class TwitterListener(StreamListener):
def __init__(self, fetched_tweets_filename):
self.fetched_tweets_filename = fetched_tweets_filename
def on_data(self, data):
try:
print(data)
with open(self.fetched_tweets_filename, 'a') as tf:
tf.write(data)
return True
except BaseException as e:
print("Error on_data %s" % str(e))
return True
def on_error(self, status):
if status == 420:
#OCCURS IF RATE LIMIT IS PASSED
return False
print(status)
#FOR ANALYZING CLEANING TWEETS (TO CONTENT)
class TweetAnalyzer():
# DELETE ALL UNNECESSARY CHARACTERS
def clean_tweet(self, tweet):
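# The regex removes @mentions, URLs and every character that is not alphanumeric, space or tab, then collapses whitespace.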
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
# SIMPLE SENTIMENT ANALYSIS VIA TEXTBLOB (English)
def analyze_sentiment(self, tweet):
analysis = TextBlob(self.clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
#You can use the following classification of polarity for sentence based analysis
#since i am using this programm for document level classification I left it with 1 -1 and 0
# if (polarity == 0):
# print("Neutral")
# elif (polarity > 0 and polarity <= 0.3):
# print("Schwach positiv")
# elif (polarity > 0.3 and polarity <= 0.6):
# print("positiv")
# elif (polarity > 0.6 and polarity <= 1.0):
# print("Stark positiv")
# elif (polarity > -0.3 and polarity <= 0):
# print("schwach negativ")
# elif (polarity > -0.6 and polarity <= -0.3):
# print("Negativ")
# elif (polarity >= -1.0 and polarity <= -0.6):
# print("Stark negativ")
def tweets_to_data_frame(self, tweets):
df = pd.DataFrame(data=[tweet.full_text for tweet in tweets], columns=['tweets'])
#THIS IS FOR RETWEETS OF A CERTAIN TWEET! BUT BE CARFUL ONLY A CERTAIN NUMBER OF TWEETS PER DAY!
#TWITTER RESTRICTION
#remove the """ for usage!
"""replies = []
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
for full_tweets in tweepy.Cursor(api.user_timeline, screen_name='GretaThunberg', timeout=999999).items(20):
for tweet in tweepy.Cursor(api.search, q='to:GretaThunberg', since_id=1203618558267273225,
result_type='recent',
timeout=999999).items(100):
if hasattr(tweet, 'in_reply_to_status_id_str'):
if (tweet.in_reply_to_status_id_str == full_tweets.id_str):
replies.append(tweet.text)
print("Tweet :", full_tweets.text.translate(non_bmp_map))
for elements in replies:
print("Replies :", elements)
# replies.clear()"""
#DATA SET VIA DATAFRAME TO SHOW WITH NUMPY
#YOU CAN PRINT GIVEN DATA LIKE LENGTH RETWEET NUMBER LANGUAGE etc. CHOSSE:
#['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__',
# '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '_
# _lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '_
# _sizeof__', '__str__', '__subclasshook__', '__weakref__', '_api', '_json', 'author', 'contributors', 'coordinates',
# 'created_at', 'destroy', 'display_text_range', 'entities', 'favorite', 'favorite_count', 'favorited',
# 'full_text', 'geo', 'id', 'id_str', 'in_reply_to_screen_name', 'in_reply_to_status_id', 'in_reply_to_status_id_str',
# 'in_reply_to_user_id', 'in_reply_to_user_id_str', 'is_quote_status', 'lang', 'parse', 'parse_list', 'place',
# 'possibly_sensitive', 'quoted_status', 'quoted_status_id', 'quoted_status_id_str', 'quoted_status_permalink',
# 'retweet', 'retweet_count', 'retweeted', 'retweets', 'source', 'source_url', 'truncated', 'user']
df['id'] = np.array([tweet.id for tweet in tweets])
df['len'] = np.array([len(tweet.full_text) for tweet in tweets])
df['date'] = np.array([tweet.created_at for tweet in tweets])
#df['source'] = np.array([tweet.source for tweet in tweets])
df['likes'] = np.array([tweet.favorite_count for tweet in tweets])
df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])
df["lang"] = ([tweet.lang for tweet in tweets])
#df["in_reply_to_status_id_str"] = ([tweet.replies for tweet in tweets])
return df
#Programm begins here!!
if __name__ == '__main__':
twitter_client = TwitterClient()
tweet_analyzer = TweetAnalyzer()
api = twitter_client.get_twitter_client_api()
#TWEET MODE EXTENDED FOR FULL TWEET OUTPUT!! RETWEETS STAY THE SAME!
#COUNT= LAST TWEET NUMBER OF USER (SCREEN NAME)
#HERE FOR GRETA THUNBERG, JUST DELETE AND TYPE ACCOUNT NAME TO CHANGE
#FOR EXAMPLE rtErdogan (for president of turkey), realDonaldTrump (for Trump) etc...
tweets = api.user_timeline(screen_name="GretaThunberg", count=200, tweet_mode="extended")
#print DATA
print(dir(tweets[0]))
#print(tweets[0].retweet_count) #retweet count print
#sentimentanalysis for printing it in a dataframe with the other informations!
df = tweet_analyzer.tweets_to_data_frame(tweets)
df['sentiment'] = np.array([tweet_analyzer.analyze_sentiment(tweet) for tweet in df['tweets']])
#AVARAGE LENGTH OF ALL TWEETS
#print(np.mean(df['len']))
# GET NUMBER OF LIKES
#print(np.max(df['likes']))
# GET NUMBER OF RETWEETS
#print(np.max(df['retweets']))
#EXAMPLE RETWEET STATUS OF A CERTAIN TWEET ID
#To get ID you need to look on your broswers URL of this CERTAIN TWEET
#print(np.max(df["lang"]))
##print(df.in_reply_to_status_id[1075801005504258061])
#ANYWAY THERE IS A RESTRICTION SINCE 2019 ON ONLY 200 TWEETS
#THANK YOU CAMBRIDGE ANALYTICA
print(df.head(200))
# DO CSV FILE (DELETE OR NAME IT NEW TO MAKE IT SEPRATE)
#df.to_csv('KocSentiment.csv')
#TIME SERIES FOR CHART VIEW!!! DONT FORGET TO TURN ON MATPLOT LIBRARY
#time_likes = pd.Series(data=df['len'].values, index=df['date'])
#time_likes.plot(figsize=(16, 4), color='r')
#plt.show()
#time_favs = pd.Series(data=df['likes'].values, index=df['date'])
#time_favs.plot(figsize=(16, 4), color='r')
#plt.show()
#time_retweets = pd.Series(data=df['retweets'].values, index=df['date'])
#time_retweets.plot(figsize=(16, 4), color='r')
#plt.show()
#LAYERED VIEW! FOR COMPARISON !!
#time_likes = pd.Series(data=df['likes'].values, index=df['date'])
#time_likes.plot(figsize=(16, 4), label="likes", legend=True)
#time_retweets = pd.Series(data=df['retweets'].values, index=df['date'])
#time_retweets.plot(figsize=(16, 4), label="retweets", legend=True)
#plt.show()
|
CemFFM/Sentimentanalysis
|
full_equipt_sentimentanalysis .py
|
full_equipt_sentimentanalysis .py
|
py
| 11,573 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7759594347
|
import os
import sys
from PyQt5.QtWidgets import QFrame, QSizePolicy
def isFloat(s: str):
try:
float(s)
except ValueError:
return False
return True
def isInt(s: str):
try:
int(s)
except ValueError:
return False
return True
def returnFloat(s: str):
try:
float(s)
except ValueError:
return -1
return float(s)
def returnInt(s: str):
try:
int(s)
except ValueError:
return -1
return int(s)
def convertPressureUnits(value: float, fromUnit: str = "Pa", toUnit: str = "Pa"):
conversionFactor = 1
if fromUnit == "Pa":
conversionFactor *= 1
elif fromUnit == "kPa":
conversionFactor *= 1000
elif fromUnit == "MPa":
conversionFactor *= 1000000
elif fromUnit == "bar":
conversionFactor *= 100000
if toUnit == "Pa":
conversionFactor /= 1
elif toUnit == "kPa":
conversionFactor /= 1000
elif toUnit == "MPa":
conversionFactor /= 1000000
elif toUnit == "bar":
conversionFactor /= 100000
return value * conversionFactor
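# Illustrative example: convertPressureUnits(1.5, "bar", "kPa") == 150.0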
def convertTimeUnits(value: float, fromUnit: str = "s", toUnit: str = "s"):
conversionFactor = 1
if fromUnit == "s":
conversionFactor *= 1
elif fromUnit == "min":
conversionFactor *= 60
elif fromUnit == "h":
conversionFactor *= 3600
elif fromUnit == "ms":
conversionFactor /= 1000
if toUnit == "s":
conversionFactor /= 1
elif toUnit == "min":
conversionFactor /= 60
elif toUnit == "h":
conversionFactor /= 3600
elif toUnit == "ms":
conversionFactor *= 1000
return value * conversionFactor
def find_nth(haystack, needle, n):
start = haystack.find(needle)
while start >= 0 and n > 1:
start = haystack.find(needle, start+len(needle))
n -= 1
return start
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
class QHLine(QFrame):
def __init__(self):
super().__init__()
self.setMinimumWidth(1)
self.setFixedHeight(20)
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
return
|
timhenning1997/Serial-Port-Monitor
|
UsefulFunctions.py
|
UsefulFunctions.py
|
py
| 2,539 |
python
|
en
|
code
| 2 |
github-code
|
6
|
34180911472
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 18:27:58 2020
@author: Kollarlab
"""
# import comtypes
import os
import time
#import subprocess
#import re
import scipy
import pylab
#import tarfile
#import struct
#import glob
import numpy
import time
#import pickle
#import datetime
#import itertools
import sys
from Acqiris import Acqiris
def get_color(ind):
colorlist = ['firebrick', 'mediumblue', 'deepskyblue', 'darkgoldenrod', 'forestgreen', 'indigo', 'dodgerblue']
nind = numpy.mod(ind, len(colorlist))
return colorlist[nind]
hardwareAddress = "PXI23::0::0::INSTR"
IVIbinPath = "C:\\Program Files\\IVI Foundation\\IVI\\Bin\\"
sys.path.append(IVIbinPath)
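# NOTE: this scratch script assumes a digitizer object and a list of trigger delays are defined elsewhere,
# e.g. (assumption): card = Acqiris(hardwareAddress) and delays = [0.0]; `delay` below would be the current entry.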
card.activeChannels = [1,2]
card.timeout = 120
avs = 1
segs = 1
card.samples = 1024*125
card.segments = segs
card.averages = 1
#
#card.SetParams() #here is the danger of not using properties to set everything:
##without this call, card.samples isn't actually pushed to the hardware
dataMat1 = numpy.zeros((len(delays), card.samples))
dataMat1_av = numpy.zeros((len(delays), card.samples))
dataMat2 = numpy.zeros((len(delays), card.samples))
dataMat2_av = numpy.zeros((len(delays), card.samples))
tMat = numpy.zeros((len(delays), card.samples))
#pretake data to set everything up for test
card.averages = 1
card.triggerDelay = 0
card.SetParams() #pushes defaults to the card if the fields haven't been edited
card.ArmAndWait() #initiates aquisition and calibrates if need be
if len(card.activeChannels) == 1:
data1 = card.ReadAllData() #read data for the active channels.
else:
data1, data2 = card.ReadAllData() #read data for the active channels.
t0 = time.time()
for ind in range(0, avs):
card.averages = 1
card.triggerDelay = 0
card.SetParams() #pushes defaults to the card if the fields haven't been edited
card.ArmAndWait() #initiates aquisition and calibrates if need be
if len(card.activeChannels) == 1:
data1 = card.ReadAllData() #read data for the active channels.
else:
data1, data2 = card.ReadAllData() #read data for the active channels.
ts = ( delay + scipy.arange(0, len(data1),1.)*1/card.sampleRate)
t1 = time.time()
card.averages = 50
card.triggerDelay = 0
card.SetParams() #pushes defaults to the card if the fields haven't been edited
card.ArmAndWait() #initiates aquisition and calibrates if need be
if len(card.activeChannels) == 1:
avData1 = card.ReadAllData() #read data for the active channels.
else:
avData1, avData2 = card.ReadAllData() #read data for the active channels.
t2 = time.time()
d1 = numpy.round(t1-t0, 3)
d2 = numpy.round(t2-t1, 3)
print('segments = ' + str(segs))
print('averages = ' + str(avs))
print('time for ' + str(avs) + ' single (possibly multiseg) runs = ' + str(d1) )
print('time for ' + str(avs) + ' averages on card = ' + str(d2) )
|
MRitter95/Kollar-Lab
|
Old_scripts_delete_20220804/Control/Acqiris_development/CdriverPythonWrapper/Acqiris_testScript_Averagertiming.py
|
Acqiris_testScript_Averagertiming.py
|
py
| 2,844 |
python
|
en
|
code
| 2 |
github-code
|
6
|
38053163844
|
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.sitemaps.views import sitemap
from django.contrib.sitemaps import Sitemap
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = i18n_patterns('',
url(r'^$', 'core.views.home', name='home'),
url(r'^new/$', 'core.views.new', name='new'),
url(r'^statistics/$', 'core.views.statistics', name='statistics'),
url(r'^(?P<key>\w+-\w+-\w+-\w+-\w+)$', 'core.views.tip_redir'),
url(r'^(?P<key>\w+-\w+-\w+-\w+-\w+)/$', 'core.views.tip'),
url(r'^(?P<key>\w+-\w+-\w+)$', 'core.views.tip_redir', name='tip_redir'),
url(r'^(?P<key>\w+-\w+-\w+)/$', 'core.views.tip', name='tip'),
url(r'^gratuity-example/$', 'core.views.tips_example', name='tips_example'),
url(r'^w/(?P<key>\w+)/$', 'core.views.wallet', name='wallet'),
url(r'^w/(?P<key>\w+)/comments/$', 'core.views.comments', name='comments'),
url(r'^w/(?P<key>\w+)/pdf/$', 'core.views.download', {'format': "pdf"}, name='download'),
url(r'^w/(?P<key>\w+)/pdf-us/$', 'core.views.download', {'format': "pdf", "page_size":"US"}, name='download'),
url(r'^w/(?P<key>\w+)/odt/$', 'core.views.download', {'format': "odt"}, name='download'),
url(r'^w/(?P<key>\w+)/png/$', 'core.views.download', {'format': "png"}, name='download'),
url(r'^w/(?P<key>\w+)/wajax/$', 'core.views.wajax', name='wajax'),
)
urlpatterns += patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'qrcode/(?P<key>\w+)/$','core.views.qrcode_view', name='qrcode'),
)
if settings.BCTIP_MOD:
import bctip.urls_custom
urlpatterns += bctip.urls_custom.urlpatterns
|
norn/bctip
|
bctip/urls.py
|
urls.py
|
py
| 1,711 |
python
|
en
|
code
| 13 |
github-code
|
6
|
19025440782
|
import os
import sys
import json
import torch
import traceback
# def returnFalse():
# return False
# torch.cuda.is_available = returnFalse
from scipy.io import wavfile
# from python.speaker_diarization.pipeline.speaker_diarization import SpeakerDiarization
class Diarization(object):
def __init__(self, logger, PROD, device, models_manager):
super(Diarization, self).__init__()
self.logger = logger
self.PROD = PROD
self.models_manager = models_manager
self.device = device
self.ckpt_path = None
# self.model = torch.hub.load('pyannote/pyannote-audio', 'dia')
self.model = load_model(f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/hub/')
# self.logger.info(str(self.model))
# self.model = BaseModel.from_pretrained(f'{"./resources/app" if self.PROD else "."}/python/audio_source_separation/assModel.pt')
self.isReady = True
def set_device (self, device):
self.device = device
self.model = self.model.to(device)
self.model.device = device
def runTask (self, data, websocket=None):
return self.diarize(data, websocket)
async def diarize (self, data, websocket):
inPath = data["inPath"]
mergeSameOutput = data["toolSettings"]["mergeSingleOutputFolder"]
outputAudacityLabels = data["toolSettings"]["outputAudacityLabels"]
if websocket is not None:
await websocket.send(json.dumps({"key": "task_info", "data": "Reading file"}))
audacity_file = []
# self.logger.info(f'diarization | {data["inPath"]} | {data["outputAudacityLabels"]} | {data} | {outputAudacityLabels}')
out_folder = f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/output/'
try:
rate, data = wavfile.read(inPath)
if websocket is not None:
await websocket.send(json.dumps({"key": "task_info", "data": "Splitting file"}))
diarization = self.model({'audio': inPath})
out_file_counter = 0
total_tracks = len(diarization._tracks)
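# NOTE: _tracks is a private attribute of pyannote's Annotation object; it is only used here to report progress counts.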
for turn, _, speaker in diarization.itertracks(yield_label=True):
if websocket is not None:
await websocket.send(json.dumps({"key": "task_info", "data": f'Outputting chunks: {out_file_counter+1}/{total_tracks}'}))
start_s = turn.start
end_s = turn.end
# Skip audio chunks less than 1 second long
if end_s-start_s < 1:
continue
if outputAudacityLabels:
audacity_file.append('{:.6f}\t{:.6f}\tSpeaker_{}'.format(start_s, end_s, speaker))
split_data = data[int(start_s*rate):int(end_s*rate)]
folder_name = ".".join(inPath.split("/")[-1].split(".")[:-1]).replace(".", "_")
if not mergeSameOutput:
out_folder = f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/output/{folder_name}/speaker {speaker}'
os.makedirs(out_folder, exist_ok=True)
if mergeSameOutput:
wavfile.write(f'{out_folder}/{folder_name}_{str(out_file_counter).zfill(7)}.wav', rate, split_data)
else:
wavfile.write(f'{out_folder}/{folder_name}_{speaker}_{str(out_file_counter).zfill(7)}.wav', rate, split_data)
out_file_counter += 1
except:
self.logger.info(traceback.format_exc())
raise
if outputAudacityLabels:
with open(f'{out_folder}/audacity.txt', "w+", encoding="utf8") as f:
f.write("\n".join(audacity_file))
if websocket is not None:
await websocket.send(json.dumps({"key": "tasks_next"}))
#
#
#
# This is a huge mess, but pyannote very much wants models to be downloaded from the internet
# For future-proofing reasons, I don't want that, so I had to change a lot of the library code,
# and the way the models were loaded, such that torchhub isn't used, and instead the local model files are used.
#
#
#
def load_model (_HUB_DIR):
import typing
import shutil
import functools
import yaml
import zipfile
from pyannote.audio.features import Pretrained as _Pretrained
from pyannote.pipeline import Pipeline as _Pipeline
dependencies = ['pyannote.audio', 'torch']
_HUB_REPO = 'https://github.com/pyannote/pyannote-audio-hub'
_ZIP_URL = f'{_HUB_REPO}/raw/master/{{kind}}s/{{name}}.zip'
_PRETRAINED_URL = f'{_HUB_REPO}/raw/master/pretrained.yml'
# path where pre-trained models and pipelines are downloaded and cached
# _HUB_DIR = f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/hub'
# _HUB_DIR = pathlib.Path(os.environ.get("PYANNOTE_AUDIO_HUB",
# "~/.pyannote/hub")).expanduser().resolve()
# download pretrained.yml if needed
_PRETRAINED_YML = _HUB_DIR + 'pretrained.yml'
print(f'_PRETRAINED_YML, {_PRETRAINED_YML}')
# if not _PRETRAINED_YML.exists():
# msg = (
# f'Downloading list of pretrained models and pipelines '
# f'to "{_PRETRAINED_YML}".'
# )
# print(msg)
# from pyannote.audio.utils.path import mkdir_p
# mkdir_p(_PRETRAINED_YML.parent)
# torch.hub.download_url_to_file(_PRETRAINED_URL,
# _PRETRAINED_YML,
# progress=True)
def _generic(name: str,
duration: float = None,
step: float = 0.25,
batch_size: int = 32,
device: typing.Optional[typing.Union[typing.Text, torch.device]] = None,
pipeline: typing.Optional[bool] = None,
force_reload: bool = False) -> typing.Union[_Pretrained, _Pipeline]:
"""Load pretrained model or pipeline
Parameters
----------
name : str
Name of pretrained model or pipeline
duration : float, optional
Override audio chunks duration.
Defaults to the one used during training.
step : float, optional
Ratio of audio chunk duration used for the internal sliding window.
Defaults to 0.25 (i.e. 75% overlap between two consecutive windows).
Reducing this value might lead to better results (at the expense of
slower processing).
batch_size : int, optional
Batch size used for inference. Defaults to 32.
device : torch.device, optional
Device used for inference.
pipeline : bool, optional
Wrap pretrained model in a (not fully optimized) pipeline.
force_reload : bool
Whether to discard the existing cache and force a fresh download.
Defaults to use existing cache.
Returns
-------
pretrained: `Pretrained` or `Pipeline`
Usage
-----
>>> sad_pipeline = torch.hub.load('pyannote/pyannote-audio', 'sad_ami')
>>> scores = model({'audio': '/path/to/audio.wav'})
"""
# print("name", name)
model_exists = name in _MODELS
pipeline_exists = name in _PIPELINES
# print(f'PRE model_exists, {model_exists}')
# print(f'PRE pipeline_exists, {pipeline_exists}')
if model_exists and pipeline_exists:
# print(f'model_exists and pipeline_exists')
# pass
# if pipeline is None:
# msg = (
# f'Both a pretrained model and a pretrained pipeline called '
# f'"{name}" are available. Use option "pipeline=True" to '
# f'load the pipeline, and "pipeline=False" to load the model.')
# raise ValueError(msg)
if pipeline:
kind = 'pipeline'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _PIPELINES[name]
return_pipeline = True
else:
kind = 'model'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _MODELS[name]
return_pipeline = False
elif pipeline_exists:
# elif False:
# print(f'pipeline_exists')
# pass
# pass
if pipeline is None:
pipeline = True
if not pipeline:
msg = (
f'Could not find any pretrained "{name}" model. '
f'A pretrained "{name}" pipeline does exist. '
f'Did you mean "pipeline=True"?'
)
raise ValueError(msg)
kind = 'pipeline'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _PIPELINES[name]
return_pipeline = True
elif model_exists:
# print(f'model_exists')
# pass
if pipeline is None:
pipeline = False
kind = 'model'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _MODELS[name]
return_pipeline = pipeline
if name.startswith('emb_') and return_pipeline:
msg = (
f'Pretrained model "{name}" has no associated pipeline. Use '
f'"pipeline=False" or remove "pipeline" option altogether.'
)
raise ValueError(msg)
else:
# print("ERROR====================")
pass
# msg = (
# f'Could not find any pretrained model nor pipeline called "{name}".'
# )
# raise ValueError(msg)
# if sha256 is None:
# msg = (
# f'Pretrained {kind} "{name}" is not available yet but will be '
# f'released shortly. Stay tuned...'
# )
# raise NotImplementedError(msg)
pretrained_dir = _HUB_DIR + f'/{kind}s'
pretrained_subdir = pretrained_dir + f'/{name}'
pretrained_zip = pretrained_dir + f'/{name}.zip'
# import pathlib
# pretrained_subdir = pathlib.Path(pretrained_subdir)
# if not pretrained_subdir.exists() or force_reload:
# if pretrained_subdir.exists():
# shutil.rmtree(pretrained_subdir)
# from pyannote.audio.utils.path import mkdir_p
# mkdir_p(pretrained_zip.parent)
# try:
# msg = (
# f'Downloading pretrained {kind} "{name}" to "{pretrained_zip}".'
# )
# print(msg)
# torch.hub.download_url_to_file(zip_url,
# pretrained_zip,
# hash_prefix=sha256,
# progress=True)
# except RuntimeError as e:
# shutil.rmtree(pretrained_subdir)
# msg = (
# f'Failed to download pretrained {kind} "{name}".'
# f'Please try again.')
# raise RuntimeError(msg)
# # unzip downloaded file
# with zipfile.ZipFile(pretrained_zip) as z:
# z.extractall(path=pretrained_dir)
if kind == 'model':
params_yml = None
params_yml_parent = None
params_yml_c1 = os.listdir(pretrained_subdir)
for c1 in params_yml_c1:
params_yml_c2 = [fold for fold in os.listdir(f'{pretrained_subdir}/{c1}') if os.path.isdir(f'{pretrained_subdir}/{c1}/{fold}')]
for c2 in params_yml_c2:
params_yml_c3 = os.listdir(f'{pretrained_subdir}/{c1}/{c2}')
for c3 in params_yml_c3:
params_yml_c4 = os.listdir(f'{pretrained_subdir}/{c1}/{c2}/{c3}')
for c4 in params_yml_c4:
if c4=="params.yml":
params_yml_parent = f'{pretrained_subdir}/{c1}/{c2}/{c3}'
params_yml = f'{pretrained_subdir}/{c1}/{c2}/{c3}/params.yml'
break
# print(f'----------params_yml, {params_yml}')
# print(f'----------params_yml_parent, {params_yml_parent}')
# params_yml, = pretrained_subdir.glob('*/*/*/*/params.yml')
# pretrained = _Pretrained(validate_dir=params_yml.parent,
pretrained = _Pretrained(validate_dir=params_yml_parent,
duration=duration,
step=step,
batch_size=batch_size,
device=device)
# if return_pipeline:
# if name.startswith('sad_'):
# from pyannote.audio.pipeline.speech_activity_detection import SpeechActivityDetection
# print("HERE PRE SpeechActivityDetection")
# pipeline = SpeechActivityDetection(scores=pretrained)
# print("HERE POST")
# elif name.startswith('scd_'):
# from pyannote.audio.pipeline.speaker_change_detection import SpeakerChangeDetection
# print("HERE PRE SpeakerChangeDetection")
# pipeline = SpeakerChangeDetection(scores=pretrained)
# print("HERE POST")
# elif name.startswith('ovl_'):
# from pyannote.audio.pipeline.overlap_detection import OverlapDetection
# print("HERE PRE OverlapDetection")
# pipeline = OverlapDetection(scores=pretrained)
# print("HERE POST")
# else:
# # this should never happen
# msg = (
# f'Pretrained model "{name}" has no associated pipeline. Use '
# f'"pipeline=False" or remove "pipeline" option altogether.'
# )
# raise ValueError(msg)
# return pipeline.load_params(params_yml)
return pretrained
elif kind == 'pipeline':
from pyannote.audio.pipeline.utils import load_pretrained_pipeline
params_yml = None
params_yml_parent = None
# print(f'START pretrained_subdir, {pretrained_subdir}')
# params_yml_c1 = os.listdir(pretrained_subdir)
params_yml_c1 = [fold for fold in os.listdir(f'{pretrained_subdir}') if os.path.isdir(f'{pretrained_subdir}/{fold}')]
for c1 in params_yml_c1:
# params_yml_c2 = os.listdir(f'{pretrained_subdir}/{c1}'.replace("//","/"))
params_yml_c2 = [fold for fold in os.listdir(f'{pretrained_subdir}/{c1}') if os.path.isdir(f'{pretrained_subdir}/{c1}/{fold}')]
for c2 in params_yml_c2:
params_yml_c3 = os.listdir(f'{pretrained_subdir}/{c1}/{c2}')
for c3 in params_yml_c3:
if c3=="params.yml":
params_yml_parent = f'{pretrained_subdir}/{c1}/{c2}'
params_yml = f'{pretrained_subdir}/{c1}/{c2}/params.yml'
break
# params_yml, *_ = pretrained_subdir.glob('*/*/params.yml')
# return load_pretrained_pipeline(params_yml.parent)
# print("=== ptp PRE")
ptp = load_pretrained_pipeline(params_yml_parent)
# print("=== ptp POST")
return ptp
with open(_PRETRAINED_YML, 'r') as fp:
_pretrained = yaml.load(fp, Loader=yaml.SafeLoader)
# print(f'_pretrained, {_pretrained}')
___stuff = {}
_MODELS = _pretrained['models']
# print(f'_MODELS, {_MODELS}')
for name in _MODELS:
# print(f'_MODELS name, {name}')
# locals()[name] = functools.partial(_generic, name)
___stuff[name] = functools.partial(_generic, name)
_PIPELINES = _pretrained['pipelines']
# print(f'_PIPELINES, {_PIPELINES}')
for name in _PIPELINES:
# print(f'_PIPELINES name, {name}')
# locals()[name] = functools.partial(_generic, name)
___stuff[name] = functools.partial(_generic, name)
_SHORTCUTS = _pretrained['shortcuts']
# print(f'_SHORTCUTS, {_SHORTCUTS}')
for shortcut, name in _SHORTCUTS.items():
# print(f'_SHORTCUTS name, {name}')
# locals()[shortcut] = locals()[name]
___stuff[shortcut] = ___stuff[name]
return ___stuff["dia"]()
|
DanRuta/xva-trainer
|
python/speaker_diarization/model.py
|
model.py
|
py
| 16,869 |
python
|
en
|
code
| 78 |
github-code
|
6
|
19521291101
|
'''
Created on Oct 15, 2011
@author: waxwing
'''
import os, sys, commands
from com.android.monkeyrunner import MonkeyRunner
if __name__ == '__main__':
blacklist = ['main.py', 'monkeytools.py']
monkeys = os.listdir(os.path.abspath(os.path.dirname(sys.argv[0])))
monkeys = filter (lambda m: m[-3:] == '.py', monkeys)
monkeys = filter (lambda m: m not in blacklist, monkeys)
m = MonkeyRunner.choice('Select a script', monkeys)
if m != -1:
commands.getoutput('monkeyrunner ' + monkeys[m])
|
oliver32767/android-tools
|
monkeys/main.py
|
main.py
|
py
| 523 |
python
|
en
|
code
| 3 |
github-code
|
6
|
72009646268
|
def shoppingTime(memberId='', money=0):  # defaults let the no-argument driver call below run without a TypeError
item = [
{'name': 'Sepatu Stacattu', 'price': 1500000},
{'name': 'Baju Zoro', 'price': 500000},
{'name': 'Baju H&H', 'price': 250000},
{'name': 'Sweater Uniklooh', 'price': 175000},
{'name': 'Casing Handphone', 'price': 50000},
]
if memberId == '':
return 'anda bukan member!'
elif money < 50000:
return 'uang anda tidak cukup'
shopping_result = {
'memberId': memberId,
'money': money,
'listPurchased': [],
'changeMoney': money
}
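# Walk the catalogue in listed order, buying each item the remaining change can still afford.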
for listItem in item:
if shopping_result['changeMoney'] >= listItem['price']:
shopping_result['listPurchased'].append(listItem['name'])
shopping_result['changeMoney'] -= listItem['price']
return shopping_result
# Driver Code
print(shoppingTime('1820RzKrnWn08', 2475000))
'''
{ memberId: '1820RzKrnWn08',
money: 2475000,
listPurchased:
[ 'Sepatu Stacattu',
'Baju Zoro',
'Baju H&N',
'Sweater Uniklooh',
'Casing Handphone' ],
changeMoney: 0 }
'''
print(shoppingTime('82Ku8Ma742', 170000))
'''
{ memberId: '82Ku8Ma742',
money: 170000,
listPurchased:
[ 'Casing Handphone' ],
changeMoney: 120000 }
'''
print(shoppingTime('', 2475000))
# Mohon maaf, toko X hanya berlaku untuk member saja
print(shoppingTime('234JdhweRxa53', 15000))
# Mohon maaf, uang tidak cukup
print(shoppingTime())
# Mohon maaf, toko X hanya berlaku untuk member saja
|
iswanulumam/cp-alta
|
python/6-array-of-dictionary/3-shopping-time.py
|
3-shopping-time.py
|
py
| 1,416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35910630002
|
from typing import Dict
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from app.routers import visualize
app = FastAPI(
title="Model Visualizer",
description="",
version="0.10.0",
)
app.mount("/static", StaticFiles(directory="/home/lirakr/repos/rnd-mermaid/app/static"), name="static")
@app.get(
"/",
summary="Status",
responses={200: {"content": {"application/json": {"example": {"status": "OK"}}}}},
)
async def index() -> Dict[str, str]:
"""
Show application status and docker image details
"""
return {"status": "OK"}
app.include_router(visualize.router)
|
LirakR/rnd-mermaid
|
app/main.py
|
main.py
|
py
| 684 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21088218856
|
from copy import deepcopy
import numpy as np
from action_execution.config_keys import ExecutionConfigKeys
from action_execution.execution_models.model import ExecutionModelBase
from action_execution.geometry.vector import Vector2, Vector3
from action_execution.geometry.pose import Pose3
class FreeSpace(ExecutionModelBase):
def __init__(self, action_id='', data_logger=None, **kwargs):
super(FreeSpace, self).__init__(model_id='free_space')
self.frame_id = ''
self.manipulated_object = None
self.objects = list()
self.surface = None
if ExecutionConfigKeys.FRAME_ID in kwargs:
self.frame_id = kwargs[ExecutionConfigKeys.FRAME_ID]
if ExecutionConfigKeys.MANIPULATED_OBJECT in kwargs:
self.manipulated_object = kwargs[ExecutionConfigKeys.MANIPULATED_OBJECT]
if ExecutionConfigKeys.OBJECTS_ON_SURFACE in kwargs:
self.objects = kwargs[ExecutionConfigKeys.OBJECTS_ON_SURFACE]
if ExecutionConfigKeys.SURFACE in kwargs:
self.surface = kwargs[ExecutionConfigKeys.SURFACE]
def generate_data(self, number_of_samples):
'''Generates a set of samples
Keyword arguments:
number_of_samples -- number of samples to generate
Returns:
candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
'''
candidate_poses = list()
# we get the z-projections of the objects that are already on the surface
object_polygons = list()
for obj in self.objects:
polygon = obj.get_z_projection()
object_polygons.append(polygon)
# we generate samples in the free space on the surface, i.e.
# we ignore samples that cause collisions with the other objects
collected_samples = 0
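# Rejection sampling: keep drawing random planar poses until number_of_samples collision-free candidates are found.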
while collected_samples < number_of_samples:
obj_copy = deepcopy(self.manipulated_object)
# we generate a random position on the surface
position = Vector2.random_vector(self.surface.bbox.min, self.surface.bbox.max)
obj_copy.planar_translate_to(position)
# we generate a random orientation around z;
# we don't rotate the object around the other axes
z_orientation = np.random.uniform(0., 2.*np.pi)
obj_copy.rotate_around_z(z_orientation)
# we check if the object would collide with any of the other
# objects in the newly generated pose
manipulated_object_polygon = obj_copy.get_z_projection()
collision = False
for obj in object_polygons:
if obj.intersects(manipulated_object_polygon):
collision = True
break
# we take the generated pose as a valid candidate if the
# object doesn't collide with any of the objects on the surface
if not collision:
position = Vector3(position.x,
position.y,
self.surface.pose.position.z)
orientation = Vector3(obj_copy.pose.orientation.x,
obj_copy.pose.orientation.y,
z_orientation)
pose = Pose3(self.manipulated_object.pose.frame_id, position, orientation)
candidate_poses.append(pose)
collected_samples += 1
success_probabilities = np.ones(number_of_samples) / (number_of_samples * 1.)
return {'candidate_poses': candidate_poses,
'success_probabilities': success_probabilities}
def process_data(self, data):
'''Recalculates the success probabilities of the given data
by only counting poses that are in free space. Returns
a new data dictionary with the updated probabilities.
Keyword arguments:
data -- a dictionary with two keys - "candidate_poses" and "success_probabilities",
each of which is a list
'''
candidate_poses = data['candidate_poses']
success_probabilities = np.array(data['success_probabilities'])
free_pose = np.ones(len(success_probabilities), dtype=bool)
# we get the z-projections of the objects that are already on the surface
object_polygons = list()
for obj in self.objects:
polygon = obj.get_z_projection()
object_polygons.append(polygon)
free_pose_count = 0.
for i, pose in enumerate(candidate_poses):
obj_copy = deepcopy(self.manipulated_object)
obj_copy.planar_translate_to(Vector2(pose.position.x, pose.position.y))
obj_copy.rotate_around_z(pose.orientation.z)
# we check if the object would collide with any of the other
# objects in the newly generated pose
manipulated_object_polygon = obj_copy.get_z_projection()
collision = False
for obj in object_polygons:
if obj.intersects(manipulated_object_polygon):
collision = True
break
if not collision:
free_pose_count += 1.
else:
free_pose[i] = False
p_factor = 0.
if free_pose_count > 0:
p_factor = 1. / free_pose_count
for i, p in enumerate(success_probabilities):
if free_pose[i]:
success_probabilities[i] = p * p_factor
return {'candidate_poses': candidate_poses,
'success_probabilities': success_probabilities}
def input_to_dict(self):
model_dict = {self.id: dict()}
model_dict[self.id]['frame_id'] = self.frame_id
model_dict[self.id]['manipulated_object'] = self.manipulated_object.to_dict()
model_dict[self.id]['static_objects'] = list()
for obj in self.objects:
model_dict[self.id]['static_objects'].append(obj.to_dict())
model_dict[self.id]['surface'] = self.surface.to_dict()
return model_dict
def result_to_dict(self, results):
result_dict = {self.id: dict()}
result_dict[self.id]['candidate_poses'] = list()
result_dict[self.id]['success_probabilities'] = list()
for pose in results['candidate_poses']:
result_dict[self.id]['candidate_poses'].append(pose.to_dict())
for prob in results['success_probabilities']:
result_dict[self.id]['success_probabilities'].append(prob)
return result_dict
|
alex-mitrevski/action-execution
|
action_execution/execution_models/FreeSpace.py
|
FreeSpace.py
|
py
| 6,572 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14888013645
|
def readOnly_file():
f = None
try:
f = open('my_write_file','w')
f.write('vb this is the new content .....')
except ValueError:
print('unsupported mode error')
else:
print('data updated .......')
finally:
if f != None:
f.close()
def writeOnly_file():
try:
f = open('myfile.txt','r')
f.write(' ....i am kavipriya ')
# f = open('myfile.txt','r')
# print(f.read())
except FileNotFoundError:
print('file not found')
except:
print('something is wrong')
finally:
f.close()
readOnly_file()
# writeOnly_file()
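# --- Hedged addition, not part of the original file: the same open/write/close pattern can be
# --- written with a `with` statement, which closes the file automatically even when an
# --- exception is raised. The file name mirrors the one used above.
def write_with_context():
    try:
        with open('my_write_file', 'w') as f:
            f.write('vb this is the new content .....')
    except OSError:
        print('could not write the file')
    else:
        print('data updated .......')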
|
ekpriya/python-files
|
pythonworks/file handling/file_using_exception.py
|
file_using_exception.py
|
py
| 652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29214768086
|
from django.shortcuts import render
from utils import Word
def home(request):
context = {}
if request.method == "POST":
text = request.POST['text']
context['results'] = Word(text).result()
context['text'] = text
return render(request, 'home.html', context)
|
dest81/test-jerry
|
words_stats/views.py
|
views.py
|
py
| 296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19594690506
|
import tkinter
import mysql.connector
from tkinter import *
from tkinter import ttk
from tkinter.ttk import Treeview
from tkinter import messagebox
from PIL import Image, ImageTk
db = mysql.connector.connect(
host="localhost",
user="root",
password="1234",
database="bmh204"
)
mycursor = db.cursor()
def login():
if Eku.get() == "" or Esif.get() == "":
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
else:
try:
            mycursor.execute("select * from Maykod where tc ='%s' and sifre = '%s' " % (Eku.get(), Esif.get()))
row = mycursor.fetchone()
if row == None:
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
else:
formgiris.destroy()
giris()
        except Exception as es:
messagebox.showerror("Hata", f"Kullanıcı Veya Şifrenizi Kontrol Ediniz:{str(es)}")
db.commit()
def giris():
anasayfa()
def admin():
def admingir():
if aku.get() == "" or asif.get() == "":
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
elif aku.get() != "a" or asif.get() != "1":
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
else:
Login()
admin.destroy()
admin = Toplevel()
admin.title("Yetkili Formu")
admin.geometry('600x400')
my_picet = Image.open("adminbg.jpg")
resized = my_picet.resize((600, 400), Image.ANTIALIAS)
new_picet = ImageTk.PhotoImage(resized)
my_laben = Label(admin, image=new_picet)
my_laben.place(x=0, y=0)
frame1 = Frame(admin, bg="#e53c09")
frame1.place(relx=0.2, rely=0.15, relwidth=0.6, relheight=0.2)
cv = Canvas(admin, bg='white', width=420, height=200)
cv.place(x=100, y=100)
yetki = Label(admin, text="Admin Panel Login", fg="black", bg="#e53c09", font="Times 18 italic").place(x=130, y=65)
aku = Label(admin, text="Kullanıcı Adı:", fg="black", bg="white", font="Times 22 italic").place(x=150, y=100)
aku = Entry(admin, bd=1, width=25)
aku.place(x=150, y=150)
asif = Label(admin, text="Şifre :", fg="black", bg="white", font="Times 22 italic").place(x=150, y=190)
asif = Entry(admin, bd=1, width=25)
asif.place(x=150, y=250)
Kaydet = Button(admin, text="Giriş Yap", fg="Blue", bg="white", font="Times 22 italic", command=admingir)
Kaydet.place(x=180, y=280)
admin.mainloop()
def anasayfa():
ana = Tk()
ana.title('MAYKOD ANASAYFA')
ana.geometry('1550x900')
def yonetim():
top = Toplevel()
top.geometry('1000x1000')
top.title('Yönetim Ekibi')
top.iconbitmap("maykod.ico")
cmy = Canvas(top, width=1000, height=1000)
L1 = Label(top, text="YÖNETİM EKİBİ", bg="#00bcdd", font="Times 45 ").place(x=150, y=40)
img = PhotoImage(file="ybg.png")
my_image = cmy.create_image(0, 0, anchor=NW, image=img)
cmy.create_rectangle(1550, 120, 0, 20, fill='#00bcdd')
cmy.pack()
photo12 = PhotoImage(file='maykod.png')
photoRezised12 = photo12.subsample(2, 2)
cmy.create_image(75, 100, image=photoRezised12)
photo22 = PhotoImage(file='mşü1.png')
photoRezised22 = photo22.subsample(2, 2)
cmy.create_image(900, 100, image=photoRezised22)
frame1 = Frame(top, bg="#1608d6")
frame1.place(relx=0.35, rely=0.1, relwidth=0.25, relheight=0.18)
frame2 = Frame(top, bg="#1608d6")
frame2.place(relx=0.15, rely=0.3, relwidth=0.18, relheight=0.18)
frame3 = Frame(top, bg="#1608d6")
frame3.place(relx=0.55, rely=0.3, relwidth=0.18, relheight=0.18)
frame4 = Frame(top, bg="#1608d6")
frame4.place(relx=0.15, rely=0.55, relwidth=0.18, relheight=0.18)
frame5 = Frame(top, bg="#1608d6")
frame5.place(relx=0.55, rely=0.55, relwidth=0.18, relheight=0.18)
frame6 = Frame(top, bg="#1608d6")
frame6.place(relx=0.05, rely=0.75, relwidth=0.18, relheight=0.18)
frame7 = Frame(top, bg="#1608d6")
frame7.place(relx=0.35, rely=0.75, relwidth=0.18, relheight=0.18)
frame8 = Frame(top, bg="#1608d6")
frame8.place(relx=0.65, rely=0.75, relwidth=0.18, relheight=0.18)
photo = PhotoImage(frame1, file='pp.png')
photoRezised = photo.subsample(4, 4)
cmy.create_image(480, 250, image=photoRezised)
lbl=Label(top,text="Mehmet Can ARSLAN \n MAYKOD Başkanı", font="Comic 13 italic")
lbl.place(x=370, y=320)
photo2 = PhotoImage(frame2, file='esra.png')
photoRezised2 = photo2.subsample(2, 2)
cmy.create_image(250, 450, image=photoRezised2)
lbl = Label(top, text="Esra YILDIRIM \n MAYKOD Başkan Yardımcısı", font="Comic 13 italic")
lbl.place(x=140, y=505)
photo3 = PhotoImage(frame3, file='Volkan.png')
photoRezised3 = photo3.subsample(2, 2)
cmy.create_image(700, 450, image=photoRezised3)
lbl = Label(top, text="Volkan AKGÖL \n MAYKOD Başkan Yardımcısı", font="Comic 13 italic")
lbl.place(x=590, y=505)
photo4 = PhotoImage(frame4, file='merve.png')
photoRezised4 = photo4.subsample(2, 2)
cmy.create_image(250, 650, image=photoRezised4)
lbl = Label(top, text="Merve OT \n MAYKOD Yazman", font="Comic 13 italic")
lbl.place(x=140, y=705)
photo5 = PhotoImage(frame5, file='beyda.png')
photoRezised5= photo5.subsample(3, 3)
cmy.create_image(700, 650, image=photoRezised5)
lbl = Label(top, text="Beyda ÇETİN \n MAYKOD Sayman", font="Comic 13 italic")
lbl.place(x=590, y=705)
photo6 = PhotoImage(frame6, file='alper.png')
photoRezised6 = photo6.subsample(2, 2)
cmy.create_image(150, 850, image=photoRezised6)
lbl = Label(top, text="Alper KOÇAK \n Kurucu Üye", font="Comic 13 italic")
lbl.place(x=80, y=905)
photo7 = PhotoImage(frame7, file='neşe.png')
photoRezised7 = photo7.subsample(2, 2)
cmy.create_image(460, 850, image=photoRezised7)
lbl = Label(top, text="Neşe VUROL \n MAYKOD Sekteteri", font="Comic 13 italic")
lbl.place(x=350, y=905)
photo8 = PhotoImage(frame8, file='eda.png')
photoRezised8 = photo8.subsample(2, 2)
cmy.create_image(830, 850, image=photoRezised8)
lbl = Label(top, text="Edanur TAŞÇI \n Denetleme Kurul Üyesi", font="Comic 13 italic")
lbl.place(x=720, y=905)
top.mainloop()
def iletisim():
ilet = Toplevel()
ilet.geometry('1000x900')
ilet.title('Yönetim Ekibi')
ilet.iconbitmap("maykod.ico")
cv = Canvas(ilet, bg='white', width=10000, height=10000)
cv.pack()
cv.create_rectangle(1550, 120, 0, 20, fill='#00bcdd')
img = PhotoImage(file="ana.png")
my_image = cv.create_image(0, 0, anchor=NW, image=img)
photo7 = PhotoImage(file='mşü1.png')
photoRezised7 = photo7.subsample(2, 2)
cv.create_image(900, 100, image=photoRezised7)
photo = PhotoImage(file='mail.png')
photoRezised = photo.subsample(3, 3)
cv.create_image(65, 400, image=photoRezised)
photo6 = PhotoImage(file='maykod.png')
photoRezised6 = photo6.subsample(2, 2)
cv.create_image(75, 100, image=photoRezised6)
photo5 = PhotoImage(file='okul.png')
photoRezised5 = photo5.subsample(6, 6)
cv.create_image(65, 500, image=photoRezised5)
photo2 = PhotoImage(file="twiter.png")
photoRezised2 = photo2.subsample(12, 12)
cv.create_image(65, 720, image=photoRezised2)
photo3 = PhotoImage(file="insta.png")
photoRezised3 = photo3.subsample(12, 12)
cv.create_image(65, 632, image=photoRezised3)
photo4 = PhotoImage(file="tel.png")
photoRezised4 = photo4.subsample(5, 5)
cv.create_image(65, 825, image=photoRezised4)
frame1 = Frame(ilet, bg="#1608d6")
frame1.place(relx=0.07, rely=0.2, relwidth=0.8, relheight=0.05)
L1 = Label(ilet, text="İLETİŞİM", bg="white", font="Times 45 ").place(x=150, y=40)
Lf1 = Label(ilet, text="MUŞ ALPARSLAN ÜNİVERSİTESİ YAZILIM KULÜBÜ", bg="#1608d6", fg="white", font="Comic 20 italic").place(x=145, y=180)
Lf2 = Label(ilet, text="E-MAİL Adresi:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=380)
Lf2yan = Label(ilet, text="[email protected]", bg="#0d0075",fg="white", font="Comic 20 italic").place(x=600, y=380)
Lf3 = Label(ilet, text="Muş Alparslan Üniversitesi", bg="#0d0075",fg="white", font="Comic 20 italic").place(x=150, y=450)
Lf3yan = Label(ilet, text="https://www.alparslan.edu.tr/tr", bg="#0d0075",fg="white", font="Comic 20 italic").place(x=600, y=450)
Lbu = Label(ilet, text="Bize Ulaşın", bg="#0d0075",fg="white", font="Times 30 italic").place(x=150, y=500)
Lf4 = Label(ilet, text="Instagram adresimiz:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=600)
Lf4yan = Label(ilet, text="maykodmsu", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=600, y=600)
Lf5 = Label(ilet, text="twitter adresimiz:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=700)
Lf5yan = Label(ilet, text="@MaykodMSU", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=600, y=700)
Lf6 = Label(ilet, text="Yönetici tel:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=800)
Lf6yan = Label(ilet, text="0 (545) 720 28 66", fg="white",bg="#0d0075", font="Comic 20 italic").place(x=600, y=800)
ilet.mainloop()
def hakkında():
root = Toplevel()
root.geometry("1100x1000")
mycanvas = Canvas(root, bg="white", width=1100, height=1000)
mycanvas.create_rectangle(1550, 120, 0, 0, fill='#00bcdd')
mlabel = Label(mycanvas, text="KULÜP FAALİYETLERİMİZ", bg="#00bcdd", font="Times 35 ").place(x=150, y=40)
mycanvas.pack()
photo1 = PhotoImage(file='maykod.png')
photoRezised1 = photo1.subsample(2, 2)
mycanvas.create_image(75, 100, image=photoRezised1)
photo2 = PhotoImage(file='mşü1.png')
photoRezised2 = photo2.subsample(2, 2)
mycanvas.create_image(1000, 100, image=photoRezised2)
root.mainloop()
canvas = Canvas(ana, width=1550, height=900)
image = ImageTk.PhotoImage(Image.open("ana.png"))
canvas.create_image(0, 0, anchor=NW, image=image)
canvas.pack()
canvas.create_rectangle(1550, 120, 0, 20, fill='#00bcdd')
my_picet = Image.open("mşü.png")
resized = my_picet.resize((1349, 124), Image.ANTIALIAS)
new_picet = ImageTk.PhotoImage(resized)
my_laben = Label(image=new_picet)
my_laben.place(x=100, y=750)
AnaSayfa = Button(ana, text="Anasayfa", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic")
AnaSayfa.place(x=320, y=50)
Kulup = Button(ana, text="Kulüp", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic", command=hakkında)
Kulup.place(x=500, y=50)
yonet = Button(ana, text="Yönetim", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic", command=yonetim)
yonet.place(x=650, y=50)
foto = Button(ana, text="Fotoğraf Galerisi", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic")
foto.place(x=800, y=50)
iletisim = Button(ana, text="İletişim", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic", command=iletisim)
iletisim.place(x=1050, y=50)
my_pice = Image.open("maykod.png")
resized = my_pice.resize((130, 130), Image.ANTIALIAS)
new_pice = ImageTk.PhotoImage(resized)
my_laben = Label(image=new_pice)
my_laben.place(x=50, y=50)
my_pic = Image.open("mşü2.png")
resized = my_pic.resize((130, 130), Image.ANTIALIAS)
new_pic = ImageTk.PhotoImage(resized)
my_labe = Label(image=new_pic)
my_labe.place(x=1370, y=50)
my_picen = Image.open("admin.png")
resized = my_picen.resize((90, 60), Image.ANTIALIAS)
new_picen = ImageTk.PhotoImage(resized)
Admin = Button(image=new_picen, text="Giriş", fg="red", borderwidth='0', bg='#00007f', command=admin, font="arial 25")
Admin.place(x=1225, y=40)
my_ana = Image.open("s1.png")
resizedana = my_ana.resize((1124, 400), Image.ANTIALIAS)
new_picana = ImageTk.PhotoImage(resizedana)
my_labeana = Label(image=new_picana)
my_labeana.place(x=250, y=250)
def icice():
messagebox.showinfo("aliş")
menu = Menu(ana)
ana.config(menu=menu)
def quit():
ana.destroy()
subMenu = Menu(menu)
menu.add_cascade(label="File", font="Times 20", menu=subMenu)
subMenu.add_command(label="Admin", font="Times 13", command=admin)
subMenu.add_command(label="Destek", font="Times 13", command=icice)
subMenu.add_separator()
subMenu.add_command(label="EXIT", font="Times 13", command=quit)
ana.mainloop()
formgiris = Tk()
formgiris.title('MAYKOD')
formgiris.geometry('1600x650')
formgiris.iconbitmap('maykod.ico')
canvas = Canvas(formgiris, width=5000, height=5000, bg="white")
canvas.pack()
frame_orta=Frame(formgiris, bg="yellow")
frame_orta.place(relx=0.427, rely=0, relwidth=0.005, relheight=1)
my_picet = Image.open("yen3.jpg")
resized = my_picet.resize((900, 650), Image.ANTIALIAS)
new_picet = ImageTk.PhotoImage(resized)
my_laben = Label(image=new_picet)
my_laben.place(x=690, y=0)
def kayitolma():
kayitol = Toplevel()
kayitol.title("Kayıt Olma Formu")
kayitol.geometry('1600x566')
canvasa = Canvas(kayitol, width=5000, height=5000, bg="white")
canvasa.pack()
img = PhotoImage(file="yen3.png")
my_image = canvasa.create_image(0, 0, anchor=NW, image=img)
frame_orta = Frame(kayitol, bg="yellow")
frame_orta.place(relx=0.485, rely=0, relwidth=0.005, relheight=1)
formgiris.withdraw()
def kaydet():
if (etck.get() == "" or esifre.get() == "" or eadi.get() == "" or esoyadi.get() == "" or eemail.get() == "" or eil.get() == "" or eilce.get() == "" or ebolum.get() == ""):
messagebox.showerror("Hata", "Lütfen Bütün Alanları Doldurun")
else:
try:
mycursor.execute("INSERT INTO Maykod (tc,sifre,adi,soyadi,email,il,ilce,bolum) VALUES " \
"('%s','%s','%s','%s','%s','%s','%s','%s')" % (
etck.get(), esifre.get(), eadi.get(), esoyadi.get(), eemail.get(), eil.get(),
eilce.get(), ebolum.get()))
messagebox.showinfo("Durum", "Kaydınız Başarıyla Tamamlanmıştır")
formgiris.deiconify()
kayitol.destroy()
db.commit()
            except Exception as es:
messagebox.showerror("Hata", f"Boş alanları kontrol ediniz:{str(es)}")
def geri():
formgiris.deiconify()
kayitol.destroy()
tck = Label(kayitol, text=" Kullanıcı Adı:", fg="black", bg="white", font="Times 20 italic").place(x=1000, y=80)
etck = Entry(kayitol, bd=1, width=25)
etck.place(x=1280, y=80)
lsifre = Label(kayitol, text="Şifre :", fg="black", bg="white", font="Times 20 italic").place(x=1090, y=120)
esifre = Entry(kayitol, bd=1, width=25)
esifre.place(x=1280, y=120)
ladi = Label(kayitol, text="Adı :", fg="black", bg="white", font="Times 20 italic").place(x=1100, y=160)
eadi = Entry(kayitol, bd=1, width=25)
eadi.place(x=1280, y=160)
lsoyadi = Label(kayitol, text="Soyadi :", fg="black", bg="white", font="Times 20 italic").place(x=1060, y=200)
esoyadi = Entry(kayitol, bd=1, width=25)
esoyadi.place(x=1280, y=200)
lemail = Label(kayitol, text="Email :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=240)
eemail = Entry(kayitol, bd=1, width=25)
eemail.place(x=1280, y=240)
lil = Label(kayitol, text="İL :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=280)
eil = Entry(kayitol, bd=1, width=25)
eil.place(x=1280, y=280)
lilce = Label(kayitol, text="İlçe :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=320)
eilce = Entry(kayitol, bd=1, width=25)
eilce.place(x=1280, y=320)
lbolum = Label(kayitol, text="Bölüm :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=360)
ebolum = Entry(kayitol, bd=1, width=25)
ebolum.place(x=1280, y=360)
Kaydet = Button(kayitol, text="Kaydol", fg="black", bg="white", font="Times 20 italic", command=kaydet)
Kaydet.place(x=1280, y=400)
geri = Button(kayitol, text="Geri Dön", fg="black", bg="white", font="Times 20 italic", command=geri)
geri.place(x=1400, y=400)
kayitol.mainloop()
def Login():
def listele():
liste.delete(*liste.get_children())
mycursor.execute('select * from Maykod')
results = mycursor.fetchall()
for row in results:
sifre = row[2]
adi = row[3]
soyadi = row[4]
email = row[5]
il = row[6]
ilce = row[7]
bolum = row[8]
liste.insert("", 0, text=row[0], values=(row[1], sifre, adi, soyadi, email, il, ilce, bolum))
def ekle():
mycursor.execute("INSERT INTO Maykod (tc,sifre,adi,soyadi,email,il,ilce,bolum) VALUES "\
"('%s','%s','%s','%s','%s','%s','%s','%s')" % (
Etc.get(), Esif.get(), Ead.get(), Esad.get(), Email.get(), Eil.get(), Eilce.get(), Ebolum.get()))
db.commit()
listele()
def guncelle():
mycursor.execute("UPDATE Maykod SET tc='%s',sifre='%s',adi='%s',soyadi='%s',email='%s',il='%s',ilce='%s',bolum='%s'" \
" WHERE id='%s'" % (Etc.get(), Esif.get(), Ead.get(), Esad.get(), Email.get(), Eil.get(), Eilce.get(), Ebolum.get(), Eid.get()))
db.commit()
listele()
def sil():
mycursor.execute("DELETE FROM Maykod WHERE id=%s " % (Eid.get()))
db.commit()
listele()
def getir(event):
idno = liste.item(liste.selection()[0])['text']
mycursor.execute("SELECT * FROM Maykod WHERE id = %s" % (idno))
results = mycursor.fetchone()
Eid.delete(0, END)
Eid.insert(0, results[0])
Etc.delete(0, END)
Etc.insert(0, results[1])
Esif.delete(0, END)
Esif.insert(0, results[2])
Ead.delete(0, END)
Ead.insert(0, results[3])
Esad.delete(0, END)
Esad.insert(0, results[4])
Email.delete(0, END)
Email.insert(0, results[5])
Eil.delete(0, END)
Eil.insert(0, results[6])
Eilce.delete(0, END)
Eilce.insert(0, results[7])
Ebolum.delete(0, END)
Ebolum.insert(0, results[8])
def listetikla(event):
idtext = liste.item(liste.selection()[0])['values'][0]
tctext = liste.item(liste.selection()[0])['values'][1]
sifretext = liste.item(liste.selection()[0])['values'][2]
adtext = liste.item(liste.selection()[0])['values'][3]
soyadtext = liste.item(liste.selection()[0])['values'][4]
emailtext = liste.item(liste.selection()[0])['values'][5]
iltext = liste.item(liste.selection()[0])['values'][6]
ilcetext = liste.item(liste.selection()[0])['values'][7]
bolumtext = liste.item(liste.selection()[0])['values'][8]
Eid.delete(0, END)
Eid.insert(0, idtext)
Etc.delete(0, END)
Etc.insert(0, tctext)
Esif.delete(0, END)
Esif.insert(0, sifretext)
Ead.delete(0, END)
Ead.insert(0, adtext)
Esad.delete(0, END)
Esad.insert(0, soyadtext)
Email.delete(0, END)
Email.insert(0, emailtext)
Eil.delete(0, END)
Eil.insert(0, iltext)
Eilce.delete(0, END)
Eilce.insert(0, ilcetext)
Ebolum.delete(0, END)
Ebolum.insert(0, bolumtext)
form = Toplevel()
form.title('Maykod')
form.geometry('1500x800')
form.configure(background="grey")
my_pice = Image.open("adminpanel.jpg")
resizede = my_pice.resize((1300, 650), Image.ANTIALIAS)
new_pice = ImageTk.PhotoImage(resizede)
my_laben = Label(form,image=new_pice)
my_laben.place(x=100, y=50)
Lid = Label(form, text="ID", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=120)
Eid = Entry(form, bd=1)
Eid.place(x=1150, y=150)
ltc = Label(form, text="TC", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=170)
Etc = Entry(form, bd=1)
Etc.place(x=1150, y=200)
Lad = Label(form, text="ADI", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=220)
Ead = Entry(form, bd=1)
Ead.place(x=1150, y=250)
Lsad = Label(form, text="SOYADI", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=270)
Esad = Entry(form, bd=1)
Esad.place(x=1150, y=300)
Lsif = Label(form, text="ŞİFRE", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=320)
Esif = Entry(form, bd=1)
Esif.place(x=1150, y=350)
Lmail = Label(form, text="E-MAİL", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=370)
Email = Entry(form, bd=1)
Email.place(x=1150, y=400)
Lil = Label(form, text="İL", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=420)
Eil = Entry(form, bd=1)
Eil.place(x=1150, y=450)
Lilce = Label(form, text="İLÇE", bg="#454f50", fg="white",font="Times 15 italic").place(x=1120, y=470)
Eilce = Entry(form, bd=1)
Eilce.place(x=1150, y=500)
lbolum = Label(form, text="BÖLÜM", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=520)
Ebolum = Entry(form, bd=1)
Ebolum.place(x=1150, y=550)
Kaydet = Button(form, text="Kaydet", command=ekle)
Kaydet.place(x=1100, y=650)
sil = Button(form, text="Sil", command=sil)
sil.place(x=1180, y=650)
guncelle = Button(form, text="Güncelle", command=guncelle)
guncelle.place(x=1230, y=650)
liste = Treeview(form, height=10, selectmode="extended")
liste["columns"] = ('sut1', 'sut2', 'sut3', 'sut4', 'sut5', 'sut6', 'sut7', 'sut8')
liste.place(x=120, y=100)
liste.column("#0", width=50)
liste.heading("#0", text="id",)
liste.column("sut1", width=100)
liste.heading("sut1", text="tc")
liste.column("sut2", width=90)
liste.heading("sut2", text="sifre")
liste.column("sut3", width=120)
liste.heading("sut3", text="adi")
liste.column("sut4", width=120)
liste.heading("sut4", text="soyadi")
liste.column("sut5", width=120)
liste.heading("sut5", text="email")
liste.column("sut6", width=90)
liste.heading("sut6", text="il")
liste.column("sut7", width=120)
liste.heading("sut7", text="ilce")
liste.column("sut8", width=120)
liste.heading("sut8", text="bolum")
liste.bind('<ButtonRelease-1>', getir)
style = ttk.Style()
style.theme_use("default")
style.configure("Treeview",
background="yellow",
foreground="black",
fieldbackground="silver"
)
style.map('Treeview',
background=[('selected', 'blue')])
listele()
form.mainloop()
lgir = Label(formgiris, text="MAYKOD", fg="black", bg="white", font="Times 40 italic").place(x=200, y=0)
Lkul = Label(formgiris, text="Kullanıcı Adı:", fg="black", bg="white", font="Times 18 italic").place(x=150, y=180)
Eku = Entry(formgiris, bd=1, width=25)
Eku.place(x=150, y=210)
Lsif = Label(formgiris, text="Şifre :", fg="black", bg="white", font="Times 18 italic").place(x=150, y=250)
Esif = Entry(formgiris, bd=1, width=25)
Esif.place(x=150, y=280)
Kaydet = Button(formgiris, text="Giriş Yap", fg="black", bg="white", font="Times 22 italic", command=login)
Kaydet.place(x=150, y=330)
Kayit = Button(formgiris, text="Kayıt Ol", fg="black", bg="white", font="Times 22 italic", command=kayitolma)
Kayit.place(x=300, y=330)
formgiris.mainloop()
|
arslncanm/Kulup_otomasyon_Python_tkinter
|
MAYKOD/main.py
|
main.py
|
py
| 24,326 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70264770749
|
import subprocess as sp
import pymysql
import pymysql.cursors
import datetime
def search():
try:
# letter = input(First letter)
query = "select H.sport_name from equipment as H where H.quantity in (select max(quantity) from equipment); "
print(query)
cur.execute(query)
con.commit()
print("Sports with max equipment fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def maxEquip():
try:
query = "select H.sport_name from equipment as H where H.quantity in (select max(quantity) from equipment); "
print(query)
cur.execute(query)
con.commit()
print("Sports with max equipment fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def lhostel():
try:
query = "select H.hostel_name from hostel as H where H.no_of_students in (select min(no_of_students) from hostel);"
print(query)
cur.execute(query)
con.commit()
print("Least populated hostel fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def amount():
try:
        query = "select sum(P.salary) from professors as P;"
print(query)
cur.execute(query)
con.commit()
print("Total salary fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def avgStd():
try:
query = "select avg(S.no_of_students) from subjects as S;"
print(query)
cur.execute(query)
con.commit()
        print("Avg no. of students fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def labSub():
try:
query = "select count(*) from subjects as S where S.labs = 'Y';"
print(query)
cur.execute(query)
con.commit()
print("Subjects having lab fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def studgt30():
try:
query = "select course_id, subject_name from subjects where subjects.no_of_students > 30;"
print(query)
cur.execute(query)
con.commit()
print("Subjects having more than 30 students fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def nonEquipSports():
try:
        query = "select S.sport_name from sports as S left join equipment as E on S.sport_name = E.sport_name where E.quantity is NULL or E.quantity = 0;"
print(query)
cur.execute(query)
con.commit()
print("Sports with no equipment fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch sport details")
print(">>>>>>>>>>>>>>",e)
return
def deptBuilding():
try:
        building_no = int(input("Building No: "))
        query = "select * from department as D where D.building_no = %d;" % (building_no)
print(query)
cur.execute(query)
con.commit()
print("Department Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch department details")
print(">>>>>>>>>>>>>>",e)
return
def profDetails():
try:
prof_id = int(input("Professor ID: "))
        query = "select * from professors as P where P.prof_id = %d;" % (prof_id)
print(query)
cur.execute(query)
con.commit()
print("Professor Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch professor details")
print(">>>>>>>>>>>>>>",e)
return
def studentDetails():
try:
rollno = int(input("Roll No: "))
        query = "select * from students as S where S.roll_no = %d;" % (rollno)
print(query)
cur.execute(query)
con.commit()
print("Student Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch student details")
print(">>>>>>>>>>>>>>",e)
return
def equipDetails():
try:
sport = input("Enter Sport Name: ")
query = "select * from equipment as E where E.sport_name = '%s';" % (sport)
print(query)
cur.execute(query)
con.commit()
print("Equipment Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch equipment details")
print(">>>>>>>>>>>>>>",e)
return
def allEquip():
try:
query = "select * from equipment;"
print(query)
cur.execute(query)
con.commit()
print("Equipment Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch equipment details")
print(">>>>>>>>>>>>>>",e)
return
def subDetails():
try:
sub = int(input("Course_id : "))
query = "select * from subjects where course_id = %d;" % (sub)
print(query)
cur.execute(query)
con.commit()
print("Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch subject details")
print(">>>>>>>>>>>>>>",e)
return
def newClub():
try:
row = {}
print("Enter Club details: ")
row["name"] = input("Name: ")
row["no_of_members"] = int(input("No. of members: "))
no_of_coords = int(input("No. of Coordinators (max 3): "))
row["coord1"] = input("Coord 1 : ")
if(no_of_coords > 1):
row["coord2"] = input("Coord 2 : ")
else:
row["coord2"] = "NULL"
if(no_of_coords > 2):
row["coord3"] = input("Coord 3 : ")
else:
row["coord3"] = "NULL"
        # NOTE: the INSERT statement for the clubs table was left empty in the original
        # source and still needs to be written before this function can work.
        query = " "
print(query)
cur.execute(query)
con.commit()
print("Added new club!")
except Exception as e:
con.rollback()
print("Failed to add new club")
print(">>>>>>>>>>>>>>",e)
return
def recruitProf():
try:
row = {}
print("Enter new proff's details: ")
# name = (input("Name (Fname Minit Lname): ")).split(' ')
name = (input("Name (Fname Minit Lname): "))
# row["Fname"] = name[0]
# row["Minit"] = name[1]
# row["Lname"] = name[2]
row["Prof_id"] = int(input("Prof_id: "))
row["Sex"] = input("Sex(F/M): ")
row["Salary"] = int(input("Salary: "))
row["Bdate"] = input("Birth Date (YYYY-MM-DD): ")
row["Dept"] = (input("Department: "))
row["course_id"] = int(input("course_id: "))
row["super_prof_id"] = int(input("super_prof_id: "))
# derive age
bdate = row["Bdate"]
blist = bdate.split('-')
dob = datetime.date(int(blist[0]),int(blist[1]),int(blist[2]))
today = datetime.date.today()
age = today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
        query = "INSERT INTO professors values ('%d','%s','%c','%s','%d','%d','%s','%s','%d')" % (
            row["Prof_id"], name, row["Sex"], row["Dept"], row["Salary"], row["course_id"], row["Bdate"], age, row["super_prof_id"])
print(query)
cur.execute(query)
con.commit()
        print("Added Professor to the Database!")
except Exception as e:
con.rollback()
print("Failed to insert into database")
print(">>>>>>>>>>>>>", e)
return
def admitAStudent():
try:
row = {}
print("Enter new student's details: ")
# name = (input("Name (Fname Minit Lname): ")).split(' ')
name = (input("Name (Fname Minit Lname): "))
# row["Fname"] = name[0]
# row["Minit"] = name[1]
# row["Lname"] = name[2]
row["Roll_No"] = int(input("Roll No: "))
# row["CGPA"] = input("CGPA: ")
row["Sex"] = input("Sex(F/M): ")
row["Batch"] = input("Batch: ")
row["Bdate"] = input("Birth Date (YYYY-MM-DD): ")
row["Email"] = (input("Email: "))
row["Dept"] = (input("Department: "))
row["Hostel"] = input("Hostel: ")
row["Password"] = (input("Password: "))
# derive age
bdate = row["Bdate"]
blist = bdate.split('-')
dob = datetime.date(int(blist[0]),int(blist[1]),int(blist[2]))
today = datetime.date.today()
age = today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
query = " INSERT INTO students values ('%d', NULL,'%c','%s',%d,'%s','%s','%s','%s','%s','%s')" % (
row["Roll_No"], row["Sex"], row["Batch"], age, row["Dept"], row["Email"], row["Bdate"], name, row["Password"], row["Hostel"])
# null is for cgpa
print(query)
cur.execute(query)
con.commit()
print("Added Student to the Database!")
except Exception as e:
con.rollback()
print("Failed to insert into database")
print(">>>>>>>>>>>>>", e)
return
def dispatch(ch):
"""
Function that maps helper functions to option entered
"""
if (ch == 1):
admitAStudent()
elif(ch == 2):
recruitProf()
# elif(ch == 3):
# option3()
# elif(ch == 4):
# option4()
else:
print("Error: Invalid Option")
# Global
while (1):
tmp = sp.call('clear', shell=True)
# Can be skipped if you want to hardcode username and password
username = input("Username: ")
password = input("Password: ")
try:
# Set db name accordingly which have been create by you
# Set host to the server's address if you don't want to use local SQL server
con = pymysql.connect(host='localhost',
port=3306,
user=username,
password=password,
db='project_final',
cursorclass=pymysql.cursors.DictCursor)
tmp = sp.call('clear', shell=True)
if (con.open):
print("Connected")
else:
print("Failed to connect")
tmp = input("Enter any key to CONTINUE>")
with con.cursor() as cur:
while (1):
tmp = sp.call('clear', shell=True)
# Here taking example of Employee Mini-world
print("1. Option 1") # Hire an Employee
print("2. Option 2") # Fire an Employee
print("3. Option 3") # Promote Employee
print("4. Option 4") # Employee Statistics
print("5. Logout")
ch = int(input("Enter choice> "))
tmp = sp.call('clear', shell=True)
if ch == 5:
exit()
else:
dispatch(ch)
tmp = input("Enter any key to CONTINUE>")
except Exception as e:
tmp = sp.call('clear', shell=True)
print(e)
print("Connection Refused: Either username or password is incorrect or user doesn't have access to database")
tmp = input("Enter any key to CONTINUE>")
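# --- Hedged addition, not part of the original script: the helper below sketches pymysql's
# --- parameterized query form for the lookups above. Binding values with execute(query, args)
# --- avoids the quoting mistakes and SQL injection risk of %-formatted query strings.
# --- Table and column names follow the queries already used in this file.
def studentDetails_parameterized(cur, rollno):
    # cur is an open pymysql cursor; rollno is passed as a bound parameter
    cur.execute("SELECT * FROM students WHERE roll_no = %s", (rollno,))
    return cur.fetchone()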
|
VanshMarda/Data-and-Application
|
Project_Phase_4/MiniWorld.py
|
MiniWorld.py
|
py
| 11,599 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32741409083
|
import math
import numba
import numpy as np
def main():
starts, ends, rdf = np.loadtxt("rdf.dat").T
density = 1200 / 1.0**3
n_bins = len(rdf)
bin_width = ends[0] - starts[0]
corrector = np.zeros(n_bins)
kernel = compute_kernel(rdf, bin_width)
for step in range(100):
corrector = bin_width * kernel @ (rdf - 1 - density * corrector)
direct_rdf = np.exp(np.log(rdf) - density * corrector)
for r, raw_gr, direct_gr in zip(starts, rdf, direct_rdf):
print(f"{r:.3f}\t{raw_gr:.3f}\t{direct_gr:.3f}")
def compute_kernel(rdf, bin_width):
n_bins = len(rdf)
kernel = np.zeros((n_bins, n_bins))
@numba.njit
def integrate(r, s, div=1000):
theta_step = math.pi / div
integral = 0
for theta_bin in range(div):
theta = theta_bin * theta_step
distance = math.hypot(s * math.sin(theta), r - s * math.cos(theta))
distance_bin = int(distance / bin_width)
if distance_bin < n_bins:
integral += (rdf[distance_bin] - 1) * math.sin(theta)
integral *= 2 * math.pi * s**2 * theta_step
return integral
for r_bin in range(n_bins):
for s_bin in range(n_bins):
r = bin_width * r_bin
s = bin_width * s_bin
kernel[r_bin, s_bin] = integrate(r, s)
return kernel
main()
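# --- Hedged note, not part of the original file: the fixed-point loop in main() appears to
# --- iterate a discretized Ornstein-Zernike relation, h(r) = c(r) + rho * Integral[ c(s) h(|r-s|) d^3 s ]
# --- with h = g - 1; `corrector` plays the role of the convolution term and `direct_rdf`
# --- then approximates exp(ln g - rho * corrector).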
|
snsinfu/bit5
|
test418-ornstein_zernike/oz.py
|
oz.py
|
py
| 1,377 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12392640532
|
# Complete the countInversions function below.
def countInversions(arr):
    # arr is empty or None: no inversions
    if not arr:
        return 0
    # arr has a single element: no inversions
    if len(arr) == 1:
        return 0
left, right = 0, len(arr) - 1
return merge_sort(arr, left, right)
def merge_sort(arr, left, right):
inversions = 0
if left >= right:
return inversions
mid = (left + right) // 2
# print(f"left: {arr[left:mid+1]}, right: {arr[mid+1:right+1]}")
inversions += merge_sort(arr, left, mid)
inversions += merge_sort(arr, mid + 1, right)
inversions += merge(arr, left, right)
return inversions
def merge(arr, left, right):
inversions = 0
mid = (left + right) // 2
l, s, r = 0, left, 0
arr_left = arr[left : mid + 1]
arr_right = arr[mid + 1 : right + 1]
while l < len(arr_left) and r < len(arr_right):
if arr_left[l] <= arr_right[r]:
arr[s] = arr_left[l]
l += 1
else:
arr[s] = arr_right[r]
r += 1
inversions += len(arr_left) - l
s += 1
while l < len(arr_left):
arr[s] = arr_left[l]
l += 1
s += 1
while r < len(arr_right):
arr[s] = arr_right[r]
r += 1
s += 1
return inversions
if __name__ == "__main__":
t = [[1, 1, 1, 2, 2], [2, 1, 3, 1, 2]]
for arr in t:
result = countInversions(arr)
print(result)
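        # Hedged note, added: the expected output for the two test arrays above is 0 and 4;
        # [2, 1, 3, 1, 2] has the inversion pairs (2,1), (2,1), (3,1) and (3,2).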
|
jintang413/hackerrank
|
src/countinginversions.py
|
countinginversions.py
|
py
| 1,445 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3648010096
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="waterstructureCreator",
version="0.0.1",
author="Nicolas G. Hoermann",
author_email="[email protected]",
description=
"Creation of water structures on substrates",
long_description=long_description,
long_description_content_type="text/markdown",
url="",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'scipy==1.7.1', 'numpy==1.17.5', 'matplotlib==3.1.3', 'ipython==7.26.0', 'scikit-learn==0.24.1', 'ase==3.20.1', 'pymatgen==2020.11.11'
],
extras_require={'testing': ['pytest>=5.0']},
python_requires='==3.8.3',
)
|
computationalelectrochemistrygroup/WaterStructureCreator
|
setup.py
|
setup.py
|
py
| 868 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71066784189
|
from collections import defaultdict
from copy import copy, deepcopy
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum, auto, IntEnum
from typing import List, Tuple, Dict, Optional, Any
from dictdiffer import diff
from blaseball_mike.chronicler import get_entities
from ChangeSource import ChangeSource
from Player import Player
class TimestampSource(Enum):
FEED = auto()
CHRON_PLAYER = auto()
CHRON_GAME_EVENT = auto()
MANUAL = auto()
class ModDuration(IntEnum):
PERMANENT = 0
SEASON = 1
WEEKLY = 2
GAME = 3
ITEM = 4
LEAGUE = 5
class Effect:
def apply(self, player: Player) -> None:
raise NotImplementedError("Don't instantiate Effect")
def _duration_attribute(duration: ModDuration) -> Optional[str]:
if duration == ModDuration.GAME:
return "gameAttr"
elif duration == ModDuration.WEEKLY:
return "weekAttr"
elif duration == ModDuration.SEASON:
return "seasAttr"
elif duration == ModDuration.PERMANENT:
return "permAttr"
return None
@dataclass
class ModEffect(Effect):
from_mod: Optional[str]
to_mod: Optional[str]
type: ModDuration
def apply(self, player: Player) -> None:
attribute = _duration_attribute(self.type)
if attribute is None:
# This signifies that this mod effect is not stored on the player
return
if self.from_mod is not None:
player.data[attribute].remove(self.from_mod)
if self.to_mod is not None:
player.data[attribute].append(self.to_mod)
@dataclass
class SetStateEffect(Effect):
path: List[str]
value: Any
def apply(self, player: Player) -> None:
player.set_state(self.path, self.value)
@dataclass
class IncrementCounterEffect(Effect):
path: List[str]
def apply(self, player: Player) -> None:
player.increment_counter(self.path)
@dataclass
class ResetCounterEffect(Effect):
path: List[str]
def apply(self, player: Player) -> None:
player.reset_counter(self.path)
@dataclass
class Change:
source: ChangeSource
timestamp: datetime
timestamp_source: TimestampSource
effects: List[Effect]
def apply(self, player: Player) -> None:
for effect in self.effects:
effect.apply(player)
def _get_mod_effect(event: dict) -> ModEffect:
metadata = event['metadata']
if event['type'] == 106 or event['type'] == 146:
return ModEffect(from_mod=None,
to_mod=metadata['mod'],
type=ModDuration(metadata['type']))
elif event['type'] == 107 or event['type'] == 147:
return ModEffect(from_mod=metadata['mod'],
to_mod=None,
type=ModDuration(metadata['type']))
elif event['type'] == 148:
return ModEffect(from_mod=metadata['from'],
to_mod=metadata['to'],
type=ModDuration(metadata['type']))
raise ValueError("Not chron mod add/remove/change event")
def _player_id(event: dict) -> str:
assert len(event['playerTags']) == 1
return event['playerTags'][0]
def check_equality_recursive(chron: dict, ours: dict, path=""):
if type(chron) != type(ours):
raise RuntimeError(f"Mismatched type for {path}, expected " +
str(type(ours)) + " but chron has " +
str(type(chron)))
if isinstance(chron, list):
if len(chron) != len(ours):
raise RuntimeError(f"Mismatched length for {path}, expected " +
str(len(ours)) + " but chron has " +
str(len(chron)))
for i, (chron_elem, ours_elem) in enumerate(zip(chron, ours)):
check_equality_recursive(chron_elem, ours_elem, f"{path}.{i}")
if isinstance(chron, dict):
chron_keys = set(chron.keys())
our_keys = set(ours.keys())
if chron_keys - our_keys:
raise RuntimeError(f"Chron has additional key(s) for {path}: " +
", ".join(chron_keys - our_keys))
if our_keys - chron_keys:
raise RuntimeError(f"Chron is missing key(s) for {path}: " +
", ".join(our_keys - chron_keys))
assert chron_keys == our_keys
for key in chron_keys:
check_equality_recursive(chron[key], ours[key], f"{path}.{key}")
class Players:
def __init__(self, start_time: datetime):
self.players: Dict[str, Player] = {}
self.changes: Dict[str, List[Change]] = defaultdict(lambda: [])
for player in get_entities("player",
at=start_time,
cache_time=None):
self.players[player['entityId']] = Player(player)
def associate_chron_updates(self, chron_updates: List[dict]):
assert len(chron_updates) > 0
chron_update_time = chron_updates[0]['validFrom']
for chron_update in chron_updates:
player_id = chron_update['entityId']
player = deepcopy(self.players[player_id])
last_matching_player, last_matching_i = None, None
for i, change in enumerate(self.changes[player_id]):
change.apply(player)
if player.data == chron_update['data']:
last_matching_i = i
last_matching_player = deepcopy(player)
if last_matching_i is None:
print(list(diff(self.players[player_id].data,
chron_update['data'])))
raise RuntimeError("Unable to account for chron change")
# Changes up to last_matching_i are yielded, the rest are saved for
# the next chron update
last_matching_i += 1
changes = self.changes[player_id][:last_matching_i]
self.changes[player_id] = self.changes[player_id][last_matching_i:]
# Verification
for change in changes:
change.apply(self.players[player_id])
assert self.players[player_id].data == last_matching_player.data
yield chron_update, changes
for key, changes in self.changes.items():
for change in changes:
if chron_update_time - change.timestamp > timedelta(seconds=300):
raise RuntimeError("Chron update didn't account for "
f"{len(changes)} changes to ${key}")
def apply_event(self, event: dict) -> None:
print("Applying:", event['description'])
if 'parent' in event['metadata']:
changes = Players._find_change_by_parent_type[
event['metadata']['parent']['type']](self, event)
else:
changes = Players._find_change_by_own_type[
event['type']](self, event)
for player_id, change in changes:
self.changes[player_id].append(change)
def _find_change_superyummy(self, event: dict) -> List[Tuple[str, Change]]:
mod_effect = _get_mod_effect(event)
state_effect = SetStateEffect(path=['permModSources', mod_effect.to_mod],
value=['SUPERYUMMY'])
return [(_player_id(event),
Change(source=ChangeSource.SUPERYUMMY,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[mod_effect, state_effect]))]
def _find_recorded_change_from_score(self, event: dict) \
-> List[Tuple[str, Change]]:
if event['type'] == 107 and event['metadata']['mod'] == 'COFFEE_RALLY':
return [(_player_id(event),
Change(source=ChangeSource.USE_FREE_REFILL,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[_get_mod_effect(event)]))]
raise RuntimeError("Didn't find change type from hit")
def _find_unrecorded_change_from_hit(self, event: dict) \
-> List[Tuple[str, Change]]:
# I hope the player who hit the hit is guaranteed to be first.
return [(event['playerTags'][0],
Change(source=ChangeSource.HIT,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[IncrementCounterEffect(['consecutiveHits'])]))]
def _find_unrecorded_change_from_non_hit(self, event: dict) \
-> List[Tuple[str, Change]]:
# TODO Get the player ID from blarser
return [("",
Change(source=ChangeSource.NON_HIT,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[ResetCounterEffect(['consecutiveHits'])]))]
_find_change_by_parent_type = {
92: _find_change_superyummy,
4: _find_recorded_change_from_score, # stolen base
10: _find_recorded_change_from_score, # hit
}
_find_change_by_own_type = {
7: _find_unrecorded_change_from_non_hit,
# 9 is a home run, which has the same effects as hit
9: _find_unrecorded_change_from_hit,
10: _find_unrecorded_change_from_hit,
}
|
beiju/blaseball-player-changes
|
v1/Players.py
|
Players.py
|
py
| 9,476 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6701762608
|
import discord
from discord.ext import commands
class HostPlugin(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def host(self, ctx):
await ctx.send("What is the time of the flight?")
flight_time = await self.bot.wait_for('message', check=lambda m: m.author == ctx.author)
await ctx.send("What is the time of departure?")
departure_time = await self.bot.wait_for('message', check=lambda m: m.author == ctx.author)
await ctx.send("Thank you! Announcing in the channel...")
announcement = f"Flight at {flight_time.content} departing at {departure_time.content}."
channel = self.bot.get_channel(991475748756009014)
await channel.send(announcement)
def setup(bot):
bot.add_cog(HostPlugin(bot))
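# --- Hedged note, not part of the original file: the synchronous setup() above matches
# --- discord.py 1.x. On discord.py 2.x the extension entry point must be a coroutine:
#
#     async def setup(bot):
#         await bot.add_cog(HostPlugin(bot))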
|
MayyCookie/swissannc
|
flighta 2.py
|
flighta 2.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43133183051
|
#!/usr/bin/python
#encoding=utf-8
#@author:[email protected]
#@version:1.0
#@desc:Spider logging module
#@date:2012-11-23
from ConfParser import confparser
from sys import _getframe
from time import strftime
import os
#Supports three log levels: debug, notice and warning
class slogger:
def __init__(self):
pass
@staticmethod
def warning(loginfo={}):
try:
confdictory = confparser.confdictory('spider')
except:
            #Default log configuration
confdictory = {'slogpath':'log','slogfile':'spider.log','sloglevel':'debug'}
logpath = ''
noticelog = ''
warninglog = ''
loginfo = loginfo
nowstr = strftime('%Y%m%d%H')
noticefile = ''
warningpath = ''
if confdictory.has_key('slogpath'):
logpath = confdictory['slogpath']
if confdictory.has_key('slogfile'):
noticefile = confdictory['slogfile']
warningpath = noticefile + '.' + str(nowstr) + '.wf'
warningfile = logpath + '/' + warningpath
try:
frame = _getframe(1)
except:
frame = _getframe()
curfile = frame.f_code.co_filename
linenum = frame.f_lineno
now = strftime('%Y-%m-%d %H:%M:%S')
logstr = '[level] warning [time] ' + str(now) + ' [fileinfo] ' + os.path.abspath(curfile) + ':' + str(linenum)
if isinstance(loginfo,str):
logstr += ' [msg] ' + loginfo
elif isinstance(loginfo,dict):
for info in loginfo:
if isinstance(loginfo[info],str):
logstr += ' [' + info + '] ' + loginfo[info]
else:
try:
logstr += ' [' + info + '] (' + str(loginfo[info]) + ')'
except:
logstr += ' [' + info + '] Error Format'
else:
logstr += ' [msg] (' + str(loginfo) + ')'
logstr += '\n'
if warningfile != None:
new_link = 0
if not os.path.exists(warningfile) or not os.path.isfile(warningfile):
new_link = 1
f_handler = open(warningfile,'a+')
f_handler.write(logstr)
f_handler.close()
warninglink = logpath + '/' + noticefile + '.wf'
if not os.path.islink(warninglink) or new_link == 1:
if os.path.islink(warninglink):
os.unlink(warninglink)
os.symlink(warningpath,warninglink)
@staticmethod
def notice(loginfo={}):
try:
confdictory = confparser.confdictory('spider')
except:
            #Default log configuration
confdictory = {'slogpath':'log','slogfile':'spider.log','sloglevel':'debug'}
logpath = ''
noticelog = ''
loginfo = loginfo
nowstr = strftime('%Y%m%d%H')
confpath = ''
noticepath = ''
if confdictory.has_key('slogpath'):
logpath = confdictory['slogpath']
if confdictory.has_key('slogfile'):
confpath = confdictory['slogfile']
noticepath = confpath + '.' + str(nowstr)
noticefile = logpath + '/' + noticepath
try:
frame = _getframe(1)
except:
frame = _getframe()
curfile = frame.f_code.co_filename
linenum = frame.f_lineno
now = strftime('%Y-%m-%d %H:%M:%S')
logstr = '[level] notice [time] ' + str(now) + ' [fileinfo] ' + os.path.abspath(curfile) + ':' + str(linenum)
if isinstance(loginfo,str):
logstr += ' [msg] ' + loginfo
elif isinstance(loginfo,dict):
for info in loginfo:
if isinstance(loginfo[info],str):
logstr += ' [' + info + '] ' + loginfo[info]
else:
try:
logstr += ' [' + info + '] (' + str(loginfo[info]) + ')'
except:
logstr += ' [' + info + '] Error Format'
else:
logstr += ' [msg] (' + str(loginfo) + ')'
logstr += '\n'
if noticefile != None:
new_link = 0
            #Create a symlink to the latest log file
if not os.path.exists(noticefile) or not os.path.isfile(noticefile):
new_link = 1
f_handler = open(noticefile,'a+')
f_handler.write(logstr)
f_handler.close()
noticelink = logpath + '/' + confpath
if not os.path.islink(noticelink) or new_link == 1:
if os.path.islink(noticelink):
os.unlink(noticelink)
os.symlink(noticepath,noticelink)
if __name__ == '__main__':
frame = _getframe()
curfile = frame.f_code.co_filename
linenum = frame.f_lineno
now = strftime('%Y-%m-%d %H:%M:%S')
slogger.warning({'aa':'bb','aa1':{'ss':'tt'},'aa2':[1,2,3,4]})
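# --- Hedged note, not part of the original file: this module targets Python 2
# --- (dict.has_key, ConfParser-style imports). Under Python 3 the membership tests
# --- would read, for example:
#
#     if 'slogpath' in confdictory:
#         logpath = confdictory['slogpath']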
|
ygliang2009/pysearch
|
SLogger.py
|
SLogger.py
|
py
| 3,955 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36021786185
|
import matplotlib.pyplot as plt;
import numpy as np;
data = np.loadtxt('jV_steady.dat', skiprows=1);
ref = np.loadtxt('G0610_cell3/1suns.dat', skiprows=3);
V_steady = data[:,0];
J_steady = data[:,1];
V_ref = ref[:,0];
J_ref = ref[:,1]*(-10);
V_steady += 0.0;
J_steady += 0;
# Plot all results
plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k');
# jV steady
plt.axis((-0.1,1.4,-210,0));
plt.plot(V_steady, J_steady, 'k-');
plt.scatter(V_ref, J_ref, s=50, color='orange');
plt.yscale('linear');
plt.title('Fitting');
plt.grid(True);
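# Hedged addition, not in the original script: the figure is built but never shown or saved.
# plt.savefig('JV_with_ref.png', dpi=80);
plt.show();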
|
dglowienka/drift-diffusion_mini-modules
|
Spatial/JV_with_ref.py
|
JV_with_ref.py
|
py
| 563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24430769754
|
import numpy as np
from numpy.linalg import *
dist = np.array([228.52, 440.12]) # tube distance
longH = np.array([172, 383.60]) # horizontal tube length
longA = np.array([192.37, 436.71]) # vertical tube length
dDist = dist[1] - dist[0]
dLongH = longH[1] - longH[0]
dLongA = longA[1] - longA[0]
print('dDist:', dDist, ' dH:', dLongH, ' dA:', dLongA)
print(172-228.52)
aA = dLongA/dDist
print('dA/dDist=', aA)
print(longA[1] - aA*dist[1])
# dLongH/dDist
dLD = dDist/dLongH
print(dLD)
|
Sim0nD3p/PySM
|
layout/central/storeOverview/storeViewerWidget/script.py
|
script.py
|
py
| 498 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13000612743
|
import os
import time
import numpy as np
import torch
import cv2
import subprocess
import argparse
from PIL import Image, ImageDraw
from facenet_pytorch import MTCNN
from optical_flow import OpticalFlowTracker
parser = argparse.ArgumentParser(description='Face tracking using Optical Flow.')
parser.add_argument('--input', type=str, required=False, help='Path to the video file.', default = "videos/face-demographics-walking-and-pause.mp4")
parser.add_argument('--output', type=str, required=False, help='Path to the directory where output frames will be saved.', default = "tracked_face")
# Get length of video in seconds
def get_length(filename):
result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", filename],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return float(result.stdout)
# IOU (Intersection Over Union): Area of overlap/area of union threshold
def calculate_iou(box1, box2):
# Calculate the (x, y)-coordinates of the intersection rectangle
xi1 = max(box1[0], box2[0])
yi1 = max(box1[1], box2[1])
xi2 = min(box1[2], box2[2])
yi2 = min(box1[3], box2[3])
inter_area = max(0, xi2 - xi1 + 1) * max(0, yi2 - yi1 + 1)
# Calculate the area of both rectangles
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
# Calculate the intersection over union
iou = inter_area / float(box1_area + box2_area - inter_area)
return iou
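# Hedged worked example, added: with the +1 pixel convention above,
# calculate_iou([0, 0, 10, 10], [5, 5, 15, 15]) gives an intersection of 6*6 = 36,
# box areas of 11*11 = 121 each, and an IoU of 36 / (121 + 121 - 36), roughly 0.175.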
def main():
args = parser.parse_args()
# Use GPU
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
# Load face detection model
mtcnn = MTCNN(keep_all=True, device=device)
video_dir = args.input
video = cv2.VideoCapture(video_dir)
frames = []
trackers = []
while video.isOpened():
ret, frame = video.read()
if not ret:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(Image.fromarray(frame))
video.release()
frames_dir = args.output
os.makedirs(frames_dir, exist_ok=True)
video_length = get_length(video_dir)
num_frames = len(frames)
fps = num_frames / video_length
print("Video FPS: " + str(fps))
frames_tracked = []
track_face = True
for i, frame in enumerate(frames):
print('\rTracking frame: {}'.format(i + 1), end='')
frame_draw = frame.copy()
draw = ImageDraw.Draw(frame_draw)
frame_np = np.array(frame)
if track_face:
# Detect faces
boxes, _ = mtcnn.detect(frame)
# if a face is detected
if boxes is not None:
# sort by y coordinate of the box (topmost face)
boxes = sorted(boxes, key=lambda y: y[1])
# Only track the topmost face
box = boxes[0]
tracker_exists = False
for tracker in trackers:
iou = calculate_iou(box, tracker.bbox)
if iou > 0.5:
tracker_exists = True
break
if not tracker_exists:
tracker = OpticalFlowTracker(box.tolist(), frame_np, time.time())
tracker.start_frame_idx = i
trackers.append(tracker)
track_face = False
if trackers: # If there is a tracker in the list
tracker = trackers[0]
tracker.end_frame_idx = i
print("\nTracking in process...")
updated_bbox = tracker.update(frame_np)
updated_bbox = updated_bbox.tolist() # convert numpy array to list
# Ensure that the coordinates are valid
print(updated_bbox)
if updated_bbox[0] < updated_bbox[2] and updated_bbox[1] < updated_bbox[3] and updated_bbox[0] > 0 and updated_bbox[0] > 0 and updated_bbox[1] > 0 and updated_bbox[2] > 0 and updated_bbox[3] > 0:
draw.rectangle(updated_bbox, outline=(255, 0, 0), width=1)
else:
# If not valid, calculate wait time, remove tracker and restart face tracking
tracking_duration = (tracker.end_frame_idx - tracker.start_frame_idx + 1) / fps
print(f'Duration of tracking for person: {tracking_duration} seconds')
trackers.remove(tracker)
track_face = True
# Add to frame list
tracked_frame = frame_draw.resize((640, 360), Image.BILINEAR)
frames_tracked.append(tracked_frame)
# Save frame to file
tracked_frame.save(os.path.join(frames_dir, f'frame_{i+1:04d}.png'))
print('\nFinished')
if __name__ == "__main__":
main()
|
nishadi930313/Labmate
|
face_tracking.py
|
face_tracking.py
|
py
| 5,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20416367196
|
#coding: utf-8
#Autor: Fernanda Bezerra
vet_vu = []
vet_qnt = []
vet_vt = []
cont_ov = 0
for i in range(0,3):
vu = float(input("Digite o valor unitário do produto:"))
qnt = int(input("Digite a quantidade vendida do produto:"))
vt = vu*qnt
vet_vu.append(vu)
vet_qnt.append(qnt)
vet_vt.append(vt)
vt = 0
for i in range(0,3):
    if vet_qnt[i] > vet_qnt[cont_ov]:
        cont_ov = i
for i in range(0,3):
print("Quantidade vendida:",vet_qnt[i],"Valor unitário:",vet_vu[i],"Valor total:",vet_vt[i])
print("Valor total das vendas:",sum(vet_vt), "Valor da comissão:",sum(vet_vt)*0.05,"Objeto mais vendido:",cont_ov)
|
nandabezerran/programming-fundamentals
|
Ex - LT/Ex - LT - Vetores/LT-Vet-02.py
|
LT-Vet-02.py
|
py
| 603 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
73227662588
|
import pandas as pd
import imp
import QR_Code_VCard_WC_copy
from tkinter import *
from tkinter.ttk import *
from tkinter.filedialog import askopenfile
import time
import os
from pathlib import Path
global current_path
current_path=Path.cwd()
# Initialize shared state so start() can run before a file or logo has been chosen
file_path = ""
logo = None
def open_file():
global file_path
file_path = askopenfile(mode='r', filetypes=[('Excel', '*csv')])
#print(file_path)
file.set(os.path.basename(file_path.name))
percent.set("")
adhar.pack_forget()
window.update_idletasks()
if file_path or logo is not None:
pass
def open_logo():
global logo
logo = askopenfile(mode='r')
#print(file_path)
image.set(os.path.basename(logo.name))
percent.set("")
adhar.pack_forget()
button_status.set("Create QR Code")
button.pack()
window.update_idletasks()
if logo or file_path is not None:
pass
def close():
window.quit()
exit()
def start():
#Path location
if file_path == "":
path=r"{current_path}/BR+A Bussiness Card/01_CSV/Employee Information.csv".format(current_path=current_path)
else:
path=r"{}".format(file_path.name)
#Workbook Path
global employee_data
employee_data = pd.read_csv(path)
#Creating QR Code
global Employee_info
global x
x=0
bar['value'] = 0
percent.set("")
for i in employee_data.itertuples():
Employee_info=i
x=x+1
percentage = round((x/len(employee_data.index))*100)
#print(percentage)
file_total.set("QR Code(s) created: "+str(x))
imp.reload(QR_Code_VCard_WC_copy)
if percentage >= bar['value']+2:
bar['value'] = percentage
window.update_idletasks()
try:
bar['value'] = 100
percent.set("QR Code(s) Created and CSV file Updated Succesfully !!!")
window.update_idletasks()
employee_data.to_csv(r"{current_path}/01_CSV/Employee Information.csv".format(current_path=current_path), index=False, sep=',')
#print("QR Code(s) Created and CSV file Updated Succesfully !!!")
button.pack_forget()
button2.pack()
button_status.set("Close")
window.update_idletasks()
except:
#print("CSV file opened by another user and not updated!!! Please close csv file and restart script.")
percent.set("CSV file opened by another user and not updated!!! Please close csv file and restart script.")
button_status.set("Try again")
window.update_idletasks()
#exit()
#Window Interface
window = Tk()
window.title("BR+A-Virtual QR Code Generator")
window.geometry("400x200")
percent = StringVar()
file = StringVar()
image = StringVar()
file_total = StringVar()
button_status = StringVar()
file.set('Choose File')
image.set('Choose Background')
button_status.set("Create QR Code(s)")
Title_label = Label(window, text="Virtual QR Code Generator").pack()
bar = Progressbar(window, orient=HORIZONTAL, length=300)
bar.pack(pady=10,padx=10)
percent_label = Label(window, textvariable=percent)
percent_label.pack()
file_label = Label(window, textvariable=file_total).pack()
adhar = Label(window,text='Upload Excel')
adhar.pack()
adharbtn = Button(window,textvariable=file,command=lambda: open_file())
adharbtn.pack()
adharbtn = Button(window,textvariable=image,command=lambda: open_logo())
adharbtn.pack()
button = Button(window,textvariable=button_status, command=start)
button2 = Button(window,textvariable=button_status, command=close)
window.mainloop()
window.quit()
exit()
|
JonJones98/Virtual-Business-Card-Generator
|
06_Scripts/Excel_connection_csv.py
|
Excel_connection_csv.py
|
py
| 3,698 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24929837495
|
import minimax
import time
import sys
class TicTacToe():
''' This is a tic tac toe game class. It holds most of the methods
that will manipulate the game board such as: player_turn and computer_turn '''
def __init__(self, player_char = None, player2_char = None):
self.puzzle = [[" ", " ", " "],
[" ", " ", " "],
[" ", " ", " "]]
self.player_char = player_char
self.player2_char = player2_char
def print_game(self):
''' prints the game board in a pleasing way '''
print("\t [col]")
print(" 0 1 2")
for i in range(3):
if i == 1:
print(" [row]" + str(i) + "[", end = " ")
else:
print("\t" + str(i) + "[", end = " ")
for j in range(3):
print(self.puzzle[i][j] + " ", end="")
print("]")
def player_turn(self, player_char = None):
''' Will prompt the player for a row and column to assign the
player's character '''
enter_prompt_string = "Enter your choice\nRow (space) Column : "
invalid_prompt_string = "\nInvalid selection, try again\n"
invalid = True
if player_char == None:
player_char = "X"
while invalid:
for char in enter_prompt_string:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(.029)
choice = input()
if len(choice) != 3:
for char in invalid_prompt_string:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(.029)
continue
choice_list = choice.split()
row = int(choice_list[0])
col = int(choice_list[1])
if row > 2 or row < 0 or col > 2 or col < 0 or self.puzzle[row][col] != " ":
for char in invalid_prompt_string:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(.029)
else:
self.puzzle[row][col] = player_char
invalid = False
def player_turn_UI(self, choice_list, player_char = None):
row = int(choice_list[0])
col = int(choice_list[1])
if row > 2 or row < 0 or col > 2 or col < 0 or self.puzzle[row][col] != " ":
raise IndexError
else:
self.puzzle[row][col] = player_char
def check_status(self):
''' calls on its helper function to return True or False.
Determining whether the game board has a winner '''
return self.check_row() or self.check_col() or self.check_diag()
def check_row(self):
''' helper function to check status that returns a bool
if any rows have a winner '''
for i in range(3):
if self.puzzle[i][0] == self.puzzle[i][1] == self.puzzle[i][2] and self.puzzle[i][0] != " ":
return True
return False
def check_col(self):
''' helper function to check_status that returns a bool
        determining whether there is a winner in any of the cols '''
for i in range(3):
if self.puzzle[0][i] == self.puzzle[1][i] == self.puzzle[2][i] and self.puzzle[0][i] != " ":
return True
return False
def check_diag(self):
''' helper function to check_status, which returns a bool
        determining whether there is a winner in either of the diagonals '''
if self.puzzle[0][0] == self.puzzle[1][1] == self.puzzle[2][2] and self.puzzle[0][0] != " ":
return True
elif self.puzzle[0][2] == self.puzzle[1][1] == self.puzzle[2][0] and self.puzzle[0][2] != " ":
return True
else:
return False
def computer_turn(self, computer_char, player_char):
''' This method is used when it is currently the computers turn.
This method calls a function that uses the minimax algorithm, and
returns the best move determined by it. '''
choice_tup = minimax.best_move(self.puzzle, computer_char, player_char)
self.puzzle[choice_tup[0]][choice_tup[1]] = computer_char
def is_full(self):
''' Returns True if the puzzle is full, False if not '''
list_pos = minimax.get_pos(self.puzzle)
if len(list_pos) == 0:
return True
return False
def game_end(self):
if self.check_status() or self.is_full():
return True
return False
def scheck_status(puzzle):
''' Statically checks a puzzle array '''
return scheck_row(puzzle) or scheck_col(puzzle) or scheck_diag(puzzle)
def scheck_row(puzzle):
''' helper to scheck_status '''
for i in range(3):
if puzzle[i][0] == puzzle[i][1] == puzzle[i][2] and puzzle[i][0] != " ":
return True
return False
def scheck_col(puzzle):
''' helper to scheck_status '''
for i in range(3):
if puzzle[0][i] == puzzle[1][i] == puzzle[2][i] and puzzle[0][i] != " ":
return True
return False
def scheck_diag(puzzle):
''' helper to scheck_status '''
if puzzle[0][0] == puzzle[1][1] == puzzle[2][2] and puzzle[0][0] != " ":
return True
elif puzzle[0][2] == puzzle[1][1] == puzzle[2][0] and puzzle[0][2] != " ":
return True
else:
return False
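# Minimal usage sketch of the board logic above; it avoids the minimax-backed
# methods (computer_turn, is_full), and the moves chosen here are arbitrary.
if __name__ == "__main__":
    demo = TicTacToe(player_char="X", player2_char="O")
    demo.player_turn_UI([0, 0], player_char="X")
    demo.player_turn_UI([1, 1], player_char="O")
    demo.player_turn_UI([0, 1], player_char="X")
    demo.player_turn_UI([2, 2], player_char="O")
    demo.player_turn_UI([0, 2], player_char="X")  # completes the top row
    demo.print_game()
    print("winner on board:", demo.check_status())      # True: the top row is all X
    print("static check:", scheck_status(demo.puzzle))  # same answer via the module-level helper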
|
noah415/tic-tac-toe
|
TicTacToe.py
|
TicTacToe.py
|
py
| 5,548 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75070674748
|
#! /usr/bin/env python3
import json
def find_if (pred, collection):
try:
return next(filter(pred, collection))
except StopIteration:
return None
class Transition:
def __init__ (self, initial_state_name):
self.initial = initial_state_name
self.states = []
self.current = self.initial
def regist_state (self, state):
self.states.append(state)
DOT_TEMPLATE = """
digraph transition {{
graph [
charset = "UTF-8"
, label = "transition graph"
, labelloc = "t"
, labeljust = "c"
, bgcolor = "#ffffff"
, fontcolor = black
, fontsize = 18
, style = "filled"
, rankdir = TB
, margin = 0.2
, splines = spline
, ranksep = 1.0
, nodesep = 0.9
];
node [
colorscheme = "rdylgn11"
, style = "solid"
, fontsize = 16
, fontcolor = black
, fontname = "Migu 1M"
, color = black
, fillcolor = 7
, fixedsize = true
, height = 0.6
, width = 1.2
];
edge [
style = solid
, fontsize = 10
, fontcolor = black
, fontname = "Migu 1M"
, color = black
, labelfloat = true
, labeldistance = 2.5
, labelangle = 70
];
{nodes}
{edges}
}}
"""
NODE_TEMPLATE = "{0} [shape = box];\n"
EDGE_TEMPLATE = "{0} -> {1} [label = \"{2}\n({3})\", arrowhead = normal];\n"
def to_diagram (self):
nodes = "s [shape = circle, width = 0.1];\n"
        edges = Transition.EDGE_TEMPLATE.format("s", self.initial, "", "initial state")
for st in self.states:
nodes += Transition.NODE_TEMPLATE.format(st.name)
for cond in st.conditions:
edges += Transition.EDGE_TEMPLATE.format(st.name, cond.next, cond.name, cond.comment)
return Transition.DOT_TEMPLATE.format(nodes = nodes, edges = edges)
def to_json (self):
jsondict = {"initial": self.initial, "states": []}
for st in self.states:
statedict = {"name": st.name, "conditions": []}
for cond in st.conditions:
statedict["conditions"].append(\
{"name": cond.name, "next": cond.next, "comment": cond.comment})
jsondict["states"].append(statedict)
return json.dumps(jsondict, ensure_ascii = False)
def from_json (self, jsonstr):
jsondict = json.loads(jsonstr)
self.initial = jsondict["initial"]
self.current = self.initial
self.states = []
statedicts = jsondict["states"]
for st in statedicts:
state = TransitionState(st["name"])
conditiondicts = st["conditions"]
for cond in conditiondicts:
def incomplete_state ():
raise RuntimeError("incomplete state: {0}. (load from json)".format(cond["name"]))
state.regist_condition(cond["name"], cond["next"], incomplete_state, cond["comment"])
self.regist_state(state)
def update_check_fn (self, state_name, condition_name, check_fn):
state_info = find_if(lambda e: e.name == state_name, self.states)
if state_info is None:
raise RuntimeError("unregistered state: {0}".format(state_name))
condition_info = find_if(lambda e: e.name == condition_name, state_info.conditions)
if condition_info is None:
raise RuntimeError("unregistered condition: {0}, at {1}".format(condition_name, state_name))
condition_info.check = check_fn
def fill_check_fn (self, check_fn):
for st in self.states:
for cond in st.conditions:
cond.check = check_fn
def initialize (self):
self.current = self.initial
def transit (self, condition_name):
state_info = find_if(lambda e: e.name == self.current, self.states)
if state_info is None:
raise RuntimeError("unknown state: {0}".format(self.current))
condition_info = find_if(lambda e: e.name == condition_name, state_info.conditions)
if condition_info is None:
raise RuntimeError("unregistered condition: {0}, at {1}".format(condition_name, self.current))
if condition_info.check():
self.current = condition_info.next
print("transit to {0}".format(self.current))
return True
else:
print("fail transit by condition: {0}".format(condition_name))
return False
class TransitionState:
def __init__ (self, name):
self.name = name
self.conditions = []
    def regist_condition (self, condition):
        # NOTE: this overload is shadowed by the four-argument definition below; Python keeps only the last def.
        self.conditions.append(condition)
    def regist_condition (self, name, next_state_name, check_fn, comment):
        self.conditions.append(TransitionCondition(name, next_state_name, check_fn, comment))
class TransitionCondition:
def __init__ (self, name, next_state_name, check_fn, comment):
self.name = name
self.next = next_state_name
self.check = check_fn
self.comment = comment
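# Minimal usage sketch of the classes above: a two-state machine whose state and
# condition names are made up for illustration; the check functions always pass.
if __name__ == "__main__":
    tr = Transition("idle")
    idle = TransitionState("idle")
    idle.regist_condition("start", "running", lambda: True, "operator pressed start")
    running = TransitionState("running")
    running.regist_condition("stop", "idle", lambda: True, "operator pressed stop")
    tr.regist_state(idle)
    tr.regist_state(running)
    tr.transit("start")   # idle -> running
    tr.transit("stop")    # running -> idle
    print(tr.to_json())   # serialised form that from_json() can reload (check functions excluded)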
|
SPNSPN/state-json
|
py/transition.py
|
transition.py
|
py
| 4,337 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27247441921
|
revision = '4160ccb58402'
down_revision = None
branch_labels = None
depends_on = None
import json
import os
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
sections = {
'update_authorized_keys': 'local',
'authorized_keys_file': 'local',
'githome_executable': 'local',
'githome_id': 'githome',
}
def upgrade():
con = op.get_bind()
old_cfg = table('configsetting',
column('key', sa.String),
column('json_value', sa.String))
# check we know where to put each key
for key, value in con.execute(old_cfg.select()):
if key not in sections:
raise RuntimeError('Cannot migrate configuration, unknown '
'configuration value: {}'.format(key))
new_cfg = op.create_table('config',
sa.Column('key', sa.String(), nullable=False),
sa.Column('section', sa.String(), nullable=False),
sa.Column('data', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('key', 'section')
)
new_recs = [{
'key': key,
'section': sections[key],
'data': value,
} for key, value in con.execute(old_cfg.select())]
op.bulk_insert(new_cfg, new_recs)
import githome
gh_client = os.path.join(os.path.dirname(githome.__file__), 'gh_client')
op.bulk_insert(new_cfg, [
{'section': 'local', 'key': 'authorized_keys_start_marker',
'data': r'"# -- added by githome {}, do not remove these markers --\n"'},
{'section': 'local', 'key': 'authorized_keys_end_marker',
'data': r'"# -- end githome {}. keep trailing newline! --\n"'},
{'section': 'local', 'key': 'use_gh_client',
'data': json.dumps(True)},
{'section': 'local', 'key': 'gh_client_socket',
'data': json.dumps('ghclient.sock')},
{'section': 'local', 'key': 'gh_client_executable',
'data': json.dumps(gh_client)},
])
# rename config key githome_id to id
op.execute(new_cfg.update().where(new_cfg.c['key'] == 'githome_id')
.values(key='id'))
op.rename_table('user', 'users')
op.rename_table('public_key', 'public_keys')
op.drop_table('configsetting')
|
mbr/githome
|
alembic/versions/4160ccb58402_update_from_previous_version.py
|
4160ccb58402_update_from_previous_version.py
|
py
| 2,292 |
python
|
en
|
code
| 2 |
github-code
|
6
|
4630502954
|
import tkinter as Tk
from math import floor
import numpy as np
from PIL import Image,ImageTk
## ---------------------- ##
##| CLASSES |##
## ---------------------- ##
class Texture:
def __init__(self,path):
self._img = Tk.PhotoImage(file=path)
def getImg(self): return self._img
class Textures:
def __init__(self):
dirt = Texture('.\\Textures\\dirt.gif')
rock = Texture('.\\Textures\\rock.gif')
water = Texture('.\\Textures\\water.gif')
grass = Texture('.\\Textures\\grass.gif')
snowyGrass = Texture('.\\Textures\\snowyGrass.gif')
sand = Texture('.\\Textures\\sand.gif')
wood = Texture('.\\Textures\\wood.gif')
leaf = Texture('.\\Textures\\leaf.gif')
redFlower = Texture('.\\Textures\\redFlower.gif')
self.__textures = {'dirt':dirt,'rock':rock,'water':water,'grass':grass,'sand':sand,'wood':wood,'leaf':leaf,'redFlower':redFlower,'snowyGrass':snowyGrass}
def getDict(self): return self.__textures
class Camera:
def __init__(self,can,env):
self.__height = int(can['height'])
self.__width = int(can['width'])
self.__can = can
self.__env = env
        self.__scale = 40 # Block rendering size - DO NOT CHANGE WITHOUT RESIZING THE TEXTURES
# Camera position when starting
self.__posx = 8
self.__posy = 25
self.__chunkNumber = floor(self.__posx/16)
# Options
self.__renderingDistanceInChunks = 2
self.__moveVertical = 8
self.__moveHorizontal = 16
self.__skyUpdateTime = 1
self.__horzCamFollowing = 5
self.__vertCamFollowing = 5
# Data sets
self.__skies = dict()
self.__brightnesses = dict()
# skyRendering initialization
self.computeAndLoadImages()
backgroundImage = self.__skies['sky-0']
brightnessImage = self.__brightnesses['br-0']
self.__sky = self.__can.create_image(self.__width//2,self.__height//2,image=backgroundImage)
self.__brightness = self.__can.create_image(self.__width//2,self.__height//2,image=brightnessImage)
# Get useful values
def getScale(self): return self.__scale
def getPosx(self): return self.__posx
def getPosy(self): return self.__posy
# Convert a frame position into canvas position
def position2pixel(self,x,y):
xc = self.__posx
yc = self.__posy
xr = x-xc
yr = y-yc
px = self.__width//2 + xr*self.__scale
py = self.__height//2 - yr*self.__scale
return (px,py)
# Display stuff
def displayBlock(self,block):
x = block.getx()
y = block.gety()
(px1,py1) = self.position2pixel(x,y)
self.__can.delete(block.getDisplayAdress())
try:
img = block.getImg()
adress = self.__can.create_image(px1+self.__scale//2,py1-self.__scale//2,image=img)
except:
px2 = px1 + self.__scale
py2 = py1 - self.__scale
adress = self.__can.create_rectangle(px1,py1,px2,py2,fill=block.getColor())
block.setDisplayAdress(adress)
def displayChunk(self,chunk):
chunk.activate()
for blk in chunk.getBlocks().items():
self.displayBlock(blk[1])
def displayPlayer(self,player):
x1 = player.getPosx() - 0.25
y1 = player.getPosy() -0.9
x2 = x1 + 0.5
y2 = y1 + 1.8
(px1,py1) = self.position2pixel(x1,y1)
(px2,py2) = self.position2pixel(x2,y2)
displayAdress = self.__can.create_rectangle(px1,py1,px2,py2,fill='black')
player.setDisplayAdress(displayAdress)
def displayEnv(self,env):
for chunk in env.getChunks().items():
self.displayChunk(chunk[1])
# Move stuff
def moveBlock(self,block,dx,dy):
self.__can.move(block.getDisplayAdress(),dx*self.__scale,-dy*self.__scale)
def moveChunk(self,chunk,dx,dy):
for blk in chunk.getBlocks().items():
self.moveBlock(blk[1],dx,dy)
def movePlayer(self,player,dx,dy):
self.__can.move(player.getDisplayAdress(),dx*self.__scale,-dy*self.__scale)
self.__can.tag_raise(player.getDisplayAdress())
def moveEnv(self,dx,dy):
for chunk in self.__env.getChunks().items():
self.moveChunk(chunk[1],dx,dy)
# Chunk rendering methods
def eraseChunk(self,chunk):
chunk.disactivate()
for blk in chunk.getBlocks().items():
self.__can.delete(blk[1].getDisplayAdress())
def updateChunkRendeering(self,player):
playerChunk = player.getChunkNumber()
for chunk in self.__env.getChunks().items():
if abs(chunk[1].getChunkNumber() - playerChunk) > self.__renderingDistanceInChunks:
self.eraseChunk(chunk[1])
for n in range(-self.__renderingDistanceInChunks+playerChunk,self.__renderingDistanceInChunks+playerChunk):
if str(n) in self.__env.getChunks().keys():
if not self.__env.getChunks()[str(n)].isActive():
self.displayChunk(self.__env.getChunks()[str(n)])
else:
self.__env.createChunk(n)
self.displayChunk(self.__env.getChunks()[str(n)])
# Sky and brightness
def computeAndLoadImages(self):
print(' Creating and loading skies...')
T = self.__env.getDayAndNightCyclesDuration()
for t in range(0,T,self.__skyUpdateTime):
try:
self.__skies['sky-'+str(t)] = Tk.PhotoImage(file=".\\skies\\sky-"+str(int(t))+".gif")
except:
skyColor(t,self.__width,self.__height,T)
self.__skies['sky-'+str(t)] = Tk.PhotoImage(file=".\\skies\\sky-"+str(int(t))+".gif")
print(' Creating and loading brightnesses...')
for t in range(0,T,self.__skyUpdateTime):
try:
self.__brightnesses['br-'+str(t)] = Tk.PhotoImage(file=".\\brightnesses\\br-"+str(int(t))+".png")
except:
brightness(t,self.__width,self.__height,T)
self.__brightnesses['br-'+str(t)] = Tk.PhotoImage(file=".\\brightnesses\\br-"+str(int(t))+".png")
def updateSkyAndBrightnessRendering(self,t1,t2):
if floor(t1/self.__skyUpdateTime) == floor(t2/self.__skyUpdateTime) -1:
self.__can.delete(self.__sky)
self.__can.delete(self.__brightness)
T = self.__env.getDayAndNightCyclesDuration()
backgroundImage = self.__skies['sky-'+str(int(t2%T))]
brightnessImage = self.__brightnesses['br-'+str(int(t2%T))]
self.__sky = self.__can.create_image(self.__width//2,self.__height//2,image=backgroundImage)
self.__brightness = self.__can.create_image(self.__width//2,self.__height//2,image=brightnessImage)
self.reorder()
# Set all stuff on the good plane
def reorder(self):
self.__can.tag_lower(self.__sky)
self.__can.tag_raise(self.__brightness)
# Camera function call
def bind(self,player,env,t1,t2):
playerPosx = player.getPosx()
camPosx = self.__posx
playerPosy = player.getPosy()
camPosy = self.__posy
diffx = playerPosx-camPosx
diffy = playerPosy-camPosy
if (diffx/self.__horzCamFollowing)**2 + (diffy/self.__vertCamFollowing)**2 > 1:
self.moveEnv(-diffx,-diffy)
self.__posx += diffx
self.__posy += diffy
self.__can.delete(player.getDisplayAdress())
self.displayPlayer(player)
self.updateSkyAndBrightnessRendering(t1,t2)
## ---------------------- ##
##| ADDITIONAL FUNCTIONS |##
## ---------------------- ##
# Create the sky images
def skyColor(time,w,h,dayAndNightCycleTime):
T = dayAndNightCycleTime
transitionTime = dayAndNightCycleTime//6
size = (100,100)
img = Image.new('RGB', size)
upColor = [[0,0,0],[0,7,107],[0,65,163],[0,7,107]]
downColor = [[0,0,0],[250,196,0],[150, 192, 255],[250,196,0]]
if time < T//4 - transitionTime//2:
Cu1 = upColor[0]
Cu2 = upColor[1]
Cd1 = downColor[0]
Cd2 = downColor[1]
alpha = 0
elif time < T//4:
Cu1 = upColor[0]
Cu2 = upColor[1]
Cd1 = downColor[0]
Cd2 = downColor[1]
alpha = (time-(T//4 - transitionTime//2))/(transitionTime//2)
elif time < T//4 + transitionTime//2:
Cu1 = upColor[1]
Cu2 = upColor[2]
Cd1 = downColor[1]
Cd2 = downColor[2]
alpha = (time-T//4)/(transitionTime//2)
elif time < 3*T//4 - transitionTime//2:
Cu1 = upColor[2]
Cu2 = upColor[2]
Cd1 = downColor[2]
Cd2 = downColor[2]
alpha = 0
elif time < 3*T//4:
Cu1 = upColor[2]
Cu2 = upColor[3]
Cd1 = downColor[2]
Cd2 = downColor[3]
alpha = (time-(3*T//4 - transitionTime//2))/(transitionTime//2)
elif time < 3*T//4 + transitionTime//2:
Cu1 = upColor[3]
Cu2 = upColor[0]
Cd1 = downColor[3]
Cd2 = downColor[0]
alpha = (time-3*T//4)/(transitionTime//2)
else:
Cu1 = upColor[0]
Cu2 = upColor[0]
Cd1 = downColor[0]
Cd2 = downColor[0]
alpha = 1
R = np.linspace(Cu1[0]+(Cu2[0]-Cu1[0])*alpha,Cd1[0]+(Cd2[0]-Cd1[0])*alpha,100)
G = np.linspace(Cu1[1]+(Cu2[1]-Cu1[1])*alpha,Cd1[1]+(Cd2[1]-Cd1[1])*alpha,100)
B = np.linspace(Cu1[2]+(Cu2[2]-Cu1[2])*alpha,Cd1[2]+(Cd2[2]-Cd1[2])*alpha,100)
for i in range(100):
for j in range(100):
color = (int(R[j]),int(G[j]),int(B[j]))
img.putpixel((i,j),color)
img = img.resize((w*2,h*2))
img.save('.\\skies\\sky-'+str(int(time))+'.gif', "GIF")
# Create the brightness images
def brightness(time,w,h,dayAndNightCycleTime):
T = dayAndNightCycleTime
transitionTime = dayAndNightCycleTime//6
size = (w,h)
maxOpacity = 200
if time <T//4 - transitionTime//2:
transparency = maxOpacity
elif time < T//4 + transitionTime//2:
transparency = int(-(time-(T//4 - transitionTime//2))/transitionTime*maxOpacity+maxOpacity)
elif time < 3*T//4 - transitionTime//2:
transparency = 0
elif time < 3*T//4 + transitionTime//2:
transparency = int((time-(3*T//4 - transitionTime//2))/transitionTime*maxOpacity)
else:
transparency = maxOpacity
print(transparency)
img = Image.new('RGBA', size,(0,0,0,transparency))
img.save('.\\brightnesses\\br-'+str(int(time))+'.png', "PNG")
|
MaximePerriquet/PyCraft
|
rendering.py
|
rendering.py
|
py
| 10,623 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36643660386
|
# -*- coding: utf-8 -*-
"""
@author: QgZhan
@contact: [email protected]
@file: cifar.py
@time: 2022/4/19 11:19
"""
import os
from torch.utils.data import Dataset
from dataloader.dataloader_utils import *
from torchvision import datasets, transforms
from spikingjelly.datasets import cifar10_dvs
from torch.utils.data.sampler import SubsetRandomSampler
# your own data dir
DIR = {'CIFAR10': '/data/zhan/CV_data/cifar10',
       'CIFAR10DVS': '/data/zhan/Event_Camera_Datasets/CIFAR10DVS',
       'CIFAR10DVS_CATCH': '/data/zhan/Event_Camera_Datasets/CIFAR10DVS_dst_cache'
       # NOTE: get_cifar100() below also reads DIR['CIFAR100'], which is not defined here
       # and would raise a KeyError; add that entry before calling get_cifar100().
       }
def get_cifar10(batch_size, train_set_ratio=1.0):
"""
get the train loader and test loader of cifar10.
:return: train_loader, test_loader
"""
trans_train = transforms.Compose([transforms.Resize(48),
transforms.RandomCrop(48, padding=4),
                                      transforms.RandomHorizontalFlip(),  # random horizontal flip
                                      CIFAR10Policy(),  # TODO: to be commented out
                                      transforms.ToTensor(),
                                      # transforms.RandomGrayscale(),  # randomly convert to grayscale
                                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # normalize
                                      # transforms.Normalize((0., 0., 0.), (1, 1, 1)),
                                      # Cutout(n_holes=1, length=16)  # randomly cut n_holes holes of size length x length
])
trans_test = transforms.Compose([transforms.Resize(48),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
train_data = datasets.CIFAR10(DIR['CIFAR10'], train=True, transform=trans_train, download=True)
test_data = datasets.CIFAR10(DIR['CIFAR10'], train=False, transform=trans_test, download=True)
# take train set by train_set_ratio
n_train = len(train_data)
split = int(n_train * train_set_ratio)
indices = list(range(n_train))
random.shuffle(indices)
train_sampler = SubsetRandomSampler(indices[:split])
if train_set_ratio < 1.0:
train_dataloader = DataLoaderX(train_data, batch_size=batch_size, shuffle=False, num_workers=8, drop_last=True,
sampler=train_sampler, pin_memory=True)
else:
train_dataloader = DataLoaderX(train_data, batch_size=batch_size, shuffle=True, num_workers=8, drop_last=True,
pin_memory=True)
test_dataloader = DataLoaderX(test_data, batch_size=batch_size, shuffle=True, num_workers=8, drop_last=False,
pin_memory=True)
return train_dataloader, test_dataloader
def get_cifar10_DVS(batch_size, T, split_ratio=0.9, train_set_ratio=1, size=48, encode_type='TET'):
"""
    get the train loader and test loader of CIFAR10-DVS.
:param batch_size:
:param T:
:param split_ratio: the ratio of train set: test set
:param train_set_ratio: the real used train set ratio
:param size:
:param encode_type:
:return: train_loader, test_loader
"""
if encode_type is "spikingjelly":
trans = DVSResize((size, size), T)
train_set_pth = os.path.join(DIR['CIFAR10DVS_CATCH'], f'train_set_{T}_{split_ratio}_{size}.pt')
test_set_pth = os.path.join(DIR['CIFAR10DVS_CATCH'], f'test_set_{T}_{split_ratio}_{size}.pt')
if os.path.exists(train_set_pth) and os.path.exists(test_set_pth):
train_set = torch.load(train_set_pth)
test_set = torch.load(test_set_pth)
else:
origin_set = cifar10_dvs.CIFAR10DVS(root=DIR['CIFAR10DVS'], data_type='frame', frames_number=T,
split_by='number', transform=trans)
train_set, test_set = split_to_train_test_set(split_ratio, origin_set, 10)
if not os.path.exists(DIR['CIFAR10DVS_CATCH']):
os.makedirs(DIR['CIFAR10DVS_CATCH'])
torch.save(train_set, train_set_pth)
torch.save(test_set, test_set_pth)
elif encode_type is "TET":
path = '/data/zhan/Event_Camera_Datasets/CIFAR10DVS/temporal_effecient_training_0.9'
train_path = path + '/train'
test_path = path + '/test'
train_set = DVSCifar10(root=train_path)
test_set = DVSCifar10(root=test_path)
elif encode_type is "3_channel":
path = '/data/zhan/Event_Camera_Datasets/CIFAR10DVS/temporal_effecient_training_0.9'
train_path = path + '/train'
test_path = path + '/test'
train_set = Channel_3_DVSCifar10(root=train_path)
test_set = Channel_3_DVSCifar10(root=test_path)
# take train set by train_set_ratio
n_train = len(train_set)
split = int(n_train * train_set_ratio)
indices = list(range(n_train))
random.shuffle(indices)
train_sampler = SubsetRandomSampler(indices[:split])
# valid_sampler = SubsetRandomSampler(indices[split:])
# generate dataloader
# train_data_loader = DataLoaderX(dataset=train_set, batch_size=batch_size, shuffle=True, drop_last=True,
# num_workers=8, pin_memory=True)
train_data_loader = DataLoaderX(dataset=train_set, batch_size=batch_size, shuffle=False, drop_last=True,
sampler=train_sampler, num_workers=8,
                                    pin_memory=True)  # SubsetRandomSampler does its own shuffling and cannot be combined with shuffle=True
test_data_loader = DataLoaderX(dataset=test_set, batch_size=batch_size, shuffle=False, drop_last=False,
num_workers=8, pin_memory=True)
return train_data_loader, test_data_loader
def get_cifar100(batch_size):
"""
get the train loader and test loader of cifar100.
:return: train_loader, test_loader
"""
trans_t = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[n / 255. for n in [129.3, 124.1, 112.4]],
std=[n / 255. for n in [68.2, 65.4, 70.4]]),
Cutout(n_holes=1, length=16)
])
trans = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[n / 255. for n in [129.3, 124.1, 112.4]],
std=[n / 255. for n in [68.2, 65.4, 70.4]])])
train_data = datasets.CIFAR100(DIR['CIFAR100'], train=True, transform=trans_t, download=True)
test_data = datasets.CIFAR100(DIR['CIFAR100'], train=False, transform=trans, download=True)
train_dataloader = DataLoaderX(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
test_dataloader = DataLoaderX(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
return train_dataloader, test_dataloader
class DVSCifar10(Dataset):
# This code is form https://github.com/Gus-Lab/temporal_efficient_training
def __init__(self, root, train=True, transform=True, target_transform=None):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.resize = transforms.Resize(size=(48, 48)) # 48 48
self.tensorx = transforms.ToTensor()
self.imgx = transforms.ToPILImage()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
data, target = torch.load(self.root + '/{}.pt'.format(index))
# if self.train:
new_data = []
for t in range(data.size(0)):
new_data.append(self.tensorx(self.resize(self.imgx(data[t, ...]))))
data = torch.stack(new_data, dim=0)
if self.transform:
flip = random.random() > 0.5
if flip:
data = torch.flip(data, dims=(3,))
off1 = random.randint(-5, 5)
off2 = random.randint(-5, 5)
data = torch.roll(data, shifts=(off1, off2), dims=(2, 3))
if self.target_transform is not None:
target = self.target_transform(target)
return data, target.long().squeeze(-1)
def __len__(self):
return len(os.listdir(self.root))
class Channel_3_DVSCifar10(Dataset):
def __init__(self, root, train=True, transform=True, target_transform=None):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.resize = transforms.Resize(size=(48, 48)) # 48 48
self.tensorx = transforms.ToTensor()
self.imgx = transforms.ToPILImage()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
data, target = torch.load(self.root + '/{}.pt'.format(index))
T, C, H, W = data.shape
# if self.train:
new_data = []
for t in range(T):
tmp = data[t, ...] # (2, H, W)
tmp = torch.cat((tmp, torch.zeros(1, H, W)), dim=0) # (3, H, W)
mask = (torch.randn((H, W)) > 0).to(data)
tmp[2].data = tmp[0].data * mask + tmp[1].data * (1 - mask)
new_data.append(self.tensorx(self.resize(self.imgx(tmp))))
data = torch.stack(new_data, dim=0)
if self.transform:
flip = random.random() > 0.5
if flip:
data = torch.flip(data, dims=(3,))
off1 = random.randint(-5, 5)
off2 = random.randint(-5, 5)
data = torch.roll(data, shifts=(off1, off2), dims=(2, 3))
if self.target_transform is not None:
target = self.target_transform(target)
return data, target.long().squeeze(-1)
def __len__(self):
return len(os.listdir(self.root))
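# Minimal usage sketch of the loaders above; it assumes the dataset paths in DIR
# exist on the local machine, so treat it as documentation rather than a test.
if __name__ == "__main__":
    train_dataloader, test_dataloader = get_cifar10(batch_size=64, train_set_ratio=1.0)
    images, labels = next(iter(train_dataloader))
    print(images.shape, labels.shape)  # expected: torch.Size([64, 3, 48, 48]) torch.Size([64])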
|
rtao499/SAANet
|
dataloader/cifar.py
|
cifar.py
|
py
| 10,609 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30471382710
|
from tkinter import *
from csv import *
import mainn
import orderr
class login_screen:
b = 0
def getEntry(self):
details_list = []
def openorder():
self.root7.destroy()
o=orderr.orderscr()
o.ord()
#csv file
with open("register.csv",'r') as reg_file:
read = reader(reg_file)
for i in read:
if i != []:
details_list.append(i)
login_id = self.entry_1.get()
#test condition for opening the orderr window.
if len(login_id) == 7:
a = 0
for i in details_list:
if login_id == i[4]:
i.insert(0,"user")
with open("details.csv",'w') as det:
det.truncate()
write = writer(det)
write.writerow(i) #contains the current user details.
det.close()
a = 1
#closing login window and moving to orderr screen.
openorder()
break
if a == 0 and self.b == 0:
Label(self.root7 , text = "Invalid User ID" , fg = "red").pack()
self.b = 1
else:
if self.b == 0:
Label(self.root7 , text = "Invalid User ID" , fg = "red").pack()
self.b = 1
def login(self):
self.root7 = Tk()
self.root7.geometry('500x500')
self.root7.title("Login Form")
def backfn():
self.root7.destroy()
ms=mainn.mainscr()
ms.screen()
self.label_0 = Label(self.root7, text="LOGIN",width=20,font=("bold", 20))
self.label_0.place(x=90,y=53)
self.label_1 = Label(self.root7, text="ROLL NO",width=20,font=("bold", 10))
self.label_1.place(x=70,y=130)
self.entry_1 = Entry(self.root7)
self.entry_1.place(x=240,y=130)
self.button=Button(self.root7, text='login',width=20,bg='brown',fg='black',command=self.getEntry)
self.button.place(x=270,y=200)
self.button111=Button(self.root7, text='back',width=20,bg='brown',fg='black',command=backfn)
self.button111.place(x=100,y=200)
self.root7.mainloop()
|
prithiknataraj/OOPS-Laundry_Service
|
OOPS Laundry Service/loginscreen.py
|
loginscreen.py
|
py
| 2,596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42631511615
|
#!/usr/bin/env python3
# Program to implement tweet classification
import nltk
import re
import sys
from collections import Counter
import pandas as pd
nltk.download('punkt')
# Read files
train_file = sys.argv[1]
test_file = sys.argv[2]
output_file = sys.argv[3]
train = open(train_file, 'r', errors='ignore').read()
test = open(test_file, 'r', errors='ignore').read()
location_wise_data, location_counts = {},{}
bag_of_words = []
# Preprocessing data by removing spacial characters
def preprocess_data(fileData):
clean_data = re.sub(r'[^a-zA-Z\d,_\s]', '', fileData)
clean_data = re.sub('([_]+)_','_', clean_data)
clean_data = re.sub('([ ]+)',' ', clean_data)
clean_data = clean_data.replace("\n"," ")
return clean_data
# Created a dictionary of dictionary to store
# Location : {word : count}
def populate_train_data(clean_train):
prev_start, prev_city = -1, ''
bag_of_words_str = ''
# Regular expression matches with the format of city,_state
for m in re.compile(r'\w{4,},_\w+ ').finditer(clean_train):
if(prev_start != -1 and prev_city != ''): # empty initially
if prev_city not in location_wise_data:
data = {}
tweet = clean_train[prev_start+len(prev_city)+1:m.start()]
tweet = tweet.replace(",","")
location_wise_data[prev_city] = tweet
location_counts[prev_city] = 1
bag_of_words_str += tweet
else:
data = location_wise_data.get(prev_city)
tweet = clean_train[prev_start+len(prev_city)+1:m.start()]
tweet = tweet.replace(",","")
location_wise_data[prev_city] =location_wise_data.get(prev_city)+ ' ' +tweet
location_counts[prev_city] = location_counts.get(prev_city)+1
bag_of_words_str += tweet
prev_start = m.start()
prev_city = m.group()
prev_city = prev_city.replace(" ","")
bag_of_words_str = re.sub('([ ]+) ',' ', bag_of_words_str)
bag_of_words = bag_of_words_str.split(" ")
# Function to generate tokens from tweet
# Find the probability of each word as count of word in location / number of words in a location
def generate_tokens_prob():
for k,v in (location_wise_data.items()):
list_of_words = v.lower().split(" ")
# Remove stop words
list_of_words = [x for x in list_of_words if x not in ['', '_', ',','\'','a','an','and','are','the','as', 'at', 'be' ,'by' ,'us','it','too','she' ,'for', 'from', 'has','he', 'in', 'yes','is', 'its', 'of', 'on', 'that', 'to', 'was', 'were', 'will', 'with','my','you','mine','yours','we','can','this','our','because','him','his','her']]
total_words = len(list_of_words)
location_wise_data[k] = Counter(list_of_words)
counter_dict = location_wise_data.get(k)
for k2,v2 in counter_dict.items():
counter_dict[k2] = v2 / total_words
clean_train = preprocess_data(train)
clean_test = test
populate_train_data(clean_train)
generate_tokens_prob()
# Test data is stored in dataframe
prev_start, prev_city = -1, ''
cols = ['actual','clean_tweet','tweet', 'predicted']
list_data = []
for m in re.compile(r'\w{4,},_\w+ ').finditer(clean_test):
if(prev_start != -1 and prev_city != ''): # empty initially
tweet = clean_test[prev_start+len(prev_city)+1:m.start()]
clean_tweet = re.sub(r'[^a-zA-Z\d\s]', '', tweet)
list_data.append([prev_city, clean_tweet, tweet, ''])
prev_start = m.start()
prev_city = m.group()
prev_city = prev_city.replace(" ","")
# To store last row
tweet = clean_test[prev_start+len(prev_city)+1:len(clean_test)]
clean_tweet = re.sub(r'[^a-zA-Z\d\s]', '', tweet)
clean_tweet = clean_tweet.replace("\n"," ")
list_data.append([prev_city, clean_tweet, tweet, ''])
test_df = pd.DataFrame(list_data, columns=cols)
# Applying naive bayes to find the probablity of location given list of words and then returning the location having maximum probablity
for index, row in test_df.iterrows():
wordList = row['clean_tweet'].lower().split(" ")
probabilies_by_city = {}
for city in location_counts.keys():
prob = 1
for word in wordList:
try:
# Naive bayes assumes that words are independent given location
prob = prob * location_wise_data.get(city).get(word)
except:
                # If a word is not found in the given location, assign a very low probability to that word
prob = prob * 0.0000001
# Probablity of any location is 1/length of cities
probabilies_by_city[city] = prob * (1/len(location_wise_data))
row['predicted'] = max(probabilies_by_city, key = probabilies_by_city.get)
# Finding accuracy of test data
correct, wrong = 0, 0
for index, row in test_df.iterrows():
if(row['actual'] == row['predicted']):
correct += 1
else:
wrong +=1
print('Test Accuracy - ', correct/ (correct+wrong)*100)
#Writing to Output
f = open(output_file, "w+")
for index, row in test_df.iterrows():
# Actual tweet is used instead of cleaned tweet data
f.write(row['predicted'] + " " + row['actual'] + " " + row['tweet'])
f.close()
#Printing Top 5 words associated with each location
location_with_top_words = {}
cities = []
top_words = []
for k,v in (location_wise_data.items()):
li = []
cities.append(k)
for k2, v2 in v.most_common(5):
li.append(k2)
top_words.append(li)
location_with_top_words[k] = li
# Used panda tables to display locations having top 5 words
Table = {"Location ":cities, "Top 5 words ":top_words}
TableDF = pd.DataFrame(Table)
print(TableDF)
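# Illustrative sketch of the scoring rule used above: P(location | words) is proportional
# to P(location) * product of P(word | location). The counts below are invented toy values
# and the function is deliberately never called, so the script's behaviour is unchanged.
def _toy_naive_bayes_demo():
    word_probs = {
        'Chicago,_Illinois': {'bears': 0.02, 'lake': 0.01},
        'Boston,_Massachusetts': {'bears': 0.001, 'lake': 0.005},
    }
    tweet_words = ['bears', 'lake']
    scores = {}
    for city, probs in word_probs.items():
        score = 1 / len(word_probs)  # uniform prior over locations
        for word in tweet_words:
            score *= probs.get(word, 0.0000001)  # same smoothing constant as used above
        scores[city] = score
    return max(scores, key=scores.get)  # 'Chicago,_Illinois'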
|
tanvi5/Games-and-Bayes
|
part2/geolocate.py
|
geolocate.py
|
py
| 5,766 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14400330096
|
#!/usr/bin/env python3.8
"""
Given a string, return a list of all possible permutations of the string and the count of those permutations.
"""
count = 0
list = []
def permutation(string,prefix):
global count
if len(string)==0:
count += 1
list.append(prefix)
else:
for i in range(len(string)):
permutation(string[0:i]+string[i+1:], prefix+string[i])
def permutation1(string):
out = []
if len(string) == 1:
out = [string]
else:
for i, char in enumerate(string):
for perm in permutation1(string[:i]+string[i+1:]):
out += [char + perm]
return out
string = "ABC"
permutation(string, "")
print(list, count)
list = permutation1(string)
print(list, len(list))
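# Cross-check sketch: the standard library's itertools.permutations should agree with
# both functions above, and a string of n distinct characters has n! permutations (3! = 6).
from itertools import permutations
builtin_perms = ["".join(p) for p in permutations(string)]
print(sorted(builtin_perms) == sorted(list), len(builtin_perms))  # True 6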
|
dnootana/Python
|
Interview/string_permutation.py
|
string_permutation.py
|
py
| 682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7306633247
|
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from data_loader import get_loader
from CNNtoRNN import CNNtoRNN
def train():
transform = transforms.Compose(
[
transforms.Resize((356, 356)),
transforms.RandomCrop((299, 299)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # mean and std for each channel (RGB)
]
)
train_loader, dataset = get_loader(
root_folder="../images",
captions_file="../captions.txt",
transform=transform,
num_workers=2
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
load_model = False
save_model = False
train_CNN = False
# Hyperparameters
embed_size = 256
hidden_size = 256
vocab_size = len(dataset.vocab)
num_layers = 1
learning_rate = 3e-4
num_epochs = 2
model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for name, param in model.encoderCNN.inception.named_parameters():
if "fc.weight" in name or "fc.bias" in name:
param.requires_grad = True
else:
param.requires_grad = False
model.train()
for epoch in range(num_epochs):
for idx, (imgs, captions) in tqdm(
enumerate(train_loader), total=len(train_loader), leave=False
):
imgs = imgs.to(device)
captions = captions.to(device)
outputs = model(imgs, captions[:-1])
loss = criterion(
outputs.reshape(-1, outputs.shape[2]), captions.reshape(-1)
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if __name__ =="__main__":
train()
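# Illustrative sketch of the checkpointing step that the unused save_model flag above
# presumably gates; the function name and file path are assumptions, and it is not
# called anywhere in this script.
def save_checkpoint(model, optimizer, epoch, path="checkpoint.pth.tar"):
    # Persist everything needed to resume training later.
    state = {
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "epoch": epoch,
    }
    torch.save(state, path)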
|
KarstenKu-Hub/ImageCaptioning
|
train.py
|
train.py
|
py
| 2,050 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25868835750
|
dan = {'name': 'Dan',
'age': 27,
'city': 'Madison',
'state': 'WI'}
pierce = {'name': 'Pierce',
'city': 'Madison',
'state': 'WI'}
def print_location(user):
if 'name' not in user:
raise ValueError('user must have name')
if 'city' not in user:
raise ValueError('user must have city')
if 'state' not in user:
raise ValueError('user must have state')
print(f"{user['name']} is living in {user['city']}, {user['state']}")
def print_age(user):
if 'name' not in user:
raise ValueError('user must have name')
if 'age' not in user:
raise ValueError('user must have age')
print(f"{user['name']} is {user['age']}.")
try:
print_location(dan)
print_location(pierce)
except ValueError as e:
print(f'ERROR: {e}')
try:
print_age(dan)
print_age(pierce) # raises ValueError
except ValueError as e:
print(f'ERROR: {e}')
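# Illustrative alternative: the repeated key checks above are the kind of boilerplate a
# decorator can factor out. This refactor is one possible approach, not necessarily the
# one the rest of the repository uses; requires_keys and print_location_v2 are new names.
from functools import wraps
def requires_keys(*keys):
    def decorator(func):
        @wraps(func)
        def wrapper(user):
            for key in keys:
                if key not in user:
                    raise ValueError(f'user must have {key}')
            return func(user)
        return wrapper
    return decorator
@requires_keys('name', 'city', 'state')
def print_location_v2(user):
    print(f"{user['name']} is living in {user['city']}, {user['state']}")
print_location_v2(dan)  # same output as print_location(dan)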
|
madison-python/decorators-and-descriptors
|
decorator2.py
|
decorator2.py
|
py
| 944 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75204308988
|
print('''Various Array Operations
1. Linear Search
2. Binary Search
3. Lowest Number
4. Selection Sort
Press any other key to exit''')
def create_array():
L = [int(input('Enter element ')) for I in range(int(input('Enter size ')))]
print(L)
return L
def low(L):
print(f'Lowest no : {min(L)}')
def lin_search(L):
x = int(input('Enter element to be searched : '))
for I in range(len(L)):
if L[I] == x:
print('Element found at index position :',I)
return
print('Element not found')
def binary_search(L, high , low , x):
L = sel_sort(L)
if high < low :
return 'Element not found'
mid = ( high + low )//2
if L[mid]>x :
return binary_search(L, mid-1 , low , x)
elif L[mid]<x :
return binary_search(L, high , mid+1 , x)
else:
return f'Element found at index position (in the sorted array{L}) at {mid}'
def sel_sort(L):
for i in range(len(L)):
min_idx = i
for j in range(i+1, len(L)):
if L[min_idx] > L[j]:
min_idx = j
L[i], L[min_idx] = L[min_idx], L[i]
return L
L = create_array()
while True:
choice = input('Enter choice : ')
if choice == '1':
lin_search(L)
elif choice == '2':
x = int(input('Enter element to be searched : '))
print(binary_search(L, len(L)-1 , 0 ,x))
elif choice == '3':
low(L)
elif choice == '4':
print(f'Sorted Array : {sel_sort(L)}')
else:
raise SystemExit
|
CS-ION/Class-12-Practicals
|
Practicals/20.py
|
20.py
|
py
| 1,600 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75163131066
|
import pprint, random, datetime
class Cliente():
_nomes = ['ERIC RUIZ', 'ROBERTA DE LIMA', 'DEIVIDI SCALZAVARA', 'ADOLFO NETO', 'JOSE MONESTEL', 'WAGNER CORREIA', 'JACEGUAY ZUKOSKI', 'MICHEL SOUZA', 'MAYRA RODRIGUES', 'MICHEL DUARTE', 'MARCIO FOSSA', 'MARCEL BORNANCIN', 'ELOISA PERIN', 'TIAGO WIPPEL', 'LUCAS FISCHER', 'DIEGO PRANDO', 'ADRIANO WEIGUERT NAGASAVA', 'FERNANDO MIRANDA', 'LUIS MONTES', 'MARCELO DE SOUZA']
_ruas = ['Av. Brasil', 'Rua Uruguai', 'Rua das Acácias', 'Rua Bulcão Viana', 'Av Marcos Konder']
_cidades = ['Itajaí', 'Florianópolis', 'Brusque', 'Navegantes']
_paises = ['Brasil']
def __init__(self):
self.nome = random.choice(self._nomes)
self.email = self.generateEmail()
self.endereco = {}
self.endereco['pais'] = random.choice(self._paises)
self.endereco['cidade'] = random.choice(self._cidades)
self.endereco['rua'] = random.choice(self._ruas)
self.endereco['numero'] = random.randint(10,999)
self.endereco['complemento'] = ''
def generateEmail(self, domain="fakemail.net"):
return self.nome.lower().replace(' ', '_') + "@" + domain
def getRandom():
clientes = []
for n in Cliente._nomes:
clientes.append({
'nome': n,
'contato': {
'email': n.lower().replace(' ', '_') + "@fakemail.net",
'telefone': '9' + str(random.randint(80000000, 99999999))
},
'endereco': {
'cidade': random.choice(Cliente._cidades),
'complemento': '',
'numero': random.randint(1, 999),
'pais': random.choice(Cliente._paises),
'rua': random.choice(Cliente._ruas)
}
})
return clientes
class Venda():
_clientes = [
{"_id": "5dc58145cfb83d37c2e6d1d8", "nome": "ERIC RUIZ"},
{"_id": "5dc58145cfb83d37c2e6d1d9", "nome": "ROBERTA DE LIMA"},
{"_id": "5dc58145cfb83d37c2e6d1da", "nome": "DEIVIDI SCALZAVARA"},
{"_id": "5dc58145cfb83d37c2e6d1db", "nome": "ADOLFO NETO"},
{"_id": "5dc58145cfb83d37c2e6d1dc", "nome": "JOSE MONESTEL"},
{"_id": "5dc58145cfb83d37c2e6d1dd", "nome": "WAGNER CORREIA"},
{"_id": "5dc58145cfb83d37c2e6d1de", "nome": "JACEGUAY ZUKOSKI"},
{"_id": "5dc58145cfb83d37c2e6d1df", "nome": "MICHEL SOUZA"},
{"_id": "5dc58145cfb83d37c2e6d1e0", "nome": "MAYRA RODRIGUES"},
{"_id": "5dc58145cfb83d37c2e6d1e1", "nome": "MICHEL DUARTE"},
{"_id": "5dc58145cfb83d37c2e6d1e2", "nome": "MARCIO FOSSA"},
{"_id": "5dc58145cfb83d37c2e6d1e3", "nome": "MARCEL BORNANCIN"},
{"_id": "5dc58145cfb83d37c2e6d1e4", "nome": "ELOISA PERIN"},
{"_id": "5dc58145cfb83d37c2e6d1e5", "nome": "TIAGO WIPPEL"},
{"_id": "5dc58145cfb83d37c2e6d1e6", "nome": "LUCAS FISCHER"},
{"_id": "5dc58145cfb83d37c2e6d1e7", "nome": "DIEGO PRANDO"},
{"_id": "5dc58145cfb83d37c2e6d1e8", "nome": "ADRIANO WEIGUERT NAGASAVA"},
{"_id": "5dc58145cfb83d37c2e6d1e9", "nome": "FERNANDO MIRANDA"},
{"_id": "5dc58145cfb83d37c2e6d1ea", "nome": "LUIS MONTES"},
{"_id": "5dc58145cfb83d37c2e6d1eb", "nome": "MARCELO DE SOUZA"}
]
_produtos = {
'smartphone': [
{'nome': 'Galaxy s10', 'valor_unitario': 999.99},
{'nome': 'Xiaomi Redmi', 'valor_unitario': 768.89},
{'nome': 'iPhone 11 pro', 'valor_unitario': 6899.0},
{'nome': 'LG K9', 'valor_unitario': 648.99},
{'nome': 'Moto G7 Play', 'valor_unitario': 829.90}
],
'notebook': [
{'nome': 'Lenovo Carbon', 'valor_unitario': 9999.98},
{'nome': 'Mac Book Air', 'valor_unitario': 4680.0},
{'nome': 'Dell XPS', 'valor_unitario': 7699.79},
{'nome': 'Alienware', 'valor_unitario': 12350.0},
{'nome': 'Positivo Motion', 'valor_unitario': 1450.0},
],
'tablet': [
{'nome': 'Galaxy Tab A10', 'valor_unitario': 899},
{'nome': 'Multilaser M7S', 'valor_unitario': 375.5},
{'nome': 'Amazon Fire7', 'valor_unitario': 359.99},
{'nome': 'iPad', 'valor_unitario': 2159.89},
{'nome': 'Acer Iconia', 'valor_unitario': 499.0}
],
'monitor': [
{'nome': 'LG Led 20-M37', 'valor_unitario': 1289.0},
{'nome': 'Samsung 32 Curve', 'valor_unitario': 2790.99},
{'nome': 'Philips LED 185', 'valor_unitario': 269.9},
{'nome': 'AOC 24 Freesync', 'valor_unitario': 619.29}
],
'câmera digital': [
{'nome': 'Canon Rebel SL2', 'valor_unitario': 3000},
{'nome': 'Sony W800', 'valor_unitario': 659},
{'nome': 'Leica V-lux t114', 'valor_unitario': 12300},
{'nome': 'Nikon Coolpix S8100', 'valor_unitario': 899},
],
'headset': [
{'nome': 'Razer Kraken', 'valor_unitario': 328.9},
{'nome': 'AKG K92', 'valor_unitario': 219.90},
{'nome': 'Sony MDR-5A', 'valor_unitario': 414.62},
{'nome': 'Apple Beats Studio', 'valor_unitario': 1599}
],
'carregador': [
{'nome': 'Qi wireless 10w', 'valor_unitario': 12.99},
{'nome': 'Universal 3 USB 3A', 'valor_unitario': 27.8},
{'nome': 'Qualcomm Turbo 3A', 'valor_unitario': 36.5}
]
}
def getRandom():
classificacao_produto = random.choice(list(Venda._produtos.keys()))
produto = random.choice(Venda._produtos[classificacao_produto])
cliente = random.choice(Venda._clientes)
return {
'nome_produto': produto['nome'],
'valor_unitario': produto['valor_unitario'],
'classificacao_produto': classificacao_produto,
'quantidade': random.choice([1,1,1,1,1,2,2,2,3,4]),
'nome_cliente': cliente['nome'],
'id_cliente': cliente['_id'],
'data_venda': datetime.date(
random.randint(2017,2019),
random.randint(1,12),
random.randint(1,28)
).isoformat()
}
def getRandomss():
c = random.choice(Venda._clientes)
for c in Venda._clientes:
vendas = random.randint(4,7)
while vendas > 0:
venda = Venda.getRandom()
venda['id_cliente'] = c['_id']
venda['nome_cliente'] = c['nome']
pp.pprint(venda)
vendas = vendas - 1
# def getRandom():
# return {
# 'nome_produto':
# 'valor_unitario':
# 'classificacao':
# 'quantidade':
# 'nome_cliente':
# 'id_cliente':
# 'data_venda':
# }
pp = pprint.PrettyPrinter()
# pp.pprint(Cliente.nomes)
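# Minimal usage sketch: Venda.getRandom() returns one synthetic sale record; a few are
# pretty-printed here with the pp instance created above.
for _ in range(3):
    pp.pprint(Venda.getRandom())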
|
e-ruiz/big-data
|
01-NoSQL/atividade-03/big_data_atividade_3.py
|
big_data_atividade_3.py
|
py
| 7,003 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
28905652001
|
xyxyAreaFill = [1115, 830, 1670, 1375] #[x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
xywhAreaFill = xyxyAreaFill.copy() #[x, y, w, h]; copied so the xyxy list is not mutated below
xywhAreaFill[0] = xyxyAreaFill[0] #x top-left
xywhAreaFill[1] = xyxyAreaFill[1] #y top-left
xywhAreaFill[2] = xyxyAreaFill[2]-xyxyAreaFill[0] #width
xywhAreaFill[3] = xyxyAreaFill[3]-xyxyAreaFill[1] #height
areaSize = [3,3]
pointLocation = []
for j in range(areaSize[1]+1):
for i in range(areaSize[0]+1):
x = xywhAreaFill[2]/areaSize[0]*i + xywhAreaFill[0]
y = xywhAreaFill[3]/areaSize[1]*j + xywhAreaFill[1]
pointLocation.append([x,y])
print(pointLocation[1][0])
print(len(pointLocation))
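# Quick sanity check: an areaSize of [3, 3] should yield (3 + 1) * (3 + 1) = 16 grid
# points, which matches the length printed above.
assert len(pointLocation) == (areaSize[0] + 1) * (areaSize[1] + 1)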
|
benediktusbryan/Box-in-Area-Detection-System-with-AI
|
coba.py
|
coba.py
|
py
| 707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21211275641
|
def mySolution():
n = 4
storage = [1, 5, 10, 100]
# 1 10 100
# 1 5 10 100
sum1, sum2 = 0, 0
for i in range(len(storage)):
if i % 2 == 0:
sum2 += storage[i]
else:
sum1 += storage[i]
print(max(sum1, sum2))
def dp():
n = int(input())
arr = list(map(int, input().split()))
d = [0] * len(arr)
d[0] = arr[0]
    d[1] = max(arr[0], arr[1])
for i in range(2, n):
d[i] = max(d[i - 1], d[i - 2] + arr[i])
print(d[n - 1])
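# Worked example of the recurrence above, d[i] = max(d[i-1], d[i-2] + arr[i]), applied to
# the sample [1, 5, 10, 100] from mySolution(); defined as a function so nothing runs on import.
def demo():
    arr = [1, 5, 10, 100]
    d = [0] * len(arr)
    d[0] = arr[0]
    d[1] = max(arr[0], arr[1])
    for i in range(2, len(arr)):
        d[i] = max(d[i - 1], d[i - 2] + arr[i])
    return d  # [1, 5, 11, 105] -> the best obtainable total is 105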
|
kimkimj/Algorithm
|
python/DP/foodStorage.py
|
foodStorage.py
|
py
| 514 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73947121148
|
# utf-8
from random import randint
def teste_de_miller(p, b):
"""p>2 ímpar e b inteiro tal que b != 0 em IZ_p
Retorno
-------
True se p é composto
False se teste inconclusivo
Exemplos
--------
>>> teste_de_miller(27, 2)
True
>>> teste_de_miller(25, 7)
False
>>> teste_de_miller(25, 2)
True
"""
def calcula_q_k(n):
q = n - 1
k = 0
while True:
if q // 2 != q / 2:
return q, k
q = q // 2
k += 1
# "Passo 0": Checagens
if (p % 2) == 0: return True
# Para entendermos melhor
menos_um = p - 1
# Passo 1
q, k = calcula_q_k(p)
# Passo 2
chute = pow(b, q, p)
if chute == 1 or chute == menos_um:
return False
# Passo 3
contador = 1
while contador < k:
chute = pow(chute, 2, p)
if chute == 1: return True
if chute == menos_um: return False
contador += 1
# Passo 4
return True
def teste_de_miller_rabin(p, num_iter=10):
"""Testa p com Miller-Rabin e bases aleatórias
Retorno
-------
True se p é composto
False se p é primo (provavelmente)
Notas
-----
Teorema de Rabin: Teste de Miller acerta em 3/4 das bases entre 2 e p-2.
Então, 10 iterações é suficiente para a resposta ser precisa.
"""
for _ in range(num_iter):
        # choose a random base
base = 0
while (base % p) == 0:
base = randint(0, 10**4)
if teste_de_miller(p, base):
return True
return False
if __name__ == "__main__":
    # First, run the doctests via testmod
from doctest import testmod
testmod()
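    # Illustrative check: a few known primes and composites run through the
    # Miller-Rabin test defined above (561 is a Carmichael number).
    for n in [101, 561, 7919, 7917]:
        is_composite = teste_de_miller_rabin(n)
        print(n, "composite" if is_composite else "probably prime")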
|
d-nct/cripto-python
|
miller_rabin.py
|
miller_rabin.py
|
py
| 1,728 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
7259480306
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import cv2
import sys
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(6,200)
self.fc2 = nn.Linear(200,100)
self.fc3 = nn.Linear(100,50)
self.fc4 = nn.Linear(50,4)
def forward(self, x):
x = F.relu(self.fc4(F.relu(self.fc3(F.relu(self.fc2(F.relu(self.fc1(x))))))))
return x
net = Net()
input = Variable(torch.randn(1,6), requires_grad=True)
out = net(input)
import torch.optim as optim
criterion = torch.nn.SmoothL1Loss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
data=[]
f=open('data.csv', "r")
lines = f.readlines()
for line in lines:
line=line.rstrip()
data.append([int(s) for s in line.split(",")])
min_loss=sys.maxsize
for epoch in range(100):
for i, data2 in enumerate(data):
x1, y1,x2,y2,x3,y3, bx1, by1, bx2, by2 = iter(data2)
X, Y = Variable(torch.FloatTensor([x1, y1, x2, y2, x3, y3]), requires_grad=True), Variable(torch.FloatTensor([bx1, by1, bx2, by2]), requires_grad=False)
optimizer.zero_grad()
outputs = net(X)
loss = criterion(outputs, Y)
loss.backward()
optimizer.step()
if (i!=0 and i % 99 == 0):
print("Epoch {} - loss: {}".format(epoch, loss.data))
if(loss<min_loss):
min_loss=loss
torch.save(net.state_dict(), 'model.pth')
# Convert the network output to plain ints so the OpenCV drawing calls below accept them
x, y, w, h = [int(v.item()) for v in net(Variable(torch.Tensor([310, 134, 391, 258, 470, 207])))]
print((x, y, w, h))
def draw_humans1(npimg, x, y, w, h, imgcopy=False):
if imgcopy:
npimg = np.copy(npimg)
image_h, image_w = npimg.shape[:2]
cv2.line(npimg, (x,y),(x,y+h),CocoColors[0],4)
cv2.line(npimg, (x,y+h),(x+w,y+h),CocoColors[1],4)
cv2.line(npimg, (x+w,y),(x+w,y+h),CocoColors[2],4)
cv2.line(npimg, (x+w,y),(x,y),CocoColors[3],4)
return npimg
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
oriImg = cv2.imread("images/sample3_cam2_627.jpg")
out = draw_humans1(oriImg,x,y,abs(w-x),abs(h-y))
cv2.imshow('result.png',out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
asbudhkar/Hand-Detector-with-Pose-Estimation
|
train.py
|
train.py
|
py
| 2,462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13933582750
|
from django import forms
from .models import TodoList
class TodoListForm(forms.ModelForm):
class Meta:
model = TodoList
fields = ['task_title', 'task_description', 'task_status']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['task_title'].widget.attrs.update({'class': 'form-control'})
self.fields['task_description'].widget.attrs.update({'class':'form-control'})
|
priyanka-infobeans/infoToDoList
|
infobeans_todolist/todolist_app/forms.py
|
forms.py
|
py
| 451 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35914161974
|
class Solution:
"""
@param nums: the given array
@return: the minimum difference between their sums
"""
def findMin(self, nums: list) -> int:
sum_of_nums = sum(nums)
m = sum_of_nums // 2
n = len(nums)
dp = [0] * (m + 1)
for i in range(1, n + 1):
for j in range(m, -1, -1):
if j < nums[i - 1]:
break
dp[j] = max(dp[j - nums[i - 1]] + nums[i - 1], dp[j])
return sum_of_nums - 2 * dp[m]
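# Minimal usage sketch: splitting [1, 6, 11, 5] into {1, 5, 6} and {11} gives sums 12 and
# 11, so the minimum difference between the two subset sums is 1.
if __name__ == "__main__":
    print(Solution().findMin([1, 6, 11, 5]))  # 1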
|
Super262/LintCodeSolutions
|
algorithms/dp/problem0724.py
|
problem0724.py
|
py
| 517 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20160930177
|
from django.urls import path
from web import views
app_name ="web"
urlpatterns = [
path('',views.index,name="index"),
path("create/",views.create_product,name="create_product"),
path('deleted/<int:id>/',views.deleted_product,name="deleted_product"),
path('edit/<int:id>/',views.edit_product,name="edit_product"),
path('<int:id>/',views.product,name="product"),
]
|
Aswathy-G/advanceddjango-project
|
web/urls.py
|
urls.py
|
py
| 389 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5962356858
|
from aoc_helpers.perf_helpers import *
from aoc_helpers.input_helpers import *
from collections import defaultdict
from collections import Counter
import string
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
from scipy.spatial import cKDTree
def PolygonArea(corners):
n = len(corners) # of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
def get_bounds(points):
x_max = 0
y_max = 0
x_min = 1000000000000
y_min = 1000000000000
for point in points:
if point[0] < x_min:
x_min = point[0]
if point[0] > x_max:
x_max = point[0]
if point[1] < y_min:
y_min = point[1]
if point[1] > y_max:
y_max = point[1]
return (x_min, x_max, y_min, y_max)
@timeit
def get_solution():
input_strings = input_lines("test_input.txt")
data = list(map(lambda line: [int(n) for n in line.split(", ")], input_strings))
points = np.array(data)
vor = Voronoi(points)
# print(vor.regions)
# print(vor.vertices)
# print(vor.point_region)
# for each item in vor.regions
# if the region is finite
# get the corresponding point from vor.point_region
# and associate it with points
largest_area = 0
largest_area_index = -1
for i in range(len(vor.regions)):
if i == 0:
continue
# print(np.where(vor.point_region == i)[0][0])
region = vor.regions[i]
if -1 in region:
# Region is not finite
continue
# Region with point indexed at `i` is finite
# Compute area
verts = [vor.vertices[n] for n in region]
area = PolygonArea(verts)
# print(verts)
# print(area)
if area > largest_area:
largest_area = area
# largest_area_index = i
largest_area_index = np.where(vor.point_region == i)[0][0]
print("Largest finite region comes from point {0} and has an area of {1}".format(largest_area_index, largest_area))
bounds = get_bounds(points)
sampling_points = []
points_str = ""
for y in range(bounds[2] - 1, bounds[3] + 1):
line_str = ""
for x in range(bounds[0] - 1, bounds[1] + 1):
line_str += "({0}, {1})".format(x + 0.5, y + 0.5)
sampling_points.append([x + 0.5, y + 0.5])
points_str += line_str + "\n"
print("Bounds: {0}".format(bounds))
print("Sampling Points:\n{0}".format(points_str))
voronoi_kdtree = cKDTree(points)
test_point_dist, test_point_regions = voronoi_kdtree.query(sampling_points)
f = list(map(lambda x: string.ascii_uppercase[x], test_point_regions))
print(Counter(f).most_common(26))
print(f)
# for y in range(bounds[2] - 1, bounds[3] + 1):
# for x in range(bounds[0] - 1, bounds[1] + 1):
# pass
# print(Counter(test_point_regions))
# print(Counter(test_point_regions).most_common(1))
print("Sampled area of largest finite poly is {0}".format(test_point_regions[largest_area_index + 1]))
voronoi_plot_2d(vor)
plt.show()
print(get_solution())
|
colejd/AdventOfCode2018
|
day_06/day6part1_borked.py
|
day6part1_borked.py
|
py
| 3,325 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28513937002
|
#!/usr/bin/python3.8
# Reading and writing files
# open() returns a file object; the basic syntax is:
# open(filename, mode)
# filename: a string containing the name of the file you want to access.
# mode: determines how the file is opened: read-only, write, append, etc. See the complete list of possible values below. This argument is optional; the default access mode is read-only (r).
# Open a file
f = open("/tmp/foo.txt", "w")
f.write( "Python is a really good language.\nYes, it really is very good!!\n" )
f.close()
f = open("/tmp/foo.txt", "r")
str = f.read()
print(str)
# Close the opened file
f.close()
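# Supplementary sketch: a with-statement closes the file automatically, even if an
# exception occurs, so no explicit close() call is needed.
with open("/tmp/foo.txt", "r") as f2:
    print(f2.read())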
|
BeiGuoDeXue/python
|
15、输入输出/read_write_file.py
|
read_write_file.py
|
py
| 618 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
14755463895
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
# ==========================Core Module================================
class conv_block(nn.Module):
def __init__(self, ch_in, ch_out):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True),
nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
def __init__(self, ch_in, ch_out):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.up(x)
return x
class U_Net(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(U_Net, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
self.Conv2 = conv_block(ch_in=32, ch_out=64)
self.Conv3 = conv_block(ch_in=64, ch_out=128)
self.Conv4 = conv_block(ch_in=128, ch_out=256)
self.Conv5 = conv_block(ch_in=256, ch_out=512)
self.Up5 = up_conv(ch_in=512, ch_out=256)
self.Up_conv5 = conv_block(ch_in=512, ch_out=256)
self.Up4 = up_conv(ch_in=256, ch_out=128)
self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
self.Up3 = up_conv(ch_in=128, ch_out=64)
self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
self.Up2 = up_conv(ch_in=64, ch_out=32)
self.Up_conv2 = conv_block(ch_in=64, ch_out=32)
self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
# encoding path
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
# print("x4", x2.shape)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
# print("x4", x3.shape)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
# print("x4", x4.shape)
x5 = self.Maxpool(x4)
x5 = self.Conv5(x5)
# print("x4", x5.shape)
# decoding + concat path
d5 = self.Up5(x5)
# print("x4", d5.shape)
d5 = torch.cat((x4, d5), dim=1)
# print("x4", d5.shape)
d5 = self.Up_conv5(d5)
# print("x4", d5.shape)
d4 = self.Up4(d5)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d1 = self.Conv_1x1(d2)
# d1 = F.softmax(d1,dim=1) # mine
# return d1
out = nn.Sigmoid()(d1)
return out
#
# if __name__ == '__main__':
# net =U_Net(img_ch=3, output_ch=1)
# print(net)
# x = torch.rand((2, 3, 224, 224))
# print(net.forward(x).shape)
# from torchstat import stat
#
# model = U_Net()
# stat(model, (3, 224, 224))
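# --- Minimal smoke test (an illustrative addition, not part of the original file) ---
# The input size is arbitrary; it only needs to be divisible by 16 because the
# encoder applies four 2x2 max-poolings before the decoder upsamples back.
if __name__ == '__main__':
    net = U_Net(img_ch=3, output_ch=1)
    y = net(torch.rand(2, 3, 224, 224))
    print(y.shape)                          # expected: torch.Size([2, 1, 224, 224])
    print(float(y.min()), float(y.max()))   # the sigmoid keeps the output within [0, 1]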
|
ikkbic/My-Repositories
|
segmentionn_models_trans/UNet-1.py
|
UNet-1.py
|
py
| 3,430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30729211140
|
import threading
from random import randint
import pika
import time
from src.klein_queue.errors import KleinQueueError
from src.klein_queue.rabbitmq.publisher import Publisher
from src.klein_queue.rabbitmq.consumer import Consumer
from klein_config.config import EnvironmentAwareConfig
test_config = {
"rabbitmq": {
"host": ["localhost"],
"port": 5672,
"username": "doclib",
"password": "doclib",
}
}
class TestConsumer:
def test_consumption(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
assert msg == {'msg': 'test_message'}
assert properties.delivery_mode == 2
event.set()
cons.stop()
return handler_fn
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.consume",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.consume"
}
})
consumer = Consumer(config, "consumer")
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
# timeout = 10 seconds on waiting for message to arrive
message_received_in_time = event.wait(10)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_exchange_creation(self):
test_message = {"id": "d5d581bb-8b42-4d1e-bbf9-3fee91ab5920"}
delivery = pika.spec.Basic.Deliver()
def handler_fn(msg, basic_deliver=None, **kwargs):
nonlocal delivery, waiting
delivery = basic_deliver
waiting = False
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.test-queue",
"auto_acknowledge": False,
"concurrency": 3,
"exchange": "test-exchange"
},
"publisher": {
"queue": "pytest.test-queue",
"exchange": "test-exchange"
},
})
consumer = Consumer(config, "consumer", handler_fn)
consumer.start()
test_publisher = Publisher(config, "publisher")
test_publisher.start()
test_publisher.publish(test_message)
waiting = True
while waiting:
pass
assert delivery.exchange == "test-exchange"
assert delivery.routing_key == "pytest.test-queue"
test_publisher.stop()
consumer.stop()
def test_worker_concurrency(self):
workers = randint(2, 5)
events = []
def handler_fn(msg, **kwargs):
event_id = msg['event']
events[event_id].set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.concurrency",
"concurrency": workers,
"auto_acknowledge": True
},
"publisher": {
"queue": "pytest.concurrency"
}
})
consumer = Consumer(config, "consumer", handler_fn)
# check number of threads spawned
assert len(consumer._consumer._workers) == workers
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
for i in range(workers):
# send one message for each worker
events.append(threading.Event())
publisher.publish({'event': i})
for i in range(workers):
message_received_in_time = events[i].wait(5)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_default_exception_handler(self):
retries = 0
waiting = True
expected_retries = 10
def handler_fn(msg, **kwargs):
nonlocal waiting, retries
retries += 1
if retries >= expected_retries:
# Stop waiting and don't requeue
waiting = False
raise KleinQueueError("forced error")
else:
# Requeue the message
raise KleinQueueError("forced error", requeue=True)
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.default_exceptions",
"auto_acknowledge": False,
"concurrency": 3,
},
"publisher": {
"queue": "pytest.default_exceptions"
}
})
consumer = Consumer(config, "consumer", handler_fn)
consumer.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish("message")
timeout = time.time() + 60
while waiting:
if time.time() > timeout:
# Fails this test if the expected number of retries has not been reached within the time limit.
assert False
time.sleep(1)
pass
consumer.stop()
publisher.stop()
def test_error_publishing_exception_handler(self):
test_message = {"id": "d5d581bb-8b42-4d1e-bbf9-3fee91ab5920"}
error_message = ""
error_properties = pika.BasicProperties()
message_properties = pika.BasicProperties()
def handler_fn(msg, properties=None, **kwargs):
nonlocal message_properties
message_properties = properties
raise KleinQueueError("forced error")
def error_handler_fn(msg, properties=None, **kwargs):
nonlocal waiting, error_message, error_properties
error_message = msg
error_properties = properties
waiting = False
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.exceptions",
"auto_acknowledge": False,
"concurrency": 3,
},
"publisher": {
"queue": "pytest.exceptions"
},
"error_publisher": {
"queue": "errors"
},
"error_consumer": {
"queue": "errors",
"auto_acknowledge": True
}
})
error_publisher = Publisher(config, "error_publisher")
error_publisher.start()
upstream_publisher = Publisher(config, "consumer")
upstream_publisher.start()
from src.klein_queue.rabbitmq.exceptions import new_error_publishing_exception_handler
exception_handler = new_error_publishing_exception_handler("consumer", upstream_publisher, error_publisher)
consumer = Consumer(config, "consumer", handler_fn, exception_handler=exception_handler)
consumer.start()
error_consumer = Consumer(config, "error_consumer", error_handler_fn)
error_consumer.start()
test_publisher = Publisher(config, "publisher")
test_publisher.start()
test_publisher.publish(test_message)
waiting = True
while waiting:
pass
test_publisher.stop()
upstream_publisher.stop()
error_publisher.stop()
consumer.stop()
error_consumer.stop()
assert message_properties.delivery_mode == 2
assert message_properties.headers['x-retry'] == 3
assert test_message == error_message
assert error_properties.delivery_mode == 2
assert error_properties.headers['x-consumer'] == "consumer"
assert "KleinQueueError" in error_properties.headers['x-exception']
assert error_properties.headers['x-message'] == "forced error"
assert error_properties.headers['x-queue'] == 'pytest.exceptions'
assert "forced error" in error_properties.headers['x-stack-trace']
assert error_properties.headers["x-original-routing-key"] == "pytest.exceptions"
assert error_properties.headers["x-original-exchange"] == ""
def test_on_empty_queue_callback_should_run_once_single_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_empty_queue_fn(tracker=[]): # make use of shared instance of list
event.set()
tracker.append(1)
assert len(tracker) == 1 # Run the first time only
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_empty_queue_callback_should_run_once_single_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_empty_queue_callback_should_run_once_single_msg"
}
})
consumer = Consumer(config, "consumer", on_empty_queue_fn=on_empty_queue_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
# on waiting for message to arrive and then hit empty queue
message_received_in_time = event.wait(10)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_on_empty_queue_callback_should_be_called_once_multiple_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_empty_queue_fn(tracker=[]):
event.set()
tracker.append(1)
assert len(tracker) == 1 # Run once only
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.consume",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.consume"
}
})
consumer = Consumer(config, "consumer", on_empty_queue_fn=on_empty_queue_fn)
consumer.set_handler(handle_handle(consumer))
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
publisher.publish({'msg': 'test_message'})
publisher.publish({'msg': 'test_message'})
c = threading.Thread(target=consumer.run)
c.start()
# waiting for message to arrive and then hit empty queue
message_received_in_time = event.wait(30)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_on_empty_queue_callback_should_not_be_called(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_empty_queue_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_empty_not_called",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_empty_not_called"
}
})
consumer = Consumer(config, "consumer", on_empty_queue_fn=on_empty_queue_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
# timeout = 60 seconds. event should not be reached as no message is sent
message_received_in_time = event.wait(10)
assert not message_received_in_time
consumer.stop()
def test_on_stop_callback_should_be_called_after_closed_no_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_no_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_no_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
time.sleep(1) # Give the thread time to do its thing
consumer.stop()
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert message_received_in_time
def test_on_stop_callback_should_not_be_called_before_closed_no_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_no_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_no_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert not message_received_in_time
consumer.stop()
def test_on_stop_callback_should_be_called_after_closed_with_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_with_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_with_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
time.sleep(1) # Give the thread time to do its thing
publisher.stop()
consumer.stop()
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert message_received_in_time
def test_on_stop_callback_should_not_be_called_before_closed_with_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_with_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_with_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert not message_received_in_time
publisher.stop()
consumer.stop()
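# --- Illustrative helper (an addition, not part of the original test suite) ---
# A few tests above block on `while waiting: pass`, which spins a CPU core and can
# hang indefinitely if a message never arrives. A bounded polling helper along these
# lines (the name and defaults are ours, not from this repo) is one safer alternative.
def wait_until(predicate, timeout=30.0, interval=0.1):
    """Poll `predicate` until it returns True or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False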
|
mdcatapult/py-queue
|
tests/rabbitmq/test_consumer.py
|
test_consumer.py
|
py
| 17,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5092513077
|
# For taking integer inputs.
import math
def inp():
return(int(input()))
# For taking List inputs.
def inlist():
return(list(map(int, input().split())))
# For taking string inputs. Actually it returns a List of Characters, instead of a string, which is easier to use in Python, because in Python, Strings are Immutable.
def instr():
s = input()
return(list(s[:len(s)]))
# For taking space seperated integer variable inputs.
def invr():
return(map(int, input().split()))
r, c = invr()
a = []
for _ in range(r):
a.append(instr())
for i in range(r):
h = a[i].count("#")
if h == 0:
for j in range(c):
a[i][j] = "-1"
for i in range(c):
count = 0
for j in range(r):
if a[j][i] == "#":
count += 1
if count == 0:
for j in range(r):
a[j][i] = "-1"
for i in range(r):
f = 0
for j in range(c):
if a[i][j] != "-1":
f = 1
print(a[i][j], end="")
if f:
print()
|
sudiptob2/atcoder-training
|
Medium 100/12.Grid Compression.py
|
12.Grid Compression.py
|
py
| 1,015 |
python
|
en
|
code
| 2 |
github-code
|
6
|
355842723
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class HakCuti(Document):
    def fill_employee_details(self, employees):
        # The query currently takes no parameters; `employees` is kept for API compatibility.
        return frappe.db.sql('''
            select
                t1.name as employee, t1.employee_name
            from
                `tabEmployee` t1, `tabSalary Structure Employee` t2
            where
                t1.docstatus != 2
                and t1.name = t2.employee
        ''', as_dict=1)
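# --- Illustrative sketch (an addition, not part of the original doctype) ---
# If the query ever needs to filter on a specific employee, frappe.db.sql accepts
# named parameters; the function name and filter below are assumptions for the example.
def fill_single_employee_details(employee):
    return frappe.db.sql('''
        select t1.name as employee, t1.employee_name
        from `tabEmployee` t1
        where t1.docstatus != 2 and t1.name = %(employee)s
    ''', {"employee": employee}, as_dict=1)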
|
bawaaaang/absensi
|
absensi/absensi/doctype/hak_cuti/hak_cuti.py
|
hak_cuti.py
|
py
| 460 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13703480148
|
# buttontest
# create at 2015/6/15
# autor: qianqians
import sys
sys.path.append('../../')
from plask import *
import json
def test():
app = plaskapp('0.0.0.0', 5000)
p = pypage('edittest', 'http://127.0.0.1:5000/', pyhtmlstyle.margin_left)
p.add_page_route('/')
e = pyedit('edit', pyedit.text, pyhtmlstyle.margin_left, p)
b = pybutton('button', 'button', pyhtmlstyle.margin_left, p)
e.set_location(0, 1)
b.set_location(180, 0)
t = pyparagraph('text', "text", pyhtmlstyle.margin_auto, p)
t.set_location(0, 0)
t.set_visibility(False)
ev = uievent('http://127.0.0.1:5000/', b, pyelement.onclick)
params = jparams()
params.append("input", e.client_get_input_text())
onsev = on_server_response()
onsev.add_call(e.server_get_input_text("output"))
onsev.add_call(e.server_set_visible(False))
onsev.add_call(b.server_set_visible(False))
onsev.add_call(t.server_set_visible(True))
onsev.add_call(t.server_set_text_simplate("456"))
sev = server_event("submit", params, onsev)
def on_click(p):
return {"output":"456"}
sev.add_onevent(on_click)
ev.add_server_event(sev)
b.register_uievent(ev)
p.init()
app.run()
if __name__ == '__main__':
test()
|
theDarkForce/plask
|
test/login/logintest.py
|
logintest.py
|
py
| 1,172 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11728318125
|
import numpy as np
from tabulate import tabulate
from clustering.external_evaluation import calculate_purity
from clustering.k_means import KMeans
from data_preparation.inverted_index import InvertedIndex
from data_preparation.pre_processing import parse_corpus, pre_process_corpus
corpus, y_true, titles = parse_corpus()
preprocessed_corpus = pre_process_corpus(corpus)
y_true = [y_true.index(l) for l in y_true]
def generate_matrix(preprocessed_corpus):
inverted_index = InvertedIndex()
for i in range(len(preprocessed_corpus)):
for term in preprocessed_corpus[i].split():
inverted_index.parse_term(term, i)
document_term_matrix = np.array(inverted_index.make_document_by_term_matrix())
return document_term_matrix
matrix = generate_matrix(preprocessed_corpus)
k = KMeans(5, 1000)
document_clusters = k.assign_documents_to_cluster(matrix)
y_pred = document_clusters[0]
clusters = document_clusters[1]
cluster_tightness = document_clusters[2]
top_documents = document_clusters[3]
def write_clusters():
with open("clusters.txt", "w") as f:
for i in range(len(clusters)):
data = []
f.write(
"Cluster #%d contains the following %d documents: "
% (i, len(clusters[i]))
)
f.write("\n\n")
for j in range(len(clusters[i])):
id = clusters[i][j]
data.append([id, titles[id]])
f.write(tabulate(data, headers=["Document ID", "Document Title"]))
f.write("\n\n")
def sort_tuples(tuples):
# sort tuples in ascending order by the second element
# (distance from the centroid), which acts as the key
tuples.sort(key=lambda x: x[1])
return tuples
def show_summary():
for i in range(len(top_documents)):
data = []
print("The top 3 documents in cluster #%d are:\n " % i)
sortedTuples = sort_tuples(top_documents[i])[:3]
for j in sortedTuples:
data.append([j[0], titles[j[0]]])
print(tabulate(data, headers=["Document ID", "Document Title"]))
print()
def show_RSS():
data = []
for i in range(len(cluster_tightness)):
data.append([i, cluster_tightness[i]])
print(tabulate(data, headers=["Cluster ID", "RSS"]))
print("\nThe total RSS is %.2f." % sum(cluster_tightness))
def show_purity():
purity = calculate_purity(y_pred, y_true)
print("The purity is %.2f." % (100 * purity))
def display_menu():
# display menu shown to user
print("")
print(60 * "-", "Menu", 60 * "-")
print("1. Show Cluster Summary")
print("2. Calculate RSS")
print("3. Calculate Purity")
print("4. Write Clusters")
print("5. Exit")
print(127 * "-")
print("")
def wait_for_input():
input("\nPlease press Enter to continue...")
status = True
# main loop to display the menu
while status:
display_menu()
selection = input("Please enter your selection (1-4): ")
print()
if selection == "1":
show_summary()
wait_for_input()
elif selection == "2":
show_RSS()
wait_for_input()
elif selection == "3":
show_purity()
wait_for_input()
elif selection == "4":
write_clusters()
wait_for_input()
elif selection == "5":
print("\nThe program will now terminate.")
status = False
else:
# prompt user for a valid selection
input("Please select a valid option from the menu.\n")
|
nzabdelke/News-Clustering
|
main.py
|
main.py
|
py
| 3,536 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71969293949
|
import argparse
import logging
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML Trainer')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
parser.add_argument('--region', type=str, help='Which zone to run the analyzer.')
parser.add_argument('--cluster', type=str, help='The name of the cluster to run job.')
parser.add_argument('--package', type=str,
help='GCS Path of XGBoost distributed trainer package.')
parser.add_argument('--output', type=str, help='GCS path to use for output.')
parser.add_argument('--conf', type=str, help='GCS path of the training json config file.')
parser.add_argument('--rounds', type=int, help='Number of rounds to train.')
parser.add_argument('--workers', type=int, help='Number of workers to use for training.')
parser.add_argument('--train', type=str, help='GCS path of the training libsvm file pattern.')
parser.add_argument('--eval', type=str, help='GCS path of the eval libsvm file pattern.')
parser.add_argument('--analysis', type=str, help='GCS path of the analysis input.')
parser.add_argument('--target', type=str, help='Target column name.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
api = _utils.get_client()
logging.info('Submitting job...')
spark_args = [args.conf, str(args.rounds), str(args.workers), args.analysis, args.target,
args.train, args.eval, args.output]
job_id = _utils.submit_spark_job(
api, args.project, args.region, args.cluster, [args.package],
'ml.dmlc.xgboost4j.scala.example.spark.XGBoostTrainer', spark_args)
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_job(api, args.project, args.region, job_id)
with open('/output.txt', 'w') as f:
f.write(args.output)
logging.info('Job completed.')
if __name__== "__main__":
main()
|
kubeflow/kfp-tekton-backend
|
components/deprecated/dataproc/train/src/train.py
|
train.py
|
py
| 1,939 |
python
|
en
|
code
| 8 |
github-code
|
6
|
3919530622
|
# standard python
import base64
import bz2
import datetime
import json
import multiprocessing
import optparse
import os
import re
import socket
import sys
import time
import urllib.parse
import urllib.request
# custom browser driver
from webxray.ChromeDriver import ChromeDriver
class Client:
def __init__(self, server_url, pool_size=None):
"""
Init allows us to set a custom pool_size, otherwise
we base on CPU count.
"""
self.server_url = server_url
if pool_size:
self.pool_size = pool_size
else:
self.pool_size = multiprocessing.cpu_count()
# __init__
def get_and_process_client_tasks(self,proc_num):
"""
This is the main loop that should run indefintely. Purpose is to
send server "ready" message to get tasks which are either wait,
get_scan, or get_policy. If unable to get commands it will
wait and try again in 5 seconds. If command is get_scan or
get_policy, the appropriate action will be taken and results
will be sent as POST data back to server.
"""
local_test = False
debug = True
if local_test:
client_id = 'local_client'
wbxr_server_url = 'http://127.0.0.1:5000/'
else:
client_id = socket.gethostname()
wbxr_server_url = self.server_url
if debug: print(f'{client_id} [{proc_num}]\t😀 starting')
# main loop
while True:
# set up request
request = urllib.request.Request(
wbxr_server_url,
headers = {
'User-Agent' : 'wbxr_client_v0_0',
}
)
data = urllib.parse.urlencode({'ready':True,'client_id':client_id})
data = data.encode('utf8')
# attempt to get commands
if debug: print(f'[{proc_num}]\t📥 fetching commands')
try:
command_params = json.loads(urllib.request.urlopen(request,data,timeout=60).read().strip().decode('utf-8'))
except:
print(f'[{proc_num}]\t👎 Unable to contact server, will wait and try again.')
time.sleep(5)
continue
# process commands
task = command_params['task']
print('[%s]\t👉 TASK IS: %s' % (proc_num, task))
if task == 'wait':
time.sleep(10)
continue # restarts main loop
elif task == 'get_scan' or task == 'get_policy' or task == 'get_crawl' or task == 'get_random_crawl':
target = command_params['target']
client_config = command_params['client_config']
else:
print(f'[{proc_num}]\t🥴 CANNOT READ COMMAND SET, EXITING')
return
if debug: print('[%s]\t🚗 setting up driver' % proc_num)
if client_config['client_browser_type'] == 'chrome':
browser_driver = ChromeDriver(client_config, port_offset=proc_num)
else:
print('[%s]\t🥴 INVALID BROWSER TYPE, HARD EXIT!' % proc_num)
exit()
print(f'[{proc_num}]\t🏃♂️ GOING TO {task} on {str(target)[:30]}...')
if task == 'get_scan':
task_result = browser_driver.get_scan(target)
elif task == 'get_crawl':
task_result = browser_driver.get_crawl(target)
elif task == 'get_policy':
task_result = browser_driver.get_scan(target, get_text_only=True)
elif task == 'get_random_crawl':
task_result = browser_driver.get_random_crawl(target)
# unpack result
success = task_result['success']
task_result = task_result['result']
# if scan was successful we will have a big chunk of data
# so we compress it to speed up network xfer and reduce disk
# utilization while it is in the result queue
if success:
if debug: print(f'[{proc_num}]\t🗜️ compressing output for {str(target)[:30]}...')
task_result = base64.urlsafe_b64encode(bz2.compress(bytes(json.dumps(task_result),'utf-8')))
# build request to post results to server
if debug: print(f'[{proc_num}]\t📤 returning output')
data = urllib.parse.urlencode({
'client_id' : client_id,
'success' : json.dumps(success),
'target' : json.dumps(target),
'task' : task,
'task_result' : task_result
})
data = data.encode('utf-8')
# send the request
request = urllib.request.Request(
wbxr_server_url,
headers = {
'User-Agent' : 'wbxr_client_v0_0',
}
)
# adding charset parameter to the Content-Type header.
request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
# note we can lose this result
try:
print(f'[{proc_num}]\t📥 RESPONSE: %s' % (urllib.request.urlopen(request,data,timeout=600).read().decode('utf-8')))
continue
except:
print(f'[{proc_num}]\t😖 Unable to post results!!!')
time.sleep(5)
return
# get_and_process_client_tasks
def run_client(self):
if sys.platform == 'darwin' and multiprocessing.get_start_method(allow_none=True) != 'forkserver':
multiprocessing.set_start_method('forkserver')
# processes all need a number, this also gets
# used as a port offset
proc_nums = []
for i in range(0,self.pool_size):
proc_nums.append(i)
# start workers
myPool = multiprocessing.Pool(self.pool_size)
myPool.map(self.get_and_process_client_tasks, proc_nums)
# run_client
# Client
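# --- Illustrative entry point (an addition, not part of the original module) ---
# The server URL and pool size below are placeholders; point them at a running webXray server.
if __name__ == '__main__':
    Client('http://127.0.0.1:5000/', pool_size=2).run_client()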
|
thezedwards/webXray
|
webxray/Client.py
|
Client.py
|
py
| 4,988 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36185365035
|
'''
Ahmad Abu Hanifah
A1C020026
Teknik Otomasi Pertanaian
'''
import numpy as np
import matplotlib.pyplot as plt
dOsp = 6.5
vmin = 0  # minimum air flow rate (L/s)
vmax = 2  # maximum air flow rate (L/s)
V = 1000000  # system volume (L)
kLa = 0.045  # per minute
n = 4  # number of aerators
# a = 0.4  # air-water interfacial area (m2/litre)
a = 400  # pond surface area (m2)
def NilaidO(dOi, tn, ti):
dOn = dOi + (tn-ti)*((v*n*a*2.5)/V-(kLa/60)*dOi)
return dOn
time = np.linspace(1, 3000, 100)
dOact = np.zeros(time.size)
dOsetp = np.zeros(time.size)
i = 0
dO0 = 3
dOi = dO0
dOn = dO0
dOsetp[:] = dOsp
print("time", "error", "v", "DO aktual")
for t in time:
dOi = dOn
    # compute the error
    err = dOi - dOsp
    # on-off controller
    if err < 0:
        v = vmax  # aeration on
    else:
        v = vmin  # aeration off
if i == 0:
ti = 0
    # compute the system response
dOn = NilaidO(dOi, t, ti)
ti = t
print(f"{t}, {err}, {v}, {dOn}")
dOact[i] = dOn
    # end of this time step; loop back to the top
i = i + 1
# Plot the simulation results
plt.title("Simulasi Sistem Kontrol On-Off")
plt.xlabel("Waktu (s)")
plt.ylabel("DO (mg/L)")
plt.plot(time, dOact, "-b", label="DO Aktual")
plt.plot(time, dOsetp, "--r", label="DO Set-point")
plt.legend(loc="lower right", frameon=False)
plt.show()
|
AbuHanifah1878/Teknik_Otomasi_Pertanian
|
KontrolDOOnOff.py
|
KontrolDOOnOff.py
|
py
| 1,349 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27089285988
|
import sys
import json
unique = {}
start = ['a','f','l','q','u']
end = ['e','k','p','t','z']
if(len(sys.argv) != 4):
print("========================================================================================================")
print("SORRY!! Please provide the path to the INPUT json file, the OUTPUT file, alphabet selection number [0-5]")
print("========================================================================================================")
print("Example: python3 Fan_in.py ./dummy.json ./output.txt 2 ")
print("========================================================================================================")
sys.exit()
f = open(sys.argv[1])
index = int(sys.argv[3])
if(index < 0 or index > 5):
print("INDEX should be between 0 and 5 only")
sys.exit()
for line in f:
data = json.loads(line)
try:
if(data is None or data['created_time'] is None):
continue
if(data['message'] is None):
continue
if('actor' not in data or 'username' not in data['actor'] or 'transactions' not in data or data['transactions'] is None or 'target' not in data['transactions'][0] or 'username' not in data['transactions'][0]['target']):
continue
tusername = data['transactions'][0]['target']['username']
username = data['actor']['username']
ltuser = tusername[0].lower()
if(index != 5 and (ltuser < start[index] or ltuser > end[index])):
continue
        if(index == 5 and ('a' <= ltuser <= 'z')):  # bucket 5 keeps usernames that do not start with a-z
continue
if(tusername not in unique):
unique[tusername] = {'T':0,'users':set()}
        if(username not in unique[tusername]['users']):
unique[tusername]['users'].add(username.strip())
unique[tusername]['T'] += 1
except Exception as e:
continue
f.close()
outputfile1 = open(sys.argv[2] + str(index),"w")
for k,v in unique.items():
s = str(len(v['users']))+ " " + str(v['T'])
outputfile1.write(s + "\n")
outputfile1.close()
|
STEELISI/Venmo
|
Fan_in.py
|
Fan_in.py
|
py
| 2,106 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43381026267
|
import boto3
def lambda_handler(event, context):
sns = boto3.client('sns')
message = event.get('message', 'Default message')
params = {
'Message': message,
'TopicArn': 'arn:aws:sns:us-east-1:896553604990:LiveScore'
}
try:
response = sns.publish(**params)
message_id = response['MessageId']
print('Message published:', message_id)
return response
except Exception as e:
print('Error publishing message:', str(e))
raise e
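# --- Illustrative local invocation (an addition, not part of the original handler) ---
# Running this for real requires AWS credentials and the SNS topic above to exist;
# it is only a sketch of how the handler would be exercised outside Lambda.
if __name__ == '__main__':
    print(lambda_handler({'message': 'Test score update'}, None))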
|
bayarbayasgalanj/cloud_computing
|
Project/lambda_function.py
|
lambda_function.py
|
py
| 512 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7859721528
|
import re
with open('regexps.ini') as fichier:
text = fichier.read()
for line in text.split("\n"):
match = re.match(r"([a-zA-Z]*)\s*=\s*([^;]*[^\s;])", line)
print(match)
if match:
print("Ligne conforme: ", line)
|
mercator-ocean/python-notes
|
exercices/makina/stdlib/regexps_1.py
|
regexps_1.py
|
py
| 240 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38959225786
|
# Sparse Matrix Representation using lists
def sparseMatrix(sparseMatrix, m, n):
# initialize size as 0
size = 0
for i in range(m):
for j in range(n):
if (sparseMatrix[i][j] != 0):
size += 1
# number of columns in compressMatrix(size) should
# be equal to number of non-zero elements in sparseMatrix
rows, cols = (3, size)
compressMatrix = [[0 for i in range(cols)] for j in range(rows)]
k = 0
for i in range(m):
for j in range(n):
if (sparseMatrix[i][j] != 0):
compressMatrix[0][k] = i
compressMatrix[1][k] = j
compressMatrix[2][k] = sparseMatrix[i][j]
k += 1
print("Sparse representation:")
for i in compressMatrix:
print(i)
m = int(input("Enter row size: "))
n = int(input("Enter col size: "))
print("Enter elements:")
mat = [[int(input()) for x in range (n)] for y in range(m)]
print("Sparse matrix is:")
for i in range(m):
for j in range(n):
print(mat[i][j],end = " ")
print()
sparseMatrix(mat, m, n)
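# --- Illustrative addition (not part of the original exercise) ---
# Each column of the compressed matrix stores (row index, column index, value).
# The helper below shows the inverse mapping: rebuilding the dense matrix.
def expandMatrix(compressMatrix, m, n):
    dense = [[0 for _ in range(n)] for _ in range(m)]
    for k in range(len(compressMatrix[0])):
        dense[compressMatrix[0][k]][compressMatrix[1][k]] = compressMatrix[2][k]
    return dense

# Example: expandMatrix([[0, 1], [1, 0], [5, 3]], 2, 2) returns [[0, 5], [3, 0]].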
|
9Mugen/int108
|
sparse_matrix.py
|
sparse_matrix.py
|
py
| 996 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18307501302
|
import warnings
from copy import deepcopy
from typing import Union, List, Tuple, Dict
import numpy as np
from aequilibrae.matrix import AequilibraeMatrix
from aequilibrae.paths.graph import Graph
from aequilibrae.paths.results import AssignmentResults
class TrafficClass:
"""Traffic class for equilibrium traffic assignment
.. code-block:: python
>>> from aequilibrae import Project
>>> from aequilibrae.matrix import AequilibraeMatrix
>>> from aequilibrae.paths import TrafficClass
>>> project = Project.from_path("/tmp/test_project")
>>> project.network.build_graphs()
>>> graph = project.network.graphs['c'] # we grab the graph for cars
>>> graph.set_graph('free_flow_time') # let's say we want to minimize time
>>> graph.set_skimming(['free_flow_time', 'distance']) # And will skim time and distance
>>> graph.set_blocked_centroid_flows(True)
>>> proj_matrices = project.matrices
>>> demand = AequilibraeMatrix()
>>> demand = proj_matrices.get_matrix("demand_omx")
>>> demand.computational_view(['matrix'])
>>> tc = TrafficClass("car", graph, demand)
>>> tc.set_pce(1.3)
"""
def __init__(self, name: str, graph: Graph, matrix: AequilibraeMatrix) -> None:
"""
Instantiates the class
:Arguments:
**name** (:obj:`str`): UNIQUE class name.
**graph** (:obj:`Graph`): Class/mode-specific graph
**matrix** (:obj:`AequilibraeMatrix`): Class/mode-specific matrix. Supports multiple user classes
"""
if not np.array_equal(matrix.index, graph.centroids):
raise ValueError("Matrix and graph do not have compatible sets of centroids.")
if matrix.matrix_view.dtype != graph.default_types("float"):
raise TypeError("Matrix's computational view need to be of type np.float64")
self.__config = {}
self.graph = graph
self.logger = graph.logger
self.matrix = matrix
self.pce = 1.0
self.vot = 1.0
self.mode = graph.mode
self.class_flow: np.array
self.results = AssignmentResults()
self.fixed_cost = np.zeros(graph.graph.shape[0], graph.default_types("float"))
self.fixed_cost_field = ""
self.fc_multiplier = 1.0
self._aon_results = AssignmentResults()
self._selected_links = {} # maps human name to link_set
self.__id__ = name
graph_config = {
"Mode": graph.mode,
"Block through centroids": graph.block_centroid_flows,
"Number of centroids": graph.num_zones,
"Links": graph.num_links,
"Nodes": graph.num_nodes,
}
self.__config["Graph"] = str(graph_config)
mat_config = {
"Source": matrix.file_path or "",
"Number of centroids": matrix.zones,
"Matrix cores": matrix.view_names,
}
if len(matrix.view_names) == 1:
mat_config["Matrix totals"] = {
nm: np.sum(np.nan_to_num(matrix.matrix_view)[:, :]) for nm in matrix.view_names
}
else:
mat_config["Matrix totals"] = {
nm: np.sum(np.nan_to_num(matrix.matrix_view)[:, :, i]) for i, nm in enumerate(matrix.view_names)
}
self.__config["Matrix"] = str(mat_config)
def set_pce(self, pce: Union[float, int]) -> None:
"""Sets Passenger Car equivalent
:Arguments:
**pce** (:obj:`Union[float, int]`): PCE. Defaults to 1 if not set
"""
if not isinstance(pce, (float, int)):
raise ValueError("PCE needs to be either integer or float ")
self.pce = pce
def set_fixed_cost(self, field_name: str, multiplier=1):
"""Sets value of time
:Arguments:
**field_name** (:obj:`str`): Name of the graph field with fixed costs for this class
**multiplier** (:obj:`Union[float, int]`): Multiplier for the fixed cost. Defaults to 1 if not set
"""
if field_name not in self.graph.graph.columns:
raise ValueError("Field does not exist in the graph")
self.fc_multiplier = float(multiplier)
self.fixed_cost_field = field_name
if np.any(np.isnan(self.graph.graph[field_name].values)):
self.logger.warning(f"Cost field {field_name} has NaN values. Converted to zero")
if self.graph.graph[field_name].min() < 0:
msg = f"Cost field {field_name} has negative values. That is not allowed"
self.logger.error(msg)
raise ValueError(msg)
def set_vot(self, value_of_time: float) -> None:
"""Sets value of time
:Arguments:
**value_of_time** (:obj:`Union[float, int]`): Value of time. Defaults to 1 if not set
"""
self.vot = float(value_of_time)
def set_select_links(self, links: Dict[str, List[Tuple[int, int]]]):
"""Set the selected links. Checks if the links and directions are valid. Translates link_id and
direction into unique link id used in compact graph.
Supply links=None to disable select link analysis.
:Arguments:
**links** (:obj:`Union[None, Dict[str, List[Tuple[int, int]]]]`): name of link set and
Link IDs and directions to be used in select link analysis"""
self._selected_links = {}
for name, link_set in links.items():
if len(name.split(" ")) != 1:
warnings.warn("Input string name has a space in it. Replacing with _")
name = str.join("_", name.split(" "))
link_ids = []
for link, dir in link_set:
if dir == 0:
query = (self.graph.graph["link_id"] == link) & (
(self.graph.graph["direction"] == -1) | (self.graph.graph["direction"] == 1)
)
else:
query = (self.graph.graph["link_id"] == link) & (self.graph.graph["direction"] == dir)
if not query.any():
raise ValueError(f"link_id or direction {(link, dir)} is not present within graph.")
# Check for duplicate compressed link ids in the current link set
for comp_id in self.graph.graph[query]["__compressed_id__"].values:
if comp_id in link_ids:
warnings.warn(
"Two input links map to the same compressed link in the network"
f", removing superfluous link {link} and direction {dir} with compressed id {comp_id}"
)
else:
link_ids.append(comp_id)
self._selected_links[name] = np.array(link_ids, dtype=self.graph.default_types("int"))
self.__config["select_links"] = str(links)
@property
def info(self) -> dict:
config = deepcopy(self.__config)
return {self.__id__: config}
def __setattr__(self, key, value):
if key not in [
"graph",
"logger",
"matrix",
"pce",
"mode",
"class_flow",
"results",
"_aon_results",
"__id__",
"vot",
"fixed_cost",
"fc_multiplier",
"fixed_cost_field",
"_selected_links",
"_TrafficClass__config",
]:
raise KeyError("Traffic Class does not have that element")
self.__dict__[key] = value
|
AequilibraE/aequilibrae
|
aequilibrae/paths/traffic_class.py
|
traffic_class.py
|
py
| 7,635 |
python
|
en
|
code
| 140 |
github-code
|
6
|
33093309616
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Decoder(nn.Module):
''' This class contains the implementation of Decoder Module.
Args:
embedding_dim: A integer indicating the embedding size.
output_dim: A integer indicating the size of output dimension.
hidden_dim: A integer indicating the hidden size of rnn.
n_layers: A integer indicating the number of layers in rnn.
dropout: A float indicating the dropout.
'''
def __init__(self, embedding_dim, output_dim, hidden_dim, n_layers, dropout):
super().__init__()
self.embedding_dim = embedding_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(output_dim, embedding_dim)
self.rnn = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first = False).to(device)
self.linear = nn.Linear(hidden_dim, output_dim).to(device)
self.dropout = nn.Dropout(dropout).to(device)
def forward(self, input, hidden, cell):
# input is of shape [batch_size]
# hidden is of shape [n_layer * num_directions, batch_size, hidden_size]
# cell is of shape [n_layer * num_directions, batch_size, hidden_size]
input = input.unsqueeze(0)
# input shape is [1, batch_size]. reshape is needed rnn expects a rank 3 tensors as input.
# so reshaping to [1, batch_size] means a batch of batch_size each containing 1 index.
embedded = self.embedding(input)
embedded = self.dropout(embedded)
# embedded is of shape [1, batch_size, embedding_dim]
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
# generally output shape is [sequence_len, batch_size, hidden_dim * num_directions]
# generally hidden shape is [num_layers * num_directions, batch_size, hidden_dim]
# generally cell shape is [num_layers * num_directions, batch_size, hidden_dim]
# sequence_len and num_directions will always be 1 in the decoder.
# output shape is [1, batch_size, hidden_dim]
# hidden shape is [num_layers, batch_size, hidden_dim]
# cell shape is [num_layers, batch_size, hidden_dim]
predicted = F.log_softmax(self.linear(output), dim = 2) # linear expects as rank 2 tensor as input
# predicted shape is [batch_size, output_dim]
return predicted, hidden, cell
class AttnDecoder(nn.Module):
def __init__(self, embedding_dim, output_dim, hidden_dim, n_layers, dropout, max_length):
super(AttnDecoder, self).__init__()
self.hidden_size = hidden_dim
self.output_dim = output_dim
self.embedding = nn.Embedding(output_dim, embedding_dim)
self.num_layers = n_layers
self.max_length = max_length
self.dropout_p = dropout
self.attn = nn.Linear(self.hidden_size + embedding_dim, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size + embedding_dim, embedding_dim)
self.dropout = nn.Dropout(self.dropout_p)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, self.num_layers, dropout=dropout)
self.linear = nn.Linear(hidden_dim, output_dim)
def forward(self, input, hidden, cell, encoder_outputs):
embedded = self.embedding(input)
encoder_outputs = encoder_outputs.view(-1, self.hidden_size, self.max_length)
attn_weights = F.softmax(self.attn(torch.cat((embedded, hidden[0]), 1)), dim=1).unsqueeze(0).view(-1, self.max_length, 1)
#encoder_outputs = encoder_outputs.view(-1, self.hidden_size, self.max_length)
attn_applied = torch.bmm(encoder_outputs, attn_weights)
output = torch.cat((embedded, attn_applied[:, :, 0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output, (hidden, cell) = self.lstm(output, (hidden, cell))
predicted = F.log_softmax(self.linear(output), dim = 2)
return predicted, hidden, cell
class RecurrentEncoder(nn.Module):
''' Sequence to sequence networks consists of Encoder and Decoder modules.
This class contains the implementation of Encoder module.
Args:
input_dim: A integer indicating the size of input dimension.
emb_dim: A integer indicating the size of embeddings.
hidden_dim: A integer indicating the hidden dimension of RNN layers.
n_layers: A integer indicating the number of layers.
dropout: A float indicating dropout.
'''
def __init__(self, input_dim, emb_dim, hidden_dim, n_layers, dropout, bi_directional=False):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hidden_dim, n_layers, dropout=dropout, bidirectional=False)
self.hrnn = nn.LSTM(hidden_dim,hidden_dim, n_layers, dropout = dropout, bidirectional = False)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src is of shape [sentence_length, batch_size], it is time major
# embedded is of shape [sentence_length, batch_size, embedding_size]
embedded = self.embedding(src)
embedded = self.dropout(embedded)
# Decode the hidden state of the last time step
# inputs to the rnn is input, (h, c); if hidden, cell states are not passed means default initializes to zero.
# input is of shape [sequence_length, batch_size, input_size]
# hidden is of shape [num_layers * num_directions, batch_size, hidden_size]
# cell is of shape [num_layers * num_directions, batch_size, hidden_size]
outputs, (hidden, cell) = self.rnn(embedded)
outputs, (hidden, cell) = self.hrnn(outputs)
# outputs are always from the top hidden layer, if bidirectional outputs are concatenated.
# outputs shape [sequence_length, batch_size, hidden_dim * num_directions]
return outputs, hidden, cell
class Encoder(nn.Module):
''' Sequence to sequence networks consists of Encoder and Decoder modules.
This class contains the implementation of Encoder module.
Args:
input_dim: A integer indicating the size of input dimension.
emb_dim: A integer indicating the size of embeddings.
hidden_dim: A integer indicating the hidden dimension of RNN layers.
n_layers: A integer indicating the number of layers.
dropout: A float indicating dropout.
'''
def __init__(self, input_dim, emb_dim, hidden_dim, n_layers, dropout, bi_directional=False):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.dropout = dropout
self.bi_directional = bi_directional
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hidden_dim, n_layers, dropout=dropout, bidirectional=bi_directional)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src is of shape [sentence_length, batch_size], it is time major
# embedded is of shape [sentence_length, batch_size, embedding_size]
embedded = self.embedding(src)
embedded = self.dropout(embedded)
# Decode the hidden state of the last time step
# inputs to the rnn is input, (h, c); if hidden, cell states are not passed means default initializes to zero.
# input is of shape [sequence_length, batch_size, input_size]
# hidden is of shape [num_layers * num_directions, batch_size, hidden_size]
# cell is of shape [num_layers * num_directions, batch_size, hidden_size]
outputs, (hidden, cell) = self.rnn(embedded)
# outputs are always from the top hidden layer, if bidirectional outputs are concatenated.
# outputs shape [sequence_length, batch_size, hidden_dim * num_directions]
if self.bi_directional:
outputs = outputs[:, :, self.hidden_dim:] + outputs[:, :, :self.hidden_dim]
hidden = hidden[:2,:,:] + hidden[2:,:,:]
cell = cell[:2,:,:] + cell[2:,:,:]
#hidden = hidden.view(self.n_layers,-1,self.hidden_dim)
#cell = cell.view(self.n_layers,-1,self.hidden_dim)
return outputs, hidden, cell
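# --- Minimal shape check (an illustrative addition, not part of the original module) ---
# Vocabulary sizes and dimensions below are arbitrary; this only wires an Encoder to a
# Decoder for a single decoding step to confirm the tensor shapes documented above.
if __name__ == '__main__':
    INPUT_DIM, OUTPUT_DIM = 100, 120
    EMB_DIM, HID_DIM, N_LAYERS, DROPOUT = 32, 64, 2, 0.1
    enc = Encoder(INPUT_DIM, EMB_DIM, HID_DIM, N_LAYERS, DROPOUT).to(device)
    dec = Decoder(EMB_DIM, OUTPUT_DIM, HID_DIM, N_LAYERS, DROPOUT).to(device)
    src = torch.randint(0, INPUT_DIM, (15, 4), device=device)       # [seq_len, batch]
    outputs, hidden, cell = enc(src)
    first_token = torch.zeros(4, dtype=torch.long, device=device)   # e.g. an <sos> index
    pred, hidden, cell = dec(first_token, hidden, cell)
    print(pred.shape)                                                # [1, 4, OUTPUT_DIM]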
|
facebookresearch/UNLU
|
codes/rnn.py
|
rnn.py
|
py
| 8,608 |
python
|
en
|
code
| 34 |
github-code
|
6
|
33526668503
|
# Write a program that declares a 3x3 matrix and fills it with values read from the
# keyboard. At the end, display the matrix on screen with the correct formatting.
lista_números=[]
i=0
matriz=int(input("Qual a matriz? "))
matriz_1=matriz
for c in range(matriz*matriz):
lista_números.append(float(input("Digite um número para a sua matriz: ")))
for c in range(matriz):
for i in range(i,matriz_1):
print([lista_números[i]], end=" ")
i+=matriz-(matriz-1)
matriz_1+=matriz
print()
|
cauavsb/python
|
mundo-3-py/ex15.py
|
ex15.py
|
py
| 509 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
23361190894
|
import os
import sys
import numpy as np
from numpy import array, zeros, diag, diagflat, dot
import numpy
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
import copy
from os.path import join, dirname, realpath
UPLOAD_FOLDER = './uploads/'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
UPLOADS_PATH = join(dirname(realpath(__file__)), 'static/uploads/..')
ITERATION_LIMIT = 1000
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def check_roots(A, B, x):
result=list()
answer = list()
message = None
print(x)
print(A)
print(B)
for row in range(len(A)):
line_result = 0.0
for col in range(len(A)):
check = A[row][col] * x[col]
line_result += check
result.append(round(line_result))
print(result)
for i in range(len(result)):
if result[i] == B[i]:
answer.append(True)
else:
answer.append(False)
print(answer)
if len(answer) == 3:
if answer == [True, True, True]:
message = 'Root is correct!'
else:
message = 'Root is incorrect!'
else:
if answer == [True, True]:
message = 'Root is correct!'
else:
message = 'Root is incorrect!'
return message
def dd(X):
result = None
D = np.diag(np.abs(X)) # Find diagonal coefficients
S = np.sum(np.abs(X), axis=1) - D # Find row sum without diagonal
if np.all(D > S):
result = 'Matrix is diagonally dominant!'
else:
result = 'Matrix is not diagonally dominant!'
return result
def Jacobi(A, b):
    x = zeros(len(A[0]))
D = diag(A)
print(D)
print(diagflat(D))
R = A - diagflat(D)
for i in range(ITERATION_LIMIT):
x = (b - dot(R,x)) / D
return x.tolist()
def Jordan_Causs(n, a, b):
j = 0
for i in a:
length = len(b)
i.append(b[j])
j+=1
x = np.zeros(n)
for i in range(n):
if a[i][i] == 0.0:
sys.exit('Divide by zero detected!')
for j in range(n):
if i != j:
ratio = a[j][i]/a[i][i]
for k in range(n+1):
a[j][k] = a[j][k] - ratio * a[i][k]
for i in range(n):
x[i] = a[i][n]/a[i][i]
return x
def TakeMatrix(Matrix_a):
array = Matrix_a[0].split(' ')
delta1 = list()
for i in array:
delta1.append(int(i))
print(delta1)
size = len(delta1)
line1 = list()
line2 = list()
line3 = list()
line4 = list()
delta = list()
if size == 9:
for i in delta1[:3]:
line1.append(i)
delta.append(line1)
for i in delta1[3:6]:
line2.append(i)
delta.append(line2)
for i in delta1[6:9]:
line3.append(i)
delta.append(line3)
if size == 4:
for i in delta1[:2]:
line1.append(i)
delta.append(line1)
for i in delta1[2:]:
line2.append(i)
delta.append(line2)
# delta = [[delta1[0], delta1[1], delta1[2]],[delta1[3], delta1[4], delta1[5]], [delta1[6], delta1[7], delta1[8]]]
return delta
def TakeB(Matrix_b):
array = Matrix_b[0].split(' ')
delta1 = list()
for i in array:
delta1.append(int(i))
return delta1
def SwapRows(A, B, row1, row2):
A[row1], A[row2] = A[row2], A[row1]
B[row1], B[row2] = B[row2], B[row1]
def DivideRow(A, B, row, divider):
A[row] = [a / divider for a in A[row]]
B[row] /= divider
def CombineRows(A, B, row, source_row, weight):
A[row] = [(a + k * weight) for a, k in zip(A[row], A[source_row])]
B[row] += B[source_row] * weight
def Gauss(A, B):
column = 0
while (column < len(B)):
current_row = None
for r in range(column, len(A)):
if current_row is None or abs(A[r][column]) > abs(A[current_row][column]):
current_row = r
if current_row is None:
return None
if current_row != column:
SwapRows(A, B, current_row, column)
DivideRow(A, B, column, A[column][column])
for r in range(column + 1, len(A)):
CombineRows(A, B, r, column, -A[r][column])
column += 1
X = [0 for b in B]
for i in range(len(B) - 1, -1, -1):
X[i] = B[i] - sum(x * a for x, a in zip(X[(i + 1):], A[i][(i + 1):]))
return X
def Zeidel(A, b):
x = [.0 for i in range(len(A))]
Iteration = 0
converge = False
pogr = 0.
while not converge:
x_new = copy.copy(x)
for i in range(len(A)):
s1 = sum(A[i][j] * x_new[j] for j in range(i))
s2 = sum(A[i][j] * x[j] for j in range(i + 1, len(A)))
x_new[i] = (b[i] - s1 - s2) / A[i][i]
pogr = sum(abs(x_new[i] - x[i]) for i in range(len(A)))
converge = pogr < 1e-6
Iteration += 1
x = x_new
return x
@app.route('/')
def hello_world():
return render_template('main_menu.html')
@app.route('/task_1', methods=['post', 'get'])
def Task_One():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
if request.method == 'POST':
check = request.form.get('check')
print(check)
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
M3 = numpy.array(array)
v3 = numpy.array(array1)
result = numpy.linalg.solve(M3, v3)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_1.html', array=array, array1=array1, result=result, ch=ch)
@app.route('/task_2', methods=['post', 'get'])
def Task_Two():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch= None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
result = Gauss(array, array1)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_2.html', array=array, array1=array1, result=result, ch=ch)
@app.route('/task_3', methods=['post', 'get'])
def Task_3():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
check_matrix = None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
if a != None and b != None:
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
check_matrix = dd(array)
result = Zeidel(array, array1)
else:
result = None
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_3.html', result=result, check_matrix=check_matrix, ch=ch)
@app.route('/task_4', methods=['post', 'get'])
def Task_4():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
result = Jordan_Causs(3, array, array1)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_4.html', array=array, array1=array1, result=result, ch=ch)
@app.route('/task_5', methods=['post', 'get'])
def Task_5():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
check_matrix = None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
check_matrix = dd(array)
result = Jacobi(array, array1)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_5.html', result=result, check_matrix=check_matrix, ch=ch)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['post', 'get'])
def Read_From_File():
pick = None
filename= None
list_a = list()
list_b = list()
result = None
if request.method == 'POST':
option = request.form.get('op')
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
with open('./uploads/'+filename, 'r') as file:
line = file.read()[2:].split('\n')
for lin in line:
new_list = lin.split(' ')
arr = list()
for i in new_list:
arr.append(float(i))
list_a.append(arr)
for i in list_a:
list_b.append(i[-1])
del i[-1]
pick = option
print(pick)
if pick == '1':
M3 = numpy.array(list_a)
v3 = numpy.array(list_b)
result = numpy.linalg.solve(M3, v3)
if pick == '2':
result = Gauss(list_a, list_b)
if pick == '3':
result = Zeidel(list_a, list_b)
if pick == '4':
result = Jordan_Causs(3, list_a, list_b)
if pick == '5':
result = Jacobi(list_a, list_b)
print(result)
return render_template('upload_file.html', pick=pick, list_a=list_a, list_b=list_b, result=result)
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
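# --- Illustrative entry point (an addition, not part of the original file) ---
# Host, port and debug settings are placeholders for local development only.
if __name__ == '__main__':
    app.run(debug=True)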
|
konstantinkonstantinovich/Numerical-Methods-Sprint01-
|
Sprint01/app.py
|
app.py
|
py
| 9,328 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25185578484
|
import unittest
import logging
from generative_notch.pipeline.trait_assembler.trait_assembler import flatten_dict, interpolate_instructions
CONFIG = {
'batch_name': 'MyShinyBatch',
'render': {
'width': '1920',
'height': '1080'
}
}
CONTEXT = {
'combination_id': '001'
}
INSTRUCTIONS = [
{
'node': '$F_MyImageLoader',
'property': 'Attributes, File Source',
'value': 'SubjectPhoto_{batch_name}_{combination_id}_{render.width}x{render.height}.png'
}
]
CORRECTLY_INTERPOLATED = [
{
'node': '$F_MyImageLoader',
'property': 'Attributes, File Source',
'value': 'SubjectPhoto_MyShinyBatch_001_1920x1080.png'
}
]
class TestStableDiffusionTraitAssembler(unittest.TestCase):
def test_flatten_config(self):
result = flatten_dict(
dictionary=CONFIG,
reducer='.'
)
self.assertEqual(
result,
{
'batch_name': 'MyShinyBatch',
'render.width': '1920',
'render.height': '1080'
}
)
def test_interpolate_instructions(self):
flat_cfg = flatten_dict(CONFIG)
extended_context = dict(CONTEXT, **flat_cfg)
result = interpolate_instructions(INSTRUCTIONS, extended_context)
self.assertEqual(
result,
CORRECTLY_INTERPOLATED
)
if __name__ == '__main__':
unittest.main()
|
thomaswinged/generative-notch
|
tests/test_trait_assembler.py
|
test_trait_assembler.py
|
py
| 1,460 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13895462756
|
########################################################################################
# Module with functions for parametric estimation of GC
########################################################################################
import numpy as np
import scipy.linalg
from .tools import *
def YuleWalker(X, m, maxlags=100):
    '''
    Estimate the VAR model coefficients by solving the Yule-Walker equations.
    Inputs:
    > X       : Data with size [Number of variables, Number of observations].
    > m       : Model order.
    > maxlags : Maximum lag used when computing the cross-correlation matrices.
    Outputs:
    > AR_yw  : VAR coefficient matrices, with shape [m, Nvars, Nvars].
    > eps_yw : Estimated covariance matrix of the innovation (residual) noise.
    '''
Nvars = X.shape[0]
N = X.shape[1]
# Compute cross-correlations matrices for each lag
lag, Rxx = xcorr(X,X,maxlags)
# Reorganizing data to compute crosscorrelation matrix
b = X.T[m:]
A = np.zeros([N-m,Nvars*m])
count = 0
for i in np.arange(0,m):
for j in range(0,Nvars):
A[:,count] = X.T[m-i-1:N-i-1,j]
count += 1
    r = np.matmul(A.T, b) / N  # np.reshape( Rxx[1:m+1], (Nvars*m,Nvars) )
R = np.matmul(A.T, A)/N
AR_yw = np.matmul(scipy.linalg.inv(R).T,r).T
AR_yw = AR_yw.T.reshape((m,Nvars,Nvars))
eps_yw = Rxx[0]
for i in range(m):
eps_yw += np.matmul(-AR_yw[i].T,Rxx[i+1])
return AR_yw, eps_yw
def compute_transfer_function(AR, sigma, f, Fs):
m = AR.shape[0]
Nvars = AR.shape[1]
H = np.zeros([Nvars,Nvars,f.shape[0]]) * (1 + 1j)
S = np.zeros([Nvars,Nvars,f.shape[0]]) * (1 + 1j)
for i in range(0,m+1):
comp = np.exp(-1j * f * 2 * np.pi * i/Fs)
if i == 0:
for j in range(comp.shape[0]):
H[:,:,j] += np.eye(Nvars) * comp[j]
else:
for j in range(comp.shape[0]):
H[:,:,j] += -AR[i-1].T * comp[j]
for i in range(f.shape[0]):
H[:,:,i] = np.linalg.inv(H[:,:,i])
for i in range(f.shape[0]):
S[:,:,i] = np.matmul( np.matmul(H[:,:,i], sigma), np.conj(H[:,:,i]).T )
return H, S
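if __name__ == '__main__':
    # Hypothetical usage sketch (not part of the original module). It assumes the
    # module is executed inside the `pygc` package (e.g. `python -m pygc.parametric`)
    # so the relative import of `xcorr` resolves; the data shape, model order and
    # sampling rate below are illustrative only.
    X = np.random.randn(2, 10000)        # [Number of variables, Number of observations]
    AR, eps = YuleWalker(X, m=2, maxlags=100)
    freqs = np.linspace(0, 100, 256)     # frequency axis in Hz (assumed)
    H, S = compute_transfer_function(AR, eps, freqs, Fs=200)
    print(AR.shape, eps.shape, H.shape, S.shape)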
|
ViniciusLima94/pyGC
|
pygc/parametric.py
|
parametric.py
|
py
| 1,814 |
python
|
en
|
code
| 30 |
github-code
|
6
|
2965742594
|
from odoo import http
from odoo.http import request
from odoo.addons.web.controllers.main import ensure_db
import werkzeug
import logging
_logger = logging.getLogger(__name__)
class SimpleUrlController(http.Controller):
@http.route('/redir', type='http', auth="user")
def redirect(self, **args):
ensure_db()
if not request.session.uid:
return werkzeug.utils.redirect('/web/login', 303)
request.uid = request.session.uid
if len(args) != 1:
_logger.debug("Wrong number of GET parameters ({})".format(args))
return werkzeug.utils.redirect('/web')
key, value = args.popitem()
rule_model = request.env['base_simple_urls.redirect_rule']
matching_rule = rule_model.search([('get_variable', '=', key)])
if not matching_rule:
_logger.debug(
"Redirect rule for GET parameters not found ({})".format(args)
)
return werkzeug.utils.redirect('/web')
if len(matching_rule) > 1:
_logger.debug(
"Multiple rules for GET parameters found ({})".format(args)
)
return werkzeug.utils.redirect('/web')
''' Do a case insensitive search to the model and field defined in the
redirect rule, e.g. product.product's default_code field '''
target_model = request.env[matching_rule[0].model_id.model]
if matching_rule[0].field_id.ttype == 'integer':
matching_ids = target_model.search(
[(matching_rule[0].field_id.name, '=', value)]
)
else:
matching_ids = target_model.search(
[(matching_rule[0].field_id.name, '=ilike', value)]
)
if len(matching_ids) != 1:
_logger.debug(
"Wrong number of search results. GET params: {}".format(args)
)
return werkzeug.utils.redirect('/web')
''' Form the URL and redirect the user '''
url_params = {
'view_type': 'form',
'model': matching_rule[0].model_id.model,
'id': matching_ids[0].id,
'action': matching_rule[0].action_id.id,
}
url_string = '/web#{}'.format(werkzeug.url_encode(url_params))
return werkzeug.utils.redirect(url_string)
|
Tawasta/server-tools
|
base_simple_urls/controllers/simple_urls.py
|
simple_urls.py
|
py
| 2,344 |
python
|
en
|
code
| 3 |
github-code
|
6
|
69957260347
|
#! /usr/bin/python3
#-*-coding: utf-8-*-
import cobra
from math import inf
def add_rxn(name, D_mets, model, rev=True): # add a reaction (with its metabolite stoichiometry) to the model
    r_name = name
    r_obj = cobra.Reaction(r_name)
    r_obj.name = r_name
    r_obj.id = r_name
    model.add_reaction(r_obj)
    r_obj.add_metabolites(D_mets)
    r_obj.objective_coefficient = 0
    if rev:
        r_obj.bounds = (-inf, inf)
    else:
        r_obj.bounds = (0, inf)
def set_fixed_flux(r_id, val, model): # fix the flux of a reaction to a single value
    r_obj = model.reactions.get_by_id(r_id) # look the reaction up by id
    r_obj.bounds = (val, val) # identical lower and upper bounds => fixed flux
def set_bounds(r_id, val_tuple, model): # set the (lower, upper) bounds of a reaction
    r_obj = model.reactions.get_by_id(r_id) # look the reaction up by id
    r_obj.bounds = val_tuple # assign the bounds tuple
def set_fixed_flux_ratio(r_dict, model): # constrain the ratio between the fluxes of two reactions
if len(r_dict) == 2:
r_id1 = list(r_dict.keys())[0]
r_obj1 = model.reactions.get_by_id(r_id1)
r_v1 = list(r_dict.values())[0]
r_id2 = list(r_dict.keys())[1]
r_obj2 = model.reactions.get_by_id(r_id2)
r_v2 = list(r_dict.values())[1]
const = model.problem.Constraint(r_v1 * r_obj2.flux_expression - r_v2 * r_obj1.flux_expression, lb = 0, ub = 0)
model.add_cons_vars(const)
return const
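# Hypothetical usage sketch (not part of the original module); the model name,
# metabolite ids and stoichiometry below are illustrative only.
if __name__ == "__main__":
    model = cobra.Model("demo")
    met_a = cobra.Metabolite("A_c", compartment="c")
    met_b = cobra.Metabolite("B_c", compartment="c")
    add_rxn("R_A_to_B", {met_a: -1.0, met_b: 1.0}, model, rev=False)
    set_bounds("R_A_to_B", (0, 10), model)
    set_fixed_flux("R_A_to_B", 5, model)
    print(model.reactions.get_by_id("R_A_to_B").bounds)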
|
chloea31/AraCore
|
src/init_fba/manipulate_model.py
|
manipulate_model.py
|
py
| 1,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73734292027
|
import json
import os
from account import Account
home_path = os.getenv("HOME")
config = json.load(open(os.path.join(home_path, ".config", "revChatGPT", "config.json")))
cache = json.load(open(os.path.join(home_path, ".cache", "revChatGPT", "config.json")))
# Read the tokens from the config
session_token = config['accounts'][0]['session_token']
access_token = cache['access_token']
account = Account("fkxxyz", "[email protected]", "xxxxxxxx", session_token, "/tmp", config['proxy'])
# Try to access using the access_token
is_logged_in = account.login_with_session_info()
# Log in with the session_token to obtain an access_token
if not is_logged_in:
is_logged_in = account.login()
|
fkxxyz/rev-chatgpt-web
|
test.py
|
test.py
|
py
| 662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1245466067
|
import pandas as pd
from absplice.utils import get_abs_max_rows
def variant_to_string(df):
chrom = str(df['#Chrom'])
if 'chr' not in chrom:
chrom = 'chr' + chrom
return chrom + ':' + str(df['Pos']) + ':' + df['Ref'] + '>' + df['Alt']
df_pred = pd.read_csv(snakemake.input['model'], sep='\t', skiprows=1)
df_pred = df_pred.rename(columns={'GeneID': 'gene_id', 'GeneName': 'gene_name'})
df_pred = df_pred[~df_pred['gene_id'].isna()]
df_pred['variant'] = df_pred.apply(lambda x: variant_to_string(x), axis=1)
df_pred = df_pred[['variant', 'gene_id', 'gene_name', 'RawScore', 'PHRED']]
# join sample info
if 'var_samples_df' in snakemake.input.keys():
df_vcf_annotation = pd.read_csv(snakemake.input['var_samples_df'])
df_pred = df_pred.set_index('variant').join(
df_vcf_annotation.set_index('variant'),
how='inner')\
.reset_index()
df_pred.to_parquet(
snakemake.output['model_postprocess'],
index=False)
|
gagneurlab/AbSplice_analysis
|
workflow/scripts/common/splicing_result/postprocess_preds/cadd_splice.py
|
cadd_splice.py
|
py
| 972 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16010028786
|
from random import random
import toga
from colosseum import CSS
def build(app):
data = []
for x in range(5):
data.append([str(x) for x in range(5)])
label = toga.Label('No row selected.')
def selection_handler(widget, row):
label.text = 'You selected row: {}'.format(row) if row is not None else 'No row selected'
table = toga.Table(headings=['heading_{}'.format(x) for x in range(5)],
data=data,
style=CSS(flex=1),
on_select=selection_handler)
def insert_handler(widget):
table.data.insert(0, [str(round(random() * 100)) for _ in range(5)])
table._impl.refresh()
print('Rows', len(table.data.data))
def delete_handler(widget):
if len(table.data.data) > 0:
table.data.remove(table.data.data[0])
table._impl.refresh()
else:
print('Table is empty!')
btn_style = CSS(flex=1)
btn_insert = toga.Button('Insert Row', on_press=insert_handler, style=btn_style)
btn_delete = toga.Button('Delete Row', on_press=delete_handler, style=btn_style)
btn_box = toga.Box(children=[btn_insert, btn_delete], style=CSS(flex_direction='row'))
box = toga.Box(children=[table, btn_box, label], style=CSS(flex=1, flex_direction='column', padding=10))
return box
def main():
return toga.App('Test Table', 'org.pybee.helloworld', startup=build)
if __name__ == '__main__':
app = main()
app.main_loop()
|
Ocupe/toga_test_app_collection
|
table/table/app.py
|
app.py
|
py
| 1,513 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17516466448
|
import torch
from torch import nn
__all__ = [
'_CONV_DICT',
'_CONV_TRANS_DICT',
'_AVG_POOL_DICT',
'_MAX_POOL_DICT',
'_NORM_DICT',
'_REFLECTION_PAD_DICT',
'_CENTER_CROP_DICT',
'_ACTIVATION_DICT',
'activation_from_str'
]
def center_crop_1d(layer: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
_, _, layer_width = layer.size()
_, _, target_width = target.size()
assert layer_width >= target_width
diff_x = (layer_width - target_width) // 2
return layer[:, :,
diff_x:(diff_x + target_width)]
def center_crop_2d(layer: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
_, _, layer_height, layer_width = layer.size()
_, _, target_height, target_width = target.size()
assert layer_height >= target_height
assert layer_width >= target_width
diff_x = (layer_width - target_width) // 2
diff_y = (layer_height - target_height) // 2
return layer[:, :,
diff_y:(diff_y + target_height),
diff_x:(diff_x + target_width)]
def center_crop_3d(layer: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
_, _, layer_depth, layer_height, layer_width = layer.size()
    _, _, target_depth, target_height, target_width = target.size()
assert layer_depth >= target_depth
assert layer_height >= target_height
assert layer_width >= target_width
diff_x = (layer_width - target_width) // 2
diff_y = (layer_height - target_height) // 2
diff_z = (layer_depth - target_depth) // 2
return layer[:, :,
diff_z:(diff_z + target_depth),
diff_y:(diff_y + target_height),
diff_x:(diff_x + target_width)]
_CONV_DICT = {
1: nn.Conv1d,
2: nn.Conv2d,
3: nn.Conv3d
}
_CONV_TRANS_DICT = {
1: nn.ConvTranspose1d,
2: nn.ConvTranspose2d,
3: nn.ConvTranspose3d
}
_AVG_POOL_DICT = {
1: nn.AvgPool1d,
2: nn.AvgPool2d,
3: nn.AvgPool3d
}
_MAX_POOL_DICT = {
1: nn.MaxPool1d,
2: nn.MaxPool2d,
3: nn.MaxPool3d
}
_NORM_DICT = {
'batch': {
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d
}
}
_REFLECTION_PAD_DICT = {
1: nn.ReflectionPad1d,
2: nn.ReflectionPad2d
}
_CENTER_CROP_DICT = {
1: center_crop_1d,
2: center_crop_2d,
3: center_crop_3d
}
_ACTIVATION_DICT = {
'relu': nn.ReLU(),
'elu': nn.ELU(),
'selu': nn.SELU(),
'sigmoid': nn.Sigmoid(),
'leaky_relu': nn.LeakyReLU(),
'softplus': nn.Softplus()
}
def activation_from_str(activation_str: str):
return _ACTIVATION_DICT[activation_str]
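# Hypothetical usage sketch (not part of the original module): crop a feature
# map to the spatial size of a smaller target and look up an activation by name.
if __name__ == '__main__':
    layer = torch.randn(1, 8, 64, 64)
    target = torch.randn(1, 8, 60, 60)
    cropped = _CENTER_CROP_DICT[2](layer, target)
    print(cropped.shape)                      # torch.Size([1, 8, 60, 60])
    print(activation_from_str('leaky_relu'))  # LeakyReLU(negative_slope=0.01)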
|
broadinstitute/CellMincer
|
cellmincer/models/components/functions.py
|
functions.py
|
py
| 2,557 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8463884764
|
from django.utils import timezone
from rest_framework import status
from rest_framework.generics import CreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from logistics.models import Logistic, LogisticRate
from receivers.models import Receiver
from senders.models import Sender
from users.authentications import CustomTokenAuthentication
from users.permissions import CustomPermission
from . import models, serializers
class OrderCreateView(APIView):
authentication_classes = [CustomTokenAuthentication]
permission_classes = [CustomPermission]
def post(self, request, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
serializer = serializers.OrderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
order_data = serializer.create(serializer.validated_data)
order_db = models.Order()
updated_order_data = order_db.update_order(order_data["id"], {"user_id": user["id"]})
logistics_db = Logistic()
logistic = logistics_db.get_logistic("id", updated_order_data["logistic_id"])
if not logistic:
return Response({"error": "Logistics Not Available"}, status=status.HTTP_400_BAD_REQUEST)
receiver_id = updated_order_data["receiver_id"]
sender_id = updated_order_data["sender_id"]
receiver_db = Receiver()
receiver = receiver_db.get_receiver("id", receiver_id)
to_region = receiver["region"]
sender_db = Sender()
sender = sender_db.get_sender("id", sender_id)
from_region = sender["region"]
logistic_id = logistic["id"]
logistic_rate_db = LogisticRate()
logistic_rate = logistic_rate_db.get_logistics_rate_price(
from_region=from_region, to_region=to_region, logistic_id=logistic_id
)
price = logistic_rate[0]["price"]
print(price)
order_db = models.Order()
updated_order_data = order_db.update_order(order_data["id"], {"price": price})
serializer = serializers.OrderSerializer(data=updated_order_data)
if serializer.is_valid():
response_data = {"data": serializer.data, "message": "Order Created Successfully"}
return Response(response_data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class OrderDetailView(RetrieveUpdateDestroyAPIView):
authentication_classes = [CustomTokenAuthentication]
permission_classes = [CustomPermission]
def get(self, request, id, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
order_db = models.Order()
data = order_db.get_order("id", id)
if data["user_id"] != user["id"]:
return Response({"error": "You do not have the permission"}, status=status.HTTP_401_UNAUTHORIZED)
serializer = serializers.OrderSerializer(data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_200_OK)
def update(self, request, id, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
data = request.data
serializer = serializers.OrderSerializer(data=data)
if serializer.is_valid(raise_exception=True):
order_db = models.Order()
order = order_db.get_order("id", id)
if order["user_id"] != user["id"]:
return Response({"error": "You do not have the permission"}, status=status.HTTP_401_UNAUTHORIZED)
order = order_db.update_order(id, serializer.validated_data)
order = order_db.update_order(id, {"updated_at": str(timezone.now())})
serializer = serializers.OrderSerializer(data=order)
if serializer.is_valid():
data = {"data": serializer.data, "message": "Order Updated Successfully"}
return Response(data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, id, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
data = request.data
order_db = models.Order()
order = order_db.get_order("id", id)
if order["user_id"] != user["id"]:
return Response({"error": "You do not have the permission"}, status=status.HTTP_401_UNAUTHORIZED)
order = order_db.update_order(id, {"is_active": False, "updated_at": str(timezone.now())})
data = {"data": order, "message": "Order set as inactive"}
return Response(data, status=status.HTTP_200_OK)
|
Duade10/ditosell-api
|
orders/views.py
|
views.py
|
py
| 4,947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32172334196
|
def fix_quotes(text: str) -> str:
    """
    Fix full-width quotation marks whose left/right forms are swapped.
    e.g.
    ”...“ -> “...”
    """
    lst = 0
    out = []
    while True:
        # text.find() returns -1 for a form that is absent, so filter those
        # out before taking the earliest position of either quote form.
        found = [p for p in (text.find("“", lst), text.find("”", lst)) if p != -1]
        if not found:
            break
        lq = min(found)
        found = [p for p in (text.find("“", lq + 1), text.find("”", lq + 1)) if p != -1]
        if not found:
            break
        rq = min(found)
        out.append(text[lst:lq])
        out.append("“")
        out.append(text[lq + 1:rq])
        out.append("”")
        lst = rq + 1
    out.append(text[lst:])
    return "".join(out)
def fix_quotes_file_inplace(filename: str):
    """
    Apply `fix_quotes` to the given file in place.
    """
    with open(filename) as f:
        text = f.read()
    text = fix_quotes(text)
    with open(filename, "w") as f:
        f.write(text)
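# Hypothetical usage sketch (not part of the original module):
if __name__ == "__main__":
    print(fix_quotes("”quoted“ text"))  # -> “quoted” text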
|
byronwanbl/pdf-comb
|
fix_quotes.py
|
fix_quotes.py
|
py
| 843 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42812438276
|
from __future__ import print_function
import numpy as np
from skimage import io
from tqdm import tqdm
import argparse
import os
from config import palette, invert_palette
def convert_to_color(arr_2d, palette=palette):
""" grayscale labels to RGB-color encoding """
arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)
for c, i in palette.items():
m = arr_2d == c
arr_3d[m] = i
return arr_3d
def convert_from_color(arr_3d, palette=invert_palette):
""" RGB-color encoding to grayscale labels """
arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)
for c, i in palette.items():
m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)
arr_2d[m] = i
return arr_2d
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("images", help="Images to process (at least one)",
nargs='+')
parser.add_argument("--to-color",
help="Convert from grayscale labels"
"to RGB encoded labels",
action="store_true")
parser.add_argument("--from-color",
help="Convert from RGB encoded labels"
"to grayscale labels",
action="store_true")
parser.add_argument("--out",
help="Folder where to save the modified images",
type=str)
args = parser.parse_args()
files = args.images
if args.to_color and args.from_color:
raise ValueError("Cannot specify both --from-color"
"and --to-color at the same time")
elif args.to_color:
convert_fun = convert_to_color
elif args.from_color:
convert_fun = convert_from_color
else:
raise ValueError("You need to specify whether to convert"
"from or to the RGB color labels")
if args.out is None:
OUTPUT_FOLDER = './out'
else:
OUTPUT_FOLDER = args.out
if os.path.isdir(OUTPUT_FOLDER):
print("WARNING : output folder {} exists !".format(OUTPUT_FOLDER))
else:
os.mkdir(OUTPUT_FOLDER)
for f in tqdm(files):
filename = f.split('/')[-1]
img = io.imread(f)
new_img = convert_fun(img)
io.imsave(OUTPUT_FOLDER + '/' + filename, new_img)
|
nshaud/DeepNetsForEO
|
legacy/notebooks/convert_gt.py
|
convert_gt.py
|
py
| 2,413 |
python
|
en
|
code
| 468 |
github-code
|
6
|
37962588904
|
import socket
class Topic:
def __init__(self, host="byond.oraclestation.com", port=5000, key="default_pwd"):
self.host = host
self.port = port
self.key = key
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto=socket.IPPROTO_TCP)
sock.connect((self.host, self.port))
sock.settimeout(5)
self.sock = sock
def __del__(self):
if hasattr(self, 'sock'):
self.sock.close()
    def send_topic(self, query):
        query = "?" + query + "&key=" + self.key
        # BYOND topic packet: 0x00 0x83 type bytes, a 16-bit payload length at
        # offsets 2-3 (big-endian), five padding bytes, the ASCII query, and a
        # terminating NUL.
        packet = bytearray([0, 0x83, 0, 0, 0, 0, 0, 0, 0]) + query.encode('ascii') + bytearray([0])
        length = (len(packet) - 4).to_bytes(2, byteorder='little', signed=False)
        # Store the length big-endian by swapping the little-endian byte pair.
        packet[2] = length[1]
        packet[3] = length[0]
        sent = self.sock.send(packet)
        if sent != len(packet):
            raise Exception("Could not send data!")
        receive = self.sock.recv(512)
        if len(receive) > 5:
            # Strip the 5-byte response header and the trailing byte.
            response = receive[5:-1].decode('ascii')
            return response
        return ""
|
OracleStation/PySS13Bot
|
Topic.py
|
Topic.py
|
py
| 1,125 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37123499778
|
import nltk
from Model.Model import Model
from View.View import View
from docx import Document
from datetime import datetime
from classes.Document import MyDocument
import os
import string
import pymorphy2
from tkinter import filedialog
from tkinter import messagebox
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import math
import numpy as np
import re
import heapq
class Controller:
def __init__(self, root):
self.model = Model()
self.view = View(root,self)
def __punctuation(self, str):
punctuation = string.punctuation
translator = str.maketrans('', '', punctuation)
result = str.translate(translator)
characters_to_remove = ['"', '“', '”', '«', '»']
for char in characters_to_remove:
result = result.replace(char, '')
return result
def __get_synonyms(self,word):
morph = pymorphy2.MorphAnalyzer()
normal_form = morph.parse(word)[0].normal_form
synonyms = []
for synset in morph.parse(normal_form)[0].lexeme:
synonyms.append(synset.word)
return synonyms
def create_dictionary_by_documents(self):
dictionary = []
documents = self.model.get_documents()
for doc in documents:
dictionary+=self.__punctuation(doc.text.lower()).split()
dictionary = list(set(dictionary))
self.model.set_dictionary(dictionary)
def create_binary_vector_documents(self):
dictionary = self.model.get_dictionary()
docs = self.model.get_documents()
matrix_of_docs = []
for doc in docs:
vector_doc = []
for word in dictionary:
vector_doc.append(1 if word in doc.text else 0)
matrix_of_docs.append(vector_doc)
self.model.set_docs_vectors(matrix_of_docs)
def create_binary_vector_query(self, query):
query = self.__punctuation(query).lower()
query = query.split()
query_termins_synonyms = []
for word in query:
query_termins_synonyms+= list(set(self.__get_synonyms(word)))
dictionary = self.model.get_dictionary()
vector_binary_query = []
for word in dictionary:
vector_binary_query.append(1 if word in query_termins_synonyms else 0)
self.model.set_query_vector(vector_binary_query)
def calculate_similar(self):
matrix_docs = self.model.get_docs_vectors()
query_vector = np.array(self.model.get_query_vector())
e_query_vector = np.linalg.norm(query_vector)
similar = {}
id = 0
for vector in matrix_docs:
vec = np.array(vector)
e_vec = np.linalg.norm(vec)
if (e_vec * e_query_vector) != 0:
query_equals_doc = (np.dot(vec, query_vector))/(e_vec * e_query_vector)
similar[id]=query_equals_doc
id+=1
else:
query_equals_doc = "Nan"
similar[id] = query_equals_doc
id += 1
sorted_similar = {k: v for k, v in sorted(similar.items(),reverse=True, key=lambda item: item[1])}
self.model.set_result_similar(sorted_similar)
def open_word_file(self):
documents = []
file_path = filedialog.askopenfilenames(filetypes=[("Word Files", "*.docx")])
if file_path:
for path in file_path:
doc = Document(path)
doc_name = os.path.basename(path)
doc_content = "\n".join([paragraph.text for paragraph in doc.paragraphs])
doc_created_date = datetime.fromtimestamp(os.path.getctime(path)).strftime('%H:%M - %d.%m.%Y').split(
"-")
document = MyDocument(doc_name, path, doc_content, doc_created_date[1], doc_created_date[0])
documents.append(document)
self.model.set_documents(documents)
self.update_log("Files uploaded")
    def update_log(self, message):
        self.view.log_text.config(state=tk.NORMAL)  # make the text field editable
        self.view.log_text.insert(tk.END, message + "\n")  # append the entry
        self.view.log_text.config(state=tk.DISABLED)  # make the text field read-only again
        self.view.log_text.see(tk.END)
def check_is_nan(self, similar):
for key, value in similar.items():
if value == "Nan":
self.update_log("Совпадения не найдены.")
return False
else: return True
def start(self):
if not self.model.get_documents():
messagebox.showinfo("Ошибка", "Вы не загрузили документы")
return 0
if not self.view.query_entry.get():
messagebox.showinfo("Ошибка", "Введите языковой запрос")
return 0
self.create_dictionary_by_documents()
self.create_binary_vector_documents()
self.create_binary_vector_query(self.view.query_entry.get())
self.calculate_similar()
if not self.check_is_nan(self.model.get_result_similar()):
return 0
docs_id = list(self.model.get_result_similar().keys())
self.update_log("Наиболее подходящие документы:")
for id in range(len(docs_id)):
self.update_log(f"{id+1}. "+self.model.get_document_by_id(docs_id[id]).title + f": {self.model.get_result_similar()[docs_id[id]]}")
self.view.show_open_files_button()
def generate_annotation(self):
path = f"../docs/"
article_text = ""
selected_index = self.view.listbox.curselection()
if selected_index:
selected_file = self.view.listbox.get(selected_index[0])
file_path = os.path.join(path, selected_file)
print(file_path)
try:
if file_path.endswith('.docx'):
doc = Document(file_path)
for paragraph in doc.paragraphs:
article_text += paragraph.text + '\n'
elif file_path.endswith('.txt'):
with open(file_path, 'r', encoding='utf-8') as file:
article_text = file.read()
            else:
                print("Unsupported file format.")
        except Exception as e:
            print(f"An error occurred while reading the file: {e}")
print(article_text)
article_text = re.sub(r'\[[0-9]*\]', ' ', article_text)
article_text = re.sub(r'\s+', ' ', article_text)
formatted_article_text = re.sub('[^а-яА-Я]', ' ', article_text)
formatted_article_text = re.sub(r'\s+', ' ', formatted_article_text)
sentence_list = nltk.sent_tokenize(article_text)
stopwords = nltk.corpus.stopwords.words('russian')
word_frequencies = {}
for word in nltk.word_tokenize(formatted_article_text):
if word not in stopwords:
if word not in word_frequencies.keys():
word_frequencies[word] = 1
else:
word_frequencies[word] += 1
print(word_frequencies.values())
maximum_frequency = max(word_frequencies.values())
for word in word_frequencies.keys():
word_frequencies[word] = (word_frequencies[word] / maximum_frequency)
sentence_scores = {}
for sent in sentence_list:
for word in nltk.word_tokenize(sent.lower()):
if word in word_frequencies.keys():
if len(sent.split(' ')) < 30:
if sent not in sentence_scores.keys():
sentence_scores[sent] = word_frequencies[word]
else:
sentence_scores[sent] += word_frequencies[word]
summary_sentences = heapq.nlargest(3, sentence_scores, key=sentence_scores.get)
summary = ' '.join(summary_sentences)
self.update_log(f"\n{selected_file}: {summary}")
def update_file_list(self):
docs_id = list(self.model.get_result_similar().keys())
self.view.listbox.delete(0, tk.END)
for id in range(len(docs_id)):
self.view.listbox.insert(tk.END, self.model.get_document_by_id(docs_id[id]).title)
def open_new_files(self):
path = f"../docs/"
selected_index = self.view.listbox.curselection()
if selected_index:
selected_file = self.view.listbox.get(selected_index[0])
os.startfile(path+selected_file)
def recall_metric(self, a, c): # and average precision
return a/(a+c) #r
def precision_metric(self, a, b):
return a/(a+b) # p
def accuracy_metric(self, a, b, c, d):
return (a+d)/(a+b+c+d)
def error_metric(self, a, b, c, d):
return (b+c)/(a+b+c+d)
def f_measure_metric(self,r, p):
return 2/((1/p)+(1/r))
def precision_n_metric(self,a):
return a/3
def r_precision_metric(self, a):
return 2/a
def grafik(self):
recall = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 1.0])
p = []
for el in recall:
if el > 0.5:
p.append(0)
else: p.append(1)
p = np.array(p)
        # Sort the confidence scores in descending order
        sorted_indices = np.argsort(recall)[::-1]
        p_sorted = p[sorted_indices]
        # Initialise the list holding precision values at 11 recall levels
        precision_at_recall = []
        recall_levels = np.linspace(0, 1, 11)  # 11 evenly spaced recall levels from 0 to 1
        # Compute precision at each recall level
        for recall_level in recall_levels:
            cutoff = int(recall_level * len(p_sorted))
            y_true_cutoff = p_sorted[:cutoff]
            precision = np.sum(y_true_cutoff) / (cutoff + 1e-9)  # add a small value to avoid division by zero
            precision_at_recall.append(precision)
        # Interpolate the values
        interpolated_precision = np.maximum.accumulate(precision_at_recall[::-1])[::-1]
        # Create the matplotlib figure
        fig = Figure(figsize=(8, 6))
        ax = fig.add_subplot(111)
        # Plot the recall/precision curve with raw points and interpolated values
        ax.step(recall_levels, precision_at_recall, marker='o', label='Points')
        ax.plot(recall_levels, interpolated_precision, linestyle='--', label='Interpolated line')
        ax.set_xlabel('Recall')
        ax.set_ylabel('Precision')
        ax.set_title('Recall/precision curve with interpolated values')
ax.grid(True)
ax.legend()
canvas = FigureCanvasTkAgg(fig, master=self.view.metrics_window)
canvas_widget = canvas.get_tk_widget()
canvas_widget.pack()
def calculate_metrics(self):
amount_relevant_docs = len(self.model.get_relevant_documents()) # a
amount_irrelevant_docs = len(self.model.get_irrelevant_documents()) # b
amount_bad_relevant_docs = len(self.model.get_bad_relevant_documents()) # d
not_finded_docs = 0 # c
reccal = self.recall_metric(amount_relevant_docs,not_finded_docs)
precision = self.precision_metric(amount_relevant_docs, amount_irrelevant_docs)
accuracy = self.accuracy_metric(amount_relevant_docs,amount_irrelevant_docs, not_finded_docs, amount_bad_relevant_docs)
error = self.error_metric(amount_relevant_docs,amount_irrelevant_docs, not_finded_docs, amount_bad_relevant_docs)
f_measure = self.f_measure_metric(reccal, precision)
precision_n = self.precision_n_metric(amount_relevant_docs)
r_precision = self.r_precision_metric(amount_relevant_docs)
txt = f"Recall: {reccal} \n" \
f"Precision: {precision}\n" \
f"Average precision: {reccal}\n" \
f"Accuracy: {accuracy}\n" \
f"F-measure: {f_measure}\n" \
f"Precision by n: {[precision_n]}\n" \
f"R-precision: {r_precision}\n"
self.view.label_metrics.config(text=txt)
def calculate_idfs(self):
        # Dictionary holding, for each term, the number of documents that contain it
        term_document_count = {}
        documents = self.model.get_documents()
total_documents = len(documents)
termins = []
for doc in documents:
unique_terms = set(doc.text.split())
for term in unique_terms:
termins.append(term)
for doc in documents:
for term in termins:
if term in set(doc.text.split()):
term_document_count[term] = term_document_count.get(term, 0) + 1
idf_values = {}
for term, doc_count in term_document_count.items():
            idf = math.log(total_documents / (doc_count + 1))  # add 1 to avoid division by zero
idf_values[term] = idf
self.model.set_IDFS(idf_values)
def calculated_weight_termins_and_L_vector_in_documents(self):
documents = self.model.get_documents()
IDFS = self.model.get_IDFS()
WTDS = []
L_vector = []
if not IDFS:
return False
for doc in documents:
term_document_count = {}
Li = []
for key in IDFS:
term_document_count[key] = doc.text.count(key) * IDFS[key]
if key in doc.text:
Li.append(1)
else:
Li.append(0)
L_vector.append(Li)
WTDS.append(term_document_count)
self.model.set_L_vector(L_vector)
self.model.set_WTDS(WTDS)
def search_query_transformation(self, user_query):
user_termins = set(user_query.split())
IDFS = self.model.get_IDFS()
query_vector = []
for termin in user_termins:
if termin in IDFS:
value = IDFS[termin] * user_query.count(termin)
query_vector.append(value)
self.model.set_query_vector(query_vector)
|
F1linnn/info-search-system
|
Controller/Controller.py
|
Controller.py
|
py
| 14,983 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15837575627
|
# -*- coding: utf-8 -*-
# @Time : 2022/6/17 15:05
# @Author : renyumeng
# @Email : [email protected]
# @File : Solve.py
# @Project : ProbabilityTheoryAndMathematicalStatisticsExperiments
import numpy as np
import scipy.stats as sts
class Solve:
def __init__(self, N) -> None:
self.n: int = N
self._random_num: np.ndarray = self.get_normal_num
self._describe_num: tuple = self.get_describe
self.mean: float = self.get_mean
self._describe_variance: float = self._describe_num[-3]
self.func_variance: float = self.get_variance
def __str__(self) -> str:
return f"""使用describe函数得到的方差:{self._describe_variance}\n使用公式计算出的方差:{self.func_variance}"""
@property
def get_normal_num(self) -> np.ndarray:
_normal_num: np.array = sts.norm.rvs(loc=0, scale=1, size=self.n)
return _normal_num
@property
def get_describe(self) -> tuple:
_describe_ans: tuple = sts.describe(self._random_num)
return _describe_ans
@property
def get_mean(self) -> float:
_mean: float = self._random_num.mean()
return _mean
@property
def get_variance(self) -> float:
temp_array: np.ndarray = self._random_num.copy()
_mean: float = self.mean
ans: float = 0
for i in range(len(temp_array)):
ans += (temp_array[i] - _mean) ** 2
ans /= (self.n - 1)
return ans
if __name__ == "__main__":
newSolve: Solve = Solve(10)
print(newSolve)
|
renyumeng1/ProbabilityTheoryAndMathematicalStatisticsExperiments
|
firstExper/第三题/Solve.py
|
Solve.py
|
py
| 1,551 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42493210531
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 13:15:49 2018
@author: michal
"""
import networkx as nx
from copy import deepcopy
#import numpy as np
from solution import Solution
from random import sample
class PublicationMatcher:
def primitiveMaxPointsOfRest(self, publications):
allPointsOfRest = self.countMaxPublicationPoints(publications)
result = []
for p in publications:
allPointsOfRest -= self.publicationDict[p].points
result.append(allPointsOfRest)
return result
def maxPointsOfRestFromFlowTheory(self, publications, maxW):
result = []
for i in range(len(publications)):
result.append( self.maxPointsFromFlowTheory( publications[i:], maxW ) )
return result
def buildFlowGraph(self, publications):
flowG = nx.DiGraph()
flowG.add_node("s")
flowG.add_node("t")
pubs = publications
allAuthors = []
for p in pubs:
publication = self.publicationDict[p]
flowG.add_edge("s", p , capacity = publication.size, weight = - int(publication.points /publication.size) )
authors = list(self.pubGraph.neighbors(p))
allAuthors += authors
for a in authors:
flowG.add_edge(p, a)
allAuthors = list(set(allAuthors))
for a in allAuthors:
flowG.add_edge(a, "t", capacity = self.authorsDict[a].slots )
return flowG
def maxPointsFromFlowTheory(self, publications, maxW, returnDict =False):
W = int(100*maxW)
flowG = self.buildFlowGraph(publications)
maxFlow, flowDict = nx.maximum_flow(flowG, "s", "t")
if maxFlow < W:
W = maxFlow
flowG.nodes["s"]["demand"] = -W
flowG.nodes["t"]["demand"] = W
flowCost, flowDict = nx.network_simplex(flowG)
if returnDict:
data = { "maxPoints" : -flowCost/100, "maxSlots" : W/100, "flowGraph" : flowG, "flowDict" : flowDict}
return data
return -flowCost
def maxPointsIncludingSolution(self, solution, publications, maxW):
# W = int(100*maxW)
flowG = self.buildFlowGraph(publications)
p2a = solution.publication2authors
i = 0
for p in p2a:
flowG.remove_edge(p, p2a[p])
newSink = "s" + str(i)
newVent = "t" + str(i)
flowG.add_node( newVent, demand = self.publicationDict[p].size )
flowG.add_edge(p, newVent)
flowG.add_node( newSink, demand = -self.publicationDict[p].size )
flowG.add_edge( newSink, p2a[p])
i+=1
maxFlow, flowDict = nx.maximum_flow(flowG, "s", "t")
if maxFlow < maxW:
maxW = maxFlow
flowG.nodes["s"]["demand"] = -maxW
flowG.nodes["t"]["demand"] = maxW
flowCost, flowDict = nx.network_simplex(flowG)
return -flowCost
def getSortedPublicationByAuthor(self):
author2allPublications, author2pubNo = self.generateAuthor2Publications()
author2publications = self.generateSingleAuthor2PubDict()
publications = self.getAllPublicationsFromMainGraph()
pubOut = []
pubUsed = set()
for a in author2publications:
uniquePubs = author2publications[a]
pubOut += uniquePubs
pubUsed |= set(uniquePubs)
restPubs = author2allPublications[a]
restPubs = list( set(restPubs) - pubUsed)
pubOut += restPubs
pubUsed |= set(restPubs)
rest = list( set(publications) - pubUsed)
pubOut += rest
return pubOut
def getSortedPublicationByPoints(self):
publications = self.getAllPublicationsFromMainGraph()
sortedPubObjects = sorted( self.publicationList , key=lambda x: x.points, reverse=True)
outList = []
for p in sortedPubObjects:
# print( p.points)
if p.id in publications:
outList.append(p.id)
return outList
def branchAndBoundHeuristic(self, maxWeight, minimalPoints = 0, maxSolutionsNo = 20000, publications = [], maxPoints = []):
minimalPoints = int(round(minimalPoints*100))
if not publications:
publications = self.getAllPublicationsFromMainGraph()
maxPointsOfRest = maxPoints
if not maxPoints :
maxPoints = self.maxPointsOfRestFromFlowTheory(publications, maxWeight)
# print(maxPoints)
print("Maksymalne punkty z teori przeplywu - obliczone")
print(maxPoints)
maxWeight = int(round(maxWeight*100))
minSizePerWeight = int( maxSolutionsNo/maxWeight )
queue = [ Solution() ]
pubLen = len(publications)
progressFile = open("progress.log", 'w' )
progressFile.close()
inpossibleBranches = 0
toHeavyBranches = 0
toCheapBranches = 0
bestPointsForWeight = {}
for n, publication in enumerate(publications):
authors = list(self.pubGraph.neighbors(publication))
maxPointsOfRest = maxPoints[n]
newQueue = []
for solution in queue:
for author in authors:
newSolution = deepcopy(solution)
solutionPossible = newSolution.addConnection(self.authorsDict[ author], self.publicationDict[publication] )
if not solutionPossible:
inpossibleBranches += 1
continue
##
if newSolution.actualWeight > maxWeight:
toHeavyBranches += 1
continue
#
if newSolution.actualPoints + maxPointsOfRest < minimalPoints:
toCheapBranches += 1
continue
weight = newSolution.actualWeight
if weight in bestPointsForWeight:
if newSolution.actualPoints > bestPointsForWeight[weight]:
bestPointsForWeight[weight] = newSolution.actualPoints
else:
bestPointsForWeight[weight] = newSolution.actualPoints
points = newSolution.actualPoints
if len(queue) > 0.5*maxSolutionsNo:
if bestPointsForWeight[weight] * 0.9 > points:
continue
newQueue.append(deepcopy(newSolution))
if solution.actualPoints + maxPointsOfRest >= minimalPoints:
newQueue.append(deepcopy(solution))
else:
toCheapBranches += 1
queue = newQueue
if len(queue) > maxSolutionsNo:
newQueue = []
for solution in queue:
weight = solution.actualWeight
points = solution.actualPoints
if bestPointsForWeight[weight] * 0.9 < points:
newQueue.append(solution)
queue = newQueue
if len(newQueue) > maxSolutionsNo:
mass2solutions = {}
for solution in newQueue:
weight2dict = solution.actualWeight
if not weight2dict in mass2solutions:
mass2solutions[weight2dict] = [ solution ]
else:
mass2solutions[weight2dict].append(solution)
newQueue = []
for mass in mass2solutions:
if len(mass2solutions[mass]) <= minSizePerWeight:
newQueue += mass2solutions[mass]
else:
newQueue += sample( mass2solutions[mass], minSizePerWeight )
queue = newQueue
progressFile = open("progress.log", 'a' )
progressFile.write("#########################\n")
progressFile.write(str(float(n/pubLen)*100) + " % "+str(n)+"\n")
progressFile.write("in queue: " + str(len(queue))+"\n")
progressFile.write("impossible branches: "+ str(inpossibleBranches)+"\n")
progressFile.write("to heavy branches: "+ str(toHeavyBranches)+"\n")
progressFile.write("to cheap branches: "+ str(toCheapBranches)+"\n")
progressFile.close()
if not queue:
print("nic nie znaleziono!")
return
bestSolution = None
bestPoints = 0
lowestPoints = 10000
# print("wszystkie rozwiazania: ", len(queue))
for solution in queue:
if solution.actualPoints > bestPoints:
bestPoints = solution.actualPoints
bestSolution = solution
if solution.actualPoints < lowestPoints:
lowestPoints = solution.actualPoints
return bestSolution
def branchAndBound(self, maxWeight, minimalPoints = 0, publications = [], maxPoints = []):
minimalPoints = int(round(minimalPoints*100))
if not publications:
publications = self.getAllPublicationsFromMainGraph()
maxPointsOfRest = maxPoints
if not maxPoints :
maxPoints = self.maxPointsOfRestFromFlowTheory(publications, maxWeight)
# print(maxPoints)
print("Maksymalne punkty z teori przeplywu - obliczone")
print(maxPoints)
maxWeight = int(round(maxWeight*100))
queue = [ Solution() ]
pubLen = len(publications)
progressFile = open("progress.log", 'w' )
progressFile.close()
inpossibleBranches = 0
toHeavyBranches = 0
toCheapBranches = 0
for n, publication in enumerate(publications):
authors = list(self.pubGraph.neighbors(publication))
maxPointsOfRest = maxPoints[n]
newQueue = []
for solution in queue:
for author in authors:
newSolution = deepcopy(solution)
solutionPossible = newSolution.addConnection(self.authorsDict[ author], self.publicationDict[publication] )
if not solutionPossible:
inpossibleBranches += 1
continue
##
if newSolution.actualWeight > maxWeight:
toHeavyBranches += 1
continue
#
if newSolution.actualPoints + maxPointsOfRest < minimalPoints:
toCheapBranches += 1
continue
newQueue.append(deepcopy(newSolution))
if solution.actualPoints + maxPointsOfRest >= minimalPoints:
newQueue.append(deepcopy(solution))
else:
toCheapBranches += 1
queue = newQueue
progressFile = open("progress.log", 'a' )
progressFile.write("#########################\n")
progressFile.write(str(float(n/pubLen)*100) + " % "+str(n)+"\n")
progressFile.write("in queue: " + str(len(queue))+"\n")
progressFile.write("impossible branches: "+ str(inpossibleBranches)+"\n")
progressFile.write("to heavy branches: "+ str(toHeavyBranches)+"\n")
progressFile.write("to cheap branches: "+ str(toCheapBranches)+"\n")
progressFile.close()
if not queue:
print("nic nie znaleziono!")
return
bestSolution = None
bestPoints = 0
lowestPoints = 10000
# print("wszystkie rozwiazania: ", len(queue))
for solution in queue:
if solution.actualPoints > bestPoints:
bestPoints = solution.actualPoints
bestSolution = solution
if solution.actualPoints < lowestPoints:
lowestPoints = solution.actualPoints
return bestSolution
def countIdenticalElements( vector2test, vectorKnown):
count = 0
for el in vectorKnown:
if el in vector2test:
count +=1
return count
|
chemiczny/pubMatch
|
pubMatch/publicationMatcher.py
|
publicationMatcher.py
|
py
| 13,317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35292064506
|
import pandas as pd
import datetime
import pickle
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
import random
# Airline carrier codes and the internal numeric code assigned to each after encoding the feature.
carrier_dict = {'AA':0, 'AS':1, 'B6':2, 'DL':3, 'EV':4, 'F9':5, 'HA':6, 'NK':7, 'OO':8, 'UA':9, 'VX':10,'WN':11}
# Distances between destinations
tripDistances=pd.DataFrame()
# Numeric and text codes of the airports
airport_codes=pd.DataFrame()
# New Year day, Martin Luther King Jr. Day, Presidents' Day, Memorial Day
# Independence Day, Labor Day, Columbus Day, Veterans Day,
# Thanksgiving, Christmas Day
holidays = [datetime.date(2018, 1, 1),datetime.date(2019, 1, 1), datetime.date(2020, 1, 1),
datetime.date(2018, 1, 15),datetime.date(2019, 1, 21), datetime.date(2020, 1, 20),
datetime.date(2018, 2, 19), datetime.date(2019, 2, 18), datetime.date(2020, 2, 17),
datetime.date(2018, 5, 28), datetime.date(2019, 5, 27), datetime.date(2020, 5, 25),
datetime.date(2018, 7, 4), datetime.date(2019, 7, 4), datetime.date(2020, 7, 4),
datetime.date(2018, 9, 3), datetime.date(2019, 9, 2), datetime.date(2020, 9, 7),
datetime.date(2018,10, 8), datetime.date(2019,10, 14), datetime.date(2020,10, 12),
datetime.date(2018, 11, 11), datetime.date(2019, 11, 11), datetime.date(2020, 11, 11),
datetime.date(2018, 11, 22), datetime.date(2019, 11, 28), datetime.date(2020, 11, 26),
datetime.date(2018, 12, 25), datetime.date(2019, 12, 25), datetime.date(2020, 12, 25)]
# Our prediction model, stored in a file
predictionModel = SGDRegressor()
encoder = OneHotEncoder()
scaler = StandardScaler()
error_info = ''
def init(model_file='data/flights_delays_model.pkl', trip_distance_file='data/tripDistance.pkl', airport_code_file='data/airportCodesDF.pkl', encoder_file='data/categ_featuresEncoder.pkl', scaler_file='data/numfeaturesScaler.pkl') :
global predictionModel, tripDistances, airport_codes,encoder, scaler
predictionModel = joblib.load(model_file)
pkl_file = open(trip_distance_file, 'rb')
tripDistances = pickle.load(pkl_file)
pkl_file = open(airport_code_file, 'rb')
airport_codes = pickle.load(pkl_file)
encoder = joblib.load(encoder_file)
scaler = joblib.load(scaler_file)
# Return the week number corresponding to the date
def getWeekNum(day, month,year) :
global error_info
try :
fl_date = datetime.date(year, month, day)
return fl_date.isocalendar()[1]
except Exception as err:
error_info += 'Invalid date entered (' + str(day) + '/' + str(month) + '/' + str(year) + ') :' + str(err) + '. '
raise(err)
# Return the day of the week (1 = Monday, ...)
def getWeekDay(day, month,year) :
global error_info
try :
return datetime.date(year, month, day).weekday() + 1
except Exception as err:
error_info += 'Invalid date entered (' + str(day) + '/' + str(month) + '/' + str(year) + ') :' + str(err) + '. '
raise(err)
# Return the numeric code corresponding to the carrier code
def getCarrierCodeNum(unique_carrier_code):
global error_info
if unique_carrier_code in carrier_dict :
return carrier_dict[unique_carrier_code]
else :
error_info += 'Cannot find carrier code (' + unique_carrier_code + '). '
raise ValueError('Bad carrier code')
# Return the flight distance between 2 airports
def getTripDistance(origin_code, destination_code):
global error_info
try:
distance = np.array(float(tripDistances[(tripDistances.ORIGIN == origin_code) &
(tripDistances.DEST == destination_code)].DISTANCE.drop_duplicates()))
return distance
except Exception as err:
error_info += 'Route was not found in the data. Please try a different nearby city or a new route.'
raise(err)
# Return the numeric code of the origin airport (if True) or of the destination airport (if False).
def getAirportCodeNum(airport_code, origin=True):
global error_info
try :
if origin :
return int(airport_codes[airport_codes.AIRPORT_CODE == airport_code].ORIGIN_CODE)
else :
return int(airport_codes[airport_codes.AIRPORT_CODE == airport_code].DEST_CODE)
except Exception as err:
error_info += 'No airport found with code ' + str(airport_code) + '. '
raise(err)
# Return the number of days to the nearest holiday
def getNumDaysToHoliday(day, month, year):
if year not in [2018, 2019, 2020] :
error_info += 'No data found for the year ' + str(year) + '. '
raise ValueError('Bad year')
c_date = datetime.date(year, month, day)
return np.min(np.abs(np.array(c_date) - np.array(holidays))).days
# Use our model to predict the possible delay.
def delay_prediction(originCode, destCode, carrier, day, month, year, dep_hour) :
global error_info
error_info=''
try :
origin_code_num = getAirportCodeNum(originCode, True)
dest_code_num = getAirportCodeNum(destCode, False)
carrier_code_num = carrier_dict[carrier]
weekday = getWeekDay(day, month, year)
week_num = getWeekNum(day, month, year)
hdays = getNumDaysToHoliday(day, month, year)
distance = getTripDistance(originCode, destCode)
numerical_values = np.c_[distance, hdays]
# Scale the features
numerical_values_scaled = scaler.transform(numerical_values)
categorical_values = np.zeros(8)
categorical_values[0] = int(month)
categorical_values[1] = int(day)
categorical_values[2] = int(weekday)
categorical_values[3] = int(week_num)
categorical_values[4] = int(dep_hour)
categorical_values[5] = int(carrier_code_num)
categorical_values[6] = int(origin_code_num)
categorical_values[7] = int(dest_code_num)
categorical_values_encoded = encoder.transform([categorical_values]).toarray()
travel = np.c_[numerical_values_scaled, categorical_values_encoded]
pred_delay = predictionModel.predict(travel)
return int(pred_delay[0]),error_info
except Exception as err:
print(error_info)
print ('Prediction error.', err)
return None, error_info
def test() :
tcarrier = ['AA', 'AS', 'DL', 'HA', 'UA']
tday = [1,10, 6, 9, 23, 30, 26, 12, 6, 9]
tmonth = [1,2, 3, 4, 5, 6, 7, 8, 9, 10,11,12]
tcode = ['BOS', 'JFK', 'SEA', 'SAN', 'DCA']
tdep_hour = [1, 2, 4, 7, 9, 12, 10, 15, 14, 17, 19, 20, 21, 22, 23]
for i in range(1000) :
origcode = random.choice(tcode)
destcode = random.choice(tcode)
carrier = random.choice(tcarrier)
day = random.choice(tday)
month = random.choice(tmonth)
dep_hour = random.choice(tdep_hour)
        d, err = delay_prediction(origcode, destcode, carrier, day, month, 2018, dep_hour)
        if d is not None :
            if d > 5 :
                print(origcode, destcode, carrier, day, month, dep_hour)
                print("delay", d)
                print("----------")
|
makboulhoussen/flightdelay
|
web-interface/webdelay/delayapi/flightDelayPred.py
|
flightDelayPred.py
|
py
| 7,291 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73474178748
|
from fastapi import APIRouter, Depends, FastAPI
from src.dependencies.auth import firebase_authentication
from src.routes.audios import views as audios_views
from src.routes.auth import views as auth_views
from src.routes.users import views as users_views
api_router = APIRouter()
api_router.include_router(auth_views.router, tags=['Authentication'])
api_router.include_router(users_views.router,
tags=['Reciter'],
prefix='/reciters',
dependencies=[Depends(firebase_authentication)])
api_router.include_router(audios_views.router,
tags=['Audios'],
prefix='/audios',
dependencies=[Depends(firebase_authentication)])
def init_api(app: FastAPI) -> None:
app.include_router(api_router)
|
CrowdsourcingApps/Crowdsourcing-Ayat
|
src/routes/__init__.py
|
__init__.py
|
py
| 846 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32469363998
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def helper(self, l, r):
if l > r: return None
max_idx = l
for i in range(l, r+1):
if self.nums[i] > self.nums[max_idx]:
max_idx = i
return TreeNode(self.nums[max_idx], self.helper(l, max_idx-1), self.helper(max_idx+1, r))
def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
self.nums = nums
return self.helper(0, len(nums)-1)
|
MdAbedin/leetcode
|
0601 - 0700/0654 Maximum Binary Tree.py
|
0654 Maximum Binary Tree.py
|
py
| 673 |
python
|
en
|
code
| 7 |
github-code
|
6
|
3476194370
|
from collections.abc import MutableMapping
from collections.abc import MutableSequence
from dpath import options
from dpath.exceptions import InvalidKeyName
import dpath.segments
_DEFAULT_SENTINAL = object()
MERGE_REPLACE = (1 << 1)
MERGE_ADDITIVE = (1 << 2)
MERGE_TYPESAFE = (1 << 3)
def __safe_path__(path, separator):
'''
Given a path and separator, return a tuple of segments. If path is
already a non-leaf thing, return it.
Note that a string path with the separator at index[0] will have the
separator stripped off. If you pass a list path, the separator is
ignored, and is assumed to be part of each key glob. It will not be
stripped.
'''
if not dpath.segments.leaf(path):
segments = path
else:
segments = path.lstrip(separator).split(separator)
# FIXME: This check was in the old internal library, but I can't
# see a way it could fail...
for i, segment in enumerate(segments):
if (separator and (separator in segment)):
raise InvalidKeyName("{} at {}[{}] contains the separator '{}'"
"".format(segment, segments, i, separator))
if options.CONVERT_INT_LIKE_SEGMENTS:
# Attempt to convert integer segments into actual integers.
final = []
for segment in segments:
try:
final.append(int(segment))
except:
final.append(segment)
segments = final
return segments
def new(obj, path, value, separator='/', creator=None):
'''
Set the element at the terminus of path to value, and create
it if it does not exist (as opposed to 'set' that can only
change existing keys).
path will NOT be treated like a glob. If it has globbing
characters in it, they will become part of the resulting
keys
creator allows you to pass in a creator method that is
responsible for creating missing keys at arbitrary levels of
the path (see the help for dpath.path.set)
'''
segments = __safe_path__(path, separator)
if creator:
return dpath.segments.set(obj, segments, value, creator=creator)
return dpath.segments.set(obj, segments, value)
def delete(obj, glob, separator='/', afilter=None):
'''
Given a obj, delete all elements that match the glob.
Returns the number of deleted objects. Raises PathNotFound if no paths are
found to delete.
'''
globlist = __safe_path__(glob, separator)
def f(obj, pair, counter):
(segments, value) = pair
# Skip segments if they no longer exist in obj.
if not dpath.segments.has(obj, segments):
return
matched = dpath.segments.match(segments, globlist)
selected = afilter and dpath.segments.leaf(value) and afilter(value)
if (matched and not afilter) or selected:
key = segments[-1]
parent = dpath.segments.get(obj, segments[:-1])
try:
# Attempt to treat parent like a sequence.
parent[0]
if len(parent) - 1 == key:
# Removing the last element of a sequence. It can be
# truly removed without affecting the ordering of
# remaining items.
#
# Note: In order to achieve proper behavior we are
# relying on the reverse iteration of
# non-dictionaries from dpath.segments.kvs().
# Otherwise we'd be unable to delete all the tails
# of a list and end up with None values when we
# don't need them.
del parent[key]
else:
# This key can't be removed completely because it
# would affect the order of items that remain in our
# result.
parent[key] = None
except:
# Attempt to treat parent like a dictionary instead.
del parent[key]
counter[0] += 1
[deleted] = dpath.segments.foldm(obj, f, [0])
if not deleted:
raise dpath.exceptions.PathNotFound("Could not find {0} to delete it".format(glob))
return deleted
def set(obj, glob, value, separator='/', afilter=None):
'''
Given a path glob, set all existing elements in the document
to the given value. Returns the number of elements changed.
'''
globlist = __safe_path__(glob, separator)
def f(obj, pair, counter):
(segments, found) = pair
# Skip segments if they no longer exist in obj.
if not dpath.segments.has(obj, segments):
return
matched = dpath.segments.match(segments, globlist)
selected = afilter and dpath.segments.leaf(found) and afilter(found)
if (matched and not afilter) or (matched and selected):
dpath.segments.set(obj, segments, value, creator=None)
counter[0] += 1
[changed] = dpath.segments.foldm(obj, f, [0])
return changed
def get(obj, glob, separator='/', default=_DEFAULT_SENTINAL):
'''
Given an object which contains only one possible match for the given glob,
return the value for the leaf matching the given glob.
If the glob is not found and a default is provided,
the default is returned.
If more than one leaf matches the glob, ValueError is raised. If the glob is
not found and a default is not provided, KeyError is raised.
'''
if glob == '/':
return obj
globlist = __safe_path__(glob, separator)
def f(obj, pair, results):
(segments, found) = pair
if dpath.segments.match(segments, globlist):
results.append(found)
if len(results) > 1:
return False
results = dpath.segments.fold(obj, f, [])
if len(results) == 0:
if default is not _DEFAULT_SENTINAL:
return default
raise KeyError(glob)
elif len(results) > 1:
raise ValueError("dpath.util.get() globs must match only one leaf : %s" % glob)
return results[0]
def values(obj, glob, separator='/', afilter=None, dirs=True):
'''
Given an object and a path glob, return an array of all values which match
the glob. The arguments to this function are identical to those of search().
'''
yielded = True
return [v for p, v in search(obj, glob, yielded, separator, afilter, dirs)]
def search(obj, glob, yielded=False, separator='/', afilter=None, dirs=True):
'''
Given a path glob, return a dictionary containing all keys
that matched the given glob.
If 'yielded' is true, then a dictionary will not be returned.
Instead tuples will be yielded in the form of (path, value) for
every element in the document that matched the glob.
'''
globlist = __safe_path__(glob, separator)
def keeper(segments, found):
'''
Generalized test for use in both yielded and folded cases.
Returns True if we want this result. Otherwise returns False.
'''
if not dirs and not dpath.segments.leaf(found):
return False
matched = dpath.segments.match(segments, globlist)
selected = afilter and afilter(found)
return (matched and not afilter) or (matched and selected)
if yielded:
def yielder():
for segments, found in dpath.segments.walk(obj):
if keeper(segments, found):
yield (separator.join(map(dpath.segments.int_str, segments)), found)
return yielder()
else:
def f(obj, pair, result):
(segments, found) = pair
if keeper(segments, found):
dpath.segments.set(result, segments, found, hints=dpath.segments.types(obj, segments))
return dpath.segments.fold(obj, f, {})
def merge(dst, src, separator='/', afilter=None, flags=MERGE_ADDITIVE):
'''
Merge source into destination. Like dict.update() but performs deep
merging.
NOTE: This does not do a deep copy of the source object. Applying merge
will result in references to src being present in the dst tree. If you do
not want src to potentially be modified by other changes in dst (e.g. more
merge calls), then use a deep copy of src.
NOTE that merge() does NOT copy objects - it REFERENCES. If you merge
take these two dictionaries:
>>> a = {'a': [0] }
>>> b = {'a': [1] }
... and you merge them into an empty dictionary, like so:
>>> d = {}
>>> dpath.util.merge(d, a)
>>> dpath.util.merge(d, b)
... you might be surprised to find that a['a'] now contains [0, 1].
This is because merge() says (d['a'] = a['a']), and thus creates a reference.
This reference is then modified when b is merged, causing both d and
a to have ['a'][0, 1]. To avoid this, make your own deep copies of source
objects that you intend to merge. For further notes see
https://github.com/akesterson/dpath-python/issues/58
flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE,
MERGE_TYPESAFE.
* MERGE_ADDITIVE : List objects are combined onto one long
list (NOT a set). This is the default flag.
* MERGE_REPLACE : Instead of combining list objects, when
2 list objects are at an equal depth of merge, replace
the destination with the source.
* MERGE_TYPESAFE : When 2 keys at equal levels are of different
types, raise a TypeError exception. By default, the source
replaces the destination in this situation.
'''
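    # Illustration with hypothetical values: for dst = {'x': [1]} and src = {'x': [2]},
    #   merge(dst, src)                      -> dst == {'x': [1, 2]}  (MERGE_ADDITIVE)
    #   merge(dst, src, flags=MERGE_REPLACE) -> dst == {'x': [2]}
    # and for mismatched types such as dst = {'x': 1}, src = {'x': 'a'},
    #   merge(dst, src, flags=MERGE_TYPESAFE) -> raises TypeError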
filtered_src = search(src, '**', afilter=afilter, separator='/')
def are_both_mutable(o1, o2):
mapP = isinstance(o1, MutableMapping) and isinstance(o2, MutableMapping)
seqP = isinstance(o1, MutableSequence) and isinstance(o2, MutableSequence)
if mapP or seqP:
return True
return False
def merger(dst, src, _segments=()):
for key, found in dpath.segments.kvs(src):
# Our current path in the source.
segments = _segments + (key,)
if len(key) == 0 and not options.ALLOW_EMPTY_STRING_KEYS:
raise InvalidKeyName("Empty string keys not allowed without "
"dpath.options.ALLOW_EMPTY_STRING_KEYS=True: "
"{}".format(segments))
# Validate src and dst types match.
if flags & MERGE_TYPESAFE:
if dpath.segments.has(dst, segments):
target = dpath.segments.get(dst, segments)
tt = type(target)
ft = type(found)
if tt != ft:
path = separator.join(segments)
raise TypeError("Cannot merge objects of type"
"{0} and {1} at {2}"
"".format(tt, ft, path))
# Path not present in destination, create it.
if not dpath.segments.has(dst, segments):
dpath.segments.set(dst, segments, found)
continue
# Retrieve the value in the destination.
target = dpath.segments.get(dst, segments)
# If the types don't match, replace it.
if ((type(found) != type(target)) and (not are_both_mutable(found, target))):
dpath.segments.set(dst, segments, found)
continue
            # If target is a leaf, then replace it.
if dpath.segments.leaf(target):
dpath.segments.set(dst, segments, found)
continue
# At this point we know:
#
# * The target exists.
# * The types match.
# * The target isn't a leaf.
#
# Pretend we have a sequence and account for the flags.
try:
if flags & MERGE_ADDITIVE:
target += found
continue
if flags & MERGE_REPLACE:
try:
target['']
except TypeError:
dpath.segments.set(dst, segments, found)
continue
except:
raise
except:
# We have a dictionary like thing and we need to attempt to
# recursively merge it.
merger(dst, found, segments)
merger(dst, filtered_src)
return dst
|
gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs
|
myenve/Lib/site-packages/dpath/util.py
|
util.py
|
py
| 12,695 |
python
|
en
|
code
| 3 |
github-code
|
6
|
30061421331
|
from bs4 import BeautifulSoup
import requests
import json
HEADING_ORDER = [
"defensePhysical",
"defensePhysicalStrike",
"defensePhysicalSlash",
"defensePhysicalPierce",
"defenseMagic",
"defenseFire",
"defenseLightning",
"defenseHoly",
"immunity",
"robustness",
"focus",
"vitality",
"poise",
"weight",
]
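# Note: this order is assumed to mirror the left-to-right order of the stat
# columns in the wiki tables; extract_from_html() zips it against the cells
# of each table row.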
def extract_from_html(content, slot):
soup = BeautifulSoup(content, features="html.parser")
for table_row in soup.find_all("tr"):
name_cell = table_row.find("td")
if name_cell is None:
continue
name = name_cell.find_all("a")[-1].get_text().strip()
armor = {
"name": name,
"slot": slot,
"weight": 0,
"poise": 0,
"immunity": 0,
"robustness": 0,
"focus": 0,
"vitality": 0,
"defensePhysical": 0,
"defensePhysicalStrike": 0,
"defensePhysicalSlash": 0,
"defensePhysicalPierce": 0,
"defenseMagic": 0,
"defenseFire": 0,
"defenseLightning": 0,
"defenseHoly": 0,
}
for attribute, cell in zip(HEADING_ORDER, [x for x in table_row.children if x != "\n"][1:len(HEADING_ORDER) + 1]):
cell_text = cell.get_text()
if "defense" in attribute or attribute == "weight":
armor[attribute] = float(cell_text)
else:
armor[attribute] = int(cell_text)
yield armor
if __name__ == "__main__":
armor_data = []
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Helms").text, "head"))
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Chest+Armor").text, "body"))
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Gauntlets").text, "arms"))
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Leg+Armor").text, "legs"))
armor_data.sort(key=lambda x: x["name"])
with open("armor_data.json", "w") as f:
json.dump(armor_data, f, indent=2)
|
lewisc64/Elden-Ring-Poise-Optimizer
|
data/sources/wiki/scrape_wiki.py
|
scrape_wiki.py
|
py
| 2,230 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73727858748
|
#!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
import matplotlib.pyplot as plt
CAMPAIGN_NAME = "Conduction."
def refine_sampling_plan(campaign, analysis, number_of_refinements):
"""
Refine the sampling plan.
Parameters
----------
number_of_refinements (int)
The number of refinement iterations that must be performed.
Returns
-------
None. The new accepted indices are stored in analysis.l_norm and the admissible indices
in sampler.admissible_idx.
"""
sampler = campaign.get_active_sampler()
for _ in range(number_of_refinements):
# compute the admissible indices
sampler.look_ahead(analysis.l_norm)
print(f"Code will be evaluated {sampler.n_new_points[-1]} times")
# run the ensemble
campaign.execute().collate(progress_bar=True)
# accept one of the multi indices of the new admissible set
data_frame = campaign.get_collation_result()
analysis.adapt_dimension("T", data_frame)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
def plot_grid_2D(campaign, analysis, i, filename="out.pdf"):
fig = plt.figure(figsize=[12, 4])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
accepted_grid = campaign.get_active_sampler().generate_grid(analysis.l_norm)
ax1.plot(accepted_grid[:, 0], accepted_grid[:, 1], "o")
ax2.plot(accepted_grid[:, 2], accepted_grid[:, 3], "o")
ax1.set_title(f"iteration {i}")
fig.tight_layout()
fig.savefig(filename)
def custom_moments_plot(results, filename, i):
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.set_title("iteration " + str(i))
ax.legend()
fig.savefig(filename)
def first_time_setup():
encoder = boutvecma.BOUTEncoder(
template_input="../../models/conduction/data/BOUT.inp"
)
# decoder = boutvecma.LogDataBOUTDecoder(variables=["T"])
decoder = boutvecma.SimpleBOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
actions = uq.actions.local_execute(
encoder,
os.path.abspath(
"../../build/models/conduction/conduction -q -q -q -q -d . |& tee run.log"
),
decoder,
root=".",
)
campaign = uq.Campaign(name=CAMPAIGN_NAME, actions=actions, params=params)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.5, 1.5),
"T:gauss_centre": chaospy.Uniform(0.5 * np.pi, 1.5 * np.pi),
}
sampler = uq.sampling.SCSampler(
vary=vary,
polynomial_order=1,
quadrature_rule="C",
sparse=True,
growth=True,
midpoint_level1=True,
dimension_adaptive=True,
)
campaign.set_sampler(sampler)
print(f"Output will be in {campaign.campaign_dir}")
sampler = campaign.get_active_sampler()
print(f"Computing {sampler.n_samples} samples")
time_start = time.time()
campaign.execute().collate(progress_bar=True)
# Create an analysis class and run the analysis.
analysis = create_analysis(campaign)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
plot_grid_2D(campaign, analysis, 0, f"{campaign.campaign_dir}/grid0.png")
for i in np.arange(1, 10):
refine_once(campaign, analysis, i)
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
return campaign
def create_analysis(campaign):
return uq.analysis.SCAnalysis(sampler=campaign.get_active_sampler(), qoi_cols=["T"])
def refine_once(campaign, analysis, iteration):
refine_sampling_plan(campaign, analysis, 1)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
results = campaign.last_analysis
plot_grid_2D(
campaign,
analysis,
iteration,
f"{campaign.campaign_dir}/grid{iteration:02}.png",
)
moment_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"moments{iteration:02}.png"
)
sobols_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"sobols_first{iteration:02}.png"
)
results.plot_sobols_first(
"T",
ylabel=f"iteration{iteration}",
xlabel=r"$\rho$",
filename=sobols_plot_filename,
)
plt.ylim(0, 1)
plt.savefig(f"{campaign.campaign_dir}/sobols{iteration:02}.png")
custom_moments_plot(results, moment_plot_filename, iteration)
with open(f"{campaign.campaign_dir}/last_iteration", "w") as f:
f.write(f"{iteration}")
def plot_results(campaign, moment_plot_filename, sobols_plot_filename):
results = campaign.get_last_analysis()
results.plot_sobols_first("T", xlabel=r"$\rho$", filename=sobols_plot_filename)
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.legend()
fig.savefig(moment_plot_filename)
print(f"Results are in:\n\t{moment_plot_filename}\n\t{sobols_plot_filename}")
def reload_campaign(directory):
"""Reload a campaign from a directory
Returns the campaign, analysis, and last iteration number
"""
campaign = uq.Campaign(
name=CAMPAIGN_NAME,
db_location=f"sqlite:///{os.path.abspath(directory)}/campaign.db",
)
analysis = create_analysis(campaign)
analysis.load_state(f"{campaign.campaign_dir}/analysis.state")
with open(f"{campaign.campaign_dir}/last_iteration", "r") as f:
iteration = int(f.read())
return campaign, analysis, iteration
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"conduction_sc",
description="Adaptive dimension refinement for 1D conduction model",
)
parser.add_argument(
"--restart", type=str, help="Restart previous campaign", default=None
)
parser.add_argument(
"-n", "--refinement-num", type=int, default=1, help="Number of refinements"
)
args = parser.parse_args()
if args.restart is None:
first_time_setup()
else:
campaign, analysis, last_iteration = reload_campaign(args.restart)
for iteration in range(
last_iteration + 1, last_iteration + args.refinement_num + 1
):
refine_once(campaign, analysis, iteration)
|
boutproject/VECMA-hackathon
|
workflows/sc_adaptive_restartable/example_restartable_sc_adaptive.py
|
example_restartable_sc_adaptive.py
|
py
| 8,019 |
python
|
en
|
code
| 2 |
github-code
|
6
|
41675773440
|
# Red-green color blindness (problem 10026): count connected color regions twice,
# once on the original grid and once with G merged into R.
import sys
sys.setrecursionlimit(1000000)
input = sys.stdin.readline
N = int(input())
area1 = []
area2 = []
for _ in range(N):
lst1 = []
lst2 = []
for s in list(input().strip()):
lst1.append(s)
if s == "G":
lst2.append("R")
else:
lst2.append(s)
area1.append(lst1)
area2.append(lst2)
direction = [(1, 0), (0, -1), (-1, 0), (0, 1)]
visit = [[False for _ in range(N)] for _ in range(N)]
res1 = 0
def dfs1(color, i, j):
for d in direction:
next_i = i + d[0]
next_j = j + d[1]
if (
0 <= next_i < N
and 0 <= next_j < N
and not visit[next_i][next_j]
and area1[next_i][next_j] == color
):
visit[next_i][next_j] = True
dfs1(color, next_i, next_j)
for i in range(N):
for j in range(N):
if not visit[i][j]:
dfs1(area1[i][j], i, j)
res1 += 1
########################################################
visit = [[False for _ in range(N)] for _ in range(N)]
res2 = 0
def dfs2(color, i, j):
for d in direction:
next_i = i + d[0]
next_j = j + d[1]
if (
0 <= next_i < N
and 0 <= next_j < N
and not visit[next_i][next_j]
and area2[next_i][next_j] == color
):
visit[next_i][next_j] = True
dfs2(color, next_i, next_j)
for i in range(N):
for j in range(N):
if not visit[i][j]:
dfs2(area2[i][j], i, j)
res2 += 1
print(res1, res2)
|
jisupark123/Python-Coding-Test
|
알쓰/week4/10026.py
|
10026.py
|
py
| 1,597 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11948500012
|
# takes an input file and output file
# input file is a Gcal (Google Calendar) export in CSV format
# output file: two commas are prepended to any line that contains no commas
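# e.g. (hypothetical address) a line containing only "john.doe@example.com" becomes
# "john.doe,john.doe,john.doe@example.com", while lines that already contain commas
# are copied through unchanged.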
import os, sys
inputFile = open(sys.argv[1], "rt")
outFile = open(sys.argv[2], "wt")
for line in inputFile:
if line.count(",") == 0:
strName = line.split("@")[0]
line = strName + "," + strName + "," + line
outFile.write(line)
inputFile.close()
outFile.close()
|
umabot/pyCleanGcal
|
addCommas.py
|
addCommas.py
|
py
| 444 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71608494589
|
# coding=utf-8
import logging
from datetime import datetime
import markupsafe
from playhouse.shortcuts import dict_to_model, model_to_dict
from app import components
from app.notes.model import Note, TaggedNote
from app.tags import tagService
from app.categories import categoryService
class NoteService(components.Service):
name = "notes"
model_class = Note
def __init__(self):
super().__init__()
def fetch_all_items(self, category_filter, milestone_filter):
user_id = components.current_user_id()
category_select = categoryService.category_filter_helper(Note, user_id, category_filter)
milestone_select = []
# milestone_filter == "all"
# milestone_filter == "unassigned"
# else ...
return Note.select(Note).where(
Note.is_deleted == False,
*category_select,
*milestone_select,
Note.owner_id == user_id
).order_by(Note.edited.desc()).objects()
def create_item(self, item_json):
(item_json, tags) = self._select_and_sanitize_tags(item_json)
# Check if user has ownership over the category given
if ("category" in item_json and item_json["category"] and not categoryService.read_item(item_json["category"])):
raise components.BadRequestError()
item = dict_to_model(Note, item_json)
item.content = markupsafe.escape(markupsafe.Markup(item.content))
item.owner = components.current_user()
item.save(force_insert=True)
item.tags.add(tags)
return item
def update_item(self, item_id, item_json):
myItem = self.read_item(item_id)
(item_json, tags) = self._select_and_sanitize_tags(item_json)
item = dict_to_model(Note, item_json)
with components.DB.atomic():
item.id = int(myItem.id)
item.changed()
item.save()
item.tags.clear()
item.tags.add(tags)
return item
raise RuntimeError("Could not update note")
def serialize_item(self, item):
item_json = model_to_dict(item, exclude=(
Note.is_deleted,
Note.owner,
Note.tags
), recurse=False)
tags = [tag for tag in item.tags]
item_json["tags"] = [tag.tag for tag in tags]
return item_json
def sanitize_fields(self, item_json):
if "due_date" in item_json:
due_date = datetime.fromtimestamp(int(item_json["due_date"])).date() if item_json["due_date"] else None
item_json["due_date"] = due_date
return super().sanitize_fields(item_json)
def _select_and_sanitize_tags(self, item_json):
tags = []
item_json = self.sanitize_fields(item_json)
if "tags" in item_json:
tags = tagService.bulk_search_or_insert(item_json["tags"])
del item_json["tags"]
logging.debug("Selected tags:" + ",".join([tag.tag for tag in tags]))
return (item_json, tags)
noteService = NoteService()
# ----------------------------------------
class Module(components.Module):
from app.notes.controller import NoteListController, NoteController
name = "notes"
services = [noteService]
models = [Note, TaggedNote]
controllers = [NoteListController, NoteController]
module = Module()
|
caiwan/cai-notepad
|
backend/app/notes/__init__.py
|
__init__.py
|
py
| 3,356 |
python
|
en
|
code
| 6 |
github-code
|
6
|
31569984020
|
# normal libraries
from inspect import signature # used in the method eval of the class
import numpy as np
import scipy.stats # functions of statistics
# other files
from corai_error import Error_type_setter
from scipy.integrate import simps
# my libraries
np.random.seed(124)
# section ######################################################################
# #############################################################################
# some information
# -------------------------------------------------------------------------------------------------------
# list of the possible kernels:
# fct_top_hat
# fct_plain
# fct_truncnorm
# fct_biweight
#
#
# the functions are correct; they scale and shift the way they are supposed to.
# However they are written in the following way : f_t(t_i) = K( t_i - t )
# example of kernels:
# list_of_kernels =
# [Kernel(fct_top_hat, name="wide top hat", a=-450, b=450),
# Kernel(fct_top_hat, name="normal top hat", a=-200, b=200),
# Kernel(fct_truncnorm, name="wide truncnorm", a=-500, b=500, sigma=350),
# Kernel(fct_truncnorm, name="normal truncnorm", a=-350, b=350, sigma=250)]
# -------------------------------------------------------------------------------------------------------
# the functions only work for positive times. If one inputs negative times, it messes up the orientation.
# section ######################################################################
# #############################################################################
# class
class Kernel:
# kernel is a functor, used for weighting some computations.
# the evaluation gives back a list of np.array
# the function should hand in the list of np.arrays non scaled.
# the parameters of the function (to be called) are gathered before:
# the weights do not change inside the estimation process.
# the name is for identification in plots
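    # hypothetical usage: k = Kernel(fct_biweight, name="biweight", a=-300, b=300);
    # k(T_t, eval_point, T_max) then returns one rescaled np.array of weights per
    # dimension of T_t.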
def __init__(self, fct_kernel, name=' no name ', **kwargs):
self.fct_kernel = fct_kernel
self.name = name
self.__dict__.update(kwargs)
def __repr__(self):
return f"Function is {repr(self._fct_kernel)} and name {self.name}."
def __call__(self, T_t, eval_point, T_max, debug=False):
# getting the length over each dimensions for the kernel.
shape_T_t = [len(T_t[i]) for i in range(len(T_t))] # recall each dim has different nb of jumps
# ans is the kernel evaluated on the jumps
ans = self._fct_kernel(T_t=T_t, eval_point=eval_point, shape_T_t=shape_T_t,
**{k: self.__dict__[k] for k in self.__dict__ if
k in signature(self._fct_kernel).parameters})
# ans is a list of np arrays. It is normalized such that it is a kernel.
# then I want to scale every vector.
# The total integral should be T_max, so I multiply by T_max
# If it isn't fct plain, then I have to scale.
if self._fct_kernel.__name__ != 'fct_plain':
# I want to rescale the results for the kernels that are not covering seen part. For that reason,
# I compute the integral of the kernel, and scale accordingly.
tt_integral = [np.linspace(0, T_max, int(5E5))] # in a list to respect the format list of list of T_t.
yy = self._fct_kernel(T_t=tt_integral, eval_point=eval_point, shape_T_t=[1],
**{k: self.__dict__[k] for k in self.__dict__ if
k in signature(self._fct_kernel).parameters})
integral = simps(yy[0], tt_integral[0])
# yy[0] bc function gives back a list of arrays.
for i in range(len(shape_T_t)):
ans[i] = ans[i] / integral * T_max
                # *= does not work correctly since the vectors are not the same type (int/float).
# I also divide by the sum, the vector is normalized, however,
# possibly we're on the edge and we need to take that into account.
if debug:
print(f"inside kernel debug, "
f"that's my integral : "
f"{np.sum(ans[0][:-1]) * T_max / (len(ans[0]) - 1)}. "
f"Name : {self.fct_kernel.__name__}.")
return ans
# section ######################################################################
# #############################################################################
# getters setters
@property
def fct_kernel(self):
return self._fct_kernel
@fct_kernel.setter
def fct_kernel(self, new_fct_kernel):
self._fct_kernel = new_fct_kernel
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
if isinstance(new_name, str):
self._name = new_name
else:
raise Error_type_setter(f'Argument is not an string.')
# section ######################################################################
# #############################################################################
# kernels' functions
def fct_top_hat(T_t, shape_T_t, eval_point, a=-200, b=200):
output = []
for i in range(len(shape_T_t)):
vector = np.array(T_t[i])
# -1 if x < 0, 0 if x==0, 1 if x > 0.
output.append(1 / (2 * (b - a)) *
(np.sign(vector - eval_point - a) +
np.sign(b - vector + eval_point))
)
return output
def fct_plain(T_t, shape_T_t, eval_point):
    # no scaling parameter; it would be pointless to use scaling on plain.
return [np.full(shape_T_t[i], 1) for i in range(len(shape_T_t))] # full of 1.
def fct_truncnorm(T_t, shape_T_t, eval_point, a=-300, b=300, sigma=200):
output = []
for i in range(len(shape_T_t)):
output.append(scipy.stats.truncnorm.pdf(np.array(T_t[i]), a / sigma, b / sigma,
loc=eval_point, scale=sigma))
return output
def fct_truncnorm_test(T_t, shape_T_t, eval_point, a=-300, b=300, sigma=200):
output = []
i = 0 # for output[i] after, but there shouldn't be any problem.
for i in range(len(shape_T_t)):
output.append(2 * scipy.stats.truncnorm.pdf(np.array(T_t[i]), a / sigma, b / sigma,
loc=eval_point, scale=sigma))
output[i][T_t[i] < eval_point] = 0
return output
def fct_biweight(T_t, shape_T_t, eval_point, a=-300, b=300):
# if important, I can generalize biweight with function beta.
# Thus creating like 4 kernels with one function ( BETA(1), BETA(2)...)
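    # The biweight (quartic) kernel is K(u) = 15/16 * (1 - u^2)^2 for |u| <= 1 and 0
    # otherwise; below, u is rescaled so that [a, b] maps onto [-1, 1] and the extra
    # 2 / (b - a) factor keeps the integral over [a, b] equal to 1.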
assert a == -b, "The kernel only accepts symmetrical bounds."
output = []
for i in range(len(shape_T_t)):
xx = (np.array(T_t[i]) - (a + b) / 2 - eval_point) * 2 / (b - a)
# the correct order is eval_point - T_t,
# bc we evaluate at eval_point but translated by T_t,
# if kernel not symmetric a != b, then we also need to translate by the mid of them.
xx[(xx < -1) | (xx > 1)] = 1
output.append(15 / 16 * np.power(1 - xx * xx, 2) * 2 / (b - a))
return output
def fct_epa(T_t, shape_T_t, eval_point, a=-300, b=300):
assert a == -b, "The kernel only accepts symmetrical bounds."
output = []
for i in range(len(shape_T_t)):
xx = (np.array(T_t[i]) - (a + b) / 2 - eval_point) * 2 / (b - a)
# the correct order is eval_point - T_t,
# bc we evaluate at eval_point but translated by T_t,
# if kernel not symmetric a != b, then we also need to translate by the mid of them.
xx[(xx < -1) | (xx > 1)] = 1
output.append(3 / 4 * (1 - xx * xx) * 2 / (b - a))
return output
|
Code-Cornelius/ITiDeEP
|
src/hawkes/kernel.py
|
kernel.py
|
py
| 7,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39434532575
|
from collections import Counter
import zarr
from fastai.tabular.all import *
from fastai.data.all import *
from fastai.vision.gan import *
from fastai import *
from tsai.all import *
from torch import nn
import numpy as np
import seaborn as sns
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import torch.nn.functional as F
from model import stagerNetAAE, stagerNetCritic
from utils import LossAttrMetric, GetLatentSpace, norm_batch, UnfreezeFcCrit, \
SwitchAttribute, distrib_regul_regression, hist_lab, plot_results
# Load the config file
config_file = 'config.json'
with open(config_file, 'r') as file:
config = json.load(file)
# Set the device on which you want to train the model
device = torch.device(config['device'])
torch.cuda.set_device(device)
lab_area = torch.Tensor(np.load(f'{config["labels_path"]}/area_db.npy'))[:,None]
lab_arousal = torch.Tensor(np.load(f'{config["labels_path"]}/arousal_db.npy'))[:,None]
lab_duration = torch.Tensor(np.load(f'{config["labels_path"]}/duration_db.npy'))[:,None]
# Define the labels
# 1) discrete labels
lab_area = torch.Tensor(np.load(f'{config["labels_path"]}/area_db.npy'))[:,None]
lab_arousal = torch.Tensor(np.load(f'{config["labels_path"]}/arousal_db.npy'))[:,None]
lab_duration = torch.Tensor(np.load(f'{config["labels_path"]}/duration_db.npy'))[:,None]
lab_all = torch.Tensor(4*lab_area + 2*lab_arousal + lab_duration)
lab_discrete = torch.hstack((lab_area,lab_duration,lab_arousal))
# 2) switch to match the desired encoding
tmp = copy(lab_all)
lab_all[tmp==3] = 4
lab_all[tmp==4] = 3
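# (illustration) lab_all is a 3-bit code, 4*area + 2*arousal + duration, taking values 0-7;
# the swap above exchanges codes 3 (area=0, arousal=1, duration=1) and
# 4 (area=1, arousal=0, duration=0), presumably so the ordering follows the intended ranking.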
# 3) 3-level labels ("low", "medium", "high")
lab3 = deepcopy(lab_all)
lab3[:] = 0
lab3[lab_all>1] = 1
lab3[lab_all>5] = 2
# 4) 4-level labels ("all metrics at low level", "1 metrics at high level", "2 metrics at high level", "all metrics at high level")
lab4 = deepcopy(lab_all)
lab4[lab_all>0] = 1
lab4[lab_all>3] = 2
lab4[lab_all==7] = 3
# 5) normalize the label values
lab_norm_area = torch.Tensor(np.load(f'{config["labels_path"]}/norm_area_db.npy')).unsqueeze(-1)
lab_norm_duration = torch.Tensor(np.load(f'{config["labels_path"]}/norm_duration_db.npy')).unsqueeze(-1)
lab_norm = torch.hstack((lab_norm_area,lab_norm_duration,lab_arousal))
#normalize the binary arousal value with respect to the std of area and duration labels
lab_arousal_tmp = torch.Tensor([-1 if x==0 else 1 for x in lab_arousal]).unsqueeze(-1)
lab_norm_arousal = lab_arousal_tmp * (lab_norm_area.std() + lab_norm_duration.std()) / 2
lab_gather = torch.hstack((lab_norm_area,lab_norm_duration,lab_norm_arousal))
lab_gather = lab_gather.mean(dim=1).unsqueeze(-1) # mean of all metrics
# 6) Gather all the labels in a list in right order
label_stack = torch.hstack((lab_gather, lab_area, lab_duration, lab_arousal, lab3, lab4))
# Define dls
if config['load_dls']:
dls = torch.load(config['dls_path']) # should be a .pkl file
else:
# Read your data (.zarr file)
path = Path(config['data_path'])
X = zarr.open(path, mode='r')
t = torch.Tensor(X)
print('data properly read')
# Define splitter
n_train_samples = round(len(t)*config['trainset_part'])
n_total_samples = len(t)
splits = (L(range(n_train_samples), use_list=True),
L(np.arange(n_train_samples, n_total_samples), use_list=True))
splitter = IndexSplitter(splits[1])
getters = [ItemGetter(0), ItemGetter(1)]
dblock = DataBlock(blocks=(TSTensorBlock,TSTensorBlock),
getters=getters,
splitter=splitter,
batch_tfms=norm_batch())
src = itemify(t.to('cpu'),label_stack.to('cpu'))
dls = dblock.dataloaders(src, bs=config['bs'], val_bs=config['val_bs'], drop_last=True)
torch.save(dls, config['dls_path'])
# free memory space
del X
time.sleep(.2)
torch.cuda.empty_cache()
print('memory flushed')
dls = dls.to(device)
print('dls:')
print(dls.one_batch())
### Train the AutoEncoder part ###
acc_factor = config['acc_factor']
latent_dim = config['latent_dim']
model = stagerNetAAE(latent_dim=latent_dim,acc_factor=acc_factor)
model = model.to(device)
if config['train_ae']:
metrics = [rmse]
learn = Learner(dls, model, loss_func = model.ae_loss_func, metrics=metrics, opt_func=ranger)
learning_rate = learn.lr_find()
learn.fit_flat_cos(n_epoch=config['n_epoch'], lr=learning_rate.valley,
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(),
SaveModelCallback(fname=config['ae_filename']),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'])])
state_dict = torch.load(f'models/{config["ae_filename"]}.pth') # load the best weights
### Train the Classifier part ###
classif_filename = config['classif_filename']
model.load_state_dict(state_dict, strict=False)
#define the metrics to show
metrics = [LossAttrMetric("gather_loss"), LossAttrMetric("simple_loss"),
LossAttrMetric("area_loss"), LossAttrMetric("duration_loss"),
LossAttrMetric("arousal_loss"), LossAttrMetric("ord_loss")]
#freeze the discriminator weights
for name, param in model.named_parameters():
if "fc_crit" in name:
param.requires_grad_(False)
if config['train_classif_discrete']:
    #define the losses to monitor
monitor_loss = ['area_loss','duration_loss','arousal_loss']
#set the learning rates
learning_rates = [1e-3,5e-4,2e-4]
# Start curriculum learning
total_cycles = config['nb_of_metrics']
for i in range(total_cycles):
curr_filename = str(classif_filename)+'_level'+str(i+1)
model.level = i+1
met = metrics[1:i+3] + metrics[-1:]
learn = Learner(dls, model, loss_func=model.classif_loss_func,
metrics=met, opt_func=ranger)
learn.fit_flat_cos(config['n_epoch'], lr=learning_rates[i],
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(monitor=monitor_loss[i]),
SaveModelCallback(fname=curr_filename,monitor=monitor_loss[i]),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'],monitor=monitor_loss[i]),
SwitchAttribute(attribute_name='global_loss', switch_every=5)
])
learn.load(curr_filename)
model.load_state_dict(learn.model.state_dict())
state_dict = torch.load(f'models/{classif_filename}_level3.pth') # load the best weights
model.load_state_dict(state_dict, strict=False)
if config['train_regress']:
model.level = 0
model.dropout_rate = .1
learn = Learner(dls, model, loss_func=model.classif_loss_func,
metrics=metrics, opt_func=ranger)
learn.fit_flat_cos(config['n_epoch'], lr=1e-3,
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(monitor='gather_loss'),
SaveModelCallback(fname=classif_filename, monitor='gather_loss'),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'],monitor='gather_loss'),
SwitchAttribute(attribute_name='global_loss', switch_every=5)])
np.save('results/'+str(classif_filename)+'_losses.npy', learn.recorder.losses)
np.save('results/'+str(classif_filename)+'_values.npy', learn.recorder.values)
state_dict = torch.load(f'models/{config["classif_filename"]}.pth') # load the best weights
### Train the Adversarial part ###
model.load_state_dict(state_dict, strict=False)
adv_filename = config['aae_filename']
if config['train_aae']:
metrics = [LossAttrMetric("classif_loss"), LossAttrMetric("recons_loss"),
LossAttrMetric("adv_loss")]
learn = Learner(dls, model, loss_func=model.aae_loss_func,
metrics=metrics, opt_func=ranger)
learn.fit_flat_cos(config['n_epoch'], lr=1e-3,
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(monitor='classif_loss'),
SaveModelCallback(fname=adv_filename, monitor='classif_loss'),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'],monitor='classif_loss'),
UnfreezeFcCrit(switch_every=2),
SwitchAttribute(attribute_name='global_loss', switch_every=5)])
state_dict = torch.load(f'models/{adv_filename}.pth') # load the best weights
### Extract the latent space ###
result_filename = config['result_filename']
model.load_state_dict(state_dict, strict=False)
learn = Learner(dls,model,loss_func=model.aae_loss_func)
if config['load_latent_space']:
new_zi = torch.load(f'data/z_{result_filename}.pt')
print(f'latent space loaded with shape {new_zi.shape}')
else:
learn.zi_valid = torch.tensor([]).to(device)
learn.get_preds(ds_idx=0,cbs=[GetLatentSpace(cycle_len=1)])
new_zi = learn.zi_valid
learn.zi_valid = torch.tensor([]).to(device)
learn.get_preds(ds_idx=1,cbs=[GetLatentSpace(cycle_len=1)])
new_zi = torch.vstack((new_zi,learn.zi_valid))
print("new_zi shape: "+str(new_zi.shape))
torch.save(new_zi,f'data/z_{result_filename}.pt')
### Display the latent space ###
plot_results(new_zi.to(device),lab_gather,learn,result_filename)
|
numediart/xAAEnet
|
main.py
|
main.py
|
py
| 9,638 |
python
|
en
|
code
| 1 |
github-code
|
6
|
855878754
|
#!/usr/bin/env python
# This example shows how to extract portions of an unstructured grid
# using vtkExtractUnstructuredGrid. vtkConnectivityFilter is also used
# to extract connected components.
#
# The data found here represents a blow molding process. Blow molding
# requires a mold and parison (hot, viscous plastic) which is shaped
# by the mold into the final form. The data file contains several steps
# in time for the analysis.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create a reader to read the unstructured grid data. We use a
# vtkDataSetReader which means the type of the output is unknown until
# the data file is read. So we follow the reader with a
# vtkCastToConcrete and cast the output to vtkUnstructuredGrid.
reader = vtk.vtkDataSetReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/blow.vtk")
reader.SetScalarsName("thickness9")
reader.SetVectorsName("displacement9")
castToUnstructuredGrid = vtk.vtkCastToConcrete()
castToUnstructuredGrid.SetInputConnection(reader.GetOutputPort())
warp = vtk.vtkWarpVector()
warp.SetInput(castToUnstructuredGrid.GetUnstructuredGridOutput())
# The connectivity filter extracts the first two regions. These are
# known to represent the mold.
connect = vtk.vtkConnectivityFilter()
connect.SetInputConnection(warp.GetOutputPort())
connect.SetExtractionModeToSpecifiedRegions()
connect.AddSpecifiedRegion(0)
connect.AddSpecifiedRegion(1)
moldMapper = vtk.vtkDataSetMapper()
moldMapper.SetInputConnection(reader.GetOutputPort())
moldMapper.ScalarVisibilityOff()
moldActor = vtk.vtkActor()
moldActor.SetMapper(moldMapper)
moldActor.GetProperty().SetColor(.2, .2, .2)
moldActor.GetProperty().SetRepresentationToWireframe()
# Another connectivity filter is used to extract the parison.
connect2 = vtk.vtkConnectivityFilter()
connect2.SetInputConnection(warp.GetOutputPort())
connect2.SetExtractionModeToSpecifiedRegions()
connect2.AddSpecifiedRegion(2)
# We use vtkExtractUnstructuredGrid because we are interested in
# looking at just a few cells. We use cell clipping via cell id to
# extract the portion of the grid we are interested in.
extractGrid = vtk.vtkExtractUnstructuredGrid()
extractGrid.SetInputConnection(connect2.GetOutputPort())
extractGrid.CellClippingOn()
extractGrid.SetCellMinimum(0)
extractGrid.SetCellMaximum(23)
parison = vtk.vtkGeometryFilter()
parison.SetInputConnection(extractGrid.GetOutputPort())
normals2 = vtk.vtkPolyDataNormals()
normals2.SetInputConnection(parison.GetOutputPort())
normals2.SetFeatureAngle(60)
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.0, 0.66667)
parisonMapper = vtk.vtkPolyDataMapper()
parisonMapper.SetInputConnection(normals2.GetOutputPort())
parisonMapper.SetLookupTable(lut)
parisonMapper.SetScalarRange(0.12, 1.0)
parisonActor = vtk.vtkActor()
parisonActor.SetMapper(parisonMapper)
# graphics stuff
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(parisonActor)
ren.AddActor(moldActor)
ren.SetBackground(1, 1, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(60)
ren.GetActiveCamera().Roll(-90)
ren.GetActiveCamera().Dolly(2)
ren.ResetCameraClippingRange()
renWin.SetSize(500, 375)
iren.Initialize()
renWin.Render()
iren.Start()
|
VisTrails/VisTrails
|
examples/vtk_examples/VisualizationAlgorithms/ExtractUGrid.py
|
ExtractUGrid.py
|
py
| 3,366 |
python
|
en
|
code
| 100 |
github-code
|
6
|
25329151854
|
import numpy as np
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def sigmoid_prime(y_hat):
return y_hat * (1 - y_hat)
class Perceptron:
"""Perceptron implements a simple perceptron cell."""
def __init__(self, inputs):
self.weights = np.random.randn(1, inputs)
self.bias = 0
def predict(self, inputs):
z = np.dot(self.weights, inputs) + self.bias
return sigmoid(z)
def train(self, x, y, epochs=100, eta=1):
for i in range(epochs):
# 1. make a prediction for given training input
y_hat = self.predict(x)
print("loss={}".format((1. / 2.) * np.power(y - y_hat, 2)))
# 2. estimate error (delta)
delta = (y_hat - y) * sigmoid_prime(y_hat)
# 3. calculate adjustments for weights and bias
dW = np.dot(delta, x.T)
dB = delta
# 4. update weights and bias
self.weights = self.weights - eta * dW
self.bias = self.bias - eta * dB
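            # Worked example with hypothetical numbers: for a single input x = [1, 0]^T,
            # target y = 1 and prediction y_hat = 0.6, delta = (0.6 - 1) * 0.6 * 0.4 = -0.096,
            # so dW = [-0.096, 0] and the update above increases the weight on the active input.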
if __name__ == "__main__":
p = Perceptron(2)
# OR gate input combinations and outputs
x = np.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]]).T
y = np.array([[0, 1, 1, 1]])
print("Before Training:\n{} -> {}".format(x.T, p.predict(x)))
p.train(x, y)
print("After Training:\n{} -> {}".format(x.T, p.predict(x)))
|
spy16/snowman
|
scripts/percep.py
|
percep.py
|
py
| 1,414 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72497450429
|
from bs4 import BeautifulSoup
import requests, os
#Configuration Variables
search_refs = True
build_path = "API"
API_URL = "https://pythonapi.upbge.org/"
#Further addons
headers = {"bge" + os.sep + "types.py" : """
import mathutils
inf = 0
class CListValue:
def __init__(self, ctype):
self.__ret__ = ctype
self.__i__ = None
self.__itf__ = False
def __instanceme__(self):
if self.__i__ == None:
self.__i__ = self.__ret__()
return self.__i__
def __getitem__(self, key): return self.__instanceme__()
def __setitem__(self, key, val): return self.__instanceme__()
def get(self, key): return self.__instanceme__()
def __iter__(self): return self
def __next__(self):
self.__itf__ = not self.__itf__
if self.__itf__: return self.__instanceme__()
else: raise StopIteration
""",
"bge" + os.sep + "logic.py" :
"""globalDict = {}
keyboard = None
mouse = None
joysticks = []
"""}
erase = {"bge" + os.sep + "logic.py" : [
"""globalDict = None
keyboard = None
mouse = None
joysticks = None"""]}
fixes = {
"RandomMusic": [(", transition=(5)", ", transition=(5,0,0))")]
}
def dataToPath(dp):
i = dp.rfind(".")
return os.path.normpath(dp[:len(dp) if i == -1 else i].replace(".", "/") + ".py")
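# e.g. dataToPath("bge.types.KX_MeshProxy") -> "bge/types.py" (os.sep-normalized) and
# dataToPath("mathutils") -> "mathutils.py"; the component after the last dot is dropped.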
class File:
done_files = []
done_urls = []
registred_class = {}
def __init__(self, url, recursive=False, prefix=""):
self.current_class = ""
self.current_module = ""
self.recursive = recursive
self.makePage(url, recursive=recursive, prefix=prefix)
def getType(self, dl, noerror=False):
if dl==None: raise Exception("dl should not be None")
if type(dl)!=str:
try: t = dl.dd.table.tbody.tr.td.get_text()
except Exception: return "None"
else: t=dl
t=t.replace("\t", "")
t=t.replace("‘s", "")
#Correctors
if t == "MeshProxy": t = "KX_MeshProxy"
if t == "boolen": t = "bool"
#Registred
if t == self.current_class: return "self"
if t in File.registred_class.keys():
m = File.registred_class[t]
if self.current_module == m: return t + "()"
else: return m + '.' + t +"()"
for k, v in File.registred_class.items():
m = v+'.'+k
if m == t: return m + "()"
#Direct addressing
if t in ["float", "int", "bool"]: return t + "()"
if t in ["boolean", "boolean.", "bool"]: return "bool()"
if t == "double": return "float()"
if t in ["integer", "bitfield"]: return "int()"
if t in ["string", "str"]: return "str()"
if t in ["matrix", "Matrix", "mathutils.Matrix"]:
if self.current_module != "mathutils": return "mathutils.Matrix()"
else: return "Matrix()"
if t in ["vector", "Vector", "mathutils.Vector"]:
if self.current_module != "mathutils": return "mathutils.Vector()"
else: return "Vector()"
if t == "list" and not noerror: return "list()"
if t == "dict" and not noerror: return "dict()"
if t == "tuple" and not noerror: return "tuple()"
if t == "Quaternion":
if self.current_module != "mathutils": return "mathutils.Quaternion()"
else: return "Quaternion()"
#Special cases
if t == "list of functions and/or methods": return "list()"
if t == "3d vector.": return "mathutils.Vector()"
if t == "3-tuple (float, 3-tuple (x, y, z), 3-tuple (x, y, z))": return "(float, (0,0,0), (0,0,0))"
if t.startswith("\n3-tuple (KX_GameObject, 3-tuple (x, y, z), 3-tuple (nx, ny, nz))"):
return "(KX_GameObject, (0,0,0), (0,0,0), KX_PolyProxy, (0,0))"
if t == "list [x, y]": return "[0,0]"
if t in ["(integer, integer)", "(int,int)", "(int, int)"]: return "(0,0)"
if t == "list [str]": return "[str()]"
if t == "list [r, g, b]": return "[0,0,0]"
if t == "list[x, y, z]": return "[0,0,0]"
if t == "(Vector, float) pair": return "(Vector(), float())"
if t == "Matrix4x4 (read only)": return "mathutils.Matrix()"
if t == "tuple of two ints": return "(0,0)"
if t == "sequence of two ints": return "[0,0]"
if t == "sequence of two floats": return "[0.0,0.0]"
if t == "sequence of three ints": return "[0,0,0]"
if t == "sequence of four sequences of two ints": return "[[0,0],[0,0],[0,0],[0,0]]"
if t == "sequence of four sequences of five ints": return "[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]"
if t == "Buffer\n": return "bgl.Buffer()"
if t == "sequence supporting index/string lookups and iteration.": return "dict()"
#Addressing of containers
for st in ["list of ", "CListValue of "]:
if t.startswith(st):
h=self.getType(t[len(st):], True)
if h != "None":
if h.endswith("()"): h=h[:-2]
if h=="self": h=self.current_class
if self.current_module == "bge.types":
return "CListValue(" + h + ")"
else: return "bge.types.CListValue(" + h + ")"
if t.startswith("Vector"):
if self.current_module != "mathutils": return "mathutils.Vector()"
else: return "Vector()"
#Last chances to get it right
for ch in ['\n', ' ', ',']:
if ch in t:
for x in t.split(ch):
h=self.getType(x, True)
if h!="None": return h
for x in ["non-negative", "None"]:
if x in t: return "None"
if not noerror:
if type(dl) != str and search_refs:
links = dl.dd.table.tbody.tr.td.find_all("a")
url = File.done_urls[-1]
base_url = url[:url.rfind("/")+1]
for l in links:
link = l["href"]
if not "CListValue" in link:
link = base_url + link[:link.rfind("#")]
File(link, recursive=True)
return self.getType(dl, noerror)
print("Unknown type:", t)
return "None"
def getReturnType(self, o):
if o.dd.table == None: return "None"
for tr in o.dd.table.tbody.find_all("tr"):
if tr.th.string=="Return type:":
return self.getType(tr.td.get_text())
return "None"
def makePage(self, url, tab='', recursive=False, prefix=""):
if url in File.done_urls: return
else: File.done_urls.append(url)
if not url.endswith(".html"):
print("Skipped:", url)
return
print("Building page: ", url)
r = requests.get(url).text
soup = BeautifulSoup(r, "html.parser")
body = soup.body.find("h1").parent
if body.p.get_text().startswith("base class"):
link = body.p.a["href"]
link = url[:url.rfind("/")+1] + link[:link.rfind("#")]
if recursive==True:
File(link, recursive)
#Get current module, autodetect class vs module using case sensitive.
self.current_module = prefix + url[url.rfind('/')+1:url.rfind(".html")]
i = self.current_module.rfind(".")
if i != -1:
if not self.current_module.split(".")[-1][0:1].islower():
self.current_module = self.current_module[:i]
dest = url[url.rfind('/')+1:url.rfind(".html")]
else: dest = self.current_module + "."
else: dest = self.current_module + "."
#Identify Class or Module level data
code = ""
for dl in soup.find_all("dl"):
dtype=dl.get("class")
if dtype[0]=="class":
code += '\n' + self.makePythonClass(dl) + '\n'
if dtype[0]=="data":
name = dl.dt["id"]
#Make sure it's at module level
if len(name.split('.')) == len(self.current_module.split('.'))+1:
value = "None"
for th in dl.find_all("th"):
if th.get_text() == "Value:":
value = th.parent.td.get_text()
code += name.split('.')[-1] + " = " + value + "\n"
if dtype[0]=="function":
name = dl.dt["id"]
if len(name.split('.')) == len(self.current_module.split('.'))+1:
code += self.writeFunction(dl, False, '')
#Write the file
odest = dataToPath(dest)
dest = build_path + os.sep + odest
if os.sep in dest:
os.makedirs(os.path.dirname(dest), exist_ok=True)
if dest in File.done_files:
with open(dest, "a+", encoding="utf-8") as out: out.write(code)
else:
try: code = headers[odest] + code
except KeyError: pass
try:
for x in erase[odest]: code=code.replace(x, "")
except KeyError: pass
with open(dest, "w", encoding="utf-8") as out: out.write(code)
File.done_files.append(dest)
def makePythonClassTitle(self, dt):
cn = dt["id"]
self.current_class = cn[cn.rfind(".")+1:]
File.registred_class[self.current_class] = self.current_module
code = "class " + self.current_class + '('
for x in dt.find_all("em"):
if x.get("class"): continue
if not x.string[0].isupper(): continue
if x.string in ["A", "B", "C", "D", "E", "F"]: continue
code += x.string + ','
if code.endswith(","): return code[:-1] + '):\n'
else: return code [:-1]+ ":\n"
def makePythonClass(self, dl, tab=''):
tab+='\t'
docstring = '"""' + dl.dd.p.get_text() + '"""'
code = self.makePythonClassTitle(dl.dt) + tab + docstring + '\n\n'
temp_code = tab + "def __init__(self, "
for x in dl.dt.find_all("em"):
if x.get("class"): continue
if not x.string[0].islower() and not x.string in ["A", "B", "C", "D", "E", "F"]: continue
if not "=" in x.string: temp_code += x.string+"=None, "
else:
if x.string.split("=")[1][0]== '<':
temp_code += x.string.split("=")[0] + "=None, "
else:
temp_code += x.string + ', '
temp_code = temp_code[:-2] + "):\n"
tab+='\t'
for o in dl.dd.find_all("dl"):
if o["class"][0]=="data":
temp_code += tab + "self." + o.dt.code.string + " = int()\n"
if o["class"][0]=="attribute":
temp_code += tab + "self." + o.dt.code.string + " = " + self.getType(o) + '\n'
if not temp_code.endswith(":\n"): code += temp_code
tab=tab[:-1]
for o in dl.dd.find_all("dl"):
if o["class"][0]=="method":
code += self.writeFunction(o, True, tab)
if self.current_class in fixes:
for el in fixes[self.current_class]:
x, y = el
code = code.replace(x, y)
return code
def writeFunction(self, o, is_method=True, tab='\t'):
if is_method:
code = '\n' + tab + "def " + o.dt.code.string + "(self, "
else:
code = '\n' + tab + "def " + o.dt.find_all("code")[-1].string + "("
for arg in o.dt.find_all("em"):
m = arg.string.split("=")
if len(m)>1 and any([m[1].startswith(x) for x in ["KX_", "IMB_"]]):
code += m[0] + '=None, '
else: code += arg.string + ', '
if code.endswith("("): code += "):"
else: code = code[:-2]+"):"
try:
docstring = '"""' + o.dd.p.get_text() + '"""'
code += '\n' + tab + '\t' + docstring + '\n'
except Exception: code += " pass\n"
rt = self.getReturnType(o)
if rt != "None":
if code.endswith(" pass\n"): code=code[:-len(" pass\n")]+"\n"
tab+='\t'
if "bge." in rt: code += tab + "import bge\n"
code += tab + "return " + rt + '\n'
tab=tab[:-1]
if "deprecated" in code or "Deprecated" in code: return ""
return code
def build(url): File(url, recursive=True, prefix="core." if "api/" in url else "")
def build_bge(url):
build(url + "mathutils.html")
build(url + "bge.types.KX_MeshProxy.html")
build(url + "bge.types.KX_CharacterWrapper.html")
build(url + "bge.types.KX_VehicleWrapper.html")
build(url + "bge.types.SCA_PythonController.html")
build(url + "bge.types.KX_Scene.html")
build(url + "bge.logic.html")
build(url + "bge.texture.html")
build(url + "bge.events.html")
build(url + "bge.app.html")
build(url + "bge.constraints.html")
init="from . import logic, types, texture, events, app, constraints"
init_path = build_path + os.sep + "bge" + os.sep + "__init__.py"
with open(init_path, "w", encoding="utf-8") as out: out.write(init)
def build_core(url):
build(url + "api/media.html")
build(url + "api/event.html")
build(url + "api/sequencer.html")
build(url + "api/utils.html")
init="from . import media, event, utils, sequencer\nmedia.music=media.AudioFile()"
init_path = build_path + os.sep + "core" + os.sep + "__init__.py"
with open(init_path, "w", encoding="utf-8") as out: out.write(init)
def test():
test_bge()
test_core()
def test_bge():
import traceback
sys.path.append(build_path)
try:
import mathutils, bge
v=mathutils.Vector()
m=mathutils.Matrix()
scn = bge.logic.getCurrentScene()
o = scn.objects["some"]
a=o.isPlayingAction()
b=o.parent.addDebugProperty("LOL")
o.endObject()
print("Test BGE: OK")
except Exception: traceback.print_exc()
def test_core():
import traceback
sys.path.append(build_path)
try:
import core
core.media.music.filepath = ""
print("Test CORE: OK")
except Exception: traceback.print_exc()
build_path = os.path.normpath(build_path)
import sys
if len(sys.argv) == 1:
build_bge(API_URL)
build_core("http://coredoc.royalwebhosting.net/")
test()
print("Done.")
if len(sys.argv) == 2:
if sys.argv[1] == "-test": test()
|
elmeunick9/UPBGE-CommunityAddon
|
documentation/BGEMockGen/make.py
|
make.py
|
py
| 14,473 |
python
|
en
|
code
| 6 |
github-code
|
6
|
33126999128
|
from sklearn.model_selection import train_test_split
from src.config import config
from mindspore import Tensor
import mindspore
class ModelDataProcessor:
def __init__(self):
self.get_dict()
def get_dict(self):
self.word_dict = {}
with open(config.vocab_file, 'r') as f:
cnt = 0
for line in f:
line = line.rstrip()
self.word_dict[line] = cnt
cnt += 1
def process_file(self, file_name:str):
setences_list = []
with open(file_name, 'r', encoding='Windows-1252') as f:
for line in f:
text = line.rstrip().split()
setences_list.append(text)
return setences_list
def process_data(self, file_name_pos, file_name_neg):
setences_list_pos = self.process_file(file_name_pos)
setences_list_neg = self.process_file(file_name_neg)
        # Add the labels
setences_list = setences_list_pos + setences_list_neg
labels = [1 for i in range(len(setences_list_pos))] + [0 for i in range(len(setences_list_neg))]
        # Build the dataset
X_train, X_test, y_train, y_test = train_test_split(setences_list, labels, test_size=0.3, shuffle=True, random_state=0, stratify=labels)
return X_train, X_test, y_train, y_test
def get_data(self):
        # Provide the split dataset to the training script
file_name_pos = './data/rt-polaritydata/pos.txt'
file_name_neg = './data/rt-polaritydata/neg.txt'
X_train, X_test, y_train, y_test = self.process_data(file_name_pos, file_name_neg)
return X_train, X_test, y_train, y_test
def get_data_loader(self):
X_train, X_test, y_train, y_test = self.get_data()
        # The text also needs to be encoded here (word -> id lookup below)
train_text_ids = [[self.word_dict[word] for word in item] for item in X_train]
test_text_ids = [[self.word_dict[word] for word in item] for item in X_test]
return train_text_ids, test_text_ids, y_train, y_test
def get_batch(self, x, y):
assert len(x) == len(y) , "error shape!"
        n_batches = int(len(x) / config.batch_size) # count how many complete batches there are
for i in range(n_batches - 1):
x_batch = x[i*config.batch_size: (i + 1)*config.batch_size]
y_batch = y[i*config.batch_size: (i + 1)*config.batch_size]
lengths = [len(seq) for seq in x_batch]
max_length = max(lengths)
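            # e.g. (hypothetical values) if x_batch = [[3, 1, 4], [5, 9]] then
            # max_length = 3 and the loop below zero-pads the second sequence to [5, 9, 0]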
for i in range(len(x_batch)):
x_batch[i] = x_batch[i] + [0 for j in range(max_length-len(x_batch[i]))]
yield x_batch, y_batch
if __name__ == '__main__':
data_processor = ModelDataProcessor()
X_train, X_test, y_train, y_test = data_processor.get_data_loader()
for x_batch, y_batch in data_processor.get_batch(X_train, y_train):
x_batch = Tensor(x_batch, mindspore.int32)
y_batch = Tensor(y_batch, mindspore.int32)
print(x_batch)
print(y_batch)
|
Xie-Minghui/DPCNN_MS0
|
src/data_loader.py
|
data_loader.py
|
py
| 3,040 |
python
|
en
|
code
| 1 |
github-code
|
6
|