metadata | text
---|---
{
"source": "jishnub/HelioseismicKernels.jl",
"score": 3
} |
#### File: HelioseismicKernels.jl/src/click_and_generate_filter.py
```python
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os,sys
from astropy.io import fits
import warnings
class Point():
def __init__(self,Line2D):
self.x = Line2D.get_xdata()[0]
self.y = Line2D.get_ydata()[0]
self.artist = Line2D
def remove(self):
self.artist.remove()
self.artist=None
'''
Shift + click to create points,
press c to finalize the curve,
press d to delete the curve
'''
class Poly():
def __init__(self):
self.points=[]
self.fitted_curve=None
def fit(self,order=4):
#~ If the number of points is not sufficient, remove the fitted polynomial if it exists
if len(self.points)<order+1:
if self.fitted_curve is not None: self.fitted_curve.remove()
self.fitted_curve=None
return (None,None)
#~ If there are sufficient points, fit them with a polynomial function
xcoords = [pt.x for pt in self.points]
ycoords = [pt.y for pt in self.points]
pfit = np.polyfit(xcoords,ycoords,order)
#~ Generate points on a fine grid along the fitted polynomial
#~ This is to plot a continuous line
fit_t = np.polyval(pfit,phi)
#~ Update fitted curve
if self.fitted_curve is not None: self.fitted_curve.remove()
self.fitted_curve,=plt.plot(phi,fit_t,'g')
plt.ylim(0,t_max)
#~ String to format the fitted polynomial like p_2*phi^2 + p_1*phi + p_0
fmtstr=" + ".join(["{}*phi^"+str(i) if i>1 else "{}*phi" if i==1 else "{}" for i in range(len(pfit))[::-1]])
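#~ e.g. for the default order=4 fit this builds "{}*phi^4 + {}*phi^3 + {}*phi^2 + {}*phi + {}"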
polystr=fmtstr.format(*pfit).replace("+ -","- ")
fitstr=[]
for index,p_i in enumerate(pfit[::-1]):
fitstr.append("index("+str(index)+") = "+str(p_i))
fitstr="\n".join(fitstr)
return polystr,fitstr
def remove_match(self,artist):
''' Find and remove the point that was clicked on '''
for point in self.points:
if point.artist == artist:
point.remove()
self.points.remove(point)
break
def clear(self):
''' Refresh the working slate by deleting plots and lines.
Points and lines already finalized are untouched. '''
for point in self.points:
point.remove()
if self.fitted_curve is not None:
self.fitted_curve.remove()
self.fitted_curve=None
self.points=[]
class Track_interactions():
def __init__(self,figure):
self.button_press_event=figure.canvas.mpl_connect('button_press_event', self.onclick)
self.key_press_event=figure.canvas.mpl_connect('key_press_event', self.onpress)
self.key_release_event=figure.canvas.mpl_connect('key_release_event', self.onrelease)
self.pick_event=figure.canvas.mpl_connect('pick_event', self.onpick)
self.shift_pressed=False
self.poly=Poly()
def onpick(self,event):
''' Remove a point when it is clicked on '''
self.poly.remove_match(event.artist)
self.poly.fit()
plt.draw()
def onclick(self,event):
''' Add a point at the (x,y) coordinates of click '''
if event.button == 1 and self.shift_pressed:
pt_artist,=plt.plot([event.xdata],[event.ydata],marker='o',color='b',linestyle='none',picker=5)
self.poly.points.append(Point(pt_artist))
self.poly.fit()
plt.draw()
def onpress(self,event):
if event.key == 'c':
polystr,fitstr = self.poly.fit()
if polystr is None: return
print(polystr,"\n",fitstr,"\n")
self.poly.fitted_curve.set_color("black")
plt.draw()
self.poly=Poly()
elif event.key=="d":
self.poly.clear()
elif event.key=="shift":
self.shift_pressed = True
plt.draw()
def onrelease(self,event):
if event.key=='shift':
self.shift_pressed = False
filename="/home/jishnu/kernels_scratch/kernels/C_phi_t.fits"
data=np.squeeze(fits.open(filename)[0].data)
nt,nphi=data.shape
dnu = 2e-6; T=1/dnu; dt=T/nt;
phi=np.linspace(0,2*np.pi,nphi)
t=np.arange(nt)*dt
t_max = 2*3600
t = t[t<=t_max]
phi_max = np.pi
phi = phi[phi<=phi_max]
data = data[np.ix_(t<=t_max,phi<=phi_max)]
#########################################################################################
figure=plt.figure()
plt.pcolormesh(phi,t,data/abs(data).max(),cmap='Greys',vmax=0.1, vmin=-0.1)
_=Track_interactions(figure)
plt.show()
``` |
{
"source": "jishnuchander/ibmr",
"score": 2
} |
#### File: ibmr/spot_auth/views.py
```python
from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse
import sys
import json
import spotipy
import spotipy.util as util
import requests
import pusher
import time
from .tasks import pusher_task, lyrics, song_suggestions
import random
import string
from SentimentAnalysis import Sentiment_Analysis as sa
from azlyrics.azlyrics import lyrics as az_lyrics  # aliased so it does not shadow the Celery `lyrics` task imported above
# Create your views here.
client_id = "client_id"
client_secret = "client_secret"
lyrics_api_key = "lyrics_api_key"
pusher_client = pusher.Pusher(
app_id='app_id',
key='key',
secret='secret',
cluster='cluster',
ssl=True
)
def index(request):
return render(request, 'spot-auth/main.html')
def mob_view(request):
return render(request, 'spot-auth/mob_home.html')
def upload(request):
return render(request, 'spot-auth/upload.html')
def web_view(request):
return render(request, 'spot-auth/auth.html', {'image': False})
def show_tracks(tracks):
for i, item in enumerate(tracks['items']):
track = item['track']
print(" %d %32.32s %s" % (i, track['artists'][0]['name'],
track['name']))
def callback(request):
code = request.GET.get("code")
url = "https://accounts.spotify.com/api/token"
grant_type = "authorization_code"
redirect_uri = "http://127.0.0.1:8000/callback/"
client = {'client_id': client_id, 'client_secret': client_secret,
'grant_type': grant_type, 'redirect_uri': redirect_uri, 'code': code}
res = requests.post(url, data=client)
r_json = res.json()
access_token = r_json['access_token']
token_type = r_json['token_type']
scope = r_json['scope']
expires_in = r_json['expires_in']
refresh_token = r_json['refresh_token']
if access_token:
sp = spotipy.Spotify(auth=access_token)
user = sp.current_user()
user_name = user['display_name']
user_profile = user['external_urls']['spotify']
user_id = user['id']
img_uri = user['images'][0]['url']
playlist = sp.playlist('2YRe7HRKNRvXdJBp9nXFza', fields="tracks,next")
tracks = playlist['tracks']
lyrics.delay(user_name, user_id, img_uri, tracks, lyrics_api_key)
context = {'image': True, 'uri': img_uri, 'user_id': user_id}
return render(request, 'spot-auth/callback.html', context)
def auth(request):
# scope = "user-top-read"
scope = "user-library-read playlist-read-private user-read-currently-playing user-read-recently-played user-follow-read user-top-read user-read-email"
url = "https://accounts.spotify.com/authorize"
redirect_uri = "http://127.0.0.1:8000/callback/"
params = {'client_id': client_id, 'scope': scope, 'redirect_uri': redirect_uri}
return redirect(f"https://accounts.spotify.com/authorize?client_id={client_id}&redirect_uri={redirect_uri}&scope={scope}&response_type=code&state=1234abc&show_dialog=false")
def guest(request):
context = {'image': True, 'uri': 'guest', 'user_id': 'guest'}
return render(request, 'spot-auth/callback.html', context)
def vision_api(request):
pusher_channel = randomword(15)
pusher_event = randomword(15)
pusher_key = '<KEY>'
context = {'pusher_channel': pusher_channel,
'pusher_event': pusher_event, 'pusher_key': pusher_key}
import io
import chardet
import base64
from PIL import Image, ImageDraw
file_name = request.FILES['myfile'].file
user_id = request.POST.get('userid')
login = request.POST.get('login')
count = request.POST.get('upload_count')
try:
file = base64.b64encode(file_name.getvalue()).decode()
except:
file_name.seek(0)
data = file_name.read()
file_raw = io.BytesIO(data)
image = Image.open(file_raw)
a, b = image.size
print(a, b, "this is original size")
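# Resize to roughly one megapixel while (approximately) preserving the
# aspect ratio: r*c ~ 1e6 and c/r ~ a/b (width/height)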
r = int(((1000000*b)/a)**(0.5))
c = int(1000000/r)
print(c, r, "this is converted")
image = image.resize((c, r), Image.ANTIALIAS)
output = io.BytesIO()
image.save('resized_image.jpg', quality=95)
with open("resized_image.jpg", "rb") as image_file:
file = base64.b64encode(image_file.read()).decode()
print(image.size)
song_suggestions.delay(file, pusher_channel, pusher_event, user_id, login, count)
return render(request, 'spot-auth/results.html', context)
def results(request):
return render(request, 'spot-auth/results.html')
def task(uid):
pusher_client.trigger(uid, 'my-event', {'message': 'hello world 0'})
time.sleep(2)
pusher_client.trigger(uid, 'my-event', {'message': 'hello world 2'})
time.sleep(2)
pusher_client.trigger(uid, 'my-event', {'message': 'hello world 4'})
time.sleep(2)
pusher_client.trigger(uid, 'my-event', {'message': 'hello world 6'})
print('6')
return
def randomword(length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def pi(request):
pusher_channel = randomword(15)
pusher_event = randomword(15)
pusher_key = '<KEY>'
pusher_task.delay(pusher_channel, pusher_event)
context = {'pusher_channel': pusher_channel,
'pusher_event': pusher_event, 'pusher_key': pusher_key}
return render(request, 'spot-auth/pusher.html', context)
def test(request):
return render(request, 'spot-auth/main.html')
``` |
{
"source": "jishnuchander/NMT-Chatbot",
"score": 3
} |
#### File: jishnuchander/NMT-Chatbot/data_format.py
```python
import sqlite3
import json
from datetime import datetime
timeframe = '2011-08'
sql_transaction = []
connection = sqlite3.connect('{}.db'.format(timeframe))
c = connection.cursor()
def create_table():
c.execute("""CREATE TABLE IF NOT EXISTS parent_reply
(parent_id TEXT PRIMARY KEY, comment_id TEXT UNIQUE,
parent TEXT, comment TEXT, subreddit TEXT, unix TEXT,
score INT)""")
def format_data(data):
data = data.replace("\n"," newlinechar ").replace("\r"," newlinechar ").replace('"',"'")
return data
def transaction_bldr(sql):
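# Buffer statements and commit them in batches of 1000 to cut down on
# per-commit overhead; statements that fail to execute are silently skipped.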
global sql_transaction
sql_transaction.append(sql)
if len(sql_transaction) > 1000:
c.execute('BEGIN TRANSACTION')
for s in sql_transaction:
try:
c.execute(s)
except:
pass
connection.commit()
sql_transaction = []
def find_parent(pid):
sql = "SELECT comment FROM parent_reply WHERE comment_id = '{}' LIMIT 1".format(pid)
c.execute(sql)
result = c.fetchone()
if result !=None:
return result[0]
else:
return False
#except Exception as e:
# return False
def find_existing_score(pid):
sql = "SELECT score FROM parent_reply WHERE parent_id = '{}' LIMIT 1".format(pid)
c.execute(sql)
result = c.fetchone()
if result !=None:
return result[0]
else:
return False
def acceptable(data):
if len(data.split(' ')) > 50 or len(data) < 1:
return False
elif len(data) > 1000:
return False
elif data == '[deleted]':
return False
elif data == '[removed]':
return False
else:
return True
def sql_insert_replace_comment(commentid,parentid,parent,comment,subreddit,time,score):
try:
sql = """UPDATE parent_reply SET parent_id = ?, comment_id = ?, parent = ?, comment = ?, subreddit = ?, unix = ?, score = ? WHERE parent_id =?;""".format(parentid, commentid, parent, comment, subreddit, int(time), score, parentid)
transaction_bldr(sql)
except Exception as e:
print('s0 insertion',str(e))
def sql_insert_has_parent(commentid,parentid,parent,comment,subreddit,time,score):
try:
sql = """INSERT INTO parent_reply (parent_id, comment_id, parent, comment, subreddit, unix, score) VALUES ("{}","{}","{}","{}","{}",{},{});""".format(parentid, commentid, parent, comment, subreddit, int(time), score)
transaction_bldr(sql)
except Exception as e:
print('s0 insertion',str(e))
def sql_insert_no_parent(commentid,parentid,comment,subreddit,time,score):
try:
sql = """INSERT INTO parent_reply (parent_id, comment_id, comment, subreddit, unix, score) VALUES ("{}","{}","{}","{}",{},{});""".format(parentid, commentid, comment, subreddit, int(time), score)
transaction_bldr(sql)
except Exception as e:
print('s0 insertion',str(e))
if __name__=="__main__":
create_table()
row_counter = 0
paired_rows = 0
with open("C:/Users/Jishnu/.spyder-py3/RC_2011-08".format(timeframe.split('-')[0],timeframe),buffering=1000) as f:
for row in f:
#print(row)
row_counter+=1
row = json.loads(row)
parent_id = row['parent_id']
body = format_data(row['body'])
created_utc = row['created_utc']
score = row['score']
comment_id = row['name']
subreddit = row['subreddit']
parent_data = find_parent(parent_id)
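# Keep only comments with score >= 3; if an acceptable reply to this parent
# already exists, replace it only when the new comment scores higher,
# otherwise insert it (storing the parent text when the parent has been seen).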
if score >=3:
if acceptable(body):
existing_comment_score = find_existing_score(parent_id)
if existing_comment_score:
if score > existing_comment_score:
sql_insert_replace_comment(comment_id,parent_id,parent_data,body,subreddit,created_utc, score)
else:
if acceptable(body):
if parent_data:
sql_insert_has_parent(comment_id,parent_id,parent_data,body,subreddit,created_utc,score)
paired_rows += 1
else:
sql_insert_no_parent(comment_id,parent_id,body,subreddit,created_utc,score)
if row_counter % 100000 == 0:
print('Total Rows Read: {}, Paired Rows: {}, Time: {}'.format(row_counter, paired_rows, str(datetime.now())))
``` |
{
"source": "jishnujayakumar/meta-dataset",
"score": 2
} |
#### File: meta_dataset/data/dump_episodes.py
```python
r"""Dumps Meta-Dataset episodes to disk as tfrecords files.
Episodes are stored as a pair of `{episode_number}-train.tfrecords` and
`{episode_number}-test.tfrecords` files, each of which contains serialized
TFExample strings for the support and query set, respectively.
python -m meta_dataset.data.dump_episodes \
--gin_config=meta_dataset/learn/gin/setups/\
data_config_string.gin --gin_config=meta_dataset/learn/gin/\
setups/variable_way_and_shot.gin \
--gin_bindings="DataConfig.num_prefetch=<num_prefetch>"
"""
import json
import os
from absl import app
from absl import flags
from absl import logging
import gin
from meta_dataset.data import config
from meta_dataset.data import dataset_spec as dataset_spec_lib
from meta_dataset.data import learning_spec
from meta_dataset.data import pipeline
from meta_dataset.data import utils
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
flags.DEFINE_multi_string('gin_config', None,
'List of paths to the config files.')
flags.DEFINE_multi_string('gin_bindings', None,
'List of Gin parameter bindings.')
flags.DEFINE_string('output_dir', '/tmp/cached_episodes/',
'Root directory for saving episodes.')
flags.DEFINE_integer('num_episodes', 600, 'Number of episodes to sample.')
flags.DEFINE_string('dataset_name', 'omniglot', 'Dataset name to create '
'episodes from.')
flags.DEFINE_enum_class('split', learning_spec.Split.TEST, learning_spec.Split,
'See learning_spec.Split for '
'allowed values.')
flags.DEFINE_boolean(
'ignore_dag_ontology', False, 'If True the dag ontology'
' for Imagenet dataset is not used.')
flags.DEFINE_boolean(
'ignore_bilevel_ontology', False, 'If True the bilevel'
' sampling for Omniglot dataset is not used.')
tf.flags.DEFINE_string('records_root_dir', '',
'Root directory containing a subdirectory per dataset.')
FLAGS = flags.FLAGS
def main(unused_argv):
logging.info(FLAGS.output_dir)
tf.io.gfile.makedirs(FLAGS.output_dir)
gin.parse_config_files_and_bindings(
FLAGS.gin_config, FLAGS.gin_bindings, finalize_config=True)
dataset_spec = dataset_spec_lib.load_dataset_spec(
os.path.join(FLAGS.records_root_dir, FLAGS.dataset_name))
data_config = config.DataConfig()
episode_descr_config = config.EpisodeDescriptionConfig()
use_dag_ontology = (
FLAGS.dataset_name in ('ilsvrc_2012', 'ilsvrc_2012_v2') and
not FLAGS.ignore_dag_ontology)
use_bilevel_ontology = (
FLAGS.dataset_name == 'omniglot' and not FLAGS.ignore_bilevel_ontology)
data_pipeline = pipeline.make_one_source_episode_pipeline(
dataset_spec,
use_dag_ontology=use_dag_ontology,
use_bilevel_ontology=use_bilevel_ontology,
split=FLAGS.split,
episode_descr_config=episode_descr_config,
# TODO(evcu) Maybe set the following to 0 to prevent shuffling and check
# reproducibility of dumping.
shuffle_buffer_size=data_config.shuffle_buffer_size,
read_buffer_size_bytes=data_config.read_buffer_size_bytes,
num_prefetch=data_config.num_prefetch)
dataset = data_pipeline.take(FLAGS.num_episodes)
images_per_class_dict = {}
# Ignoring dataset number since we are loading one dataset.
for episode_number, (episode, _) in enumerate(dataset):
logging.info('Dumping episode %d', episode_number)
train_imgs, train_labels, _, test_imgs, test_labels, _ = episode
path_train = utils.get_file_path(FLAGS.output_dir, episode_number, 'train')
path_test = utils.get_file_path(FLAGS.output_dir, episode_number, 'test')
utils.dump_as_tfrecord(path_train, train_imgs, train_labels)
utils.dump_as_tfrecord(path_test, test_imgs, test_labels)
images_per_class_dict[os.path.basename(path_train)] = (
utils.get_label_counts(train_labels))
images_per_class_dict[os.path.basename(path_test)] = (
utils.get_label_counts(test_labels))
info_path = utils.get_info_path(FLAGS.output_dir)
with tf.io.gfile.GFile(info_path, 'w') as f:
f.write(json.dumps(images_per_class_dict, indent=2))
if __name__ == '__main__':
app.run(main)
``` |
{
"source": "jishnujayakumar/MLRC2020-EmbedKGQA",
"score": 2
} |
#### File: KGQA/RoBERTa/dataloader.py
```python
import torch
import random
from torch.utils.data import Dataset, DataLoader
from collections import defaultdict
import os
import unicodedata
import re
import time
from collections import defaultdict
from tqdm import tqdm
import numpy as np
from transformers import *
from helpers import *
class DatasetWebQSP(Dataset):
def __init__(self, data, entities, entity2idx, transformer_name, kg_model):
self.data = data
self.entities = entities
self.entity2idx = entity2idx
self.pos_dict = defaultdict(list)
self.neg_dict = defaultdict(list)
self.index_array = list(self.entities.keys())
self.transformer_name = transformer_name
self.pre_trained_model_name = get_pretrained_model_name(transformer_name)
self.tokenizer = None
self.set_tokenizer()
self.max_length = 64
self.kg_model = kg_model
def set_tokenizer(self):
if self.transformer_name == 'RoBERTa':
self.tokenizer = RobertaTokenizer.from_pretrained(self.pre_trained_model_name)
elif self.transformer_name == 'XLNet':
self.tokenizer = XLNetTokenizer.from_pretrained(self.pre_trained_model_name)
elif self.transformer_name == 'ALBERT':
self.tokenizer = AlbertTokenizer.from_pretrained(self.pre_trained_model_name)
elif self.transformer_name == 'SentenceTransformer':
self.tokenizer = AutoTokenizer.from_pretrained(self.pre_trained_model_name)
elif self.transformer_name == 'Longformer':
self.tokenizer = LongformerTokenizer.from_pretrained(self.pre_trained_model_name)
else:
print('Incorrect transformer specified:', self.transformer_name)
exit(0)
def __len__(self):
return len(self.data)
def pad_sequence(self, arr, max_len=128):
num_to_add = max_len - len(arr)
for _ in range(num_to_add):
arr.append('<pad>')
return arr
def toOneHot(self, indices):
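# Build a multi-hot vector over all entities: 1 at every given (tail/answer)
# entity index, 0 elsewhere.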
indices = torch.LongTensor(indices)
batch_size = len(indices)
vec_len = len(self.entity2idx)
one_hot = torch.FloatTensor(vec_len)
one_hot.zero_()
# one_hot = -torch.ones(vec_len, dtype=torch.float32)
one_hot.scatter_(0, indices, 1)
return one_hot
def __getitem__(self, index):
data_point = self.data[index]
question_text = data_point[1]
question_tokenized, attention_mask = self.tokenize_question(question_text)
head_id = self.entity2idx[data_point[0].strip()]
tail_ids = []
for tail_name in data_point[2]:
tail_name = tail_name.strip()
#TODO: dunno if this is right way of doing things
if tail_name in self.entity2idx:
tail_ids.append(self.entity2idx[tail_name])
tail_onehot = self.toOneHot(tail_ids)
return question_tokenized, attention_mask, head_id, tail_onehot
def tokenize_question(self, question):
if self.transformer_name != "SentenceTransformer":
question = f"<s>{question}</s>"
question_tokenized = self.tokenizer.tokenize(question)
question_tokenized = self.pad_sequence(question_tokenized, self.max_length)
question_tokenized = torch.tensor(self.tokenizer.encode(
question_tokenized, # Question to encode
add_special_tokens = False # special tokens (<s>, </s>) were already added manually above, so none are added here
))
attention_mask = []
for q in question_tokenized:
# 1 means padding token
if q == 1:
attention_mask.append(0)
else:
attention_mask.append(1)
return question_tokenized, torch.tensor(attention_mask, dtype=torch.long)
else:
encoded_que = self.tokenizer.encode_plus(question, padding='max_length', max_length=self.max_length, return_tensors='pt')
return encoded_que['input_ids'][0], encoded_que['attention_mask'][0]
# def _collate_fn(batch):
# print(len(batch))
# exit(0)
# question_tokenized = batch[0]
# attention_mask = batch[1]
# head_id = batch[2]
# tail_onehot = batch[3]
# question_tokenized = torch.stack(question_tokenized, dim=0)
# attention_mask = torch.stack(attention_mask, dim=0)
# return question_tokenized, attention_mask, head_id, tail_onehot
class DataLoaderWebQSP(DataLoader):
def __init__(self, *args, **kwargs):
super(DataLoaderWebQSP, self).__init__(*args, **kwargs)
# self.collate_fn = _collate_fn
``` |
{
"source": "jishnujayakumar/specter",
"score": 2
} |
#### File: specter/smash-bert/shb_train.py
```python
"""## Imports"""
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import spacy
from collections import defaultdict
import sys
from torch.optim import AdamW
import torch.nn.utils.rnn as U
nlp = spacy.load("en_core_web_sm")
import json
from transformers import DistilBertTokenizer, DistilBertModel
import os
bert_variant = os.environ['SHB_BERT_VARIANT']
bert_tokenizer = DistilBertTokenizer.from_pretrained(bert_variant)
NUMPATH = f"{os.environ['ELECTER_HULK_DIR']}/{os.environ['LEGAL_DATA_DIR']}/SHB/{os.environ['SHB_BERT_VARIANT']}"
os.system(f"mkdir -p {NUMPATH}")
filen = f"{NUMPATH}/pre-processed-data.json"
SAVEPATH = f"{NUMPATH}/output/"
"""## Model Hyperparameters"""
EMBEDDING_DIM = 768
BATCH_SIZE = 1
LR = 1e-5
L2REG = 0.00001
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
NUM_EPOCHS = 30
DEV_SPLIT = 0.2
"""# Setup for experiments
### Load numericalized data
"""
ct = 0
with open(filen) as fr:
traindev_num = json.load(fr)
#traindev_num = traindev_num[:10]
#with open(NUMPATH + "test.json") as fr:
# test_num = json.load(fr)
"""## Model"""
class BiLSTMAttn(nn.Module):
def __init__(self, embedding_dim, dropout = 0.1):
super().__init__()
self.embedding_dim = embedding_dim
self.rnn = nn.GRU(embedding_dim, embedding_dim // 2, bidirectional = True, batch_first = True)
self.fc = nn.Linear(embedding_dim, embedding_dim)
self.context = nn.Parameter(torch.rand(embedding_dim, ))
self.dropout = nn.Dropout(dropout)
def forward(self, flat_input, input_lens): # flat_input: [S',E'], input_lens: [B']
batch_size = input_lens.shape[0]
assert flat_input.shape[0] == torch.sum(input_lens), "Mismatch of sequences"
# pack sequence
packed_input = U.pack_sequence(torch.split(flat_input, input_lens.tolist()), enforce_sorted = False)
# pass sequence through RNN
packed_output, _ = self.rnn(packed_input) # [S',E'] --> [S',E']
output = U.pad_packed_sequence(packed_output, batch_first = True)[0] # [S',E'] --> [B',s',E']
activated_output = self.dropout(torch.tanh(self.fc(output))) # [B',s',E'] --> [B',s',E']
# calculate attention probabilities
logits = torch.bmm(activated_output, self.context.expand(batch_size, self.embedding_dim).unsqueeze(2)) # [B',s',E'], [B',E',1] --> [B',s',1]
# [B',s']
input_mask = (torch.arange(input_lens.max(), device=input_lens.device).expand(batch_size, input_lens.max()) < input_lens.unsqueeze(1)).float()
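# mask padded timesteps: they get a large negative logit below so softmax
# assigns them essentially zero attention weight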
logits[input_mask == 0] = -9999
probs = F.softmax(logits, dim = 1)
#print(probs.shape, output.shape)
# calculate final representation
weighted_output = torch.sum(probs * output, dim = 1).squeeze(1) # [B',s',1], [B',s',E'] --> [B',E']
return weighted_output
class DummyLayer(nn.Module):
def __init__(self):
super().__init__()
self.dummy = nn.Parameter(torch.ones(1, dtype = torch.float))
def forward(self,x):
return x + self.dummy - self.dummy
class MashRNN(nn.Module):
def __init__(self, E, device = DEVICE):
super().__init__()
self.embedding_dim = E
self.device = device
# word-level encoder
#self.word_encoder = BiLSTMAttn(self.embedding_dim)
# sentence-level encoder
self.sent_encoder = BiLSTMAttn(self.embedding_dim)
# paragraph-level encoder
self.para_encoder = BiLSTMAttn(self.embedding_dim)
def _seg_to_mask(self, segments):
return (torch.arange(segments.max(), device=segments.device).expand(segments.shape[0], segments.max()) < segments.unsqueeze(1)).float()
# text: [B,w,E], segments: dict(seg_type, [S'])
def forward(self, embedded_text, segments):
batch_size = embedded_text.shape[0]
# convert to packed tensor
# [B,w]
sents_per_doc_mask = self._seg_to_mask(segments['sents_per_doc'])
# [B,w,E] --> [W,E]
#print(embedded_text.shape, segments['sents_per_doc'] )
sentembs = embedded_text.view(-1, self.embedding_dim)[sents_per_doc_mask.view(-1) > 0]
# word-level encoder
# wl_docembs = self.word_encoder(wordembs, segments['words_per_doc']) # [W,E] --> [B,E]
# sentence-level encoder
# sl_sentembs = self.word_encoder(wordembs, segments['words_per_sent']) # [W,E] --> [S,E]
# sl_docembs = self.sent_encoder(sl_sentembs, segments['sents_per_doc']) # [S,E] --> [B,E]
# smash rnn paragraph-level encoder
#pl_sentembs = self.word_encoder(wordembs, segments['words_per_sent']) # [W,E] --> [S,E]
pl_paraembs = self.sent_encoder(sentembs, segments['sents_per_para']) # [S,E] --> [P,E]
pl_docembs = self.para_encoder(pl_paraembs, segments['paras_per_doc']) # [P,E] --> [B,E]
document_embeddings = torch.cat([pl_docembs], dim = 1) # 2 * [B,E] --> [B,2E]
return document_embeddings
class SmashRNN(nn.Module):
def __init__(self, E, device = DEVICE, dropout = 0.1):
super().__init__()
#self.vocab_size = V
self.embedding_dim = E
self.device = device
#Smash RNN
#self.embedder = nn.Embedding(self.vocab_size, self.embedding_dim, padding_idx = pad_idx)
#Smash BERT
self.embedder = DistilBertModel.from_pretrained(bert_variant)
# freeze the lower transformer layers (0-4); only the top layer (5) is fine-tuned
for name, param in self.embedder.named_parameters():
if "transformer" in name:
layernum = int(name.split('.')[2])
param.requires_grad = False if layernum < 5 else True
self.dummy_layer = DummyLayer()
self.src_encoder = MashRNN(self.embedding_dim, device)
self.trg_encoder = MashRNN(self.embedding_dim, device)
self.fc = nn.Linear(2 * self.embedding_dim, self.embedding_dim)
self.classifier = nn.Linear(self.embedding_dim, 1)
self.dropout = nn.Dropout(dropout)
def forward(self, src_text, src_segs, trg_text, trg_segs):
#Smash RNN
#src_wordemb = self.embedder(src_text) # [B,ws] --> [B,ws,E]
#trg_wordemb = self.embedder(trg_text) # [B,wt] --> [B,wt,E]
#src_wordemb = self.embedder(src_text)[0][:, 0, :]
#trg_wordemb = self.embedder(trg_text)[0][:, 0, :]
src_sents = src_text # [S,SL]
trg_sents = trg_text
mask_src = (src_sents > 0).float().cuda()
mask_trg = (trg_sents > 0).float().cuda()
#print(src_sents.shape, mask_src)
src_sentemb = self.embedder(src_sents, self.dummy_layer(mask_src))[0][:, 0, :] # [S,E]
#print(src_sentemb.shape)
trg_sentemb = self.embedder(trg_sents, self.dummy_layer(mask_trg))[0][:, 0, :] # [S,E]
#docs = U.pad_sequence(torch.split(sents, batch['doc_lens'].tolist()), batch_first=True) # [D,DL,E]
src_docemb = self.src_encoder(src_sentemb, src_segs) # [B,ws,E] --> [B,3E]
trg_docemb = self.trg_encoder(trg_sentemb, trg_segs) # [B,ws,E] --> [B,3E]
common_embedding = torch.cat([src_docemb, trg_docemb], dim = 1) # 2 * [B,3E] --> [B,6E]
activations = self.dropout(F.relu(self.fc(common_embedding))) # [B,6E] --> [B,E]
logits = self.dropout(self.classifier(activations)).squeeze(1) # [B,E] --> [B,1] --> [B]
predictions = (torch.sigmoid(logits) > 0.5).int()
return logits, predictions
TRAINDEV_LEN = len(traindev_num)
DEV_LEN = int(DEV_SPLIT * TRAINDEV_LEN)
TRAIN_LEN = TRAINDEV_LEN - DEV_LEN
train_num = traindev_num[:TRAIN_LEN]
dev_num = traindev_num[TRAIN_LEN:]
"""### Dataset Statistics"""
print("\tDataset Statistics")
print("Train Dataset size:", TRAIN_LEN)
print("Dev Dataset size:", DEV_LEN)
"""## Batching
Shuffle data randomly, convert numericalized batches into padded tensors and split into batches
"""
import random
def batchify(dataset, batch_size = BATCH_SIZE, device = DEVICE):
#shuffled_dataset = random.sample(dataset, len(dataset))
index = 0
while index < len(dataset):
sliced = dataset[index : min(index + batch_size, len(dataset))]
batch = {'src_text': [], 'trg_text': [], 'src_segs': defaultdict(list), 'trg_segs': defaultdict(list), 'labels': []}
for instance in sliced:
for i in range(instance['src_segs']['sents_per_doc'][0]):
batch['src_text'].append(torch.tensor(instance['src_text'][i], dtype = torch.long))
for j in range(instance['trg_segs']['sents_per_doc'][0]):
batch['trg_text'].append(torch.tensor(instance['trg_text'][j], dtype = torch.long))
for segname, segbound in instance['src_segs'].items():
batch['src_segs'][segname].extend(segbound)
for segname, segbound in instance['trg_segs'].items():
batch['trg_segs'][segname].extend(segbound)
batch['labels'].append(instance['label'])
batch['src_text'] = U.pad_sequence(batch['src_text'], batch_first = True).to(device)
batch['trg_text'] = U.pad_sequence(batch['trg_text'], batch_first = True).to(device)
for segname, segbound in batch['src_segs'].items():
batch['src_segs'][segname] = torch.tensor(segbound, dtype = torch.int).to(device)
for segname, segbound in batch['trg_segs'].items():
batch['trg_segs'][segname] = torch.tensor(segbound, dtype = torch.int).to(device)
batch['labels'] = torch.tensor(batch['labels'], dtype = torch.float).to(device)
# print(batch['labels'])
yield batch
index += len(batch['labels'])
"""## Define model and optimizer"""
model = SmashRNN(EMBEDDING_DIM, device = DEVICE).to(DEVICE)
optimizer = AdamW(model.parameters(), lr = LR, weight_decay = L2REG)
"""## Evaluate metrics
Calculate Precision, Recall and F1-Score
"""
from sklearn.metrics import precision_score, recall_score, f1_score
def eval_metrics(pred, gold):
metrics = {}
metrics['precision'] = precision_score(gold, pred)
metrics['recall'] = recall_score(gold, pred)
metrics['f1'] = f1_score(gold, pred)
return metrics
"""## Training Loop
Run one complete epoch in either train or eval mode. Perform forward (and backward) passes, collect losses and predictions, and evaluate metrics.
"""
import math
def traindev_loop(dataset, batch_size = BATCH_SIZE, train = False):
num_batches = math.ceil(len(dataset) / batch_size)
if train:
model.train()
else:
model.eval()
all_pred, all_gold = [], []
total_loss = 0
skipped = 0
# print(num_batches)
with tqdm(total = num_batches, file = sys.stdout) as pbar:
if train:
pbar.set_description("Train:")
else:
pbar.set_description("Dev:")
for i, batch in enumerate(batchify(dataset, batch_size=batch_size)):
# print("%5d"%i, end='\r')
if DEVICE == 'cuda':
torch.cuda.empty_cache()
try:
logits, pred = model(batch['src_text'], batch['src_segs'], batch['trg_text'], batch['trg_segs'])
gold = batch['labels']
# print(logits.dtype, gold.dtype)
loss = F.binary_cross_entropy_with_logits(logits, gold)
if train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
all_pred.extend(pred.tolist())
all_gold.extend(gold.tolist())
total_loss += loss.item()
except RuntimeError:
skipped += 1
continue
finally:
pass
pbar.update(1)
metrics = eval_metrics(all_pred, all_gold)
mean_loss = total_loss / num_batches
return mean_loss, metrics, skipped
"""## Training and Development Phase
### Load saved model
"""
#try:
#model.load_state_dict(torch.load(SAVEPATH + "checkpoint.pt", map_location = DEVICE))
#except FileNotFoundError:
# pass
"""### Load best metrics"""
#try:
# with open(SAVEPATH + "dev_metrics.json") as fr:
# bestmetrics = json.load(fr)
#except FileNotFoundError:
# bestmetrics = {'precision': 0, 'recall': 0, 'f1': 0}
#for metric, value in bestmetrics.items():
# print(metric, ':', value)
"""### Run epochs"""
bestmetrics = {'f1':0, 'precision' : 0, 'recall' : 0}
for epoch in range(NUM_EPOCHS):
trainloss, trainmetrics, trainskipped = traindev_loop(train_num, batch_size = BATCH_SIZE, train = True)
devloss, devmetrics, devskipped = traindev_loop(dev_num, batch_size = BATCH_SIZE, train = False)
print("Epoch: %4d Tr-Loss: %8.3f Tr-F1: %5.4f Tr-Sk: %5d D-Loss: %8.3f D-F1: %5.4f D-Sk: %5d" % (epoch + 1, trainloss, trainmetrics['f1'], trainskipped, devloss, devmetrics['f1'], devskipped))
if devmetrics['f1'] > bestmetrics['f1']:
bestmetrics = devmetrics
torch.save(model.state_dict(), SAVEPATH + "best_model.pt")
with open(SAVEPATH + "dev_metrics.json", 'w') as fw:
json.dump(bestmetrics, fw)
torch.save(model.state_dict(), SAVEPATH + "checkpoint.pt")
```
#### File: OtherAlgos/DSDR/dsdr.py
```python
import numpy as np
class DSDR:
"""<NAME>, et al. Document Summarization based onData Reconstruction (2012)
http://www.aaai.org/ocs/index.php/AAAI/AAAI12/paper/viewPaper/4991
"""
@staticmethod
def lin(V, m, lamb):
'''DSDR with linear reconstruction
Parameters
==========
- V : 2d array_like, the candidate data set
- m : int, the number of sentences to be selected
- lamb : float, the trade off parameter
Returns
=======
- L : list, the set of m summary sentences indices
'''
L = []
B = np.dot(V, V.T) / lamb
n = len(V)
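# Greedy selection: each step scores every candidate sentence by how much of
# the remaining (unreconstructed) content it explains, picks the best one,
# then deflates B with a rank-one update so covered content stops contributing.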
for t in range(m):
scores = []
for i in range(n):
score = np.sum(B[:,i] ** 2) / (1. + B[i,i])
scores += [(score, i)]
max_score, max_i = max(scores)
L += [max_i]
B = B - np.outer(B[:,max_i], B[:,max_i]) / (1. + B[max_i,max_i])
return L
@staticmethod
def non(V, gamma, eps=1.e-8):
'''DSDR with nonnegative linear reconstruction
Parameters
==========
- V : 2d array_like, the candidate sentence set
- gamma : float, > 0, the trade off parameter
- eps : float, for converge
Returns
=======
- beta : 1d array, the auxiliary variable to control candidate sentences
selection
'''
V = np.array(V)
n = len(V)
A = np.ones((n,n))
beta = np.zeros(n)
VVT = np.dot(V, V.T) # V * V.T
np.seterr(all='ignore')
while True:
_beta = np.copy(beta)
beta = (np.sum(A ** 2, axis=0) / gamma) ** .5
while True:
_A = np.copy(A)
A *= VVT / np.dot(A, VVT + np.diag(beta))
A = np.nan_to_num(A) # nan (zero divide by zero) to zero
if np.sum(A - _A) < eps: break
if np.sum(beta - _beta) < eps: break
return beta
if __name__ == '__main__':
pass
``` |
{
"source": "JishnuJP/random_request",
"score": 3
} |
#### File: JishnuJP/random_request/randomRequest.py
```python
import requests
import json
from random import choice
class randomAgent:
def __init__(self,**kwargs):
with open('useragents.txt') as f:
self.agents = [line.rstrip() for line in f]
for key, value in kwargs.items():
if key == 'Browser':
self.agents = [agent for agent in self.agents if value in agent]
## note: rebuilding the list avoids removing items while iterating; an unmatched 'Browser' value leaves self.agents empty
def getRandomAgent(self):
return choice(self.agents)
class randomProxy:
def __init__(self,**kwargs):
self.url = 'https://www.proxy-list.download/api/v1/get'
self.types = ['http','https','socks4','socks5']
self.anons = ['transparent','anonymous','elite']
for key, value in kwargs.items():
if key == 'Type':
if value in self.types:
self.url = self.url + f'?type={value}'
else:
raise TypeError(f'''{value} is an invalid protocol,
valid protocols are {self.types}''')
elif key =='Anon':
if value in self.anons:
self.url = self.url + f'&anon={value}'
else:
pass
elif key == 'Country':
validity,self.code = self.getCodes(value)
if validity:
self.url = self.url +f'&country={self.code}'
else:
pass
response = requests.get(self.url)
if response.status_code != 200:
raise Exception(f'Request Failed url is : {self.url}')
self.proxies = response.content.decode('utf-8')
self.proxies = self.proxies.split('\r\n')
self.proxies.remove('')
def getCodes(self, Country):
with open('country_codes.json') as f:
country_codes = json.load(f)
if Country == None:
return False, None
for i in country_codes:
if Country.upper() in i.upper():
return True,country_codes[i]
return False, None  # no matching country found
def getRandomProxy(self):
return choice(self.proxies)
class randomRequest(randomAgent, randomProxy):
def __init__(self,**kwargs):
# expected arguments = > header, proxy, User-Agent
#super().__init__(**kwargs)
randomAgent.__init__(self,**kwargs)
randomProxy.__init__(self,**kwargs)
self.head = {}
for key, value in kwargs.items():
if key =='header' :
self.head = value
elif key == 'proxy':
if type(value)==list:
self.proxies = value
elif type(value) == str:
self.proxies = [value]
else:
raise TypeError(f'{type(value)} is not acceptable, give list or string')
def verifty(self, headers, proxies):
try:
resp = requests.get('https://httpbin.org/get',headers = headers, proxies = proxies)
except:
return False
if resp.status_code == 200:
return True
else:
return False
if __name__ == '__main__':
randReq = randomRequest(Type = 'https')
agent = randReq.getRandomAgent()
proxy = randReq.getRandomProxy()
print(f'Agent : \n {agent} \n Proxy: \n {proxy}')
if randReq.verifty(headers = {'User-Agent':agent},proxies = {'https':proxy}):
print('Request is Valid')
else:
print('Request is Invalid')
``` |
{
"source": "jishnup11/-Fast-CNN-Fast-Optimisation-of-CNN-Architecture-Using-Genetic-Algorithm",
"score": 4
} |
#### File: jishnup11/-Fast-CNN-Fast-Optimisation-of-CNN-Architecture-Using-Genetic-Algorithm/network.py
```python
import random
import logging
from train import train_and_score
class Network():
def __init__(self, nn_param_choices=None):
self.accuracy = 0.
self.nn_param_choices = nn_param_choices
self.network = {} # (dic): represents CNN network parameters
def create_random(self):
"""Create a random network."""
for key in self.nn_param_choices:
self.network[key] = random.choice(self.nn_param_choices[key])
def create_set(self, network):
self.network = network
def train(self, dataset):
if self.accuracy == 0.:
self.accuracy = train_and_score(self.network, dataset)
def print_network(self):
"""Print out a network."""
logging.info(self.network)
logging.info("Network accuracy: %.2f%%" % (self.accuracy * 100))
```
#### File: jishnup11/-Fast-CNN-Fast-Optimisation-of-CNN-Architecture-Using-Genetic-Algorithm/train.py
```python
from keras.datasets import mnist, cifar10
from keras.models import Sequential
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.optimizers import SGD
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=3)
def get_cifar10():
"""Retrieve the CIFAR dataset and process the data."""
# Set defaults.
batch_size = 64
# Get the data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
classes = np.unique(y_train)
nb_classes = len(classes)
nRows,nCols,nDims = x_train.shape[1:]
x_train = x_train.reshape(x_train.shape[0], nRows, nCols, nDims)
x_test = x_test.reshape(x_test.shape[0], nRows, nCols, nDims)
input_shape = (nRows, nCols, nDims)
#print(input_shape)
# Change to float datatype
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale the data to lie between 0 to 1
x_train /= 255
x_test /= 255
# Change the labels from integer to categorical data
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
# Get our network parameters.
nb_layers = network['nb_layers']
activation = network['activation']
learning_rate = network['learning_rate']
weight_decay = network['weight_decay']
momentum = network['momentum']
model = Sequential()
# Add each layer.
for i in range(nb_layers):
# Need input shape for first layer.
if i == 0:
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
else:
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25)) # hard-coded dropout
# Output layer.
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(nb_classes, activation='softmax'))
sgd = SGD(lr=learning_rate, momentum=momentum, decay= weight_decay)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
metrics=['accuracy'])
return model
def train_and_score(network, dataset):
if dataset == 'cifar10':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_cifar10()
model = compile_model(network, nb_classes, input_shape)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=30, # using early stopping, so no real limit
verbose=0,
validation_data=(x_test, y_test),
callbacks=[early_stopper])
score = model.evaluate(x_test, y_test, verbose=0)
return score[1] # 1 is accuracy. 0 is loss.
``` |
{
"source": "jishnupdas/Photometry-tools",
"score": 2
} |
#### File: jishnupdas/Photometry-tools/filter.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from astropy import time, coordinates as coord, units as u
from astropy.io import fits
from astropy.time import Time
import glob
import os
#%%
# =============================================================================
# This script runs through files containing detected sources from Sextractor and Psfex
# source files have the following columns, refer back to photometry script
# and sextractor to add/remove other columns
# Index(['X_IMAGE', 'Y_IMAGE', 'ALPHAWIN_J2000', 'DELTAWIN_J2000', 'SNR_WIN',
# 'MAG_POINTSOURCE', 'MAGERR_POINTSOURCE'],
# dtype='object')
#
# The output is a dataframe and Lightcurve csv written to disk
# =============================================================================
#%%
'''
The coordinates for variable and check stars
'''
V_x,V_y = 1230,1750
C1x,C1y = 1670,288
C2x,C2y = 960,2054
#%%
#root_folder = '../../'
#os.chdir(root_folder)
os.chdir('/home/jishnu/JCBT/TYC3315/14Nov2/reduced/sources')
os.chdir('..')
ffiles = glob.glob('*proc.fits')
ffiles.sort()
#%%
def read(filename): #read a csv file and returns a dataframe
df = pd.read_csv(filename,sep='\t')
df = df.drop([df.columns[0]],axis=1)
return df
#%%
def search(x,y,df,t):
'''
#here x,y are pixel coords of target star, t is the size of the box
used for search
'''
flt = df[ #condition: keep only sources inside a box of half-width t around (x,y)
(df['X_IMAGE'] < (x+t)) &
(df['X_IMAGE'] > (x-t)) &
(df['Y_IMAGE'] < (y+t)) &
(df['Y_IMAGE'] > (y-t))
]
print(flt)
return flt['MAG_POINTSOURCE'],flt['MAGERR_POINTSOURCE'] #returns pandas series object
#%%
def cone_search(x,y,df,dist):
'''
#another implementation of the previous function
takes in (x,y) coordinates, a dataframe and search radius
returns the magnitude and error if any source is
found around the given coordinates
'''
#uses distance formula to check how close the point is, to given threshold
fdf = df[
(np.sqrt((df['X_IMAGE']-x)**2 +(df['Y_IMAGE']-y)**2) < dist)
]
print(fdf)
return fdf['MAG_POINTSOURCE'],fdf['MAGERR_POINTSOURCE']
#%%
def coord_search(ra,dec,df,dist):
'''
Uses RA,DEC instead of pixel coordinates to search
Use this function only if there is accurate WCS information and the
fits file has been platesolved
'''
fdf = df[ #uses distance formula to check how close the point is, to given threshold
(np.sqrt((df['ALPHAWIN_J2000']-ra)**2 +
(df['DELTAWIN_J2000']-dec)**2) < dist)
]
print(fdf)
if len(fdf['MAG_POINTSOURCE'])==0:
return 0,0 #returns 0 if no sources found
else:
return fdf['MAG_POINTSOURCE'],fdf['MAGERR_POINTSOURCE']
#%%
def sorting(x,y,files,t):
'''
Takes (x,y), files and distance threshold and returns the magnitude
and error of the source located at (x,y)
'''
mag,err = [],[]
for f in files: #honestly this could have been done in one function..
print(f+'\n'*2)
mags = cone_search(x,y,read(f),t) #pick your poison here
mag.append(mags[0])
err.append(mags[1])
print('\n'*2+'-'*50)
return mag,err #returns the magnitude and error columns, as pandas series
#%%
def filter(lst,err): #does the same job as Filter() below; closest-to-mean approach adapted from https://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-value
'''
Input is a pandas series of mag and error; if a search returned more than one
element, the function tries to pick the right source, else it just returns
the single value from the series.
Takes the magnitude and associated error columns and removes duplicates
(sometimes there is more than one source in the given (x,y) and d search region).
The function tries to find the correct source by taking the mean of the list
and rejecting the other sources based on that.
Should not be used if the sources are clumped up and have the same mag.
Needs better filtering; until then, stuck with this dirty way.
'''
opt,Err = [0],[]
for i in range(len(lst)):
try:
if len(lst[i])==1:
opt.append(lst[i].iloc[0])
Err.append(err[i].iloc[0])
else: #fancy pants way to do the earlier function
opt.append(min([lst[i].iloc[n] for n in range(len(lst[i]))],
key=lambda x:abs(x-np.mean(opt))))
Err.append(min([err[i].iloc[n] for n in range(len(err[i]))],
key=lambda x:abs(x-np.mean(opt))))
except:
opt.append(0)
Err.append(0)
return opt[1:],Err #returns 2 lists, mag and err. Do what you want with them
#%%
def Filter(lst,err):
opt = [0] # creating list with one element to avoid np raising error
# while taking mean
Err = []
for i in range(len(lst)):
if len(lst[i])==1:
opt.append(lst[i].iloc[0])
Err.append(err[i].iloc[0])
else:
if (abs(np.mean(opt)-lst[i].iloc[0])) > (abs(np.mean(opt)-lst[i].iloc[1])):
opt.append(lst[i].iloc[1])
Err.append(err[i].iloc[1])
else:
opt.append(lst[i].iloc[0])
Err.append(err[i].iloc[0])
return opt[1:],Err
#%%
def get_info(imageName):
loc = coord.EarthLocation.of_site('vbo')
hdul = fits.open(imageName)
ut=(hdul[0].header['UT'])
ra=(hdul[0].header['RA'])
dec=(hdul[0].header['DEC'])
am=(hdul[0].header['AIRMASS'])
t = Time(hdul[0].header['DATE-OBS'], scale='utc')
jd=(t.jd) #we only use JD.
obs_target = coord.SkyCoord(hdul[0].header['RA'],hdul[0].header['DEC'],
unit = (u.hourangle,u.deg),frame = 'icrs'
)
times = time.Time(t.jd, format='jd',scale='utc', location=loc
)
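# Barycentric correction: convert to TDB and add the light-travel time from
# the observatory to the solar-system barycentre to get BJD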
ltt_bary = times.light_travel_time(obs_target)
time_barycentre = times.tdb + ltt_bary
bjd=(time_barycentre.value)
hdul.close()
return ut,ra,dec,am,jd,bjd
#USAGE
#ut,ra,dec,am,jd,bjd = get_info(imagename)
#%%
df = pd.DataFrame()
ut,ra,dec,am,jd,bjd = [],[],[],[],[],[]
#%%
def main():
for f in range(len(ffiles)):
UT,RA,DEC,AM,JD,BJD = get_info(ffiles[f])
ut.append(UT)
ra.append(RA)
dec.append(DEC)
am.append(AM)
jd.append(JD)
bjd.append(BJD)
folder = 'sources/'
os.chdir(folder)
Files = [i+'_source_cat.csv' for i in ffiles] #can just read the files off disk but where's the fun in that.
m,e = sorting(V_x,V_y,Files,100) #loopy logic of this function! yay me
Vmag,Verr = filter(m,e)
m,e = sorting(C1x,C1y,Files,100)
C1mag,C1err = filter(m,e)
m,e = sorting(C2x,C2y,Files,100)
C2mag,C2err = filter(m,e)
# m,e = sorting(50.381181,47.433372,Files,0.01) #loopy logic of this function! yay me
# Vmag,Verr = filter(m,e)
df = pd.DataFrame()
df = df.assign(
**{'UT':ut,'RA':ra,'DEC':dec,'airmass':am,
'JD':jd,'BJD':bjd,'Vmag':Vmag,'v_err':Verr,
'C1':C1mag,'C1_err':C1err,'C2':C2mag,'C2_err':C2err
})
df['Vmag_d'] = df['Vmag']-df['C2']
df.to_csv('LC.csv')
return df
#%%
df = main()
plt.figure(figsize=(8,6))
plt.plot(bjd,df.Vmag,'o',label='v')
plt.plot(bjd,df.C1,'o',label='C1')
plt.plot(bjd,df.C2,'o',label='C2')
plt.legend()
#%%
plt.plot(bjd,df.Vmag-df.C1,'o',label='v-c1')
plt.plot(bjd,df.Vmag-df.C2,'o',label='v-c2')
plt.plot(bjd,df.C1-df.C2,'o',label='c1-c2')
plt.legend()
``` |
{
"source": "jishnupdas/SOM-Classifier-for-lightcurves",
"score": 3
} |
#### File: jishnupdas/SOM-Classifier-for-lightcurves/Classifier_step1.py
```python
import os
import Lctools
import pandas as pd
import matplotlib.pyplot as plt
#%%
fpath = '/media/jishnu/ExternalHDD/TESSDATA/lc_sector1/clean/'
df = pd.read_csv('low_period.csv')
filenames = list(df.filename)
files = [fpath+f for f in filenames] #make a list with fullpath to file
svpath = '/home/jishnu/Documents/TESS/tess_data/1D_lc/'
impath = '/home/jishnu/Documents/TESS/tess_data/1D_lc_plots/'
#%%
def main(file):
'''
parameters:
-----------
file : a text file with 3 columns; MJD,mag & mag_error
function:
---------
performs lombscargle on the timeseries data,
computes the period after removing any noise frequencies
checks if the period is double the actual one
shifts the phase so the primary minima falls at 0 phase
perform a phase binning with 64 bins (mean)
writes the 1d array to a file
which will be fed into the classifier NN
Returns:
-------
1D phase binned array
writes phased lc image to disk
'''
fname = file.split('/')[-1]
tgt = Lctools.Lctools()
tgt.set_lc(file)
tgt.set_noise_frequencies('noise_freq')
period = tgt.lomb_scargle()
df = tgt.build_df()
ph = tgt.phase_correction()
status = tgt.check_double_period()
# status = tgt.check_period_spline()
if status is not False:
df = tgt.build_df()
ph = tgt.phase_correction()
else:
pass
binned = tgt.normalise(tgt.phase_bin())
# print('normalized binned lc of %s'%fname)
with open('logs_LS','a+') as log:
log.write(fname+','+str(tgt.period)+','+str(status)+'\n')
# print('writing 1D phase binned lc to file')
with open(svpath+'1D_'+fname,'w+') as LCarray:
for mag in binned:
LCarray.write(str(mag)+'\n')
# plot_phased_lc(df.phase,df.mag,tgt.period,impath+fname)
# del period,df,ph
#%%
def plot_phased_lc(phases,mag,period,name):
plt.style.use('seaborn')
plt.figure(figsize = (4,3))
plt.title('Period : %.6f'%period)
plt.xlabel('Phase')
plt.ylabel('Flux')
plt.gca().invert_yaxis()
plt.plot(phases,mag,'.k',alpha=0.8)
plt.savefig(name+'_s.png',dpi=100)
# plt.show()
plt.close()
#%%
for file in files:
try:
fname = file.split('/')[-1]
if os.path.exists(svpath+'1D_'+fname) == False:
main(file)
except:
with open('errorlog','a+') as errlog:
errlog.write(file+'\n')
print('encountered an error with file %s'%file.split('/')[-1])
print('Done!')
```
#### File: jishnupdas/SOM-Classifier-for-lightcurves/SOM.py
```python
import glob
import pickle
import numpy as np
import seaborn as sns
from minisom import MiniSom
import matplotlib.pyplot as plt
#%%
class SOM:
'''
Class to easily train, use, save and load the SOM clustering/classifier
packages used;
Pickle #to save and load files
Minisom #minimal implementation of Self Organizing maps
'''
def __init__(self,files=None,data=None,som=None,network_h=None,
network_w=None,coords=None,x=None,y=None,fnames=None):
'''initializing all the necessary values
Parameters:
-----------
files : list of files, which contain the 1D phase binned data
data : An array of arrays, each element is a 1D phase binned LC
som : self organising map NN with hxw neurons
network_h : height of the network
network_w : width of the network
coords : An array which contains som.winner for all LCs in data
x,y : x,y coords from coords
'''
self.files = files
self.fnames = fnames
self.data = data
self.som = som
self.network_h = 50
self.network_w = 50
self.coords = coords
self.x = x
self.y = y
def set_files(self,path):
'''
takes path to the files as arg; returns list of files in the path
'''
self.files = glob.glob(path+'*')
def get_arr(self,file):
'''
Get data from a file as an np array:
reject files which has nan values in them
nan can break the SOM classifier
'''
data = np.loadtxt(file)
if np.isnan(data).any() == True:
return np.nan
else:
return data
def set_data(self):
'''
opens each file in the folder and reads the data
into an array, and appends that to the data array
if it doesn't contain any nan values
'''
self.fnames,self.data,err_f = [],[],[]
for f in self.files:
arr = self.get_arr(f)
if arr is not np.nan:
self.fnames.append(f)
self.data.append(arr)
else:
err_f.append(f)
def set_som(self,sigma,learning_rate):
'''
initializes the network:
by default 50x50 with 0.1 sigma and 1.5 lr is initialized
'''
self.som = MiniSom(x = self.network_h,y = self.network_w,
input_len = 32, sigma = sigma,
learning_rate = learning_rate)
self.som.random_weights_init(self.data)
#initialize random weights to the network
def train_som(self,number):
'''
tains the network with 'number' iterations by randomly taking
'number' of elements from the data array
'''
self.som.train_random(self.data, number)
def save_model(self,outfile):
'''
Save the trained model
'''
with open(outfile+'.p', 'wb') as outfile:
pickle.dump(self.som, outfile)
def load_model(self,som_file):
'''
Load the saved model
'''
with open(som_file, 'rb') as infile:
self.som = pickle.load(infile)
def get_coords(self):
'''
Runs each of the elements of the dataset through the SOM
and gets the winner and appends it to the coords array
'''
self.coords = []
err = []
self.x = []
self.y = []
for d in self.data:
try:
coord = np.array(self.som.winner(d))
self.coords.append(coord)
self.x.append(coord[0])
self.y.append(coord[1])
except:
#print("err with ",str(d))
err.append(d)
#getting x,y points
# self.x = [i[0] for i in self.coords]
# self.y = [i[1] for i in self.coords]
return self.x,self.y
def plot_winners(self):
x,y = self.x,self.y
plt.style.use('seaborn')
plt.figure(figsize=(9,9))
plt.plot(x,y,'.',alpha=0.15)
sns.kdeplot(x,y,cmap='Blues',shade=True,bw=1.5,shade_lowest=False, alpha=0.8)
plt.show()
plt.close()
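if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): the directory below
    # is a placeholder for a folder of 1D phase-binned light curves (32 values
    # per file, matching input_len above); the sigma and learning-rate values
    # follow the set_som docstring.
    som = SOM()
    som.set_files('/path/to/1D_lc/')
    som.set_data()
    som.set_som(sigma=0.1, learning_rate=1.5)
    som.train_som(10000)
    som.save_model('som_model')
    x, y = som.get_coords()
    som.plot_winners()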
``` |
{
"source": "jishnusen/pykarel",
"score": 3
} |
#### File: jishnusen/pykarel/Karel.py
```python
import os
import sys
import time
NORTH = 0
WEST = 1
SOUTH = 2
EAST = 3
class World:
symbols = {
"wall": "#",
"beeper": "o",
"path": " "
}
def __init__(self, filename, robots=[]):
self.grid = []
self.height = 0
self.width = 0
self.step_delay = 0.5
self.robots = []
self.breakpoint = False
self.step = False
with open(filename, 'r') as f:
self.grid = [list(line[:-1]) for line in f.readlines()]
self.height = len(self.grid)
self.width = len(self.grid[0])
for robot in robots:
self.add_robot(robot)
self.print_world(fast=True)
def add_robot(self, robot):
'''Add robot to world'''
robot.world = self
if robot.x < 0:
robot.x = self.width + robot.x
if robot.y < 0:
robot.y = self.height + robot.y
if not 0 <= robot.x < self.width:
raise ValueError("X value outside world boundaries!")
if not 0 <= robot.y < self.height:
raise ValueError("Y value outside world boundaries!")
if self.grid[robot.y][robot.x] == World.symbols["wall"]:
raise ValueError("Robot placed on a wall!")
if self.grid[robot.y][robot.x] in Robot.dirs:
raise ValueError("Robot placed on another robot!")
self.robots.append(robot)
self.print_world(fast=True)
def set_speed(self, speed):
'''Sets world simulation speed to a range [1, 4]'''
speeds = [0.5, 0.15, 0.03, 0]
try:
self.step_delay = speeds[speed-1]
except IndexError:
pass
def print_world(self, fast=False):
output = "\n"*100
lines = []
for y in range(self.height):
lines.append(self.grid[y][:])
for robot in self.robots:
lines[robot.y][robot.x] = Robot.dirs[robot.dir]
for line in lines:
output += ''.join(line) + '\n'
if self.breakpoint or self.step:
output += "Press ENTER to continue."
print(output, end="")
input()
self.breakpoint = False
else:
if not fast:
time.sleep(self.step_delay)
print(output, end="")
def add_breakpoint(self):
'''Adds single breakpoint, press ENTER to continue'''
self.breakpoint = True
def begin_step(self):
'''Adds breakpoints between all commands until end_step()'''
self.step = True
def end_step(self):
'''Resume running commands normally'''
self.step = False
class Robot:
dirs = {
NORTH: '▲',
WEST: '◄',
SOUTH: '▼',
EAST: '►'
}
#~ dirs = ['^', '<', 'v', '>']
moves = {
NORTH: {'x': 0, 'y':-1},
WEST: {'x':-1, 'y': 0},
SOUTH: {'x': 0, 'y': 1},
EAST: {'x': 1, 'y': 0}
}
class PutBeeperError(BaseException):
pass
class PickBeeperError(BaseException):
pass
class OutOfBeepersError(BaseException):
pass
class MovementError(BaseException):
pass
def __init__(self, x, y, dir, beepers=0):
self.dir = dir # 0 = N, 1 = W, 2 = S, 3 = E
self.x = x-1
self.y = y-1
self.beepercount = beepers
def front_is_clear(self):
'''Return True if space in front of robot can be moved into'''
newx = self.x + Robot.moves[self.dir]['x']
newy = self.y + Robot.moves[self.dir]['y']
if 0 <= newx < self.world.width and 0 <= newy < self.world.height:
if self.world.grid[newy][newx] != World.symbols["wall"]:
for robot in self.world.robots:
if robot.x == newx and robot.y == newy:
return False
return True
return False
def move(self):
'''Move robot forward one step'''
if not Robot.front_is_clear(self):
raise Robot.MovementError("Movement blocked!")
self.x += Robot.moves[self.dir]['x']
self.y += Robot.moves[self.dir]['y']
self.world.print_world()
def turnleft(self):
'''Rotate robot 90 degrees left'''
self.dir = (self.dir + 1) % 4
self.world.print_world()
def turnright(self):
'''Rotate robot 90 degrees right'''
self.dir = (self.dir - 1) % 4
self.world.print_world()
def check_beeper(self):
'''Return True if space in front contains a beeper'''
newx = self.x + Robot.moves[self.dir]['x']
newy = self.y + Robot.moves[self.dir]['y']
if 0 <= newx < self.world.width and 0 <= newy < self.world.height:
if self.world.grid[newy][newx] == World.symbols["beeper"]:
for robot in self.world.robots:
if robot.x == newx and robot.y == newy:
return False
return True
return False
def beeper_count(self):
return self.beepercount
def pick_beeper(self):
'''Pick up beeper in front of robot and add it to the beeper storage'''
if not self.check_beeper():
raise Robot.PickBeeperError("No beeper at location!")
newx = self.x + Robot.moves[self.dir]['x']
newy = self.y + Robot.moves[self.dir]['y']
self.world.grid[newy][newx] = World.symbols["path"]
self.beepercount += 1
self.world.print_world()
def put_beeper(self):
'''Place beeper from robot's beeper storage in front of the robot'''
if not self.front_is_clear():
raise Robot.PutBeeperError("No space to place beeper!")
if self.check_beeper():
raise Robot.PutBeeperError("Space already contains beeper!")
if self.beepercount < 1:
raise Robot.OutOfBeepersError("Not carrying any beepers!")
newx = self.x + Robot.moves[self.dir]['x']
newy = self.y + Robot.moves[self.dir]['y']
self.world.grid[newy][newx] = World.symbols["beeper"]
self.beepercount -= 1
self.world.print_world()
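# Minimal usage sketch (hypothetical; assumes a World instance `world` built
# earlier in this file and the NORTH/WEST/SOUTH/EAST constants used above):
#
#   robot = Robot(1, 1, EAST, beepers=3)
#   world.add_robot(robot)
#   while robot.front_is_clear():
#       robot.move()
#   robot.turnleft()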
``` |
{
"source": "jishubasak/eCRESEARCH",
"score": 2
} |
#### File: Web App/demo/app.py
```python
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.express as px
import dash
import os
import numpy as np
import logging
from PIL import Image
import base64
import io
import plotly.graph_objs as go
import pickle
import cv2
from dash.dependencies import Input, Output, State
from IPython.display import display, IFrame, HTML
from dash.exceptions import PreventUpdate
import json
import os
import time
import uuid
from copy import deepcopy
import csv
import sys
import pathlib
import importlib
from flask_caching import Cache
import dash_reusable_components as drc
import utils as utils
import keras
from keras.models import load_model
external_stylesheets = ['https://codepen.io/amyoshino/pen/jzXypZ.css']
# external_stylesheets = ['https://stackpath.bootstrapcdn.com/bootswatch/4.4.1/cerulean/bootstrap.min.css']
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
DEBUG = True
LOCAL = True
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
product_df = pd.read_csv('data/products.csv')
labels = product_df['Products'].unique()
labels_values = product_df['Links'].unique()
options = [{'label': x, 'value': y} for x,y in zip(labels,labels_values)]
#Scaling within range 1-10
def scaling(x,a,b,minimum,maximum):
return float(((b-a)*(x-minimum)/(maximum-minimum))+a)
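# For example (hypothetical numbers): scaling(5, 1, 10, 0, 10) rescales 5 from the
# original range [0, 10] onto [1, 10] and returns 5.5.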
# resets the callbacks
app.callback_map = {}
# sets the title
app.title = 'Ecfullfill Capstone'
# html content
app.layout = html.Div([
#Card Groups
html.Div([
html.Div([
html.Div(id='output-container-button-5',
children='Enter Price'),
html.Div(dcc.Input(id='input-box-5', type='text',required=True,
style={'position':'relative'})),
html.Button('Submit', id='button-5',
style={'position':'relative'}),
],className='two columns'),
],className='row'),
html.Br(),
html.Br(),
#Select Products
html.Div([
html.Div([
html.Div(id='output-container-button-6',
children='Select Product'),
dcc.Dropdown(
id='product-dropdown',
options=options,value='shampoo'),
],className='six columns'),
html.Div([
html.Div(id='output-container-button-7',
children='Select Tags'),
dcc.Dropdown(
id='multi-dropdown',
multi=True),
html.Div(id='display-selected-values'),
],className='six columns')
],className='row'),
html.Br(),
html.Br(),
html.Div([
dcc.Tabs([
# Tab 1
dcc.Tab([
html.Div([
dcc.Upload(
id='upload-image',
children=html.Div([
'Drag and Drop or ',
html.A('Select Image')
]),
style={
'width': '90%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
accept="image/*",
                        # Allow multiple files to be uploaded (contents arrive as a list)
multiple=True
),
html.Div([
#Product Length Button
html.Div([
html.Div(dcc.Input(id='product-length', type='text',required=True, placeholder = 'Product Length (in)',
style={'position':'relative'})),
],className = 'two columns'),
#Product Width Button
html.Div([
html.Div(dcc.Input(id='product-width', type='text',required=True, placeholder = 'Product Width (in)',
style={'position':'relative'})),
],className = 'two columns'),
#Product Height Button
html.Div([
html.Div(dcc.Input(id='product-height', type='text',required=True, placeholder = 'Product Height (in)',
style={'position':'relative'})),
],className = 'two columns'),
#Product Weight Button
html.Div([
html.Div(dcc.Input(id='product-weight', type='text',required=True, placeholder = 'Product Weight (oz)',
style={'position':'relative'})),
],className = 'two columns'),
html.Div([
html.Button(id='run-operation', n_clicks=0, children='Run Operation'),
html.Div(id='run-model'),
],className = 'two columns'),
], className = 'row'),
],className='row'),
html.Br(),
html.Br(),
html.Div([
#Upload COmponent
html.Div([
#Image Upload Graph
html.Div([
dcc.Graph(id='image_graph')
])
],className='six columns'),
#World Map
html.Div([
html.Div([
dcc.Graph(id='the_graph')
]),
html.Div([
html.Div(id='output_state'),
]),
],className='six columns')
],className = 'row'),
        ], className="container-fluid", label="Product Performance Insight"),
# Tab 2
dcc.Tab([
dcc.Textarea(
placeholder='Enter a value...',
value='This is a TextArea component',
style={'width': '100%'}
)
], className="container-fluid",
label="Product Highlight Generator")
], className="tabs-section")
], className="main-content"),
], className="main")
#Dropdown Callbacks
@app.callback(
Output('multi-dropdown', 'options'),
[Input('product-dropdown', 'value')])
def set_tags_options(selected_product):
if DEBUG:
print(selected_product)
with open('data/{}.pkl'.format(str(selected_product)), 'rb') as handle:
loaded_pkl = pickle.load(handle)
tagged_df = loaded_pkl['tags_datasets']['USA'].columns[4:20]
return [{'label': i, 'value': i} for i in tagged_df]
@app.callback(
Output('multi-dropdown', 'value'),
[Input('multi-dropdown', 'options')])
def set_tags_value(available_options):
return available_options[0]['value']
#Meta Data Input
# @app.callback(Output('output-state', 'children'),
# [Input('submit-button-state', 'n_clicks')],
# [State('input-1-state', 'value'),
# State('input-2-state', 'value')])
# def update_output(n_clicks, input1, input2):
# return u'''
# The Button has been pressed {} times,
# Input 1 is "{}",
# and Input 2 is "{}"
# '''.format(n_clicks, input1, input2)
@app.callback(
[Output('output_state', 'children'),
Output(component_id='the_graph', component_property='figure')],
[Input('run-operation', 'n_clicks'),
Input('upload-image', 'contents'),
Input('product-dropdown', 'value')],
[State('product-length', 'value'),
State('product-width', 'value'),
State('product-height', 'value'),
State('product-weight', 'value')]
)
def update_output(num_clicks,image_contents,selected_product,product_length,product_width,product_height,product_weight):
if product_length is None:
raise PreventUpdate
elif product_width is None:
raise PreventUpdate
elif product_height is None:
raise PreventUpdate
elif product_weight is None:
raise PreventUpdate
else:
temp_meta = np.array([float(product_length),float(product_height),float(product_width),float(product_weight)])
meta_input = temp_meta.reshape(1,4,1,1)
if image_contents is not None:
print('Image contents found')
image_b64 = image_contents[0].split(",")[1]
base64_decoded = base64.b64decode(image_b64)
image = Image.open(io.BytesIO(base64_decoded))
image_np = np.array(image)
target_size = (128,128)
image_input = cv2.resize(image_np, target_size)
print(image_input.shape)
image_input = image_input.reshape(1,128,128,3)
predictions_temp = {}
with open('data/{}.pkl'.format(str(selected_product)), 'rb') as handle:
loaded_pkl = pickle.load(handle)
models = loaded_pkl['models']
for country in models:
predictions_temp.update({country:models[country].predict({'main_input': image_input, 'meta_input': meta_input})[0][0]})
iso_changer = {'UK':'GBR','India':'IND','Australia':'AUS','USA':'USA'}
df_temp = pd.DataFrame(data=predictions_temp.items(),columns = ['Country','Success Index'])
df_temp['iso_alpha'] = df_temp['Country'].map(iso_changer)
min_tar = min(df_temp['Success Index'])
max_tar = max(df_temp['Success Index'])
df_temp['Success Index'] = df_temp['Success Index'].apply(lambda x: scaling(x,1,10,min_tar,max_tar))
df = df_temp.copy()
print(df)
fig = px.choropleth(df, locations="iso_alpha",
color="Success Index",
hover_name="Country",
projection='natural earth',
title='Performing Index of the Product by Country',
color_continuous_scale=px.colors.sequential.Plasma)
fig.update_layout(title=dict(font=dict(size=28),x=0.5,xanchor='center'),
margin=dict(l=60, r=60, t=50, b=50))
        return ('A high index reflects that the product is likely to be successful \
            if sold in that country', fig)
# @app.callback(Output('country-gdp-graph', 'figure'),
# [Input('world-map', 'clickData')])
# def update_graph(clickData):
# '''Update the carbon ppbv trend graph in Tab 1.
# A callback function that is triggered when a country in the map in Tab 1 is
# clicked. The country is retrieved from the clickData and is then used to
# generate a line graph showing the trends in carbon ppbv of a country
# across all years.
# Parameters
# ----------
# clickData : dict
# The dictionary containing the details of the clicked point on the map.
# Returns
# -------
# dict
# Return the updated carbon ppbv trend graph figure
# '''
# title = ''
# data = []
# if clickData:
# country = clickData['points'][0]['location']
# else:
# country = 'USA'
# data = [{'x': df.iloc[:, 1:].columns.tolist(),
# 'y': df.loc[country].iloc[1:].values.tolist(),
# 'type': 'line'}]
# title = df.loc[country, 'country']
# layout = dict(title='{} Carbon ppbv'.format(title),
# xaxis={'title': 'year'},
# yaxis={'title': 'Carbon ppbv'}
# )
# fig = dict(data=data, layout=layout)
# return fig
#
# image_source = "https://raw.githubusercontent.com/michaelbabyn/plot_data/master/bridge.jpg"
@app.callback(Output('image_graph','figure'),
[Input('upload-image', 'contents')])
def update_output(list_of_contents):
if list_of_contents is not None:
# if DEBUG:
# print(list_of_contents)
image_b64 = list_of_contents[0].split(",")[1]
base64_decoded = base64.b64decode(image_b64)
image = Image.open(io.BytesIO(base64_decoded))
image_np = np.array(image)
fig = px.imshow(image_np)
fig.update_layout(title=dict(font=dict(size=28),x=0.5,xanchor='center'),
margin=dict(l=60, r=60, t=50, b=50))
else:
image = Image.open(r'C:\Users\Dell-pc\Desktop\dash\images\ecfullfill.png')
image_np = np.array(image)
fig = px.imshow(image_np)
fig.update_layout(title=dict(font=dict(size=28),x=0.5,xanchor='center'),
margin=dict(l=60, r=60, t=50, b=50))
return fig
# Running the server
if __name__ == "__main__":
app.run_server(debug=True)
``` |
{
"source": "jishubasak/Game-of-Zones",
"score": 3
} |
#### File: jishubasak/Game-of-Zones/kypy.py
```python
from scipy.spatial.distance import cityblock
from scipy.spatial.distance import euclidean
from sklearn.metrics import calinski_harabaz_score, silhouette_score, adjusted_mutual_info_score, adjusted_rand_score
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
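# NOTE: cluster_range() below calls purity(), which is not defined in this file;
# the following is a minimal sketch of the usual cluster-purity definition
# (assumed, not taken from the original repository).
def purity(actual, pred):
    """Fraction of points assigned to the majority true class of their cluster."""
    df = pd.DataFrame({'actual': actual, 'pred': pred})
    majority_counts = df.groupby('pred')['actual'].agg(lambda s: s.value_counts().iloc[0])
    return majority_counts.sum() / len(df)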
def intra_to_inter(X, y, dist, r):
"""Compute intracluster to intercluster distance ratio
Parameters
----------
X : array
Design matrix with each row corresponding to a point
y : array
Class label of each point
dist : callable
Distance between two points. It should accept two arrays, each
corresponding to the coordinates of each point
r : integer
Number of pairs to sample
Returns
-------
ratio : float
Intracluster to intercluster distance ratio
"""
random_choices = []
numerators = []
denominators = []
np.random.seed(11)
for i in range(r):
random_choices.append(np.random.randint(0, len(X), 2))
for i in range(len(random_choices)):
if random_choices[i][0] == random_choices[i][1]:
continue
elif y[random_choices[i][0]] == y[random_choices[i][1]]:
numerators.append(dist(X[random_choices[i][0]],
X[random_choices[i][1]]))
else:
denominators.append(dist(X[random_choices[i][0]],
X[random_choices[i][1]]))
return (np.asarray(numerators).mean()) / (np.asarray(denominators).mean())
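# Example usage (hypothetical data); a ratio well below 1 indicates compact,
# well-separated clusters:
#
#   X = np.random.rand(100, 2)
#   y = np.random.randint(0, 3, 100)
#   ratio = intra_to_inter(X, y, euclidean, 50)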
def cluster_range(X, clusterer, k_start, k_stop, actual=None):
chs = []
iidrs = []
inertias = []
scs = []
ys = []
amis = []
ars = []
ps = []
for i in range(k_start, k_stop+1):
clusterer2 = clusterer
clusterer2.n_clusters = i
ys.append(clusterer2.fit_predict(X))
iidrs.append(intra_to_inter(X, ys[-1], euclidean, 50))
chs.append(calinski_harabaz_score(X, ys[-1]))
inertias.append(clusterer2.inertia_)
scs.append(silhouette_score(X, ys[-1]))
keys = ['ys', 'iidrs', 'chs', 'inertias', 'scs']
values = [ys, iidrs, chs, inertias, scs]
if actual is not None:
for i in ys:
ps.append(purity(actual, i))
ars.append(adjusted_rand_score(actual, i))
amis.append(adjusted_mutual_info_score(actual, i))
keys.extend(['ps', 'ars', 'amis'])
values.append(ps)
values.append(ars)
values.append(amis)
return dict(zip(keys, values))
else:
return dict(zip(keys, values))
def plot_internal(inertias, chs, iidrs, scs):
"""Plot internal validation values"""
fig, ax = plt.subplots(nrows=2, ncols=2, dpi=200)
ks = np.arange(2, len(inertias)+2)
ax[0,0].plot(ks, inertias, '-o', label='SSE')
ax[0,1].plot(ks, chs, '-ro', label='CH')
ax[0,0].set_xlabel('$k$')
ax[0,0].set_ylabel('SSE')
ax[0,1].set_ylabel('CH')
# lines, labels = ax.get_legend_handles_labels()
# ax2 = ax.twinx()
ax[1,0].plot(ks, iidrs, '-go', label='Inter-intra')
ax[1,1].plot(ks, scs, '-ko', label='Silhouette coefficient')
ax[1,0].set_ylabel('Inter-Intra')
# lines2, labels2 = ax2.get_legend_handles_labels()
# ax2.legend(lines+lines2, labels+labels2)
ax[1,1].set_ylabel('Silhouette Score')
fig.tight_layout()
return fig
def plot_clusters(X, ys):
"""Plot clusters given the design matrix and cluster labels"""
k_max = len(ys) + 1
k_mid = k_max//2 + 2
fig, ax = plt.subplots(2, k_max//2, dpi=150, sharex=True, sharey=True,
figsize=(7,4), subplot_kw=dict(aspect='equal'),
gridspec_kw=dict(wspace=0.01))
for k,y in zip(range(2, k_max+1), ys):
if k < k_mid:
ax[0][k%k_mid-2].scatter(*zip(*X), c=y, s=1, alpha=0.8)
ax[0][k%k_mid-2].set_title('$k=%d$'%k)
else:
ax[1][k%k_mid].scatter(*zip(*X), c=y, s=1, alpha=0.8)
ax[1][k%k_mid].set_title('$k=%d$'%k)
return ax
``` |
{
"source": "Jishu-Epic/RShell",
"score": 2
} |
#### File: Jishu-Epic/RShell/rban.py
```python
__author__ = "3P1C"
__version__ = 1.0
__copyright__ = "Copyright © 2019"
import random
banner = '''
d8888b. .d8888. db db d88888b db db
88 `8D 88' YP 88 88 88' 88 88
88oobY' `8bo. 88ooo88 88ooooo 88 88
88`8b `Y8b. 88~~~88 88~~~~~ 88 88
88 `88. db 8D 88 88 88. 88booo. 88booo.
88 YD `8888Y' YP YP Y88888P Y88888P Y88888P
'''
banner2 = r'''
___ ___ _ _ _
| _`\ ( _`\ ( ) (_ ) (_ )
| (_) )| (_(_)| |__ __ | | | |
| , / `\__ \ | _ `\ /'__`\ | | | |
| |\ \ ( )_) || | | |( ___/ | | | |
(_) (_)`\____)(_) (_)`\____)(___)(___)
'''
banner3 = r'''
____ ____ __ ___ ___
/\ _`\ /\ _`\ /\ \ /\_ \ /\_ \
\ \ \L\ \ \ \,\L\_\ \ \ \___ __ \//\ \ \//\ \
\ \ , / \/_\__ \ \ \ _ `\ /'__`\ \ \ \ \ \ \
\ \ \\ \ /\ \L\ \ \ \ \ \ \ /\ __/ \_\ \_ \_\ \_
\ \_\ \_\ \ `\____\ \ \_\ \_\\ \____\ /\____\ /\____\
\/_/\/ / \/_____/ \/_/\/_/ \/____/ \/____/ \/____/
'''
#print(banner3)
def ban():
    '''Return one of the banners at random.'''
    return random.choice((banner, banner2, banner3))
``` |
{
"source": "jishuguang/language",
"score": 3
} |
#### File: qa/model/bert.py
```python
import os
import torch
from torch import nn
from torch.nn import functional as F
from language.lm.model.bert import BertEncoder
from language.utils.log import get_logger
logger = get_logger()
class Bert(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.bert_encoder = BertEncoder(**kwargs)
# QA
embedding_dim = kwargs['embedding_dim']
self.linear = nn.Linear(embedding_dim, 2)
def forward(self, data):
"""
:param data: dict, {key: Tensor}.
:return: (Tensor, Tensor), shape(n, s), start_prediction, end_prediction.
"""
embedding = self.bert_encoder(data) # (n, s, e)
prediction = self.linear(embedding) # (n, s, 2)
segment = data['segments'].unsqueeze(-1) # (n, s, 1)
pad_mask = data['pad_mask'].unsqueeze(-1) # (n, s, 1)
valid_mask = segment - pad_mask
valid_mask[:, 0, :] = 1 # <cls> is valid
offset = torch.zeros_like(valid_mask, device=valid_mask.device)
offset[valid_mask == 0] = -1E6
prediction = prediction * valid_mask + offset
start_pred = prediction[:, :, 0] # (n, s)
end_pred = prediction[:, :, 1] # (n, s)
return start_pred, end_pred
def forward_train(self, data):
"""
:param data: dict, {key: Tensor}.
:return: Tensor, Shape(1), loss.
"""
start_pred, end_pred = self(data) # (n, s)
start = data['start'][:, 0] # (n)
end = data['end'][:, 0] # (n)
loss = F.cross_entropy(start_pred, start) + F.cross_entropy(end_pred, end)
return loss
def forward_infer(self, data):
"""
:param data: dict, {key: Tensor}.
        :return: (Tensor, Tensor, Tensor), each of shape (n): no-answer flag, start index, end index.
"""
start_pred, end_pred = self(data) # (n, s)
no_answer_score = start_pred[:, 0] + end_pred[:, 0] # <cls>
start_pred = start_pred[:, 1:]
end_pred = end_pred[:, 1:]
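        # cummax keeps, for every candidate end position, the best start logit at
        # or before it, so the arg-max below respects start <= end without an
        # explicit O(s^2) span search.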
start_max_values, start_max_indies = torch.cummax(start_pred, dim=1)
answer_score = start_max_values + end_pred
max_answer_score, end_index = torch.max(answer_score, dim=1) # (n)
start_index = start_max_indies[torch.arange(end_index.shape[0], device=end_index.device), end_index]
return no_answer_score > max_answer_score, start_index + 1, end_index + 1
def load_bert_model(self, model_path):
"""
:param model_path: path to bert pretrain model.
:return: None.
"""
logger.info(f'Loading bert model: {model_path}.')
if not os.path.exists(model_path):
            raise FileNotFoundError(f'{model_path} does not exist.')
self.bert_encoder = torch.load(model_path)
```
#### File: qa/utils/trainer.py
```python
import os
import torch
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils import data
from language.utils.log import get_logger
from language.utils.serialization import save_model
logger = get_logger()
class Trainer:
def __init__(self, train_cfg, evaluator, model, train_data, save_dir):
self._cfg = train_cfg
self._evaluator = evaluator
self._model = model.to(train_cfg.device.name)
self._setup_data(train_data)
self._setup_scheduler()
self._save_dir = save_dir
self._key_metric = train_cfg.key_metric
def _setup_data(self, train_data):
batch_size = self._cfg.device.batch_size
num_worker = self._cfg.device.num_worker
logger.info(f'batch_size: {batch_size}')
logger.info(f'num_workers: {num_worker}')
self._total_steps = len(train_data) // batch_size
self._train_iter = data.DataLoader(train_data, shuffle=True,
batch_size=batch_size, num_workers=num_worker,
drop_last=True)
def _setup_scheduler(self):
learn_paras = {
'lr': self._cfg.learn.lr,
'weight_decay': self._cfg.learn.weight_decay
}
self._optimizer = getattr(optim, self._cfg.learn.method)(self._model.parameters(), **learn_paras)
self._scheduler = MultiStepLR(self._optimizer, self._cfg.learn.milestones, gamma=self._cfg.learn.gamma)
def train(self):
best_key_metric = 0
for epoch in range(self._cfg.learn.epochs):
self._train_one_epoch(epoch)
metric_dict = self._evaluator.evaluate(self._model)
msg = f'Epoch {epoch}: {" | ".join([f"{key}: {value:.4f}" for key, value in metric_dict.items()])}.'
key_metric = metric_dict[self._key_metric]
logger.info(msg)
if key_metric >= best_key_metric:
best_key_metric = key_metric
# save model
save_model(self._model, os.path.join(self._save_dir, 'model', 'model_best.pth'))
# save evaluation result
with open(os.path.join(self._save_dir, 'model', 'evaluation.txt'), 'a') as f:
f.write(msg + os.linesep)
save_model(self._model, os.path.join(self._save_dir, 'model', 'model_last.pth'))
self._scheduler.step()
def _train_one_epoch(self, epoch):
step = 0
self._model.train()
for train_batch in self._train_iter:
self._optimizer.zero_grad()
self._to_device(train_batch)
loss = self._model.forward_train(train_batch)
logger.info(f'[Epoch {epoch}][Step {step}/{self._total_steps}] '
f'lr: {self._scheduler.get_last_lr()[0]:.5f}'
f' | loss {loss:.4f}')
loss.backward()
self._optimizer.step()
step += 1
def _to_device(self, train_batch):
"""Move a batch to specified device."""
device = self._cfg.device.name
for key, value in train_batch.items():
if isinstance(value, torch.Tensor):
train_batch[key] = value.to(device)
```
#### File: tagging/dataset/__init__.py
```python
from .sequence import build_sequence_dataset
from language.utils.log import get_logger
logger = get_logger()
DATASET_TYPES = {
'sequence': build_sequence_dataset
}
def build_dataset(cfg, **kwargs):
dataset_type = cfg.type
logger.info(f'Dataset type is set to {dataset_type}.')
if dataset_type in DATASET_TYPES:
logger.info(f'Building dataset {cfg.name}...')
return DATASET_TYPES[dataset_type](**cfg, **kwargs)
else:
raise ValueError(f'Invalid dataset type: {dataset_type}.')
```
#### File: language/utils/random.py
```python
import numpy as np
class RandomChoices:
"""Fast random choices with cache."""
def __init__(self, population, weights, k):
self._population = np.array(population)
weights = np.array(weights)
self._p = weights / weights.sum()
self._k = k
self._reset()
def _reset(self):
self._candidates = np.random.choice(self._population, size=self._k, replace=True, p=self._p)
self._index = 0
def __call__(self):
if self._index == self._k:
self._reset()
self._index += 1
return self._candidates[self._index - 1]
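# Minimal usage sketch: the cost of np.random.choice is paid once per k draws
# rather than once per call.
#
#   sampler = RandomChoices(population=[0, 1, 2], weights=[0.7, 0.2, 0.1], k=10000)
#   value = sampler()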
```
#### File: language/tools/train_tagging.py
```python
import argparse
import os
import time
from logging import FileHandler
from language.tagging.dataset import build_dataset
from language.tagging.model import build_model
from language.tagging.utils.trainer import Trainer
from language.tagging.utils.evaluator import Evaluator
from language.utils.log import get_logger
from language.utils.config import get_cfg_defaults
from language.utils.serialization import load_model
logger = get_logger()
def load_pretrain_model(model, pretrain_cfg):
if pretrain_cfg is None:
return model
if 'resume' in pretrain_cfg:
load_model(model, pretrain_cfg.resume, resume=True)
elif 'load' in pretrain_cfg:
load_model(model, pretrain_cfg.load)
elif 'bert' in pretrain_cfg:
model.load_bert_model(pretrain_cfg.bert)
return model
def train(cfg):
train_dataset = build_dataset(cfg.data, purpose='train')
val_dataset = build_dataset(cfg.data, purpose='val')
model = build_model(cfg.model, vocab=train_dataset.get_vocab(), tag_vocab=train_dataset.get_tag_vocab())
load_pretrain_model(model, cfg.pretrain)
evaluator = Evaluator(cfg.evaluator, val_dataset)
Trainer(cfg.trainer, evaluator, model, train_dataset, cfg.save.dir).train()
def main():
parser = argparse.ArgumentParser(description='Train sequence tagging model.')
parser.add_argument('--config', required=True, type=str, help='Path to config.')
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config)
cfg.save.dir = os.path.join(cfg.save.dir, time.strftime("%Y%m%d%H%M%S"))
os.makedirs(cfg.save.dir, exist_ok=True)
cfg.freeze()
logger.addHandler(FileHandler(os.path.join(cfg.save.dir, f'train.log')))
logger.info(f'Loading config {args.config}.')
logger.info(f'Config:\n {cfg}')
train(cfg)
if __name__ == '__main__':
main()
``` |
{
"source": "jishuguang/recommender",
"score": 3
} |
#### File: recommender/dataset/point_dataset.py
```python
import random
from .base_dataset import BaseDataset, SupervisedBaseDataset
from utils.log import get_logger
logger = get_logger()
class PointTrainDataset(SupervisedBaseDataset):
def _setup(self):
super()._setup()
total_amount = len(self)
# if any action is true, we treat it as positive.
self._positive_indexes = list(self._df.index[self._df[self._cfg.input.action].agg('sum', axis=1) > 0])
self._negative_indexes = list(set(range(total_amount)) - set(self._positive_indexes))
self._positive_amount = len(self._positive_indexes)
# adjust data_size = positive + negative_sample
self._len = int(min(self._positive_amount * (1 + self._cfg.negative_sample), total_amount))
logger.info(f'Positive sample size: {self._positive_amount}; '
f'Negative sample size: {total_amount - self._positive_amount}')
logger.info(f'Adjust dataset size to {len(self)}')
def __getitem__(self, index):
if index < self._positive_amount:
real_index = self._positive_indexes[index]
else:
real_index = random.choice(self._negative_indexes)
return super().__getitem__(real_index)
DATASETS = {
'train': PointTrainDataset,
'val': SupervisedBaseDataset,
'test': BaseDataset
}
def build_point_dataset(data_cfg, purpose='train'):
if purpose in DATASETS:
return DATASETS[purpose](data_cfg, purpose)
else:
raise ValueError(f'Unsupported purpose {purpose}.')
```
#### File: recommender/model/__init__.py
```python
from model.module.interaction import BinaryClassInteraction
from model.fm import build_fm
from model.deepfm import build_deepfm
from model.xdeepfm import build_xdeepfm
from utils.log import get_logger
logger = get_logger()
# TODO: Design an architecture consisting of linear, deep and interaction parts, of which fm/deepfm/xdeepfm are all instances.
MODELS = {
'fm': build_fm,
'deepfm': build_deepfm,
'xdeepfm': build_xdeepfm
}
def build_model(model_cfg):
logger.info(f'Building model...')
if model_cfg.name in MODELS.keys():
model = MODELS[model_cfg.name](model_cfg)
if getattr(model_cfg, 'binary_class_interaction', False):
model = BinaryClassInteraction(model_cfg.output_dim, model)
logger.info(f'Model \"{type(model).__name__}\" is built:\n'
f'{model}')
return model
else:
raise ValueError(f'model: {model_cfg.name} is not found.')
```
#### File: model/module/linear.py
```python
import torch
from torch import nn
class OneHotLinear(nn.Module):
"""Linear layer for one hot features."""
def __init__(self, one_hot_dims, output_dim):
"""
:param one_hot_dims: List[int], dimensions of one-hot categorical features.
:param output_dim: int, output dimension.
"""
super().__init__()
# offsets of the features, note that the offset for the first feature is 0
self._offsets = torch.tensor([0] + one_hot_dims, dtype=torch.int).cumsum(0)[:-1]
input_dim = sum(one_hot_dims)
# embed is used as weight
self.one_hot_embed = nn.Embedding(input_dim, output_dim)
self._input_dim = input_dim
def forward(self, x):
"""
:param x: Tensor, (n, len(one_hot_dims)).
:return: Tensor, (n, output_dim).
"""
x = x + self._offsets.to(x.device)
x = self.one_hot_embed(x).sum(dim=1)
return x
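# Minimal usage sketch (hypothetical dimensions): three one-hot categorical
# features with vocabulary sizes 10, 5 and 3 mapped to a single linear output.
#
#   layer = OneHotLinear([10, 5, 3], output_dim=1)
#   x = torch.tensor([[1, 4, 2], [0, 0, 0]])  # (n=2, num_features=3)
#   out = layer(x)                            # shape (2, 1)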
```
#### File: recommender/test/test_dataset.py
```python
from unittest import TestCase
from torch.utils.data import DataLoader
from utils.config import get_cfg_defaults
from dataset import build_dataset
from utils.log import get_logger
logger = get_logger()
class TestDataset(TestCase):
@classmethod
def setUpClass(cls):
cfg = get_cfg_defaults()
cfg_file = r'C:\Users\gg\Documents\MySpace\git\recommender\experiments\2021_WeChat_Challenge\fm_offline.yml'
cfg.merge_from_file(cfg_file)
cfg.freeze()
cls._data_cfg = cfg.data
logger.info(f'Data config:\n{cls._data_cfg}')
def test_data_loader(self):
val_dataset = build_dataset(self._data_cfg, 'test')
val_iter = DataLoader(val_dataset, shuffle=True, batch_size=2, num_workers=1, drop_last=False)
for data in val_iter:
print(data)
break
```
#### File: recommender/tools/infer.py
```python
import argparse
import os
import time
import pandas as pd
import torch
from torch.utils import data
from tqdm import tqdm
import numpy as np
from model import build_model
from utils.serialization import load_model
from utils.config import get_cfg_defaults
from dataset import build_dataset
from utils.log import get_logger
logger = get_logger()
class Infer:
def __init__(self, model_path, config_path):
self._cfg = get_cfg_defaults()
self._cfg.merge_from_file(config_path)
self._cfg.freeze()
self._save_dir = os.path.dirname(model_path)
self._setup_model(model_path)
self._setup_data()
def _setup_model(self, model_path):
self._model = build_model(self._cfg.model).to(self._cfg.evaluator.device.name)
load_model(self._model, model_path)
def _setup_data(self):
test_data = build_dataset(self._cfg.data, 'test')
self._data_name = os.path.basename(self._cfg.data.action.test).split('.')[0]
batch_size = self._cfg.evaluator.device.batch_size
num_worker = self._cfg.evaluator.device.num_worker
self._test_iter = data.DataLoader(test_data, shuffle=False,
batch_size=batch_size, num_workers=num_worker,
drop_last=False)
def infer(self):
self._model.eval()
users = list()
items = list()
preds = list()
        logger.info('Inferring...')
for batch in tqdm(self._test_iter):
device = self._cfg.evaluator.device.name
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device)
user = batch['userid']
item = batch['itemid']
with torch.no_grad():
pred = self._model.infer(batch)
preds.append(pred.cpu())
users.append(user.cpu())
items.append(item.cpu())
users = torch.cat(users)
items = torch.cat(items)
preds = torch.cat(preds)
self._dump_result(users, items, preds)
def _dump_result(self, users, items, preds):
result = torch.cat([users.unsqueeze(1), items.unsqueeze(1), preds], dim=1)
result_df = pd.DataFrame(result.numpy())
result_df.columns = ['userid', 'feedid'] + self._cfg.evaluator.action_name
        result_df['userid'] = result_df['userid'].astype(np.int64)
        result_df['feedid'] = result_df['feedid'].astype(np.int64)
path = os.path.join(self._save_dir, f'{self._data_name}_{time.strftime("%Y%m%d%H%M%S")}.csv')
logger.info(f'Dumping inferred result to {path}.')
result_df.to_csv(path, float_format='%.6f', index=False)
def main():
parser = argparse.ArgumentParser(description='Evaluate a model.')
parser.add_argument('--model', required=True, type=str, help='The path to model.')
parser.add_argument('--config', required=True, type=str, help='The path to config.')
args = parser.parse_args()
Infer(args.model, args.config).infer()
if __name__ == '__main__':
main()
```
#### File: recommender/tools/modify_id.py
```python
import argparse
import os
import pandas as pd
from dataset.vocabulary import build_vocabulary
from utils.log import get_logger
logger = get_logger()
class IdModifier:
def __init__(self, vocab_type, vocab_name, path):
voc = build_vocabulary(vocab_type, vocab_name)
voc.setup_by_csv(path)
self._voc = voc
def modify(self, csv_path, column, output_dir):
logger.info(f'Reading csv {csv_path}...')
df = pd.read_csv(csv_path)
logger.info(f'Modifying id...')
df[column] = self._voc.get_id(df[column])
if not os.path.exists(output_dir):
os.mkdir(output_dir)
output_name = os.path.basename(csv_path).replace('.csv', '_id_modified.csv')
output_file = os.path.join(output_dir, output_name)
logger.info(f'Saving to {output_file}...')
df.to_csv(output_file, index=False)
def main():
parser = argparse.ArgumentParser(description='Modify id using vocabulary.')
parser.add_argument('--vocab', required=True, type=str, help='Path to vocabulary.')
parser.add_argument('--vocab_type', required=True, choices=['cat', 'num'],
help='Type of the vocabulary.')
parser.add_argument('--csv', required=True, type=str, help='Path to csv file to modify.')
parser.add_argument('--column', required=True, type=str, help='Column to modify.')
parser.add_argument('--output', required=False, type=str, help='Output directory, default to input directory.')
args = parser.parse_args()
    output_dir = args.output if args.output else os.path.dirname(args.csv)
    IdModifier(args.vocab_type, 'dummy_name', args.vocab).modify(args.csv, args.column, output_dir)
if __name__ == '__main__':
main()
``` |
{
"source": "jiskattema/kernel_tuner",
"score": 3
} |
#### File: examples/fortran/test_vector_add.py
```python
import json
import numpy as np
from kernel_tuner import run_kernel
def test():
with open('vector_add.F90', 'r') as f:
kernel_string = f.read()
size = 10000000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(b)
n = np.int32(size)
args = [c, a, b, n]
tune_params = dict()
tune_params["N"] = size
tune_params["NTHREADS"] = 4
answer = run_kernel("vector_add", kernel_string, size, args, tune_params, lang="fortran", compiler="gfortran")
assert np.allclose(answer[0], a+b, atol=1e-8)
if __name__ == "__main__":
test()
```
#### File: kernel_tuner/strategies/minimize.py
```python
from __future__ import print_function
from collections import OrderedDict
import logging
import numpy
import scipy.optimize
from kernel_tuner import util
supported_methods = ["Nelder-Mead", "Powell", "CG", "BFGS", "L-BFGS-B", "TNC", "COBYLA", "SLSQP"]
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
    :returns: A list of dictionaries for executed kernel configurations and their
        execution times, and a dictionary that contains information
        about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
method = tuning_options.strategy_options.get("method", "L-BFGS-B")
# scale variables in x to make 'eps' relevant for multiple variables
tuning_options["scaling"] = True
bounds, x0, _ = get_bounds_x0_eps(tuning_options)
kwargs = setup_method_arguments(method, bounds)
options = setup_method_options(method, tuning_options)
args = (kernel_options, tuning_options, runner, results)
opt_result = scipy.optimize.minimize(_cost_func, x0, args=args, method=method, options=options, **kwargs)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment()
def _cost_func(x, kernel_options, tuning_options, runner, results):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
    # snap values in x to the nearest actual value for each parameter; unscale x first if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
# we cache snapped values, since those correspond to results for an actual instance of the kernel
x_int = ",".join([str(i) for i in params])
if x_int in tuning_options.cache:
results.append(tuning_options.cache[x_int])
return tuning_options.cache[x_int]["time"]
# check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
error_result = OrderedDict(zip(tuning_options.tune_params.keys(), params))
error_result["time"] = error_time
tuning_options.cache[x_int] = error_result
return error_time
# compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
# append to tuning results
if res:
results.append(res[0])
return res[0]['time']
return error_time
def get_bounds_x0_eps(tuning_options):
"""compute bounds, x0 (the initial guess), and eps"""
values = list(tuning_options.tune_params.values())
if "x0" in tuning_options.strategy_options:
x0 = tuning_options.strategy_options.x0
else:
x0 = None
if tuning_options.scaling:
eps = numpy.amin([1.0 / len(v) for v in values])
# reducing interval from [0, 1] to [0, eps*len(v)]
bounds = [(0, eps * len(v)) for v in values]
if x0:
# x0 has been supplied by the user, map x0 into [0, eps*len(v)]
for i, e in enumerate(values):
x0[i] = eps * values[i].index(x0[i])
else:
x0 = [0.5 * eps * len(v) for v in values]
else:
bounds = get_bounds(tuning_options.tune_params)
if not x0:
x0 = [(min_v + max_v) / 2.0 for (min_v, max_v) in bounds]
eps = 1e9
for v_list in values:
vals = numpy.sort(v_list)
eps = min(eps, numpy.amin(numpy.gradient(vals)))
tuning_options["eps"] = eps
logging.debug('get_bounds_x0_eps called')
logging.debug('bounds ' + str(bounds))
logging.debug('x0 ' + str(x0))
logging.debug('eps ' + str(eps))
return bounds, x0, eps
def get_bounds(tune_params):
""" create a bounds array from the tunable parameters """
bounds = []
for values in tune_params.values():
sorted_values = numpy.sort(values)
bounds.append((sorted_values[0], sorted_values[-1]))
return bounds
def setup_method_arguments(method, bounds):
""" prepare method specific arguments """
kwargs = {}
# pass bounds to methods that support it
if method in ["L-BFGS-B", "TNC", "SLSQP"]:
kwargs['bounds'] = bounds
return kwargs
def setup_method_options(method, tuning_options):
""" prepare method specific options """
kwargs = {}
    # Note that not all methods interpret maxiter in the same manner
if "maxiter" in tuning_options.strategy_options:
maxiter = tuning_options.strategy_options.maxiter
else:
maxiter = 100
kwargs['maxiter'] = maxiter
if method in ["Nelder-Mead", "Powell"]:
kwargs['maxfev'] = maxiter
elif method == "L-BFGS-B":
kwargs['maxfun'] = maxiter
# pass eps to methods that support it
if method in ["CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"]:
kwargs['eps'] = tuning_options.eps
elif method == "COBYLA":
kwargs['rhobeg'] = tuning_options.eps
# not all methods support 'disp' option
    if method not in ['TNC']:
kwargs['disp'] = tuning_options.verbose
return kwargs
def snap_to_nearest_config(x, tune_params):
"""helper func that for each param selects the closest actual value"""
params = []
for i, k in enumerate(tune_params.keys()):
values = numpy.array(tune_params[k])
idx = numpy.abs(values - x[i]).argmin()
params.append(int(values[idx]))
return params
def unscale_and_snap_to_nearest(x, tune_params, eps):
"""helper func that snaps a scaled variable to the nearest config"""
x_u = [i for i in x]
for i, v in enumerate(tune_params.values()):
# create an evenly spaced linear space to map [0,1]-interval
# to actual values, giving each value an equal chance
# pad = 0.5/len(v) #use when interval is [0,1]
pad = 0.5 * eps # use when interval is [0, eps*len(v)]
linspace = numpy.linspace(pad, (eps * len(v)) - pad, len(v))
# snap value to nearest point in space, store index
idx = numpy.abs(linspace - x[i]).argmin()
# safeguard that should not be needed
idx = min(max(idx, 0), len(v) - 1)
# use index into array of actual values
x_u[i] = v[idx]
return x_u
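# Worked example (hypothetical parameter space): with
# tune_params = {"block_size_x": [32, 64, 128]} the scaled interval is [0, eps*3]
# with eps = 1/3, so unscale_and_snap_to_nearest([0.4], tune_params, 1/3) returns
# [64], while snap_to_nearest_config([100], tune_params) returns [128].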
``` |
{
"source": "jiskra/openmv",
"score": 3
} |
#### File: Portenta-H7/19-Low-Power/himax_wakeup_on_motion_detection.py
```python
import sensor, image, time, pyb, machine
from pyb import Pin, ExtInt
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 10)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
def on_motion(line):
pass
led = pyb.LED(3)
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
while(True):
led.off()
sensor.ioctl(sensor.IOCTL_HIMAX_OSC_ENABLE, True) # Switch to internal OSC
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR) # Clear MD flag
machine.sleep() # Enter low-power mode, will wake up on MD interrupt.
sensor.ioctl(sensor.IOCTL_HIMAX_OSC_ENABLE, False) # Switch back to MCLK
led.on()
for i in range(0, 60): # Capture a few frames
img = sensor.snapshot()
```
#### File: Portenta-H7/34-Remote-Control/image_transfer_raw_as_the_remote_device.py
```python
import image, network, rpc, sensor, struct
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
# The RPC library above is installed on your OpenMV Cam and provides multiple classes for
# allowing your OpenMV Cam to be controlled over CAN, I2C, SPI, UART, or WIFI.
################################################################
# Choose the interface you wish to control your OpenMV Cam over.
################################################################
# Uncomment the below line to setup your OpenMV Cam for control over CAN.
#
# * message_id - CAN message to use for data transport on the can bus (11-bit).
# * bit_rate - CAN bit rate.
# * sampling_point - Tseg1/Tseg2 ratio. Typically 75%. (50.0, 62.5, 75.0, 87.5, etc.)
#
# NOTE: Master and slave message ids and can bit rates must match. Connect master can high to slave
# can high and master can low to slave can low. The can bus must be terminated with 120 ohms.
#
# interface = rpc.rpc_can_slave(message_id=0x7FF, bit_rate=1000000, sampling_point=75.0)
# Uncomment the below line to setup your OpenMV Cam for control over I2C.
#
# * slave_addr - I2C address.
#
# NOTE: Master and slave addresses must match. Connect master scl to slave scl and master sda
# to slave sda. You must use external pull ups. Finally, both devices must share a ground.
#
# interface = rpc.rpc_i2c_slave(slave_addr=0x12)
# Uncomment the below line to setup your OpenMV Cam for control over SPI.
#
# * cs_pin - Slave Select Pin.
# * clk_polarity - Idle clock level (0 or 1).
# * clk_phase - Sample data on the first (0) or second edge (1) of the clock.
#
# NOTE: Master and slave settings much match. Connect CS, SCLK, MOSI, MISO to CS, SCLK, MOSI, MISO.
# Finally, both devices must share a common ground.
#
interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)
# Uncomment the below line to setup your OpenMV Cam for control over UART.
#
# * baudrate - Serial Baudrate.
#
# NOTE: Master and slave baud rates must match. Connect master tx to slave rx and master rx to
# slave tx. Finally, both devices must share a common ground.
#
# interface = rpc.rpc_uart_slave(baudrate=7500000)
################################################################
# Call Backs
################################################################
# When called sets the pixformat and framesize, takes a snapshot
# and then returns the frame buffer shape to store the image in.
#
# data is a 4 byte pixformat and 4 byte framesize.
def raw_image_snapshot(data):
pixformat, framesize = struct.unpack("<II", data)
sensor.set_pixformat(pixformat)
sensor.set_framesize(framesize)
img = sensor.snapshot()
return struct.pack("<IIII", sensor.width(), sensor.height(), sensor.get_pixformat(), img.size())
def raw_image_read_cb():
interface.put_bytes(sensor.get_fb().bytearray(), 5000) # timeout
# Read data from the frame buffer given a offset and size.
# If data is empty then a transfer is scheduled after the RPC call finishes.
#
# data is a 4 byte size and 4 byte offset.
def raw_image_read(data):
if not len(data):
interface.schedule_callback(raw_image_read_cb)
return bytes()
else:
offset, size = struct.unpack("<II", data)
return memoryview(sensor.get_fb().bytearray())[offset:offset+size]
# Register call backs.
interface.register_callback(raw_image_snapshot)
interface.register_callback(raw_image_read)
# Once all call backs have been registered we can start
# processing remote events. interface.loop() does not return.
interface.loop()
```
#### File: Portenta-H7/99-Tests/unittests.py
```python
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, passed):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + ("PASSED" if passed == True else "FAILED"))
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_passed = True
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
test_failed = True
test_passed = False
print_result(test, test_passed)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
```
#### File: OpenMV/16-Codes/find_barcodes.py
```python
import sensor, image, time, math
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()
# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
# a lower resolution. That said, barcode detection requires a higher resolution
# to work well so it should always be run at 640x480 in grayscale...
def barcode_name(code):
if(code.type() == image.EAN2):
return "EAN2"
if(code.type() == image.EAN5):
return "EAN5"
if(code.type() == image.EAN8):
return "EAN8"
if(code.type() == image.UPCE):
return "UPCE"
if(code.type() == image.ISBN10):
return "ISBN10"
if(code.type() == image.UPCA):
return "UPCA"
if(code.type() == image.EAN13):
return "EAN13"
if(code.type() == image.ISBN13):
return "ISBN13"
if(code.type() == image.I25):
return "I25"
if(code.type() == image.DATABAR):
return "DATABAR"
if(code.type() == image.DATABAR_EXP):
return "DATABAR_EXP"
if(code.type() == image.CODABAR):
return "CODABAR"
if(code.type() == image.CODE39):
return "CODE39"
if(code.type() == image.PDF417):
return "PDF417"
if(code.type() == image.CODE93):
return "CODE93"
if(code.type() == image.CODE128):
return "CODE128"
while(True):
clock.tick()
img = sensor.snapshot()
codes = img.find_barcodes()
for code in codes:
img.draw_rectangle(code.rect())
print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
if not codes:
print("FPS %f" % clock.fps())
```
#### File: OpenMV/18-MAVLink/mavlink_apriltags_landing_target.py
```python
import image, math, pyb, sensor, struct, time
# Parameters #################################################################
uart_baudrate = 115200
MAV_system_id = 1
MAV_component_id = 0x54
MAX_DISTANCE_SENSOR_enable = True
lens_mm = 2.8 # Standard Lens.
lens_to_camera_mm = 22 # Standard Lens.
sensor_w_mm = 3.984 # For OV7725 sensor - see datasheet.
sensor_h_mm = 2.952 # For OV7725 sensor - see datasheet.
# Only tags with a tag ID in the dictionary below will be accepted by this
# code. You may add as many tag IDs to the below dictionary as you want...
# For each tag ID you need to provide then length of the black tag border
# in mm. Any side of the tag black border square will work.
valid_tag_ids = {
0 : 165, # 8.5" x 11" tag black border size in mm
1 : 165, # 8.5" x 11" tag black border size in mm
2 : 165, # 8.5" x 11" tag black border size in mm
}
##############################################################################
# Camera Setup
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
x_res = 160 # QQVGA
y_res = 120 # QQVGA
f_x = (lens_mm / sensor_w_mm) * x_res
f_y = (lens_mm / sensor_h_mm) * y_res
c_x = x_res / 2
c_y = y_res / 2
h_fov = 2 * math.atan((sensor_w_mm / 2) / lens_mm)
v_fov = 2 * math.atan((sensor_h_mm / 2) / lens_mm)
def z_to_mm(z_translation, tag_size): # z_translation is in decimeters...
return (((z_translation * 100) * tag_size) / 165) - lens_to_camera_mm
# Link Setup
uart = pyb.UART(3, uart_baudrate, timeout_char = 1000)
# Helper Stuff
packet_sequence = 0
def checksum(data, extra): # https://github.com/mavlink/c_library_v1/blob/master/checksum.h
output = 0xFFFF
for i in range(len(data)):
tmp = data[i] ^ (output & 0xFF)
tmp = (tmp ^ (tmp << 4)) & 0xFF
output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF
tmp = extra ^ (output & 0xFF)
tmp = (tmp ^ (tmp << 4)) & 0xFF
output = ((output >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF
return output
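# The routine above is MAVLink's X.25 (CRC-16/MCRF4XX) accumulation seeded with
# 0xFFFF; "extra" is the per-message CRC_EXTRA byte that guards against message
# definition mismatches between sender and receiver.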
MAV_DISTANCE_SENSOR_message_id = 132
MAV_DISTANCE_SENSOR_min_distance = 1 # in cm
MAV_DISTANCE_SENSOR_max_distance = 10000 # in cm
MAV_DISTANCE_SENSOR_type = 0 # MAV_DISTANCE_SENSOR_LASER
MAV_DISTANCE_SENSOR_id = 0 # unused
MAV_DISTANCE_SENSOR_orientation = 25 # MAV_SENSOR_ROTATION_PITCH_270
MAV_DISTANCE_SENSOR_covariance = 0 # unused
MAV_DISTANCE_SENSOR_extra_crc = 85
# http://mavlink.org/messages/common#DISTANCE_SENSOR
# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_distance_sensor.h
def send_distance_sensor_packet(tag, tag_size):
global packet_sequence
temp = struct.pack("<lhhhbbbb",
0,
MAV_DISTANCE_SENSOR_min_distance,
MAV_DISTANCE_SENSOR_max_distance,
min(max(int(z_to_mm(tag.z_translation(), tag_size) / 10), MAV_DISTANCE_SENSOR_min_distance), MAV_DISTANCE_SENSOR_max_distance),
MAV_DISTANCE_SENSOR_type,
MAV_DISTANCE_SENSOR_id,
MAV_DISTANCE_SENSOR_orientation,
MAV_DISTANCE_SENSOR_covariance)
temp = struct.pack("<bbbbb14s",
14,
packet_sequence & 0xFF,
MAV_system_id,
MAV_component_id,
MAV_DISTANCE_SENSOR_message_id,
temp)
temp = struct.pack("<b19sh",
0xFE,
temp,
checksum(temp, MAV_DISTANCE_SENSOR_extra_crc))
packet_sequence += 1
uart.write(temp)
MAV_LANDING_TARGET_message_id = 149
MAV_LANDING_TARGET_min_distance = 1/100 # in meters
MAV_LANDING_TARGET_max_distance = 10000/100 # in meters
MAV_LANDING_TARGET_frame = 8 # MAV_FRAME_BODY_NED
MAV_LANDING_TARGET_extra_crc = 200
# http://mavlink.org/messages/common#LANDING_TARGET
# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_landing_target.h
def send_landing_target_packet(tag, w, h, tag_size):
global packet_sequence
temp = struct.pack("<qfffffbb",
0,
((tag.cx() / w) - 0.5) * h_fov,
((tag.cy() / h) - 0.5) * v_fov,
min(max(z_to_mm(tag.z_translation(), tag_size) / 1000, MAV_LANDING_TARGET_min_distance), MAV_LANDING_TARGET_max_distance),
0.0,
0.0,
0,
MAV_LANDING_TARGET_frame)
temp = struct.pack("<bbbbb30s",
30,
packet_sequence & 0xFF,
MAV_system_id,
MAV_component_id,
MAV_LANDING_TARGET_message_id,
temp)
temp = struct.pack("<b35sh",
0xFE,
temp,
checksum(temp, MAV_LANDING_TARGET_extra_crc))
packet_sequence += 1
uart.write(temp)
# Main Loop
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
tags = sorted(img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y), key = lambda x: x.w() * x.h(), reverse = True)
if tags and (tags[0].id() in valid_tag_ids):
if MAX_DISTANCE_SENSOR_enable: send_distance_sensor_packet(tags[0], valid_tag_ids[tags[0].id()])
send_landing_target_packet(tags[0], img.width(), img.height(), valid_tag_ids[tags[0].id()])
img.draw_rectangle(tags[0].rect())
img.draw_cross(tags[0].cx(), tags[0].cy())
print("Distance %f mm - FPS %f" % (z_to_mm(tags[0].z_translation(), valid_tag_ids[tags[0].id()]), clock.fps()))
else:
print("FPS %f" % clock.fps())
```
#### File: OpenMV/34-Remote-Control/image_transfer_raw_as_the_controller_device.py
```python
import image, network, omv, rpc, sensor, struct, time
# The RPC library above is installed on your OpenMV Cam and provides multiple classes for
# allowing your OpenMV Cam to control another device over CAN, I2C, SPI, UART, or WIFI.
##############################################################
# Choose the interface you wish to control an OpenMV Cam over.
##############################################################
# Uncomment the below line to setup your OpenMV Cam for controlling over CAN.
#
# * message_id - CAN message to use for data transport on the can bus (11-bit).
# * bit_rate - CAN bit rate.
# * sampling_point - Tseg1/Tseg2 ratio. Typically 75%. (50.0, 62.5, 75.0, 87.5, etc.)
#
# NOTE: Master and slave message ids and can bit rates must match. Connect master can high to slave
# can high and master can low to slave can low. The can bus must be terminated with 120 ohms.
#
# interface = rpc.rpc_can_master(message_id=0x7FF, bit_rate=1000000, sampling_point=75.0)
# Uncomment the below line to setup your OpenMV Cam for controlling over I2C.
#
# * slave_addr - I2C address.
# * rate - I2C Bus Clock Frequency.
#
# NOTE: Master and slave addresses must match. Connect master scl to slave scl and master sda
# to slave sda. You must use external pull ups. Finally, both devices must share a ground.
#
# interface = rpc.rpc_i2c_master(slave_addr=0x12, rate=1000000)
# Uncomment the below line to setup your OpenMV Cam for controlling over SPI.
#
# * cs_pin - Slave Select Pin.
# * freq - SPI Bus Clock Frequency
# * clk_polarity - Idle clock level (0 or 1).
# * clk_phase - Sample data on the first (0) or second edge (1) of the clock.
#
# NOTE: Master and slave settings much match. Connect CS, SCLK, MOSI, MISO to CS, SCLK, MOSI, MISO.
# Finally, both devices must share a common ground.
#
interface = rpc.rpc_spi_master(cs_pin="P3", freq=20000000, clk_polarity=1, clk_phase=0)
# Uncomment the below line to setup your OpenMV Cam for controlling over UART.
#
# * baudrate - Serial Baudrate.
#
# NOTE: Master and slave baud rates must match. Connect master tx to slave rx and master rx to
# slave tx. Finally, both devices must share a common ground.
#
# interface = rpc.rpc_uart_master(baudrate=7500000)
##############################################################
# Call Back Handlers
##############################################################
def get_frame_buffer_call_back(pixformat, framesize, cutthrough, silent):
if not silent: print("Getting Remote Frame...")
result = interface.call("raw_image_snapshot", struct.pack("<II", pixformat, framesize))
if result is not None:
w, h, pixformat, size = struct.unpack("<IIII", result)
img = image.Image(w, h, pixformat, copy_to_fb=True) # Alloc cleared frame buffer.
if cutthrough:
# Fast cutthrough data transfer with no error checking.
# Before starting the cut through data transfer we need to sync both the master and the
# slave device. On return both devices are in sync.
result = interface.call("raw_image_read")
if result is not None:
# GET BYTES NEEDS TO EXECUTE NEXT IMMEDIATELY WITH LITTLE DELAY NEXT.
# Read all the image data in one very large transfer.
interface.get_bytes(img.bytearray(), 5000) # timeout
else:
# Slower data transfer with error checking.
# Transfer 32/8 KB chunks.
chunk_size = (1 << 15) if omv.board_type() == "H7" else (1 << 13)
if not silent: print("Reading %d bytes..." % size)
for i in range(0, size, chunk_size):
ok = False
for j in range(3): # Try up to 3 times.
result = interface.call("raw_image_read", struct.pack("<II", i, chunk_size))
if result is not None:
img.bytearray()[i:i+chunk_size] = result # Write the image data.
if not silent: print("%.2f%%" % ((i * 100) / size))
ok = True
break
if not silent: print("Retrying... %d/2" % (j + 1))
if not ok:
if not silent: print("Error!")
return None
return img
else:
if not silent: print("Failed to get Remote Frame!")
return None
clock = time.clock()
while(True):
clock.tick()
# You may change the pixformat and the framesize of the image transfered from the remote device
# by modifying the below arguments.
#
# When cutthrough is False the image will be transferred through the RPC library with CRC and
# retry protection on all data moved. For faster data transfer set cutthrough to True so that
# get_bytes() and put_bytes() are called after an RPC call completes to transfer data
# more quickly from one image buffer to another. Note: This works because once an RPC call
# completes successfully both the master and slave devices are synchronized completely.
#
img = get_frame_buffer_call_back(sensor.RGB565, sensor.QQVGA, cutthrough=True, silent=True)
if img is not None:
pass # You can process the image here.
print(clock.fps())
```
#### File: scripts/libraries/modbus.py
```python
import struct
class ModbusRTU():
def __init__(self, uart, slave_id=0x01, register_num=30):
self.SLAVE_ID = slave_id
self.uart = uart
self.register_num = register_num
self.REGISTER = [0]*self.register_num
self.CRC16_TABLE = [
0x0000,0xC0C1,0xC181,0x0140,0xC301,0x03C0,0x0280,0xC241,0xC601,
0x06C0,0x0780,0xC741,0x0500,0xC5C1,0xC481,0x0440,0xCC01,0x0CC0,
0x0D80,0xCD41,0x0F00,0xCFC1,0xCE81,0x0E40,0x0A00,0xCAC1,0xCB81,
0x0B40,0xC901,0x09C0,0x0880,0xC841,0xD801,0x18C0,0x1980,0xD941,
0x1B00,0xDBC1,0xDA81,0x1A40,0x1E00,0xDEC1,0xDF81,0x1F40,0xDD01,
0x1DC0,0x1C80,0xDC41,0x1400,0xD4C1,0xD581,0x1540,0xD701,0x17C0,
0x1680,0xD641,0xD201,0x12C0,0x1380,0xD341,0x1100,0xD1C1,0xD081,
0x1040,0xF001,0x30C0,0x3180,0xF141,0x3300,0xF3C1,0xF281,0x3240,
0x3600,0xF6C1,0xF781,0x3740,0xF501,0x35C0,0x3480,0xF441,0x3C00,
0xFCC1,0xFD81,0x3D40,0xFF01,0x3FC0,0x3E80,0xFE41,0xFA01,0x3AC0,
0x3B80,0xFB41,0x3900,0xF9C1,0xF881,0x3840,0x2800,0xE8C1,0xE981,
0x2940,0xEB01,0x2BC0,0x2A80,0xEA41,0xEE01,0x2EC0,0x2F80,0xEF41,
0x2D00,0xEDC1,0xEC81,0x2C40,0xE401,0x24C0,0x2580,0xE541,0x2700,
0xE7C1,0xE681,0x2640,0x2200,0xE2C1,0xE381,0x2340,0xE101,0x21C0,
0x2080,0xE041,0xA001,0x60C0,0x6180,0xA141,0x6300,0xA3C1,0xA281,
0x6240,0x6600,0xA6C1,0xA781,0x6740,0xA501,0x65C0,0x6480,0xA441,
0x6C00,0xACC1,0xAD81,0x6D40,0xAF01,0x6FC0,0x6E80,0xAE41,0xAA01,
0x6AC0,0x6B80,0xAB41,0x6900,0xA9C1,0xA881,0x6840,0x7800,0xB8C1,
0xB981,0x7940,0xBB01,0x7BC0,0x7A80,0xBA41,0xBE01,0x7EC0,0x7F80,
0xBF41,0x7D00,0xBDC1,0xBC81,0x7C40,0xB401,0x74C0,0x7580,0xB541,
0x7700,0xB7C1,0xB681,0x7640,0x7200,0xB2C1,0xB381,0x7340,0xB101,
0x71C0,0x7080,0xB041,0x5000,0x90C1,0x9181,0x5140,0x9301,0x53C0,
0x5280,0x9241,0x9601,0x56C0,0x5780,0x9741,0x5500,0x95C1,0x9481,
0x5440,0x9C01,0x5CC0,0x5D80,0x9D41,0x5F00,0x9FC1,0x9E81,0x5E40,
0x5A00,0x9AC1,0x9B81,0x5B40,0x9901,0x59C0,0x5880,0x9841,0x8801,
0x48C0,0x4980,0x8941,0x4B00,0x8BC1,0x8A81,0x4A40,0x4E00,0x8EC1,
0x8F81,0x4F40,0x8D01,0x4DC0,0x4C80,0x8C41,0x4400,0x84C1,0x8581,
0x4540,0x8701,0x47C0,0x4680,0x8641,0x8201,0x42C0,0x4380,0x8341,
0x4100,0x81C1,0x8081,0x4040]
def any(self):
return self.uart.any()
def clear(self):
self.REGISTER = [0]*self.register_num
def crc16(self, data):
crc = 0xFFFF
for char in data:
crc = (crc >> 8) ^ self.CRC16_TABLE[((crc) ^ char) & 0xFF]
return struct.pack('<H',crc)
def handle(self, debug = False):
REQUEST = self.uart.read()
if debug:
print("GOT REQUEST: ", REQUEST)
additional_address = REQUEST[0]
error_check = REQUEST[-2:]
function_code = REQUEST[1]
data = REQUEST[2:]
RESPONSE = struct.pack('b', self.SLAVE_ID)
if self.crc16(REQUEST[:-2]) != error_check:
if debug:
print("crc not match")
return 0 # do nothing
if self.SLAVE_ID != additional_address:
if debug:
print("OpenMV slave id: ", self.SLAVE_ID)
print("got cmd id: ", additional_address)
return 0 # do nothing
if function_code == 0x03:
starting_address = struct.unpack('>h', data[:2])[0]
quantity_of_registers = struct.unpack('>h', data[2:4])[0]
response_registers = []
try:
for i in range(starting_address, starting_address+quantity_of_registers):
response_registers.append(self.REGISTER[i])
except IndexError as err:
RESPONSE += struct.pack('b', function_code|0x80)
RESPONSE += struct.pack('b', 0x02) # Illegal Data Address
if debug:
print("Illegal Data Address: ")
print(err)
else:
RESPONSE += struct.pack('b', function_code)
RESPONSE += struct.pack('b', 2*quantity_of_registers)
for b in response_registers:
RESPONSE += struct.pack('>h', b)
elif function_code == 0x06:
register_address = struct.unpack('>h', data[:2])[0]
register_value = struct.unpack('>h', data[2:4])[0]
try:
self.REGISTER[register_address] = register_value
except IndexError as err:
RESPONSE += struct.pack('b', function_code|0x80)
RESPONSE += struct.pack('b', 0x02) # Illegal Data Address
if debug:
print("Illegal Data Address: ")
print(err)
else:
RESPONSE += struct.pack('b', function_code)
RESPONSE += struct.pack('>h', register_address)
RESPONSE += struct.pack('>h', self.REGISTER[register_address])
elif function_code == 0x10:
starting_address = struct.unpack('>h', data[:2])[0]
quantity_of_registers = struct.unpack('>h', data[2:4])[0]
byte_of_registers = struct.unpack('b', data[4:5])[0]
try:
if byte_of_registers != 2*quantity_of_registers:
raise struct.error
values = struct.unpack('>%dh'%quantity_of_registers, data[5:])
except struct.error as err:
RESPONSE += struct.pack('b', function_code|0x80)
RESPONSE += struct.pack('b', 0x03) # Illegal Data Value
if debug:
print("Illegal Data Value, data length error")
else:
try:
for i in range(quantity_of_registers):
self.REGISTER[starting_address+i] = values[i]
except IndexError as err:
RESPONSE += struct.pack('b', function_code|0x80)
RESPONSE += struct.pack('b', 0x02) # Illegal Data Address
if debug:
print("Illegal Data Address: ")
print(err)
else:
RESPONSE += struct.pack('b', function_code)
RESPONSE += struct.pack('>h', starting_address)
RESPONSE += struct.pack('>h', quantity_of_registers)
else:
RESPONSE += struct.pack('b', function_code|0x80)
RESPONSE += struct.pack('b', 0x01) # Illegal Function
RESPONSE += self.crc16(RESPONSE)
if debug:
print("FUNCTION CODE: ", function_code)
print("RESPONSE: ", RESPONSE)
self.uart.write(RESPONSE)
```
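A minimal usage sketch for the ModbusRTU class above, assuming an OpenMV/MicroPython-style `pyb.UART`; the UART bus, baud rate, and register contents below are placeholders, not taken from this entry:
```python
from pyb import UART
from modbus import ModbusRTU

uart = UART(3, 115200, parity=None, stop=1, timeout=1, timeout_char=4)
modbus = ModbusRTU(uart, slave_id=0x01, register_num=30)
counter = 0
while True:
    if modbus.any():                # a request frame is waiting on the UART
        modbus.handle(debug=True)   # parse it and write back the RTU response
    else:
        # handle() packs registers as big-endian signed 16-bit values,
        # so keep the stored values within that range.
        modbus.REGISTER[0] = counter & 0x7FFF
        counter += 1
```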
#### File: unittest/script/02-rgb_to_grayscale.py
```python
def unittest(data_path, temp_path):
import image
gs = image.rgb_to_grayscale((120, 200, 120))
return (gs == 169)
```
#### File: unittest/script/05-save_decriptor.py
```python
def unittest(data_path, temp_path):
import image, os
# Load image and find keypoints
img = image.Image(data_path+"/graffiti.pgm", copy_to_fb=True)
kpts1 = img.find_keypoints(max_keypoints=150, threshold=20, normalized=False)
# Save descriptor
image.save_descriptor(kpts1, temp_path+"/graffiti2.orb")
# Load descriptor
kpts2 = image.load_descriptor(temp_path+"/graffiti2.orb")
# Match keypoints
match = image.match_descriptor(kpts1, kpts2, threshold=85)
return (match.cx() == 138 and match.cy() == 117 and \
match.x() == 36 and match.y() == 34 and \
match.w() == 251 and match.h() == 167 and \
match.count() == 150 and match.theta() == 0)
```
#### File: unittest/script/16-find_datamatrices.py
```python
def unittest(data_path, temp_path):
import image
img = image.Image("unittest/data/datamatrix.pgm", copy_to_fb=True)
matrices = img.find_datamatrices()
return len(matrices) == 1 and matrices[0][0:] == (34, 15, 90, 89, 'https://openmv.io/', 0.0, 18, 18, 18, 0)
``` |
{
"source": "jisngprk/dsGPT2",
"score": 3
} |
#### File: dsGPT2/libs/mongo_wrapper.py
```python
from torch.utils.data import Dataset
import pymongo
import json
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
class MongoWrapper:
"""
Load single turn Q,A data
"""
def __init__(self, config_path, filter_func=None):
"""
        1. MongoDB collections can be accessed through a single unified index
        2. The idx of each individual collection must guarantee document count, ordering, and uniqueness
        :param config_path: path to the db config file
"""
with open(config_path) as fp:
db_config = json.load(fp)
self.db_config = db_config
self.filter_func = filter_func
conn_str = db_config['MONGO_CONNECTION_STRING']
con_db = db_config['MONGO_CONNECTION_DB']
collection_list = db_config['COLLECTIONS']
self.connection = pymongo.MongoClient(conn_str)
self.db = self.connection.get_database(con_db)
self.collections = self._load_collections(collection_list)
self.meta_info = self._load_metainfo(collection_list)
self.ndoc = None
logging.info("[Mongo]: Loaded %s" % self.meta_info)
def __len__(self):
if not self.ndoc:
ndoc = 0
for value in self.meta_info.values():
ndoc += value['num_docs']
self.ndoc = ndoc
return self.ndoc
def __getitem__(self, idx):
docs = []
if isinstance(idx, slice):
for nidx in range(idx.start, idx.stop):
collection_name, idx = self._convert_idx(nidx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
else:
collection_name, idx = self._convert_idx(idx)
data = self.collections[collection_name].find({'idx': idx})[0]
if self.filter_func:
data = self.filter_func(data)
doc = {'data': data, 'collection_name': collection_name}
docs.append(doc)
return docs
def _load_collections(self, collection_list):
if not isinstance(collection_list, list):
collection_list = [collection_list]
collections = dict()
for col in collection_list:
collections[col] = self.db[col]
logger.info("[Mongo]: %s is loaded" % col)
return collections
def _load_metainfo(self, collection_list):
meta_info_conn = self.db['meta_info']
meta_info = OrderedDict()
for item in list(meta_info_conn.find({})):
if item['collection_name'] not in collection_list:
continue
collection_name = item['collection_name']
sub_dict = {'num_docs': item['num_docs']}
meta_info.update({collection_name: sub_dict})
prev = 0
for name, info in meta_info.items():
sub_info = {'sidx': prev, 'eidx': prev + info['num_docs']}
prev = prev + info['num_docs']
info.update(sub_info)
return meta_info
def _convert_idx(self, idx):
"""
        Convert a global idx into a per-collection idx according to the collection boundaries
        :param idx: global index across all configured collections
        :return: (collection_name, local index within that collection)
"""
collection_name = None
for name, info in self.meta_info.items():
if idx >= info['sidx'] and idx < info['eidx']:
idx = idx - info['sidx']
collection_name = name
break
return collection_name, idx
def _get_update_op(self, doc, fields):
if not isinstance(fields, list):
fields = [fields]
set_dict = dict()
for f in fields:
set_dict[f] = doc[f]
return pymongo.UpdateOne({'_id': doc['_id']}, {"$set": set_dict}, upsert=True)
def _get_insert_op(self, doc):
return pymongo.InsertOne(doc)
def update_docs(self, docs, fields):
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_update_op(doc, fields)
ops.append(op)
return ops
def insert_docs(self, docs, collection_name):
if collection_name not in self.collections:
raise KeyError
if not isinstance(docs, list):
docs = [docs]
ops = []
for doc in docs:
op = self._get_insert_op(doc)
ops.append(op)
# logging.info(ops[:10])
self.collections[collection_name].bulk_write(ops, ordered=False)
def update_meta_info(self, collection_name):
is_update = False
if collection_name in self.meta_info:
is_update = True
total_docs = self.collections[collection_name].count_documents({})
logging.info("[Update]: collection - %s " % collection_name)
logging.info("[Update]: total docs - %s " % total_docs)
logging.info("[Update]: meta info - %s " % is_update)
if is_update:
self.db['meta_info'].update_one({'collection_name': collection_name},
{'$set':{'num_docs': total_docs}})
else:
self.db['meta_info'].insert_one({'collection_name': collection_name,
'num_docs': total_docs})
collection_list = self.db_config['COLLECTIONS']
self.meta_info = self._load_metainfo(collection_list)
def export_to_file(self, fpath, collection_name):
logging.info("[Export]: %s" % fpath)
info = self.meta_info[collection_name]
info = dict(info)
num_docs = int(info['num_docs'])
with open(fpath, 'w') as fp:
text_lines = []
for idx in range(num_docs):
doc = self.__getitem__(idx)[0]
text = doc['data']['filt_text']
text += '\n'
text_lines.append(text)
if idx % 10000 == 0:
fp.writelines(text_lines)
text_lines = []
logging.info("[Write]: %d" % idx)
def create_single_index(self, collection_name, index_name, order=1):
self.collections[collection_name].create_index([(index_name, order)])
```
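A minimal usage sketch for the wrapper above. The connection string, database name, and collection names are hypothetical; only the key names (`MONGO_CONNECTION_STRING`, `MONGO_CONNECTION_DB`, `COLLECTIONS`) and the `filt_text` field come from this code:
```python
from libs.mongo_wrapper import MongoWrapper

# db_config.json (hypothetical contents):
# {
#     "MONGO_CONNECTION_STRING": "mongodb://localhost:27017",
#     "MONGO_CONNECTION_DB": "corpus",
#     "COLLECTIONS": ["web_text", "news_text"]
# }
mw = MongoWrapper("db_config.json")
print(len(mw))                       # total documents across all configured collections
doc = mw[0][0]                       # __getitem__ always returns a list of wrapped docs
print(doc["collection_name"], doc["data"].get("filt_text"))
batch = mw[0:8]                      # slicing spans collection boundaries transparently
```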
#### File: jisngprk/dsGPT2/vocab_downloader.py
```python
import os
import logging
import sys
import multiprocessing
import random
from libs.mongo_wrapper import MongoWrapper
from arguments import get_preprocessing_args
logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', stream=sys.stdout, level=logging.DEBUG)
# NUM_PROCESS = 33
# DIR_PATH = './data_files/vocab_web'
# TRAIN_FILE_NAME = 'vocab_train'
class Downloader(multiprocessing.Process):
def __init__(self, _id, idx_list, fpath, config_path):
super().__init__()
self._id = str(_id)
self._idx_list = idx_list
self.fpath = fpath
self.config_path = config_path
self.dataset = None
def run(self):
self.dataset = MongoWrapper(self.config_path)
with open(self.fpath, 'w') as fp:
text_lines = []
for count, idx in enumerate(self._idx_list):
doc = self.dataset[idx][0]
text = doc['data']['filt_text']
text += '\n'
text_lines.append(text)
if count % 1000 == 0:
fp.writelines(text_lines)
text_lines = []
logging.info("[Write (pid: %s)]: %d" % (self._id, count))
if text_lines:
fp.writelines(text_lines)
if __name__ == '__main__':
args = get_preprocessing_args()
logging.info(args)
if not os.path.exists(args.vocab_train_dir):
logging.info("[Make dir]: %s" % args.vocab_train_dir)
os.makedirs(args.vocab_train_dir)
md = MongoWrapper(args.config_src)
ndata = len(md)
idx_list = list(range(ndata))
idx_list = random.sample(idx_list, args.nsample)
nstep = args.nsample//args.num_process
logging.info("[Download]: %d samples" % args.nsample)
fname_list = []
mplist = []
for i in range(args.num_process+1):
file_length = len(idx_list[nstep*i:nstep*(i+1)])
logging.info("[File length]: %d" % file_length)
if file_length == 0:
continue
fpath = os.path.join(args.vocab_train_dir, args.vocab_train_fname + str(i) + '.txt')
cleaner = Downloader(_id=i,
idx_list=idx_list[nstep*i:nstep*(i+1)],
fpath=fpath,
config_path=args.config_src)
cleaner.daemon = True
mplist.append(cleaner)
fname_list.append(fpath)
for mp in mplist:
mp.start()
for mp in mplist:
mp.join()
``` |
{
"source": "JISock/models",
"score": 2
} |
#### File: synthetic_data_training_depth_cups/testing/object_detection_tutorial.py
```python
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from scipy.misc import imread, imsave
import pickle
import ruamel.yaml as yaml
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
sys.path.append("../../..")
from utils import label_map_util
from utils import visualization_utils as vis_util
# # Model preparation
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '../training/faster_rcnn_inception_resnet_v2_atrous_synthetic/frozen_inference_graph.pb'
# PATH_TO_CKPT = '/home/juil/workspace/tensorflow_object_detection/object_detection/models/model/train/inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '../../../data/synthetic_label_map_cup.pbtxt'
print(PATH_TO_LABELS)
NUM_CLASSES = 1
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolutional network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# # Detection
# PATH_TO_TEST_IMAGES_DIR = '/home/juil/Downloads/training_scene_generator_20170901/sixd_toolkit-master/output/render'
# PATH_TO_TEST_IMAGES_DIR = '/home/juil/workspace/6DOF-datasets/doumanoglou/test/01'
# # img_path = PATH_TO_TEST_IMAGES_DIR+'/coffee_{:03d}/depth/{:04d}.png'
# img_path = PATH_TO_TEST_IMAGES_DIR+'/depth/{:04d}.png'
# gt_path = PATH_TO_TEST_IMAGES_DIR+'/gt.yml'
#
# Detection_results_and_GT = list()
# idx = 0
# with detection_graph.as_default():
# with tf.Session(graph=detection_graph) as sess:
# with open(gt_path, 'r') as f:
# gt = yaml.load(f, Loader=yaml.CLoader)
# for img_idx in range(1, 50):
# print(idx)
# # image = Image.open(img_path.format(img_idx))
# img = imread(img_path.format(img_idx))
# height = img.shape[0]
# width = img.shape[1]
# output_im = np.zeros((height, width, 3))
# output_im[:, :, 0] = img
# output_im[:, :, 1] = img
# output_im[:, :, 2] = img
# imsave('temp.png', output_im)
# image = Image.open('temp.png')
# image_np = load_image_into_numpy_array(image)
# rgb_image_np = load_image_into_numpy_array(Image.open('/home/juil/workspace/6DOF-datasets/doumanoglou/test/01/rgb/{:04d}.png'
# .format(img_idx)))
# rgb_image_np2 = load_image_into_numpy_array(
# Image.open('/home/juil/workspace/6DOF-datasets/doumanoglou/test/01/rgb/{:04d}.png'
# .format(img_idx)))
# # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
# image_np_expanded = np.expand_dims(image_np, axis=0)
# image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# # Each box represents a part of the image where a particular object was detected.
# boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# # Each score represent how level of confidence for each of the objects.
# # Score is shown on the result image, together with the class label.
# scores = detection_graph.get_tensor_by_name('detection_scores:0')
# classes = detection_graph.get_tensor_by_name('detection_classes:0')
# num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# # Actual detection.
# (boxes, scores, classes, num_detections) = sess.run(
# [boxes, scores, classes, num_detections],
# feed_dict={image_tensor: image_np_expanded})
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_np,
# np.squeeze(boxes),
# np.squeeze(classes).astype(np.int32),
# np.squeeze(scores),
# category_index,
# use_normalized_coordinates=True,
# line_thickness=4,
# max_boxes_to_draw=100,
# min_score_thresh=.1
# )
# im_width, im_height = image_np.shape[0:2]
# Scaled_boxes = np.zeros([int(num_detections[0]), 4])
# Scaled_boxes[:, 0] = boxes[0, 0:int(num_detections[0]), 0] * im_width
# Scaled_boxes[:, 1] = boxes[0, 0:int(num_detections[0]), 1] * im_height
# Scaled_boxes[:, 2] = boxes[0, 0:int(num_detections[0]), 2] * im_width
# Scaled_boxes[:, 3] = boxes[0, 0:int(num_detections[0]), 3] * im_height
#
# # for i in range(0, 2):
# # Scaled_boxes[:, i] = boxes[0, 0:int(num_detections[0]), i] * im_width
# # for i in range(2, 4):
# # Scaled_boxes[:, i] = boxes[0, 0:int(num_detections[0]), i] * im_height
# Scaled_scores = np.zeros([int(num_detections[0]), ])
# Scaled_scores[:] = scores[0][0:int(num_detections[0])]
# # ---------------------------------------------------------------------------groundtruth
# GroundTruth = np.zeros([len(gt[img_idx]), 4])
# for c, obj in enumerate(gt[img_idx], 0):
# GroundTruth[c, 0] = float(obj['obj_bb'][1])
# GroundTruth[c, 1] = float(obj['obj_bb'][0])
# GroundTruth[c, 2] = float(obj['obj_bb'][1] + obj['obj_bb'][3])
# GroundTruth[c, 3] = float(obj['obj_bb'][0] + obj['obj_bb'][2])
#
# image_np2 = load_image_into_numpy_array(image)
# boxes2 = boxes
# boxes2.fill(0)
# scores2 = scores
# scores2.fill(0)
# classes2 = classes
# for c, obj in enumerate(gt[img_idx], 0):
# # boxes[0, c, 0:2] = np.divide(map(float,obj['obj_bb'][0:2]),im_width)
# # boxes[0, c, 2:4] = np.divide(map(float,obj['obj_bb'][0:2])+map(float,obj['obj_bb'][4:-3:-1]),im_height)
# # ymin
# boxes2[0, c, 0] = np.divide(float(obj['obj_bb'][1]), im_width)
# # xmin
# boxes2[0, c, 1] = np.divide(float(obj['obj_bb'][0]), im_height)
# # ymax
# boxes2[0, c, 2] = np.divide(float(obj['obj_bb'][1] + obj['obj_bb'][3]), im_width)
# # xmax
# boxes2[0, c, 3] = np.divide(float(obj['obj_bb'][0] + obj['obj_bb'][2]), im_height)
# scores2[0][c] = 1.0
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_np2,
# np.squeeze(boxes2),
# np.squeeze(classes2).astype(np.int32),
# np.squeeze(scores2),
# category_index,
# use_normalized_coordinates=True,
# line_thickness=4,
# max_boxes_to_draw=100,
# min_score_thresh=.4
# )
#
# Detection_results_and_GT.append({'Number_of_detection': num_detections, 'detected_boxes': Scaled_boxes,
# 'detected_scores': Scaled_scores, 'GroundTruth': GroundTruth})
#
# plt.imsave(fname='/home/juil/Downloads/synthetic_data_analysis/analysis/detection_result/real/rgb_detection_real_{}.png'.format(idx),arr=rgb_image_np)
# plt.imsave(fname='/home/juil/Downloads/synthetic_data_analysis/analysis/detection_result/synthetic/rgb_gt_real_{}.png'.format(idx), arr=rgb_image_np2)
# idx += 1
#
#
# with open('Detection_results_and_GT_real_data_46983.pkl', 'wb') as handle:
# pickle.dump(Detection_results_and_GT, handle)
#
#
#
# PATH_TO_TEST_IMAGES_DIR = '/home/juil/Downloads/training_scene_generator_20170901/sixd_toolkit-master/output/render'
# # PATH_TO_TEST_IMAGES_DIR = '/home/juil/workspace/6DOF-datasets/doumanoglou/test/01'
# img_path = PATH_TO_TEST_IMAGES_DIR+'/coffee_{:03d}/depth/{:04d}.png'
# # img_path = PATH_TO_TEST_IMAGES_DIR + '/depth/{:04d}.png'
# gt_path = PATH_TO_TEST_IMAGES_DIR + '/coffee_{:03d}/gt.yml'
# idx=0
# Detection_results_and_GT = list()
# with detection_graph.as_default():
# with tf.Session(graph=detection_graph) as sess:
# for scene in range(4000,4003):
# with open(gt_path.format(scene), 'r') as f:
# gt = yaml.load(f, Loader=yaml.CLoader)
# for img_idx in range(1,72):
# print(idx)
# img = imread(img_path.format(scene,img_idx))
# height = img.shape[0]
# width = img.shape[1]
# output_im = np.zeros((height, width, 3))
# output_im[:, :, 0] = img
# output_im[:, :, 1] = img
# output_im[:, :, 2] = img
# imsave('temp.png', output_im)
# image = Image.open('temp.png')
# image_np = load_image_into_numpy_array(image)
# rgb_image_np = load_image_into_numpy_array(Image.open(PATH_TO_TEST_IMAGES_DIR+'/coffee_{:03d}/rgb/{:04d}.png'.format(scene,img_idx)))
# rgb_image_np2 = load_image_into_numpy_array(Image.open(PATH_TO_TEST_IMAGES_DIR+'/coffee_{:03d}/rgb/{:04d}.png'.format(scene,img_idx)))
# # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
# image_np_expanded = np.expand_dims(image_np, axis=0)
# image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# # Each box represents a part of the image where a particular object was detected.
# boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# # Each score represent how level of confidence for each of the objects.
# # Score is shown on the result image, together with the class label.
# scores = detection_graph.get_tensor_by_name('detection_scores:0')
# classes = detection_graph.get_tensor_by_name('detection_classes:0')
# num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# # Actual detection.
# (boxes, scores, classes, num_detections) = sess.run(
# [boxes, scores, classes, num_detections],
# feed_dict={image_tensor: image_np_expanded})
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_np,
# np.squeeze(boxes),
# np.squeeze(classes).astype(np.int32),
# np.squeeze(scores),
# category_index,
# use_normalized_coordinates=True,
# line_thickness=4,
# max_boxes_to_draw = 100,
# min_score_thresh=.1
# )
# im_width, im_height = image_np.shape[0:2]
# Scaled_boxes = np.zeros([int(num_detections[0]), 4])
# Scaled_boxes[:, 0] = boxes[0, 0:int(num_detections[0]), 0] * im_width
# Scaled_boxes[:, 1] = boxes[0, 0:int(num_detections[0]), 1] * im_height
# Scaled_boxes[:, 2] = boxes[0, 0:int(num_detections[0]), 2] * im_width
# Scaled_boxes[:, 3] = boxes[0, 0:int(num_detections[0]), 3] * im_height
# # for i in range(0,2):
# # Scaled_boxes[:,i] = boxes[0,0:int(num_detections[0]),i]*im_width
# # for i in range(2,4):
# # Scaled_boxes[:,i] = boxes[0,0:int(num_detections[0]),i]*im_height
# Scaled_scores = np.zeros([int(num_detections[0]),])
# Scaled_scores[:] = scores[0][0:int(num_detections[0])]
# # ---------------------------------------------------------------------------groundtruth
# GroundTruth = np.zeros([len(gt[img_idx]), 4])
# for c, obj in enumerate(gt[img_idx], 0):
# GroundTruth[c, 0] = float(obj['obj_bb'][0])
# GroundTruth[c, 1] = float(obj['obj_bb'][1])
# GroundTruth[c, 2] = float(obj['obj_bb'][0] + obj['obj_bb'][2])
# GroundTruth[c, 3] = float(obj['obj_bb'][1] + obj['obj_bb'][3])
#
# image_np2 = load_image_into_numpy_array(image)
# boxes2 = boxes
# boxes2.fill(0)
# scores2 = scores
# scores2.fill(0)
# classes2 = classes
# for c,obj in enumerate(gt[img_idx],0):
# # boxes[0, c, 0:2] = np.divide(map(float,obj['obj_bb'][0:2]),im_width)
# # boxes[0, c, 2:4] = np.divide(map(float,obj['obj_bb'][0:2])+map(float,obj['obj_bb'][4:-3:-1]),im_height)
# # ymin
# boxes2[0, c, 0] = np.divide(float(obj['obj_bb'][0]), im_height)
# # xmin
# boxes2[0, c, 1] = np.divide(float(obj['obj_bb'][1]),im_width)
# # ymax
# boxes2[0, c, 2] = np.divide(float(obj['obj_bb'][0] + obj['obj_bb'][2]), im_height)
# # xmax
# boxes2[0, c, 3] = np.divide(float(obj['obj_bb'][1] + obj['obj_bb'][3]), im_width)
# scores2[0][c] = 1.0
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_np2,
# np.squeeze(boxes2),
# np.squeeze(classes2).astype(np.int32),
# np.squeeze(scores2),
# category_index,
# use_normalized_coordinates=True,
# line_thickness=4,
# max_boxes_to_draw=100,
# min_score_thresh=.1
# )
# # plt.imsave(fname='/home/juil/Downloads/synthetic_data_analysis/analysis/detection_result/synthetic_detection/detection_synthetic_{}.png'.format(idx), arr=image_np)
# # plt.imsave(fname='/home/juil/Downloads/synthetic_data_analysis/analysis/detection_result/synthetic_gt/gt_synthetic_{}.png'.format(idx), arr=image_np2)
# Detection_results_and_GT.append({'Number_of_detection':num_detections,'detected_boxes': Scaled_boxes,'detected_scores': Scaled_scores,'GroundTruth': GroundTruth})
# idx+=1
#
# with open('results_synthetic_dataset.pkl', 'wb') as handle:
# pickle.dump(Detection_results_and_GT, handle)
# # Detection
# PATH_TO_TEST_IMAGES_DIR = '/home/juil/Downloads/training_scene_generator_20170901/sixd_toolkit-master/output/render'
PATH_TO_TEST_IMAGES_DIR = '/home/juil/Downloads/synthetic_data_analysis/generation'
# img_path = PATH_TO_TEST_IMAGES_DIR+'/coffee_{:03d}/depth/{:04d}.png'
img_path = PATH_TO_TEST_IMAGES_DIR+'/depth/{:04d}.png'
gt_path = PATH_TO_TEST_IMAGES_DIR+'/gt.yml'
Detection_results_and_GT = list()
idx = 0
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
with open(gt_path, 'r') as f:
gt = yaml.load(f, Loader=yaml.CLoader)
for img_idx in range(1, 50):
print(idx)
# image = Image.open(img_path.format(img_idx))
img = imread(img_path.format(img_idx))
height = img.shape[0]
width = img.shape[1]
output_im = np.zeros((height, width, 3))
output_im[:, :, 0] = img
output_im[:, :, 1] = img
output_im[:, :, 2] = img
imsave('temp.png', output_im)
image = Image.open('temp.png')
image_np = load_image_into_numpy_array(image)
rgb_image_np = load_image_into_numpy_array(Image.open('/home/juil/workspace/6DOF-datasets/doumanoglou/test/01/rgb/{:04d}.png'
.format(img_idx)))
rgb_image_np2 = load_image_into_numpy_array(
Image.open('/home/juil/workspace/6DOF-datasets/doumanoglou/test/01/rgb/{:04d}.png'
.format(img_idx)))
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the detected objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=4,
max_boxes_to_draw=100,
min_score_thresh=.1
)
im_width, im_height = image_np.shape[0:2]
Scaled_boxes = np.zeros([int(num_detections[0]), 4])
Scaled_boxes[:, 0] = boxes[0, 0:int(num_detections[0]), 0] * im_width
Scaled_boxes[:, 1] = boxes[0, 0:int(num_detections[0]), 1] * im_height
Scaled_boxes[:, 2] = boxes[0, 0:int(num_detections[0]), 2] * im_width
Scaled_boxes[:, 3] = boxes[0, 0:int(num_detections[0]), 3] * im_height
# for i in range(0, 2):
# Scaled_boxes[:, i] = boxes[0, 0:int(num_detections[0]), i] * im_width
# for i in range(2, 4):
# Scaled_boxes[:, i] = boxes[0, 0:int(num_detections[0]), i] * im_height
Scaled_scores = np.zeros([int(num_detections[0]), ])
Scaled_scores[:] = scores[0][0:int(num_detections[0])]
# ---------------------------------------------------------------------------groundtruth
GroundTruth = np.zeros([len(gt[img_idx]), 4])
for c, obj in enumerate(gt[img_idx], 0):
GroundTruth[c, 0] = float(obj['obj_bb'][1])
GroundTruth[c, 1] = float(obj['obj_bb'][0])
GroundTruth[c, 2] = float(obj['obj_bb'][1] + obj['obj_bb'][3])
GroundTruth[c, 3] = float(obj['obj_bb'][0] + obj['obj_bb'][2])
image_np2 = load_image_into_numpy_array(image)
boxes2 = boxes
boxes2.fill(0)
scores2 = scores
scores2.fill(0)
classes2 = classes
for c, obj in enumerate(gt[img_idx], 0):
# boxes[0, c, 0:2] = np.divide(map(float,obj['obj_bb'][0:2]),im_width)
# boxes[0, c, 2:4] = np.divide(map(float,obj['obj_bb'][0:2])+map(float,obj['obj_bb'][4:-3:-1]),im_height)
# ymin
boxes2[0, c, 0] = np.divide(float(obj['obj_bb'][1]), im_width)
# xmin
boxes2[0, c, 1] = np.divide(float(obj['obj_bb'][0]), im_height)
# ymax
boxes2[0, c, 2] = np.divide(float(obj['obj_bb'][1] + obj['obj_bb'][3]), im_width)
# xmax
boxes2[0, c, 3] = np.divide(float(obj['obj_bb'][0] + obj['obj_bb'][2]), im_height)
scores2[0][c] = 1.0
vis_util.visualize_boxes_and_labels_on_image_array(
image_np2,
np.squeeze(boxes2),
np.squeeze(classes2).astype(np.int32),
np.squeeze(scores2),
category_index,
use_normalized_coordinates=True,
line_thickness=4,
max_boxes_to_draw=100,
min_score_thresh=.4
)
Detection_results_and_GT.append({'Number_of_detection': num_detections, 'detected_boxes': Scaled_boxes,
'detected_scores': Scaled_scores, 'GroundTruth': GroundTruth})
# plt.imsave(fname='/home/juil/Downloads/synthetic_data_analysis/analysis/detection_result/real/rgb_detection_real_{}.png'.format(idx),arr=rgb_image_np)
# plt.imsave(fname='/home/juil/Downloads/synthetic_data_analysis/analysis/detection_result/synthetic/rgb_gt_real_{}.png'.format(idx), arr=rgb_image_np2)
idx += 1
with open('Detection_results_and_GT_real_generated_data_46983.pkl', 'wb') as handle:
pickle.dump(Detection_results_and_GT, handle)
``` |
{
"source": "JiSoft/python_test_api",
"score": 3
} |
#### File: python_test_api/youtrack/test_create_issue.py
```python
import unittest
from my_test_api import TestAPI
class TestCreateIssue(TestAPI):
def test_create_issue(self):
params = {
'project': 'API',
'summary': 'test issue by robots',
'description': 'You are mine ! ',
}
response = self.put('/issue/', params)
issue_id = response.headers['Location'].split('/')[-1]
print('Created item ID is ', issue_id)
self.assertEquals(response.status_code, 201)
response = self.get('/issue/' + issue_id)
self.assertEquals(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JisoonPark/writeup",
"score": 3
} |
#### File: resource/Flag_collision/crc.py
```python
from zlib import crc32
import string, random
# ---- Utilities ----
POLYNOMIAL = 0x104C11DB7 # Generator polynomial. Do not modify, because there are many dependencies
MASK = (1 << 32) - 1
def get_crc32(data):
crc = crc32(data, 0)
return reverse32(crc & MASK)
def reverse32(x):
y = 0
for i in range(32):
y = (y << 1) | (x & 1)
x >>= 1
return y
# ---- Polynomial arithmetic ----
# Returns polynomial x multiplied by polynomial y modulo the generator polynomial.
def multiply_mod(x, y):
# Russian peasant multiplication algorithm
z = 0
while y != 0:
z ^= x * (y & 1)
y >>= 1
x <<= 1
if (x >> 32) & 1 != 0:
x ^= POLYNOMIAL
return z
# Returns polynomial x to the power of natural number y modulo the generator polynomial.
def pow_mod(x, y):
# Exponentiation by squaring
z = 1
while y != 0:
if y & 1 != 0:
z = multiply_mod(z, x)
x = multiply_mod(x, x)
y >>= 1
return z
# Computes polynomial x divided by polynomial y, returning the quotient and remainder.
def divide_and_remainder(x, y):
if y == 0:
raise ValueError("Division by zero")
if x == 0:
return (0, 0)
ydeg = get_degree(y)
z = 0
for i in range(get_degree(x) - ydeg, -1, -1):
if (x >> (i + ydeg)) & 1 != 0:
x ^= y << i
z |= 1 << i
return (z, x)
# Returns the reciprocal of polynomial x with respect to the modulus polynomial m.
def reciprocal_mod(x):
# Based on a simplification of the extended Euclidean algorithm
y = x
x = POLYNOMIAL
a = 0
b = 1
while y != 0:
q, r = divide_and_remainder(x, y)
c = a ^ multiply_mod(q, b)
x = y
y = r
a = b
b = c
if x == 1:
return a
else:
raise ValueError("Reciprocal does not exist")
def get_degree(x):
return x.bit_length() - 1
def modify_crc32(data, offset, newcrc, printstatus=False):
length = len(data)
if offset + 4 > length:
raise ValueError("Byte offset plus 4 exceeds file length")
# Read entire file and calculate original CRC-32 value
crc = get_crc32(data)
if printstatus:
print("Original CRC-32: {:08X}".format(reverse32(crc)))
# Compute the change to make
delta = crc ^ newcrc
delta = multiply_mod(reciprocal_mod(pow_mod(2, (length - offset) * 8)), delta)
# Patch 4 bytes in the file
bytes4 = bytearray(data[offset:offset + 4])
if len(bytes4) != 4:
raise IOError("Cannot read 4 bytes at offset")
for i in range(4):
bytes4[i] ^= (reverse32(delta) >> (i * 8)) & 0xFF
data = data[:offset] + str(bytes4) + data[offset + 4:]
if printstatus:
print("Computed and wrote patch")
# Recheck entire file
if get_crc32(data) != newcrc:
return None
elif printstatus:
print("New CRC-32 successfully verified")
return data
s = string.letters + string.digits
def is_printable(msg):
for c in msg:
if ord(c) < 32 or ord(c) > 126 or c in "\n\t\r^\",":
return False
return True
def get_crc_pair(l):
l -= 6
while True:
r1 = "ASIS{" + "".join([random.choice(s) for _ in range(l)]) + "}"
crc = get_crc32(r1)
if crc < 0:
continue
r2 = "ASIS{" + "".join([random.choice(s) for _ in range(l)]) + "}"
for i in range(5, l + 2):
r2 = modify_crc32(r2, i, crc, False)
if r1 != None and crc32(r1) == crc32(r2) and is_printable(r2):
print "\"%d\":\"%s,%s\""%(l + 6, r1, r2)
return "%s,%s"%(r1, r2)
```
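As a small illustration of the collision mechanics above, `modify_crc32` can be driven directly to give a second flag-shaped string the same checksum as a reference one. Both strings below are made-up placeholders; `get_crc32` returns the bit-reversed CRC used internally, and equal reversed CRCs imply equal plain `zlib.crc32` values:
```python
reference = "ASIS{0123456789abcdefghij}"
candidate = "ASIS{AAAAAAAAAAAAAAAAAAAA}"
target = get_crc32(reference)                       # bit-reversed CRC-32 of the reference
patched = modify_crc32(candidate, 5, target, True)  # patch 4 bytes right after "ASIS{"
if patched is not None:
    assert get_crc32(patched) == target
    assert crc32(reference) == crc32(patched)       # the plain zlib CRCs collide as well
```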
#### File: resource/pyc_decompile/ex.py
```python
import time
from sys import exit
from hashlib import sha512
import requests
def make_flag():
now = time.localtime(time.time())
seed = time.strftime('%m/%d/HJEJSH', time.localtime())
hs = sha512(seed).hexdigest()
start = now.tm_hour % 3 + 1
end = start * ((now.tm_min + 2) % 30 + 10)
ok = hs[start:end]
return ok
URL = 'http://wargame.kr:8080/pyc_decompile'
data = {'flag' : make_flag()}
response = requests.get(URL, params = data)
#print "server time : " + response.headers['Date']
#print "local time : " + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
print response.text
``` |
{
"source": "jisson/django-simple-domain",
"score": 2
} |
#### File: django-simple-domain/django_simple_domain/middleware_1_10.py
```python
from django.contrib.sites.models import Site
from django.utils.deprecation import MiddlewareMixin
from django_simple_domain import django_simple_domain_settings
"""
According to Django 1.10 release notes, Middleware have been updated:
'''
Changed in Django 1.10:
A new style of middleware was introduced for use with the new MIDDLEWARE setting.
If you’re using the old MIDDLEWARE_CLASSES setting, you’ll need to adapt old, custom middleware
before using the new setting. This document describes new-style middleware. Refer to this page in older versions
of the documentation for a description of how old-style middleware works.
@see: https://docs.djangoproject.com/en/1.10/topics/http/middleware/#upgrading-middleware
'''
This Python module provides middleware compatible with Django 1.10.
"""
__author__ = 'Jisson | <EMAIL>'
# TODO: Reformat middleware classes to handle compatibility issues
class SetDynamicSitesMiddleware(MiddlewareMixin):
"""
    This middleware changes SITE_ID dynamically in a thread-safe way.
"""
@staticmethod
def process_request(request):
"""
Assign the correct value to the SITE_ID property, ensuring thread safe access to that value.
"""
domain_name = django_simple_domain_settings.DOMAIN_NAME
try:
current_site = Site.objects.get(domain=domain_name)
django_simple_domain_settings.settings.SITE_ID._set(int(current_site.id))
except Site.DoesNotExist:
# TODO: It should not happen
django_simple_domain_settings.settings.SITE_ID._set(1)
return None
```
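For context, a hypothetical `settings.py` fragment showing how this middleware would be wired up. Only the setting names (`SITE_ID`, `SIMPLE_SITES_DOMAIN_NAME`), the `SiteID` class path, and the middleware path appear in this package's code; the constructor arguments and concrete values are assumptions:
```python
# settings.py (illustrative sketch, not part of this repository)
from django_simple_domain.site_id_local import SiteID

SIMPLE_SITES_DOMAIN_NAME = "www.example.com"  # domain used to look up the Site row
SITE_ID = SiteID()                            # thread-local holder; default constructor assumed
INSTALLED_APPS = [
    # ...
    'django.contrib.sites',       # required, see check_installed_apps_setting()
    'django_simple_domain',
]
MIDDLEWARE = [
    # ...
    'django_simple_domain.middleware_1_10.SetDynamicSitesMiddleware',
]
```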
#### File: django-simple-domain/django_simple_domain/services.py
```python
import logging
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured, FieldError, ValidationError
from django_simple_domain.site_id_local import SiteID
__author__ = 'Jisson | <EMAIL>'
std_logger = logging.getLogger(__name__)
def get_or_create_sites(domain_name):
"""
Retrieve or create a Site entity based on given domain_name.
:param domain_name: The domain name to look for. That property should come from user settings
:type domain_name: str
:return: A Site entity if found
:raise ImproperlyConfigured
"""
try:
site = Site.objects.get(domain=domain_name)
except Site.DoesNotExist:
site = Site(domain=domain_name, name=_("Created with django_simple_domain"))
site.save()
except Site.MultipleObjectsReturned:
raise ImproperlyConfigured("Multiple site object returned for defined SIMPLE_SITES_DOMAIN_NAME. "
"Please verify sites models in your database.")
except (FieldError, ValidationError) as e:
std_logger.error(e)
raise ImproperlyConfigured("Can't create Site model with given SIMPLE_SITES_DOMAIN_NAME. "
"Please ensure that the provided domain name is correctly formatted.")
return site
def check_installed_apps_setting():
"""
Check if INSTALLED_APPS contains 'django.contrib.sites' which is required to run 'django_simple_domain'.
:raise ImproperlyConfigured: if 'django.contrib.sites' not in INSTALLED_APPS
"""
if not apps.is_installed('django.contrib.sites'):
error_message = _("django.contrib.sites must be added to INSTALLED_APPS to use django_simple_domain")
raise ImproperlyConfigured(error_message)
def check_site_id_setting(site_id):
"""
Check if SITE_ID was correctly defined by verifying if it is an instance of
'django_simple_domain.site_id_local.SiteId'.
:param site_id: Define the setting to use for the django site framework
:type site_id: django_simple_domain.site_id_local.SiteId
:raise ImproperlyConfigured: if SITE_ID was not correctly defined in global settings
"""
if site_id is None or not isinstance(site_id, SiteID):
error_message = _("SITE_ID is invalid. Please verify that SITE_ID is an instance of type "
"'django_simple_domain.site_id_local.SiteId'")
raise ImproperlyConfigured(error_message)
``` |
{
"source": "jiss-software/jiss-rendering-service",
"score": 2
} |
#### File: jiss-rendering-service/handler/Pdf.py
```python
import core
import tornado
import os
import uuid
from utils import WHITE  # default watermark color used in _get_args()
# 'grayscale %s' % '-g'
# -O, --orientation <orientation> Set orientation to Landscape or Portrait
# -s, --page-size <Size> Set paper size to: A4, Letter, etc.
# -page-height <unitreal> Page height
# -s, --page-size <Size> Set paper size to: A4, Letter, etc.
# --page-width <unitreal> Page width
# --background Do print background (default)
# --no-background Do not print background
# --disable-external-links Do not make links to remote web pages
# --enable-external-links
# --images Do load or print images (default)
# --no-images Do not load or print images
# --disable-javascript Do not allow web pages to run javascript
# --enable-javascript
# --javascript-delay <msec> Wait some milliseconds for javascript
# finish (default 200)
# --print-media-type Use print media-type instead of screen
# --no-print-media-type Do not use print media-type instead of screen (default)
# --disable-smart-shrinking Disable the intelligent shrinking strategy
# used by WebKit that makes the pixel/dpi
# ratio none constant
# --enable-smart-shrinking Enable the intelligent shrinking strategy
# used by WebKit that makes the pixel/dpi
# ratio none constant (default)
# --zoom <float> Use this zoom factor (default 1)
class PdfHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request to generate PDF from url')
name = '/tmp/pdf-%s.pdf' % str(uuid.uuid4())
os.system('wkhtmltopdf %s %s' % (self.get_query_argument('url'), name))
self.response_file(name)
def _get_args(self):
# Read arguments
args = {
'angle': int(self.request.headers.get('X-Jiss-Angle', default=45)),
'blur': int(self.request.headers.get('X-Jiss-Blur', default=0)),
'color': self.request.headers.get('X-Jiss-Color', default=None),
'position': self.request.headers.get('X-Jiss-Position', default='center_middle'),
'proportion': float(self.request.headers.get('X-Jiss-Proportion', default=0.9)),
'opacity': float(self.request.headers.get('X-Jiss-Opacity', default=0.3)),
'text': self.request.headers.get('X-Jiss-Text', default='Demo'),
'resize': self.request.headers.get('X-Jiss-Resize', default=None),
'repeat': self.request.headers.get('X-Jiss-Repeat', default=None)
# -B, --margin-bottom <unitreal> Set the page bottom margin
# -L, --margin-left <unitreal> Set the page left margin (default 10mm)
# -R, --margin-right <unitreal> Set the page right margin (default 10mm)
# -T, --margin-top <unitreal> Set the page top margin
}
# Parse if needed
args['color'] = [int(x) for x in args['color'].split(',')] if args['color'] else WHITE
args['resize'] = [int(x) for x in args['resize'].split('x')] if args['resize'] else None
args['repeat'] = args['repeat'] and args['repeat'] in ['True', 'true']
self.logger.info('Args: %s' % self._dumps(args))
return args
```
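A hypothetical client-side call against the handler above. The host, port, and route are placeholders, since the Tornado routing table is not part of this entry; only the `url` query argument comes from the handler code:
```python
import requests

# Assumed host/port/route, for illustration only.
resp = requests.get(
    "http://localhost:8888/pdf",
    params={"url": "https://example.com/report.html"},
    timeout=60,
)
with open("report.pdf", "wb") as fp:
    fp.write(resp.content)
```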
#### File: jiss-rendering-service/handler/Resize.py
```python
import core
import tornado
import uuid
import time
from utils import open_remote_image, resize, open_image
class ResizeHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request to resize IMG from url')
name = '/tmp/rs-%s.png' % str(uuid.uuid4())
args = self._get_args()
resize(open_remote_image(self.get_query_argument('url')), name, args)
self.response_file(name)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.logger.info('Request to resize IMG for request file')
args = self._get_args()
for item in self.request.files.values():
for file_info in item:
name = '/tmp/rs-%s-%s.png' % (time.time(), file_info['filename'])
resize(open_image(file_info['body']), name, args)
self.response_file(name)
return
def _get_args(self):
# Read arguments
args = {
'resize': self.request.headers.get('X-Jiss-Resize', default=None)
}
# Parse if needed
args['resize'] = [int(x) for x in args['resize'].split('x')] if args['resize'] else None
self.logger.info('Args: %s' % self._dumps(args))
return args
```
#### File: jiss-rendering-service/handler/Watermark.py
```python
import core
import tornado
import uuid
import time
from utils import open_remote_image, add_watermark, open_image, WHITE
class WatermarkHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request watermark generation for remote file')
name = '/tmp/wm-%s.png' % str(uuid.uuid4())
args = self._get_args()
add_watermark(open_remote_image(self.get_query_argument('url')), name, args)
self.response_file(name)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.logger.info('Request watermark generation for request file')
args = self._get_args()
for item in self.request.files.values():
for file_info in item:
name = '/tmp/wm-%s-%s.png' % (time.time(), file_info['filename'])
add_watermark(open_image(file_info['body']), name, args)
self.response_file(name)
return
def _get_args(self):
# Read arguments
args = {
'angle': int(self.request.headers.get('X-Jiss-Angle', default=45)),
'blur': int(self.request.headers.get('X-Jiss-Blur', default=0)),
'color': self.request.headers.get('X-Jiss-Color', default=None),
'position': self.request.headers.get('X-Jiss-Position', default='center_middle'),
'proportion': float(self.request.headers.get('X-Jiss-Proportion', default=0.9)),
'opacity': float(self.request.headers.get('X-Jiss-Opacity', default=0.3)),
'text': self.request.headers.get('X-Jiss-Text', default='Demo'),
'resize': self.request.headers.get('X-Jiss-Resize', default=None),
'repeat': self.request.headers.get('X-Jiss-Repeat', default=None)
}
# Parse if needed
args['color'] = [int(x) for x in args['color'].split(',')] if args['color'] else WHITE
args['resize'] = [int(x) for x in args['resize'].split('x')] if args['resize'] else None
args['repeat'] = args['repeat'] and args['repeat'] in ['True', 'true']
self.logger.info('Args: %s' % self._dumps(args))
return args
``` |
{
"source": "Jistrokz/STNScripts",
"score": 2
} |
#### File: Jistrokz/STNScripts/TheScrapper - Linux.py
```python
__author__ = "<NAME>"
__version__ = "1.0"
__date__ = "2021-12-15"
import sys
import argparse
from collections import defaultdict
from datetime import datetime, timedelta
import os
import copy
import gzip
import subprocess
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
import traceback
def EvaluateLogPaths():
paths = []
print("[.] Automatically evaluating the folders to which apps write logs ...")
command = "lsof 2>/dev/null | grep '\\.log' | sed 's/.* \\//\\//g' | sort | uniq"
PatchEval = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
output = PatchEval.communicate()[0].splitlines()
for o in output:
path = os.path.dirname(o)
if isinstance(path, bytes):
path = path.decode("utf-8")
if path in paths:
continue
paths.append(path)
if args.debug:
print("[D] Adding PATH: %s" % path)
return paths
class TheScrapper(object):
DetectStrings = ['${jndi:ldap:', '${jndi:rmi:/', '${jndi:ldaps:/', '${jndi:dns:/', '${jndi:nis:/', '${jndi:nds:/', '${jndi:corba:/', '${jndi:iiop:/']
PlainStrings = {
"https://gist.github.com/Neo23x0/e4c8b03ff8cdf1fa63b7d15db6e3860b#gistcomment-3991502": [
" header with value of BadAttributeValueException: "
],
"https://gist.github.com/Neo23x0/e4c8b03ff8cdf1fa63b7d15db6e3860b#gistcomment-3991700": [
"at java.naming/com.sun.jndi.url.ldap.ldapURLContext.lookup(",
".log4j.core.lookup.JndiLookup.lookup(JndiLookup"
],
"https://github.com/Neo23x0/log4shell-detector/issues/5#issuecomment-991963675": [
'${base64:JHtqbmRp'
],
"https://github.com/tangxiaofeng7/CVE-2021-44228-Apache-Log4j-Rce/issues/1": [
'Reference Class Name: foo'
]
}
def __init__(self, MaxDistance, debug, quick, summary):
self.PrepareDetections(MaxDistance)
self.debug = debug
self.quick = quick
self.summary = summary
def DecodeLine(self, line):
while "%" in line:
LineBefore = line
line = unquote(line)
if line == LineBefore:
break
return line
def CheckLine(self, line):
DecodedLine = self.DecodeLine(line)
for ref, strings in self.PlainStrings.items():
for s in strings:
if s in line or s in DecodedLine:
return s
DecodedLine = DecodedLine.lower()
linechars = list(DecodedLine)
dp = copy.deepcopy(self.DetectionPad)
for c in linechars:
for DetectionString in dp:
if c == dp[DetectionString]["chars"][dp[DetectionString]["level"]]:
if dp[DetectionString]["level"] == 1 and not dp[DetectionString]["CurrentDistance"] == 1:
dp[DetectionString]["CurrentDistance"] = 0
dp[DetectionString]["level"] = 0
dp[DetectionString]["level"] += 1
dp[DetectionString]["CurrentDistance"] = 0
if dp[DetectionString]["level"] > 0:
dp[DetectionString]["CurrentDistance"] += 1
if dp[DetectionString]["CurrentDistance"] > dp[DetectionString]["MaxDistance"]:
dp[DetectionString]["CurrentDistance"] = 0
dp[DetectionString]["level"] = 0
if len(dp[DetectionString]["chars"]) == dp[DetectionString]["level"]:
return DetectionString
def ScanFile(self, FilePath):
MatchesInFile = []
try:
if "log." in FilePath and FilePath.endswith(".gz"):
with gzip.open(FilePath, 'rt') as gzlog:
c = 0
for line in gzlog:
c += 1
if self.quick and not "2021" in line and not "2022" in line:
continue
result = self.CheckLine(line)
if result:
MatchesDict = {
"LineNumber": c,
"MatchString": result,
"line": line.rstrip()
}
MatchesInFile.append(MatchesDict)
else:
with open(FilePath, 'r') as logfile:
c = 0
for line in logfile:
c += 1
if self.quick and not "2021" in line and not "2022" in line:
continue
# Analyze the line
result = self.CheckLine(line)
if result:
MatchesDict = {
"LineNumber": c,
"MatchString": result,
"line": line.rstrip()
}
MatchesInFile.append(MatchesDict)
except UnicodeDecodeError as e:
if self.debug:
print("[E] Can't process FILE: %s REASON: most likely not an ASCII based log file" % FilePath)
except PermissionError as e:
print("[E] Can't access %s due to a permission problem." % FilePath)
except Exception as e:
print("[E] Can't process FILE: %s REASON: %s" % (FilePath, traceback.print_exc()))
return MatchesInFile
def ScanPath(self, path):
matches = defaultdict(lambda: defaultdict())
for root, directories, files in os.walk(path, followlinks=False):
for filename in files:
FilePath = os.path.join(root, filename)
if self.debug:
print("[.] Processing %s ..." % FilePath)
MatchesFound = self.ScanFile(FilePath)
if len(MatchesFound) > 0:
for m in MatchesFound:
matches[FilePath][m['LineNumber']] = [m['line'], m['MatchString']]
if not self.summary:
for match in matches:
for LineNumber in matches[match]:
print('[!] FILE: %s LineNumber: %s DeobfuscatedString: %s LINE: %s' % (match, LineNumber, matches[match][LineNumber][1], matches[match][LineNumber][0]))
# Result
NumberofDetections = 0
NumberOfFilesWithDetections = len(matches.keys())
for FilePath in matches:
NumberofDetections += len(matches[FilePath].keys())
if NumberofDetections > 0:
print("[!] %d files with exploitation attempts detected in PATH: %s" % (NumberOfFilesWithDetections, path))
if self.summary:
for match in matches:
for LineNumber in matches[match]:
print('[!] FILE: %s LineNumber: %d STRING: %s' % (match, LineNumber, matches[match][LineNumber][1]))
else:
print("[+] No files with exploitation attempts detected in path PATH: %s" % path)
return NumberofDetections
def PrepareDetections(self, MaxDistance):
self.DetectionPad = {}
for ds in self.DetectStrings:
self.DetectionPad[ds] = {}
self.DetectionPad[ds] = {
"chars": list(ds),
"MaxDistance": MaxDistance,
"CurrentDistance": 0,
"level": 0
}
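# A minimal programmatic-use sketch of the class above (illustrative only; the log
# path is a placeholder). ScanFile() returns a list of dicts with "LineNumber",
# "MatchString" and "line" keys:
#
#   scanner = TheScrapper(MaxDistance=40, debug=False, quick=True, summary=True)
#   for hit in scanner.ScanFile("/var/log/example/access.log"):
#       print("line %d matched %s" % (hit["LineNumber"], hit["MatchString"]))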
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='TheScrapper Exploitation Detectors')
group = parser.add_mutually_exclusive_group()
group.add_argument('-p', nargs='+', help='Path to scan', metavar='path', default='')
group.add_argument('-f', nargs='+', help='File to scan', metavar='path', default='')
group.add_argument('--auto', action='store_true', help='Automatically evaluate locations to which logs get written and scan these folders recursively (new default if no path is given)')
parser.add_argument('-d', type=int, help='Maximum distance between each character', metavar='distance', default=40)
parser.add_argument('--quick', action='store_true', help="Skip log lines that don't contain a 2021 or 2022 time stamp")
parser.add_argument('--debug', action='store_true', help='Debug output')
parser.add_argument('--summary', action='store_true', help='Show summary only')
args = parser.parse_args()
print("____ ___ _ _ _ _ _ ____ ___ _ _ ____ ____ ____ ____ ____ ___ ___ ____ ____ ")
print("[__ | |\ | | |\ | | __ | |__| |___ [__ | |__/ |__| |__] |__] |___ |__/ ")
print("___] | | \| | | \| |___ | | | |___ ___] |___ | \ | | | | |___ | \ ")
print(" ")
print(" Version %s, %s" % (__version__, __author__))
print("")
DateScanStart = datetime.now()
print("[.] Starting scan DATE: %s" % DateScanStart)
l4sd = TheScrapper(MaxDistance=args.d, debug=args.debug, quick=args.quick, summary=args.summary)
AllDetections = 0
if args.f:
files = args.f
for f in files:
if not os.path.isfile(f):
print("[E] File %s doesn't exist" % f)
continue
print("[.] Scanning FILE: %s ..." % f)
matches = defaultdict(lambda: defaultdict())
MatchesFound = l4sd.ScanFile(f)
if len(MatchesFound) > 0:
for m in MatchesFound:
matches[f][m['LineNumber']] = [m['line'], m['MatchString']]
for match in matches:
for LineNumber in matches[match]:
print('[!] FILE: %s LineNumber: %s DeobfuscatedString: %s LINE: %s' %
(match, LineNumber, matches[match][LineNumber][1], matches[match][LineNumber][0])
)
AllDetections = len(matches[f].keys())
else:
paths = args.p
AutoEvalPaths = False
if args.auto:
AutoEvalPaths = True
if len(paths) == 0 and not AutoEvalPaths:
print("[W] Warning: Please Select a path (-p path) otherwise, TheScrapper will activate the automatic path evaluation (--auto) for your convenience.")
AutoEvalPaths = True
if AutoEvalPaths:
LogPaths = EvaluateLogPaths()
paths = LogPaths
for path in paths:
if not os.path.isdir(path):
print("[E] Error: Path %s doesn't exist" % path)
continue
print("[.] Scanning FOLDER: %s ..." % path)
detections = l4sd.ScanPath(path)
AllDetections += detections
if AllDetections > 0:
print("[!!!] %d exploitation attempts detected in the complete scan" % AllDetections)
else:
print("[.] No exploitation attempts detected in the scan")
DateScanEnd = datetime.now()
print("[.] Finished scan DATE: %s" % DateScanEnd)
duration = DateScanEnd - DateScanStart
mins, secs = divmod(duration.total_seconds(), 60)
hours, mins = divmod(mins, 60)
print("[.] Scan took the following time to complete DURATION: %d hours %d minutes %d seconds" % (hours, mins, secs))
``` |
{
"source": "jistr/rejviz",
"score": 2
} |
#### File: rejviz/rejviz/libvirt_nets.py
```python
import logging
from xml.etree import ElementTree
import libvirt
LOG = logging.getLogger(__file__)
def get_libvirt_networks():
conn = libvirt.openReadOnly()
if not conn:
raise RuntimeError('Could not connect to libvirt.')
networks = [_fetch_network_data(n) for n in conn.listAllNetworks()]
conn.close()
return networks
def _fetch_network_data(network):
network_data = {'name': network.name()}
root = ElementTree.fromstring(network.XMLDesc())
network_data['dhcp'] = root.find('./ip/dhcp') is not None
network_data['netmask'] = root.find('./ip').attrib['netmask']
network_data['network'] = _gateway_ipaddr_to_network(
root.find('./ip').attrib['address'])
return network_data
def _gateway_ipaddr_to_network(ipaddr):
octets = ipaddr.split('.')
# smarty pants
octets[3] = str(int(octets[3]) - 1)
return '.'.join(octets)
```
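A short usage sketch for the helper above, assuming a local libvirt daemon is reachable read-only (the import path mirrors this file's location in the repository):
```python
from rejviz.libvirt_nets import get_libvirt_networks

for net in get_libvirt_networks():
    print("%(name)s: %(network)s netmask %(netmask)s dhcp=%(dhcp)s" % net)
```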
#### File: rejviz/tests/test_libvirt_nets.py
```python
import mock
from rejviz import libvirt_nets
import rejviz.tests.utils as tutils
NETWORK_XMLDESC = """
<network>
<name>default</name>
<uuid>d2f553ca-f9fe-49cb-996d-934a69fc02da</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:f7:fc:83'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
"""
class LibvirtNetsTest(tutils.TestCase):
@mock.patch('libvirt.openReadOnly')
@mock.patch('rejviz.libvirt_nets._fetch_network_data')
def test_get_libvirt_networks(self, _fetch_network_data, openReadOnly):
conn = openReadOnly.return_value
conn.listAllNetworks.return_value = ['net1', 'net2']
networks = libvirt_nets.get_libvirt_networks()
self.assertEqual([_fetch_network_data.return_value,
_fetch_network_data.return_value],
networks)
        openReadOnly.assert_called_with()
        conn.listAllNetworks.assert_called_with()
        _fetch_network_data.assert_any_call('net1')
        _fetch_network_data.assert_any_call('net2')
        conn.close.assert_called_with()
def test_fetch_network_data(self):
network_object = mock.MagicMock()
network_object.name.return_value = 'default'
network_object.XMLDesc.return_value = NETWORK_XMLDESC
network = libvirt_nets._fetch_network_data(network_object)
self.assertEqual('default', network['name'])
self.assertEqual(True, network['dhcp'])
self.assertEqual('192.168.122.0', network['network'])
self.assertEqual('255.255.255.0', network['netmask'])
def test_gateway_ipaddr_to_network(self):
# for 192.168.122.0/24
self.assertEqual(
'192.168.122.0',
libvirt_nets._gateway_ipaddr_to_network('192.168.122.1'))
# for 192.168.123.192/26
self.assertEqual(
'192.168.123.192',
libvirt_nets._gateway_ipaddr_to_network('192.168.123.193'))
```
#### File: rejviz/tests/test_tmp.py
```python
import mock
from testtools import matchers
import rejviz.tests.utils as tutils
from rejviz import tmp
class TmpTest(tutils.TestCase):
@mock.patch('rejviz.tmp.path.exists', return_value=False)
@mock.patch('rejviz.tmp.os.mkdir')
def test_create_dir(self, mkdir, exists):
tmp_dir = tmp.create_dir()
mkdir.assert_called_with(tmp_dir, 0o700)
self.assertThat(tmp_dir,
matchers.MatchesRegex('^/tmp/rejviz-builder-\\d+$'))
@mock.patch('rejviz.tmp.shutil.rmtree')
def test_remove_dir_ok(self, rmtree):
tmp.remove_dir('/tmp/rejviz-builder-123')
rmtree.assert_called_with('/tmp/rejviz-builder-123')
@mock.patch('rejviz.tmp.shutil.rmtree')
def test_remove_dir_bad_prefix(self, rmtree):
self.assertRaises(ValueError, tmp.remove_dir, '/tmp/rejviz-123')
self.assertEqual([], rmtree.mock_calls)
``` |
{
"source": "Jisu5/MsMovie",
"score": 2
} |
#### File: backend/accounts/views.py
```python
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.contrib.auth import get_user_model
User = get_user_model()
from .serializers import UserSerializer
# Create your views here.
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import CreateModelMixin
from django.contrib.auth import get_user_model
from .serializers import UserSerializer
from .models import UserProfile
class CreateUserView(CreateModelMixin, GenericViewSet):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
class UserAPI(APIView):
def get(self, request):
user = request.user
serializer = UserSerializer(user)
return Response(serializer.data)
def post(self, request, format=None):
user = request.user
try:
UserProfile.objects.get(user = user)
return Response(status=status.HTTP_409_CONFLICT)
except UserProfile.DoesNotExist:
profile = UserProfile.objects.create(
user = user,
favorite = request.data.get('favorite')
)
return Response(status=status.HTTP_201_CREATED)
class OtherUserAPI(APIView):
def get(self, request, user_pk):
user = get_object_or_404(User, pk=user_pk)
serializer = UserSerializer(user)
return Response(serializer.data)
# class AuthInfoUpdateView(generics.UpdateAPIView):
# def put(self, request, user_pk):
# user = get_object_or_404(User, pk=user_pk)
``` |
{
"source": "jisuhan3201/beginvagan-app",
"score": 2
} |
#### File: beginvegan_app/products/models.py
```python
from django.db import models
from beginvegan_app.materials import models as material_models
from django.utils.html import format_html
# Create your models here.
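# TimeStampedModel: abstract base that stamps created_at/updated_at on every
# concrete model below.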
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
abstract = True
class ProductCategory(TimeStampedModel):
name = models.CharField(max_length=100, null=True, blank=True)
sub_category = models.ForeignKey('self', on_delete=models.SET_NULL, null=True)
def __str__(self):
return "{}".format(self.name)
class Meta:
db_table = 'product_categories'
# class ProductSubCategory(models.Model):
# name = models.CharField(max_length=100, null=True, blank=True)
# parent_category = models.ForeignKey(ProductCategory, on_delete=models.SET_NULL, null=True, related_name="parent_category")
# updated_at = models.DateTimeField(auto_now=True, null=True)
# def __str__(self):
# return "{}".format(self.name)
# class Meta:
# db_table = 'product_subcategories'
class Company(TimeStampedModel):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class Meta:
db_table = 'companies'
class Product(TimeStampedModel):
name = models.CharField(max_length=255, null=True, blank=True)
category = models.ForeignKey(ProductCategory, on_delete=models.SET_NULL, null=True, blank=True, related_name="category")
materials = models.ManyToManyField(material_models.Material, through="ProductMaterial", related_name="materials")
company = models.ForeignKey(Company, on_delete=models.SET_NULL, null=True, blank=True, related_name="company")
remove_materials = models.ManyToManyField(material_models.Material, through="RemoveMaterial", related_name="remove_materials")
def __str__(self):
return "{}".format(self.name)
class Meta:
db_table = 'products'
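# ProductMaterial and RemoveMaterial are the explicit through tables behind
# Product.materials and Product.remove_materials. A minimal query sketch
# (hypothetical material name, not from this codebase):
#   Product.objects.exclude(materials__name="Honey")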
class ProductMaterial(TimeStampedModel):
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="pm_product")
material = models.ForeignKey(material_models.Material, on_delete=models.CASCADE, related_name="pm_material")
def __str__(self):
return "{} - {}".format(self.product, self.material)
class Meta:
db_table = 'product_material'
class RemoveMaterial(TimeStampedModel):
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="rm_product")
material = models.ForeignKey(material_models.Material, on_delete=models.CASCADE, related_name="rm_material")
def __str__(self):
return "{} - {}".format(self.product, self.material)
class Meta:
db_table = 'remove_material'
class ProductImage(TimeStampedModel):
title = models.CharField(max_length=255, null=True, blank=True)
image = models.ImageField(upload_to='img/raw_images', null=True, blank=True)
product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True, blank=True)
class Meta:
db_table = 'product_images'
``` |
{
"source": "jisuhan3201/osc-bge",
"score": 2
} |
#### File: osc_bge/agent/models.py
```python
import datetime
from django.db import models
from osc_bge.users import models as user_models
# Create your models here.
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
abstract = True
#Agent Head Table
class AgencyHead(TimeStampedModel):
PROGRAM_CHOICES = (
('secondary', 'Secondary'),
('college', 'College'),
('camp', 'Camp'),
)
name = models.CharField(max_length=80, null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
number_branches = models.CharField(max_length=80, null=True, blank=True)
capacity_students = models.CharField(max_length=255, null=True, blank=True)
commission = models.CharField(max_length=140, null=True, blank=True)
promotion = models.CharField(max_length=255, null=True, blank=True)
others = models.CharField(max_length=255, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class AgencyProgram(TimeStampedModel):
head = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
program = models.CharField(max_length=80, null=True, blank=True)
#Agent Branch Table
class Agency(TimeStampedModel):
head = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True, related_name='agent_branch')
name = models.CharField(max_length=140, null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
capacity_students = models.CharField(max_length=255, null=True, blank=True)
commission = models.CharField(max_length=140, null=True, blank=True)
promotion = models.CharField(max_length=255, null=True, blank=True)
others = models.CharField(max_length=255, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class AgencyBranchProgram(TimeStampedModel):
branch = models.ForeignKey(Agency, on_delete=models.CASCADE, null=True)
program = models.CharField(max_length=80, null=True, blank=True)
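# Upload-path helpers for AgencyHeadContactInfo.image: files are stored under
# "agents/<agent>/<agent>-<microsecond>", e.g. (hypothetical values)
# "agents/ACME/ACME-123456".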
def set_filename_format(now, instance, filename):
return "{agentname}-{microsecond}".format(
agentname=instance.agent,
microsecond=now.microsecond,
)
def agent_directory_path(instance, filename):
now = datetime.datetime.now()
path = "agents/{agentname}/{filename}".format(
agentname=instance.agent,
filename=set_filename_format(now, instance, filename),
)
return path
class AgencyHeadContactInfo(TimeStampedModel):
LEVEL_CHOICES = (
('s', 'S'),
('a', 'A'),
('b', 'B'),
('c', 'C'),
('d', 'D'),
)
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
name = models.CharField(max_length=80, null=True, blank=True)
contracted_date = models.DateTimeField(auto_now=True, null=True)
phone = models.CharField(max_length=80, null=True, blank=True)
email = models.CharField(max_length=140, null=True, blank=True)
skype = models.CharField(max_length=80, null=True, blank=True)
wechat = models.CharField(max_length=80, null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
level = models.CharField(max_length=80, null=True, blank=True)
image = models.ImageField(upload_to=agent_directory_path, null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class AgentRelationshipHistory(TimeStampedModel):
head = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
writer = models.CharField(max_length=80, null=True, blank=True)
name = models.CharField(max_length=80, null=True, blank=True)
date = models.DateField(null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
category = models.CharField(max_length=80, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
comment = models.TextField(null=True, blank=True)
class SecodnaryProgram(TimeStampedModel):
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
preriod = models.CharField(max_length=80, null=True, blank=True)
target = models.IntegerField(null=True, blank=True)
new_students_fall = models.IntegerField(null=True, blank=True)
new_students_spring = models.IntegerField(null=True, blank=True)
total_new_students_bge = models.IntegerField(null=True, blank=True)
total_students_bge = models.IntegerField(null=True, blank=True)
terminating_students = models.IntegerField(null=True, blank=True)
comments = models.TextField(null=True, blank=True)
class Camp(TimeStampedModel):
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
preriod = models.CharField(max_length=80, null=True, blank=True)
target = models.IntegerField(null=True, blank=True)
summer_camp = models.IntegerField(null=True, blank=True)
winter_camp = models.IntegerField(null=True, blank=True)
comments = models.TextField(null=True, blank=True)
class CollegeApplication(TimeStampedModel):
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
preriod = models.CharField(max_length=80, null=True, blank=True)
college_application = models.IntegerField(null=True, blank=True)
other_program = models.IntegerField(null=True, blank=True)
comments = models.TextField(null=True, blank=True)
```
#### File: osc_bge/agent/views.py
```python
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.views import View
from django.db.models import Q
from django.core import serializers
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from . import models, forms
from osc_bge.users import models as user_models
from osc_bge.form import models as form_models
from osc_bge.form import forms as form_forms
from osc_bge.student import models as student_models
from osc_bge.student import forms as student_forms
from osc_bge.school import models as school_models
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
import json
from decimal import Decimal
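# StatisticsView: per-counselor counselling/registration statistics, scoped to
# the requesting user's agency branch or agency head; counselor accounts get
# 401. Supports a start/end date range, a past-months breakdown and a
# school-type filter via query parameters.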
class StatisticsView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def get(self, request):
user = request.user
agency = None
agency_head = None
if user.type == "counselor":
return HttpResponse(status=401)
elif user.type == 'agency_branch_admin':
try:
agency_branch_admin = user_models.AgencyAdminUser.objects.get(user=user)
agency = agency_branch_admin.agency
except user_models.AgencyAdminUser.DoesNotExist:
return HttpResponse(status=401)
elif user.type == 'agency_admin':
try:
agency_admin = user_models.AgencyHeadAdminUser.objects.get(user=user)
agency_head = agency_admin.agency_head
except user_models.AgencyHeadAdminUser.DoesNotExist:
return HttpResponse(status=401)
# 1st section statistics
total_counsel = 0
total_registered = 0
total_secondary = 0
total_college = 0
total_camp = 0
total_us_count = 0
total_ca_count = 0
total_uk_count = 0
total_au_count = 0
total_nz_count = 0
# 2nd Section statistics
monthly_data = {}
past_date_range = []
past_date_first = []
if self.request.GET.get('past_months'):
today = date.today()
past_months = int(self.request.GET.get('past_months'))
for number in range(0, past_months+1):
sd = today - relativedelta(months=number)
ed = today - relativedelta(months=number-1)
past_start_date = date(sd.year, sd.month, 1)
past_end_date = date(ed.year, ed.month, 1) - relativedelta(days=1)
past_date_range.append([past_start_date, past_end_date])
past_date_first.append(past_start_date)
if agency:
counselors = user_models.Counselor.objects.filter(agency=agency)
elif agency_head:
counselors = user_models.Counselor.objects.filter(agency__head=agency_head)
else:
counselors = user_models.Counselor.objects.all()
for counselor in counselors:
# For 1st Section statistics
query_start_date = self.request.GET.get('start_date', None)
query_end_date = self.request.GET.get('end_date', None)
if query_start_date and query_end_date:
found_counsels = form_models.Counsel.objects.filter(counselor=counselor).filter(
created_at__range=(query_start_date, query_end_date))
found_formalities = form_models.Formality.objects.filter(counsel__counselor=counselor).filter(
counsel__created_at__range=(query_start_date, query_end_date))
else:
found_counsels = form_models.Counsel.objects.filter(counselor=counselor)
found_formalities = form_models.Formality.objects.filter(counsel__counselor=counselor)
# For 2nd Section statistics
if past_date_range:
data_list = []
for date_range in past_date_range:
data_dict = {}
date_first = date_range[0]
monthly_counsels = form_models.Counsel.objects.filter(counselor=counselor).filter(
created_at__range=(date_range[0], date_range[1]))
monthly_formality_count = form_models.Formality.objects.filter(counsel__counselor=counselor).filter(
counsel__created_at__range=(date_range[0], date_range[1])).count()
if request.GET.get('school_type'):
monthly_counsels = monthly_counsels.filter(program_interested=request.GET.get('school_type'))
monthly_formality_count = form_models.Formality.objects.filter(counsel__counselor=counselor).filter(
counsel__created_at__range=(date_range[0], date_range[1])).filter(
counsel__program_interested=request.GET.get('school_type')).count()
monthly_counsels_count = monthly_counsels.count()
try:
monthly_success_rate = int(monthly_formality_count * 100 / monthly_counsels_count)
except ZeroDivisionError:
monthly_success_rate = 0
data_dict.update({
'date_first':date_first,
'monthly_counsels_count':monthly_counsels_count,
'monthly_formality_count':monthly_formality_count,
'monthly_success_rate':monthly_success_rate,
})
data_list.append(data_dict)
counselor_fullname = counselor.agency.name + " / " + counselor.user.first_name + " " + counselor.user.last_name
monthly_data.update({counselor_fullname:data_list})
counsel_count = found_counsels.count()
total_counsel += counsel_count
formality_count = found_formalities.count()
total_registered += formality_count
try:
apply_percentage = formality_count * 100 / counsel_count
apply_percentage = int(apply_percentage)
except ZeroDivisionError:
apply_percentage = 0
secondary_count = 0
college_count = 0
camp_count = 0
us_count = 0
ca_count = 0
uk_count = 0
au_count = 0
nz_count = 0
for counsel in found_counsels:
if counsel.program_interested == 'k12':
secondary_count += 1
elif counsel.program_interested == 'college':
college_count += 1
elif counsel.program_interested == 'camp':
camp_count += 1
else:
continue
if counsel.desire_country == 'us':
us_count += 1
elif counsel.desire_country == 'ca':
ca_count += 1
elif counsel.desire_country == 'uk':
uk_count += 1
elif counsel.desire_country == 'au':
au_count += 1
elif counsel.desire_country == 'nz':
nz_count += 1
else:
continue
counselor.counsel_count = counsel_count
counselor.formality_count = formality_count
counselor.apply_percentage = apply_percentage
counselor.secondary = secondary_count
counselor.college = college_count
counselor.camp = camp_count
counselor.us_count = us_count
counselor.ca_count = ca_count
counselor.uk_count = uk_count
counselor.au_count = au_count
counselor.nz_count = nz_count
counselor.save()
total_secondary += secondary_count
total_college += college_count
total_camp += camp_count
total_us_count += us_count
total_ca_count += ca_count
total_uk_count += uk_count
total_au_count += au_count
total_nz_count += nz_count
try:
total_success_rate = int(total_registered * 100 / total_counsel)
except ZeroDivisionError:
total_success_rate = 0
# Make range of months for templates
template_date_range = []
today = date.today()
for number in range(0, 13):
sd = today - relativedelta(months=number)
ed = today - relativedelta(months=number-1)
past_start_date = date(sd.year, sd.month, 1)
past_end_date = date(ed.year, ed.month, 1) - relativedelta(days=1)
template_date_range.append([past_start_date, past_end_date])
return render(request, 'agent/statistics.html', {
"counselors":counselors,
'total_counsel':total_counsel,
'total_registered':total_registered,
'total_success_rate':total_success_rate,
'total_secondary':total_secondary,
'total_college':total_college,
'total_camp':total_camp,
'total_us_count':total_us_count,
'total_ca_count':total_ca_count,
'total_uk_count':total_uk_count,
'total_au_count':total_au_count,
'total_nz_count':total_nz_count,
'monthly_data':monthly_data,
'past_date_first':past_date_first,
'template_date_range':template_date_range,
})
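# CounselView: school search page. search() builds a Secondary or College
# queryset from the submitted form_type and its filter parameters.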
class CounselView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def search(self):
if self.request.GET.get('form_type') == 'secondary_form':
queryset = school_models.Secondary.objects.all().order_by('school__partnership', 'school__name')
school_type = self.request.GET.getlist('school_type', None)
if school_type:
queryset = queryset.filter(school__school_type__type__in=school_type)
student_body = self.request.GET.getlist('student_body', None)
if student_body:
queryset = queryset.filter(student_body__in=student_body)
grade = self.request.GET.getlist('grade', None)
if grade:
if len(grade) != 1:
queryset = queryset.filter(Q(grade_start__lte=grade[0]) | Q(grade_end__gte=grade[-1]))
to_schools = school_models.School.objects.filter(tb_of_og__grade__in=grade, tb_of_og__quantity__gt=0).distinct()
queryset = queryset.filter(school__in=to_schools)
else:
queryset = queryset.filter(grade_start__lte=grade[0], grade_end__gte=grade[0])
to_schools = school_models.School.objects.filter(tb_of_og__grade=grade[0], tb_of_og__quantity__gt=0).distinct()
queryset = queryset.filter(school__in=to_schools)
term = self.request.GET.getlist('term', None)
if term:
queryset = queryset.filter(school__term__in=term).distinct()
to_schools = school_models.School.objects.filter(tb_of_og__term__in=term, tb_of_og__quantity__gt=0).distinct()
queryset = queryset.filter(school__in=to_schools)
toefl_requirement = self.request.GET.get('toefl_requirement', None)
if toefl_requirement:
if toefl_requirement == '100':
queryset = queryset.filter(toefl_requirement__gte=int(toefl_requirement), toefl_requirement__isnull=False)
elif toefl_requirement == '0':
queryset = queryset.filter(toefl_requirement__isnull=True)
else:
queryset =queryset.filter(toefl_requirement__lte=int(toefl_requirement), toefl_requirement__isnull=False)
state = self.request.GET.getlist('state', None)
if state:
queryset = queryset.filter(state__in=state)
transfer = self.request.GET.get('transfer', None)
if transfer:
queryset = queryset.filter(school__transfer=True)
number_students = self.request.GET.getlist('number_students', None)
if number_students:
if not 's' in number_students:
queryset = queryset.exclude(school__number_students__lte=299).exclude(school__number_students__isnull=True)
if not 'm' in number_students:
queryset = queryset.exclude(school__number_students__gte=300, school__number_students__lte=699).exclude(school__number_students__isnull=True)
if not 'l' in number_students:
queryset = queryset.exclude(school__number_students__gte=700).exclude(school__number_students__isnull=True)
program_fee = self.request.GET.get('program_fee', None)
if program_fee:
if program_fee == 'xs':
queryset = queryset.filter(program_fee__lte=35000)
elif program_fee == 's':
queryset = queryset.filter(program_fee__lte=45000)
elif program_fee == 'm':
queryset = queryset.filter(program_fee__lte=60000)
elif program_fee == 'l':
queryset = queryset.filter(program_fee__gt=60000)
else:
pass
elif self.request.GET.get('form_type') == 'college_form':
queryset = school_models.College.objects.all().order_by('ranking')
school_type = self.request.GET.getlist('school_type', None)
if school_type:
queryset = queryset.filter(school__school_type__type__in=school_type)
toefl_requirement = self.request.GET.get('toefl_requirement', None)
if toefl_requirement:
if toefl_requirement == '100':
queryset = queryset.filter(toefl_requirement__gte=int(toefl_requirement), toefl_requirement__isnull=False)
else:
queryset =queryset.filter(toefl_requirement__lte=int(toefl_requirement), toefl_requirement__isnull=False)
state = self.request.GET.getlist('state', None)
if state:
queryset = queryset.filter(state__in=state)
partition = self.request.GET.getlist('partition', None)
if partition:
queryset = queryset.filter(college_type__in=partition)
elif self.request.GET.get('form_type') == 'name_form':
queryset = school_models.Secondary.objects.all().order_by('school__partnership', 'school__name')
school_name = self.request.GET.get('school_name')
if school_name:
queryset = queryset.filter(school__name__icontains=school_name)
school_id = self.request.GET.get('school_id')
if school_id:
queryset = school_models.Secondary.objects.filter(id=int(school_id))
elif self.request.GET.get('form_type') == 'college_name_form':
queryset = school_models.College.objects.all()
school_name = self.request.GET.get('school_name')
if school_name:
queryset = queryset.filter(school__name__icontains=school_name)
school_id = self.request.GET.get('school_id')
if school_id:
queryset = school_models.College.objects.filter(id=int(school_id))
else:
queryset = None
return queryset
def get(self, request):
secondaries = school_models.Secondary.objects.all().order_by('school__partnership', 'school__name')
colleges = school_models.College.objects.all().order_by('ranking')
search_schools = self.search()
return render(request, 'agent/counsel.html',
{
"secondaries":secondaries,
"colleges":colleges,
'search_schools':search_schools,
}
)
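# CustomerRegisterView: registers a prospective student from the counsel form.
# With a counsel_num it updates the existing counsel, student, parent info and
# study history; without one it creates them.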
class CustomerRegisterView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def get_counselor(self):
user = self.request.user
try:
found_counselor = user_models.Counselor.objects.get(user=user)
except user_models.Counselor.DoesNotExist:
return HttpResponse(status=401)
return found_counselor
def get(self, request, counsel_num=None):
if counsel_num:
try:
found_counsel = form_models.Counsel.objects.get(pk=counsel_num)
except form_models.Counsel.DoesNotExist:
return HttpResponse(status=404)
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
if not found_counsel.counselor == found_counselor:
return HttpResponse(status=401)
try:
student_history = student_models.StudentHistory.objects.get(student=found_counsel.student)
except:
student_history = None
return render(request, 'agent/register.html', {"counsel":found_counsel, "student_history": student_history})
return render(request, 'agent/register.html', {})
def post(self, request, counsel_num=None):
data = request.POST
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
if counsel_num:
try:
found_counsel = form_models.Counsel.objects.get(pk=counsel_num)
except form_models.Counsel.DoesNotExist:
return HttpResponse(status=404)
if request.user.type == 'counselor':
if not found_counselor == found_counsel.counselor:
return HttpResponse(status=401)
parent_info = None
if (data.get('parentname') or data.get('parentcell') or
data.get('parentemail') or data.get('parentwechat')):
try:
parent_info = found_counsel.student.parent_info
parent_info.name = data.get('parentname')
parent_info.phone = data.get('parentcell')
parent_info.email = data.get('parentemail')
parent_info.wechat = data.get('parentwechat')
parent_info.save()
except:
parent_info = student_models.ParentInfo(
name=data.get('parentname'),
phone=data.get('parentcell'),
email=data.get('parentemail'),
wechat=data.get('parentwechat'),
)
parent_info.save()
student = found_counsel.student
if request.user.type == 'counselor':
student.counselor=found_counselor
student.parent_info = parent_info
student.name = data.get('name')
student.gender = data.get('gender')
student.birthday = data.get('birth')
student.email = data.get('email')
student.skype = data.get("skype")
student.nationality = data.get("nationality")
student.wechat = data.get('wechat')
student.phone = data.get('phone')
student.save()
image_form = student_forms.StudentImageForm(request.POST,request.FILES)
if image_form.is_valid():
student.image = image_form.cleaned_data['image']
student.save()
counsel = found_counsel
counsel.student = student
counsel.program_interested = data.get('program')
counsel.expected_departure = data.get('departure')
counsel.possibility = data.get('possibility')
counsel.client_class = data.get('class')
counsel.contact_first = data.get('contact1th')
counsel.contact_second = data.get('contact2th')
counsel.contact_third = data.get('contact3th')
counsel.detail = data.get('detail')
counsel.save()
try:
student_history = student_models.StudentHistory.objects.get(student=student)
student_history.student = student
student_history.current_grade = data.get('currentgrade')
student_history.current_school=data.get('currentschool')
student_history.apply_grade=data.get('gradeapply')
student_history.eng_level=data.get('englevel')
student_history.toefl=data.get('toefl')
student_history.toefljr=data.get('toefljr')
student_history.gpa=data.get('gpa')
student_history.sat=data.get('sat')
student_history.address=data.get('address')
student_history.save()
except student_models.StudentHistory.DoesNotExist:
student_history = student_models.StudentHistory(
student=student,
current_grade=data.get('currentgrade'),
current_school=data.get('currentschool'),
apply_grade=data.get('gradeapply'),
eng_level=data.get('englevel'),
toefl=data.get('toefl'),
toefljr=data.get('toefljr'),
gpa=data.get('gpa'),
sat=data.get('sat'),
address=data.get('address'),
)
student_history.save()
else:
parent_info = False
if (data.get('parentname') or data.get('parentcell') or
data.get('parentemail') or data.get('parentwechat')):
parent_info = student_models.ParentInfo(
name=data.get('parentname'),
phone=data.get('parentcell'),
email=data.get('parentemail'),
wechat=data.get('parentwechat'),
)
parent_info.save()
student = student_models.Student(
counselor=found_counselor,
parent_info=parent_info if parent_info else None,
name=data.get('name'),
gender=data.get('gender'),
birthday=data.get('birth'),
email=data.get('email'),
skype=data.get('skype'),
nationality=data.get('nationality'),
wechat=data.get('wechat'),
phone=data.get('phone'),
)
student.save()
image_form = student_forms.StudentImageForm(request.POST,request.FILES)
if image_form.is_valid():
student.image = image_form.cleaned_data['image']
student.save()
counsel = form_models.Counsel(
counselor=found_counselor,
student=student,
counseling_date=data.get('date'),
desire_country=data.get('country'),
program_interested=data.get('program'),
expected_departure=data.get('departure'),
possibility=data.get('possibility'),
client_class=data.get('class'),
contact_first=data.get('contact1th'),
contact_second=data.get('contact2th'),
contact_third=data.get('contact3th'),
detail=data.get('detail'),
)
counsel.save()
if (
data.get('toefl') or data.get('toefljr') or data.get('gpa') or
data.get('sat') or data.get('englevel') or data.get('currentschool') or
data.get('currentgrade') or data.get('gradeapply') or data.get('address')):
student_history = student_models.StudentHistory(
student=student,
current_grade=data.get('currentgrade'),
current_school=data.get('currentschool'),
apply_grade=data.get('gradeapply'),
eng_level=data.get('englevel'),
toefl=data.get('toefl'),
toefljr=data.get('toefljr'),
gpa=data.get('gpa'),
sat=data.get('sat'),
address=data.get('address'),
)
student_history.save()
return HttpResponseRedirect(request.path_info)
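# ProspectiveView: paginated list (10 per page) of counsels whose student has
# not yet been placed at a school, filterable by status, possibility,
# departure window, country and program.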
class ProspectiveView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def get_counselor(self):
user = self.request.user
try:
found_counselor = user_models.Counselor.objects.get(user=user)
except user_models.Counselor.DoesNotExist:
return HttpResponse(status=401)
return found_counselor
def search(self):
if self.request.user.type == 'counselor':
found_counselor = self.get_counselor()
queryset = form_models.Counsel.objects.filter(
counselor=found_counselor, student__school__isnull=True)
elif self.request.user.type == 'agency_branch_admin':
found_agent_branch_admin = user_models.AgencyAdminUser.objects.get(user=self.request.user)
found_counselors = user_models.Counselor.objects.filter(agency=found_agent_branch_admin.agency)
queryset = form_models.Counsel.objects.filter(
counselor__in=found_counselors, student__school__isnull=True)
elif self.request.user.type == 'agency_admin':
found_agent_admin = user_models.AgencyHeadAdminUser.objects.get(user=self.request.user)
found_counselors = user_models.Counselor.objects.filter(agency__head=found_agent_admin.agency_head)
queryset = form_models.Counsel.objects.filter(
counselor__in=found_counselors, student__school__isnull=True)
else:
queryset = form_models.Counsel.objects.filter(student__school__isnull=True)
query = self.request.GET.get('q', None)
if query:
queryset = queryset.filter(Q(student__name__icontains=query) |
Q(student__email__icontains=query) |
Q(student__phone__icontains=query)
)
registered = self.request.GET.get('registered', None)
if registered:
queryset = queryset.filter(Q(student__status=registered))
possibility = self.request.GET.get('possibility', None)
if possibility:
queryset = queryset.filter(Q(possibility=possibility))
departure = self.request.GET.get('departure', None)
if departure:
if departure == '3m':
queryset = queryset.filter(expected_departure__range=(datetime.now(), datetime.now()+relativedelta(months=3)))
elif departure == '6m':
queryset = queryset.filter(expected_departure__range=(datetime.now(), datetime.now()+relativedelta(months=6)))
elif departure == '12m':
queryset = queryset.filter(expected_departure__range=(datetime.now(), datetime.now()+relativedelta(months=12)))
else:
queryset = queryset.filter(Q(expected_departure__gt=datetime.now()+relativedelta(years=1)))
country = self.request.GET.get('country', None)
if country:
queryset = queryset.filter(Q(desire_country=country))
program = self.request.GET.get('type', None)
if program:
queryset = queryset.filter(Q(program_interested=program))
return queryset
def get(self, request):
found_counselor = self.get_counselor()
all_counsel = self.search()
paginator = Paginator(all_counsel, 10)
page = request.GET.get('page')
try:
counsels = paginator.page(page)
except PageNotAnInteger:
counsels = paginator.page(1)
except EmptyPage:
counsels = paginator.page(paginator.num_pages)
return render(request, 'agent/prospective.html', {"data":counsels})
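# ApplicationRegisterView: same customer form as registration plus up to ten
# school choices; saving creates (or rebuilds) the Formality and its
# SchoolFormality rows in priority order.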
class ApplicationRegisterView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def get_counselor(self):
user = self.request.user
try:
found_counselor = user_models.Counselor.objects.get(user=user)
except user_models.Counselor.DoesNotExist:
return HttpResponse(status=401)
return found_counselor
def get(self, request, counsel_num=None):
if counsel_num:
try:
found_counsel = form_models.Counsel.objects.get(pk=counsel_num)
except form_models.Counsel.DoesNotExist:
return HttpResponse(status=404)
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
if not found_counsel.counselor == found_counselor:
return HttpResponse(status=401)
try:
student_history = student_models.StudentHistory.objects.get(student=found_counsel.student)
except:
student_history = None
countries = school_models.School.objects.values('country').order_by('country').distinct()
try:
found_formality = form_models.Formality.objects.get(counsel=found_counsel)
school_formalities = form_models.SchoolFormality.objects.filter(formality=found_formality).order_by('school_priority')
except:
school_formalities = None
if school_formalities:
formality_range = range(0, len(school_formalities))
empty_range = range(len(school_formalities)+1, 11)
else:
formality_range = None
empty_range = range(2, 11)
return render(
request,
'agent/register.html',
{
"counsel":found_counsel,
"student_history": student_history,
"countries": countries,
"formality_range": formality_range,
"empty_range": empty_range,
"school_formalities": school_formalities,
})
else:
return HttpResponse(status=400)
def post(self, request, counsel_num=None):
data = request.POST
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
if counsel_num:
try:
found_counsel = form_models.Counsel.objects.get(pk=counsel_num)
except form_models.Counsel.DoesNotExist:
return HttpResponse(status=404)
if request.user.type == 'counselor':
if not found_counselor == found_counsel.counselor:
return HttpResponse(status=401)
parent_info = None
if (data.get('parentname') or data.get('parentcell') or
data.get('parentemail') or data.get('parentwechat')):
try:
parent_info = found_counsel.student.parent_info
parent_info.name = data.get('parentname')
parent_info.phone = data.get('parentcell')
parent_info.email = data.get('parentemail')
parent_info.wechat = data.get('parentwechat')
parent_info.save()
except:
parent_info = student_models.ParentInfo(
name=data.get('parentname'),
phone=data.get('parentcell'),
email=data.get('parentemail'),
wechat=data.get('parentwechat'),
)
parent_info.save()
student = found_counsel.student
if request.user.type == 'counselor':
student.counselor=found_counselor
student.parent_info = parent_info
student.name = data.get('name')
student.gender = data.get('gender')
student.birthday = data.get('birth')
student.email = data.get('email')
student.skype = data.get('skype')
student.nationality = data.get('nationality')
student.wechat = data.get('wechat')
student.phone = data.get('phone')
student.save()
image_form = student_forms.StudentImageForm(request.POST,request.FILES)
if image_form.is_valid():
student.image = image_form.cleaned_data['image']
student.save()
counsel = found_counsel
counsel.student = student
counsel.program_interested = data.get('program')
counsel.expected_departure = data.get('departure')
counsel.possibility = data.get('possibility')
counsel.client_class = data.get('class')
counsel.contact_first = data.get('contact1th')
counsel.contact_second = data.get('contact2th')
counsel.contact_third = data.get('contact3th')
counsel.detail = data.get('detail')
counsel.save()
try:
student_history = student_models.StudentHistory.objects.get(student=student)
student_history.student = student
student_history.current_grade = data.get('currentgrade')
student_history.current_school=data.get('currentschool')
student_history.apply_grade=data.get('gradeapply')
student_history.eng_level=data.get('englevel')
student_history.toefl=data.get('toefl')
student_history.toefljr=data.get('toefljr')
student_history.gpa=data.get('gpa')
student_history.sat=data.get('sat')
student_history.address=data.get('address')
student_history.save()
except student_models.StudentHistory.DoesNotExist:
student_history = student_models.StudentHistory(
student=student,
current_grade=data.get('currentgrade'),
current_school=data.get('currentschool'),
apply_grade=data.get('gradeapply'),
eng_level=data.get('englevel'),
toefl=data.get('toefl'),
toefljr=data.get('toefljr'),
gpa=data.get('gpa'),
sat=data.get('sat'),
address=data.get('address'),
)
student_history.save()
school_ids = []
class_start_days = []
courses = []
for i in range(1,11):
if data.get('app_school'+str(i)):
school_ids.append(data.get('app_school'+str(i)))
class_start_days.append(data.get('app_start'+str(i)))
courses.append(data.get('app_course'+str(i)))
else:
continue
if school_ids:
# Update SchoolFormality by exsisting Formality
try:
formality = form_models.Formality.objects.get(counsel=counsel)
school_formalities = form_models.SchoolFormality.objects.filter(formality=formality)
school_formalities.delete()
#Create Formality and SchoolFormality
except form_models.Formality.DoesNotExist:
student.status = 'registered'
student.save()
formality = form_models.Formality(
counsel=counsel,
payment_complete=False,
)
formality.save()
for index, (school, class_start_day, course) in enumerate(zip(school_ids, class_start_days, courses)):
try:
found_school = school_models.School.objects.get(pk=int(school))
except school_models.School.DoesNotExist:
return HttpResponse(status=404)
school_formality = form_models.SchoolFormality(
formality=formality,
school=found_school,
school_priority=int(index+1),
class_start_at=class_start_day if class_start_day else None,
course=course if course else None,
)
school_formality.save()
else:
return HttpResponse(status=400)
else:
return HttpResponse(status=400)
return HttpResponseRedirect(request.path_info)
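# ProcessView: dashboard of in-progress, completed and cancelled formalities
# plus to-do querysets (pending applications, school interviews, admission
# decisions, I-20s, visa interviews, departures), scoped to the user's agency.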
class ProcessView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def get_counselor(self):
user = self.request.user
try:
found_counselor = user_models.Counselor.objects.get(user=user)
except user_models.Counselor.DoesNotExist:
return HttpResponse(status=401)
return found_counselor
def get(self, request):
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
found_formalities = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
elif request.user.type == 'agency_branch_admin':
found_agent_branch_admin = user_models.AgencyAdminUser.objects.get(user=request.user)
found_formalities = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
elif request.user.type == 'agency_admin':
found_agent_admin = user_models.AgencyHeadAdminUser.objects.get(user=request.user)
found_formalities = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
else:
found_formalities = form_models.Formality.objects.all()
found_formalities = found_formalities.filter(
departure_confirmed=None,
canceled_at=None
)
try:
application_not_completed = found_formalities.filter(
Q(school_formality__enrolment_apply_done__isnull=True)|
Q(school_formality__enrolment_apply_done=False)).order_by('created_at').distinct()
except:
application_not_completed =None
try:
upcoming_school_interview = form_models.SchoolFormality.objects.filter(
Q(school_interview_done__isnull=True)|
Q(school_interview_done=False)).filter(formality__in=found_formalities).exclude(
school_interview_date__isnull=True).order_by('school_interview_date').distinct()
except:
upcoming_school_interview = None
try:
pending_admission_decision = found_formalities.filter(
school_formality__acceptance_date__isnull=True).order_by("created_at").distinct()
except:
pending_admission_decision=None
try:
pending_issue_i20 = found_formalities.filter(
Q(school_formality__i20_completed__isnull=True)|
Q(school_formality__i20_completed=False)).order_by("created_at").distinct()
except:
pending_issue_i20=None
try:
upcoming_visa_interview = found_formalities.filter(
visa_reserve_date__isnull=False,
visa_granted_date__isnull=True,
visa_rejected_date__isnull=True).order_by('visa_reserve_date').distinct()
except:
upcoming_visa_interview = None
try:
departure_schedule = found_formalities.exclude(
air_departure_date__isnull=True).order_by('air_departure_date').distinct()
except:
departure_schedule = None
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
in_progress = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
process_completed = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
process_canceled = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
elif request.user.type == 'agency_branch_admin':
found_agent_branch_admin = user_models.AgencyAdminUser.objects.get(user=request.user)
in_progress = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
process_completed = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
process_canceled = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
elif request.user.type == 'agency_admin':
found_agent_admin = user_models.AgencyHeadAdminUser.objects.get(user=request.user)
in_progress = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
process_completed = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
process_canceled = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
else:
in_progress = form_models.Formality.objects.all()
process_completed = form_models.Formality.objects.all()
process_canceled = form_models.Formality.objects.all()
in_progress = in_progress.filter(
departure_confirmed=None).filter(canceled_at=None).order_by("-created_at")
process_completed = process_completed.filter(canceled_at=None).exclude(
departure_confirmed=None).order_by("-departure_confirmed")
process_canceled = process_canceled.exclude(
canceled_at=None).order_by("-canceled_at")
return render(
request,
'agent/process.html',
{
"application_not_completed": application_not_completed,
"upcoming_school_interview": upcoming_school_interview,
"pending_admission_decision": pending_admission_decision,
"pending_issue_i20": pending_issue_i20,
"upcoming_visa_interview": upcoming_visa_interview,
"departure_schedule": departure_schedule,
"in_progress":in_progress,
"process_completed":process_completed,
"process_canceled":process_canceled,
}
)
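# ProcessApplyView: detail page for a single formality. POST dispatches on the
# submitted 'type' field to update the matching step (registration fees,
# enrolment application, interviews, I-20, visa, flights, accommodation,
# departure).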
class ProcessApplyView(LoginRequiredMixin, View):
login_url = '/accounts/login/'
def get_counselor(self):
user = self.request.user
try:
found_counselor = user_models.Counselor.objects.get(user=user)
except user_models.Counselor.DoesNotExist:
return HttpResponse(status=401)
return found_counselor
def get(self, request, formality_id):
try:
found_formality = form_models.Formality.objects.get(pk=formality_id)
except form_models.Formality.DoesNotExist:
return HttpResponse(status=400)
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
if not found_formality.counsel.counselor == found_counselor:
return HttpResponse(status=401)
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
in_progress = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
process_completed = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
process_canceled = form_models.Formality.objects.filter(
counsel__counselor=found_counselor)
elif request.user.type == 'agency_branch_admin':
found_agent_branch_admin = user_models.AgencyAdminUser.objects.get(user=request.user)
in_progress = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
process_completed = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
process_canceled = form_models.Formality.objects.filter(
counsel__counselor__agency=found_agent_branch_admin.agency)
elif request.user.type == 'agency_admin':
found_agent_admin = user_models.AgencyHeadAdminUser.objects.get(user=request.user)
in_progress = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
process_completed = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
process_canceled = form_models.Formality.objects.filter(
counsel__counselor__agency__head=found_agent_admin.agency_head)
else:
in_progress = form_models.Formality.objects.all()
process_completed = form_models.Formality.objects.all()
process_canceled = form_models.Formality.objects.all()
in_progress = in_progress.filter(
departure_confirmed=None).filter(canceled_at=None).order_by("-created_at")
process_completed = process_completed.filter(canceled_at=None).exclude(
departure_confirmed=None).order_by("-departure_confirmed")
process_canceled = process_canceled.exclude(
canceled_at=None).order_by("-canceled_at")
student_info = found_formality.counsel.student
student_history = student_info.student_history
school_formalities = form_models.SchoolFormality.objects.filter(formality=found_formality).order_by('school_priority')
formality_form = forms.FormalityForm
formset_init = form_models.FormalityFile.objects.filter(formality=found_formality).order_by('-created_at')
file_formset = forms.FileFormset()
datetime_form = forms.DateTimeForm
cancel_enrolment_form = forms.CancelEnrolmentForm
visa_interview_scheduling_form = forms.VisaReserveSchedulingForm(instance=found_formality)
visa_granted_form = forms.VisaGrantedForm(instance=found_formality)
visa_rejected_form = forms.VisaRejectedForm(instance=found_formality)
flight_departure_form = forms.FlightDepartureForm(instance=found_formality)
flight_arrive_form = forms.FlightArriveForm(instance=found_formality)
return render(
request,
'agent/formality.html',
{
"found_formality":found_formality,
"in_progress":in_progress,
"process_completed":process_completed,
"process_canceled":process_canceled,
"student_info":student_info,
"student_history":student_history,
"school_formalities":school_formalities,
"formality_form":formality_form,
"formset_init":formset_init,
"file_formset":file_formset,
"datetime_form":datetime_form,
'cancel_enrolment_form':cancel_enrolment_form,
"visa_interview_scheduling_form":visa_interview_scheduling_form,
"visa_granted_form": visa_granted_form,
"visa_rejected_form": visa_rejected_form,
"flight_departure_form": flight_departure_form,
"flight_arrive_form": flight_arrive_form,
}
)
def post(self, request, formality_id):
try:
found_formality = form_models.Formality.objects.get(pk=formality_id)
except form_models.Formality.DoesNotExist:
return HttpResponse(status=400)
try:
found_accommodation = form_models.AccommodationFormality.objects.get(formality=found_formality)
except form_models.AccommodationFormality.DoesNotExist:
found_accommodation = None
if request.user.type == 'counselor':
found_counselor = self.get_counselor()
if not found_formality.counsel.counselor == found_counselor:
return HttpResponse(status=401)
data = request.POST or request.FILES
if data:
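# Each branch below handles one form on the formality page, keyed by the
# hidden 'type' input.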
if data.get('type') == 'registration':
school_ids = data.getlist('school_id')
for school_id in school_ids:
try:
found_school_formality = form_models.SchoolFormality.objects.get(
formality=found_formality, school__id=int(school_id))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=404)
if data.get('processing_fee_'+str(school_id)):
processing_fee = Decimal(data.get('processing_fee_'+str(school_id)))
else:
processing_fee = None
if data.get('processing_fee_done_'+str(school_id)) == 'on':
processing_fee_done = True
else:
processing_fee_done = False
found_school_formality.processing_fee = processing_fee
found_school_formality.processing_fee_done = processing_fee_done
found_school_formality.save()
school_formalities = form_models.SchoolFormality.objects.filter(formality=found_formality)
processing_fee_done_list = []
for school_formality in school_formalities:
processing_fee_done_list.append(school_formality.processing_fee_done)
if all(processing_fee_done_list):
found_formality.payment_complete=True
found_formality.apply_at=datetime.now()
else:
found_formality.payment_complete=False
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'cancel_registration':
found_formality.canceled_at = data.get('Date') if data.get('Date') else None
found_formality.cancel_reason=data.get('cancel_reason') if data.get('cancel_reason') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == "file_upload":
formset = forms.FileFormset(request.POST, request.FILES)
if formset.is_valid():
for form in formset:
name = form.cleaned_data.get('name')
file_source = form.cleaned_data.get('file_source')
formality_file = form_models.FormalityFile(
formality=found_formality,
name=name,
file_source=file_source,
)
formality_file.save()
return HttpResponse(status=201)
else:
print(formset.errors)
return HttpResponse(status=400)
elif data.get('type') == "enrolment_application":
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.prepared_passport = True if data.get('passport') else False
found_school_formality.prepared_transcript = True if data.get('transcript') else False
found_school_formality.prepared_eng_exams = True if data.get('eng_exams') else False
found_school_formality.prepared_recommendation = True if data.get('recommendation') else False
found_school_formality.prepared_essay = True if data.get('essay') else False
found_school_formality.enrolment_apply_done = True if data.get('enrolment_apply_done') else False
found_school_formality.enrolment_apply_fee = Decimal(data.get('enrolment_apply_fee')) if data.get('enrolment_apply_fee') else None
found_school_formality.enrolment_apply_done_date = datetime.now() if data.get('enrolment_apply_done') else None
found_school_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == "school_interview":
if request.user.type == 'counselor' or request.user.type == 'agency_admin' or request.user.type == 'agency_branch_admin':
return HttpResponse("You don't have permissions", status=401)
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.school_interview_date = data.get('school_interview_date') if data.get('school_interview_date') else None
found_school_formality.school_interview_time = data.get('school_interview_time') if data.get('school_interview_time') else None
found_school_formality.mock_interview = True if data.get('mock_interview') else False
found_school_formality.school_interview_done = True if data.get('interview_done') else False
found_school_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == "accepted":
if request.user.type == 'counselor' or request.user.type == 'agency_admin' or request.user.type == 'agency_branch_admin':
return HttpResponse("You don't have permissions", status=401)
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.acceptance_date = data.get('Date') if data.get('Date') else None
found_school_formality.acceptance_letter = True if data.get('acceptance_letter') else None
found_school_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'cancel_enrolment':
if request.user.type == 'counselor' or request.user.type == 'agency_admin' or request.user.type == 'agency_branch_admin':
return HttpResponse("You don't have permissions", status=401)
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.cancel_enrolment_date = data.get('cancel_enrolment_date') if data.get('cancel_enrolment_date') else None
found_school_formality.cancel_enrolment_time = data.get('cancel_enrolment_time') if data.get('cancel_enrolment_time') else None
found_school_formality.save()
try:
found_student = student_models.Student.objects.get(counsel__formality__school_formality=found_school_formality)
except student_models.Student.DoesNotExist:
return HttpResponse("Student school formality error", status=400)
found_student.school=None
found_student.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'i20_request':
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.i20_completed = True if data.get('i20_completed') else False
found_school_formality.i20_fee = Decimal(data.get('i20_fee')) if data.get('i20_fee') else None
found_school_formality.i20_receipt = True if data.get('i20_receipt') else False
found_school_formality.save()
if found_school_formality.i20_completed:
try:
found_student = student_models.Student.objects.get(counsel__formality__school_formality=found_school_formality)
except student_models.Student.DoesNotExist:
return HttpResponse("Student school formality error", status=400)
found_student.school=found_school_formality.school
found_student.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'i20_received':
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.i20_received_date = data.get('Date') if data.get('Date') else None
found_school_formality.i20_copy = True if data.get('i20_copy') else False
found_school_formality.i20_tracking = data.get('i20_tracking') if data.get('i20_tracking') else None
found_school_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'program_fee_payment':
try:
found_school_formality = form_models.SchoolFormality.objects.get(pk=int(data.get('school_formality_id')))
except form_models.SchoolFormality.DoesNotExist:
return HttpResponse(status=400)
found_school_formality.provider_application = True if data.get('provider_application') else False
found_school_formality.bge_program_application = True if data.get('bge_program_application') else False
found_school_formality.immunization = True if data.get('immunization') else False
found_school_formality.financial_support = True if data.get('financial_support') else False
found_school_formality.program_fee_completed = True if data.get('program_fee_completed') else False
found_school_formality.program_fee = Decimal(data.get('program_fee')) if data.get('program_fee') else None
found_school_formality.program_fee_receipt = True if data.get('program_fee_receipt') else False
found_school_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'visa_interview_scheduling':
found_formality.visa_reserve_date = data.get('visa_reserve_date') if data.get('visa_reserve_date') else None
found_formality.visa_reserve_time = data.get('visa_reserve_time') if data.get('visa_reserve_time') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'visa_granted':
found_formality.visa_granted_date = data.get('visa_granted_date') if data.get('visa_granted_date') else None
found_formality.visa_granted_time = data.get('visa_granted_time') if data.get('visa_granted_time') else None
found_formality.visa_copy_recieved = True if data.get('visa_copy_recieved') else False
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'visa_rejected':
found_formality.visa_rejected_date = data.get('visa_rejected_date') if data.get('visa_rejected_date') else None
found_formality.visa_rejected_time = data.get('visa_rejected_time') if data.get('visa_rejected_time') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'flight_ticketing':
found_formality.eticket_attached = True if data.get('eticket_attached') else False
found_formality.air_departure_date = data.get('air_departure_date') if data.get('air_departure_date') else None
found_formality.air_departure_time = data.get('air_departure_time') if data.get('air_departure_time') else None
found_formality.air_departure_port = data.get('air_departure_port') if data.get('air_departure_port') else None
found_formality.air_arrive_date = data.get('air_arrive_date') if data.get('air_arrive_date') else None
found_formality.air_arrive_time = data.get('air_arrive_time') if data.get('air_arrive_time') else None
found_formality.air_arrive_port = data.get('air_arrive_port') if data.get('air_arrive_port') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'airport_pickup':
found_formality.pickup_num = data.get('pickup_num') if data.get('pickup_num') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'accommodation_application':
if found_accommodation:
found_accommodation.with_animal = True if data.get('with_animal') else False
found_accommodation.with_child = True if data.get('with_child') else False
found_accommodation.with_other_student = True if data.get('with_other_student') else False
found_accommodation.other_preference = data.get('other_preference') if data.get('other_preference') else None
found_accommodation.application_at = datetime.now()
found_accommodation.save()
else:
accommodation = form_models.AccommodationFormality(
formality = found_formality,
with_animal = True if data.get('with_animal') else False,
with_child = True if data.get('with_child') else False,
with_other_student = True if data.get('with_other_student') else False,
other_preference = data.get('other_preference') if data.get('other_preference') else None,
application_at = datetime.now()
)
accommodation.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'homestay_recommendation':
if found_accommodation:
found_accommodation.recommendation_a = request.FILES.get('recommendation_a') if request.FILES.get('recommendation_a') else None
found_accommodation.recommendation_b = request.FILES.get('recommendation_b') if request.FILES.get('recommendation_b') else None
found_accommodation.recommendation_c = request.FILES.get('recommendation_c') if request.FILES.get('recommendation_c') else None
found_accommodation.recommendation_a_comment = data.get('recommendation_a_comment') if data.get('recommendation_a_comment') else None
found_accommodation.recommendation_b_comment = data.get('recommendation_b_comment') if data.get('recommendation_b_comment') else None
found_accommodation.recommendation_c_comment = data.get('recommendation_c_comment') if data.get('recommendation_c_comment') else None
found_accommodation.homestay_recommendation_at = datetime.now()
found_accommodation.save()
else:
accommodation = form_models.AccommodationFormality(
formality = found_formality,
recommendation_a = request.FILES.get('recommendation_a') if request.FILES.get('recommendation_a') else None,
recommendation_b = request.FILES.get('recommendation_b') if request.FILES.get('recommendation_b') else None ,
recommendation_c = request.FILES.get('recommendation_c') if request.FILES.get('recommendation_c') else None ,
recommendation_a_comment = data.get('recommendation_a_comment') if data.get('recommendation_a_comment') else None,
recommendation_b_comment = data.get('recommendation_b_comment') if data.get('recommendation_b_comment') else None,
recommendation_c_comment = data.get('recommendation_c_comment') if data.get('recommendation_c_comment') else None,
homestay_recommendation_at = datetime.now()
)
accommodation.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'host_selection':
if found_accommodation:
found_accommodation.host_selection = data.get('host_selection') if data.get('host_selection') else None
found_accommodation.host_selection_at = datetime.now()
found_accommodation.save()
else:
accommodation = form_models.AccommodationFormality(
formality = found_formality,
host_selection = data.get('host_selection') if data.get('host_selection') else None,
host_selection_at = datetime.now()
)
accommodation.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'parents_accommodation':
if found_accommodation:
found_accommodation.parent_accommodation_guest_num = data.get('parent_accommodation_guest_num') if data.get('parent_accommodation_guest_num') else None
found_accommodation.parent_length_of_stay = data.get('parent_length_of_stay') if data.get('parent_length_of_stay') else None
found_accommodation.parent_other_preference = data.get('parent_other_preference') if data.get('parent_other_preference') else None
found_accommodation.parent_accommodation_at = datetime.now()
found_accommodation.save()
else:
accommodation = form_models.AccommodationFormality(
formality = found_formality,
parent_accommodation_guest_num = data.get('parent_accommodation_guest_num') if data.get('parent_accommodation_guest_num') else None,
parent_length_of_stay = data.get('parent_length_of_stay') if data.get('parent_length_of_stay') else None,
parent_other_preference = data.get('parent_other_preference') if data.get('parent_other_preference') else None,
parent_accommodation_at = datetime.now()
)
accommodation.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'departure_ot':
found_formality.departure_ot = data.get('Date') if data.get('Date') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
elif data.get('type') == 'departure_confirmed':
found_formality.departure_confirmed = data.get('Date') if data.get('Date') else None
found_formality.save()
return HttpResponseRedirect(request.path_info)
else:
return HttpResponse(status=400)
else:
return HttpResponse(status=400)
# Have to solve
@login_required(login_url='/accounts/login/')
def upload_files(request, formality_id):
data = request.POST
try:
found_formality = form_models.Formality.objects.get(id=formality_id)
except form_models.Formality.DoesNotExist:
return HttpResponse(status=400)
if data.getlist('delete_file'):
for file_id in data.getlist('delete_file'):
found_file = form_models.FormalityFile.objects.get(id=int(file_id))
found_file.delete()
return HttpResponseRedirect("/agent/process/"+str(formality_id))
if request.FILES.get('file_source1') or request.FILES.get('file_source2') or request.FILES.get('file_source3') or request.FILES.get('file_source4') or request.FILES.get('file_source5'):
file_form = form_forms.FileboxForm(request.POST,request.FILES)
if file_form.is_valid():
for num in range(1, 6):
file_source = request.FILES.get('file_source'+str(num))
if file_source:
formality_file = form_models.FormalityFile()
formality_file.formality = found_formality
formality_file.name = request.POST.get('filebox'+str(num))
formality_file.file_source = file_source
formality_file.save()
else:
return HttpResponse(status=400)
return HttpResponseRedirect("/agent/process/"+str(formality_id))
@login_required(login_url='/accounts/login/')
def load_states(request):
country = request.GET.get('country')
states = school_models.Secondary.objects.filter(school__country=country).values('state').order_by('state').distinct()
state_list = []
for state in states:
state_list.append(state)
states = school_models.College.objects.filter(school__country=country).values('state').order_by('state').distinct()
for state in states:
state_list.append(state)
result = json.dumps({"data":state_list})
return HttpResponse(result, content_type="application/json")
@login_required(login_url='/accounts/login/')
def load_schools(request):
state = request.GET.get('state')
if school_models.Secondary.objects.filter(state__iexact=state):
schools = school_models.School.objects.filter(secondary__state__iexact=state).order_by('name')
elif school_models.College.objects.filter(state__iexact=state):
schools = school_models.School.objects.filter(college__state__iexact=state).order_by('name')
else:
return HttpResponse(status=400)
data = serializers.serialize("json", schools)
return HttpResponse(data, content_type="application/json")
```
#### File: osc_bge/users/models.py
```python
import datetime
import os
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from osc_bge.agent import models as agent_models
from osc_bge.bge import models as bge_models
def set_filename_format(now, instance, filename):
""" file format setting e.g)
{username}-{date}-{microsecond}{extension}
username-2016-07-12-158859.png """
return "{username}-{date}-{microsecond}{extension}".format(
username=instance.username,
date=str(now.date()),
microsecond=now.microsecond,
extension=os.path.splitext(filename)[1],
)
def user_directory_path(instance, filename):
"""
image upload directory setting e.g)
images/{year}/{month}/{day}/{username}/{filename}
images/2016/7/12/username/username-2016-07-12-158859.png
"""
now = datetime.datetime.now()
path = "images/{year}/{month}/{day}/{username}/{filename}".format(
year=now.year,
month=now.month,
day=now.day,
username=instance.username,
filename=set_filename_format(now, instance, filename),
)
return path
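# A quick sketch of what these two helpers should produce together (the username
# "jdoe" and the timestamp are made-up values, shown only to illustrate the format):
#
#   set_filename_format(now, instance, "photo.png")
#     -> "jdoe-2016-07-12-158859.png"
#   user_directory_path(instance, "photo.png")
#     -> "images/2016/7/12/jdoe/jdoe-2016-07-12-158859.png"
#
# Django's ImageField(upload_to=user_directory_path) calls the second helper with
# (instance, filename) at save time, so the path is rebuilt for every upload.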
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
USER_TYPE = (
('bge_admin', "BGE_Admin"),
('bge_team', 'BGE_Team'),
('bge_branch_admin', "BGE_Branch_Admin"),
('bge_accountant', 'BGE_Accountant'),
('agency_admin', 'Agency_Admin'),
('agency_branch_admin', 'Agency_Branch_Admin'),
('counselor', 'Counselor'),
)
address = models.CharField(max_length=255, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
type = models.CharField(max_length=140, null=True, choices=USER_TYPE, blank=True)
image = models.ImageField(upload_to=user_directory_path, null=True, blank=True)
def __str__(self):
return "{}".format(self.username)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
# Bge admission team
class BgeAdminUser(models.Model):
PARTITION_CHOICES = (
('entrance', 'ENTRANCE'),
('accounting', 'ACCOUNTING'),
('admin', 'ADMIN'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
group = models.ForeignKey(bge_models.BgeBranch, on_delete=models.SET_NULL, null=True)
partition = models.CharField(max_length=255, choices=PARTITION_CHOICES, null=True)
def __str__(self):
return "{}".format(self.user)
# Bge branch manager
class BgeBranchAdminUser(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True, related_name='branch_admin')
branch = models.ForeignKey(bge_models.BgeBranch, on_delete=models.SET_NULL, null=True, related_name='branch_admin')
def __str__(self):
return "{}".format(self.user)
# Bge branch coordi
class BgeBranchCoordinator(models.Model):
POSITION_CHOICES = (
('school_coordi', 'School coordinator'),
('student_coordi', 'Student coordinator'),
('host_coordi', 'Host coordinator'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True, related_name='coordinator')
branch = models.ForeignKey(bge_models.BgeBranch, on_delete=models.SET_NULL, null=True, related_name='branch_coordi')
position = models.CharField(max_length=255, choices=POSITION_CHOICES, null=True)
def __str__(self):
return "{} - {}".format(self.user, self.branch)
class AgencyHeadAdminUser(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True, related_name="agency_head_admin")
agency_head = models.ForeignKey(agent_models.AgencyHead, on_delete=models.SET_NULL, null=True)
def __str__(self):
return "{} - {}".format(self.agency_head, self.user)
class AgencyAdminUser(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True, related_name="agency_admin")
agency = models.ForeignKey(agent_models.Agency, on_delete=models.SET_NULL, null=True)
def __str__(self):
return "{} - {}".format(self.agency, self.user)
class Counselor(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True, related_name="counselor")
agency = models.ForeignKey(agent_models.Agency, on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
return "{}".format(self.user)
``` |
{
"source": "JisuHann/Autonomous-Driving-Cart-MEME",
"score": 3
} |
#### File: BetaBot/script/cam_test.py
```python
import cv2
import numpy as np
# cap = cv2.VideoCapture(0) # open the camera
# if cap.isOpened() == False: # check that the camera opened
#     exit()
# while True :
#     ret, img = cap.read() # read a frame from the camera
#     cv2.imshow('preview', img) # show the captured image
#     if cv2.waitKey(10) >= 0 : # wait 10 ms; stop on any key press
#         break
# # disconnect
# cap.release()
# cv2.destroyAllWindows()
import time
from Motor import *
from Ultrasonic import *
PWM=Motor()
ultrasonic=Ultrasonic()
def forward():
PWM.setMotorModel(1000, 1000, 1000, 1000) # Forward
print("The car is moving forward")
time.sleep(1)
def backward():
PWM.setMotorModel(-1000, -1000, -1000, -1000) # Back
print("The car is going backwards")
time.sleep(1)
def left():
PWM.setMotorModel(-1500, -1500, 2000, 2000) # Left
print("The car is turning left")
time.sleep(1)
def right():
PWM.setMotorModel(2000, 2000, -1500, -1500) # Right
print("The car is turning right")
time.sleep(1)
def stop():
PWM.setMotorModel(0, 0, 0, 0) # Stop
def alphabot_nav(x, y):
d = ultrasonic.get_distance()
print("distance: %s"%d)
if d >30:
if x>65:
print("move right")
right()
time.sleep(0.1)
stop()
elif x<35:
print("move left")
left()
time.sleep(0.1)
stop()
else:
print("move forward")
forward()
time.sleep(0.1)
stop()
else:
print("stop")
stop()
def read_rgb_image(image_name, show):
rgb_image = cv2.imread(image_name)
if show:
cv2.imshow("RGB Image",rgb_image)
return rgb_image
def filter_color(rgb_image, lower_bound_color, upper_bound_color):
#convert the image into the HSV color space
hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2HSV)
cv2.imshow("hsv image",hsv_image)
#define a mask using the lower and upper bounds of the yellow color
mask = cv2.inRange(hsv_image, lower_bound_color, upper_bound_color)
return mask
def getContours(binary_image):
#_, contours, hierarchy = cv2.findContours(binary_image,
# cv2.RETR_TREE,
# cv2.CHAIN_APPROX_SIMPLE)
_, contours, hierarchy = cv2.findContours(binary_image.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
return contours
def draw_ball_contour(binary_image, rgb_image, contours):
global gx, gy
black_image = np.zeros([binary_image.shape[0], binary_image.shape[1],3],'uint8')
for c in contours:
area = cv2.contourArea(c)
perimeter= cv2.arcLength(c, True)
((x, y), radius) = cv2.minEnclosingCircle(c)
if (area>3000):
cv2.drawContours(rgb_image, [c], -1, (150,250,150), 1)
cv2.drawContours(black_image, [c], -1, (150,250,150), 1)
cx, cy = get_contour_center(c)
# print(cx, cy)
gx = cx
gy = cy
cv2.circle(rgb_image, (cx,cy),(int)(radius),(0,0,255),1)
cv2.circle(black_image, (cx,cy),(int)(radius),(0,0,255),1)
cv2.circle(black_image, (cx,cy),5,(150,150,255),-1)
#print ("Area: {}, Perimeter: {}".format(area, perimeter))
else:
gx = -100
gy = -100
#print ("number of contours: {}".format(len(contours)))
cv2.imshow("RGB Image Contours",rgb_image)
cv2.imshow("Black Image Contours",black_image)
def get_contour_center(contour):
M = cv2.moments(contour)
cx=-1
cy=-1
if (M['m00']!=0):
cx= int(M['m10']/M['m00'])
cy= int(M['m01']/M['m00'])
return cx, cy
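# The centroid above comes from the image moments: cx = M10 / M00 and cy = M01 / M00,
# where M00 is the contour area. As a rough, hypothetical check, a filled square
# spanning x in [10, 20] and y in [30, 40] should give a centre near (15, 35).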
def detect_ball_in_a_frame(image_frame):
yellowLower =(30, 100, 50)
yellowUpper = (60, 255, 255)
rgb_image = image_frame
binary_image_mask = filter_color(rgb_image, yellowLower, yellowUpper)
contours = getContours(binary_image_mask)
draw_ball_contour(binary_image_mask, rgb_image,contours)
def main():
video_capture = cv2.VideoCapture(0)
# video_capture = cv2.VideoCapture('video/tennis-ball-video.mp4')
width = video_capture.get(3) # float `width`
height = video_capture.get(4)
while(True):
ret, frame = video_capture.read()
detect_ball_in_a_frame(frame)
time.sleep(0.033)
if gx>0:
print((gx/width) * 100 , (gy/height) * 100)
alphabot_nav((gx/width) * 100, 0)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
cv2.waitKey(0)
cv2.destroyAllWindows()
```
#### File: BetaBot/script/move_cv.py
```python
from picamera.array import PiRGBArray
from picamera import PiCamera
import numpy as np
import cv2
import time
x_pos = 0
y_pos = 0
def read_rgb_image(image_name, show):
rgb_image = cv2.imread(image_name)
if show:
cv2.imshow("RGB Image",rgb_image)
return rgb_image
def filter_color(rgb_image, lower_bound_color, upper_bound_color):
#convert the image into the HSV color space
hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2HSV)
cv2.imshow("hsv image",hsv_image)
#define a mask using the lower and upper bounds of the yellow color
mask = cv2.inRange(hsv_image, lower_bound_color, upper_bound_color)
return mask
def getContours(binary_image):
#_, contours, hierarchy = cv2.findContours(binary_image,
# cv2.RETR_TREE,
# cv2.CHAIN_APPROX_SIMPLE)
_, contours, hierarchy = cv2.findContours(binary_image.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
return contours
def draw_ball_contour(binary_image, rgb_image, contours):
global x_pos, y_pos
black_image = np.zeros([binary_image.shape[0], binary_image.shape[1],3],'uint8')
for c in contours:
area = cv2.contourArea(c)
print("area : ", str(area))
perimeter= cv2.arcLength(c, True)
((x, y), radius) = cv2.minEnclosingCircle(c)
if (area>3000):
cv2.drawContours(rgb_image, [c], -1, (150,250,150), 1)
cv2.drawContours(black_image, [c], -1, (150,250,150), 1)
cx, cy = get_contour_center(c)
print("cx : ", str(cx/640), "cy : ", str(cy/480) )
x_pos = cx/640*100
y_pos = cy/480*100
cv2.circle(rgb_image, (cx,cy),(int)(radius),(0,0,255),1)
cv2.circle(black_image, (cx,cy),(int)(radius),(0,0,255),1)
cv2.circle(black_image, (cx,cy),5,(150,150,255),-1)
#print ("Area: {}, Perimeter: {}".format(area, perimeter))
else:
x_pos = -1
y_pos = -1
#print ("number of contours: {}".format(len(contours)))
cv2.imshow("RGB Image Contours",rgb_image)
cv2.imshow("Black Image Contours",black_image)
def get_contour_center(contour):
M = cv2.moments(contour)
cx=-1
cy=-1
if (M['m00']!=0):
cx= int(M['m10']/M['m00'])
cy= int(M['m01']/M['m00'])
return cx, cy
def detect_ball_in_a_frame(image_frame):
yellowLower =(0,178,103)
yellowUpper = (190,255,246)
rgb_image = image_frame
binary_image_mask = filter_color(rgb_image, yellowLower, yellowUpper)
contours = getContours(binary_image_mask)
draw_ball_contour(binary_image_mask, rgb_image,contours)
from Motor import *
from Ultrasonic import *
from Led import *
led=Led()
def set_led(pos, color):
p = [0x20, 0x10,0x08, 0x04, 0x01, 0x02, 0x40, 0x80]
if pos =="all":
for i in p:
led.ledIndex(i,color[0], color[1], color[2])
elif pos == "left":
for i in range(4):
led.ledIndex(p[i],color[0], color[1], color[2])
for i in range(4):
led.ledIndex(p[i+4],0, 0, 0)
elif pos == "right":
for i in range(4):
led.ledIndex(p[i],0, 0, 0)
for i in range(4):
led.ledIndex(p[i+4],color[0], color[1], color[2])
# time.sleep(sleep_time)
PWM=Motor()
ultrasonic=Ultrasonic()
sleep_time=1/32
distance_tolerance = 15
motor_speed = 0
def forward():
PWM.setMotorModel(500, 500, 500, 500) # Forward
set_led("all", [0,20,0])
print("The car is moving forward")
time.sleep(sleep_time)
def backward():
PWM.setMotorModel(-500, -500, -500, -500) # Back
print("The car is going backwards")
time.sleep(sleep_time)
def left():
PWM.setMotorModel(-750, -750, 1000, 1000) # Left
print("The car is turning left")
set_led("left", [0,10,0])
time.sleep(sleep_time)
def right():
PWM.setMotorModel(1000, 1000, -750, -750) # Right
set_led("right", [0,10,0])
print("The car is turning right")
time.sleep(sleep_time)
def stop_distance():
PWM.setMotorModel(0, 0, 0, 0) # Stop
set_led("all", [10,0,0])
print("stop due to distance")
def stop_noone():
PWM.setMotorModel(0, 0, 0, 0) # Stop
set_led("all", [10,10,0])
print("stop du to no one detected")
def alphabot_nav(x, y):
d = ultrasonic.get_distance()
print("distance: %s"%d)
    if d < distance_tolerance:
        stop_distance()
    elif d > distance_tolerance and x >= 0 and y >= 0:
if x>65:
print("move right")
right()
# stop()
elif x<35:
print("move left")
left()
# stop()
else:
print("move forward")
forward()
# stop()
else:
stop_noone()
def main():
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
detect_ball_in_a_frame(frame.array)
time.sleep(sleep_time)
alphabot_nav(x_pos, y_pos)
# image = frame.array
# # show the frame
# cv2.imshow("Frame", image)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
cv2.waitKey(0)
cv2.destroyAllWindows()
```
#### File: Autonomous-Driving-Cart-MEME/keyword_spotting/real_time.py
```python
from queue import Queue
import sys
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import time
import pyaudio
import numpy as np
data_c = None
# Use 1101 for 2sec input audio
Tx = 5511 # The number of time steps input to the model from the spectrogram
n_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram
# Use 272 for 2sec input audio
Ty = 1375  # The number of time steps in the output of our model
"""## Build the model"""
from keras.models import load_model
model = load_model('./keyword_spotting/tr_model_t.h5')
#model = load_model('./tr_model_t.h5')
def detect_triggerword_spectrum(x):
x = x.swapaxes(0,1)
x = np.expand_dims(x, axis=0)
predictions = model.predict(x)
return predictions.reshape(-1)
def has_new_triggerword(predictions, chunk_duration, feed_duration, threshold=0.5):
predictions = predictions > threshold
chunk_predictions_samples = int(len(predictions) * chunk_duration / feed_duration)
chunk_predictions = predictions[-chunk_predictions_samples:]
level = chunk_predictions[0]
for pred in chunk_predictions:
if pred > level:
return True
else:
level = pred
return False
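# Minimal sketch of the rising-edge logic above (toy numbers, not real model output):
# with threshold 0.5 the sequence [0.1, 0.2, 0.9, 0.8] thresholds to
# [False, False, True, True]; the False -> True transition makes the function return
# True, while an all-True or all-False chunk returns False.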
"""# Record audio stream from mic"""
chunk_duration = 0.5 # Each read length in seconds from mic.
fs = 44100 # sampling rate for mic
chunk_samples = int(fs * chunk_duration) # Each read length in number of samples.
# Each model input data duration in seconds, need to be an integer numbers of chunk_duration
feed_duration = 10
feed_samples = int(fs * feed_duration)
assert feed_duration/chunk_duration == int(feed_duration/chunk_duration)
def get_spectrogram(data):
nfft = 200 # Length of each window segment
fs = 8000 # Sampling frequencies
noverlap = 120 # Overlap between windows
nchannels = data.ndim
if nchannels == 1:
pxx, _, _ = mlab.specgram(data, nfft, fs, noverlap = noverlap)
elif nchannels == 2:
pxx, _, _ = mlab.specgram(data[:,0], nfft, fs, noverlap = noverlap)
return pxx
def plt_spectrogram(data):
nfft = 200 # Length of each window segment
fs = 8000 # Sampling frequencies
noverlap = 120 # Overlap between windows
nchannels = data.ndim
if nchannels == 1:
pxx, _, _, _ = plt.specgram(data, nfft, fs, noverlap = noverlap)
elif nchannels == 2:
pxx, _, _, _ = plt.specgram(data[:,0], nfft, fs, noverlap = noverlap)
return pxx
"""### Audio stream"""
def get_audio_input_stream(callback):
stream = pyaudio.PyAudio().open(
format=pyaudio.paInt16,
channels=1,
rate=fs,
input=True,
frames_per_buffer=chunk_samples,
input_device_index=0,
stream_callback=callback)
return stream
# Queue to communicate between the audio callback and main thread
q = Queue()
run = True
silence_threshold = 100
timeout = 0.5*60 # 0.5 minutes from now
# Data buffer for the input wavform
data = np.zeros(feed_samples, dtype='int16')
def callback(in_data, frame_count, time_info, status):
global run, timeout, data, silence_threshold
if time.time() > timeout:
run = False
data0 = np.frombuffer(in_data, dtype='int16')
if np.abs(data0).mean() < silence_threshold:
sys.stdout.write('------------dddd-----')
return (in_data, pyaudio.paContinue)
else:
sys.stdout.write('.............dddd....')
data = np.append(data,data0)
if len(data) > feed_samples:
data = data[-feed_samples:]
# Process data async by sending a queue.
q.put(data)
return (in_data, pyaudio.paContinue)
def check_where():
stream = get_audio_input_stream(callback)
stream.start_stream()
count=0
global run, timeout
try:
while count<timeout:
data = q.get()
spectrum = get_spectrogram(data)
preds = detect_triggerword_spectrum(spectrum)
new_trigger = has_new_triggerword(preds, chunk_duration, feed_duration)
if new_trigger:
print('I CAN HEAR TRIGGER##################')
else:
print('I CAN HEAR NOTHING$$$$$$$$$$$$$$$$$$')
#time.sleep(1)
count = count+1
except (KeyboardInterrupt, SystemExit):
stream.stop_stream()
stream.close()
timeout = time.time()
run = False
stream.stop_stream()
stream.close()
```
#### File: Autonomous-Driving-Cart-MEME/ros_integration/deepsort_ros.py
```python
import tf
import rospy
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry
import time, math
status_code = 0
front_ultra = 0
def callback(msg):
# follows the conventional x, y, poses
global status_code, front_ultra
status_code = msg.pose.pose.position.x
front_ultra = msg.pose.pose.position.y
print('status code:',str(status_code), 'front ultrasonic distance:', front_ultra)
def publish_cmd_vel(x, y):
goal = 0.6
speed = 0.01
publisher = rospy.Publisher('result_cmd_vel', Twist, queue_size=1)
cmd = Twist()
cmd.linear.x = x
cmd.linear.y = y
cmd.linear.z = 0
rospy.sleep(1)
seconds = time.time()
if front_ultra < 10:
print('Too close to object in front')
else:
while time.time() - seconds < goal / speed:
publisher.publish(cmd)
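# Note on the loop above: with goal = 0.6 and speed = 0.01 the while loop keeps
# republishing the same Twist for goal / speed = 60 seconds of wall-clock time; it
# does not integrate the distance actually travelled. A hypothetical call such as
# publish_cmd_vel(0.1, 0.0) would therefore stream a forward command for about a
# minute, unless front_ultra was already below 10 when the function ran, in which
# case the loop is skipped entirely.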
def deepsort_result_to_ros(x, y):
rospy.init_node('deepsort_status')
odom_sub = rospy.Subscriber('/betabot_status', Odometry, callback)
# if __name__ == "__main__":
# rospy.init_node('deepsort_result')
# odom_sub = rospy.Subscriber('/betabot_status', Odometry, callback)
# publish_cmd_vel(x, y)
``` |
{
"source": "JisuHann/Pytorch-Model-implementation",
"score": 2
} |
#### File: Pytorch-Model-implementation/mask-rcnn/plain_test.py
```python
from detectron2.detectron2.config import get_cfg
from detectron2.detectron2.data import MetadataCatalog
from detectron2.detectron2.engine import DefaultPredictor
from detectron2.detectron2.utils.visualizer import Visualizer
from utils.utils import *
cfg = get_cfg()
def test_setup():
cfg.merge_from_file("./detectron/detectron2_repo/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.DATASETS.TRAIN = ("book",)
cfg.DATASETS.TEST = () # no metrics implemented for this dataset
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = "detectron://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" # initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.001
cfg.SOLVER.MAX_ITER = 100 # 300 iterations seems good enough, but you can certainly train longer
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # 1 classes (person)
cfg.MODEL.DEVICE = 'cpu'
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
cfg.MODEL.WEIGHTS = "./detectron/output/model_final.pth"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set the testing threshold for this model
cfg.DATASETS.TEST = ("book",)
global predictor
predictor = DefaultPredictor(cfg)
# model predict image
def segmentation_predict(path):
image = cv2.imread(path)
outputs = predictor(image) # noqa
v = Visualizer(image[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2) # noqa
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
img = Image.fromarray(v.get_image()[:, :, ::-1])
img.show() # show prediction result
mask_array = outputs['instances'][outputs['instances'].pred_classes == 0].pred_masks.numpy()
num_instances = mask_array.shape[0]
mask_array = np.moveaxis(mask_array, 0, -1)
mask_array_instance = []
output = np.zeros_like(image)
for i in range(num_instances):
mask_array_instance.append(mask_array[:, :, i:(i + 1)])
output = np.where(mask_array_instance[i] == True, i+1, output)
im = Image.fromarray(output).convert('L')
return
test_setup()
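# Hedged usage sketch -- the image path below is hypothetical, and the weight file
# ./detectron/output/model_final.pth must already exist for test_setup() to load it:
#
#   segmentation_predict("./samples/book_shelf.jpg")
#
# which pops up the Visualizer overlay and builds a per-instance label mask.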
```
#### File: Pytorch-Model-implementation/U-Net/models.py
```python
from utils import *
# 1x1 convolution
def conv1x1(in_channels, out_channels, stride, padding):
model = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=padding),
nn.BatchNorm2d(out_channels)
)
return model
# 3x3 convolution
def conv3x3(in_channels, out_channels, stride, padding):
model = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=padding),
nn.BatchNorm2d(out_channels)
)
return model
class ResidualBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, downsample=False):
super(ResidualBlock, self).__init__()
self.downsample = downsample
if self.downsample:
self.layer = nn.Sequential(
conv1x1(in_channels, middle_channels, 2, 0),
conv3x3(middle_channels, middle_channels, 1, 1),
conv1x1(middle_channels, out_channels,1, 0)
)
self.downsize = conv1x1(in_channels, out_channels, 2, 0)
else:
self.layer = nn.Sequential(
conv1x1(in_channels, middle_channels, 1, 0),
conv3x3(middle_channels, middle_channels, 1, 1),
conv1x1(middle_channels, out_channels, 1, 0)
)
self.make_equal_channel = conv1x1(in_channels, out_channels, 1, 0)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
if self.downsample:
out = self.layer(x)
x = self.downsize(x)
return self.activation(out + x)
else:
out = self.layer(x)
            if x.size() != out.size():
x = self.make_equal_channel(x)
return self.activation(out + x)
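# Rough shape sketch for this block (values assumed, not taken from a real run):
# with downsample=True an input of (N, 64, 128, 128) passes through
# conv1x1(stride 2) -> conv3x3 -> conv1x1 to give (N, out_channels, 64, 64), and the
# 1x1 "downsize" projection reshapes the skip path to match before the add.
# With downsample=False the spatial size is kept and only the channel count may change.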
def conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1), # 3: kernel size
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
class UNetWithResnet50Encoder(nn.Module):
def __init__(self, n_classes=50):
super().__init__()
self.n_classes = n_classes
self.layer1 = nn.Sequential(
nn.Conv2d(9, 64, 7, 2, 3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.pool = nn.MaxPool2d(3, 2, 1, return_indices=True)
self.layer2 = nn.Sequential(
ResidualBlock(64, 64, 128, False),
ResidualBlock(128, 64, 128, False),
ResidualBlock(128, 64, 128, True)
)
self.bridge = conv(128, 128)
self.UnetConv1 = conv(256, 64)
self.UpConv1 = nn.Conv2d(128, 128, 3, padding=1)
self.upconv2_1 = nn.ConvTranspose2d(256, 64, 3, 2, 1)
self.upconv2_2 = nn.Conv2d(64, 64, 3, padding=1)
self.unpool = nn.MaxUnpool2d(3, 2, 1)
self.UnetConv2_1 = nn.ConvTranspose2d(64, 64, 3, 2, 1)
self.UnetConv2_2 = nn.ConvTranspose2d(128, 128, 3, 2, 1)
self.UnetConv2_3 = nn.Conv2d(128, 64, 3, padding=1)
#self.UnetConv3 = nn.Conv2d(64, self.n_classes, kernel_size=1, stride=1)
self.UnetConv3 = nn.Conv2d(64, n_classes, kernel_size=1, stride=1)
def forward(self, x, with_output_feature_map=False):
#x: torch.Size([32, 9, 512, 512])
out1 = self.layer1(x)
out1, indices = self.pool(out1)
out2 = self.layer2(out1)#torch.Size([32, 128, 64, 64])
x = self.bridge(out2) #torch.Size([32, 512, 64, 64])
x = self.UpConv1(x)
x = torch.cat((x, out2), dim=1) #torch.Size([32, 256, 64, 64])
x = self.upconv2_1(x, output_size=torch.Size([x.size(0),64,64,64]))
x = self.upconv2_2(x)
x = torch.cat((x, out1), dim=1)
x = self.UnetConv2_2(x, output_size=torch.Size([x.size(0), 128, 128, 128]))
x = self.UnetConv2_2(x, output_size=torch.Size([x.size(0), 128, 256, 256]))
x = self.UnetConv2_3(x)
x = self.UnetConv3(x)
return x
```
#### File: Pytorch-Model-implementation/U-Net/utils.py
```python
import math
import os
import random
import cv2
from cv2.cv2 import imread
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
from torchvision import models
from PIL import Image
import PIL.Image as PIL
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, TensorDataset, Dataset
def createFolder(dir):
try:
if not os.path.exists(dir):
os.makedirs(dir)
except OSError as err:
print("OS error: {0}".format(err))
``` |
{
"source": "jisujeong/studio",
"score": 2
} |
#### File: function/statistics/ttest.py
```python
from brightics.common.report import ReportBuilder, strip_margin, plt2MD, \
    pandasDF2MD, keyValues2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.utils import check_required_parameters
import numpy as np
import pandas as pd
import math
from math import sqrt
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats
from scipy import stats
from scipy.stats import t
from statsmodels.stats.weightstats import ttest_ind
def one_sample_ttest(table, input_cols, alternatives, hypothesized_mean=0, conf_level=0.95):
n = len(table)
degree = n - 1
alpha = 1.0 - conf_level
out_table = pd.DataFrame()
# statistics
statistics = "t statistic, t distribution with %d degrees of freedom under the null hypothesis." % degree
# Print model
rb = ReportBuilder()
rb.addMD(strip_margin("""
    ## One Sample T Test Result
| - Statistics = {s}
| - Hypothesized mean = {h}
| - Confidence level = {cl}
""".format(s=statistics, h=hypothesized_mean, cl=conf_level)))
for input_col in input_cols:
# model
alter_list = []
p_list = []
CI_list = []
# data
data = input_col
# estimates
result = stats.ttest_1samp(table[input_col], hypothesized_mean)
estimates = result[0]
cols = ['data', 'alternative_hypothesis', 'statistics', 'estimates', 'p_value', 'confidence_level', 'lower_confidence_interval', 'upper_confidence_interval']
for i in alternatives:
if (i == 'Greater'):
# alternative hypothesis
alternative_hypothesis = "true mean >" + str(hypothesized_mean)
# p-values
p_value = 1.0 - t.cdf(estimates, degree)
# confidence interval - greater
critical_val = t.ppf(1.0 - alpha, degree)
width = critical_val * np.std(table[input_col]) / math.sqrt(n - 1)
lower_conf_interval = np.mean(table[input_col]) - width
upper_conf_interval = math.inf
# model
alter = 'true mean > {hypothesized_mean}'.format(hypothesized_mean=hypothesized_mean)
alter_list.append(alter)
p_list.append(p_value)
conf_interval = '({lower_conf_interval}, {upper_conf_interval})'.format(lower_conf_interval=lower_conf_interval, upper_conf_interval=upper_conf_interval)
CI_list.append(conf_interval)
# out_table
list = []
list.append([data, alternative_hypothesis, statistics, estimates, p_value, conf_level, lower_conf_interval, upper_conf_interval])
out_table = out_table.append(pd.DataFrame(list, columns=cols))
if (i == 'Less'):
# alternative hypothesis
alternative_hypothesis = "true mean <" + str(hypothesized_mean)
p_value = t.cdf(estimates, degree)
# confidence interval - less
critical_val = t.ppf(1.0 - alpha, degree)
width = critical_val * np.std(table[input_col]) / math.sqrt(n - 1)
lower_conf_interval = -math.inf
upper_conf_interval = np.mean(table[input_col]) + width
# model
alter = 'true mean < {hypothesized_mean}'.format(hypothesized_mean=hypothesized_mean)
alter_list.append(alter)
p_list.append(p_value)
conf_interval = '({lower_conf_interval}, {upper_conf_interval})'.format(lower_conf_interval=lower_conf_interval, upper_conf_interval=upper_conf_interval)
CI_list.append(conf_interval)
# out_table
list = []
list.append([data, alternative_hypothesis, statistics, estimates, p_value, conf_level, lower_conf_interval, upper_conf_interval])
out_table = out_table.append(pd.DataFrame(list, columns=cols))
if (i == 'Two Sided'):
# alternative hypothesis
alternative_hypothesis = "true mean !=" + str(hypothesized_mean)
# p_value = (1.0 - t.cdf(abs(estimates), degree)) * 2.0
if (estimates >= 0):
p_value = 2.0 * t.cdf(-estimates, degree)
else:
p_value = 2.0 * t.cdf(estimates, degree)
# confidence interval - two-sided
critical_val = t.ppf(1.0 - alpha / 2, degree)
width = critical_val * np.std(table[input_col]) / math.sqrt(n - 1)
lower_conf_interval = np.mean(table[input_col]) - width
upper_conf_interval = np.mean(table[input_col]) + width
# model
alter = 'true mean != {hypothesized_mean}'.format(hypothesized_mean=hypothesized_mean)
alter_list.append(alter)
p_list.append(p_value)
conf_interval = '({lower_conf_interval}, {upper_conf_interval})'.format(lower_conf_interval=lower_conf_interval, upper_conf_interval=upper_conf_interval)
CI_list.append(conf_interval)
# out_table
list = []
list.append([data, alternative_hypothesis, statistics, estimates, p_value, conf_level, lower_conf_interval, upper_conf_interval])
out_table = out_table.append(pd.DataFrame(list, columns=cols))
# Print model
conf_level_percent = conf_level * 100
result_table = pd.DataFrame.from_items([
['alternative hypothesis', alter_list],
['p-value', p_list],
['%g%% confidence Interval' % conf_level_percent, CI_list]
])
result = dict()
result['result_table'] = result_table
rb.addMD(strip_margin("""
### Data = {input_col}
| - Estimates = {estimates}
|
| {result_table}
""".format(input_col=input_col, estimates=estimates, result_table=pandasDF2MD(result_table))))
# print model
result['report'] = rb.get()
return {'out_table':out_table, 'model':result}
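# Minimal usage sketch (the column name and values are made up; the function expects
# a pandas DataFrame plus a list of numeric columns):
#
#   df = pd.DataFrame({'height': [170.1, 168.3, 171.9, 169.4, 172.0]})
#   res = one_sample_ttest(df, input_cols=['height'], alternatives=['Two Sided'],
#                          hypothesized_mean=170, conf_level=0.95)
#   res['out_table']          # one row per column/alternative with p-value and CI
#   res['model']['report']    # markdown report assembled via ReportBuilder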
def two_sample_ttest_for_stacked_data(table, response_cols, factor_col, alternatives, first, second, hypo_diff=0, equal_vari='pooled', confi_level=0.95):
if(type(table[factor_col][0]) == str):
table_first = table[table[factor_col] == first]
table_second = table[table[factor_col] == second]
else:
table_first = table[table[factor_col] == float(first)]
table_second = table[table[factor_col] == float(second)]
tmp_table = []
rb = ReportBuilder()
rb.addMD(strip_margin("""
## Two Sample T Test for Stacked Data Result
| - Hypothesized mean = {hypo_diff}
| - Confidence level = {confi_level}
""".format(hypo_diff=hypo_diff, confi_level=confi_level)))
for response_col in response_cols:
tmp_model = []
number1 = len(table_first[response_col])
number2 = len(table_second[response_col])
mean1 = (table_first[response_col]).mean()
mean2 = (table_second[response_col]).mean()
std1 = (table_first[response_col]).std()
std2 = (table_second[response_col]).std()
start_auto = 0
if(equal_vari == 'auto'):
start_auto = 1
f_value = (std1 ** 2) / (std2 ** 2)
f_test_p_value_tmp = scipy.stats.f.cdf(1 / f_value, number1 - 1, number2 - 1)
if(f_test_p_value_tmp > 0.5):
f_test_p_value = (1 - f_test_p_value_tmp) * 2
else:
f_test_p_value = f_test_p_value_tmp * 2
if(f_test_p_value < 0.05):
equal_vari = 'unequal'
else:
equal_vari = 'pooled'
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'larger', usevar=equal_vari, value=hypo_diff)
if 'larger' in alternatives:
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'larger', usevar=equal_vari, value=hypo_diff)
df = ttestresult[2]
if(equal_vari == 'pooled'):
std_number1number2 = sqrt(((number1 - 1) * (std1) ** 2 + (number2 - 1) * (std2) ** 2) / (number1 + number2 - 2))
margin = t.ppf((confi_level) , df) * std_number1number2 * sqrt(1 / number1 + 1 / number2)
if(equal_vari == 'unequal'):
margin = t.ppf((confi_level) , df) * sqrt(std1 ** 2 / (number1) + std2 ** 2 / (number2))
tmp_model += [['true difference in means > 0.0'] +
[ttestresult[1]] + [(mean1 - mean2 - margin, math.inf)]]
tmp_table += [['%s by %s(%s,%s)' % (response_col, factor_col, first, second)] +
['true difference in means > 0.0'] +
['t statistic, t distribution with %f degrees of freedom under the null hypothesis' % ttestresult[2]] +
[ttestresult[0]] + [ttestresult[1]] + [confi_level] + [mean1 - mean2 - margin] + [math.inf]]
if 'smaller' in alternatives:
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'smaller', usevar=equal_vari, value=hypo_diff)
df = ttestresult[2]
if(equal_vari == 'pooled'):
std_number1number2 = sqrt(((number1 - 1) * (std1) ** 2 + (number2 - 1) * (std2) ** 2) / (number1 + number2 - 2))
margin = t.ppf((confi_level) , df) * std_number1number2 * sqrt(1 / number1 + 1 / number2)
if(equal_vari == 'unequal'):
margin = t.ppf((confi_level) , df) * sqrt(std1 ** 2 / (number1) + std2 ** 2 / (number2))
tmp_model += [['true difference in means < 0.0'] +
[ttestresult[1]] + [(-math.inf, mean1 - mean2 + margin)]]
tmp_table += [['%s by %s(%s,%s)' % (response_col, factor_col, first, second)] +
['true difference in means < 0.0'] +
['t statistic, t distribution with %f degrees of freedom under the null hypothesis' % ttestresult[2]] +
[ttestresult[0]] + [ttestresult[1]] + [confi_level] + [-math.inf] + [mean1 - mean2 + margin]]
if 'two-sided' in alternatives:
ttestresult = ttest_ind(table_first[response_col], table_second[response_col], 'two-sided', usevar=equal_vari, value=hypo_diff)
df = ttestresult[2]
if(equal_vari == 'pooled'):
std_number1number2 = sqrt(((number1 - 1) * (std1) ** 2 + (number2 - 1) * (std2) ** 2) / (number1 + number2 - 2))
margin = t.ppf((confi_level) , df) * std_number1number2 * sqrt(1 / number1 + 1 / number2)
if(equal_vari == 'unequal'):
margin = t.ppf((confi_level) , df) * sqrt(std1 ** 2 / (number1) + std2 ** 2 / (number2))
tmp_model += [['true difference in means != 0.0'] +
[ttestresult[1]] + [(mean1 - mean2 - margin, mean1 - mean2 + margin)]]
tmp_table += [['%s by %s(%s,%s)' % (response_col, factor_col, first, second)] +
['true difference in means != 0.0'] +
['t statistic, t distribution with %f degrees of freedom under the null hypothesis' % ttestresult[2]] +
[ttestresult[0]] + [ttestresult[1]] + [confi_level] + [mean1 - mean2 - margin] + [mean1 - mean2 + margin]]
result_model = pd.DataFrame.from_records(tmp_model)
result_model.columns = ['alternatives', 'p values', '%g%% confidence interval' % (confi_level * 100)]
rb.addMD(strip_margin("""
| #### Data = {response_col}
| - Statistics = t statistic, t distribution with {ttestresult2} degrees of freedom under the null hypothesis
|
| {result_model}
|
""".format(ttestresult2=ttestresult[2], response_col=response_col, ttestresult0=ttestresult[0], result_model=pandasDF2MD(result_model))))
if(start_auto == 1):
equal_vari = 'auto'
result = pd.DataFrame.from_records(tmp_table)
result.columns = ['data', 'alternative_hypothesis', 'statistics', 'estimates', 'p_value', 'confidence_level', 'lower_confidence_interval', 'upper_confidence_interval']
model = dict()
model['report'] = rb.get()
return {'out_table' : result, 'model' : model}
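# Minimal usage sketch (column names invented): for a stacked table with a numeric
# 'score' column and a 'group' factor taking values 'A' and 'B',
#
#   res = two_sample_ttest_for_stacked_data(df, response_cols=['score'],
#             factor_col='group', alternatives=['two-sided'], first='A', second='B')
#
# returns one out_table row per alternative; equal_vari='auto' first runs an F test
# on the two sample variances and then picks the pooled or unequal (Welch) variant.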
def paired_ttest(table, first_column, second_column, alternative, hypothesized_difference=0, confidence_level=0.95):
df = len(table) - 1
diff_mean = abs(table[first_column] - table[second_column]).mean()
std_dev = np.sqrt(((diff_mean - abs(table[first_column] - table[second_column])) * (diff_mean - abs(table[first_column] - table[second_column]))).mean())
ans = stats.ttest_rel(table[first_column], table[second_column] + hypothesized_difference)
t_value = ans[0]
p_value_ul = ans[1]
    p_value_u = stats.t.sf(t_value, df)
    p_value_l = stats.t.cdf(t_value, df)
left_u = diff_mean - std_dev * stats.t.isf((1 - confidence_level), df) / np.sqrt(df)
right_u = np.Infinity
left_l = -np.Infinity
right_l = diff_mean + std_dev * stats.t.isf((1 - confidence_level), df) / np.sqrt(df)
left_ul = diff_mean - std_dev * stats.t.isf((1 - confidence_level) / 2, df) / np.sqrt(df)
right_ul = diff_mean + std_dev * stats.t.isf((1 - confidence_level) / 2, df) / np.sqrt(df)
result_value_u = [{'data' : first_column + " , " + second_column,
'alternative_hypothesis' : "true difference in means > " + str(hypothesized_difference),
'statistics' : "t statistics, t distribution with " + str(df) + " degrees of freedom under the null hypothesis",
'estimates' : t_value,
'p_value' : p_value_u,
'confidence_level' : confidence_level,
'low_confidence_interval' : left_u,
'upper_confidence_interval' : right_u}]
result_value_l = [{'data' : first_column + " , " + second_column,
'alternative_hypothesis' : "true difference in means < " + str(hypothesized_difference),
'statistics' : "t statistics, t distribution with " + str(df) + " degrees of freedom under the null hypothesis",
'estimates' : t_value,
'p_value' : p_value_l,
'confidence_level' : confidence_level,
'low_confidence_interval' : left_l,
'upper_confidence_interval' : right_l}]
result_value_ul = [{'data' : first_column + " , " + second_column,
'alternative_hypothesis' : "true difference in means != " + str(hypothesized_difference),
'statistics' : "t statistics, t distribution with " + str(df) + " degrees of freedom under the null hypothesis",
'estimates' : t_value,
'p_value' : p_value_ul,
'confidence_level' : confidence_level,
'low_confidence_interval' : left_ul,
'upper_confidence_interval' : right_ul}]
df_result = pd.DataFrame()
df_u = pd.DataFrame(result_value_u, columns=['data', 'alternative_hypothesis', 'statistics', 'estimates', 'p_value', 'confidence_level', 'low_confidence_interval', 'upper_confidence_interval'])
df_l = pd.DataFrame(result_value_l, columns=['data', 'alternative_hypothesis', 'statistics', 'estimates', 'p_value', 'confidence_level', 'low_confidence_interval', 'upper_confidence_interval'])
df_ul = pd.DataFrame(result_value_ul, columns=['data', 'alternative_hypothesis', 'statistics', 'estimates', 'p_value', 'confidence_level', 'low_confidence_interval', 'upper_confidence_interval'])
if 'greater' in alternative:
df_result = df_result.append(df_u, ignore_index=True)
if 'less' in alternative:
df_result = df_result.append(df_l, ignore_index=True)
if 'twosided' in alternative:
df_result = df_result.append(df_ul, ignore_index=True)
params = {'Input columns' : first_column + ", " + second_column, 'Hypothesized difference' : str(hypothesized_difference), 'Confidence level' : str(confidence_level)}
rb = ReportBuilder()
rb.addMD(strip_margin("""
| ## Paired T Test Result
|
|df|mean_difference|standard_deviation|t_value
|--|--|--|--
|{deg_f}|{dm}|{sd}|{tv}
""".format(deg_f=df, dm=diff_mean, sd=std_dev, tv=t_value, params=dict2MD(params))))
if 'greater' in alternative:
rb.addMD(strip_margin("""
        | - H0 : true difference in means is less than or equal to {hd}.
        | - H1 : true difference in means is larger than {hd}.
|
|p_value|confidence_level|confidence_interval
|--|--|--
|{pvu}|{con_lv}|({l_u}, {r_u})
|
""".format(pvu=p_value_u, hd=str(hypothesized_difference), con_lv=str(confidence_level), l_u=left_u, r_u=right_u)))
if 'less' in alternative:
rb.addMD(strip_margin("""
        | - H0 : true difference in means is larger than or equal to {hd}.
        | - H1 : true difference in means is less than {hd}.
|
|p_value|confidence_level|confidence_interval
|--|--|--
|{pvl}|{con_lv}|({l_l}, {r_l})
|
""".format(pvl=p_value_l, hd=str(hypothesized_difference), con_lv=str(confidence_level), l_l=left_l, r_l=right_l)))
if 'twosided' in alternative:
rb.addMD(strip_margin("""
        | - H0 : true difference in means is equal to {hd}.
        | - H1 : true difference in means is not equal to {hd}.
|
|p_value|confidence_level|confidence_interval
|--|--|--
|{pvul}|{con_lv}|({l_ul}, {r_ul})
|
""".format(pvul=p_value_ul, hd=str(hypothesized_difference), con_lv=str(confidence_level), l_ul=left_ul, r_ul=right_ul)))
model = dict()
model['report'] = rb.get()
return{'out_table':df_result, 'model':model}
``` |
{
"source": "jisuk500/ML_learning",
"score": 4
} |
#### File: AlphaZero_book/chap2/2-2.py
```python
print("Hello World!")
# 2-3-2 Variables and operators
a=1
b=2
c = a + b
print(a,b,c)
#%%
# 2-3-2 Operators
a = 5
s = 'a is 10 or more' if a>= 10 else 'a is less than 10'
print(s)
#%%
# 2-3-3 Strings
# Multi-line string
text = '''first line of text,
second line of text'''
print(text)
# String slicing
text = "Hello World"
print(text[1:3])
print(text[:5])
print(text[6:])
# Substituting variables into strings
a = 'test'
b = 100
c = 3.14159
print('string = {}'.format(a))
print('floating point (to two decimal places) = {:.2f}'.format(c))
print("multiple variables = {},{},{}".format(a,b,c))
#%%
# 2-3-4 Lists
my_list = [1,2,3,4]
print(my_list)
print(my_list[0])
print(my_list[1:3])
# Changing list elements
my_list[1:4] = [20,30]
print(my_list)
# Creating lists with range()
print(list(range(10)))
print(list(range(1,7)))
print(list(range(1,10,2)))
#%%
# 2-3-5 Dictionaries
# Creating a dictionary and getting elements
my_dic = {'apple':300, 'cherry':200, 'strawberry':3000}
print(my_dic)
# Adding and deleting dictionary elements
my_dic['apple'] = 400
print(my_dic)
#%%
# 2-3-6 Tuples
my_tuple = (1,2,3,4)
print(my_tuple[0])
#%%
# 2-3-7 Control flow
# if statement
print('if statement')
num = 5
if num >= 10:
    print("10 or more")
else:
    print("less than 10")
# if statement with several branches
print('if statement with several branches')
num = 10
if num <5 :
    print("5")
elif num >=5:
    print("55")
else:
    print("none")
# for loop
print("for loop")
for n in [1,2,3]:
    print(n)
    print(n*10)
for n in range(5):
    print(n)
# while loop
print("while loop")
i = 0
while i<20:
    i+=1
    if i % 2 == 0:
        continue
    if i % 3 == 0:
        print(i)
# enumerate
print("enumerate")
for num, fruit in enumerate(['a','b','c']):
    print("{}:{}".format(num,fruit))
# List comprehension
print("list comprehension")
my_list = []
for x in range(10):
    my_list.append(x*2)
print(my_list)
my_list2 = [x*2 for x in range(10)]
print(my_list2)
#%%
# 2-3-8 Functions and lambda expressions
# Function
print("function")
def radian(x):
    return x / 180 * 3.14159
for x in range(0,360,90):
    print("degrees : {} / radians : {}".format(x,radian(x)))
# Lambda expression
print("lambda expression")
lambda_radian = (lambda x:x/180*3.14159)
for x in range(0,360,90):
    print("degrees : {} / radians : {}".format(x,lambda_radian(x)))
#%%
# Classes
print("class")
class HelloClass:
    def __init__(self,msg):
        self.msg = msg
    def output(self):
        print(self.msg)
hello = HelloClass('Hello World')
hello.output()
#%%
# 2-3-10 Importing packages and calling components directly
# Package import
print("package import")
import numpy as np
a = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(a)
# Calling a component directly
print("calling a component directly")
from numpy import array
a = array([[1,2,3],[4,5,6],[7,8,9]])
print(a)
```
#### File: AlphaZero_book/chap6/dual_network.py
```python
from tensorflow.keras.layers import Activation, Add, BatchNormalization, Conv2D, Dense, GlobalAveragePooling2D, Input
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
import os
#%% Prepare parameters
DN_FILTERS = 128  # number of convolutional layer kernels (original: 256)
DN_RESIDUAL_NUM = 16  # number of residual blocks (original: 19)
DN_INPUT_SHAPE = (3,3,2)  # input shape
DN_OUTPUT_SIZE = 9  # number of actions (board positions, 3x3)
#%% Create a convolutional layer
def conv(filters):
return Conv2D(filters,3,padding='same', use_bias=False,
kernel_initializer='he_normal', kernel_regularizer=l2(0.0005))
#%% Create a residual block
def residual_block():
def f(x):
sc = x
x = conv(DN_FILTERS)(x)
x = BatchNormalization()(x)
x = conv(DN_FILTERS)(x)
x = BatchNormalization()(x)
x = Add()([x,sc])
x = Activation('relu')(x)
return x
return f
#%% Create the dual network
def dual_network(model_path="./model/best.h5"):
    # Do nothing if the model has already been created
    if os.path.exists(model_path):
        return
    # Input layer
    input = Input(shape=DN_INPUT_SHAPE)
    # Convolutional layer
    x = conv(DN_FILTERS)(input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # 16 residual blocks
    for i in range(DN_RESIDUAL_NUM):
        x = residual_block()(x)
    # Pooling layer
    x = GlobalAveragePooling2D()(x)
    # Policy output
    p = Dense(DN_OUTPUT_SIZE, kernel_regularizer=l2(0.0005),
              activation='softmax', name='pi')(x)
    # Value output
    v = Dense(1, kernel_regularizer=l2(0.0005))(x)
    v = Activation('tanh',name='v')(v)
    # Create the model
    model = Model(inputs=input, outputs=[p,v])
    # Save the model
    model.save(model_path)  # best player model
return model
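# Hedged usage sketch (dummy shapes; note that dual_network() returns None when
# ./model/best.h5 already exists, in which case the saved file should be loaded):
#
#   from tensorflow.keras.models import load_model
#   net = dual_network() or load_model('./model/best.h5')
#   # predicting on a (1, 3, 3, 2) array yields a policy of shape (1, 9) and a
#   # value of shape (1, 1), squashed into [-1, 1] by the tanh head.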
# # Delete the model
# K.clear_session()
# del model
#%% Check operation
if __name__ == "__main__":
m = dual_network()
```
#### File: AlphaZero_book/chap6/evaluate_best_player.py
```python
from game import State, random_action, alpha_beta_action, mcts_action
from pv_mcts import pv_mcts_action
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from pathlib import Path
import numpy as np
#%% Prepare parameters
EP_GAME_COUNT = 10  # number of games per evaluation
#%% Points for the player who moved first
def first_player_point(ended_state):
    # 1: first player won, 0: first player lost, 0.5: draw
    if ended_state.is_lose():
        return 0 if ended_state.is_first_player() else 1
    return 0.5
#%% Play one game
def play(next_actions):
    # Create the state
    state = State()
    # Loop until the game ends
    while True:
        # When the game is over
        if state.is_done():
            break
        # Get the action
        next_action = next_actions[0] if state.is_first_player() else next_actions[1]
        action = next_action(state)
        # Get the next state
        state = state.next(action)
    # Return the first player's points
    return first_player_point(state)
#%% Evaluate an arbitrary algorithm
def evaluate_algorithm_of(label, next_actions):
    # Repeat the match several times
    total_point = 0
    for i in range(EP_GAME_COUNT):
        # Play one game
        if i % 2 == 0:
            total_point += play(next_actions)
        else:
            total_point += 1 - play(list(reversed(next_actions)))
        # Output
        print("\rEvaluate {}/{}".format(i + 1, EP_GAME_COUNT), end='')
    print('')
    # Compute the average points
    average_point = total_point / EP_GAME_COUNT
    print(label, average_point)
#%% Evaluate the best player
def evaluate_best_player():
    # Load the best player model
    model = load_model("./model/best.h5")
    # Create the function that selects actions with PV MCTS
    next_pv_mcts_action = pv_mcts_action(model, 0.0)
    # vs random
    next_actions = (next_pv_mcts_action, random_action)
    evaluate_algorithm_of('VS Random', next_actions)
    # vs alpha-beta pruning
    next_actions = (next_pv_mcts_action, alpha_beta_action)
    evaluate_algorithm_of("VS Alpha Beta", next_actions)
    # vs Monte Carlo tree search
    next_actions = (next_pv_mcts_action, mcts_action)
    evaluate_algorithm_of("VS MCTS", next_actions)
    # Delete the model
    K.clear_session()
    del model
#%% Check operation
if __name__ == "__main__":
evaluate_best_player()
```
#### File: AlphaZero_book/chap6/train_network.py
```python
from dual_network import DN_INPUT_SHAPE
from tensorflow.keras.callbacks import LearningRateScheduler, LambdaCallback
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from pathlib import Path
import numpy as np
import pickle
#%% Prepare parameters
RN_EPOCHS = 100  # number of training epochs
#%% Load the training data
def load_data():
    history_path = sorted(Path('./data').glob('*.history'))[-1]
    with history_path.open(mode='rb') as f:
        return pickle.load(f)
#%% Train the dual network
def train_network():
    # Load the training data
    history = load_data()
    xs, y_policies, y_values = zip(*history)
    # Reshape the training data into the network input format
    a, b, c = DN_INPUT_SHAPE
    xs = np.array(xs)
    xs = xs.reshape(len(xs), c, a, b).transpose(0, 2, 3, 1)
    y_policies = np.array(y_policies)
    y_values = np.array(y_values)
    # Load the best player model
    model = load_model('./model/best.h5')
    # Compile the model
    model.compile(loss=['categorical_crossentropy', 'mse'], optimizer='adam')
    # Learning rate schedule
    def step_decay(epoch):
        x = 0.001
        if epoch >= 50: x = 0.0005
        if epoch >= 80: x = 0.00025
        return x
    lr_decay = LearningRateScheduler(step_decay)
    # Progress output
    print_callback = LambdaCallback(
        on_epoch_begin=lambda epoch, logs:
            print("\rTrain {}/{}".format(epoch + 1, RN_EPOCHS), end=''))
    # Run the training
    model.fit(xs, [y_policies, y_values], batch_size=128, epochs=RN_EPOCHS,
              verbose=0, callbacks=[lr_decay, print_callback])
    print("")
    # Save the latest player model
    model.save('./model/latest.h5')
    # Delete the model
    K.clear_session()
    del model
#%% Test
if __name__ == "__main__":
train_network()
```
#### File: Object Detection/YOLOv1/Resnet50_subclass.py
```python
import tensorflow as tf
class conv_block(tf.keras.Model):
def __init__(self, filters, strides=(2, 2)):
super(conv_block, self).__init__()
self.filters1, self.filters2, self.filters3 = filters
self.strides = strides
self.conv1 = tf.keras.layers.Conv2D(self.filters1, (1, 1), strides=strides)
self.bn1 = tf.keras.layers.BatchNormalization()
self.relu1 = tf.keras.layers.ReLU()
self.conv2 = tf.keras.layers.Conv2D(self.filters2, (3, 3), strides=(1, 1), padding='same')
self.bn2 = tf.keras.layers.BatchNormalization()
self.relu2 = tf.keras.layers.ReLU()
self.conv3 = tf.keras.layers.Conv2D(self.filters3, (1, 1), strides=(1, 1))
self.bn3 = tf.keras.layers.BatchNormalization()
self.shortcut_conv = tf.keras.layers.Conv2D(self.filters3, (1, 1), strides=strides)
self.shortcut_bn = tf.keras.layers.BatchNormalization()
self.add = tf.keras.layers.Add()
self.add_relu = tf.keras.layers.ReLU()
def call(self, input_tensor, training=False):
x = self.conv1(input_tensor)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.bn3(x)
shortcut = self.shortcut_conv(input_tensor)
shortcut = self.shortcut_bn(shortcut)
x = self.add([x, shortcut])
x = self.add_relu(x)
return x
class identity_block(tf.keras.Model):
def __init__(self, filters):
super(identity_block, self).__init__()
self.filters1, self.filters2, self.filters3 = filters
self.conv1 = tf.keras.layers.Conv2D(self.filters1, (1, 1), strides=(1, 1))
self.bn1 = tf.keras.layers.BatchNormalization()
self.relu1 = tf.keras.layers.ReLU()
self.conv2 = tf.keras.layers.Conv2D(self.filters2, (3, 3), strides=(1, 1), padding='same')
self.bn2 = tf.keras.layers.BatchNormalization()
self.relu2 = tf.keras.layers.ReLU()
self.conv3 = tf.keras.layers.Conv2D(self.filters3, (1, 1), strides=(1, 1))
self.bn3 = tf.keras.layers.BatchNormalization()
self.add = tf.keras.layers.Add()
self.add_relu = tf.keras.layers.ReLU()
def call(self, input_tensor, training=False):
x = self.conv1(input_tensor)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.add([x, input_tensor])
x = self.add_relu(x)
return x
class ResNet50(tf.keras.Model):
def __init__(self, nb_classes):
super(ResNet50, self).__init__()
self.nb_classes = nb_classes
# Stage 1 (Conv1 Layer)
self.zero_padd_1_1 = tf.keras.layers.ZeroPadding2D(padding=(3, 3))
self.conv_1 = tf.keras.layers.Conv2D(64, (7, 7), strides=(2, 2))
self.bn_1 = tf.keras.layers.BatchNormalization()
self.relu_1 = tf.keras.layers.ReLU()
self.zero_padd_1_2 = tf.keras.layers.ZeroPadding2D(padding=(1, 1))
self.max_pool = tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2))
# Stage 2
self.stage2 = tf.keras.Sequential()
self.stage2.add(conv_block([64, 64, 256], strides=(1, 1)))
self.stage2.add(identity_block([64, 64, 256]))
self.stage2.add(identity_block([64, 64, 256]))
# Stage 3
self.stage3 = tf.keras.Sequential()
self.stage3.add(conv_block([128, 128, 512]))
self.stage3.add(identity_block([128, 128, 512]))
self.stage3.add(identity_block([128, 128, 512]))
self.stage3.add(identity_block([128, 128, 512]))
# Stage 4
self.stage4 = tf.keras.Sequential()
self.stage4.add(conv_block([256, 256, 1024]))
self.stage4.add(identity_block([256, 256, 1024]))
self.stage4.add(identity_block([256, 256, 1024]))
self.stage4.add(identity_block([256, 256, 1024]))
self.stage4.add(identity_block([256, 256, 1024]))
self.stage4.add(identity_block([256, 256, 1024]))
# Stage 5
self.stage5 = tf.keras.Sequential()
self.stage5.add(conv_block([512, 512, 2048]))
self.stage5.add(identity_block([512, 512, 2048]))
self.stage5.add(identity_block([512, 512, 2048]))
#self.gap = tf.keras.layers.GlobalAveragePooling2D()
#self.dense = tf.keras.layers.Dense(self.nb_classes, activation='softmax')
def call(self, input_tensor, training=False):
x = self.zero_padd_1_1(input_tensor)
x = self.conv_1(x)
x = self.bn_1(x)
x = self.relu_1(x)
x = self.zero_padd_1_2(x)
x = self.max_pool(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
x = self.stage5(x)
#x = self.gap(x)
#x = self.dense(x)
return x
model = ResNet50(1000)
model.build((1, 448, 448, 3))
model.summary()
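# Sanity-check note (derived by hand, not from a run): for the (1, 448, 448, 3) build
# above, the strides are 2 (conv1) * 2 (maxpool) * 1 (stage2) * 2 * 2 * 2, an overall
# stride of 32, so the backbone should emit a feature map of shape (1, 14, 14, 2048)
# with the classification head (gap/dense) commented out.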
```
#### File: ML_learning/open ai gym/cart v0.py
```python
import tensorflow as tf
import numpy as np
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, MaxPooling2D, Flatten, Conv2D, BatchNormalization, Activation
import random
from collections import deque
import gym
# %% initialize modules
env = gym.make("Breakout-v0")
# %% neural net constants
history_size = 4
input_size = env.observation_space.shape
input_size = (input_size[0], input_size[1],history_size)
output_size = env.action_space.n
lr = 0.001
k1 = 16
k2 = 32
k3 = 32
fc1 = 256
# %% gym game constants
dis = 0.99
REPLAY_MEMORY = 20000
# %% define network class
def NeuralNet():
model = Sequential()
model.add(Conv2D(k1, (3, 3), activation=None, padding='same',
input_shape=input_size, name='InputLayer'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(k2, (3, 3), activation=None,
padding='same', name='HiddenLayer1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(MaxPooling2D((2, 2)))
# model.add(Conv2D(k3, (3, 3), activation=None,
# padding='same', name='HiddenLayer2'))
# model.add(BatchNormalization())
# model.add(Activation('relu'))
# model.add(Dropout(0.3))
# model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(fc1, activation='relu', name='FcLayer'))
model.add(Dense(output_size, activation=None, name='OutputLayer'))
opt = optimizers.Adam(learning_rate=lr)
model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
return model
mainDQN = NeuralNet()
mainDQN.summary()
targetDQN = NeuralNet()
targetDQN.set_weights(mainDQN.get_weights())
# %% DQN methods
def replay_train(mainDQN_, targetDQN_, train_batch):
x_stack = np.zeros(0).reshape([0] + list(input_size))
y_stack = np.zeros(0).reshape(0,output_size)
# get stored info from the buffer
for history_state, action, reward, next_history_state, done in train_batch:
history_state2d = history_state.reshape([1] + list(input_size))
history_state2d = history_state2d * 1.0/255.0
history_state2d = history_state2d.astype(np.float32)
next_history_state2d = next_history_state.reshape([1] + list(input_size))
next_history_state2d = next_history_state2d * 1.0/255.0
next_history_state2d = next_history_state2d.astype(np.float32)
Q = mainDQN(history_state2d).numpy()
# terminal?
if done:
Q[0,action] = reward
else:
Q[0,action] = reward + dis * np.max(targetDQN_(next_history_state2d).numpy())
x_stack = np.vstack([x_stack, history_state2d])
y_stack = np.vstack([y_stack, Q])
    # train our network using the target and predicted Q values
x_stack = x_stack.astype(np.float32)
y_stack = y_stack.astype(np.float32)
return mainDQN_.fit(x_stack,y_stack)
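# Note on the update above (added comment, not in the original file): replay_train
# builds supervised regression targets from the Q-learning backup
#   target(s, a) = r                                    if the episode terminated
#   target(s, a) = r + dis * max_a' Q_target(s', a')    otherwise
# and fits mainDQN toward these targets while targetDQN stays frozen; targetDQN is
# only re-synced to mainDQN inside main() below.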
def one_hot(x):
return np.identity(6)[x:x+1]
# %% function for atari game
def rgb2gray(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
gray = gray.astype(np.uint8)
gray = gray.reshape(list(gray.shape)+[-1])
return gray
def initialize_state_history(initState):
grayimg = rgb2gray(initState)
hist = np.zeros(0).reshape(210,160,0)
for i in range(4):
hist = np.concatenate((hist,grayimg),axis=2)
return hist
def update_state_history(histState, newState):
histState = np.delete(histState,0,2)
grayimg = rgb2gray(newState)
histState = np.concatenate((histState,grayimg),axis=2)
return histState
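# Added note: the two helpers above maintain a rolling stack of the last
# `history_size` grayscale frames. initialize_state_history repeats the first
# frame 4 times, giving shape (210, 160, 4); update_state_history drops the
# oldest channel and appends the newest, so the network input shape stays fixed.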
# %%
def main():
max_episodes = 100000
#store the previous observations in replay memory
replay_buffer = deque()
best_loss = 99999999
global targetDQN
global mainDQN
for episode in range(max_episodes):
e = 1.0/((episode / 100) + 1)
done = False
step_count = 0
last_reward = 0
state = env.reset()
env.render()
history_state = initialize_state_history(state)
while not done:
if np.random.rand(1) < e:
action = env.action_space.sample()
else:
# choose an action by greedily from the Q-network
history_state2d = history_state.reshape([1] + list(input_size))
history_state2d = history_state2d * 1.0/255.0
history_state2d = history_state2d.astype(np.float32)
action = np.argmax(mainDQN(history_state2d).numpy())
            # get next state and reward from the environment
next_state, reward, done, _ = env.step(action)
next_history_state = update_state_history(history_state, next_state)
env.render()
# save the experience to our buffer
replay_buffer.append((history_state, action, reward, next_history_state, done))
if len(replay_buffer) > REPLAY_MEMORY:
replay_buffer.popleft()
state = next_state
history_state = next_history_state
step_count += 1
last_reward += reward
print("Episode: {}, steps: {}, score: {}".format(episode, step_count, last_reward))
        if episode % 100 == 1 and len(replay_buffer) >= 100:  # train every 100 episodes, once the buffer holds enough samples
# get a random batch of experiences
for _ in range(10):
                # a minibatch works better
minibatch = random.sample(replay_buffer, 100)
hist = replay_train(mainDQN, targetDQN, minibatch)
loss = hist.history['loss']
acc = hist.history['accuracy']
targetDQN.set_weights(mainDQN.get_weights())
print("loss: {}".format(loss[0]))
if best_loss > loss[0]:
best_loss = loss[0]
tf.keras.models.save_model(targetDQN,"targetDQN.h5")
print("saved new!")
return mainDQN
# %% run main
main()
# %%
```
#### File: tensorflow examples/2 basic models/Logistic regression.py
```python
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
import tensorflow as tf
import numpy as np
# %% MNIST dataset parameters
num_classes = 10 # 0~9 digits
num_features = 784 # 28 * 28
# training parameters
learning_rate = 0.01
training_steps = 1000
batch_size = 256
display_step = 50
# %% prepare MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# convert to float32
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten image to 1-D vector of 784 features (28*28)
x_train, x_test = x_train.reshape(
[-1, num_features]), x_test.reshape([-1, num_features])
# normalize image values from [0, 255] to [0, 1]
x_train, x_test = x_train/255., x_test / 255.
# %%Use tf.data API to shuffle and batch data
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
# %% Weight of shape [784, 10]: the 28*28 image features and the total number of classes
W = tf.Variable(tf.ones([num_features, num_classes]), name='weight')
# Bias of shape [10], the total number of classes
b = tf.Variable(tf.zeros([num_classes]), name='bias')
# logistic regression (Wx + b)
def logistic_regression(x):
# apply softmax to normalize the logits to a probability distribution
return tf.nn.softmax(tf.matmul(x, W)+b)
# cross-Entropy loss function
def cross_entropy(y_pred, y_true):
# encode label to a one-hot vector
y_true = tf.one_hot(y_true, depth=num_classes)
    # clip prediction values to avoid log(0) error
y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
# compute cross-entropy
return tf.reduce_mean(-tf.reduce_sum(y_true*tf.math.log(y_pred), 1))
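# Added note: the loss above is the standard categorical cross-entropy,
#   L = -(1/N) * sum_i sum_c y_ic * log(y_hat_ic),
# with the predictions clipped away from 0 so that log() stays finite.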
# accuracy metric
def accuracy(y_pred, y_true):
# predicted class is the index of highest score in prediction vector(i.e. argmax)
correct_prediction = tf.equal(
tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# stochastic gradient descent optimizer
optimizer = tf.optimizers.SGD(learning_rate)
# %% Optimization process
def run_optimization(x, y):
    # wrap the computation inside a GradientTape for automatic differentiation
with tf.GradientTape() as g:
pred = logistic_regression(x)
loss = cross_entropy(pred, y)
# compute gradients
gradients = g.gradient(loss, [W, b])
# update W and b following gradients
optimizer.apply_gradients(zip(gradients, [W, b]))
# %% Run training for the given number of steps
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    # run the optimization to update W and b values
run_optimization(batch_x, batch_y)
if step % display_step == 0:
pred = logistic_regression(batch_x)
loss = cross_entropy(pred, batch_y)
acc = accuracy(pred, batch_y)
print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# %% test model on validation set
x_test_tensor = tf.constant(x_test, dtype=tf.float32)
pred = logistic_regression(x_test_tensor)
print("Test Accuracy: %f" % accuracy(pred, y_test))
# %% visualize predictions
import matplotlib.pyplot as plt
# %% predict random images from the validation set
prediction_count = 20
n_images = np.random.randint(0,x_test.shape[0],(prediction_count))
test_images = x_test[n_images]
test_y_answer = y_test[n_images]
test_images_tensor = tf.constant(test_images, dtype=tf.float32)
predictions = logistic_regression(test_images_tensor)
# display image and model prediction
for i in range(prediction_count):
showimg = np.reshape(test_images[i], [28, 28])
showimg = np.uint8(showimg * 255)
plt.imshow(np.reshape(showimg, [28, 28]), cmap='gray')
plt.show()
print("Model prediction: %i, Answer: %i" % (np.argmax(predictions.numpy()[i]),test_y_answer[i]))
``` |
{
"source": "jisung0920/EyeCon-Server",
"score": 2
} |
#### File: jisung0920/EyeCon-Server/econ.py
```python
import socket
import numpy as np
import cv2
from scipy.spatial import distance as dist
from imutils import face_utils
import torchvision.transforms as transforms
import imutils
import dlib
import random
import os
import torch
import math
def recvAll(sock, count):
buf = b''
while count:
        # read up to `count` bytes; the socket cannot deliver all of the image bytes in a single recv
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
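# Hedged usage sketch (the length-prefix protocol is an assumption, not shown here):
# a sender would typically transmit the payload size first, e.g. as a fixed-width
# ASCII field, so the receiver knows exactly how many bytes to wait for:
#   length = int(recvAll(sock, 16))   # hypothetical 16-byte header holding the byte count
#   payload = recvAll(sock, length)   # then read exactly that many bytes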
def modeList(L):
return max(set(L), key=L.count)
def loadStateDict(state_direction_num,state_Q_num):
state_memory = dict()
state_memory['GazeRatioLR'] = np.array([1.0 for i in range(state_direction_num)])
state_memory['GazeRatioTB'] = np.array([1.0 for i in range(state_direction_num)])
state_memory['FacePointX'] = np.array([200 for i in range(state_direction_num)])
state_memory['FacePointY'] = np.array([300 for i in range(state_direction_num)])
state_memory['Click'] = [0 for i in range(state_Q_num)]
state_memory['Scroll'] = [0 for i in range(state_Q_num)]
state_memory['FER'] = [0 for i in range(state_Q_num)]
return state_memory
def rateToDistance(r_x,r_y,width,height, weight = 1.5) :
d_x = (width*r_x) * weight
d_y = (height*r_y) * weight
return int(d_x),int(d_y)
def loadClassifier(util_path) :
face_haar_path = util_path + 'haarcascade_frontalface_default.xml'
faceClassifier = cv2.CascadeClassifier(face_haar_path)
return faceClassifier
def getEAR(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
return (A + B) / (2.0 * C)
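# Added note: getEAR computes the eye aspect ratio (EAR),
#   EAR = (|p2 - p6| + |p3 - p5|) / (2 * |p1 - p4|),
# where p1..p6 are the six eye landmarks in order. The ratio stays roughly
# constant while the eye is open and drops towards 0 during a blink, which is
# what isBlink() below thresholds against blink_th.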
def isBlink(eyeLandmark,blink_th,start_idx,end_idx) :
eye = eyeLandmark[start_idx:end_idx]
EAR = getEAR(eye)
if EAR > blink_th:
return False
else :
return True
def getFaceXY(faceImage) :
face = faceImage[0]
x = face[0] + face[2]/2
y = face[1] + face[3]/2
return x,y
def getGazePoint(model,image,W,H):
result = model(image[None, ...])[0]
x_rate = 0
y_rate = 0
for i in [0,1,4,5,8,9,12,13] :
x_rate += result[i]
for i in [0,1,2,3,4,5,6,7] :
y_rate += result[i]
x = (W/4) + (1 - x_rate ) * W/2
y = (H/4) + (1 - y_rate ) * H/2
return x,y
def getExpression(faceFrame,gray,FERmodel) :
for (x, y, w, h) in faceFrame:
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(cv2.resize(roi_gray, (224, 224)), -1)
cv2.normalize(cropped_img, cropped_img, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
# cropped_img = transforms.ToTensor()
cropped_img = torch.from_numpy(cropped_img)
cropped_img = cropped_img.float()
output = FERmodel(cropped_img)
        _, prediction = torch.max(output, 1)
        prediction = int(prediction.data[0])
        return prediction, (x, y)
return 0,(0,0)
def getGazeRatio(gray,faceLandmark,eye_points):
eye_region = np.array([(faceLandmark.part(eye_points[0]).x, faceLandmark.part(eye_points[0]).y),
(faceLandmark.part(eye_points[1]).x, faceLandmark.part(eye_points[1]).y),
(faceLandmark.part(eye_points[2]).x, faceLandmark.part(eye_points[2]).y),
(faceLandmark.part(eye_points[3]).x, faceLandmark.part(eye_points[3]).y),
(faceLandmark.part(eye_points[4]).x, faceLandmark.part(eye_points[4]).y),
(faceLandmark.part(eye_points[5]).x, faceLandmark.part(eye_points[5]).y)], np.int32)
height, width = gray.shape
mask = np.zeros((height, width), np.uint8)
# cv2.polylines(image, [eye_region], True, 255, 2)
    cv2.fillPoly(mask, [eye_region], 255)
eye = cv2.bitwise_and(gray, gray, mask=mask)
min_x = np.min(eye_region[:, 0])
max_x = np.max(eye_region[:, 0])
min_y = np.min(eye_region[:, 1])
max_y = np.max(eye_region[:, 1])
gray_eye = eye[min_y: max_y, min_x: max_x]
_, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY_INV)
height, width = threshold_eye.shape
left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]
left_side_white = cv2.countNonZero(left_side_threshold)
right_side_threshold = threshold_eye[0: height, int(width / 2): width]
right_side_white = cv2.countNonZero(right_side_threshold)
upper_side_threshold = threshold_eye[0: int(height / 2), 0: width]
upper_side_white = cv2.countNonZero(upper_side_threshold)
down_side_threshold = threshold_eye[int(height / 2): height, 0: width]
down_side_white = cv2.countNonZero(down_side_threshold)
if left_side_white == 0:
horizontal_gaze_ratio = 1
elif right_side_white == 0:
horizontal_gaze_ratio = 5
else:
horizontal_gaze_ratio = left_side_white / right_side_white
if upper_side_white == 0:
vertical_gaze_ratio = 1
elif down_side_white == 0:
vertical_gaze_ratio = 5
else:
vertical_gaze_ratio = upper_side_white / down_side_white
return horizontal_gaze_ratio, vertical_gaze_ratio
# Model
# def getGazeXY(image,face,eyes) :
# x = 500 + random.randint(-50, 50)
# y = 800 + random.randint(-50, 50)
# return x,y
``` |
{
"source": "jisungyoon/clusim",
"score": 3
} |
#### File: clusim/clusim/clusteringerror.py
```python
class ClusteringError(Exception):
"""
Base Class for clustering errors.
"""
class EmptyClusteringError(ClusteringError):
"""
Raised when the clustering does not contain elements.
"""
def __str__(self):
return 'Clustering must have one or more elements in one or more clusters.'
class InvalidElementError(ClusteringError):
"""
Raised when an element is None or np.nan.
"""
def __str__(self):
return 'Elements cannot be None or NaN.'
class InvalidClusterError(ClusteringError):
"""
    Raised when a cluster is None or np.nan.
"""
def __str__(self):
return 'Clusters cannot be None or NaN.'
class EmptyClusterError(ClusteringError):
"""
Raised when a clustering has empty clusters.
"""
def __init__(self, n_emptys = None, empty_list = None):
self.n_emptys = n_emptys
self.empty_list = empty_list
def __str__(self):
if self.empty_list:
return 'The following clusters contain 0 elements:\n{}'.format(self.empty_list)
elif self.n_emptys:
return 'There are {} clusters with 0 elements.'.format(self.n_emptys)
else:
return 'EmptyClusterError'
class UnassignedElementError(ClusteringError):
"""
Raised when elements are not assigned to clusters.
"""
def __init__(self, n_unassigned = None, unassigned_list = None):
self.n_unassigned = n_unassigned
self.unassigned_list = unassigned_list
def __str__(self):
if self.unassigned_list:
return 'The following elements were not assigned a cluster:\n{}'.format(self.unassigned_list)
elif self.n_unassigned:
return 'There are {} elements unassigned to a cluster.'.format(self.n_unassigned)
else:
return 'UnassignedElementError'
class ClusteringSimilarityError(Exception):
"""
Base Class for clustering similarity errors.
"""
def __str__(self):
return 'The element sets must be the same for both clusterings.'
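# Hedged usage sketch (not part of the original module): the error classes above
# are meant to be raised by clustering validation code and caught through the
# shared base class, e.g.
#   try:
#       raise EmptyClusterError(n_emptys=2)
#   except ClusteringError as err:
#       print(err)   # "There are 2 clusters with 0 elements."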
```
#### File: clusim/clusim/dag.py
```python
import collections
import copy
import itertools
import networkx as nx
import numpy as np
class DAG(nx.DiGraph):
def roots(self):
return [node for node in self.nodes() if self.in_degree(node) == 0]
def leaves(self):
return [node for node in self.nodes() if self.out_degree(node) == 0]
def maxdist_from_roots(self, graph=None):
if graph is None:
graph = self
dist = {} # stores [node, distance] pair
for node in nx.topological_sort(graph):
# pairs of dist,node for all incoming edges
pairs = [(dist[v][0]+1, v) for v in graph.pred[node]]
if pairs:
dist[node] = max(pairs)
else:
dist[node] = (0, node)
return dist
def mindist_from_roots(self, graph=None):
if graph is None:
graph = self
dist = {} # stores [node, distance] pair
for node in nx.topological_sort(graph):
# pairs of dist,node for all incoming edges
pairs = [(dist[v][0]+1, v) for v in graph.pred[node]]
if pairs:
dist[node] = min(pairs)
else:
dist[node] = (0, node)
return dist
def height(self):
dfr = self.maxdist_from_roots()
        max_dist = max([disttuple[0] for disttuple in dfr.values()]) + 1
return max_dist
def cut_at_depth(self, depth=None, cuttype='shortestpath',
rescale_path_type='max'):
if cuttype == 'shortestpath':
dfr = self.mindist_from_roots()
node_dist = {node: dfr[node][0] for node in self.nodes()}
elif cuttype == 'rescaled':
node_dist = self.rescale(rescale_path_type)
cluster_list = [node for node in self.nodes()
if node_dist[node] == depth]
return cluster_list
def norm_linkage(self):
mlink = max(self.linkage_dist.values())
self.linkage_dist = {l: d/mlink for l, d
in self.linkage_dist.items()}
def rescale(self, rescale_path_type='max'):
if rescale_path_type == 'max':
dfr = self.maxdist_from_roots(self)
dtl = self.maxdist_from_roots(self.reverse(copy=True))
elif rescale_path_type == 'min':
dfr = self.mindist_from_roots(self)
dtl = self.mindist_from_roots(self.reverse(copy=True))
elif rescale_path_type == 'linkage':
            try:
                return self.linkage_dist
            except AttributeError:
                return collections.defaultdict(int)
rescaled_level = {}
for node in self.nodes():
path_to_node = dfr[node][0]
path_from_node = dtl[node][0]
total_path_len = path_to_node + path_from_node
if total_path_len == 0.0:
rescaled_level[node] = 0.0
else:
rescaled_level[node] = float(path_to_node) / total_path_len
return rescaled_level
def draw(self, rescaled_level=None, rescale_path_type='max', ax=None,
**kwargs):
import matplotlib.pylab as plt
if ax is None:
fig, ax = plt.subplots(1, 1)
treepos = nx.drawing.nx_agraph.graphviz_layout(self, prog='dot')
treeys = [pt[1] for pt in treepos.values()]
mintreey = min(treeys)
maxtreey = max(treeys)
if rescaled_level is True:
rescaled_level = self.rescale(rescale_path_type=rescale_path_type)
treepos = {node: (
pt[0], mintreey +
(1.0 - rescaled_level[node]) * (maxtreey - mintreey))
for node, pt in treepos.items()}
elif type(rescaled_level) is dict:
treepos = {node: (
pt[0], mintreey +
(1.0 - rescaled_level[node]) * (maxtreey - mintreey))
for node, pt in treepos.items()}
nx.draw(self, pos=treepos, arrows=False, ax=ax, **kwargs)
return ax
def _tree_edges(self, n, r):
# From http://stackoverflow.com/questions/26896370/faster-way-to-build-a-tree-graph-using-networkx-python
# helper function for trees
# yields edges in rooted tree at 0 with n nodes and branching ratio r
nodes = iter(range(n))
parents = [next(nodes)] # stack of max length r
while parents:
source = parents.pop(0)
for i in range(r):
try:
target = next(nodes)
parents.append(target)
yield source, target
except StopIteration:
break
def make_regular_tree(self, N=1, r=2):
self.add_edges_from(list(self._tree_edges(N, r)))
self.linkage_dist = {n: d for n, (d, _) in
self.maxdist_from_roots().items()}
def make_complete_rary_tree(self, h=2, r=2):
self.add_edges_from(nx.balanced_tree(h=h, r=r,
create_using=nx.DiGraph()).edges)
self.linkage_dist = {n: d for n, (d, _) in
self.maxdist_from_roots().items()}
def swap_nodes(self, n1, n2, swap_parents=True, swap_children=False):
if swap_parents:
n1parents = list(self.predecessors(n1))
n2parents = list(self.predecessors(n2))
self.add_edges_from([(p, n1) for p in n2parents])
self.remove_edges_from([(p, n1) for p in n1parents])
self.add_edges_from([(p, n2) for p in n1parents])
self.remove_edges_from([(p, n2) for p in n2parents])
class Dendrogram(DAG):
def from_linkage(self, linkage_matrix, dist_rescaled=False):
N = linkage_matrix.shape[0] + 1
if dist_rescaled:
maxdist = max(linkage_matrix[:, 2])
distances = 1.0 - linkage_matrix[:, 2]/maxdist
linkage_dist = {ipt: 1.0 for ipt in range(N)}
else:
distances = linkage_matrix[:, 2]
linkage_dist = {ipt: 0.0 for ipt in range(N)}
for iclus in range(N - 1):
clus_id = N + iclus
linkage_dist[clus_id] = distances[iclus]
self.add_edges_from([(clus_id, int(linkage_matrix[iclus, 0])),
(clus_id, int(linkage_matrix[iclus, 1]))])
self.linkage_dist = linkage_dist
return self
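    # Hedged usage sketch (assumes SciPy is installed; not part of the original file):
    #   from scipy.cluster.hierarchy import linkage
    #   Z = linkage(np.random.rand(10, 2), method='average')   # (N-1) x 4 linkage matrix
    #   dendro = Dendrogram().from_linkage(Z, dist_rescaled=False)
    # Leaves are labelled 0..N-1 and internal merge nodes N..2N-2, with the merge
    # heights stored in dendro.linkage_dist.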
def to_dendropy_tree(self, taxon_namespace=None, weighted=False):
import dendropy
tree = dendropy.Tree(taxon_namespace=taxon_namespace)
seed_node = self.roots()[0]
if weighted:
def edge_length(par, child):
return np.abs(self.linkage_dist[par] -
self.linkage_dist[child])
else:
def edge_length(par, child): return 1.0
tree_dict = {seed_node: tree.seed_node}
for clus in nx.topological_sort(self):
for child in self.successors(clus):
tree_dict[child] = tree_dict[clus].new_child(
edge_length=edge_length(clus, child))
for clus in self.leaves():
tree_dict[clus].taxon = taxon_namespace.get_taxon(str(clus))
return tree
def from_dendropy_tree(self, tree, keep_taxon=True):
import dendropy
linkage_dist = {}
for i, node in enumerate(tree.levelorder_node_iter()):
if keep_taxon and node.taxon is not None:
node_name = node.taxon.label
else:
node_name = i
node.label = node_name
if node.parent_node is not None:
self.add_edge(node.parent_node.label, node.label,
weight=node.edge_length)
linkage_dist[node.label] =\
linkage_dist[node.parent_node.label] + node.edge_length
else:
linkage_dist[node.label] = 0.0
self.linkage_dist = linkage_dist
return self
def from_newick(self, s, taxon_namespace=None, keep_taxon=True):
import dendropy
if taxon_namespace is None:
taxon_namespace = self.infer_namespace(s)
tree = dendropy.Tree(taxon_namespace=taxon_namespace)
tree = tree.get(data=s, schema="newick")
return self.from_dendropy_tree(tree, keep_taxon=keep_taxon)
def make_random_dendrogram(self, N=10):
return self.make_biased_dendrogram(N=N, p=0.0)
def infer_namespace(self, s):
import dendropy
edits = s.replace('(', "").replace(')', '').replace(';', '')
units = [t.split(':') for t in edits.split(',')]
names = []
for unitlist in units:
for s in unitlist:
try:
float(s)
except ValueError:
names.append(s)
taxon_namespace = dendropy.TaxonNamespace(names)
return taxon_namespace
def make_biased_dendrogram(self, N=10, p=1.0):
# make a random dendrogram with a bias towards each node appearing in
# its own cluster
self.add_node(0)
leaves = self.leaves()
i = 1
while len(leaves) < N:
if np.random.random() > p:
parent = leaves.pop(np.random.randint(0, len(leaves)))
else:
parent = leaves.pop(-1)
self.add_edges_from([(parent, i), (parent, i + 1)])
leaves.extend([i, i + 1])
i += 2
return self
def make_random_dendrogram_aglomerative(self, N=10, leaves=None):
        # build a random dendrogram agglomeratively by repeatedly merging two
        # randomly chosen nodes until a single root remains
if leaves is None:
node_list = list(range(N))
else:
node_list = copy.copy(leaves)
self.add_nodes_from(node_list)
i = max(node_list) + 1
while len(node_list) > 1:
left = node_list.pop(np.random.randint(0, len(node_list)))
right = node_list.pop(np.random.randint(0, len(node_list)))
self.add_edges_from([(i, left), (i, right)])
node_list.append(i)
i += 1
return self
def random_prune_regraft(self):
roots = self.roots()
subtree_root = np.random.choice(self.nodes(), 1)[0]
while subtree_root in roots and (len(roots) == 1):
subtree_root = np.random.choice(self.nodes(), 1)[0]
subtree_vertices = self.subgraph(
nx.dfs_preorder_nodes(self, subtree_root)).nodes()
prune_vertex = list(self.predecessors(subtree_root))[0]
graft_vertex = np.random.choice(self.nodes(), 1)[0]
while (graft_vertex in subtree_vertices) or\
(graft_vertex in roots) or\
(graft_vertex == prune_vertex):
graft_vertex = np.random.choice(self.nodes(), 1)[0]
# now we have to do the graph edits
# merge connections through the pruned vertex
for source in self.predecessors(prune_vertex):
for sink in self.successors(prune_vertex):
if sink != subtree_root:
self.add_edge(source, sink)
# prune the vertex
self.remove_node(prune_vertex)
# reattach the pruned vertex
for source in list(self.predecessors(graft_vertex)):
self.add_edge(source, prune_vertex)
self.remove_edge(source, graft_vertex)
# reattach the subtree
self.add_edge(prune_vertex, subtree_root)
self.add_edge(prune_vertex, graft_vertex)
def random_tree_replace(self, cut_depth=0.0):
vertex_distance = self.rescale()
fixed_vertices = [v for v in self.nodes()
if vertex_distance[v] >= cut_depth]
fixed_subtree = self.subgraph(fixed_vertices)
fixed_roots = [node for node in fixed_subtree
if fixed_subtree.in_degree(node) == 0]
new_dendro = Dendrogram()
new_dendro.make_random_dendrogram_aglomerative(leaves=fixed_roots)
new_dendro.add_edges_from(fixed_subtree.edges())
return new_dendro
def random_node_deletion(self, percent=1.0):
nodes2avoid = self.roots() + self.leaves()
clusters2delete = np.random.choice(
[n for n in self.nodes() if n not in nodes2avoid],
int(percent * (self.number_of_nodes() - len(nodes2avoid))),
replace=False)
for c in clusters2delete:
self.add_edges_from(list(itertools.product(self.predecessors(c),
self.successors(c))))
self.remove_node(c)
return self
``` |
{
"source": "jisungyoon/pyscisci",
"score": 3
} |
#### File: pyscisci/pyscisci/database.py
```python
import os
import json
import gzip
from collections import defaultdict
import pandas as pd
import numpy as np
from nameparser import HumanName
from tqdm import tqdm

from pyscisci.utils import isin_sorted, zip2dict, load_int, load_float, groupby_count, groupby_range
from pyscisci.metrics import *
from pyscisci.datasource.readwrite import load_preprocessed_data, append_to_preprocessed_df
class BibDataBase(object):
"""
Base class for all bibliometric database interfaces.
    The BibDataBase provides a parsimonious structure for each of the specific data sources (MAG, WOS, etc.).
There are four primary types of functions:
    1. *Parsing* Functions (data source specific) that parse the raw data files
2. *Loading* Functions that load DataFrames
3. *Processing* Functions that calculate advanced data types from the raw files
4. *Analysis* Functions that calculate Science of Science metrics.
Parameters
-------
:param path2database: str
The path to the database files
:param keep_in_memory: bool, default False
Flag to keep database files in memory once loaded
"""
def __init__(self, path2database = '', keep_in_memory = False, show_progress=True):
self.path2database = path2database
self.keep_in_memory = keep_in_memory
self.show_progress = show_progress
self._affiliation_df = None
self._pub_df = None
self._journal_df = None
self._author_df = None
self._pub2year = None
self._pub2ref_df = None
self._author2pub_df = None
self._paa_df = None
self._pub2refnoself_df = None
self._pub2field_df=None
self._fieldinfo_df = None
@property
def affiliation_df(self):
"""
The DataFrame keeping affiliation information. Columns may depend on the specific datasource.
Columns
-------
'AffiliationId', 'NumberPublications', 'NumberCitations', 'FullName', 'GridId', 'OfficialPage', 'WikiPage', 'Latitude', 'Longitude'
"""
if self._affiliation_df is None:
if self.keep_in_memory:
self._affiliation_df = self.load_affiliations(show_progress=self.show_progress)
else:
return self.load_affiliations(show_progress=self.show_progress)
return self._affiliation_df
@property
def author_df(self):
"""
The DataFrame keeping author information. Columns may depend on the specific datasource.
Columns
-------
'AuthorId', 'LastKnownAffiliationId', 'NumberPublications', 'NumberCitations', 'FullName', 'LastName', 'FirstName', 'MiddleName'
"""
if self._author_df is None:
if self.keep_in_memory:
self._author_df = self.load_authors(show_progress=self.show_progress)
else:
return self.load_authors(show_progress=self.show_progress)
return self._author_df
@property
def pub_df(self):
"""
The DataFrame keeping publication information. Columns may depend on the specific datasource.
Columns
-------
'PublicationId', 'Year', 'JournalId', 'FamilyId', 'Doi', 'Title', 'Date', 'Volume', 'Issue', 'DocType'
"""
if self._pub_df is None:
if self.keep_in_memory:
self._pub_df = self.load_publications(show_progress=self.show_progress)
else:
return self.load_publications(show_progress=self.show_progress)
return self._pub_df
@property
def pub2year(self):
"""
A dictionary mapping PublicationId to Year.
"""
if self._pub2year is None:
if self.keep_in_memory:
self._pub2year = self.load_pub2year()
else:
return self.load_pub2year()
return self._pub2year
@property
def journal_df(self):
"""
The DataFrame keeping journal information. Columns may depend on the specific datasource.
Columns
-------
'JournalId', 'FullName', 'Issn', 'Publisher', 'Webpage'
"""
if self._journal_df is None:
if self.keep_in_memory:
self._journal_df = self.load_journals(show_progress=self.show_progress)
else:
return self.load_journals(show_progress=self.show_progress)
return self._journal_df
@property
def pub2ref_df(self):
"""
The DataFrame keeping citing and cited PublicationId.
Columns
-------
CitingPublicationId, CitedPublicationId
"""
if self._pub2ref_df is None:
if self.keep_in_memory:
self._pub2ref_df = self.load_references(show_progress=self.show_progress)
else:
return self.load_references(show_progress=self.show_progress)
return self._pub2ref_df
@property
def pub2refnoself_df(self):
"""
The DataFrame keeping citing and cited PublicationId after filtering out the self-citations.
Columns
-------
CitingPublicationId, CitedPublicationId
"""
if self._pub2refnoself_df is None:
if self.keep_in_memory:
self._pub2refnoself_df = self.load_references(noselfcite=True, show_progress=self.show_progress)
else:
return self.load_references(noselfcite=True, show_progress=self.show_progress)
return self._pub2refnoself_df
@property
def paa_df(self):
"""
The DataFrame keeping all publication, author, affiliation relationships. Columns may depend on the specific datasource.
Columns
-------
'PublicationId', 'AuthorId', 'AffiliationId', 'AuthorSequence', 'OrigAuthorName', 'OrigAffiliationName'
"""
if self._paa_df is None:
if self.keep_in_memory:
self._paa_df = self.load_publicationauthoraffiliation(show_progress=self.show_progress)
else:
return self.load_publicationauthoraffiliation(show_progress=self.show_progress)
return self._paa_df
@property
def author2pub_df(self):
"""
The DataFrame keeping all publication, author relationships. Columns may depend on the specific datasource.
Columns
-------
'PublicationId', 'AuthorId'
"""
if self._paa_df is None:
if self.keep_in_memory:
self._paa_df = self.load_publicationauthoraffiliation(columns = ['AuthorId', 'PublicationId'],
duplicate_subset = ['AuthorId', 'PublicationId'], dropna = ['AuthorId', 'PublicationId'], show_progress=self.show_progress)
else:
return self.load_publicationauthoraffiliation(columns = ['AuthorId', 'PublicationId'],
duplicate_subset = ['AuthorId', 'PublicationId'], dropna = ['AuthorId', 'PublicationId'], show_progress=self.show_progress)
return self._paa_df
@property
def pub2field_df(self):
"""
The DataFrame keeping all publication field relationships. Columns may depend on the specific datasource.
Columns
-------
'PublicationId', 'FieldId'
"""
if self._pub2field_df is None:
if self.keep_in_memory:
self._pub2field_df = self.load_pub2field(show_progress=self.show_progress)
else:
return self.load_pub2field(show_progress=self.show_progress)
return self._pub2field_df
@property
def fieldinfo_df(self):
"""
The DataFrame keeping all publication field relationships. Columns may depend on the specific datasource.
Columns
-------
'FieldId', 'FieldLevel', 'NumberPublications', 'FieldName'
"""
if self._fieldinfo_df is None:
if self.keep_in_memory:
self._fieldinfo_df = self.load_fieldinfo(show_progress=self.show_progress)
else:
return self.load_fieldinfo(show_progress=self.show_progress)
return self._fieldinfo_df
## Basic Functions for loading data from either preprocessed sources or the raw database files
def load_affiliations(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Affiliation DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
            Affiliation DataFrame.
"""
if show_progress:
show_progress='Loading Affiliations'
if preprocess and os.path.exists(os.path.join(self.path2database, 'affiliation')):
return load_preprocessed_data('affiliation', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_affiliations()
def load_authors(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, process_name = True, show_progress=True):
"""
Load the Author DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
process_name : bool, default True, Optional
If True, then when processing the raw file, the package `NameParser <https://nameparser.readthedocs.io/en/latest/>`_
will be used to split author FullNames.
Returns
-------
DataFrame
Author DataFrame.
"""
if show_progress:
show_progress='Loading Authors'
if preprocess and os.path.exists(os.path.join(self.path2database, 'author')):
return load_preprocessed_data('author', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_authors(process_name=process_name)
def load_publications(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Publication DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
:param preprocess: bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
Publication DataFrame.
"""
if show_progress:
show_progress='Loading Publications'
if preprocess and os.path.exists(os.path.join(self.path2database, 'publication')):
return load_preprocessed_data(dataname='publication', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_publications()
def load_pub2year(self):
if os.path.exists(os.path.join(self.path2database, 'pub2year.json.gz')):
with gzip.open(os.path.join(self.path2database, 'pub2year.json.gz'), 'r') as infile:
pub2year = json.loads(infile.read().decode('utf8'))
return {self.PublicationIdType(k):int(y) for k,y in pub2year.items() if not y is None}
def load_journals(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Journal DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
Journal DataFrame.
"""
if show_progress:
show_progress='Loading Journals'
if preprocess and os.path.exists(os.path.join(self.path2database, 'journal')):
return load_preprocessed_data('journal', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_publications()
def load_references(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', noselfcite = False, dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Pub2Ref DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
noselfcite : bool, default False, Optional
If True, then the preprocessed pub2ref files with self-citations removed will be used.
Returns
-------
DataFrame
Pub2Ref DataFrame.
"""
if noselfcite:
fileprefix = 'pub2refnoself'
else:
fileprefix = 'pub2ref'
if show_progress:
show_progress='Loading {}'.format(fileprefix)
if preprocess and os.path.exists(os.path.join(self.path2database, fileprefix)):
return load_preprocessed_data(fileprefix, path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_references()
def load_publicationauthoraffiliation(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the PublicationAuthorAffilation DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
PublicationAuthorAffilation DataFrame.
"""
if show_progress:
show_progress='Loading Publication Author Affiliation'
if preprocess and os.path.exists(os.path.join(self.path2database, 'publicationauthoraffiliation')):
return load_preprocessed_data('publicationauthoraffiliation', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_publicationauthoraffiliation()
def load_pub2field(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Pub2Field DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
:param preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
:param columns : list, default None, Optional
Load only this subset of columns
:param isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
Pub2Field DataFrame.
"""
if show_progress:
show_progress='Loading Fields'
if preprocess and os.path.exists(os.path.join(self.path2database, 'pub2field')):
return load_preprocessed_data('pub2field', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_fields()
def load_fieldinfo(self, preprocess = True, columns = None, isindict = None, show_progress=False):
"""
Load the Field Information DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
:param preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
:param columns : list, default None, Optional
Load only this subset of columns
:param isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
FieldInformation DataFrame.
"""
if show_progress:
show_progress='Loading Field Info'
if preprocess and os.path.exists(os.path.join(self.path2database, 'fieldinfo')):
return pd.read_hdf(os.path.join(self.path2database, 'fieldinfo', 'fieldnames.hdf'))
else:
return self.parse_fields()
def load_impact(self, preprocess = True, include_yearnormed = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the precomputed impact DataFrame from a preprocessed directory.
Parameters
----------
:param preprocess : bool, default True
Attempt to load from the preprocessed directory.
:param include_yearnormed: bool, default True
Normalize all columns by yearly average.
:param columns : list, default None
Load only this subset of columns
:param isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
            (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
            Impact DataFrame.
"""
if show_progress:
show_progress='Loading Impact'
if include_yearnormed:
def normfunc(impactdf):
impactcolumns = [c for c in list(impactdf) if not c in ['PublicationId', 'Year']]
for c in impactcolumns:
impactdf[c+'_norm'] = impactdf[c]/impactdf[c].mean()
return impactdf
else:
def normfunc(impactdf):
return impactdf
if preprocess and os.path.exists(os.path.join(self.path2database, 'impact')):
return load_preprocessed_data('impact', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=normfunc, show_progress=show_progress)
else:
            return self.compute_impact()
"""
To be rewritten for each specific data source (MAG, WOS, etc.)
"""
def download_from_source(self):
raise NotImplementedError
def parse_affiliations(self, preprocess = False):
raise NotImplementedError
def parse_authors(self, preprocess = False, process_name = True, num_file_lines = 5*10**6):
raise NotImplementedError
def parse_publications(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
def parse_references(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
def parse_publicationauthoraffiliation(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
def parse_fields(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
# Analysis
def author_productivity(self, df=None, colgroupby = 'AuthorId', colcountby = 'PublicationId', show_progress=False):
"""
Calculate the total number of publications for each author.
Parameters
----------
:param df : DataFrame, default None, Optional
A DataFrame with the author2publication information. If None then the database 'author2pub_df' is used.
:param colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
:param colcountby : str, default 'PublicationId', Optional
The DataFrame column with Publication Ids. If None then the database 'PublicationId' is used.
Returns
-------
DataFrame
Productivity DataFrame with 2 columns: 'AuthorId', 'Productivity'
"""
if df is None:
df = self.author2pub_df
# we can use show_progress to pass a label for the progress bar
if show_progress:
show_progress='Author Productivity'
newname_dict = zip2dict([str(colcountby)+'Count', '0'], ['Productivity']*2)
return groupby_count(df, colgroupby, colcountby, count_unique=True, show_progress=show_progress).rename(columns=newname_dict)
def author_yearly_productivity(self, df=None, colgroupby = 'AuthorId', datecol = 'Year', colcountby = 'PublicationId', show_progress=False):
"""
Calculate the number of publications for each author in each year.
Parameters
----------
:param df : DataFrame, default None, Optional
A DataFrame with the author2publication information. If None then the database 'author2pub_df' is used.
:param colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
:param datecol : str, default 'Year', Optional
The DataFrame column with Year information. If None then the database 'Year' is used.
:param colcountby : str, default 'PublicationId', Optional
The DataFrame column with Publication Ids. If None then the database 'PublicationId' is used.
Returns
-------
DataFrame
Productivity DataFrame with 3 columns: 'AuthorId', 'Year', 'YearlyProductivity'
"""
if df is None:
df = self.author2pub_df
# we can use show_progress to pass a label for the progress bar
if show_progress:
show_progress='Yearly Productivity'
newname_dict = zip2dict([str(colcountby)+'Count', '0'], ['YearlyProductivity']*2)
return groupby_count(df, [colgroupby, datecol], colcountby, count_unique=True, show_progress=show_progress).rename(columns=newname_dict)
def author_career_length(self, df = None, colgroupby = 'AuthorId', colrange = 'Year', show_progress=False):
"""
Calculate the career length for each author. The career length is the length of time from the first
publication to the last publication.
Parameters
----------
:param df : DataFrame, default None, Optional
A DataFrame with the author2publication information. If None then the database 'author2pub_df' is used.
:param colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
:param colrange : str, default 'Year', Optional
The DataFrame column with Date information. If None then the database 'Year' is used.
Returns
-------
DataFrame
Productivity DataFrame with 2 columns: 'AuthorId', 'CareerLength'
"""
if df is None:
df = self.author2pub_df
# we can use show_progress to pass a label for the progress bar
if show_progress:
show_progress='Career Length'
newname_dict = zip2dict([str(colrange)+'Range', '0'], ['CareerLength']*2)
return groupby_range(df, colgroupby, colrange, show_progress=show_progress).rename(columns=newname_dict)
def author_productivity_trajectory(self, df =None, colgroupby = 'AuthorId', datecol = 'Year', colcountby = 'PublicationId', show_progress=False):
"""
Calculate the author yearly productivity trajectory. See :cite:`way2017misleading`
The algorithmic implementation can be found in :py:func:`citationanalysis.compute_yearly_productivity_traj`.
Parameters
----------
:param df : DataFrame, default None
A DataFrame with the author2publication information. If None then the database 'author2pub_df' is used.
:param colgroupby : str, default 'AuthorId'
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
:param datecol : str, default 'Year'
The DataFrame column with Date information. If None then the database 'Year' is used.
:param colcountby : str, default 'PublicationId'
The DataFrame column with Publication Ids. If None then the database 'PublicationId' is used.
Returns
-------
DataFrame
Trajectory DataFrame with 5 columns: 'AuthorId', 't_break', 'b', 'm1', 'm2'
"""
if df is None:
df = self.author2pub_df
return compute_yearly_productivity_traj(df, colgroupby = colgroupby)
def author_hindex(self, df = None, colgroupby = 'AuthorId', colcountby = 'Ctotal', show_progress=False):
"""
        Calculate the author h-index. See :cite:`hirsch2005index` for the definition.
The algorithmic implementation can be found in :py:func:`citationanalysis.compute_hindex`.
Parameters
----------
:param df : DataFrame, default None, Optional
A DataFrame with the author2publication information. If None then the database 'author2pub_df' is used.
:param colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
:param colcountby : str, default 'Ctotal', Optional
The DataFrame column with Citation counts for each publication. If None then the database 'Ctotal' is used.
Returns
-------
DataFrame
            H-index DataFrame with 2 columns: 'AuthorId', 'Hindex'
"""
if show_progress: print("Starting Author H-index. \nLoading Data.")
if df is None:
            df = self.author2pub_df.merge(self.impact_df[['PublicationId', colcountby]], on='PublicationId', how='left')
if show_progress: print("Computing H-index.")
return compute_hindex(df, colgroupby = colgroupby, colcountby = colcountby, show_progress=show_progress)
def remove_selfcitations(self, preprocess=True, show_progress=False):
"""
        Process the pub2ref DataFrame and remove all citation relationships that share an author.
Parameters
----------
:param preprocess : bool, default True, Optional
If True then the new preprocessed DataFrames are saved in pub2refnoself
Returns
-------
DataFrame
Pub2Ref DataFrame with 2 columns: 'CitingPublicationId', 'CitedPublicationId'
"""
if preprocess:
if not os.path.exists(os.path.join(self.path2database, 'pub2refnoself')):
os.mkdir(os.path.join(self.path2database, 'pub2refnoself'))
pub2authors = defaultdict(set)
for pid, aid in self.author2pub_df[['PublicationId', 'AuthorId']].values:
pub2authors[pid].add(aid)
fullrefdf = []
# loop through all pub2ref files
Nreffiles = sum('pub2ref' in fname for fname in os.listdir(os.path.join(self.path2database, 'pub2ref')))
for ifile in tqdm(range(Nreffiles), desc='Removing Self-citations', disable= not show_progress):
refdf = pd.read_hdf(os.path.join(self.path2database, 'pub2ref', 'pub2ref{}.hdf'.format(ifile)))
# get citing cited pairs with no common authors
noselfcite = np.array([len(pub2authors[citingpid] & pub2authors[citedpid]) == 0 for citingpid, citedpid in refdf.values])
# keep only citing-cited pairs without a common author
refdf = refdf.loc[noselfcite]
if preprocess:
refdf.to_hdf(os.path.join(self.path2database, 'pub2refnoself', 'pub2refnoself{}.hdf'.format(ifile)), key = 'pub2ref')
else:
fullrefdf.append(refdf)
if not preprocess:
return pd.concat(fullrefdf)
def compute_impact(self, preprocess=True, citation_horizons = [5,10], noselfcite = True):
"""
Calculate several of the common citation indices.
* 'Ctotal' : The total number of citations.
        * 'Ck' : The total number of citations within the first k years of publication, for each k value specified by `citation_horizons`.
        * 'Ctotal_noself' : The total number of citations with self-citations removed.
        * 'Ck_noself' : The total number of citations within the first k years of publication with self-citations removed, for each k value specified by `citation_horizons`.
Parameters
----------
:param preprocess : bool, default True, Optional
If True then the impact measures are saved in preprocessed files.
:param citation_horizons : list, default [5,10], Optional
The list of time horizons, in years after publication, over which citations are counted.
:param noselfcite : Bool, default 'True', Optional
If True then the noselfcitation pub2ref files are also processed.
Returns
-------
DataFrame
The impact DataFrame with the columns 'PublicationId' and 'Year', plus one column per citation measure.
"""
# first load the publication year information
pub2year = self.load_pub2year()
# now get the reference list and merge with year info
pub2ref = self.pub2ref_df
# drop all citations that happened before the publication year
pub2ref = pub2ref.loc[[pub2year.get(citingpid, 0) >= pub2year.get(citedpid, 0) for citingpid, citedpid in pub2ref[['CitingPublicationId', 'CitedPublicationId']].values]]
# calculate the total citations
citation_df = groupby_count(pub2ref, colgroupby='CitedPublicationId', colcountby='CitingPublicationId', unique=True )
citation_df.rename(columns={'CitingPublicationIdCount':'Ctotal', 'CitedPublicationId':'PublicationId'}, inplace=True)
# go from the largest k down
for k in np.sort(citation_horizons)[::-1]:
# drop all citations that happened after the k-year horizon
#pub2ref = pub2ref.loc[pub2ref['CitingPublicationYear'] <= pub2ref['CitedPublicationYear'] + k]
pub2ref = pub2ref.loc[[pub2year.get(citingpid, 0) <= pub2year.get(citedpid, 0) + k for citingpid, citedpid in pub2ref[['CitingPublicationId', 'CitedPublicationId']].values]]
# recalculate the impact
k_citation_df = groupby_count(pub2ref, colgroupby='CitedPublicationId', colcountby='CitingPublicationId', unique=True )
k_citation_df.rename(columns={'CitingPublicationIdCount':'C{}'.format(k), 'CitedPublicationId':'PublicationId'}, inplace=True)
citation_df = citation_df.merge(k_citation_df, how='left', on='PublicationId')
# get the Cited Year
citation_df['Year'] = [pub2year.get(pid, 0) for pid in citation_df['PublicationId'].values]
if noselfcite:
del pub2ref
pub2ref = self.pub2refnoself_df
# drop all citations that happened before the publication year
pub2ref = pub2ref.loc[[pub2year.get(citingpid, 0) >= pub2year.get(citedpid, 0) for citingpid, citedpid in pub2ref[['CitingPublicationId', 'CitedPublicationId']].values]]
# calculate the total citations
citation_noself_df = groupby_count(pub2ref, colgroupby='CitedPublicationId', colcountby='CitingPublicationId', unique=True )
citation_noself_df.rename(columns={'CitingPublicationIdCount':'Ctotal_noself', 'CitedPublicationId':'PublicationId'}, inplace=True)
# go from the largest k down
for k in np.sort(citation_horizons)[::-1]:
# drop all citations that happened after the k-year horizon
#pub2ref = pub2ref.loc[pub2ref['CitingPublicationYear'] <= pub2ref['CitedPublicationYear'] + k]
pub2ref = pub2ref.loc[[pub2year.get(citingpid, 0) <= pub2year.get(citedpid, 0) + k for citingpid, citedpid in pub2ref[['CitingPublicationId', 'CitedPublicationId']].values]]
# recalculate the impact
k_citation_df = groupby_count(pub2ref, colgroupby='CitedPublicationId', colcountby='CitingPublicationId', unique=True )
k_citation_df.rename(columns={'CitingPublicationIdCount':'C{}_noself'.format(k), 'CitedPublicationId':'PublicationId'}, inplace=True)
citation_noself_df = citation_noself_df.merge(k_citation_df, how='left', on='PublicationId')
citation_df = citation_df.merge(citation_noself_df, how='left', on='PublicationId')
# set all nan to 0
citation_df.fillna(0, inplace=True)
if preprocess:
if not os.path.exists(os.path.join(self.path2database, 'impact')):
os.mkdir(os.path.join(self.path2database, 'impact'))
for y, cdf in citation_df.groupby('Year', sort=True):
cdf.to_hdf(os.path.join(self.path2database, 'impact', 'impact{}.hdf'.format(y)), mode='w', key ='impact')
else:
return citation_df
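# Illustrative usage sketch (not part of the original file): assuming `pubdb`
# is an instance of this class, computing total, 5-year and 10-year citation
# counts and writing them to the preprocessed 'impact' files could look like:
#
#   pubdb.compute_impact(preprocess=True, citation_horizons=[5, 10], noselfcite=True)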
def compute_teamsize(self, save2pubdf = True, show_progress=False):
"""
Calculate the teamsize of publications, defined as the total number of Authors on the publication.
Parameters
----------
:param save2pubdf : bool, default True, Optional
If True the results are appended to the preprocessed publication DataFrames.
:param show_progress: bool, default False
If True, display a progress bar for the count.
Returns
-------
DataFrame
TeamSize DataFrame with 2 columns: 'PublicationId', 'TeamSize'
"""
if show_progress: print("Starting TeamSize computation. \nLoading Data.")
pub2authorseq_df = self.load_publicationauthoraffiliation(columns = ['PublicationId', 'AuthorId', 'AuthorSequence'],
duplicate_subset = ['PublicationId', 'AuthorId'], dropna = ['PublicationId', 'AuthorId'])
# register our pandas apply with tqdm for a progress bar
tqdm.pandas(desc='TeamSize', disable= not show_progress)
pub2teamsize = pub2authorseq_df.groupby('PublicationId', sort=False)['AuthorSequence'].progress_apply(lambda x: x.max()).astype(int).to_frame().reset_index().rename(columns={'AuthorSequence':'TeamSize'})
if save2pubdf:
if show_progress: print("Saving Teamsize.")
append_to_preprocessed_df(pub2teamsize, self.path2database, 'publication')
return pub2teamsize
def compute_yearly_citations(self, preprocess = True, show_progress=False):
if show_progress:
print("Starting Computation of Yearly Citations")
# first load the publication year information
pub2year = self.pub2year
# now get the reference list and merge with year info
pub2ref = self.pub2ref_df
pub2ref['CitingYear'] = [pub2year.get(citingpid, 0) for citingpid in pub2ref['CitingPublicationId'].values]
# drop all citations that happened before the publication year
pub2ref = pub2ref.loc[[citingyear >= pub2year.get(citedpid, 0) for citingyear, citedpid in pub2ref[['CitingYear', 'CitedPublicationId']].values]]
if show_progress:
print("Yearly Citation Data Prepared")
# calculate the total citations
citation_df = groupby_count(pub2ref, colgroupby=['CitedPublicationId', 'CitingYear'], colcountby='CitingPublicationId', unique=True )
citation_df.rename(columns={'CitingPublicationIdCount':'YearlyCitations', 'CitedPublicationId':'PublicationId'}, inplace=True)
# get the Cited Year
citation_df['CitedYear'] = [pub2year.get(pid, 0) for pid in citation_df['PublicationId'].values]
citation_df.sort_values(by=['CitedYear', 'PublicationId', 'CitingYear'], inplace=True)  # 'CitedPublicationId' was renamed to 'PublicationId' above
if show_progress:
print("Yearly Citations Found")
if preprocess:
if not os.path.exists(os.path.join(self.path2database, 'temporalimpact')):
os.mkdir(os.path.join(self.path2database, 'temporalimpact'))
for y, cdf in citation_df.groupby('CitedYear', sort=True):
cdf.to_hdf(os.path.join(self.path2database, 'temporalimpact', 'temporalimpact{}.hdf'.format(y)), mode='w', key ='temporalimpact')
if show_progress:
print("Yearly Citations Saved")
else:
return citation_df
def filter_doctypes(self, doctypes = ['j', 'b', 'bc', 'c'], show_progress=False):
"""
Filter all of the publication files, keeping only the publications of the specified doctypes.
:param list doctypes: optional
the list of doctypes to keep
:return None:
"""
doctypes = np.sort(doctypes)
if show_progress: print("Starting DocType filter. \nFiltering Publications.")
valid_pubids = []
pub2year = {}
pub2doctype = {}
Nfiles = sum('publication' in fname for fname in os.listdir(os.path.join(self.path2database, 'publication')))
for ifile in range(Nfiles):
pubdf = pd.read_hdf(os.path.join(self.path2database, 'publication', 'publication{}.hdf'.format(ifile)))
pubdf = pubdf.loc[isin_sorted(pubdf['DocType'].values, doctypes)]  # keep only the selected doctypes
pubdf.dropna(subset=['Year'], inplace=True)
pubdf['Year'] = pubdf['Year'].astype(int)
pubdf.to_hdf(os.path.join(self.path2database, 'publication', 'publication{}.hdf'.format(ifile)), key='pub', mode='w')
valid_pubids.extend(pubdf['PublicationId'].values)
for pid, y, dt in pubdf[['PublicationId', 'Year', 'DocType']].values:
pub2year[pid] = y
pub2doctype[pid] = dt
with gzip.open(os.path.join(self.path2database, 'pub2year.json.gz'), 'w') as outfile:
outfile.write(json.dumps(pub2year).encode('utf8'))
with gzip.open(os.path.join(self.path2database, 'pub2doctype.json.gz'), 'w') as outfile:
outfile.write(json.dumps(pub2doctype).encode('utf8'))
del pubdf
valid_pubids = np.sort(valid_pubids)
if show_progress: print("Filtering References.")
Nfiles = sum('pub2ref' in fname for fname in os.listdir(os.path.join(self.path2database, 'pub2ref')))
for ifile in range(Nfiles):
pub2refdf = pd.read_hdf(os.path.join(self.path2database, 'pub2ref', 'pub2ref{}.hdf'.format(ifile)))
pub2refdf = pub2refdf.loc[isin_sorted(pub2refdf['CitedPublicationId'].values, valid_pubids)]
pub2refdf = pub2refdf.loc[isin_sorted(pub2refdf['CitingPublicationId'].values, valid_pubids)]
pub2refdf.to_hdf(os.path.join(self.path2database, 'pub2ref', 'pub2ref{}.hdf'.format(ifile)),
key='pub2ref', mode='w')
if show_progress: print("Filtering Publication and Author.")
Nfiles = sum('publicationauthoraffiliation' in fname for fname in os.listdir(os.path.join(self.path2database, 'publicationauthoraffiliation')))
for ifile in range(Nfiles):
paa_df = pd.read_hdf(os.path.join(self.path2database, 'publicationauthoraffiliation', 'publicationauthoraffiliation{}.hdf'.format(ifile)))
paa_df = paa_df.loc[isin_sorted(paa_df['PublicationId'].values, valid_pubids)]
paa_df.to_hdf(os.path.join(self.path2database, 'publicationauthoraffiliation', 'publicationauthoraffiliation{}.hdf'.format(ifile)),
key='paa', mode='w')
if show_progress: print("Finished filtering DocType.")
``` |
{
"source": "jisunl23/wouldyouci",
"score": 3
} |
#### File: wouldyouci_database/crawling/movie_on_ci.py
```python
from bs4 import BeautifulSoup
import urllib.request
from dotenv import load_dotenv
import json
import requests
# import pyperclip
import time
import datetime
import os
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')
from selenium import webdriver
# Imports for explicit waits
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
# selenium
chromedriver_dir=r'C:\Users\multicampus\Downloads\chromedriver\chromedriver.exe'
load_dotenv(verbose=True)
# CGV INFO
def updateCGV(url_option, tdate, cinema_pk):
global onscreen_pk
global onscreen_movie
CGV_ONSCREEN = []
tg_date = makeCGVDate(tdate)
iframe_base = 'http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?'
CGV_URL = 'http://www.cgv.co.kr'
iframe_url = iframe_base + url_option + '&date=' + tg_date
iframe_html = urllib.request.urlopen(iframe_url)
soup = BeautifulSoup(iframe_html, 'lxml')
movie_list = soup.find_all('div', {'class': 'col-times'})
for movie in movie_list:
# Movie info (link to the distributor's movie detail page)
movie_info = movie.find('div', {'class': 'info-movie'})
movie_atag = movie_info.find('a')
movie_href = movie_atag.get('href')
movie_code = getCGVMovieIdx(movie_href)
movie_name = getCGVMovieName(movie_code)
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['CGV'] = movie_code
else:
onscreen_movie[movie_name] = {
'CGV': movie_code
}
# Screening hall info
hall_list = movie.find_all('div', {'class': 'type-hall'})
for hall in hall_list:
hall_info = hall.find_all('li')
movie_d = getCGVStr(hall_info[0].text)
seat_total = getCGVStr(hall_info[2].text)[1:-1]
time_table = hall.find('div', {'class': 'info-timetable'})
atag_list = time_table.find_all('a')
for atag in atag_list:
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
}
new_onscreen_info_field = {}
atag_href = atag.get('href')
if atag_href == '/':
TICKET_URL = CGV_URL + '/ticket/?' + url_option + '&date=' + tg_date
seat_left = '준비중'
start_time = atag.find('em')
start_time = start_time.text
end_time = atag.find('span', {'class': 'end-time'}).text
end_time = deleteWord(end_time, 3, len(end_time))
info_hall = hall.find('div', {'class': 'info-hall'})
hall_name = info_hall.find_all('li')[1]
hall_name = getCGVStr(hall_name.text)
else:
TICKET_URL = CGV_URL + atag_href
start_time = atag.get('data-playstarttime')
start_time = makeStrtoTime(start_time)
end_time = atag.get('data-playendtime')
end_time = makeStrtoTime(end_time)
seat_left = atag.get('data-seatremaincnt')
hall_name = atag.get('data-screenkorname')
new_onscreen_info_field['cinema'] = cinema_pk
new_onscreen_info_field['movie'] = int(movie_code)
new_onscreen_info_field['cm_code'] = int(movie_code)
new_onscreen_info_field['date'] = tdate
new_onscreen_info_field['info'] = movie_d + ' | ' + hall_name
new_onscreen_info_field['start_time'] = start_time
new_onscreen_info_field['end_time'] = end_time
new_onscreen_info_field['total_seats'] = seat_total
new_onscreen_info_field['seats'] = seat_left
new_onscreen_info_field['url'] = TICKET_URL
new_onscreen_info['fields'] = new_onscreen_info_field
# print(new_onscreen_info)
CGV_ONSCREEN.append(new_onscreen_info)
onscreen_pk += 1
return CGV_ONSCREEN
def getCGVMovieName(tg_code):
CGV_MOVIE_DETAIL = 'http://www.cgv.co.kr/movies/detail-view/?midx='
detail_url = CGV_MOVIE_DETAIL + tg_code
detail_html = urllib.request.urlopen(detail_url)
detail_soup = BeautifulSoup(detail_html, 'lxml')
movie_name = detail_soup.find('div', {'class': 'title'})
res = movie_name.find('strong').text
return res
def getCGVStr(tg_text):
start_point = 0
tg_text_len = len(tg_text)
res = ''
for idx in range(tg_text_len):
if tg_text[idx] == ' ':
continue
elif tg_text[idx] == '\r':
continue
elif tg_text[idx] == '\n':
continue
else:
res += tg_text[idx]
return res
def getCGVMovieIdx(movie_url):
equal_idx = movie_url.index('=')
cgv_movie_code = movie_url[equal_idx+1:]
return cgv_movie_code
def makeStrtoTime(tg_str):
res = ''
tg_len = len(tg_str)
minute = tg_str[tg_len-2:]
hour = tg_str[:tg_len-2]
res = hour + ':' + minute
return res
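# Illustrative examples (not part of the original file) of the helper above:
#   makeStrtoTime('2130') -> '21:30'
#   makeStrtoTime('930')  -> '9:30'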
def deleteWord(tg_str, st_idx, end_idx):
new_str = tg_str[st_idx:end_idx]
return new_str
# MEGABOX INFO
def updateMEGABOX(tg_url, tg_date, cinema_pk):
global onscreen_pk
global onscreen_movie
TICKET_BASE = 'https://www.megabox.co.kr/booking/seat?playSchdlNo='
driver.get(tg_url)
time.sleep(2)
# Look up showtimes for tomorrow's date
dotdate = getDotDate(tg_date)
dayxPath = '//*[@date-data=\"' + dotdate + '\"]'
tmr_btn = driver.find_element_by_xpath(dayxPath)
tmr_btn.click()
time.sleep(2)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
movie_list = soup.find_all('div', {'class': 'theater-list'})
MEGABOX_ONSCREEN = []
for movie_col in movie_list:
movie_info = movie_col.find('div', {'class': 'theater-tit'})
movie_name = checkMegaName(movie_info.find_all('p')[1].text)
theater_type_list = movie_col.find_all('div', {'class': 'theater-type-box'})
for box in theater_type_list:
theater_type = box.find('div', {'class': 'theater-type'})
hall_name = theater_type.find('p', {'class': 'theater-name'}).text
total_seat = theater_type.find('p', {'class': 'chair'}).text[2:-1]
theater_time = box.find('div', {'class': 'theater-time'})
movie_d = theater_time.find('div', {'class': 'theater-type-area'}).text
movie_info = movie_d + ' | ' + hall_name
movie_timetable = theater_time.find_all('td')
for movie_time in movie_timetable:
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
}
new_field = {
'cinema': cinema_pk,
'movie': '',
'date': tg_date,
'info': movie_info,
'start_time': '',
'end_time': '',
'total_seats': total_seat,
'seats': '',
'url': tg_url
}
if movie_time.get('play-de') != deleteSlash(tg_date):
return []
if movie_time.get('class') and 'end-time' in movie_time.get('class'):  # BeautifulSoup returns the class attribute as a list
new_field['start_time'] = movie_time.find('p', {'class': 'time'}).text
new_field['seats'] = '매진'
else:
book_code = movie_time.get('play-schdl-no')
if book_code:
TICKET_URL = TICKET_BASE + book_code
else:
TICKET_URL = tg_url
movie_code = movie_time.get('rpst-movie-no')
# Record the movie as currently screening
if movie_name and movie_code:
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['MEGABOX'] = movie_code
else:
onscreen_movie[movie_name] = {
'MEGABOX': movie_code
}
play_info = movie_time.find('div', {'class': 'play-time'})
if play_info:
play_time = play_info.find('p').text
start_end = divideTime(play_time)
seat_left = movie_time.find('p', {'class': 'chair'}).text[:-1]
new_field['start_time'] = start_end[0]
new_field['end_time'] = start_end[1]
new_field['seats'] = seat_left
if movie_code:
new_field['movie'] = int(movie_code)
new_field['cm_code'] = int(movie_code)
else:
continue
new_field['url'] = TICKET_URL
new_onscreen_info['fields'] = new_field
MEGABOX_ONSCREEN.append(new_onscreen_info)
onscreen_pk += 1
return MEGABOX_ONSCREEN
def getDashDate(tg_date):
res = tg_date[:4] + '-' + tg_date[4:6] + '-' + tg_date[6:]
return res
def divideTime(tg_time):
divideIdx = tg_time.index('~')
res1 = tg_time[:divideIdx]
res2 = tg_time[divideIdx+1:]
return res1, res2
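# Illustrative example (not part of the original file) of the helper above:
#   divideTime('10:30~12:45') -> ('10:30', '12:45')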
def makeCGVDate(tg_date):
res = ''
for idx in range(len(tg_date)):
if tg_date[idx] == '-':
continue
else:
res += tg_date[idx]
return res
def checkMegaName(tg_str):
if tg_str[0] == '[':
endIdx = tg_str.index(']')
return tg_str[endIdx+2:]
elif tg_str[0] == '(':
endIdx = tg_str.index(')')
return tg_str[endIdx+2:]
else:
return tg_str
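# Illustrative examples (not part of the original file) of the helper above,
# which strips a leading "[...]" or "(...)" tag plus the following space:
#   checkMegaName('[IMAX] Dune') -> 'Dune'
#   checkMegaName('Dune')        -> 'Dune'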
def getDotDate(tdate):
res = ''
for idx in range(len(tdate)):
if tdate[idx] == '-':
res += '.'
else:
res += tdate[idx]
return res
def updateLOTTE(tg_url, tg_date, cinema_pk):
global onscreen_pk
global onscreen_movie
driver.get(tg_url)
time.sleep(2)
ck_source = driver.page_source
ck_soup = BeautifulSoup(ck_source, 'html.parser')
ck_layer = ck_soup.find('div', {'id': 'layerGetPopup'})
if ck_layer.text:
popupLayer = driver.find_element_by_id('layerGetPopup')
ck_btn = popupLayer.find_element_by_class_name('btn_close.btnCloseLayer')
ck_btn.click()
time.sleep(1)
day_list = driver.find_elements_by_class_name('date')
ck_date = str(int(tg_date[-2:]))
LOTTE_ONSCREEN = []
# Look up showtimes for tomorrow's date
for day in day_list:
day_text = day.find_element_by_tag_name('strong').text
if day_text == ck_date:
tg_btn = day.find_element_by_tag_name('label')
tg_btn.click()
time.sleep(2)
break
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
movie_list = soup.find_all('div', {'class': 'time_select_wrap ty2 timeSelect'})
for movie in movie_list:
movie_tit = movie.find('div', {'class': 'list_tit'})
movie_name = movie_tit.find('p').text
if movie_name == '테스트콘텐츠':
continue
movie_atag = movie_tit.find('a')
movie_href = movie_atag.get('href')
movie_code = findLotteCode(movie_href)
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['LOTTE'] = movie_code
else:
onscreen_movie[movie_name] = {
'LOTTE': movie_code
}
movie_info_ul = movie.find('ul', {'class': 'list_hall mt20'})
movie_info_li = movie_info_ul.find_all('li')
movie_info_list = []
for info_li in movie_info_li:
movie_info_list.append(info_li.text)
movie_info = ' | '.join(movie_info_list)
timetable_ul = movie.find('ul', {'class': 'list_time'})
timetable_atag_list = timetable_ul.find_all('li')
for timetable_info in timetable_atag_list:
time_info = timetable_info.find('dd', {'class': 'time'})
start_time = time_info.find('strong').text
end_time_info = time_info.find('div', {'class': 'tooltip'}).text
end_time = strBeforeSpace(end_time_info)
seat_info = timetable_info.find('dd', {'class': 'seat'})
seat_left = seat_info.find('strong').text
seat_total = strBeforeSpace(seat_info.text)
hall_info = timetable_info.find('dd', {'class': 'hall'}).text
new_movie_info = movie_info + ' | ' + hall_info
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
'fields': {
'cinema': cinema_pk,
'movie': int(movie_code),
'date': tg_date,
'info': new_movie_info,
'start_time': start_time,
'end_time': end_time,
'total_seats': seat_total,
'seats': seat_left,
'url': tg_url,
'cm_code': int(movie_code)
}
}
onscreen_pk += 1
LOTTE_ONSCREEN.append(new_onscreen_info)
return LOTTE_ONSCREEN
def findLotteCode(tg_href):
idx = 0
for i in range(len(tg_href)):
if tg_href[i] == '=':
idx = i
break
if idx:
return tg_href[idx+1:]
def strBeforeSpace(tg_str):
idx = 0
for i in range(len(tg_str)-1, -1, -1):
if tg_str[i] == ' ':
idx = i+1
break
return tg_str[idx:]
def updateETC(tg_url, tg_date, cinema_pk):
global onscreen_pk
global onscreen_movie
if cinema_pk == 75 or cinema_pk == 84:
driver.get(tg_url)
time.sleep(3)
# Find tomorrow's date tab
tommorow_btn = driver.find_element_by_xpath('//*[@id="content"]/div[2]/div/div[1]/ul/li[3]/a')
tommorow_btn.click()
time.sleep(1)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
time_box = soup.find('div', {'class': 'theater-movie'})
movie_list = time_box.find_all('div', {'class': 'each-movie-time'})
CINEQ_ONSCREEN = []
for movie_div in movie_list:
movie_title = movie_div.find('div', {'class': 'title'})
movie_grade = movie_title.find('span').get('class')
movie_name = getMovieName(movie_title.text, movie_grade[0])
hall_list = movie_div.find_all('div', {'class': 'screen'})
for hall in hall_list:
hall_name = hall.find('div', {'class': 'screen-name'})
hall_info = hall_name.text
time_div = hall.find('div', {'class': 'time-block'})
time_list = time_div.find_all('div', {'class': 'time'})
for time_info in time_list:
movie_code = time_info.get('data-moviecode')
if not movie_code:
continue
else:
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['CINEQ'] = str(int(movie_code))
else:
onscreen_movie[movie_name] = {
'CINEQ': str(int(movie_code))
}
end_time = time_info.find('span', {'class': 'to'}).text[3:]
seat_info = time_info.find('span', {'class': 'seats-status'}).text
seat_left, seat_total = getSeatInfo(seat_info)
start_text = time_info.find('a').text
start_time = getCineqTime(start_text)
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
'fields': {
'cinema': cinema_pk,
'movie': int(movie_code),
'date': tg_date,
'info': hall_info,
'start_time': start_time,
'end_time': end_time,
'total_seats': seat_total,
'seats': seat_left,
'url': tg_url,
'cm_code': int(movie_code)
}
}
onscreen_pk += 1
CINEQ_ONSCREEN.append(new_onscreen_info)
return CINEQ_ONSCREEN
else:
def getHallInfo(tg_str):
res1 = ''
res2 = ''
for i in range(len(tg_str)):
if tg_str[i] == '관' and res1 == '':
res1 = tg_str[:i+1]
elif tg_str[i] == ' ' and res2 == '':
res2 = tg_str[i+1:]
return res1, res2
def getEndTime(tg_str):
res = ''
for i in range(len(tg_str)):
if tg_str[i] == '~':
res = tg_str[i+2:]
break
return res
def renameYesTitle(tg_str):
res = tg_str
if res[len(tg_str)-1] == ')':
idx = res.index('(')
res = res[:idx-1]
if res[0] == '[':
idx = res.index(']')
res = res[idx+2:]
return res
TICKET_BASE = 'https://movie.yes24.com/Movie/Ticket?gId=&'
YES_ONSCREEN = []
driver.get(tg_url)
until_time = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.CLASS_NAME,"time_sel_cont")))
time.sleep(2)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
if not soup.find('div', {'class': 'show_time_slide'}):
return []
# Click the next-day button
ck = True
tg_day = str(int(tg_date[-2:]))
day_list = driver.find_elements_by_class_name('show_time_slide')
for day in day_list:
span_text = day.find_element_by_tag_name('span').text
if span_text == tg_day:
ck = False
day.click()
time.sleep(1)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
break
if ck:
return []
time_container = soup.find('div', {'class': 'time_sel_cont'})
title_list = time_container.find_all('div', {'class': 'tit'})
time_list = time_container.find_all('ul', {'class': 'time_sel_list'})
if len(title_list) == 0:
return []
for idx in range(len(title_list)):
title = title_list[idx]
title_text = title.text
hall_info, movie_title = getHallInfo(title_text)
movie_name = renameYesTitle(movie_title)
timetable = time_list[idx]
li_list = timetable.find_all('li')
for li in li_list:
atag = li.find('a', {'class': 'time_info_box'})
pdate = atag.get('playdate')
if pdate == tg_date:
reserve_option = {
"mId" : atag.get('mid'),
"tId" : atag.get('tid'),
"playDate": deleteSlash(tg_date),
"pno": atag.get('ptid'),
}
movie_code = reserve_option['mId'][1:]
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['YES'] = movie_code
else:
onscreen_movie[movie_name] = {
'YES': movie_code
}
book_option = urllib.parse.urlencode(reserve_option)
movie_url = TICKET_BASE + book_option
time_info = atag.find('div', {'class': 'time_info'})
start_time = time_info.find('div', {'class': 'time_start'}).text
playing_time = time_info.find('div', {'class': 'running_time'}).text
end_time = getEndTime(playing_time)
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
'fields': {
'cinema': cinema_pk,
'movie': int(movie_code),
'date': tg_date,
'info': hall_info,
'start_time': start_time,
'end_time': end_time,
'total_seats': '',
'seats': '',
'url': movie_url,
'cm_code': int(movie_code)
}
}
onscreen_pk += 1
YES_ONSCREEN.append(new_onscreen_info)
return YES_ONSCREEN
def getCineqTime(tg_str):
res = ''
ck = False
for i in range(len(tg_str)):
if tg_str[i] == ' ':
continue
elif tg_str[i] == '\n':
continue
elif tg_str[i] == ':':
ck = True
res += tg_str[i]
else:
if not ck:
res += tg_str[i]
else:
res += tg_str[i: i+2]
break
if len(res) < 5:
res = '0' + res
return res
def getSeatInfo(tg_str):
for i in range(len(tg_str)):
if tg_str[i] == '/':
return tg_str[:i-1], tg_str[i+2:]
return '', ''
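# Illustrative example (not part of the original file) of the helper above,
# which splits a "remaining / total" seat string:
#   getSeatInfo('12 / 100') -> ('12', '100')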
def getMovieName(tg_title, tg_grade):
start_idx = 2
if tg_grade == 'rate-all':
start_idx = 1
res = tg_title[start_idx:-1]
if res[len(res)-1] == ')':
end_idx = res.index('(') -1
res = res[:end_idx]
return res
def deleteSlash(tg_str):
res = ''
for i in range(len(tg_str)):
if tg_str[i] == '-':
continue
else:
res += tg_str[i]
return res
today = datetime.date.today()
tr = today + datetime.timedelta(days=1)
tommorow = tr.strftime('%Y-%m-%d')
change_time = {
"24": "00",
"25": "01",
"26": "02",
"27": "03",
"28": "04",
"29": "05",
"30": "06"
}
onscreen_pk = int(tr.strftime('%Y%m%d0001')[2:])
def getScreenInfo():
global cinemas
global on_screen
global onscreen_movie
global driver
with open('cinemas.json', 'r', encoding='UTF-8-sig') as fr:
cinemas = json.load(fr)
with open('07_on_screen_today.json', 'r', encoding='UTF-8') as fr:
on_screen = json.load(fr)
with open('07_movie_dict_today.json', 'r', encoding='UTF-8') as fr:
onscreen_movie = json.load(fr)
driver = webdriver.Chrome(chromedriver_dir)
for cinema in cinemas:
base_url = cinema['fields']['url']
company = cinema['fields']['type']
new_on_screen = []
if company == 'CGV':
base_url_info = urllib.parse.urlsplit(base_url).query
new_on_screen = updateCGV(base_url_info, tommorow, cinema['pk'])
elif company == '메가박스':
new_on_screen = updateMEGABOX(base_url, tommorow, cinema['pk'])
elif company == '롯데시네마':
new_on_screen = updateLOTTE(base_url, tommorow, cinema['pk'])
else:
new_on_screen = updateETC(base_url, tommorow, cinema['pk'])
if new_on_screen:
for screen_info in new_on_screen:
for t in ['start_time', 'end_time']:
movie_time = screen_info['fields'][t]
if movie_time:
movie_hour = movie_time[:2]
if change_time.get(movie_hour):
screen_info['fields'][t] = change_time[movie_hour] + movie_time[2:]
# else:
# screen_info['fields'][t] = ''
on_screen.append(screen_info)
driver.quit()
with open('07_on_screen.json', 'w', encoding='UTF-8') as fp:
json.dump(on_screen, fp, ensure_ascii=False, indent=4)
with open('07_movie_dict.json', 'w', encoding='UTF-8') as fp:
json.dump(onscreen_movie, fp, ensure_ascii=False, indent=4)
``` |
{
"source": "JisunParkRea/djangotube_tutorial",
"score": 2
} |
#### File: video/tests/test_views.py
```python
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from video.models import Video
from video.views import my_video
class VideoListViewTest(TestCase):
@classmethod
def setUpTestData(cls):
# Set up non-modified objects used by all test methods
test_user = User.objects.create_user(username='test_user', email='<EMAIL>', password='<PASSWORD>')
Video.objects.create(author=test_user, title='코로나 국민예방수칙', category='info', video_key='G7rBBWeunHM')
def test_view_url_exists_at_desired_location(self):
response = self.client.get('/video/')
self.assertEqual(response.status_code, 200)
def test_view_url_accessible_by_name(self):
response = self.client.get(reverse('video_list'))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse('video_list'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'video/video_list.html')
class MyVideoListViewTest(TestCase):
def setUp(self):
# Create user
test_user = User.objects.create_user(username='test_user', email='<EMAIL>', password='<PASSWORD>')
test_user.save()
# Create video
Video.objects.create(author=test_user, title='코로나 국민예방수칙', category='info', video_key='G7rBBWeunHM')
def test_redirect_if_not_logged_in(self):
response = self.client.get(reverse('my_video'))
self.assertRedirects(response, '/video/login/?next=/video/myVideo/')
def test_logged_in_uses_correct_template(self):
self.client.login(username='test_user', password='<PASSWORD>')
response = self.client.get(reverse('my_video'))
# Check our user is logged in
self.assertEqual(str(response.context['user']), 'test_user')
# Check that we got a response "success"
self.assertEqual(response.status_code, 200)
# Check we used correct template
self.assertTemplateUsed(response, 'video/my_video.html')
``` |
{
"source": "JisunParkRea/naverSearchAPI_practice",
"score": 2
} |
#### File: naverSearchAPI_practice/book/searchAPI.py
```python
import os, json
import sys
import urllib.request
from django.core.exceptions import ImproperlyConfigured
def search_local_cafe(where):
# Get CLIENT_ID, CLIENT_SECRET from secrets.json
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
secret_file = os.path.join(BASE_DIR, 'secrets.json') # path to the secrets.json file
with open(secret_file) as f:
secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
try:
return secrets['naver'][setting]
except KeyError:
error_msg = "Set the {} environment variable".format(setting)
raise ImproperlyConfigured(error_msg)
client_id = get_secret("CLIENT_ID")
client_secret = get_secret("CLIENT_SECRET")
search_word = where # search keyword
search_word += "카페"
encText = urllib.parse.quote(search_word)
url = "https://openapi.naver.com/v1/search/local?query=" + encText + "&display=5&sort=comment" # json 결과
request = urllib.request.Request(url)
request.add_header("X-Naver-Client-Id",client_id)
request.add_header("X-Naver-Client-Secret",client_secret)
response = urllib.request.urlopen(request)
rescode = response.getcode()
if(rescode==200):
response_body = response.read()
res = response_body.decode('utf-8')
jres = json.loads(res)
#print(jres['items'])
return jres['items']
else:
return rescode
#print("Error Code:" + rescode)
``` |
{
"source": "jit9/simonsobs-sotodlib",
"score": 2
} |
#### File: sotodlib/pipeline_tools/hardware.py
```python
import numpy as np
from toast.pipeline_tools import Telescope, Focalplane, Site, Schedule, CES
from toast.timing import function_timer, Timer
from toast.utils import Logger
from .. import hardware
FOCALPLANE_RADII_DEG = {"LAT" : 3.6, "SAT0" : 17.8, "SAT1" : 17.8, "SAT2" : 17.8, "SAT3" : 17.2}
class SOTelescope(Telescope):
def __init__(self, name):
site = Site("Atacama", lat="-22.958064", lon="-67.786222", alt=5200)
super().__init__(name, site=site)
self.id = {
# Use the same telescope index for all SATs to re-use the
# atmospheric simulation
#'LAT' : 0, 'SAT0' : 1, 'SAT1' : 2, 'SAT2' : 3, 'SAT3' : 4
"LAT": 0,
"SAT0": 4,
"SAT1": 4,
"SAT2": 4,
"SAT3": 4,
}[name]
def add_hw_args(parser):
parser.add_argument(
"--hardware", required=False, default=None, help="Input hardware file"
)
parser.add_argument(
"--thinfp",
required=False,
type=np.int,
help="Thin the focalplane by this factor",
)
parser.add_argument(
"--bands",
required=True,
help="Comma-separated list of bands: LF1 (27GHz), LF2 (39GHz), "
"MFF1 (93GHz), MFF2 (145GHz), MFS1 (93GHz), MFS2 (145GHz), "
"UHF1 (225GHz), UHF2 (280GHz). "
"Length of list must equal --tubes",
)
parser.add_argument(
"--tubes",
required=True,
help="Comma-separated list of optics tubes: LT0 (UHF), LT1 (UHF), "
" LT2 (MFF), LT3 (MFF), LT4 (MFS), LT5 (MFS), LT6 (LF). "
"Length of list must equal --bands",
)
return
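# Illustrative usage sketch (not part of the original file): wiring these
# options into an argparse parser might look like
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_hw_args(parser)
#   args = parser.parse_args(['--bands', 'LF1', '--tubes', 'LT6'])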
class BandParams:
def __init__(self, band_name, band_data):
self.name = band_name
self.net = band_data["NET"] * 1e-6 # uK -> K
self.fknee = band_data["fknee"] * 1e-3 # mHz -> Hz
self.fmin = band_data["fmin"] * 1e-3 # mHz -> Hz
# self.alpha = banddata[band]["alpha"]
self.alpha = 1 # hardwire a sensible number. 3.5 is not realistic.
self.A = band_data["A"]
self.C = band_data["C"]
self.lower = band_data["low"] # GHz
self.center = band_data["center"] # GHz
self.upper = band_data["high"] # GHz
return
class DetectorParams:
def __init__(self, det_data, band, wafer, tube, telescope, index):
"""
Args:
det_data (dict) : Dictionary of detector parameters
from sotodlib.hardware
band (BandParams) : band parameters act as defaults
wafer (int) : wafer number
tube (str) : tube name
telescope (str) : telescope name
index (int) : RNG index
"""
def get_par(key, default, scale=1):
if key in det_data:
return det_data[key] * scale
else:
return default
self.band = band.name
self.det_data = det_data
self.net = get_par("NET", band.net, 1e-6) # uK -> K
self.fknee = get_par("fknee", band.fknee, 1e-3) # mHz -> Hz
self.fmin = get_par("fmin", band.fmin, 1e-3) # mHz -> Hz
self.alpha = get_par("alpha", band.alpha)
self.alpha = 1 # hardwire a sensible number. 3.5 is not realistic.
self.A = get_par("A", band.A)
self.C = get_par("C", band.C)
self.lower = get_par("low", band.lower) # GHz
self.center = get_par("center", band.center) # GHz
self.upper = get_par("high", band.upper) # GHz
self.center = 0.5 * (self.lower + self.upper)
self.width = self.upper - self.lower
self.wafer = wafer
self.tube = tube
self.telescope = telescope
self.index = index
return
def get_dict(self):
det_dict = {
"NET": self.net,
"fknee": self.fknee,
"fmin": self.fmin,
"alpha": self.alpha,
"A": self.A,
"C": self.C,
"quat": self.det_data["quat"],
"fwhm": self.det_data["fwhm"],
"freq": self.center,
"bandcenter_ghz": self.center,
"bandwidth_ghz": self.width,
"index": self.index,
"telescope": self.telescope,
"tube": self.tube,
"wafer": self.wafer,
"band": self.band,
}
return det_dict
@function_timer
def get_hardware(args, comm, verbose=False):
""" Get the hardware configuration, either from file or by simulating.
Then trim it down to the bands that were selected.
"""
log = Logger.get()
telescope = get_telescope(args, comm, verbose=verbose)
if comm.world_rank == 0:
if args.hardware:
log.info(
"Loading hardware configuration from {}..." "".format(args.hardware)
)
hw = hardware.Hardware(args.hardware)
else:
log.info("Simulating default hardware configuration")
hw = hardware.get_example()
hw.data["detectors"] = hardware.sim_telescope_detectors(hw, telescope.name)
# Construct a running index for all detectors across all
# telescopes for independent noise realizations
det_index = {}
for idet, det in enumerate(sorted(hw.data["detectors"])):
det_index[det] = idet
match = {"band": args.bands.replace(",", "|")}
tubes = args.tubes.split(",")
# If one provides both telescopes and tubes, the tubes matching *either*
# will be concatenated
#hw = hw.select(telescopes=[telescope.name], tubes=tubes, match=match)
hw = hw.select(tubes=tubes, match=match)
if args.thinfp:
# Only accept a fraction of the detectors for
# testing and development
delete_detectors = []
for det_name in hw.data["detectors"].keys():
if (det_index[det_name] // 2) % args.thinfp != 0:
delete_detectors.append(det_name)
for det_name in delete_detectors:
del hw.data["detectors"][det_name]
ndetector = len(hw.data["detectors"])
if ndetector == 0:
raise RuntimeError(
"No detectors match query: telescope={}, "
"tubes={}, match={}".format(telescope, tubes, match)
)
log.info(
"Telescope = {} tubes = {} bands = {}, thinfp = {} matches {} detectors"
"".format(telescope.name, args.tubes, args.bands, args.thinfp, ndetector)
)
else:
hw = None
det_index = None
if comm.comm_world is not None:
hw = comm.comm_world.bcast(hw)
det_index = comm.comm_world.bcast(det_index)
return hw, telescope, det_index
@function_timer
def get_telescope(args, comm, verbose=False):
""" Determine which telescope matches the detector selections
"""
telescope = None
if comm.world_rank == 0:
hwexample = hardware.get_example()
tubes = args.tubes.split(",")
for tube in tubes:
for telescope_name, telescope_data in hwexample.data[
"telescopes"
].items():
if tube in telescope_data["tubes"]:
if telescope is None:
telescope = SOTelescope(telescope_name)
elif telescope.name != telescope_name:
raise RuntimeError(
"Tubes '{}' span more than one telescope".format(tubes)
)
break
if telescope is None:
raise RuntimeError(
"Failed to match tube = '{}' with a telescope".format(tube)
)
if comm.comm_world is not None:
telescope = comm.comm_world.bcast(telescope)
return telescope
def get_focalplane(args, comm, hw, det_index, verbose=False):
""" Translate hardware configuration into a TOAST focalplane dictionary
"""
if comm.world_rank == 0:
detector_data = {}
band_params = {}
for band_name, band_data in hw.data["bands"].items():
band_params[band_name] = BandParams(band_name, band_data)
# User may force the effective focal plane radius to be larger
# than the default. This will widen the size of the simulated
# atmosphere but has no other effect for the time being.
fpradius = None
try:
fpradius = args.focalplane_radius_deg
except:
pass
if fpradius is None:
fpradius = 0
for det_name, det_data in hw.data["detectors"].items():
# RNG index for this detector
index = det_index[det_name]
wafer = det_data["wafer"]
# Determine which tube has this wafer
for tube_name, tube_data in hw.data["tubes"].items():
if wafer in tube_data["wafers"]:
break
# Determine which telescope has this tube
for telescope_name, telescope_data in hw.data["telescopes"].items():
if tube_name in telescope_data["tubes"]:
break
fpradius = max(fpradius, FOCALPLANE_RADII_DEG[telescope_name])
det_params = DetectorParams(
det_data,
band_params[det_data["band"]],
wafer,
tube_name,
telescope_name,
index,
)
detector_data[det_name] = det_params.get_dict()
# Create a focal plane in the telescope
focalplane = Focalplane(
detector_data=detector_data,
sample_rate=args.sample_rate,
radius_deg=fpradius,
)
else:
focalplane = None
if comm.comm_world is not None:
focalplane = comm.comm_world.bcast(focalplane)
return focalplane
@function_timer
def load_focalplanes(args, comm, schedules, verbose=False):
""" Attach a focalplane to each of the schedules.
Args:
schedules (list) : List of Schedule instances.
Each schedule has two members, telescope
and ceslist, a list of CES objects.
Returns:
detweights (dict) : Inverse variance noise weights for every
detector across all focal planes. In [K_CMB^-2].
They can be used to bin the TOD.
"""
# log = Logger.get()
timer = Timer()
timer.start()
# Load focalplane information
timer1 = Timer()
timer1.start()
hw, telescope, det_index = get_hardware(args, comm, verbose=verbose)
focalplane = get_focalplane(args, comm, hw, det_index, verbose=verbose)
telescope.focalplane = focalplane
if comm.world_rank == 0 and verbose:
timer1.report_clear("Collect focaplane information")
for schedule in schedules:
# Replace the telescope created from reading the observing schedule but
# keep the weather object
weather = schedule.telescope.site.weather
schedule.telescope = telescope
schedule.telescope.site.weather = weather
detweights = telescope.focalplane.detweights
timer.stop()
if (comm.comm_world is None or comm.world_rank == 0) and verbose:
timer.report("Loading focalplane")
return detweights
```
#### File: simonsobs-sotodlib/tests/test_hardware.py
```python
import os
from unittest import TestCase
from collections import OrderedDict
from ._helpers import create_outdir
from sotodlib.hardware.config import Hardware, get_example
from sotodlib.hardware.sim import (sim_wafer_detectors,
sim_telescope_detectors)
from sotodlib.hardware.vis import plot_detectors
class HardwareTest(TestCase):
def setUp(self):
fixture_name = os.path.splitext(os.path.basename(__file__))[0]
self.outdir = create_outdir(fixture_name)
self.skip_plots = False
if "SOTODLIB_TEST_DISABLE_PLOTS" in os.environ:
self.skip_plots = os.environ["SOTODLIB_TEST_DISABLE_PLOTS"]
def test_config_example(self):
outpath = os.path.join(self.outdir, "hardware_example.toml.gz")
hw = get_example()
hw.dump(outpath, overwrite=True, compress=True)
hwcheck = Hardware(outpath)
checkpath = os.path.join(self.outdir, "hardware_example_check.toml.gz")
hwcheck.dump(checkpath, overwrite=True, compress=True)
return
def test_sim_wafer(self):
hw = get_example()
# Simulate all wafers
for tele, teleprops in hw.data["telescopes"].items():
platescale = teleprops["platescale"]
fwhm = teleprops["fwhm"]
for tube in teleprops["tubes"]:
tubeprops = hw.data["tubes"][tube]
for wafer in tubeprops["wafers"]:
outpath = os.path.join(
self.outdir, "wafer_{}.toml.gz".format(wafer))
dets = sim_wafer_detectors(hw, wafer, platescale, fwhm)
# replace detectors with this set for dumping
hw.data["detectors"] = dets
hw.dump(outpath, overwrite=True, compress=True)
if not self.skip_plots:
outpath = os.path.join(self.outdir,
"wafer_{}.pdf".format(wafer))
plot_detectors(dets, outpath, labels=True)
return
def test_sim_telescope(self):
hw = get_example()
for tele, teleprops in hw.data["telescopes"].items():
hw.data["detectors"] = sim_telescope_detectors(hw, tele)
outpath = os.path.join(self.outdir,
"telescope_{}.toml.gz".format(tele))
hw.dump(outpath, overwrite=True, compress=True)
if not self.skip_plots:
outpath = os.path.join(self.outdir,
"telescope_{}.pdf".format(tele))
plot_detectors(hw.data["detectors"], outpath, labels=False)
return
def test_sim_full(self):
hw = get_example()
hw.data["detectors"] = OrderedDict()
for tele, teleprops in hw.data["telescopes"].items():
dets = sim_telescope_detectors(hw, tele)
hw.data["detectors"].update(dets)
dbpath = os.path.join(self.outdir, "hardware.toml.gz")
hw.dump(dbpath, overwrite=True, compress=True)
check = Hardware(dbpath)
# Test selection of 90GHz detectors on wafers 25 and 26 which have
# "A" polarization configuration and are located in pixels 20-29.
wbhw = hw.select(
match={"wafer": ["25", "26"],
"band": "MF.1",
"pol": "A",
"pixel": "02."})
dbpath = os.path.join(self.outdir, "w25-26_b1_p20-29_A.toml.gz")
wbhw.dump(dbpath, overwrite=True, compress=True)
check = Hardware(dbpath)
self.assertTrue(len(check.data["detectors"]) == 20)
chkpath = os.path.join(self.outdir, "w25-26_b1_p20-29_A.txt")
with open(chkpath, "w") as f:
for d in check.data["detectors"]:
f.write("{}\n".format(d))
# Test selection of pixels on 27GHz wafer 44.
lfhw = hw.select(
match={"wafer": ["44"],
"pixel": "00."})
dbpath = os.path.join(self.outdir, "w44_bLF1_p000-009.toml.gz")
lfhw.dump(dbpath, overwrite=True, compress=True)
check = Hardware(dbpath)
self.assertTrue(len(check.data["detectors"]) == 40)
chkpath = os.path.join(self.outdir, "w44_bLF1_p000-009.txt")
with open(chkpath, "w") as f:
for d in check.data["detectors"]:
f.write("{}\n".format(d))
return
``` |
{
"source": "jitakirin/fabtools",
"score": 2
} |
#### File: fabtools/openvz/contextmanager.py
```python
from contextlib import contextmanager
import hashlib
import os
import posixpath
import tempfile
from fabric.api import (
env,
hide,
output,
settings,
sudo,
)
from fabric.operations import (
_AttributeString,
_execute,
_prefix_commands,
_prefix_env_vars,
_shell_wrap,
_sudo_prefix,
)
from fabric.state import default_channel
from fabric.utils import error
import fabric.operations
import fabric.sftp
from fabric.context_managers import (
quiet as quiet_manager,
warn_only as warn_only_manager,
)
@contextmanager
def guest(name_or_ctid):
"""
Context manager to run commands inside a guest container.
Supported basic operations are: `run`_, `sudo`_ and `put`_.
.. warning:: commands executed with ``run()`` will be run as
**root** inside the container.
Use ``sudo(command, user='foo')`` to run them as
an unprivileged user.
Example::
from fabtools.openvz import guest
with guest('foo'):
run('hostname')
sudo('whoami', user='alice')
put('files/hello.txt')
.. _run: http://docs.fabfile.org/en/1.4.3/api/core/operations.html#fabric.operations.run
.. _sudo: http://docs.fabfile.org/en/1.4.3/api/core/operations.html#fabric.operations.sudo
.. _put: http://docs.fabfile.org/en/1.4.3/api/core/operations.html#fabric.operations.put
"""
# Monkey patch fabric operations
_orig_run_command = fabric.operations._run_command
_orig_put = fabric.sftp.SFTP.put
def run_guest_command(command, shell=True, pty=True, combine_stderr=True,
sudo=False, user=None, quiet=False, warn_only=False,
stdout=None, stderr=None, group=None, timeout=None):
"""
Run command inside a guest container
"""
# Use a non-login shell
_orig_shell = env.shell
env.shell = '/bin/bash -c'
# Use double quotes for the sudo prompt
_orig_sudo_prefix = env.sudo_prefix
env.sudo_prefix = 'sudo -S -p "%(sudo_prompt)s" '
# Try to cd to the user's home directory for consistency,
# as the default directory is "/" with "vzctl exec2"
if not env.cwd:
env.command_prefixes.insert(0, 'cd 2>/dev/null || true')
# Build the guest command
guest_command = _shell_wrap_inner(
_prefix_commands(_prefix_env_vars(command), 'remote'),
True,
_sudo_prefix(user) if sudo and user else None
)
host_command = "vzctl exec2 %s '%s'" % (name_or_ctid, guest_command)
# Restore env
env.shell = _orig_shell
env.sudo_prefix = _orig_sudo_prefix
if not env.cwd:
env.command_prefixes.pop(0)
# Run host command as root
return _run_host_command(host_command, shell=shell, pty=pty,
combine_stderr=combine_stderr)
def put_guest(self, local_path, remote_path, use_sudo, mirror_local_mode,
mode, local_is_path):
"""
Upload file to a guest container
"""
pre = self.ftp.getcwd()
pre = pre if pre else ''
if local_is_path and self.isdir(remote_path):
basename = os.path.basename(local_path)
remote_path = posixpath.join(remote_path, basename)
if output.running:
print("[%s] put: %s -> %s" % (
env.host_string,
local_path if local_is_path else '<file obj>',
posixpath.join(pre, remote_path)
))
# Have to bounce off FS if doing file-like objects
fd, real_local_path = None, local_path
if not local_is_path:
fd, real_local_path = tempfile.mkstemp()
old_pointer = local_path.tell()
local_path.seek(0)
file_obj = os.fdopen(fd, 'wb')
file_obj.write(local_path.read())
file_obj.close()
local_path.seek(old_pointer)
# Use temporary file with a unique name on the host machine
guest_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(name_or_ctid)
hasher.update(guest_path)
host_path = hasher.hexdigest()
# Upload the file to host machine
rattrs = self.ftp.put(real_local_path, host_path)
# Copy file to the guest container
with settings(hide('everything'), cwd=""):
cmd = "cat \"%s\" | vzctl exec \"%s\" 'cat - > \"%s\"'" \
% (host_path, name_or_ctid, guest_path)
_orig_run_command(cmd, sudo=True)
# Revert to original remote_path for return value's sake
remote_path = guest_path
# Clean up
if not local_is_path:
os.remove(real_local_path)
# Handle modes if necessary
if (local_is_path and mirror_local_mode) or (mode is not None):
lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
lmode = lmode & 07777
rmode = rattrs.st_mode & 07777
if lmode != rmode:
with hide('everything'):
sudo('chmod %o \"%s\"' % (lmode, remote_path))
return remote_path
fabric.operations._run_command = run_guest_command
fabric.sftp.SFTP.put = put_guest
yield
# Monkey unpatch
fabric.operations._run_command = _orig_run_command
fabric.sftp.SFTP.put = _orig_put
@contextmanager
def _noop():
yield
def _run_host_command(command, shell=True, pty=True, combine_stderr=True,
quiet=False, warn_only=False, stdout=None, stderr=None,
timeout=None):
"""
Run host wrapper command as root
(Modified from fabric.operations._run_command to ignore prefixes,
path(), cd(), and always use sudo.)
"""
manager = _noop
if warn_only:
manager = warn_only_manager
# Quiet's behavior is a superset of warn_only's, so it wins.
if quiet:
manager = quiet_manager
with manager():
# Set up new var so original argument can be displayed verbatim later.
given_command = command
# Handle context manager modifications, and shell wrapping
wrapped_command = _shell_wrap(
command, # !! removed _prefix_commands() & _prefix_env_vars()
shell,
_sudo_prefix(None) # !! always use sudo
)
# Execute info line
which = 'sudo' # !! always use sudo
if output.debug:
print("[%s] %s: %s" % (env.host_string, which, wrapped_command))
elif output.running:
print("[%s] %s: %s" % (env.host_string, which, given_command))
# Actual execution, stdin/stdout/stderr handling, and termination
result_stdout, result_stderr, status = _execute(
channel=default_channel(), command=wrapped_command, pty=pty,
combine_stderr=combine_stderr, invoke_shell=False, stdout=stdout,
stderr=stderr, timeout=timeout)
# Assemble output string
out = _AttributeString(result_stdout)
err = _AttributeString(result_stderr)
# Error handling
out.failed = False
out.command = given_command
out.real_command = wrapped_command
if status not in env.ok_ret_codes:
out.failed = True
msg = "%s() received nonzero return code %s while executing" % (
which, status
)
if env.warn_only:
msg += " '%s'!" % given_command
else:
msg += "!\n\nRequested: %s\nExecuted: %s" % (
given_command, wrapped_command
)
error(message=msg, stdout=out, stderr=err)
# Attach return code to output string so users who have set things to
# warn only, can inspect the error code.
out.return_code = status
# Convenience mirror of .failed
out.succeeded = not out.failed
# Attach stderr for anyone interested in that.
out.stderr = err
return out
def _shell_wrap_inner(command, shell=True, sudo_prefix=None):
"""
Conditionally wrap given command in env.shell (while honoring sudo.)
(Modified from fabric.operations._shell_wrap to avoid double escaping,
as the wrapping host command would also get shell escaped.)
"""
# Honor env.shell, while allowing the 'shell' kwarg to override it (at
# least in terms of turning it off.)
if shell and not env.use_shell:
shell = False
# Sudo plus space, or empty string
if sudo_prefix is None:
sudo_prefix = ""
else:
sudo_prefix += " "
# If we're shell wrapping, prefix shell and space, escape the command and
# then quote it. Otherwise, empty string.
if shell:
shell = env.shell + " "
command = '"%s"' % command # !! removed _shell_escape() here
else:
shell = ""
# Resulting string should now have correct formatting
return sudo_prefix + shell + command
``` |
{
"source": "jitakirin/python-cli-parsers",
"score": 3
} |
#### File: pcpd/argh_demo/simple.py
```python
import argh
@argh.arg('msg', help='message to return')
def hello(msg):
"""hello"""
yield msg
if __name__ == '__main__':
argh.dispatch_command(hello)
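# Illustrative command-line invocation (not part of the original file),
# assuming this script is saved as simple.py:
#   $ python simple.py "hello world"
#   hello world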
```
#### File: pcpd/defopt_demo/simple.py
```python
from defopt import run
def hello(msg):
"""hello
:param str msg: message to return
"""
print(msg)
if __name__ == '__main__':
run(hello)
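# Illustrative command-line invocation (not part of the original file),
# assuming this script is saved as simple.py:
#   $ python simple.py "hello world"
#   hello world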
``` |
{
"source": "jitakizushi/pyle",
"score": 3
} |
#### File: jitakizushi/pyle/pyle.py
```python
__version__ = "0.2"
import argparse
import cStringIO as StringIO
import re
import sh
import sys
import traceback
STANDARD_MODULES = {
're': re,
'sh': sh
}
def truncate_ellipsis(line, length=30):
"""Truncate a line to the specified length followed by ``...`` unless its shorter than length already."""
l = len(line)
return line if l < length else line[:length - 3] + "..."
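# Illustrative examples (not part of the original file) of the helper above:
#   truncate_ellipsis('short line')        -> 'short line'
#   truncate_ellipsis('x' * 40, length=10) -> 'xxxxxxx...'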
def pyle_evaluate(command=None, modules=(), inplace=False, files=None, print_traceback=False):
"""The main method of pyle."""
eval_globals = {}
eval_globals.update(STANDARD_MODULES)
for module_arg in modules or ():
for module in module_arg.strip().split(","):
module = module.strip()
if module:
eval_globals[module] = __import__(module)
if not command:
# Default 'do nothing' program
command = 'line'
files = files or ['-']
eval_locals = {}
for file in files:
if file == '-':
file = sys.stdin
out_buf = sys.stdout if not inplace else StringIO.StringIO()
with (open(file, 'rb') if not hasattr(file, 'read') else file) as in_file:
for num, line in enumerate(in_file.readlines()):
was_whole_line = False
if line[-1] == '\n':
was_whole_line = True
line = line[:-1]
words = [word.strip() for word in re.split(r'\s+', line) if word]
eval_locals.update({'line': line, 'words': words, 'filename': in_file.name, 'num': num})
try:
out_line = eval(command, eval_globals, eval_locals)
except Exception as e:
sys.stdout.flush()
sys.stderr.write("At %s:%d ('%s'): %s\n" % (in_file.name, num, truncate_ellipsis(line), e))
if print_traceback:
traceback.print_exc(None, sys.stderr)
else:
if out_line is None:
continue
# If the result is something list-like or iterable, output each item space separated.
if not isinstance(out_line, str):
try:
out_line = u' '.join(unicode(part) for part in out_line)
except:
# Guess it wasn't a list after all.
out_line = unicode(out_line)
out_line = out_line or u''
out_buf.write(out_line)
if was_whole_line:
out_buf.write('\n')
if inplace:
with open(file, 'wb') as out_file:
out_file.write(out_buf.getvalue())
out_buf.close()
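# Illustrative usage sketch (not part of the original file): the same
# evaluation can be driven from Python, e.g. printing the first word of every
# line of a hypothetical access.log:
#
#   pyle_evaluate(command='words[0]', files=['access.log'])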
def pyle(argv=None):
"""Execute pyle with the specified arguments, or sys.argv if no arguments specified."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-m", "--modules", dest="modules", action='append',
help="import MODULE before evaluation. May be specified more than once.")
parser.add_argument("-i", "--inplace", dest="inplace", action='store_true', default=False,
help="edit files in place. When used with file name arguments, the files will be replaced by the output of the evaluation")
parser.add_argument("-e", "--expression", dest="expression",
help="an expression to evaluate for each line")
parser.add_argument('files', nargs='*',
help="files to read as input. If used with --inplace, the files will be replaced with the output")
parser.add_argument("--traceback", action="store_true", default=False,
help="print a traceback on stderr when an expression fails for a line")
args = parser.parse_args() if not argv else parser.parse_args(argv)
pyle_evaluate(args.expression, args.modules, args.inplace, args.files, args.traceback)
if __name__ == '__main__':
pyle()
``` |
{
"source": "jitdee-ai/darts-models",
"score": 2
} |
#### File: darts-models/darmo/nsga.py
```python
from timm.models.layers import drop_path
from ofa.imagenet_codebase.modules.layers import *
from ofa.layers import set_layer_from_config, MBInvertedConvLayer, ConvLayer, IdentityLayer, LinearLayer
from ofa.imagenet_codebase.networks.proxyless_nets import MobileInvertedResidualBlock
import torch
import torch.nn as nn
from .registry import register_model
import json
import pkg_resources
from .utils import _set_config, _load
url_cfgs = {
'eeea_c1' : 'https://github.com/jitdee-ai/darmo/releases/download/0.0.1/eeea_c1.pt',
'eeea_c2' : 'https://github.com/jitdee-ai/darmo/releases/download/0.0.1/eeea-c2.pt',
'ofa595_1k' : 'https://github.com/jitdee-ai/darmo/releases/download/0.0.1/ofa595_1k_b1a18d5.pt',
'ofa595_21k' : 'https://github.com/jitdee-ai/darmo/releases/download/0.0.1/ofa595_21k_ec9428e.pt',
}
def _create_network(config):
if config['name'].startswith("ofa595"):
config_file = pkg_resources.resource_filename(__name__, "config/ofa595.config")
else:
config_file = pkg_resources.resource_filename(__name__, "config/"+config['name']+".config")
config_subnet = json.load(open(config_file))
if config['name'] == "ofa595_21k":
config_subnet['classifier']['out_features'] = config['num_classes']
base_net = NSGANetV2.build_from_config(config_subnet, depth=None)
if config['name'] == "ofa595_21k":
base_net.classifier = None
_load(config, base_net, url_cfgs)
#if config['name'] == "ofa595_21k":
if config['name'].startswith("ofa595_"):
NSGANetV2.reset_classifier(base_net, 1536, config['num_classes'], dropout_rate=0.0)
base_net.num_features = 1536
return base_net
@register_model
def ofa595_1k(pretrained=True, num_classes=1000, auxiliary=True):
config = _set_config(_config={}, name= 'ofa595_1k', first_channels=None, layers=None, auxiliary=auxiliary,
genotype=None, last_bn=False, pretrained=pretrained, num_classes=num_classes)
return _create_network(config)
@register_model
def ofa595_21k(pretrained=True, num_classes=11221, auxiliary=True):
config = _set_config(_config={}, name= 'ofa595_21k', first_channels=None, layers=None, auxiliary=auxiliary,
genotype=None, last_bn=False, pretrained=pretrained, num_classes=11221)
return _create_network(config)
@register_model
def eeea_c1(pretrained=True, num_classes=1000, auxiliary=True):
config = _set_config(_config={}, name= 'eeea_c1', first_channels=46, layers=14, auxiliary=auxiliary,
genotype=None, last_bn=False, pretrained=pretrained, num_classes=num_classes)
return _create_network(config)
@register_model
def eeea_c2(pretrained=True, num_classes=1000, auxiliary=True):
config = _set_config(_config={}, name= 'eeea_c2', first_channels=46, layers=14, auxiliary=auxiliary,
genotype=None, last_bn=False, pretrained=pretrained, num_classes=num_classes)
return _create_network(config)
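# Illustrative usage sketch (not part of the original file): constructing one
# of the registered models without downloading pretrained weights might look
# like
#
#   net = eeea_c2(pretrained=False, num_classes=10)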
class MobileInvertedResidualBlock(MyModule):
"""
Modified from https://github.com/mit-han-lab/once-for-all/blob/master/ofa/
imagenet_codebase/networks/proxyless_nets.py to include drop path in training
"""
def __init__(self, mobile_inverted_conv, shortcut, drop_connect_rate=0.0):
super(MobileInvertedResidualBlock, self).__init__()
self.mobile_inverted_conv = mobile_inverted_conv
self.shortcut = shortcut
self.drop_connect_rate = drop_connect_rate
def forward(self, x):
if self.mobile_inverted_conv is None or isinstance(self.mobile_inverted_conv, ZeroLayer):
res = x
elif self.shortcut is None or isinstance(self.shortcut, ZeroLayer):
res = self.mobile_inverted_conv(x)
else:
# res = self.mobile_inverted_conv(x) + self.shortcut(x)
res = self.mobile_inverted_conv(x)
if self.drop_connect_rate > 0.:
res = drop_path(res, drop_prob=self.drop_connect_rate, training=self.training)
res += self.shortcut(x)
return res
@property
def module_str(self):
return '(%s, %s)' % (
self.mobile_inverted_conv.module_str if self.mobile_inverted_conv is not None else None,
self.shortcut.module_str if self.shortcut is not None else None
)
@property
def config(self):
return {
'name': MobileInvertedResidualBlock.__name__,
'mobile_inverted_conv': self.mobile_inverted_conv.config if self.mobile_inverted_conv is not None else None,
'shortcut': self.shortcut.config if self.shortcut is not None else None,
}
@staticmethod
def build_from_config(config):
mobile_inverted_conv = set_layer_from_config(config['mobile_inverted_conv'])
shortcut = set_layer_from_config(config['shortcut'])
return MobileInvertedResidualBlock(
mobile_inverted_conv, shortcut, drop_connect_rate=config['drop_connect_rate'])
class MyNetwork(MyModule):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def zero_last_gamma(self):
raise NotImplementedError
""" implemented methods """
def set_bn_param(self, momentum, eps):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.momentum = momentum
m.eps = eps
return
class MobileNetV3(MyNetwork):
def __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier, depth):
super(MobileNetV3, self).__init__()
self.first_conv = first_conv
self.blocks = nn.ModuleList(blocks)
self.final_expand_layer = final_expand_layer
self.feature_mix_layer = feature_mix_layer
self.classifier = classifier
self.depth = depth
self.channels = []
self.num_features = 1280
def forward(self, x):
x = self.first_conv(x)
for block in self.blocks:
x = block(x)
x = self.final_expand_layer(x)
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = self.feature_mix_layer(x)
x = torch.squeeze(x)
x = self.classifier(x)
return x
def reset_classifier(self, num_classes, dropout=0.0):
self.num_classes = num_classes
self.drop_rate = dropout
del self.classifier
if self.num_classes:
self.classifier = nn.Linear(self.num_features, self.num_classes)
else:
self.classifier = None
class NSGANetV2(MobileNetV3):
"""
Modified from https://github.com/mit-han-lab/once-for-all/blob/master/ofa/
imagenet_codebase/networks/mobilenet_v3.py to include drop path in training
and option to reset classification layer
"""
@staticmethod
def build_from_config(config, drop_connect_rate=0.0, depth=None):
first_conv = set_layer_from_config(config['first_conv'])
final_expand_layer = set_layer_from_config(config['final_expand_layer'])
feature_mix_layer = set_layer_from_config(config['feature_mix_layer'])
classifier = set_layer_from_config(config['classifier'])
blocks = []
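        # Drop-path (stochastic depth) rate grows linearly with block index:
        # 0 for the first block up to just under drop_connect_rate for the last.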
for block_idx, block_config in enumerate(config['blocks']):
block_config['drop_connect_rate'] = drop_connect_rate * block_idx / len(config['blocks'])
blocks.append(MobileInvertedResidualBlock.build_from_config(block_config))
net = MobileNetV3(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier, depth)
if 'bn' in config:
net.set_bn_param(**config['bn'])
else:
net.set_bn_param(momentum=0.1, eps=1e-3)
return net
def zero_last_gamma(self):
for m in self.modules():
if isinstance(m, MobileInvertedResidualBlock):
if isinstance(m.mobile_inverted_conv, MBInvertedConvLayer) and isinstance(m.shortcut, IdentityLayer):
m.mobile_inverted_conv.point_linear.bn.weight.data.zero_()
# @staticmethod
# def build_net_via_cfg(cfg, input_channel, last_channel, n_classes, dropout_rate):
# # first conv layer
# first_conv = ConvLayer(
# 3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='h_swish', ops_order='weight_bn_act'
# )
# # build mobile blocks
# feature_dim = input_channel
# blocks = []
# for stage_id, block_config_list in cfg.items():
# for k, mid_channel, out_channel, use_se, act_func, stride, expand_ratio in block_config_list:
# mb_conv = MBInvertedConvLayer(
# feature_dim, out_channel, k, stride, expand_ratio, mid_channel, act_func, use_se
# )
# if stride == 1 and out_channel == feature_dim:
# shortcut = IdentityLayer(out_channel, out_channel)
# else:
# shortcut = None
# blocks.append(MobileInvertedResidualBlock(mb_conv, shortcut))
# feature_dim = out_channel
# # final expand layer
# final_expand_layer = ConvLayer(
# feature_dim, feature_dim * 6, kernel_size=1, use_bn=True, act_func='h_swish', ops_order='weight_bn_act',
# )
# feature_dim = feature_dim * 6
# # feature mix layer
# feature_mix_layer = ConvLayer(
# feature_dim, last_channel, kernel_size=1, bias=False, use_bn=False, act_func='h_swish',
# )
# # classifier
# classifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
# return first_conv, blocks, final_expand_layer, feature_mix_layer, classifier
@staticmethod
def reset_classifier(model, last_channel, n_classes, dropout_rate=0.0):
model.classifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
``` |
{
"source": "jitendar-singh/jenkins-operator",
"score": 2
} |
#### File: features/steps/steps.py
```python
import os
import time
import logging
import urllib3
from behave import given, when, then
from datetime import date
from pyshould import should
from kubernetes import config, client
from smoke.features.steps.openshift import Openshift
from smoke.features.steps.project import Project
'''
If we need to install an operator manually using the cli
- ensure your catalog source is installed
- create an OperatorGroup
- create the Subscription object
'''
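# Roughly the same flow done by hand with the oc CLI (illustrative only; the yaml
# paths are the ones resolved below from OUTPUT_DIR and may differ per environment):
#   oc create -f smoke/samples/catalog-source.yaml
#   oc create -f "$OUTPUT_DIR/operator-group.yaml"
#   oc create -f "$OUTPUT_DIR/subscription.yaml"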
# Path to the yaml files
scripts_dir = os.getenv('OUTPUT_DIR')
# jenkins_crd = './manifests/jenkins-operator/0.7.0/'
catalogsource = './smoke/samples/catalog-source.yaml'
operatorgroup = os.path.join(scripts_dir,'operator-group.yaml')
subscription = os.path.join(scripts_dir,'subscription.yaml')
jenkins = os.path.join(scripts_dir,'jenkins.yaml')
deploy_pod = "jenkins-1-deploy"
samplebclst = ['sample-pipeline','nodejs-mongodb-example']
samplepipeline = "https://raw.githubusercontent.com/openshift/origin/master/examples/jenkins/pipeline/samplepipeline.yaml"
# variables needed to get the resource status
current_project = ''
config.load_kube_config()
v1 = client.CoreV1Api()
oc = Openshift()
podStatus = {}
# STEP
@given(u'Project "{project_name}" is used')
def given_project_is_used(context, project_name):
project = Project(project_name)
global current_project
current_project = project_name
context.current_project = current_project
context.oc = oc
if not project.is_present():
print("Project is not present, creating project: {}...".format(project_name))
project.create() | should.be_truthy.desc(
"Project {} is created".format(project_name))
print("Project {} is created!!!".format(project_name))
context.project = project
# STEP
@given(u'Project [{project_env}] is used')
def given_namespace_from_env_is_used(context, project_env):
env = os.getenv(project_env)
assert env is not None, f"{project_env} environment variable needs to be set"
print(f"{project_env} = {env}")
given_project_is_used(context, env)
@given(u'we have a openshift cluster')
def loginCluster(context):
print("Using [{}]".format(current_project))
@when(u'we create the catalog source using catalog-source.yaml')
def createCatalogsource(context):
res = oc.oc_create_from_yaml(catalogsource)
print(res)
@then(u'we create operator group using operator-group.yaml')
def createOperatorgroup(context):
res = oc.oc_create_from_yaml(operatorgroup)
print(res)
@then(u'we create subscription using subscriptions.yaml')
def createSubsObject(context):
res = oc.oc_create_from_yaml(subscription)
print(res)
@then(u'we check for the csv and csv version')
def verifycsv(context):
print('---> Getting the resources')
time.sleep(10)
if not 'jenkins-operator.0.0.0' in oc.search_resource_in_namespace('csv','jenkins-operator.0.0.0',current_project):
raise AssertionError
else:
res = oc.search_resource_in_namespace('csv','jenkins-operator.0.0.0',current_project)
print(res)
@then(u'we check for the operator group')
def verifyoperatorgroup(context):
if not 'jenkins-operator' in oc.search_resource_in_namespace('operatorgroup','jenkins-operator',current_project):
raise AssertionError
else:
res = oc.search_resource_in_namespace('operatorgroup','jenkins-operator',current_project)
print(res)
@then(u'we check for the subscription')
def verifysubs(context):
if not 'jenkins-operator' in oc.search_resource_in_namespace('subs','jenkins-operator',current_project):
raise AssertionError
else:
res = oc.search_resource_in_namespace('subs','jenkins-operator',current_project)
print(res)
@then(u'we check for the operator pod')
def verifyoperatorpod(context):
print('---> checking operator pod status')
context.v1 = v1
pods = v1.list_namespaced_pod(current_project)
for i in pods.items:
print("Getting pod list")
podStatus[i.metadata.name] = i.status.phase
print('---> Validating...')
if not i.metadata.name in oc.search_pod_in_namespace(i.metadata.name,current_project):
raise AssertionError
print('waiting to get pod status')
time.sleep(10)
for pod in podStatus.keys():
status = podStatus[pod]
if 'Running' in status:
print(pod)
print(podStatus[pod])
else:
raise AssertionError
@given(u'Jenkins operator is installed')
def verifyoperator(context):
verifyoperatorpod(context)
@when(u'we create the jenkins instance using jenkins.yaml')
def createinstance(context):
res = oc.oc_create_from_yaml(jenkins)
print(res)
@then(u'We check for the jenkins-example pod status')
def checkjenkinspod(context):
verifyoperatorpod(context)
@then(u'We check for the route')
def checkroute(context):
operator_name = 'jenkins-simple'
time.sleep(10)
route = oc.get_route_host(operator_name,current_project)
url = 'http://'+str(route)
print('--->App url:')
print(url)
if len(url) <= 0:
raise AssertionError
@given(u'The jenkins pod is up and runnning')
def checkJenkins(context):
time.sleep(10)
podStatus = {}
status = ""
pods = v1.list_namespaced_pod(current_project)
for i in pods.items:
print("Getting pod list")
print(i.status.pod_ip)
print(i.metadata.name)
print(i.status.phase)
podStatus[i.metadata.name] = i.status.phase
for pod in podStatus.keys():
status = podStatus[pod]
if 'Running' in status:
print("still checking pod status")
print(pod)
print(podStatus[pod])
elif 'Succeeded' in status:
print("checking pod status")
print(pod)
print(podStatus[pod])
else:
raise AssertionError
@when(u'The user enters new-app command with sample-pipeline')
def createPipeline(context):
# bclst = ['sample-pipeline','nodejs-mongodb-example']
res = oc.new_app_from_file(samplepipeline,current_project)
for item, value in enumerate(samplebclst):
if 'sample-pipeline' in oc.search_resource_in_namespace('bc',value, current_project):
print('Buildconfig sample-pipeline created')
elif 'nodejs-mongodb-example' in oc.search_resource_in_namespace('bc',value,current_project):
print('Buildconfig nodejs-mongodb-example created')
else:
raise AssertionError
print(res)
@then(u'Trigger the build using oc start-build')
def startbuild(context):
for item,value in enumerate(samplebclst):
res = oc.start_build(value,current_project)
if not value in res:
raise AssertionError
else:
print(res)
@then(u'nodejs-mongodb-example pod must come up')
def check_app_pod(context):
time.sleep(120)
podStatus = {}
podSet = set()
bcdcSet = set()
pods = v1.list_namespaced_pod(current_project)
for i in pods.items:
podStatus[i.metadata.name] = i.status.phase
podSet.add(i.metadata.name)
for items in podSet:
if 'build' in items:
bcdcSet.add(items)
elif 'deploy' in items:
bcdcSet.add(items)
app_pods = podSet.difference(bcdcSet)
for items in app_pods:
print('Getting pods')
print(items)
    for items in app_pods:
        status = podStatus[items]
        if 'Running' not in status:
            raise AssertionError
print('---> App pods are ready')
@then(u'route nodejs-mongodb-example must be created and be accessible')
def connectApp(context):
print('Getting application route/url')
app_name = 'nodejs-mongodb-example'
time.sleep(30)
route = oc.get_route_host(app_name,current_project)
url = 'http://'+str(route)
print('--->App url:')
print(url)
http = urllib3.PoolManager()
res = http.request('GET', url)
connection_status = res.status
if connection_status == 200:
print('---> Application is accessible via the route')
print(url)
else:
raise Exception
``` |
{
"source": "jitendar-singh/Jenkins-TQR",
"score": 2
} |
#### File: Jenkins-TQR/src/nodejsagents.py
```python
import time
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from agents import *
from selenium.webdriver.support.wait import WebDriverWait
from drivers import *
class NodejsAgent:
@classmethod
def pipelinejobsteps(cls):
driver.find_element(By.XPATH,
"//*[@id='main-panel']/div/div/div/form/table/tbody/tr[2]/td[3]/textarea").send_keys(
"Sample pipeline build to test nodejs agent & maven agent")
driver.find_element(By.XPATH, "//*[@id='main-panel']/div/div/div/form/table/tbody/tr[148]/td[3]/select").click()
driver.find_element(By.XPATH,
"//*[@id='main-panel']/div/div/div/form/table/tbody/tr[148]/td[3]/select/option[2]").click()
driver.find_element(By.XPATH, "//*[@id='main-panel']/div/div/div/form/table/tbody/tr[149]/td[2]/table/tbody/tr["
"12]/td[3]/select").click()
driver.find_element(By.XPATH, "//*[@id='main-panel']/div/div/div/form/table/tbody/tr[149]/td[2]/table/tbody/tr["
"12]/td[3]/select/option[2]").click()
time.sleep(5)
driver.find_element(By.XPATH, "//*[@id='main-panel']/div/div/div/form/table/tbody/tr[149]/td[2]/table/tbody/tr["
"13]/td[2]/table/tbody/tr[5]/td[3]/div/div[1]/table/tbody/tr[1]/td["
"3]/input").send_keys("https://github.com/akram/scrum-planner.git")
driver.find_element(By.ID, "yui-gen5-button").click()
time.sleep(5)
driver.find_element(By.XPATH, "//*[@id='yui-gen13-button']").click()
@classmethod
def setnodejs12agentimages(cls):
# time.sleep(5)
print("Manage Jenkins")
driver.find_element(By.XPATH, "//*[@id='tasks']/div[4]/a[2]").click()
'''
Explicit wait for Configure Clouds option
//*[@id="main-panel"]/div[13]/a/img
'''
wait = WebDriverWait(driver, 10)
element = wait.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='main-panel']/div[13]/a/img")))
element.click()
print("Clicked the cloud & Node configure button")
'''
Explicitly waiting for the configure clouds link.
//*[@id="tasks"]/div[4]/a[2]
'''
waitconfigclouds = WebDriverWait(driver, 10)
        configurecloud = waitconfigclouds.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='tasks']/div[4]/a[2]")))
        configurecloud.click()
print("Clicked the configure cloud link")
'''
Explicitly waiting for the pod template button.
//*[@id="yui-gen50-button"]
'''
waitbutton = WebDriverWait(driver, 20)
try:
button = waitbutton.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='yui-gen50-button']")))
button.click()
print("Clicked the pod template button")
except (NoSuchElementException, ElementNotVisibleException, TimeoutException):
print("Exception: Pod Templates button not ready")
try:
waitnodejs = WebDriverWait(driver, 10)
nodejstemplate = waitnodejs.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='yui-gen53-button']")))
nodejstemplate.click()
# driver.find_element(By.XPATH, ).click()
print("Clicked pod button")
podname = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "//*[@id='yui-gen45"
"']/table/tbody/tr[1]/td["
"3]/input")))
driver.find_element(By.XPATH, "//*[@id='yui-gen45']/table/tbody/tr[1]/td[3]/input").clear()
driver.find_element(By.XPATH, "//*[@id='yui-gen45']/table/tbody/tr[1]/td[3]/input").send_keys("<KEY>")
print("Changed the pod name: " + nodejs12)
print(podname)
except (NoSuchElementException, ElementNotVisibleException, TimeoutException):
print("EXCEPTION: In pod name block")
try:
podlabel = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, "//*[@id='yui-gen45"
"']/table/tbody/tr["
"6]/td[3]/input")))
driver.find_element(By.XPATH, "//*[@id='yui-gen45']/table/tbody/tr[6]/td[3]/input").clear()
driver.find_element(By.XPATH, "//*[@id='yui-gen45']/table/tbody/tr[6]/td[3]/input").send_keys("<KEY>")
print("Changed pod label: " + nodejs12)
print(podlabel)
print("Changing the docker image")
driver.find_element(By.XPATH, "//*[@id='yui-gen27']/table/tbody/tr[5]/td[3]/input").clear()
print("Changing the docker image")
driver.find_element(By.XPATH, "//*[@id='yui-gen27']/table/tbody/tr[5]/td[3]/input").send_keys(nodejs12)
print("Changed the docker image")
except (NoSuchElementException, ElementNotVisibleException, TimeoutException):
print("EXCEPTION: In pod label section ")
driver.find_element(By.XPATH, "//*[@id='<PASSWORD>-gen55']").click()
print("Click on Apply")
waitsave = WebDriverWait(driver, 10)
elementsave = waitsave.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='yui-gen72-button']")))
elementsave.click()
print("Click on Save")
# def createpipelinejob(self):
# super().createpipelinejob()
``` |
{
"source": "jitender0514/djnago-rename",
"score": 3
} |
#### File: management/commands/rename.py
```python
from django.core.management.base import BaseCommand
import os
class Command(BaseCommand):
help = 'Rename the Django Project'
def add_arguments(self, parser):
# new name of the project
parser.add_argument('new_project_name', type=str, help="The new project name (String)")
# current name of the project
parser.add_argument('--current_name',
default='boilerplate',
type=str,
help="The current project name (String)")
def handle(self, *args, **options):
new_name = options['new_project_name']
current_name = options['current_name']
# files that need to be update
files = ['{}/settings/base_settings.py'.format(current_name),
'{}/wsgi.py'.format(current_name),
'manage.py']
# folder that need to be update
folder = current_name
# update the files
for file in files:
with open(file, 'r') as f:
file_data = f.read()
file_data = file_data.replace(current_name, new_name)
with open(file, 'w') as f:
f.write(file_data)
# change name of the folder
os.rename(folder, new_name)
self.stdout.write(self.style.SUCCESS('Successfully updated the project name '
'from "{}" to "{}"'.format(current_name, new_name)))
``` |
{
"source": "Jitender46559/Eye-For-BlindPerson",
"score": 3
} |
#### File: Jitender46559/Eye-For-BlindPerson/Application.py
```python
import cv2
import pyttsx3
import keyboard
import ImageToSpeech as ITS
import numpy as np
from keras.models import load_model
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
textEngine = pyttsx3.init()
stroke = 2
color = (255, 255, 255)
model = load_model('model_CNN_V2.h5')
font = cv2.FONT_HERSHEY_SIMPLEX
Gender = {0: "Male", 1: "Female"}
def normalizeImage(img):
IMG_SIZ = 120
new_img = cv2.resize(img, (IMG_SIZ, IMG_SIZ))
image = new_img.reshape((120, 120, 1))
image = image.astype('float32') / 255
return image
i=0
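# Key controls while the loop runs: 'i' saves the current frame and reads any text
# in it aloud via OCR, 'p' speaks the predicted gender of a detected face, 'q' quits.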
while(True):
i+=1
ret, frame = cap.read()
if keyboard.is_pressed('i'):
cv2.imwrite('C:/Users/<NAME>/Desktop/Intel HackFury2/Image To String Data/{index}.jpg'.format(index=i), frame)
ITS.convertImageToString('C:/Users/<NAME>/Desktop/Intel HackFury2/Image To String Data/{index}.jpg'.format(index=i))
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(frame_gray, 1.2, 5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = frame_gray[y:y + h, x:x + w] # region of interest
roi_color = frame[y:y + h, x:x + w]
img = np.array(roi_gray)
img = normalizeImage(img)
prediction = model.predict([[img]]).argmax()
gender = Gender.get(prediction)
text = "Some {} is coming towards you.".format(gender)
if keyboard.is_pressed('p'):
textEngine.say(text)
textEngine.runAndWait()
textEngine.stop()
cv2.putText(frame, gender, (x, y), font, 1, color, stroke, cv2.LINE_AA)
cv2.imshow('Eye For Blindness', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
```
#### File: Jitender46559/Eye-For-BlindPerson/ImageToSpeech.py
```python
import pytesseract
import pyttsx3
from PIL import Image
def convertImageToString(image):
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files (x86)\Tesseract-OCR\tesseract.exe"
image = Image.open(image)
text = pytesseract.image_to_string(image)
print(text)
textEngine = pyttsx3.init()
textEngine.say(text)
textEngine.runAndWait()
textEngine.stop()
``` |
{
"source": "JitenDhandha/CFit",
"score": 3
} |
#### File: JitenDhandha/CFit/Main.py
```python
import Fitting as fit
import tkinter as tk
from tkinter import filedialog
import tkinter.ttk as ttk
####################################################################################
# USER INTERFACE #
####################################################################################
class GUI:
def __init__(self,master=None):
############################################################################
# INTERFACE SETTINGS #
############################################################################
self.master = master
master.title('CFit (Curve fitting Tool)')
master.geometry('1000x700') #Size of the window
master.resizable(0,0) #Making window un-resizeable
master.configure(background='white')
#Relative sizes of each row and column containing frames
master.grid_rowconfigure(0, weight=5)
master.grid_rowconfigure(1, weight=4)
master.grid_rowconfigure(2, weight=6)
master.grid_rowconfigure(3, weight=2)
master.grid_columnconfigure(0, weight=1)
master.grid_columnconfigure(1, weight=1)
############################################################################
# FRAMES #
############################################################################
self.frame1 = tk.LabelFrame(master, text=' Data Options ', background='white')
self.frame1.grid(row=0, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=2,pady=2)
self.frame2 = tk.LabelFrame(master, text=' File preview ', background='white')
self.frame2.grid(row=0, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=2,pady=2)
self.frame3 = tk.LabelFrame(master, text=' Plot Options ', background='white')
self.frame3.grid(row=1, rowspan=1, column=0, columnspan=2, sticky='NEWS', padx=2,pady=2)
self.frame4 = tk.LabelFrame(master, text=' Fit Options ', background='white')
self.frame4.grid(row=2, rowspan=1, column=0, columnspan=2, sticky='NEWS', padx=2,pady=2)
self.frame5 = tk.Frame(master, background='white')
self.frame5.grid(row=3, rowspan=1, column=0, columnspan=2, sticky='NEWS', padx=2,pady=2)
############################################################################
# VARIABLES #
############################################################################
self.fileLocation = str #String holding location of file
self.fileCheck = int #Integer denoting whether the file was successfully read
self.fileStatus = tk.StringVar() #String holding status of the file
#Types of fits
functionsList = list(fit.functions.keys())
self.fitTypes = ['Polynomial'] + functionsList[0:6] + ['Other functions'] + functionsList[6:]
self.fitType = tk.StringVar() #String holding the current type of fit chosen
self.viewGrid = tk.IntVar() #Boolean holding whether the user wants to see gridlines in plot
self.viewParameters = tk.IntVar() #Boolean holding whether the user wants to see fitting parameters in plot
self.viewResiduals = tk.IntVar() #Boolean holding whether the user wants to see a residuals plot
self.fitTypeStr = tk.StringVar() #String holding the the display of function form
self.fitCheck = int #Integer denoting whether the fit was successful
self.fitStatus = tk.StringVar() #String holding status of the fit
############################################################################
# FRAMES 1 #
############################################################################
        #Fixing the size of each row and column
self.frame1.grid_propagate(False)
#Relative sizes of each row and column
for i in range(0,2):
self.frame1.grid_columnconfigure(i, weight=1)
for i in range(0,3):
self.frame1.grid_rowconfigure(i, weight=1)
self.fileLocationLabel = tk.Label(self.frame1, text ='File directory: ', background='white')
self.fileLocationLabel.grid(row=0, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fileLocationEntry = tk.Entry(self.frame1, justify='center', foreground='blue')
self.fileLocationEntry.insert(0, string='(no directory selected)')
self.fileLocationEntry.configure(state='readonly')
self.fileLocationEntry.grid(row=0, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.browseFileButton = tk.Button(self.frame1, text="Load data", command=self.browseFileFunc)
self.browseFileButton.grid(row=1, rowspan=1, column=0, columnspan=2, sticky='NEWS', padx=5, pady=5)
self.fileStatusLabel = tk.Label(self.frame1, justify='center', textvariable=self.fileStatus)
self.fileStatusLabel.grid(row=2, rowspan=1, column=0, columnspan=2, sticky='NEWS', padx=5, pady=5)
############################################################################
# FRAMES 2 #
############################################################################
        #Fixing the size of each row and column
self.frame2.grid_propagate(False)
#Relative sizes of each row and column
self.frame2.grid_columnconfigure(0, weight=1)
self.frame2.grid_rowconfigure(0, weight=1)
self.filePreviewListBox = tk.Listbox(self.frame2, borderwidth=0, height=5)
self.filePreviewListBox.configure(state='disable')
self.filePreviewListBox.grid(row=0, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=5, pady=5)
############################################################################
# FRAMES 3 #
############################################################################
        #Fixing the size of each row and column
self.frame3.grid_propagate(False)
#Relative sizes of each row and column
for i in range(0,3):
self.frame3.grid_columnconfigure(i, weight=1)
for i in range(0,3):
self.frame3.grid_rowconfigure(i, weight=1)
self.plotTitleLabel = tk.Label(self.frame3, text = 'Plot title: ', background='white')
self.plotTitleLabel.grid(row=0, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.plotTitleEntry = tk.Entry(self.frame3, foreground='blue', bd='2', relief='ridge')
self.plotTitleEntry.grid(row=0, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.xAxisTitleLabel = tk.Label(self.frame3, text = 'X axis label: ', background='white')
self.xAxisTitleLabel.grid(row=1, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.xAxisTitleEntry = tk.Entry(self.frame3, foreground='blue', bd='2', relief='ridge')
self.xAxisTitleEntry.grid(row=1, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.yAxisTitleLabel = tk.Label(self.frame3, text = 'Y axis label: ', background='white')
self.yAxisTitleLabel.grid(row=2, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.yAxisTitleEntry = tk.Entry(self.frame3, foreground='blue', bd='2', relief='ridge')
self.yAxisTitleEntry.grid(row=2, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.viewGridCheckButton = tk.Checkbutton(self.frame3, text="Grid lines", variable=self.viewGrid)
self.viewGridCheckButton.grid(row=0, rowspan=1, column=2, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.viewParametersCheckButton = tk.Checkbutton(self.frame3, text="View parameters", variable=self.viewParameters)
self.viewParametersCheckButton.grid(row=1, rowspan=1, column=2, columnspan=1, sticky='NEWS', padx=5, pady=2)
self.viewResidualsCheckButton = tk.Checkbutton(self.frame3, text="View residuals", variable=self.viewResiduals)
self.viewResidualsCheckButton.grid(row=2, rowspan=1, column=2, columnspan=1, sticky='NEWS', padx=5, pady=2)
############################################################################
# FRAMES 4 #
############################################################################
        #Fixing the size of each row and column
self.frame4.grid_propagate(False)
#Relative sizes of each row and column
self.frame4.grid_columnconfigure(0, weight=1)
self.frame4.grid_columnconfigure(1, weight=2)
self.frame4.grid_columnconfigure(2, weight=4)
for i in range(4):
self.frame4.grid_rowconfigure(i, weight=1)
self.fitTypeLabel = tk.Label(self.frame4, text = 'Fit type: ', background='white')
self.fitTypeLabel.grid(row=0, rowspan=1, column=0, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fitTypeOptionMenu = ttk.OptionMenu(self.frame4, self.fitType, 'None', *self.fitTypes, command=self.fitTypeOptionMenuFunc)
self.fitTypeOptionMenu.configure(width=25)
self.fitTypeOptionMenu['menu'].entryconfigure(0, state = "disabled")
self.fitTypeOptionMenu['menu'].entryconfigure(7, state = "disabled")
self.fitTypeOptionMenu.grid(row=0, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fitTypeStrLabel = tk.Label(self.frame4,textvariable=self.fitTypeStr,width=25,foreground='blue')
self.fitTypeStrLabel.grid(row=0, rowspan=1, column=2, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.plotRawDataButton = tk.Button(self.frame4, text="Plot raw data", command=self.plotRawDataButtonFunc)
self.plotRawDataButton.grid(row=1, rowspan=2, column=0, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fitDataAutoButton = tk.Button(self.frame4, text="Fit data automatically", command=self.fitDataAutoButtonFunc)
self.fitDataManButton = tk.Button(self.frame4, text="Fit data using my guess:", command=self.fitDataManButtonFunc)
self.fitDataManEntry = tk.Entry(self.frame4, justify='center')
self.fitStatusLabel = tk.Label(self.frame4, justify='center', textvariable=self.fitStatus)
self.fitStatusLabel.grid(row=3, rowspan=1, column=0, columnspan=3, sticky='NEWS', padx=5, pady=5)
############################################################################
# FRAMES 5 #
############################################################################
        #Fixing the size of each row and column
self.frame5.grid_propagate(False)
#Relative sizes of each row and column
self.frame5.grid_columnconfigure(0, weight=8)
self.frame5.grid_columnconfigure(1, weight=1)
self.frame5.grid_columnconfigure(2, weight=8)
self.frame5.grid_rowconfigure(0, weight=1)
self.clearButton = tk.Button(self.frame5, text="Clear", width=10, command=self.clear)
self.clearButton.grid(row=0, rowspan=1, column=0, columnspan=1, sticky='NES', padx=5, pady=5)
self.helpButton = tk.Button(self.frame5, text="Help", width=8, command=self.help)
self.helpButton.grid(row=0, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.aboutButton = tk.Button(self.frame5, text="About", width=10, command=self.about)
self.aboutButton.grid(row=0, rowspan=1, column=2, columnspan=1, sticky='NWS', padx=5, pady=5)
############################################################################
# FUNCTIONS #
############################################################################
'''
This function adds a preview of the file in the file preview box.
'''
def setFilePreview(self):
#Clearing the preview
self.filePreviewListBox.configure(state='normal')
self.filePreviewListBox.delete(0, 'end')
#Opening the file and adding it to the preview box
lineNum = 0
with open(self.fileLocation, 'r') as file:
for line in file:
line = ' '.join(line.split())
for delims in [(' ,',','),(', ',','),(' ',',')]:
line = line.replace(*delims)
self.filePreviewListBox.insert(lineNum, line)
lineNum += 1
'''
This function lets the user browse the file, calls the readFile() function in
Fitting.py, sets file preview sets file status for user to see.
'''
def browseFileFunc(self):
#Clearing everything in the GUI first
self.clear()
#Letting user browse the file and calling readFile() in Fitting.py
self.fileLocation = filedialog.askopenfilename(title='Choose a file')
self.fileCheck = fit.readFile(self.fileLocation)
#Showing the directory of the file
self.fileLocationEntry.configure(state='normal')
self.fileLocationEntry.delete(0,'end')
if(self.fileLocation==''):
self.fileLocationEntry.insert(0, string='(no directory selected)')
else:
self.fileLocationEntry.insert(0, string=self.fileLocation)
self.fileLocationEntry.configure(state='readonly')
#Setting file status
if(self.fileCheck==0):
self.fileStatus.set("File opened successfully!")
elif(self.fileCheck==1):
self.fileStatus.set("")
elif(self.fileCheck==2):
self.fileStatus.set("Not a .txt or .csv file!")
elif(self.fileCheck==3):
self.fileStatus.set("Couldn't parse file properly.")
elif(self.fileCheck==4):
self.fileStatus.set("File must contain 2 or 3 columns!")
elif(self.fileCheck==5):
self.fileStatus.set("File contains NaNs or Infs.")
elif(self.fileCheck==6):
self.fileStatus.set("Y-errors need to be a positive number!")
#Checking if file can be previewed or not
if(self.fileCheck!=1 and self.fileCheck!=2):
self.setFilePreview()
'''
This function is called when the user selects a function from the drop down.
It sets the preview of the form of the function and also some variables in the
Fitting.py file.
'''
def fitTypeOptionMenuFunc(self, event):
#Setting the function and its preview
fit.function = self.fitType.get()
fit.numberOfParameters = fit.functions[fit.function].numberOfParameters
self.fitTypeStr.set(fit.functions[fit.function].unicodeFuncStr)
self.fitDataManEntry.delete(0,'end')
string = ''
for param in fit.functions[fit.function].unicodeParametersStr:
string += param + '=1, '
string = string[:-2]
self.fitDataManEntry.insert(0,string)
#Placing the fitting buttons and setting fit status
self.fitDataAutoButton.grid(row=1, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fitDataManButton.grid(row=2, rowspan=1, column=1, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fitDataManEntry.grid(row=2, rowspan=1, column=2, columnspan=1, sticky='NEWS', padx=5, pady=5)
self.fitStatus.set("")
'''
This function calls the relevant function in Fitting.py to plot the raw data.
'''
def plotRawDataButtonFunc(self):
if(self.fileCheck==0):
fit.plotRawData(self.plotTitleEntry.get(),self.xAxisTitleEntry.get(),self.yAxisTitleEntry.get(),self.viewGrid.get())
'''
This function calls the relevant functions in Fitting.py to fit a polynomial
or a custom function automatically. It also sets the fit status and plots the
raw data with the best fit in case it is successful.
'''
def fitDataAutoButtonFunc(self):
#Checking if the data file was successfully opened
if(self.fileCheck==0):
#Calling the relevant function in Fitting.py to handle the rest
self.fitCheck = fit.guessParameters()
#Checking if the fit was successful and showing the plot.
if(self.fitCheck==0):
self.fitStatus.set("Fit attempt successful!")
fit.plotFitData(self.plotTitleEntry.get(),self.xAxisTitleEntry.get(),self.yAxisTitleEntry.get(),self.viewGrid.get(),self.viewParameters.get(),self.viewResiduals.get())
elif(self.fitCheck==1):
self.fitStatus.set("Max iterations performed but couldn't find a fit!")
elif(self.fitCheck==2):
self.fitStatus.set("Number of data points is less than number of fitting parameters!")
'''
This function calls the relevant functions in Fitting.py to fit a function manually
from the guess parameters input by the user.
'''
def fitDataManButtonFunc(self):
#Checking if the data file was successfully opened
if(self.fileCheck==0):
#Calling the relevant function in Fitting.py to handle the rest
self.fitCheck = fit.manualParameters(self.fitDataManEntry.get())
#Checking if the fit was successful and showing the plot.
if(self.fitCheck==0):
self.fitStatus.set("Fit attempt successful!")
fit.plotFitData(self.plotTitleEntry.get(),self.xAxisTitleEntry.get(),self.yAxisTitleEntry.get(),self.viewGrid.get(),self.viewParameters.get(),self.viewResiduals.get())
elif(self.fitCheck==1):
self.fitStatus.set("Max iterations performed but couldn't find a fit!")
elif(self.fitCheck==2):
self.fitStatus.set("Number of data points is less than number of fitting parameters!")
elif(self.fitCheck==3):
self.fitStatus.set("Invalid parameters!")
'''
This function resets everything in the GUI.
'''
def clear(self):
#RESETTING THINGS IN FITTING FILE
#Data related variables
fit.data = []
fit.x = []
fit.y = []
fit.y_err = []
fit.numberOfDataPoints = int
fit.ERR = bool
#Fit function related variables
fit.function = ''
fit.numberOfParameters = int
#Fitting variables
fit.fitStructure = []
fit.fitParameters = []
fit.fitErrors = []
fit.chiSquared = float
fit.redChiSquared = float
fit.redChiSquaredLimits = []
#RESETTING THINGS IN INTERFACE
#Frame 1
self.fileLocation = str
self.fileLocationEntry.configure(state='normal')
self.fileLocationEntry.delete(0,'end')
self.fileLocationEntry.insert(0, string='(no directory selected)')
self.fileLocationEntry.configure(state='readonly')
self.fileCheck = int
self.fileStatus.set("")
#Frame 2
self.filePreviewListBox.delete(0, 'end')
self.filePreviewListBox.configure(state='disable')
#Frame 3
self.plotTitleEntry.delete(0, 'end')
self.xAxisTitleEntry.delete(0, 'end')
self.yAxisTitleEntry.delete(0, 'end')
self.viewGrid.set(0)
self.viewParameters.set(0)
self.viewResiduals.set(0)
#Frame 4
self.fitType.set("None")
self.fitTypeStr.set("")
self.fitCheck = bool
self.fitStatus.set("")
self.fitDataAutoButton.grid_forget()
self.fitDataManButton.grid_forget()
self.fitDataManEntry.grid_forget()
'''
This function opens up a 'help' message box.
'''
def help(self):
self.infoText = 'How to use it:'\
'\n\u2022 Browse a .txt or .csv file with your data set. The file must have 2 columns (no errors along y-axis) or 3 columns (with errors along y-axis), separated by commas or spaces.'\
        '\n\u2022 You can then choose to plot the raw data or choose a function to fit the data to. The tool has the option for fitting polynomials of degree 0 up to 5 along with some other standard functions.'\
'\n\u2022 Click \'Fit data automatically\' if you want the tool to attempt a fit by itself. Or click \'Fit data using my guess\' after providing an initial guess for the fitting parameters if you want to attempt a fit manually.'\
'\n\u2022 You should see the best fit line if the routine runs successfully. You can add plot annotations, grid lines or view the fit parameters and residuals plot as you wish!'\
'\n\nNote on errors:'\
        '\nIn case no errors are provided along the y-axis, the error on the parameters and the chi-squared value are calculated based on an error of 1.00 (arbitrary unit) on each data point. Due to this, the errors on the fitting parameters don\'t have much meaning; however, the best fit can still be useful!'\
'\n\nHave fun plotting!'
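        # An accepted 3-column input file (x, y, y_error), comma- or space-separated,
        # would look like this (illustrative values):
        #   0.0, 1.02, 0.05
        #   1.0, 2.98, 0.05
        #   2.0, 5.10, 0.05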
tk.messagebox.showinfo('Help', self.infoText)
'''
This function opens up an 'about' message box showing author and copyright status.
'''
def about(self):
self.aboutText = 'CFit (Curve fitting Tool)\n\u24EA 2020 <NAME>\nSchool of Physics and Astronomy\nThe University of Manchester'
tk.messagebox.showinfo('About', self.aboutText)
####################################################################################
# MAIN FUNCTION #
####################################################################################
def main():
root = tk.Tk()
GUI(root)
root.mainloop()
if __name__ == '__main__':
main()
``` |
{
"source": "jitendragangwar123/cp",
"score": 4
} |
#### File: array/python3/5_move_all_negative_elements.py
```python
def sort(arr):
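    # Two-pointer partition: after this runs, all negative values sit on the left and
    # all non-negative values on the right; the relative order is not preserved.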
# Start index 0.
start = 0
# End index
end = len(arr)-1
while start <= end:
# Swap all positive value with last index end & decrease end by 1.
if arr[start] >= 0:
arr[start], arr[end] = arr[end], arr[start]
end -= 1
else:
# If arr[start] is not positive then increase start by 1.
start += 1
if __name__ == "__main__":
arr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
sort(arr)
print(arr)
```
#### File: array/python3/6_union_intersection.py
```python
def compute_union(arr1, arr2):
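    # Merge step of merge sort, adapted: both inputs are assumed to be sorted, and a
    # value present in both lists is added to the union only once.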
union = []
index1 = 0
index2 = 0
while (index1 < len(arr1)) and (index2 < len(arr2)):
if arr1[index1] < arr2[index2]:
union.append(arr1[index1])
index1 += 1
elif arr1[index1] > arr2[index2]:
union.append(arr2[index2])
index2 += 1
else:
union.append(arr2[index2])
index1 += 1
index2 += 1
while index1 < len(arr1):
union.append(arr1[index1])
index1 += 1
while index2 < len(arr2):
union.append(arr2[index2])
index2 += 1
return union
# using modified merge function
def compute_intersection(arr1, arr2):
intersection = []
index1 = 0
index2 = 0
while (index1 < len(arr1)) and (index2 < len(arr2)):
if arr1[index1] < arr2[index2]:
index1 += 1
elif arr1[index1] > arr2[index2]:
index2 += 1
else:
intersection.append(arr2[index2])
index1 += 1
index2 += 1
return intersection
if __name__ == "__main__":
arr1 = [1, 3, 4, 5, 7]
arr2 = [2, 3, 5, 6]
# arr1=[2, 5, 6]
# arr2=[4, 6, 8, 10]
union = compute_union(arr1, arr2)
print('union : ', union)
intersection = compute_intersection(arr1, arr2)
print('intersection : ', intersection)
```
#### File: array/python3/9_minimize_maximum_difference.py
```python
def minimize_maximum_difference(arr, k):
if len(arr) < 2:
return -1
# sort the list.
arr.sort()
# If we add/subtract k to each every element difference will be same.
difference = arr[-1]-arr[0] # one probable solution
# add k to min element and substract k from max element.
max = arr[-1]-k
min = arr[0]+k
# swap is max < min after add and substract.
if max < min:
max, min = min, max
# for all the intermediate elements.
for i in range(1, len(arr)-1):
# greedy approach.
# we can add k or substract k.
temp_max = arr[i]+k
temp_min = arr[i]-k
# If any of those value comes in range of (min -> max) that does not change the difference.
if (temp_max <= max) or (temp_min >= min):
continue
# Outside range (min -> max).
# update with that value which one add less penalty.
if abs(max-temp_min) < abs(temp_max-min):
min = temp_min
else:
max = temp_max
if difference < abs(max-min):
return difference
else:
return abs(max-min)
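# Worked trace (for reference) with arr1 = [1, 15, 10], k1 = 6:
#   sorted -> [1, 10, 15], initial difference = 15 - 1 = 14
#   max = 15 - 6 = 9, min = 1 + 6 = 7
#   middle element 10: 16 > max and 4 < min, and |9 - 4| = 5 < |16 - 7| = 9, so min = 4
#   result = min(14, |9 - 4|) = 5, which matches the expected answer for arr1/k1 below.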
if __name__ == "__main__":
arr1 = [1, 15, 10] # ANS 5
arr2 = [1, 5, 15, 10] # ANS 8
arr3 = [4, 6] # ANS 2
arr4 = [6, 10] # ANS 2
arr5 = [1, 10, 14, 14, 14, 15] # ANS 5
arr6 = [1, 2, 3] # ANS 2
k1 = 6
k2 = 3
k3 = 10
k4 = 3
k5 = 6
k6 = 2
print(minimize_maximum_difference(arr1, k1))
print(minimize_maximum_difference(arr2, k2))
print(minimize_maximum_difference(arr3, k3))
print(minimize_maximum_difference(arr4, k4))
print(minimize_maximum_difference(arr5, k5))
print(minimize_maximum_difference(arr6, k6))
``` |
{
"source": "JITENDRAMINDA/liveline",
"score": 2
} |
#### File: JITENDRAMINDA/liveline/ad.py
```python
from pyrogram import Client, Filters
import random
import time
from pyrogram.errors import FloodWait
app = Client ("ssss",bot_token="<KEY>",api_id=814511,api_hash="44462f0f278503255d5cc30941b617a9")
@app.on_message(Filters.channel)
def main(client, message):
if message.text == "OVER":
x = random.choice(1,5)
if x == 1 :
file = open("1.txt","r")
s = file.readlines()
file.close()
client.send_message(message.chat.id,s)
if x == 2 :
file = open("2.txt","r")
s = file.readlines()
file.close()
client.send_message(message.chat.id,s)
if x == 3 :
file = open("3.txt","r")
s = file.readlines()
file.close()
client.send_message(message.chat.id,s)
if x == 4 :
file = open("4.txt","r")
s = file.readlines()
file.close()
client.send_message(message.chat.id,s)
@app.on_message(Filters.command('set'))
def ran(client,message):
x = message.from_user.id
if x == 491634139 :
with open("message.text.split(' ')[1].txt","w") as file:
file.write(message.reply_to_message.text)
file.close()
message.reply("Done")
@app.on_message(Filters.private & Filters.sticker)
def forward(client, message):
client.send_message(message.chat.id,message.sticker.file_id )
app.run()
```
#### File: JITENDRAMINDA/liveline/del.py
```python
from pyrogram import Client, Filters
import time
app = Client ("ssss",bot_token="<KEY>",api_id=814511,api_hash="44462f0f278503255d5cc30941b617a9")
@app.on_message(Filters.channel)
def main(client, message):
file = open("sure.txt" , "r")
lines = file.readlines()
file.close()
for b in lines:
if b == "atv":
client.delete_messages(message.chat.id, message.message_id)
@app.on_message(Filters.command("atv"))
def main(client, message):
with open("sure.txt" , "w") as files:
files.write("atv")
files.close()
message.reply("okk, ab me message bhejane wali ki maa chodta tu soja")
@app.on_message(Filters.command("dtv"))
def main(client, message):
with open("sure.txt" , "w") as files:
files.write("dtv")
files.close()
message.reply("okk, ab tu message bhejane wali ki maa chod me rest krta")
app.run()
```
#### File: JITENDRAMINDA/liveline/ptb.py
```python
from telegram.ext import Updater
updater = Updater(token='<KEY>', use_context=True)
dispatcher = updater.dispatcher
from telegram.ext import CommandHandler
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm a bot, please talk to me!")
def echo(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
from telegram.ext import MessageHandler, Filters
echo_handler = MessageHandler(Filters.text, echo)
dispatcher.add_handler(echo_handler)
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
updater.start_polling()
``` |
{
"source": "JITENDRAMINDA/singh",
"score": 2
} |
#### File: JITENDRAMINDA/singh/bot.py
```python
from pyrogram import Client, Filters,Emoji
app = Client("session",bot_token="<KEY>",api_id=715451,api_hash="d2cba6f7bf5d1a45682da5bb9071a307")
#("session",bot_token="<KEY>",api_id=605563,api_hash="7f2c2d12880400b88764b9b304e14e0b")
@app.on_message(Filters.command("send"))
def forward(client, message):
x = client.get_chat_member(message.chat.id , message.from_user.id)
if x.status == "administrator" or x.status == "creator":
files = open("sure.txt","r")
x = files.readlines()
files.close()
for y in x:
z = y.split()
for f in z:
try:
client.forward_messages(str(f),message.chat.id,message.reply_to_message.message_id )
except:
message.reply("🔥 Sending Failed in " + f)
continue
message.reply("🔁 operation completed 🔁")
@app.on_message(Filters.command("add"))
def forward(client, message):
o = False
fil = open("sure.txt","r")
t = fil.readlines()
fil.close()
for h in t:
q = h.split()
for d in q:
if d.casefold() == message.text.split(" ")[1].casefold():
o = True
if not o :
x = client.get_chat_member(message.chat.id , message.from_user.id)
if x.status == "administrator" or x.status == "creator":
try:
client.send_message(message.text.split(" ")[1], "Powered by king")
y = client.get_chat(str(message.text.split(' ')[1])).title
file = open("sure.txt","a")
file.write(" " + message.text.split(" ")[1])
file.close()
message.reply("📶 The chat - "+str(y)+" ✅" + " added. 😋😝😜😍")
except:
message.reply("♻️ Bot is not a admin in this channel 😡🤬🤬")
if o:
message.reply("already added 😏😏")
@app.on_message(Filters.command('remove'))
def forward(client, message):
x = client.get_chat_member(message.chat.id , message.from_user.id)
if x.status == "administrator" or x.status == "creator":
try:
file = open("sure.txt" , "r")
u = file.readlines()
file.close()
for v in u:
lines = v.split()
del lines[lines.index(message.text.split(' ')[1])]
y = " ".join(str(x) for x in lines)
files = open("sure.txt" , "w")
files.write(y)
files.close()
message.reply("💾 Done, The chat_id ```" + message.text.split(' ')[1] +"```🌐 has been removed to my database. ✅✅")
except:
message.reply("☢️ ID not found 🧐🙄😒")
@app.on_message(Filters.command('list'))
def forward(client, message):
x = client.get_chat_member(message.chat.id , message.from_user.id)
if x.status == "administrator" or x.status == "creator":
file = open("sure.txt" , "r")
u = file.readlines()
file.close()
for v in u :
message.reply("🏘️ List of Chat_ids in my database are ```" + str(v) + "```.")
@app.on_message(Filters.command('clear1') & Filters.user(491634139))
def forward(client, message):
with open("sure.txt" , "w") as file:
file.write("cfamovies")
file.close()
message.reply("☢️ Done, Success ✅✅")
@app.on_message(Filters.private)
def forward(client, message):
if not message.from_user.id == 491634139:
message.reply("""♻️ The bot is Promotion bot created by a wonderful person .
Work only in groups. ✅✅
How to use:
👉 add a channel
1. ```/add @username```
👉 remove a channel
2. ```/remove @username```
👉 list of channels
3. ```/list```
👉 send a list
4. reply a list of Channel to ```/send``` for send it to all Channels.
only Admin exist that's command in supergroups. ✍️
Powered by king Promotion 👊. """)
app.run()
```
#### File: JITENDRAMINDA/singh/math.py
```python
import pynewtonmath as newton
from pyrogram import Client, Filters
from pyrogram.errors import FloodWait
import time
app = Client("mnnn",bot_token="8<PASSWORD>:<KEY>",api_id=768402,api_hash="f6420bf67303614279049d48d3e670f6")
@app.on_message(Filters.command("derive"))
def main(client, message):
x = message.text.split(' ')[1]
client.send_message(message.chat.id,newton.derive(x) + "thats okk")
app.run()
``` |
{
"source": "JITENDRAMINDA/spin",
"score": 2
} |
#### File: JITENDRAMINDA/spin/tst.py
```python
from pyrogram import Client, Filters, Emoji
import random
import time
app = Client("session",bot_token="<KEY>",api_id=715451,api_hash="d2cba6f7bf5d1a45682da5bb9071a307")
@app.on_message(Filters.private & Filters.command("start"))
def ran(client, message):
    message.reply("""♻️ This is antiusername, created by a wonderful [person](https://t.me/Google_console) ✍️.
Add me to your group and I will automatically delete messages containing usernames from non-admins.""")
@app.on_message(Filters.group)
def main(client, message):
    # Delete posts that have no sender (e.g. anonymous/channel messages) outright.
    if not message.from_user:
        client.delete_messages(message.chat.id, message.message_id)
        return
    b = client.get_chat_member(message.chat.id, message.from_user.id).status
    # Only non-admin members have their @username mentions removed.
    if b not in ("administrator", "creator"):
        if message.text and "@" in message.text:
            client.delete_messages(message.chat.id, message.message_id)
app.run()
``` |
{
"source": "JITENDRAMINDA/sssnnn",
"score": 2
} |
#### File: JITENDRAMINDA/sssnnn/formy.py
```python
from pyrogram import Client, Filters
TOKEN = "639957559:AAFbwAStH_GXBgUVFxC93CCsbBM5MSA-Piw"
app = Client(TOKEN, 605563, "7f2c2d12880400b88764b9b304e14e0b")
@app.on_message(Filters.command("delete"))
def main(client, message):
    # /delete <start_id> <end_id>: delete every message id in [start_id, end_id).
    start_id = int(message.text.split(' ')[1])
    end_id = int(message.text.split(' ')[2])
    for msg_id in range(start_id, end_id):
        try:
            client.delete_messages(message.chat.id, msg_id)
        except Exception:
            continue
@app.on_message(Filters.channel & ~ Filters.edited)
def main(client, message):
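    # Broadcast handler: for each source channel id listed in bullets.txt, ferraris.txt
    # or sessionss.txt, re-send the incoming post (in bold) to every destination chat in
    # the matching *i.txt list, and append "<source_msg_id> <dest_msg_id>" to that
    # destination's <chat_id>.txt so later edits can be propagated to the copies.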
file = open("bullets.txt" , "r")
s = file.readlines()
file.close()
for d in s:
if message.chat.id == int(d):
file = open("bulleti.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
try:
mes = client.send_message( int(x), "**" + message.text + "**" )
fille = open(str(x)+".txt","r")
n = fille.readlines()
fille.close()
for t in n:
fie = open(str(x)+".txt","w")
fie.write(t +" " + str(message.message_id) + " " + str(mes.message_id))
fie.close()
except:
continue
fil = open("ferraris.txt" , "r")
q = fil.readlines()
fil.close()
for e in q:
if message.chat.id == int(e):
file = open("ferrarii.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
try:
mes = client.send_message( int(x), "**" + message.text + "**" )
fille = open(str(x)+".txt","r")
n = fille.readlines()
fille.close()
for t in n:
fie = open(str(x)+".txt","w")
fie.write(t +" " + str(message.message_id) + " " + str(mes.message_id))
fie.close()
except:
continue
fl = open("sessionss.txt" , "r")
h = fl.readlines()
fl.close()
for e in h:
if message.chat.id == int(e):
file = open("sessionsi.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
try:
mes = client.send_message( int(x), "**" + message.text + "**" )
fille = open(str(x)+".txt","r")
n = fille.readlines()
fille.close()
for t in n:
fie = open(str(x)+".txt","w")
fie.write(t +" " + str(message.message_id) + " " + str(mes.message_id))
fie.close()
except:
continue
@app.on_message(Filters.channel & Filters.edited)
def main(client, message):
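    # Edit-sync handler: when a tracked source post is edited and update.txt says "on",
    # look up the destination message ids recorded in <dest_chat_id>.txt and apply the
    # same edit to each forwarded copy.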
file = open("ferraris.txt" , "r")
s = file.readlines()
file.close()
for d in s:
if message.chat.id == int(d):
filer = open("update.txt" , "r")
m = filer.readlines()
filer.close()
for l in m:
if l == "on":
file = open("ferrarii.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for o in p:
files = open(str(o)+".txt" , "r")
d = files.readlines()
files.close()
for c in d:
x = c.split()
id = str(message.message_id)
if id in x:
try:
client.edit_message_text(int(o),int(x[x.index(id)+1]), "**" + message.text + "**" )
except:
continue
fil = open("bullets.txt" , "r")
f = fil.readlines()
fil.close()
for d in f:
if message.chat.id == int(d):
filer = open("update.txt" , "r")
m = filer.readlines()
filer.close()
for l in m:
if l == "on":
file = open("bulleti.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for o in p:
files = open(str(o)+".txt" , "r")
d = files.readlines()
files.close()
for c in d:
x = c.split()
id = str(message.message_id)
if id in x:
try:
client.edit_message_text(int(o),int(x[x.index(id)+1]), "**" + message.text + "**" )
except:
continue
fl = open("sessionss.txt" , "r")
f = fl.readlines()
fl.close()
for d in f:
if message.chat.id == int(d):
filer = open("update.txt" , "r")
m = filer.readlines()
filer.close()
for l in m:
if l == "on":
file = open("sessionsi.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for o in p:
files = open(str(o)+".txt" , "r")
d = files.readlines()
files.close()
for c in d:
x = c.split()
id = str(message.message_id)
if id in x:
try:
client.edit_message_text(int(o),int(x[x.index(id)+1]), "**" + message.text + "**" )
except:
continue
@app.on_message(Filters.command('add') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 2:
if len(message.text.split(' ')[1]) == 14:
with open(message.text.split(" ")[2] + ".txt" , "r") as file:
lines = file.readlines()
file.close()
for line in lines:
files = open(message.text.split(" ")[2] + ".txt" , "w")
files.write(line + " " + message.text.split(' ')[1])
files.close()
with open(message.text.split(' ')[1]+".txt" , "w") as g:
g.write("001 002")
g.close()
message.reply("💾 Done, The chat_id ```" + message.text.split(' ')[1] +"```🌐 has been added to my database. ✅✅")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
@app.on_message(Filters.command('remove') & Filters.user(491634139))
def forward(client, message):
if len(message.text.split(' ')) > 2:
if len(message.text.split(' ')[1]) == 14:
file = open(message.text.split(" ")[2] + ".txt" , "r")
u = file.readlines()
file.close()
for v in u:
lines = v.split()
del lines[lines.index(message.text.split(' ')[1])]
y = " ".join(str(x) for x in lines)
files = open(message.text.split(" ")[2] + ".txt" , "w")
files.write(y)
files.close()
message.reply("💾 Done, The chat_id ```" + message.text.split(' ')[1] +"```🌐 has been removed to my database. ✅✅")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
@app.on_message(Filters.command('clear') & Filters.user(491634139))
def forward(client, message):
file = open(message.text.split(" ")[1] + ".txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
fie = open(str(x)+".txt","w")
fie.write("001 002")
fie.close()
message.reply("☢️ Done, Editing data cleared ✅✅")
@app.on_message(Filters.command('list') & Filters.user(491634139))
def forward(client, message):
file = open(message.text.split(" ")[1] + ".txt" , "r")
u = file.readlines()
file.close()
for v in u :
message.reply("🏘️ List of Chat_ids in my database are ```" + str(v) + "```. Its can be change. ✅✅")
@app.on_message(Filters.command('sets') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 2:
if len(message.text.split(' ')[1]) == 14:
with open(message.text.split(' ')[2] + '.txt', 'w') as file:
file.write(message.text.split(' ')[1])
file.close()
message.reply("🌐 Done, Now my source chat is ```" + message.text.split(' ')[1] + "```. I will try to forward messages from this chat. ✅✅")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
@app.on_message(Filters.command('setupdate') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 1:
with open('update.txt', 'w') as file:
file.write(message.text.split(' ')[1])
file.close()
message.reply("🌐 Done,Now my message update status is ```" + message.text.split(' ')[1] + "```.✅✅")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
@app.on_message(Filters.command('source') & Filters.user(491634139) )
def forward(client, message):
with open(message.text.split(" ")[1] + '.txt', 'r') as file:
x = file.readlines()
file.close()
for y in x:
message.reply("🌐 My source chat is ```" + y + "```. I am trying to forward messages from this chat. ✅✅")
@app.on_message(Filters.command('get') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 1:
if len(message.text.split(' ')[1]) == 14:
x = client.get_chat(int(message.text.split(' ')[1])).title
message.reply("📶 This chat name is - "+str(x)+" ✅")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
else:
message.reply("💼 Please write a valid chat id. ✅✅ ")
@app.on_message(Filters.command('update') & Filters.user(491634139) )
def forward(client, message):
with open('update.txt', 'r') as file:
x = file.readlines()
file.close()
for y in x:
message.reply("🌐 My current message update status is ```" + y + "```. ✅✅")
@app.on_message(Filters.command("start"))
def forward(client, message):
if message.from_user.id == 491634139:
message.reply("♻️ Welcome to your LineBot . ✅✅")
else:
message.reply("♻️ You need admins permission to use my functions. ✅✅")
@app.on_message(Filters.private)
def forward(client, message):
if not message.from_user.id == 491634139:
message.reply("♻️ You need admins permission to use my functions. ✅✅")
app.run()
``` |
{
"source": "jitendrapitchuka/Recommendation_system",
"score": 2
} |
#### File: Recommendation_system/accounts/models.py
```python
from django.db import models
from django.contrib.auth.models import User
#class User(auth.models.User,auth.models.PermissionsMixin):
# def __str__(self):
# return (self.username)
class movie(models.Model):
index=models.IntegerField(primary_key=True)
movie_id=models.IntegerField()
title=models.CharField(max_length=120)
genres=models.CharField(max_length=130)
imdb_id=models.IntegerField()
tmdb_id=models.IntegerField()
Image_url=models.URLField(max_length=200)
text=models.TextField(max_length=400)
def __str__(self):
return self.title
class fav(models.Model):
user=models.ForeignKey(User, on_delete=models.CASCADE)
movie=models.ForeignKey(movie, on_delete=models.CASCADE)
```
#### File: Recommendation_system/accounts/views.py
```python
from django.shortcuts import render,get_object_or_404,HttpResponseRedirect,redirect
from django.views.generic import TemplateView,CreateView,DetailView,View
from django.urls import reverse_lazy,reverse
from . import forms
from django.contrib import messages
from django.contrib.auth import views as auth_views
from .models import movie,fav
from django.contrib.auth.models import User
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import sqlite3
class SignUp(CreateView):
form_class=forms.UserCreateForm
success_url=reverse_lazy('accounts:login')
template_name='signup.html'
def add_movie(request,pk):
# if fav.objects.filter(movie=pk,user=request.user.pk).exists():
# fav.objects.filter(movie=pk,user=request.user.pk).delete()
# else:
# print(request.user.id)
# user=User.objects.filter(id=request.user.id)
# print(user)
# fav(movie=pk,user=user).save()
# return HttpResponseRedirect('accounts/movie_detail.html')
if fav.objects.filter(movie=pk,user=request.user.pk).exists():
fav.objects.filter(movie=pk,user=request.user.pk).delete()
messages.success(request, 'Removed from list.')
else:
print(request.user.id)
movie_obj = movie.objects.filter(index=pk).first()
user=User.objects.filter(id=request.user.id).first()
print(user)
fav_obj = fav(movie=movie_obj,user=user)
messages.success(request, 'Added to list.')
fav_obj.save()
return redirect('detail',pk)
def search_results(request):
if request.method=='POST':
searched=request.POST['searched']
movies_obj=movie.objects.filter(title__contains=searched)
return render(request, 'search.html',{'movies_obj':movies_obj})
else:
return render(request, 'search.html',{})
def my_list(request):
temp=fav.objects.filter(user__id=request.user.id)
return render(request, 'my_list.html',{'temp':temp})
def Homepage(request):
# movies_list=movie.objects.all()
#return render(request, 'index.html',{'movies_list':movies_list})
posts = movie.objects.all() # fetching all post objects from database
p = Paginator(posts, 18) # creating a paginator object
# getting the desired page number from url
page_number = request.GET.get('page')
try:
page_obj = p.get_page(page_number) # returns the desired page object
except PageNotAnInteger:
# if page_number is not an integer then assign the first page
page_obj = p.page(1)
except EmptyPage:
# if page is empty then return last page
page_obj = p.page(p.num_pages)
context = {'page_obj': page_obj,'posts':posts}
# sending the page object to index.html
return render(request, 'index.html', context)
class thankspage(TemplateView):
template_name='logout.html'
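# Movie detail view that also builds content-based recommendations: it loads all movies from SQLite into pandas, vectorizes genres/title/text with CountVectorizer and ranks titles by cosine similarity.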
class image_detailview(View):
movie = movie
def get(self, request, pk):
movies_detail = self.movie.objects.get(pk=pk)
print(movies_detail)
# Create your connection.
cnx = sqlite3.connect('db.sqlite3')
df = pd.read_sql_query("SELECT * FROM accounts_movie", cnx)
#print(df.head())
features = ['genres', 'title','text']
for f in features:
df[f] = df[f].fillna('')
def combineFeatures(row):
return row['genres'] + " " + row['title']+" "+row['text']
df['combineFeatures'] = df.apply(combineFeatures, axis = 1)
print(df['combineFeatures'])
cv = CountVectorizer()
count_matrix = cv.fit_transform(df['combineFeatures'])
cosine_sim = cosine_similarity(count_matrix)
movie_obj = self.movie.objects.filter(index=pk).first()
movie_user_likes=movie_obj.title
print(movie_user_likes)
def get_index_from_title(title):
return df[df.title == title]["index"].values[0]
movie_index = get_index_from_title(movie_user_likes)
print(movie_index)
similar_movies = list(enumerate(cosine_sim[movie_index]))
sorted_similar_movies = sorted(similar_movies, key=lambda x:x[1], reverse=True)
def get_title_from_index(index):
return df[df.index == index]["index"].values[0]
i=0
li=[]
for x in sorted_similar_movies:
get_index = get_title_from_index(x[0])
recommendation_list_obj=movie.objects.get(index=get_index)
li.append(recommendation_list_obj)
print(get_title_from_index(x[0]))
i=i+1
if i>17:
break
print(li)
return render(request,'accounts/movie_detail.html',{'li':li,'movie':movies_detail})
#class HomePage(TemplateView):
# template_name='index.html'
``` |
{
"source": "jitendrapurbey/setopati-reader",
"score": 3
} |
#### File: setopati-reader/reader/feed.py
```python
from typing import Dict, List # noqa
# Third party imports
import feedparser
import html2text
# Reader imports
from reader import URL
_CACHED_FEEDS = dict() # type: Dict[str, feedparser.FeedParserDict]
def _feed(url=URL): # type: (str) -> feedparser.FeedParserDict
"""Cache contents of the feed, so it's only read once"""
if url not in _CACHED_FEEDS:
_CACHED_FEEDS[url] = feedparser.parse(url)
return _CACHED_FEEDS[url]
def get_site(url=URL): # type: (str) -> str
"""Get name and link to web site of the feed"""
info = _feed(url).feed
return u"{info.title} ({info.link})".format(info=info)
def get_article(article_id, links=False, url=URL):
# type: (str, bool, str) -> str
"""Get article from feed with the given ID"""
articles = _feed(url).entries
try:
article = articles[int(article_id)]
except (IndexError, ValueError):
max_id = len(articles) - 1
msg = "Unknown article ID, use ID from 0 to {}".format(max_id)
raise SystemExit("Error: {}".format(msg))
# Get article as HTML
try:
html = article.content[0].value
except AttributeError:
html = article.summary
# Convert HTML to plain text
to_text = html2text.HTML2Text()
to_text.ignore_links = not links
text = to_text.handle(html)
return u"# {}\n\n{}".format(article.title, text)
def get_titles(url=URL): # type: (str) -> List[str]
"""List titles in feed"""
articles = _feed(url).entries
return [a.title for a in articles]
``` |
{
"source": "jitendravarma/demoApp",
"score": 2
} |
#### File: api/v1/views.py
```python
import json
from collections import OrderedDict
from core.models import Text
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import TextSerializer, UserSerializer
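# Helpers that wrap API payloads in the project's response envelope: {"header": {"status": "1"}, "body": ...} on success and {"header": {"status": "0"}, "errors": ...} on serializer errors.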
def create_response(response_data):
"""
method used to create response data in given format
"""
response = OrderedDict()
response["header"] = {"status": "1"}
response["body"] = response_data
return response
def create_serializer_error_response(errors):
"""
method is used to create error response for serializer errors
"""
error_list = []
for k, v in errors.items():
if isinstance(v, dict):
_, v = v.popitem()
d = {}
d["field"] = k
d["field_error"] = v[0]
error_list.append(d)
return OrderedDict({"header": {"status": "0"}, "errors": {
"errorList": error_list}})
class UpdateProfileAPIView(APIView):
"""update user's profile using this view"""
serializer_class = UserSerializer
permission_classes = (IsAuthenticated,)
def get(self, request):
source = UserSerializer(self.request.user)
return Response(create_response({"results": source.data}))
def post(self, request, *args, **kwargs):
user_serializer = UserSerializer(request.user, data=request.data)
if user_serializer.is_valid():
user_serializer.save()
return Response(create_response(
{"msg": "Profile updated successfully"}))
else:
return Response(
create_serializer_error_response(user_serializer.errors),
status=403)
def put(self, request):
image_file = request.data["image_file"]
user = request.user
user.profile_pic = image_file
user.save()
return Response(create_response({"msg": "Profile pic updated successfully!"}))
class UploadJson(APIView):
permission_classes = [
AllowAny,
]
"""
this view is used to upload json file
"""
def post(self, request, *args, **kwargs):
try:
json_file = request.data.get('file')
items = json.loads(json_file.read().decode("utf-8"))
for item in items:
text, _ = Text.objects.get_or_create(data_id=item['id'])
text.user_id = item['userId'] if 'userId' in item else ""
text.title = item['title'] if 'title' in item else ""
text.body = item['body'] if 'body' in item else ""
text.save()
status = 200
res_data = create_response({"msg": "File uploaded successfully"})
except Exception as error:
print(error)
status = 500
res_data = create_serializer_error_response({"msg": ["Something went wrong"]})
return Response(res_data, status=status)
class TextAPIView(APIView):
"""api view to render serailized text objects"""
serializer_class = TextSerializer
permission_classes = (IsAuthenticated,)
def get(self, request):
source = TextSerializer(Text.objects.all(), many=True)
return Response(create_response({"results": source.data}))
```
#### File: management/commands/load_data.py
```python
import os
import json
from forex_python.converter import CurrencyRates, CurrencyCodes
from django.core.management.base import BaseCommand, CommandError
from core.models import Currency
def load_currency():
print("Adding currencies..")
currencies = CurrencyRates()
code = CurrencyCodes()
for key in currencies.get_rates('USD').keys():
symbol = code.get_symbol(key)
Currency.objects.get_or_create(name=key, symbol=symbol)
print("Done..")
class Command(BaseCommand):
def handle(self, *args, **options):
load_currency()
``` |
{
"source": "jitendravarma/inventory",
"score": 2
} |
#### File: inventory/core/views.py
```python
from django.http import FileResponse
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView, View
from .models import PurchaseOrder
from .utils import generate_invoice_pdf
# Create your views here.
class CompanyView(TemplateView):
"""
this view renders company detail page
"""
template_name = 'frontend/company.html'
def get_context_data(self, **kwargs):
context = super(CompanyView, self).get_context_data(**kwargs)
context['company_page'] = "active"
return context
class PurchaseView(TemplateView):
"""
this view renders the purchase page
"""
template_name = 'frontend/purchase.html'
def get_context_data(self, **kwargs):
context = super(PurchaseView, self).get_context_data(**kwargs)
context['purchase_page'] = "active"
return context
class ProductView(TemplateView):
"""
this view renders the product page
"""
template_name = 'frontend/product.html'
def get_context_data(self, **kwargs):
context = super(ProductView, self).get_context_data(**kwargs)
context['product_page'] = "active"
return context
class DownloadInvoiceView(View):
"""
this view generates a purchase order invoice PDF and returns it as a download
"""
def get(self, request, pk):
# po = get_object_or_404(PurchaseOrder, id=self.kwargs['pk'])
po = PurchaseOrder.objects.first()
data = {'name': po.product.name, 'company': po.company.name,
'no': po.order_number, 'quantity': po.quantity,
'price': po.product.price, 'rate': po.quantity * po.price}
invoice_path = generate_invoice_pdf(data)
pdf_file = open(invoice_path, 'rb')
response = FileResponse(pdf_file, content_type="application/pdf")
response["Content-Disposition"] = "attachment; filename=invoice.pdf"
return response
``` |
{
"source": "jitendravarma/newscout_web",
"score": 2
} |
#### File: management/commands/get_initial_data.py
```python
from django.core.management.base import BaseCommand
from core.models import *
from api.v1.serializers import ArticleSerializer
from core.utils import create_index, ingest_to_elastic
from django.core import serializers
from collections import OrderedDict
class Command(BaseCommand):
help = 'This command is used to export initial data for the newscout domain from the database to JSON fixture files'
batch = []
def handle(self, *args, **options):
app_list = OrderedDict()
domain = Domain.objects.get(domain_id="newscout")
categories = []
articles = []
sources = []
hash_tags = []
submenus = []
menus = Menu.objects.filter(domain=domain)
print("getting menus")
for m in menus:
categories.append(m.name)
for s in m.submenu.all():
categories.append(s.name)
submenus.append(s)
for h in s.hash_tags.all():
if h not in hash_tags:
hash_tags.append(h)
print("getting articles")
for cat in categories:
arts = Article.objects.filter(category=cat, domain=domain)[:200]
for art in arts:
articles.append(art)
if art.source not in sources:
sources.append(art.source)
for hashtag in art.hash_tags.all():
if hashtag not in hash_tags:
hash_tags.append(hashtag)
users = UserProfile.objects.all()
print("getting users")
for user in users:
for p in user.passion.all():
if p not in hash_tags:
hash_tags.append(p)
trendingarticles = []
print("getting trendingarticles")
for tart in TrendingArticle.objects.filter(domain=domain):
trendingarticles.append(tart)
for art in tart.articles.all():
articles.append(art)
if art.source not in sources:
sources.append(art.source)
for hashtag in art.hash_tags.all():
if hashtag not in hash_tags:
hash_tags.append(hashtag)
devices = Devices.objects.filter(user__in=users)
dailydigest = []
print("getting dailydigest")
for d in DailyDigest.objects.filter(device__in=devices):
dailydigest.append(d)
for art in d.articles.all():
if art.domain == domain:
articles.append(art)
if art.source not in sources:
sources.append(art.source)
for hashtag in art.hash_tags.all():
if hashtag not in hash_tags:
hash_tags.append(hashtag)
app_list['news_site.domain'] = [domain]
app_list['news_site.category'] = categories
app_list['news_site.categoryassociation'] = CategoryAssociation.objects.filter(parent_cat__in=categories, child_cat__in=categories)
app_list['news_site.categorydefaultimage'] = CategoryDefaultImage.objects.filter(category__in=categories)
app_list['news_site.source'] = sources
app_list['news_site.hashtag'] = hash_tags
app_list['news_site.userprofile'] = users
app_list['news_site.article'] = articles
app_list['news_site.articlemedia'] = ArticleMedia.objects.filter(article__in=articles)
app_list['news_site.submenu'] = submenus
app_list['news_site.menu'] = menus
app_list['news_site.devices'] = Devices.objects.filter(user__in=users)
app_list['news_site.trendingarticle'] = trendingarticles
app_list['news_site.dailydigest'] = dailydigest
for name, values in app_list.items():
data = serializers.serialize("json", values)
file_name = name + '.json'
f = open(file_name, 'w')
f.write(data)
f.close()
```
#### File: management/commands/ingest_data.py
```python
import os
import sys
import zlib
import pytz
import time
import redis
import cPickle
from random import randint
from datetime import datetime
from dateutil.parser import parse
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from core.models import *
from api.v1.serializers import ArticleSerializer
from core.utils import create_index, ingest_to_elastic
from core.classify import RegexClassification
from article_scoring import ArticleScore
# prometheus stats
from prometheus_client import start_http_server, Gauge, Enum
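# Pops pickled article documents from a Redis list, stores them as Django models, ingests them into Elasticsearch in batches of 99 and exposes Prometheus metrics on port 8686.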
class Command(BaseCommand):
help = 'This command is used to ingest data from local disk cache'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.source_ingest = Gauge("total_ingest_count", "Total number of articles ingested", ['source', 'category'])
self.task_state = Enum("article_ingestion_state", "Article ingestion states", states=["running", "waiting"])
self.now = datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%Y-%m-%d")
self.redis = redis.Redis()
self.batch = []
self.sleep_time = 0
self.classify = RegexClassification()
self.score = ArticleScore()
def reset_stats(self):
"""
this method is used to reset stats to zero
"""
print("Resetting Stats")
for metric in self.source_ingest._metrics.keys():
source, category = metric
self.source_ingest.labels(source=source, category=category).set(0)
def add_arguments(self, parser):
parser.add_argument('--source', '-s', type=str, help='redis source name [Ex: theverge]')
parser.add_argument('--index', '-i', type=str, default='article', help='elastic search index name [default: article]')
def get_data_from_redis(self, source):
"""
this method returns data from redis
"""
return self.redis.lpop(source)
def parse_date(self, date_str):
try:
dt = parse(date_str)
return dt.astimezone(tz=pytz.UTC)
except Exception:
try:
ts = int(date_str)
return datetime.utcfromtimestamp(ts)
except Exception:
return None
def remove_char(self, tag, ch):
"""
this method removes given char from tag
"""
new_tag = [tag]
if ch in tag:
return tag.split(ch)
return new_tag
def remove_special_chars(self, tags):
"""
this method is used to remove special chars from tags
"""
new_tags = []
for tag in tags:
new_tags = new_tags + self.remove_char(tag, ";")
clean_tags = []
for tag in new_tags:
clean_tags = clean_tags + self.remove_char(tag, " & ")
final_tags = []
for tag in clean_tags:
final_tags = final_tags + self.remove_char(tag, " and ")
final_tags = [tag.replace("&", " ").replace(",", "").replace(":", "").replace("'", "").replace("#", "").replace("*", "").replace("(", "").replace(")", "").replace("@", "").replace("!", "").replace("-", " ").strip().lower() for tag in final_tags]
return final_tags
def get_tags(self, tags):
"""
this method will return tag name from tags objects
"""
tag_list = []
for tag in tags:
tag_list.append(tag["name"])
return tag_list
def create_model_obj(self, doc, index):
"""
this method is used to create django article model object
"""
title = doc["title"]
category = doc["category"]
source = doc["source"]
source_url = doc["source_url"]
cover_image = doc["cover_image"]
blurb = doc["blurb"]
full_text = doc.get("short_description") or doc.get("full_text", "")
published_on = self.parse_date(doc["published_on"])
if not published_on:
published_on = timezone.now()
author = doc.get("author", "")
author_twitter = doc.get("author_twitter", "")
video_data = doc.get("video_data", "")
images = doc["images"]
tags = doc["tags"]
if not cover_image:
if video_data:
cover_image = video_data[0].get("video_image", "")
if title and full_text:
if not Article.objects.filter(title=title).exists():
if category == "Uncategorised":
# apply regex based category only if article is uncategorised
# get category id from regex classification
category_id = self.classify.match(title)
category = Category.objects.get(id=category_id)
else:
category = Category.objects.get(name=category)
source, _ = Source.objects.get_or_create(name=source)
article_obj = Article.objects.create(
title=title,
source=source,
category=category,
source_url=source_url,
cover_image=cover_image,
blurb=blurb,
full_text=full_text,
published_on=published_on,
active=True
)
if len(images) > 1:
for img in images:
_ = ArticleMedia.objects.create(
article=article_obj,
category="image",
url=img
)
if len(video_data) > 0:
for video_dic in video_data:
_ = ArticleMedia.objects.create(
article=article_obj,
category="video",
url=video_dic.get("video_image", ""),
video_url=video_dic.get("video_url", "")
)
if len(tags) > 0:
tag_objs = []
new_tags = self.remove_special_chars(tags)
if new_tags:
for tag in new_tags:
tag_obj = HashTag.objects.filter(name=tag)
if tag_obj:
tag_objs.append(tag_obj.first())
else:
tag_obj = HashTag.objects.create(name=tag)
tag_objs.append(tag_obj)
article_obj.hash_tags.add(*tag_objs)
# calculate article score
score = self.score.calculate_score(doc)
serializer = ArticleSerializer(article_obj)
json_data = serializer.data
json_data["article_score"] = score
if json_data["hash_tags"]:
tag_list = self.get_tags(json_data["hash_tags"])
json_data["hash_tags"] = tag_list
self.batch.append(json_data)
if len(self.batch) == 99:
ingest_to_elastic(self.batch, index, index, 'id')
self.batch = []
print("Ingesting Batch To Elastic...!!!")
def handle(self, *args, **options):
if options['source'] is None:
raise CommandError("Option `--source=...` must be specified.")
# start prometheus http server for metrics
start_http_server(8686)
source = options['source']
index = options['index']
create_index(index)
try:
while True:
file_path = self.get_data_from_redis(source)
if file_path:
date = datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%Y-%m-%d")
self.task_state.state("running")
self.sleep_time = 0
if os.path.isfile(file_path):
doc = cPickle.loads(zlib.decompress(open(file_path).read()))
try:
self.create_model_obj(doc, index)
if date == self.now:
self.source_ingest.labels(source=doc.get("source", "source"), category=doc.get("category", "category")).inc()
else:
self.now = datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%Y-%m-%d")
# self.reset_stats()
self.source_ingest.labels(source=doc.get("source", "source"), category=doc.get("category", "category")).inc()
except Exception as e:
print("error in doc read")
print(e)
else:
msg = "Data file not found: {0}".format(file_path)
print(msg)
else:
self.task_state.state("waiting")
print("Sleeping...!!!")
time.sleep(10)
self.sleep_time += 10
if self.sleep_time >= 60:
if self.batch:
ingest_to_elastic(self.batch, index, index, 'id')
print("Ingesting Final Batch...!!!")
self.batch = []
self.sleep_time = 0
except KeyboardInterrupt:
sys.exit(0)
``` |
{
"source": "jitendravarma/user_management",
"score": 2
} |
#### File: user_management/core/models.py
```python
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
# Create your models here.
class BaseUserProfile(AbstractUser):
"""
Model class for base user.
"""
address = models.CharField(max_length=255, blank=True, null=True)
phone_no = models.CharField(max_length=10, blank=True, null=True)
middle_name = models.CharField(max_length=255, blank=True, null=True)
email_verified = models.BooleanField(default=False)
class Meta:
verbose_name = "Base User Profile"
verbose_name_plural = "Base User Profiles"
@property
def full_name(self):
if self.last_name and self.first_name:
return "%s %s" % (self.first_name, self.last_name)
elif self.first_name:
return "%s " % (self.first_name)
else:
return "%s " % (self.email)
def __str__(self):
return f"{self.email}, is_active {self.is_active}"
class OTPVerification(models.Model):
otp = models.CharField(max_length=5)
created_on = models.DateTimeField(auto_now=True)
user = models.ForeignKey(BaseUserProfile, on_delete=models.CASCADE)
class Meta:
verbose_name = "OTP Verification"
verbose_name_plural = "OTP Verifications"
def __str__(self):
return f"{self.user.email}, is_active {self.otp}, id: {self.id}"
class VerificationLink(models.Model):
user = models.ForeignKey(BaseUserProfile, blank=False, null=False,
on_delete=models.CASCADE)
hash_key = models.TextField(blank=False, null=False)
created_on = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.user.email}"
class ForgotPasswordLink(models.Model):
user = models.ForeignKey(BaseUserProfile, blank=False, null=False,
on_delete=models.CASCADE)
hash_key = models.TextField(blank=False, null=False)
created_on = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.user.email}"
```
#### File: user_management/core/views.py
```python
import json
from django.urls import reverse
from django.contrib import messages
from django.utils.encoding import force_text
from django.contrib.auth import login, logout
from django.utils.http import urlsafe_base64_decode
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import RedirectView, FormView, TemplateView
from .backends import EmailModelBackend
from .forms import (LoginForm, SignUpForm, OTPForm, FPEmailForm,
ChangePasswordForm)
from .models import (BaseUserProfile, OTPVerification,
VerificationLink, ForgotPasswordLink)
from .tasks import create_forgot_password_link
# Create your views here.
class IndexView(LoginRequiredMixin, TemplateView):
"""
Home view for user after redirection
"""
template_name = 'frontend/dashboard.html'
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['dashboard_page'] = "active"
user = self.request.user
context['email_verified'] = user.email_verified
return context
class LoginView(FormView):
"""
This view handles authentication of the user, when they first time logs in
redirects them to login page if not authenticated.
"""
form_class = LoginForm
template_name = 'frontend/login.html'
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
return context
def post(self, request):
form = LoginForm(request.POST)
email = form.data['email']
password = form.data['password']
if form.is_valid():
user_auth = EmailModelBackend()
user = user_auth.validate_password(password=password, email=email)
if user:
otp_id = OTPVerification.objects.get(user=user).id
return HttpResponseRedirect(
reverse('phone-verification-view') + f"?otp_id={otp_id}")
context = {
'form': form,
"csrf_token": form.data['csrfmiddlewaretoken'], 'email': email
}
return render(
request, context=context, template_name=self.template_name)
class LogOutView(RedirectView):
"""
logout view
"""
def get_redirect_url(self):
url = reverse("login-view")
logout(self.request)
return url
class SignupView(FormView):
"""
This view signs up new user and validates the form on the server side
"""
form_class = SignUpForm
template_name = 'frontend/sign-up.html'
def post(self, request, *args, **kwargs):
form = SignUpForm(request.POST)
email = form.data['email']
password = form.data['password']
if form.is_valid():
user = form.save()
otp_id = OTPVerification.objects.get(user=user).id
return HttpResponseRedirect(
reverse('phone-verification-view') + f"?otp_id={otp_id}")
context = {
'form': form, "csrf_token": form.data['csrfmiddlewaretoken'],
'email': email
}
return render(
request, context=context, template_name=self.template_name)
class OTPView(FormView):
"""
This view handles otp verification
"""
form_class = OTPForm
template_name = 'frontend/otp.html'
def get_context_data(self, **kwargs):
context = super(OTPView, self).get_context_data(**kwargs)
otp_id = self.request.GET.get('otp_id')
context["otp_id"] = otp_id
get_object_or_404(OTPVerification, id=otp_id)
return context
def post(self, request, *args, **kwargs):
form = OTPForm(request.POST)
otp = form.data['otp']
otp_id = form.data['otp_id']
if not form.is_valid():
context = {
'form': form, "csrf_token": form.data['csrfmiddlewaretoken'],
'otp_id': otp_id}
return render(
request, context=context, template_name=self.template_name)
else:
otp_verified = get_object_or_404(OTPVerification, id=otp_id)
user_auth = EmailModelBackend()
user = user_auth.authenticate(_id=otp_id, otp=otp)
if user:
login(self.request, user)
if "next" in self.request.GET:
url = self.request.GET["next"]
response = HttpResponseRedirect(url)
return response
else:
response = HttpResponseRedirect('/home')
return response
else:
messages.error(self.request, "Incorrect OTP entered")
return HttpResponseRedirect(
reverse('phone-verification-view') + f"?otp_id={otp_id}")
class LinkExpireView(TemplateView):
"""
This view is to redirect user after confirming email
"""
template_name = 'frontend/link-expire.html'
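# Activates the account and marks the email as verified when a valid verification link is opened, then deletes the link so it cannot be reused.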
class VerifyLinkView(TemplateView):
template_name = "frontend/verification.html"
def dispatch(self, request, *args, **kwargs):
if not VerificationLink.objects.filter(hash_key=self.kwargs["slug"]).exists():
return HttpResponseRedirect(reverse("link-expire-view"))
return super(VerifyLinkView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(VerifyLinkView, self).get_context_data(**kwargs)
slug = self.kwargs["slug"]
context["link"] = get_object_or_404(VerificationLink, hash_key=slug)
email = force_text(urlsafe_base64_decode(slug))
BaseUserProfile.objects.filter(email=email).update(
is_active=True, email_verified=True)
VerificationLink.objects.filter(hash_key=slug).delete()
return context
class ForgotPasswordView(FormView):
"""
This view confirms email for forgot password
"""
form_class = FPEmailForm
template_name = 'frontend/send-fp-mail.html'
def post(self, request, *args, **kwargs):
form = FPEmailForm(request.POST)
if not form.is_valid():
context = {
'form': form, "csrf_token": form.data['csrfmiddlewaretoken'], }
return render(
request, context=context, template_name=self.template_name)
email = form.data['email']
messages.error(self.request, "If your email exists in our database\
we will send you a link to change your password")
create_forgot_password_link.delay(email)
return HttpResponseRedirect(reverse('forgot-password-view'))
class ForgotPasswordLinkView(FormView):
"""
This view confirms email for forgot password
"""
form_class = ChangePasswordForm
template_name = 'frontend/forgot-password.html'
def dispatch(self, request, *args, **kwargs):
if not ForgotPasswordLink.objects.filter(hash_key=self.kwargs["slug"]).exists():
return HttpResponseRedirect(reverse("link-expire-view"))
return super(ForgotPasswordLinkView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ForgotPasswordLinkView, self).get_context_data(**kwargs)
slug = self.kwargs["slug"]
get_object_or_404(ForgotPasswordLink, hash_key=slug)
return context
def post(self, request, *args, **kwargs):
form = ChangePasswordForm(request.POST)
if not form.is_valid():
context = {
'form': form, "csrf_token": form.data['csrfmiddlewaretoken'], }
return render(
request, context=context, template_name=self.template_name)
slug = self.kwargs["slug"]
email = force_text(urlsafe_base64_decode(slug))
if BaseUserProfile.objects.filter(email=email).exists():
user = BaseUserProfile.objects.filter(email=email).first()
user.set_password(form.data['password'])
user.save()
ForgotPasswordLink.objects.filter(hash_key=slug).delete()
messages.error(self.request, "We have updated your password")
return render(
request, template_name=self.template_name)
```
#### File: user_management/user_management/celery.py
```python
import os
from celery import Celery
from kombu import Exchange, Queue
from django.conf import settings
QUEUE_DEFAULT = 'default'
CELERY_ENABLE_UTC = True
CELERY_ACCEPT_CONTENT = ["pickle"]
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_management.settings.local')
app = Celery('user_management')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
app.conf.update(
CELERY_QUEUES=(
Queue(QUEUE_DEFAULT, Exchange('default'), routing_key='default'),
),
CELERY_IGNORE_RESULT=True,
CELERYD_PREFETCH_MULTIPLIER=1,
CELERY_DEFAULT_QUEUE=QUEUE_DEFAULT,
CELERY_DEFAULT_EXCHANGE_TYPE='direct',
CELERY_DEFAULT_ROUTING_KEY='default',
)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
``` |
{
"source": "JitengMu/A-SDF",
"score": 2
} |
#### File: A-SDF/asdf/asdf_reconstruct.py
```python
import argparse
import json
import logging
import os
import random
import time
import torch
from torch.nn import functional as F
import numpy as np
import re
import asdf
import asdf.workspace as ws
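# Rejection sampling: draw points uniformly in the cube [-1, 1]^3, keep those inside the unit sphere and recurse if too few remain.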
def sample_uniform_points_in_unit_sphere(amount):
unit_sphere_points = np.random.uniform(-1, 1, size=(amount * 2 + 20, 3))
unit_sphere_points = unit_sphere_points[np.linalg.norm(unit_sphere_points, axis=1) < 1]
points_available = unit_sphere_points.shape[0]
if points_available < amount:
# This is a fallback for the rare case that too few points are inside the unit sphere
result = np.zeros((amount, 3))
result[:points_available, :] = unit_sphere_points
result[points_available:, :] = sample_uniform_points_in_unit_sphere(amount - points_available)
return result
else:
return unit_sphere_points[:amount, :]
def generation_atc_config(specs, reconstruction_codes_dir, reconstruction_models_dir=False, test_time_training=False, sampling_difficulty='easy', dataset_name='shape2motion'):
'''
Build the latent/articulation code dictionaries used for generation.
Modify angles_mapping below to choose the articulation angles to test on.
'''
if specs["NumAtcParts"]==1:
if dataset_name=='rbo':
lat_vec_dic = {}
atc_vec_dic = {}
if test_time_training==True:
model_vec_dic = {}
names = {'0001_000091_-021':[-38,-59],'0002_000091_-064':[-43,-16],'0003_000064_-021':[-21,-22],'0004_000114_-008':[-17,-56],
'0005_000101_-054':[-44,-25],'0006_000091_-062':[-35,-18],'0007_000049_-019':[-36,-74],'0008_000094_-041':[-27,-31],
'0009_000085_-038':[-52,-64],'0010_000119_-052':[-64,-34],'0011_000197_-014':[-34,-51],'0012_000040_-051':[-10,-48],
'0013_000076_-021':[-10,-61],'0014_000100_-021':[-34,-47],'0016_000122_-030':[-56,-44],
'0017_000115_-010':[-36,-67],'0018_000142_-049':[-83,-30],'0019_000092_-052':[-71,-36],'0020_000106_-051':[-71,-37]}
for name in names.keys():
for trg in names[name]:
trg += 20 # compensate for the angle difference between shape2motion and rbo dataset
if test_time_training==True:
model_vec_dic[(name[:4], trg)] = os.path.join(os.path.join(reconstruction_models_dir, dataset_name, name+'.pth'))
lat_vec_dic[(name[:4], trg)] = torch.load(os.path.join(reconstruction_codes_dir, dataset_name, name+'.pth'))
atc_vec_dic[(name[:4], trg)] = np.array([trg])
else:
if specs["Class"]=='laptop':
test_instances = [66,67,68,69,70,72,74,75,78,80,82]
# angle range [-72,18]
if sampling_difficulty=='easy':
angles_mapping = {-72:[-63,-45,-27],-54:[-63,-45,9],-36:[-45,-27,9],-18:[-27,-9,-63],0:[-9,9,-27],18:[9,-9,-36]} # 6-angle dataset
elif sampling_difficulty=='hard':
angles_mapping = {-72:[-60,15,-48],-54:[-69,-39,3],-36:[-24,0,-57],-18:[-30,-42,-57],0:[15,-30,-42],18:[-12,-24,-30]}
else:
angles_mapping = {-36:np.arange(-72,19)} # 6-angle dataset animation
if specs["Class"]=='stapler' or specs["Class"]=='washing_machine' or specs["Class"]=='door' or specs["Class"]=='oven':
if specs["Class"]=='stapler':
test_instances = [24,25,26,27,28,29,30,31,32,33]
if specs["Class"]=='washing_machine':
test_instances = [5,26,27,34,37,42,43,60]
if specs["Class"]=='door':
test_instances = [10,27,38,59,86]
if specs["Class"]=='oven':
test_instances = [2,7,10,16,18,28,30,33,34,41]
# angle range [0,90]
if sampling_difficulty=='easy':
angles_mapping = {18:[9,27,81],36:[27,45,81],54:[45,63,9],72:[63,81,9],90:[81,63,9]} # 6-angle dataset
elif sampling_difficulty=='hard':
angles_mapping = {0:[39,60,24],18:[30,60,66],36:[69,84,21],54:[75,3,21],72:[33,12,66],90:[57,30,66]}
else:
angles_mapping = {54:np.arange(0,91)} # 6-angle dataset animation
lat_vec_dic = {}
atc_vec_dic = {}
if test_time_training==True:
model_vec_dic = {}
for test_ins in test_instances:
for start in angles_mapping.keys():
for trg in angles_mapping[start]:
if test_time_training==True:
model_vec_dic[(test_ins, start, trg)] = os.path.join(os.path.join(reconstruction_models_dir, dataset_name, '{:04d}art{:04d}.pth'.format(test_ins, start)))
lat_vec_dic[(test_ins, start, trg)] = torch.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}.pth'.format(test_ins, start)))
atc_vec_dic[(test_ins, start, trg)] = np.array([trg])
if specs["NumAtcParts"]==2:
angles_mapping = {(0,0):[20,20],(0,20):[20,30],(0,40):[20,20], \
(20,0):[30,20],(20,20):[10,30],(20,40):[10,20], \
(40,0):[20,20],(40,20):[20,10],(40,40):[20,20]}
if specs["Class"]=='eyeglasses':
test_instances = [34,35,36,37,38,39,40,41,43]
# eyeglasses angle range [0,50]
if sampling_difficulty=='easy':
# easy angles: aligned with interpolation
angles_mapping = {(0,0):[[25,15]],(0,10):[[25,25]],(0,20):[[15,35]],(0,30):[[25,15]],(0,40):[[15,25]],(0,50):[[25,35]], \
(10,0):[[15,25]],(10,10):[[5,25]],(10,20):[[25,35]],(10,30):[[25,15]],(10,40):[[25,25]],(10,50):[[25,25]], \
(20,0):[[35,25]],(20,10):[[15,35]],(20,20):[[35,15]],(20,30):[[15,15]],(20,40):[[35,25]],(20,50):[[35,25]], \
(30,0):[[25,25]],(30,10):[[15,25]],(30,20):[[15,35]],(30,30):[[25,35]],(30,40):[[15,45]],(30,50):[[25,25]], \
(40,0):[[25,5]],(40,10):[[45,15]],(40,20):[[25,35]],(40,30):[[25,15]],(40,40):[[45,35]],(40,50):[[35,35]], \
(50,0):[[35,15]],(50,10):[[25,25]],(50,20):[[35,35]],(50,30):[[45,15]],(50,40):[[35,25]],(50,50):[[45,45]]}
angles_mapping = angles_mapping
else:
angles_mapping = {}
angles_mapping[(40,20)] = []
for i in range(0,55,5):
for j in range(0,55,5):
angles_mapping[(40,20)].append([i,j])
print(angles_mapping)
if specs["Class"]=='refrigerator':
# refrigerator angle range [40,90]
test_instances = [6,17,27,46,61,65,78]
#test_instances = [61]
if sampling_difficulty=='easy':
angles_mapping = {(90,90):[[65,75]],(90,80):[[65,65]],(90,70):[[75,55]],(90,60):[[65,75]],(90,50):[[75,65]],(90,40):[[65,55]], \
(80,90):[[75,65]],(80,80):[[85,65]],(80,70):[[65,55]],(80,60):[[65,75]],(80,50):[[65,65]],(80,40):[[65,65]], \
(70,90):[[55,65]],(70,80):[[75,55]],(70,70):[[55,75]],(70,60):[[75,75]],(70,50):[[55,65]],(70,40):[[55,65]], \
(60,90):[[65,65]],(60,80):[[75,65]],(60,70):[[75,55]],(60,60):[[65,55]],(60,50):[[75,45]],(60,40):[[65,65]], \
(50,90):[[65,85]],(50,80):[[45,75]],(50,70):[[65,55]],(50,60):[[65,75]],(50,50):[[45,65]],(50,40):[[55,55]], \
(40,90):[[55,75]],(40,80):[[65,65]],(40,70):[[55,55]],(40,60):[[55,75]],(40,50):[[55,65]],(40,40):[[45,45]]}
else:
angles_mapping = {}
angles_mapping[(90,70)] = []
for i in range(90,35,-5):
for j in range(90,35,-5):
angles_mapping[(90,70)].append([i,j])
print(angles_mapping)
lat_vec_dic = {}
atc_vec_dic = {}
if test_time_training==True:
model_vec_dic = {}
for test_ins in test_instances:
for start in angles_mapping.keys():
for trg in angles_mapping[start]:
if test_time_training==True:
model_vec_dic[(test_ins, start[0], start[1], trg[0], trg[1])] = os.path.join(os.path.join(reconstruction_models_dir, dataset_name, '{:04d}art{:04d}{:04d}.pth'.format(test_ins, start[0], start[1])))
lat_vec_dic[(test_ins, start[0], start[1], trg[0], trg[1])] = torch.load(
os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}{:04d}.pth'.format(test_ins, start[0], start[1]))
)
atc_vec_dic[(test_ins, start[0], start[1], trg[0], trg[1])] = np.array([trg])
if test_time_training==True:
return lat_vec_dic, atc_vec_dic, model_vec_dic
else:
return lat_vec_dic, atc_vec_dic
def interpolation_atc_config(specs, reconstruction_codes_dir, dataset_name='shape2motion'):
'''
Build the code dictionaries for interpolation: latent codes are taken from the
start configuration, while articulation codes are interpolated between the
reconstructed start and end configurations.
Modify angles_mapping below to choose the articulation angles to test on.
'''
if specs["NumAtcParts"]==1:
if specs["Class"]=='laptop':
test_instances = [66,67,68,69,70,72,74,75,78,80,82]
angles_mapping = {(-72,18):[13/30,17/30,23/30],(-54,0):[2/18,4/18,5/18],(-36,18):[5/18,7/18,11/18],(18,-54):[17/24,15/24,7/24]}
# # extrapolation
# angles_mapping = {(-54,-72):[1/6,2/6,3/6,4/6], (0,18):[1/6,2/6,3/6,4/6]}
if specs["Class"]=='stapler' or specs["Class"]=='washing_machine' or specs["Class"]=='oven' or specs["Class"]=='door':
if specs["Class"]=='stapler':
test_instances = [24,25,26,27,28,29,30,31,32,33]
if specs["Class"]=='washing_machine':
test_instances = [5,26,27,34,37,42,43,60]
if specs["Class"]=='door':
test_instances = [10,27,38,59,86]
if specs["Class"]=='oven':
test_instances = [2,7,10,16,18,28,30,33,34,41]
angles_mapping = {(90,0):[13/30,17/30,23/30],(54,0):[2/18,4/18,5/18],(36,90):[5/18,7/18,11/18],(18,90):[17/24,15/24,7/24]}
lat_vec_dic = {}
atc_vec_dic = {}
for test_ins in test_instances:
for (start, end) in angles_mapping.keys():
for factor in angles_mapping[(start,end)]:
trg = int(factor * (end-start) + start)
#trg = int(factor * (end-start) + end) # extra
lat_start = torch.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}.pth'.format(test_ins, start)))
lat_end = torch.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}.pth'.format(test_ins, end)))
#lat_vec_dic[(test_ins, start, trg)] = factor * (lat_end - lat_start) + lat_start
lat_vec_dic[(test_ins, start, trg)] = lat_start
#code_dic[(test_ins, start, trg)] = factor * (lat_end - lat_start) + lat_end # extra
if specs["Articulation"]==True:
atc_start = np.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}.npy'.format(test_ins, start)))
atc_end = np.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}.npy'.format(test_ins, end)))
atc_vec_dic[(test_ins, start, trg)] = factor * (atc_end - atc_start) + atc_start
#atc_dic[(test_ins, start, trg)] = factor * (atc_end - atc_start) + atc_end # extra
if specs["NumAtcParts"]==2:
angle_mapping = {(0,0):[40,40],(0,20):[40,40],(0,40):[40,0], \
(20,0):[40,40],(20,20):[0,40],(20,40):[0,0], \
(40,0):[0,40],(40,20):[0,0],(40,40):[0,0]}
if specs["Class"]=='eyeglasses':
# angles_aligned
test_instances = [34,35,36,37,38,39,40,41,43]
if specs["Class"]=='refrigerator':
# angles_aligned
test_instances = [6,17,27,46,61,65,78]
angle_mapping_90 = {}
for (i,j) in angle_mapping.keys():
angle_mapping_90[(90-i,90-j)] = [90-angle_mapping[(i,j)][0], 90-angle_mapping[(i,j)][1]]
angle_mapping = angle_mapping_90
lat_vec_dic = {}
atc_vec_dic = {}
for test_ins in test_instances:
for k in angle_mapping.keys():
start = list(k)
end = angle_mapping[k]
trg = (np.array(list(start)) + np.array(end))//2
lat_start = torch.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}{:04d}.pth'.format(test_ins, start[0], start[1])))
lat_end = torch.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}{:04d}.pth'.format(test_ins, end[0], end[1])))
#lat_vec_dic[(test_ins, start[0], start[1], trg[0], trg[1])] = (lat_end + lat_start)/2
lat_vec_dic[(test_ins, start[0], start[1], trg[0], trg[1])] = lat_start
if specs["Articulation"]==True:
atc_start = np.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}{:04d}.npy'.format(test_ins, start[0], start[1])))
atc_end = np.load(os.path.join(reconstruction_codes_dir, dataset_name, '{:04d}art{:04d}{:04d}.npy'.format(test_ins, end[0], end[1])))
atc_vec_dic[(test_ins, start[0], start[1], trg[0], trg[1])] = (atc_end + atc_start)/2
return lat_vec_dic, atc_vec_dic
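# Optimizes a latent shape code (and, unless ground-truth articulation is supplied, an articulation code) against observed SDF samples using a two-step schedule: joint optimization first, then latent-only refinement from a re-initialized code.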
def reconstruct(
decoder,
num_iterations,
latent_size,
test_sdf,
clamp_dist,
num_samples=8000,
lr=5e-3,
l2reg=True,
articulation=True,
specs=None,
infer_with_gt_atc=False,
num_atc_parts=1,
do_sup_with_part=False,
):
def adjust_learning_rate(
initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every
):
#lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
if num_iterations<adjust_lr_every*2:
lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
else:
lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every - 2))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
decreased_by = 10
adjust_lr_every = int(num_iterations / 2)
# init latent code optimizer
latent_rand = torch.ones(1, latent_size).normal_(mean=0, std=0.01).cuda()
lat_vec = latent_rand.clone()
lat_vec.requires_grad = True
lat_optimizer = torch.optim.Adam([lat_vec], lr=lr)
# init articulation code optimizer
if infer_with_gt_atc==False:
if num_atc_parts==1:
if specs["Class"]=='laptop':
atc_vec = torch.Tensor([-30]).view(1,1).cuda()
elif specs["Class"]=='stapler' or specs["Class"]=='washing_machine' or specs["Class"]=='door' or specs["Class"]=='oven':
atc_vec = torch.Tensor([45]).view(1,1).cuda()
else:
raise Exception("Undefined classes")
if num_atc_parts==2:
if specs["Class"]=='eyeglasses':
atc_vec = torch.Tensor([25, 25]).view(1,2).cuda()
elif specs["Class"]=='refrigerator':
atc_vec = torch.Tensor([75, 75]).view(1,2).cuda()
else:
raise Exception("Undefined classes")
atc_vec.requires_grad = True
atc_optimizer = torch.optim.Adam([atc_vec], lr=lr*1000)
loss_num = 0
loss_l1 = torch.nn.L1Loss()
decoder.eval()
# two-step optimization
for e in range(num_iterations*2):
# re-initialize lat_vec
if e==num_iterations:
lat_vec = latent_rand.clone()
lat_vec.requires_grad = True
lat_optimizer = torch.optim.Adam([lat_vec], lr=lr)
sdf_data = asdf.data.unpack_sdf_samples_from_ram(
test_sdf, num_samples, articulation,
)
if articulation==True:
xyz = sdf_data[0][:, 0:3].float().cuda()
sdf_gt = sdf_data[0][:, 3].unsqueeze(1).cuda()
part_gt = sdf_data[0][:, 4].unsqueeze(1).long().cuda()
if infer_with_gt_atc:
atc_vecs = sdf_data[1].view(1,num_atc_parts).expand(xyz.shape[0],num_atc_parts).cuda()
else:
atc_vecs = atc_vec.expand(xyz.shape[0],num_atc_parts).cuda()
else:
xyz = sdf_data[:, 0:3].float().cuda()
sdf_gt = sdf_data[:, 3].unsqueeze(1).cuda()
sdf_gt = torch.clamp(sdf_gt, -clamp_dist, clamp_dist)
adjust_learning_rate(lr, lat_optimizer, e, decreased_by, adjust_lr_every)
if infer_with_gt_atc==False:
adjust_learning_rate(lr*1000, atc_optimizer, e, decreased_by, adjust_lr_every)
lat_optimizer.zero_grad()
if infer_with_gt_atc==False:
atc_optimizer.zero_grad()
lat_vecs = lat_vec.expand(num_samples, -1)
if articulation==True:
inputs = torch.cat([lat_vecs, xyz, atc_vecs], 1).cuda()
else:
inputs = torch.cat([lat_vecs, xyz], 1).cuda()
if do_sup_with_part:
pred_sdf, pred_part = decoder(inputs)
else:
pred_sdf = decoder(inputs)
pred_sdf = torch.clamp(pred_sdf, -clamp_dist, clamp_dist)
loss = loss_l1(pred_sdf, sdf_gt)
if l2reg:
loss += 1e-4 * torch.mean(lat_vec.pow(2))
if do_sup_with_part:
loss += 1e-3 * F.cross_entropy(pred_part, part_gt.view(-1).cuda())
loss.backward()
lat_optimizer.step()
if infer_with_gt_atc==False and e<num_iterations:
atc_optimizer.step()
loss_num = loss.cpu().data.numpy()
#pos_mask = (torch.sign(pred_sdf)!=torch.sign(sdf_gt)).data & (sdf_gt>0).data
#neg_mask = (torch.sign(pred_sdf)!=torch.sign(sdf_gt)).data & (sdf_gt<0).data
#print(torch.sum(pos_mask), torch.sum(neg_mask))
if articulation==True:
if infer_with_gt_atc:
return loss_num, None, lat_vec, sdf_data[1].view(1,-1)
else:
# compute articulation angle prediction error
atc_err = torch.mean(torch.abs(atc_vec.detach() - sdf_data[1].cuda())).cpu().data.numpy()
print(atc_vec)
return loss_num, atc_err, lat_vec, atc_vec
else:
return loss_num, lat_vec
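# Test-time-training variant of reconstruct(): after the two code-optimization stages, a third stage fine-tunes the first four decoder layers (lin0-lin3) on the observed samples.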
def reconstruct_ttt(
decoder,
num_iterations,
latent_size,
test_sdf,
clamp_dist,
num_samples=8000,
lr=5e-3,
l2reg=True,
articulation=True,
specs=None,
infer_with_gt_atc=False,
num_atc_parts=1,
do_sup_with_part=False,
):
def adjust_learning_rate(
initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every
):
if num_iterations<adjust_lr_every*2:
lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
elif num_iterations<adjust_lr_every*4:
lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every - 2))
else:
lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every - 4))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
decreased_by = 10
adjust_lr_every = int(num_iterations / 2)
# init latent code optimizer
latent_rand = torch.ones(1, latent_size).normal_(mean=0, std=0.01).cuda()
lat_vec = latent_rand.clone()
lat_vec.requires_grad = True
lat_optimizer = torch.optim.Adam([lat_vec], lr=lr)
opt_params = []
for idx, (name, param) in enumerate(decoder.named_parameters()):
n = re.split('\.', name)
if n[1] in ['lin0', 'lin1', 'lin2', 'lin3']:
opt_params.append(param)
decoder_optimizer = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, opt_params)}], lr=0.01*lr)
# init articulation code optimizer
if infer_with_gt_atc==False:
if num_atc_parts==1:
if specs["Class"]=='laptop':
atc_vec = torch.Tensor([-30]).view(1,1).cuda()
elif specs["Class"]=='stapler' or specs["Class"]=='washing_machine' or specs["Class"]=='door' or specs["Class"]=='oven':
atc_vec = torch.Tensor([45]).view(1,1).cuda()
else:
raise Exception("Undefined classes")
if num_atc_parts==2:
if specs["Class"]=='eyeglasses':
atc_vec = torch.Tensor([25, 25]).view(1,2).cuda()
elif specs["Class"]=='refrigerator':
atc_vec = torch.Tensor([75, 75]).view(1,2).cuda()
else:
raise Exception("Undefined classes")
atc_vec.requires_grad = True
atc_optimizer = torch.optim.Adam([atc_vec], lr=1000*lr)
loss_num = 0
loss_l1 = torch.nn.L1Loss()
decoder.eval()
# two-step optimization
for e in range(num_iterations*3):
# re-initialize lat_vec
if e==num_iterations:
lat_vec = latent_rand.clone()
lat_vec.requires_grad = True
lat_optimizer = torch.optim.Adam([lat_vec], lr=lr)
sdf_data = asdf.data.unpack_sdf_samples_from_ram(
test_sdf, num_samples, articulation,
)
if articulation==True:
xyz = sdf_data[0][:, 0:3].float().cuda()
sdf_gt = sdf_data[0][:, 3].unsqueeze(1).cuda()
part_gt = sdf_data[0][:, 4].unsqueeze(1).long().cuda()
if infer_with_gt_atc:
atc_vecs = sdf_data[1].view(1,num_atc_parts).expand(xyz.shape[0],num_atc_parts).cuda()
else:
atc_vecs = atc_vec.expand(xyz.shape[0],num_atc_parts).cuda()
else:
xyz = sdf_data[:, 0:3].float().cuda()
sdf_gt = sdf_data[:, 3].unsqueeze(1).cuda()
sdf_gt = torch.clamp(sdf_gt, -clamp_dist, clamp_dist)
adjust_learning_rate(lr, lat_optimizer, e, decreased_by, adjust_lr_every)
adjust_learning_rate(0.01*lr, decoder_optimizer, e, decreased_by, adjust_lr_every)
if infer_with_gt_atc==False:
adjust_learning_rate(1000*lr, atc_optimizer, e, decreased_by, adjust_lr_every)
lat_optimizer.zero_grad()
decoder_optimizer.zero_grad()
if infer_with_gt_atc==False:
atc_optimizer.zero_grad()
lat_vecs = lat_vec.expand(num_samples, -1)
if articulation==True:
inputs = torch.cat([lat_vecs, xyz, atc_vecs], 1).cuda()
else:
inputs = torch.cat([lat_vecs, xyz], 1).cuda()
if do_sup_with_part:
pred_sdf, pred_part = decoder(inputs)
else:
pred_sdf = decoder(inputs)
pred_sdf = torch.clamp(pred_sdf, -clamp_dist, clamp_dist)
loss = loss_l1(pred_sdf, sdf_gt)
if l2reg:
loss += 1e-4 * torch.mean(lat_vec.pow(2))
if do_sup_with_part:
loss += 1e-3 * F.cross_entropy(pred_part, part_gt.view(-1).cuda())
loss.backward()
#lat_optimizer.step()
if infer_with_gt_atc==False and e<num_iterations:
atc_optimizer.step()
lat_optimizer.step()
elif e<num_iterations*2:
lat_optimizer.step()
else:
decoder_optimizer.step()
loss_num = loss.cpu().data.numpy()
#pos_mask = (torch.sign(pred_sdf)!=torch.sign(sdf_gt)).data & (sdf_gt>0).data
#neg_mask = (torch.sign(pred_sdf)!=torch.sign(sdf_gt)).data & (sdf_gt<0).data
#print(torch.sum(pos_mask), torch.sum(neg_mask))
if articulation==True:
if infer_with_gt_atc:
return loss_num, None, lat_vec, sdf_data[1].view(1,-1)
else:
# compute articulation angle prediction error
atc_err = torch.mean(torch.abs(atc_vec.detach() - sdf_data[1].cuda())).cpu().data.numpy()
print(atc_vec)
return loss_num, atc_err, lat_vec, atc_vec
else:
return loss_num, lat_vec
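# Runs reconstruct() on every test .npz sample, reports running average SDF/articulation errors, and saves the extracted mesh, latent code and (optionally) articulation code per instance.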
def reconstruct_testset(args, ws, specs, decoder, npz_filenames, saved_model_epoch, dataset_name):
# build saving directory
reconstruction_dir = os.path.join(
args.experiment_directory, ws.recon_testset_subdir, str(saved_model_epoch)
)
if not os.path.isdir(reconstruction_dir):
os.makedirs(reconstruction_dir)
reconstruction_meshes_dir = os.path.join(
reconstruction_dir, ws.reconstruction_meshes_subdir
)
if not os.path.isdir(reconstruction_meshes_dir):
os.makedirs(reconstruction_meshes_dir)
reconstruction_codes_dir = os.path.join(
reconstruction_dir, ws.reconstruction_codes_subdir
)
if not os.path.isdir(reconstruction_codes_dir):
os.makedirs(reconstruction_codes_dir)
err_sum = 0.0
atc_err_sum = 0.0
save_latvec_only = False
# generate meshes
for ii, npz in enumerate(npz_filenames):
if "npz" not in npz:
continue
full_filename = os.path.join(args.data_source, ws.sdf_samples_subdir, npz)
if dataset_name=='rbo':
data_sdf = asdf.data.read_sdf_samples_into_ram_rbo(full_filename, articulation=specs["Articulation"], num_atc_parts=specs["NumAtcParts"])
else:
data_sdf = asdf.data.read_sdf_samples_into_ram(full_filename, articulation=specs["Articulation"], num_atc_parts=specs["NumAtcParts"])
#dataset_name = re.split('/', npz)[-3]
npz_name = re.split('/', npz)[-1][:-4]
mesh_filename = os.path.join(reconstruction_meshes_dir, dataset_name, npz_name)
latent_filename = os.path.join(reconstruction_codes_dir, dataset_name, npz_name + ".pth")
if (
args.skip
and os.path.isfile(mesh_filename + ".ply")
and os.path.isfile(latent_filename)
):
continue
logging.info("reconstructing {}".format(npz))
if specs["Articulation"]==True:
data_sdf[0][0] = data_sdf[0][0][torch.randperm(data_sdf[0][0].shape[0])]
data_sdf[0][1] = data_sdf[0][1][torch.randperm(data_sdf[0][1].shape[0])]
else:
data_sdf[0] = data_sdf[0][torch.randperm(data_sdf[0].shape[0])]
data_sdf[1] = data_sdf[1][torch.randperm(data_sdf[1].shape[0])]
start = time.time()
if specs["Articulation"]==True:
err, atc_err, lat_vec, atc_vec = reconstruct(
decoder,
int(args.iterations),
specs["CodeLength"],
data_sdf,
specs["ClampingDistance"],
num_samples=8000,
lr=5e-3,
l2reg=True,
articulation=specs["Articulation"],
specs=specs,
infer_with_gt_atc=args.infer_with_gt_atc,
num_atc_parts=specs["NumAtcParts"],
do_sup_with_part=specs["TrainWithParts"],
)
else:
err, lat_vec = reconstruct(
decoder,
int(args.iterations),
specs["CodeLength"],
data_sdf,
specs["ClampingDistance"],
num_samples=8000,
lr=5e-3,
l2reg=True,
articulation=specs["Articulation"],
specs=specs,
infer_with_gt_atc=True,
num_atc_parts=specs["NumAtcParts"],
do_sup_with_part=False,
)
atc_vec = None
if specs["Articulation"]==True and args.infer_with_gt_atc==False:
print("err: ", err, "atc_err: ", atc_err)
atc_err_sum += atc_err
err_sum += err
print("err avg: ", err_sum/(ii+1), "atc_err avg: ", atc_err_sum/(ii+1))
else:
err_sum += err
print("err avg: ", err_sum/(ii+1))
if not os.path.exists(os.path.dirname(mesh_filename)):
os.makedirs(os.path.dirname(mesh_filename))
if not save_latvec_only:
start = time.time()
with torch.no_grad():
asdf.mesh.create_mesh(
decoder, lat_vec, mesh_filename, N=256, max_batch=int(2 ** 18), atc_vec=atc_vec, do_sup_with_part=specs["TrainWithParts"], specs=specs,
)
logging.info("total time: {}".format(time.time() - start))
if not os.path.exists(os.path.dirname(latent_filename)):
os.makedirs(os.path.dirname(latent_filename))
torch.save(lat_vec.unsqueeze(0), latent_filename)
if specs["Articulation"]==True:
print("save atc npy: ", latent_filename[:-4]+'.npy', atc_vec.detach().cpu().numpy())
with open(latent_filename[:-4]+'.npy', 'wb') as f:
np.save(f, atc_vec.detach().cpu().numpy())
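# Same as reconstruct_testset(), but uses reconstruct_ttt() and additionally saves the fine-tuned decoder weights for each test instance.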
def reconstruct_testset_ttt(args, ws, specs, decoder, npz_filenames, saved_model_state, dataset_name):
# build saving directory
reconstruction_dir = os.path.join(
args.experiment_directory, ws.recon_testset_ttt_subdir, str(saved_model_state["epoch"])
)
if not os.path.isdir(reconstruction_dir):
os.makedirs(reconstruction_dir)
reconstruction_meshes_dir = os.path.join(
reconstruction_dir, ws.reconstruction_meshes_subdir
)
if not os.path.isdir(reconstruction_meshes_dir):
os.makedirs(reconstruction_meshes_dir)
reconstruction_codes_dir = os.path.join(
reconstruction_dir, ws.reconstruction_codes_subdir
)
if not os.path.isdir(reconstruction_codes_dir):
os.makedirs(reconstruction_codes_dir)
reconstruction_models_dir = os.path.join(
reconstruction_dir, ws.reconstruction_models_subdir
)
if not os.path.isdir(reconstruction_models_dir):
os.makedirs(reconstruction_models_dir)
err_sum = 0.0
atc_err_sum = 0.0
save_latvec_only = False
# generate meshes
for ii, npz in enumerate(npz_filenames):
if "npz" not in npz:
continue
decoder.load_state_dict(saved_model_state["model_state_dict"])
full_filename = os.path.join(args.data_source, ws.sdf_samples_subdir, npz)
if dataset_name=='rbo':
data_sdf = asdf.data.read_sdf_samples_into_ram_rbo(full_filename, articulation=specs["Articulation"], num_atc_parts=specs["NumAtcParts"])
else:
data_sdf = asdf.data.read_sdf_samples_into_ram(full_filename, articulation=specs["Articulation"], num_atc_parts=specs["NumAtcParts"])
#dataset_name = re.split('/', npz)[-3]
npz_name = re.split('/', npz)[-1][:-4]
mesh_filename = os.path.join(reconstruction_meshes_dir, dataset_name, npz_name)
latent_filename = os.path.join(reconstruction_codes_dir, dataset_name, npz_name + ".pth")
model_filename = os.path.join(reconstruction_models_dir, dataset_name, npz_name + ".pth")
if (
args.skip
and os.path.isfile(mesh_filename + ".ply")
and os.path.isfile(latent_filename)
):
continue
logging.info("reconstructing {}".format(npz))
if specs["Articulation"]==True:
data_sdf[0][0] = data_sdf[0][0][torch.randperm(data_sdf[0][0].shape[0])]
data_sdf[0][1] = data_sdf[0][1][torch.randperm(data_sdf[0][1].shape[0])]
else:
data_sdf[0] = data_sdf[0][torch.randperm(data_sdf[0].shape[0])]
data_sdf[1] = data_sdf[1][torch.randperm(data_sdf[1].shape[0])]
start = time.time()
if specs["Articulation"]==True:
err, atc_err, lat_vec, atc_vec = reconstruct_ttt(
decoder,
int(args.iterations),
specs["CodeLength"],
data_sdf,
specs["ClampingDistance"],
num_samples=8000,
lr=5e-3,
l2reg=True,
articulation=specs["Articulation"],
specs=specs,
infer_with_gt_atc=args.infer_with_gt_atc,
num_atc_parts=specs["NumAtcParts"],
do_sup_with_part=specs["TrainWithParts"],
)
else:
err, lat_vec = reconstruct_ttt(
decoder,
int(args.iterations),
specs["CodeLength"],
data_sdf,
specs["ClampingDistance"],
num_samples=8000,
lr=5e-3,
l2reg=True,
articulation=specs["Articulation"],
specs=specs,
infer_with_gt_atc=True,
num_atc_parts=specs["NumAtcParts"],
do_sup_with_part=False,
)
atc_vec = None
if specs["Articulation"]==True and args.infer_with_gt_atc==False:
print("err: ", err, "atc_err: ", atc_err)
atc_err_sum += atc_err
err_sum += err
print("err avg: ", err_sum/(ii+1), "atc_err avg: ", atc_err_sum/(ii+1))
else:
err_sum += err
print("err avg: ", err_sum/(ii+1))
if not os.path.exists(os.path.dirname(mesh_filename)):
os.makedirs(os.path.dirname(mesh_filename))
if not save_latvec_only:
start = time.time()
with torch.no_grad():
asdf.mesh.create_mesh(
decoder, lat_vec, mesh_filename, N=256, max_batch=int(2 ** 18), atc_vec=atc_vec, do_sup_with_part=specs["TrainWithParts"], specs=specs,
)
logging.info("total time: {}".format(time.time() - start))
if not os.path.exists(os.path.dirname(latent_filename)):
os.makedirs(os.path.dirname(latent_filename))
if not os.path.exists(os.path.dirname(model_filename)):
os.makedirs(os.path.dirname(model_filename))
torch.save(lat_vec.unsqueeze(0), latent_filename)
torch.save(decoder.state_dict(), model_filename)
if specs["Articulation"]==True:
print("save atc npy: ", latent_filename[:-4]+'.npy', atc_vec.detach().cpu().numpy())
with open(latent_filename[:-4]+'.npy', 'wb') as f:
np.save(f, atc_vec.detach().cpu().numpy())
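# Mesh generation from the reconstructed codes: generation_atc_config pairs each latent
# code with target articulation angles, and a mesh is extracted for every combination.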
def generation(args, ws, specs, decoder, reconstruction_codes_dir, saved_model_epoch, dataset_name='shape2motion'):
# build saving directory
generation_dir = os.path.join(
args.experiment_directory, ws.generation_subdir, str(saved_model_epoch)
)
if not os.path.isdir(generation_dir):
os.makedirs(generation_dir)
gen_meshes_dir = os.path.join(
generation_dir, ws.reconstruction_meshes_subdir
)
if not os.path.isdir(gen_meshes_dir):
os.makedirs(gen_meshes_dir)
save_latvec_only = False
num_atc_parts = specs["NumAtcParts"]
lat_vec_dic, atc_vec_dic = generation_atc_config(specs, reconstruction_codes_dir, dataset_name=dataset_name)
for ii, k in enumerate(lat_vec_dic.keys()):
lat_vec = lat_vec_dic[k].view(1, specs["CodeLength"])
atc_vec = torch.Tensor(atc_vec_dic[k]).float().view(1,num_atc_parts)
if num_atc_parts==1:
if dataset_name=='rbo':
mesh_filename = os.path.join(gen_meshes_dir, dataset_name, '{}_{:04d}'.format(k[0],int(k[1])))
else:
mesh_filename = os.path.join(gen_meshes_dir, dataset_name, '{:04d}art{:04d}_lat_from{:04d}to{:04d}'.format(k[0],k[1],k[1],k[2]))
if num_atc_parts==2:
mesh_filename = os.path.join(gen_meshes_dir, dataset_name, '{:04d}art{:04d}{:04d}_lat_from{:04d}{:04d}to{:04d}{:04d}'.format(k[0],k[1],k[2],k[1],k[2],k[3],k[4]))
if (
args.skip
and os.path.isfile(mesh_filename + ".ply")
):
continue
if not os.path.exists(os.path.dirname(mesh_filename)):
os.makedirs(os.path.dirname(mesh_filename))
logging.info("reconstructing {}".format(mesh_filename))
if not save_latvec_only:
start = time.time()
with torch.no_grad():
asdf.mesh.create_mesh(
decoder, lat_vec, mesh_filename, N=256, max_batch=int(2 ** 18), atc_vec=atc_vec, do_sup_with_part=specs["TrainWithParts"], specs=specs,
)
logging.info("total time: {}".format(time.time() - start))
if specs["TrainWithParts"]==True:
xyz = sample_uniform_points_in_unit_sphere(30000) * 0.75
xyz = torch.Tensor(xyz).cuda()
if num_atc_parts==1:
atc_vec = atc_vec.view(1,1).expand(xyz.shape[0],1).cuda()
if num_atc_parts==2:
atc_vec = atc_vec.view(1,2).expand(xyz.shape[0],2).cuda()
lat_vec = lat_vec.detach().expand(xyz.shape[0], -1)
inputs = torch.cat([lat_vec, xyz, atc_vec], 1).cuda()
sdf, pred_part = decoder(inputs)
_, part_pred = pred_part.topk(1, 1, True, True)
part_pred = part_pred.detach()
sdf = sdf.detach()
pos = np.concatenate([xyz[sdf.view(-1)>0].cpu().numpy(), sdf[sdf.view(-1)>0].cpu().numpy().reshape(-1,1), part_pred[sdf.view(-1)>0].cpu().numpy().reshape(-1,1)], axis=1)
neg = np.concatenate([xyz[sdf.view(-1)<0].cpu().numpy(), sdf[sdf.view(-1)<0].cpu().numpy().reshape(-1,1), part_pred[sdf.view(-1)<0].cpu().numpy().reshape(-1,1)], axis=1)
np.savez(mesh_filename+'.npz', pos=pos, neg=neg)
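# Same generation loop as above, but for the test-time-training results: the per-instance
# decoder weights saved by reconstruct_testset_ttt are reloaded before extracting each mesh.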
def generation_ttt(args, ws, specs, decoder, reconstruction_codes_dir, reconstruction_models_dir, saved_model_epoch, dataset_name='shape2motion'):
# build saving directory
generation_dir = os.path.join(
args.experiment_directory, ws.generation_ttt_subdir, str(saved_model_epoch)
)
if not os.path.isdir(generation_dir):
os.makedirs(generation_dir)
gen_meshes_dir = os.path.join(
generation_dir, ws.reconstruction_meshes_subdir
)
if not os.path.isdir(gen_meshes_dir):
os.makedirs(gen_meshes_dir)
save_latvec_only = False
num_atc_parts = specs["NumAtcParts"]
lat_vec_dic, atc_vec_dic, model_vec_dic = generation_atc_config(specs, reconstruction_codes_dir, reconstruction_models_dir, test_time_training=True, dataset_name=dataset_name)
for ii, k in enumerate(lat_vec_dic.keys()):
lat_vec = lat_vec_dic[k].view(1,specs["CodeLength"])
atc_vec = torch.Tensor(atc_vec_dic[k]).float().view(1,num_atc_parts)
saved_model_state = torch.load(model_vec_dic[k])
decoder.load_state_dict(saved_model_state)
if num_atc_parts==1:
if dataset_name=='rbo':
mesh_filename = os.path.join(gen_meshes_dir, dataset_name, '{}_{:04d}'.format(k[0],int(k[1]))) # real data
else:
mesh_filename = os.path.join(gen_meshes_dir, dataset_name, '{:04d}art{:04d}_lat_from{:04d}to{:04d}'.format(k[0],k[1],k[1],k[2]))
if num_atc_parts==2:
mesh_filename = os.path.join(gen_meshes_dir, dataset_name, '{:04d}art{:04d}{:04d}_lat_from{:04d}{:04d}to{:04d}{:04d}'.format(k[0],k[1],k[2],k[1],k[2],k[3],k[4]))
if (
args.skip
and os.path.isfile(mesh_filename + ".ply")
):
continue
if not os.path.exists(os.path.dirname(mesh_filename)):
os.makedirs(os.path.dirname(mesh_filename))
logging.info("reconstructing {}".format(mesh_filename))
if not save_latvec_only:
start = time.time()
with torch.no_grad():
asdf.mesh.create_mesh(
decoder, lat_vec, mesh_filename, N=256, max_batch=int(2 ** 18), atc_vec=atc_vec, do_sup_with_part=specs["TrainWithParts"], specs=specs,
)
logging.info("total time: {}".format(time.time() - start))
if specs["TrainWithParts"]==True:
xyz = sample_uniform_points_in_unit_sphere(30000) * 0.75
xyz = torch.Tensor(xyz).cuda()
if num_atc_parts==1:
atc_vec = atc_vec.view(1,1).expand(xyz.shape[0],1).cuda()
if num_atc_parts==2:
atc_vec = atc_vec.view(1,2).expand(xyz.shape[0],2).cuda()
lat_vec = lat_vec.detach().expand(xyz.shape[0], -1)
inputs = torch.cat([lat_vec, xyz, atc_vec], 1).cuda()
sdf, pred_part = decoder(inputs)
_, part_pred = pred_part.topk(1, 1, True, True)
part_pred = part_pred.detach()
sdf = sdf.detach()
pos = np.concatenate([xyz[sdf.view(-1)>0].cpu().numpy(), sdf[sdf.view(-1)>0].cpu().numpy().reshape(-1,1), part_pred[sdf.view(-1)>0].cpu().numpy().reshape(-1,1)], axis=1)
neg = np.concatenate([xyz[sdf.view(-1)<0].cpu().numpy(), sdf[sdf.view(-1)<0].cpu().numpy().reshape(-1,1), part_pred[sdf.view(-1)<0].cpu().numpy().reshape(-1,1)], axis=1)
np.savez(mesh_filename+'.npz', pos=pos, neg=neg)
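# Latent-code interpolation over the test set: interpolation_atc_config supplies the
# interpolated latent (and, for articulated shapes, joint-angle) vectors, and a mesh is
# extracted for each interpolation step.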
def interpolate_testset(args, ws, specs, decoder, reconstruction_codes_dir, saved_model_epoch, dataset_name='shape2motion'):
# build saving directory
inter_dir = os.path.join(
args.experiment_directory, ws.inter_testset_subdir, str(saved_model_epoch)
)
if not os.path.isdir(inter_dir):
os.makedirs(inter_dir)
inter_meshes_dir = os.path.join(
inter_dir, ws.reconstruction_meshes_subdir
)
if not os.path.isdir(inter_meshes_dir):
os.makedirs(inter_meshes_dir)
save_latvec_only = False
num_atc_parts = specs["NumAtcParts"]
lat_vec_dic, atc_vec_dic = interpolation_atc_config(specs, reconstruction_codes_dir)
for ii, k in enumerate(lat_vec_dic.keys()):
lat_vec = lat_vec_dic[k].view(1,specs["CodeLength"])
if specs["Articulation"]==True:
atc_vec = torch.Tensor(atc_vec_dic[k]).view(1,num_atc_parts)
else:
atc_vec = None
if num_atc_parts==1:
mesh_filename = os.path.join(inter_meshes_dir, dataset_name, '{:04d}art{:04d}_lat_from{:04d}to{:04d}'.format(k[0],k[1],k[1],k[2]))
if num_atc_parts==2:
mesh_filename = os.path.join(inter_meshes_dir, dataset_name, '{:04d}art{:04d}{:04d}_lat_from{:04d}{:04d}to{:04d}{:04d}'.format(k[0],k[1],k[2],k[1],k[2],k[3],k[4]))
if (
args.skip
and os.path.isfile(mesh_filename + ".ply")
):
continue
if not os.path.exists(os.path.dirname(mesh_filename)):
os.makedirs(os.path.dirname(mesh_filename))
logging.info("reconstructing {}".format(mesh_filename))
if not save_latvec_only:
start = time.time()
with torch.no_grad():
asdf.mesh.create_mesh(
decoder, lat_vec, mesh_filename, N=256, max_batch=int(2 ** 18), atc_vec=atc_vec, do_sup_with_part=specs["TrainWithParts"], specs=specs,
)
logging.info("total time: {}".format(time.time() - start))
``` |
{
"source": "jitensinha98/Face-Detection-and-Emotion-Recognition",
"score": 3
} |
#### File: jitensinha98/Face-Detection-and-Emotion-Recognition/Extract_fer2013_images.py
```python
import pandas as pd
import cv2
import numpy as np
import os
import shutil
def generate_modified_dataset():
print("Generating Images ...")
count = 0
# reading csv file as pandas dataframe
data = pd.read_csv(dataset_path)
# converting numpy array into list
pixels = data['pixels'].tolist()
# obtaining list contain classes
classes = sorted((data['emotion'].unique()).tolist())
labels_column = data['emotion'].tolist()
# creating classes subfolder
for class_label in classes :
os.mkdir(Extracted_Dataset_Location + '/' + str(class_label))
# numpy array dimensions
width, height = 48, 48
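# each entry of the 'pixels' column is a space-separated string of 48*48 grayscale values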
# reading pixels and labels from csv to generate images
for pixel_sequence,label in zip(pixels,labels_column):
face = [int(pixel) for pixel in pixel_sequence.split(' ')]
face = np.asarray(face).reshape(width, height)
# resizing image
face = cv2.resize(face.astype('uint8'),image_size)
count = count + 1
# setting image name
image_name = str(count) + '.png'
# Testing Purposes
'''
cv2.imshow('Image',face)
cv2.waitKey(0)
'''
# Saving the image
cv2.imwrite(Extracted_Dataset_Location + '/' + str(label) + '/' + image_name,face)
print("Image Generation done.")
dataset_path = 'fer2013/fer2013.csv'
# image dimensions
image_size=(200,200)
# save location of the modified dataset
Extracted_Dataset_Location = 'fer2013_extracted_Dataset'
# checking if extracted dataset already exists
if os.path.exists('fer2013_extracted_Dataset') == False :
# creating extracted dataset folder
os.mkdir('fer2013_extracted_Dataset')
else:
msg = int(input("Extracted Dataset Directory already present. Press 1 to use the existing directory or 2 to renew directory :"))
if msg == 2 :
# deleting extracted images
shutil.rmtree('fer2013_extracted_Dataset')
os.mkdir('fer2013_extracted_Dataset')
generate_modified_dataset()
``` |
{
"source": "jitensinha98/Python-Chat-Application",
"score": 4
} |
#### File: jitensinha98/Python-Chat-Application/client.py
```python
import socket
import sys
import os
import time
import threading
from tkinter import *
#This function is used to receive messages from the server
def receive_message():
while True:
incoming_message=client.recv(4096)
if not incoming_message:
message_list.insert(END, ' ')
message_list.insert(END, 'DISCONNECTED FROM SERVER')
message_list.insert(END, ' ')
sys.exit()
else:
incoming_message=incoming_message.decode()
incoming_message='<'+host+'('+host_ip+')'+'> : '+incoming_message
message_list.insert(END, incoming_message)
#This function is used to send messages to the server
def send_message():
message=send_entry.get()
encoded_message=message.encode()
client.send(encoded_message)
message='<You> : '+message
message_list.insert(END, message)
send_entry.delete(0,END)
#used for multithreading
thread_send = []
thread_rcv = []
num_threads = 2
#builds the GUI window
root=Tk()
#setting the resolution of the GUI window
root.geometry("520x410")
#Creating two frames to accommodate widgets
topframe=Frame(root)
bottomframe=Frame(root)
#Creating widgets to be used in the application
message_scrollbar=Scrollbar(topframe)
labelheading=Label(topframe,text="----------------Message History----------------")
message_list=Listbox(topframe,width=62,height=20)
send_button=Button(bottomframe,text="SEND",width=10,height=1,bg="grey",fg="white",command=send_message)
send_entry=Entry(bottomframe,width=50)
#Assigning frame location in the GUI window
topframe.pack(side=TOP)
bottomframe.pack(side=BOTTOM)
#inserting placeholder text in the entry box
send_entry.insert(END,'Type your message here...')
#Assigning position to the label widget in the topframe
labelheading.pack(side=TOP)
#Assigning position to the listbox and scrollbar widget in the topframe
message_list.pack(side=LEFT,fill=Y)
message_scrollbar.pack(side=LEFT,fill=Y)
#Assigning position to the entrybox and button widget in the bottomframe
send_entry.pack(side=LEFT)
send_button.pack(side=LEFT)
#linking scrollbar and listbox widget
message_scrollbar.config(command=message_list.yview)
message_list.config(yscrollcommand=message_scrollbar.set)
#initializing socket
client= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#server host-name and port number
#change it to your desirable host and port of the server
host = 'jiten-Vostro1310'
port = 8080
#getting server host IP address
host_ip=socket.gethostbyname(host)
#connecting the client to the server
client.connect((host,port))
#obtaining client information
client_info=client.getsockname();
#storing the messages to be displayed
info_1='Connected to chat server : '+str(host_ip)
info_2='USING PORT 8080'
info_3='CLIENT IP AND PORT : '+str(client_info)
info_4='MESSAGE FACILITY: ACTIVE'
#displaying the messages in the listbox
message_list.insert(END, info_1)
message_list.insert(END, info_2)
message_list.insert(END, info_3)
message_list.insert(END, ' ')
message_list.insert(END, info_4)
message_list.insert(END, ' ')
#enabling multi-threading for receive_message()
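#each receiver thread blocks on client.recv() and appends incoming messages to the listbox,
#keeping the tkinter mainloop below responsive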
for loop_1 in range(num_threads):
thread_rcv.append(threading.Thread(target = receive_message))
thread_rcv[-1].start()
#changing name of the application
root.title('CLIENT CHATBOX')
#infinite loop to be executed to display the GUI
root.mainloop()
``` |
{
"source": "jitensinha98/SQLite-Database-Application",
"score": 5
} |
#### File: SQLite-Database-Application/Testcode_noGUI/DATABASEapp_noGUI_Testing.py
```python
import sqlite3
#creating a Database named BASIC and connection with localhost server
conn=sqlite3.connect('BASIC.db')
#creating cursor to execute SQL commands
c=conn.cursor()
#creating table in the database
def create_table():
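#IF NOT EXISTS makes the statement idempotent, so create_table() can be called safely on every run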
c.execute('CREATE TABLE IF NOT EXISTS basics(Name TEXT,Salary TEXT)')
#function used to add entries in the database table
def data_entry():
name = input("Enter name : ")
salary = input("Enter salary : ")
c.execute("INSERT INTO basics(Name,Salary) VALUES(?,?)",(name,salary))
conn.commit()
print (" ")
#dynamic entry to the table
def dynamic_data_entry():
x=int(input("How many rows do you want to enter ? "))
for i in range (0,x):
data_entry()
#reading entries from the database table
def read_from_db():
print("Press 1 to view the entire database.")
print("Press 2 to view database by name.")
print(" ")
choice=int(input(">>>"));
if choice==2:
name_db=input("Enter Name of the person to acquire details : ")
c.execute("SELECT * FROM basics WHERE Name=?",(name_db,))
elif choice==1 :
c.execute("SELECT * FROM basics")
else:
print("INVALID ENTRY")
print(" ")
print(" ")
print("(NAME | SALARY)")
print(" ")
print("-----------------")
for row in c.fetchall():
print (row)
print(" ")
#updating database table entries
def update_db():
print("Please enter the name whose information you want to update : ")
name_query=str(input(">>"))
print(" ")
print("Press 1 to update Name.")
print("Press 2 to update Salary.")
print("Press 3 to update Both.")
ch=int(input(">>>"))
if ch==1 :
name_dba=str(input("Enter new Name : "))
c.execute('UPDATE basics SET Name=? WHERE Name=?',(name_dba,name_query))
elif ch==2 :
salary_dba=str(input("Enter new Salary : "))
c.execute('UPDATE basics SET Salary=? WHERE Name=?',(salary_dba,name_query))
elif ch==3 :
name_dba=str(input("Enter new Name : "))
salary_dba=str(input("Enter new Salary : "))
c.execute('UPDATE basics SET Name=? ,Salary=? WHERE Name=?',(name_dba,salary_dba,name_query))
else:
print("INVALID ENTRY")
print(" ")
conn.commit()
print(" ")
print("DATABASE SUCCESSFULLY UPDATED")
print(" ")
#deleting table or table entries from the database
def delete_db():
print(" ")
print("Press 1 to empty the entire database.")
print("Press 2 to delete an invidual from database.")
choice_del=int(input(">>>"))
if choice_del ==1:
c.execute('DELETE FROM basics');
elif choice_del==2 :
name_del=str(input("Enter Name : "))
c.execute('DELETE FROM basics WHERE Name=?',(name_del,))
else:
print("INVALID ENTRY")
print(" ")
conn.commit()
while True:
print("""WELCOME TO DATABSE MANAGEMENT SYSTEM
Press 1 to add entries.
Press 2 to read entries.
Press 3 to update entries.
Press 4 to delete entries.
Press 5 to create table.
Press 6 to Exit Control.
""")
ch=int(input(">>>"))
if ch==1 :
dynamic_data_entry()
elif ch==2:
read_from_db()
elif ch==3:
update_db()
elif ch==4:
delete_db()
elif ch==5:
create_table()
elif ch==6:
break
else :
print("INVALID ENTRY")
print(" ")
#closing cursor
c.close()
#declining connection to the localhost
conn.close()
``` |
{
"source": "Jitesh17/albumentations-demo",
"score": 3
} |
#### File: albumentations-demo/src/app.py
```python
import os
import streamlit as st
import albumentations as A
from utils import (
load_augmentations_config,
get_arguments,
get_placeholder_params,
select_transformations,
show_random_params,
)
from visuals import (
select_image,
show_credentials,
show_docstring,
get_transormations_params,
)
def main():
# get CLI params: the path to images and image width
path_to_images, width_original = get_arguments()
if not os.path.isdir(path_to_images):
st.title("There is no directory: " + path_to_images)
else:
# select interface type
interface_type = st.sidebar.radio(
"Select the interface mode", ["Simple", "Professional", "Ultimate"], 2
)
# select image
status, image = select_image(path_to_images, interface_type)
if status == 1:
st.title("Can't load image")
if status == 2:
st.title("Please, upload the image")
else:
# image was loaded successfully
placeholder_params = get_placeholder_params(image)
# load the config
augmentations = load_augmentations_config(
placeholder_params, "configs/augmentations.json"
)
# get the list of transformations names
transform_names = select_transformations(augmentations, interface_type)
# get parameters for each transform
transforms = get_transormations_params(transform_names, augmentations)
try:
# apply the transformation to the image
data = A.ReplayCompose(transforms)(image=image)
error = 0
except ValueError:
error = 1
st.title(
"The error has occurred. Most probably you have passed wrong set of parameters. \
Check transforms that change the shape of image."
)
# proceed only if everything is ok
if error == 0:
augmented_image = data["image"]
# show title
st.title("Demo of Albumentations")
# show the images
width_transformed = int(
width_original / image.shape[1] * augmented_image.shape[1]
)
st.image(image, caption="Original image", width=width_original)
st.image(
augmented_image,
caption="Transformed image",
width=width_transformed,
)
# comment about refreshing
st.write("*Press 'R' to refresh*")
# random values used to get transformations
show_random_params(data, interface_type)
# print additional info
for transform in transforms:
show_docstring(transform)
st.code(str(transform))
show_credentials()
# adding google analytics pixel
# only when deployed online. don't collect statistics of local usage
if "GA" in os.environ:
st.image(os.environ["GA"])
st.markdown(
(
"[Privacy policy]"
+ (
"(https://htmlpreview.github.io/?"
+ "https://github.com/IliaLarchenko/"
+ "albumentations-demo/blob/deploy/docs/privacy.html)"
)
)
)
if __name__ == "__main__":
main()
``` |
{
"source": "Jitesh17/classification",
"score": 3
} |
#### File: test/bolt_src/aug_test copy.py
```python
import os, sys
import pyjeasy.file_utils as f
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import matplotlib.pyplot as plt
import albumentations as albu
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
# from RandAugment import RandAugment
import albumentations as A
from PIL import Image
from pyjeasy.image_utils.output import show_image
path = "/home/jitesh/3d/data/coco_data/bolt/b2_coco-data/img/000000.png"
cv2_img = cv2.imread(path)
pil_img = Image.open(path)
# show_image(cv2_img, 800)
## In[]:
IMG_SIZE = 128
# aug_cv2_img = albu.GaussNoise(always_apply=True, p=1)(image=cv2_img)['image']
def get_augmentation(img):
train_transform = [
albu.Resize(height=IMG_SIZE, width= IMG_SIZE, p=1),
# albu.GaussNoise(p=1),
# albu.Blur(blur_limit=3, p=1),
albu.GaussianBlur(blur_limit=3, p=1)
]
transforms = albu.Compose(train_transform) # <- Compose
return transforms(image=img)['image'], transforms
aug_cv2_img, data_transform = get_augmentation(cv2_img)
plt.figure(figsize=(8, 5))
plt.imshow(aug_cv2_img)
# ## %%
# import numpy as np
# import imgaug as ia
# import imgaug.augmenters as iaa
# ia.seed(1)
# # Example batch of images.
# # The array has shape (32, 64, 64, 3) and dtype uint8.
# images = np.array(
# [ia.quokka(size=(64, 64)) for _ in range(32)],
# dtype=np.uint8
# )
# seq = iaa.Sequential([
# iaa.Fliplr(0.5), # horizontal flips
# iaa.Crop(percent=(0, 0.1)), # random crops
# # Small gaussian blur with random sigma between 0 and 0.5.
# # But we only blur about 50% of all images.
# iaa.Sometimes(
# 0.5,
# iaa.GaussianBlur(sigma=(0, 0.5))
# ),
# # Strengthen or weaken the contrast in each image.
# iaa.LinearContrast((0.75, 1.5)),
# # Add gaussian noise.
# # For 50% of all images, we sample the noise once per pixel.
# # For the other 50% of all images, we sample the noise per pixel AND
# # channel. This can change the color (not only brightness) of the
# # pixels.
# iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
# # Make some images brighter and some darker.
# # In 20% of all cases, we sample the multiplier once per channel,
# # which can end up changing the color of the images.
# iaa.Multiply((0.8, 1.2), per_channel=0.2),
# # Apply affine transformations to each image.
# # Scale/zoom them, translate/move them, rotate them and shear them.
# iaa.Affine(
# scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
# translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
# rotate=(-25, 25),
# shear=(-8, 8)
# )
# ], random_order=True) # apply augmenters in random order
# images_aug = seq(images=images)
# plt.figure(figsize=(8, 5))
# plt.imshow(images_aug)
```
#### File: test/bolt_src/data_preparation.py
```python
import pyjeasy.file_utils as f
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from sys import exit as x
import torch
import torch.nn as nn
import cv2
import matplotlib.pyplot as plt
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import copy
from tqdm import tqdm
from PIL import Image
import glob
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[]:
IMG_SIZE = 512
batch_size = 8
DATA_TYPES = ["train", "val", "test"]
# dataset_path = "/home/jitesh/3d/data/UE_training_results/bolt2/bolt_cropped"
# b_type_list = [dir for dir in os.listdir(dataset_path) if dir not in DATA_TYPES]
# print(b_type_list)
# # Required once start
# In[]:
def convert_4ch_to_3ch(dataset_path, split_ratio =[0.8, 0.1, 0.1]):
b_type_list = [dir for dir in os.listdir(dataset_path) if dir not in DATA_TYPES]
img_path_list = dict()
for data_type in DATA_TYPES:
img_path_list[data_type] = []
import random
for b_type in b_type_list:
data = glob.glob(f'{dataset_path}/{b_type}/*.png')
# train_data.append(data[:int(len(data)*0.8)])
random.shuffle(data)
s1 = split_ratio[0]
s2 = split_ratio[0] + split_ratio[1]
assert 1 == split_ratio[0] + split_ratio[1] + split_ratio[2]
img_path_list["train"] += data[:int(len(data)*s1)]
img_path_list["val"] += data[int(len(data)*s1):int(len(data)*s2)]
img_path_list["test"] += data[int(len(data)*s2):]
print(f'len(train_data): {len(img_path_list["train"])}')
print(f'len(val_data): {len(img_path_list["val"])}')
print(f'len(test_data): {len(img_path_list["test"])}')
# In[ ]:
import pyjeasy.file_utils as f
import cv2
filename_list = dict()
for data_type in DATA_TYPES:
dirname_new = os.path.join(dataset_path, data_type)
f.make_dir_if_not_exists(dirname_new)
for file_path in tqdm(img_path_list[data_type]):
file_path_split = file_path.split("/")
dirname_old = file_path_split[-2].split("_")[0]
filename_old = file_path_split[-1]
# filename_new = dirname_old.replace("b", "") + "_" + filename_old
filename_new = dirname_old + "_" + filename_old
output_img_path = os.path.join(dirname_new, filename_new)
f.delete_file_if_exists(output_img_path)
# Converting 4 channel to 3 channel and then writing in different folder
img = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
cv2.imwrite(output_img_path, img)
# f.copy_file(src_path=file_path, dst_path=output_img_path, verbose=False)
filename_list[data_type] = os.listdir(dirname_new)
# train_files = os.listdir(TRAIN_IMG_DIR_PATH)
# test_files = os.listdir(TEST_IMG_DIR_PATH)
# # Required once ends
# In[6]:
# filename_list = dict()
# for data_type in DATA_TYPES:
# dirname_new = os.path.join(dataset_path, data_type)
# filename_list[data_type] = os.listdir(dirname_new)
# In[6]:
class BoltDataset(Dataset):
def __init__(self, file_list, dir, mode='train', transform = None):
self.file_list = file_list
self.dir = dir
self.mode= mode
self.transform = transform
# print(self.file_list[0])
if self.mode == 'train':
# if 'b00' in self.file_list[0]:
# print(self.file_list[0])
if 'b00' in self.file_list[0]:
self.label = 0
else:
self.label = 1
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
img = Image.open(os.path.join(self.dir, self.file_list[idx]))
if self.transform:
img = self.transform(img)
if self.mode == 'train':
img = img.numpy()
return img.astype('float32'), self.label
else:
img = img.numpy()
return img.astype('float32'), self.file_list[idx]
# data_transform = transforms.Compose([
# transforms.Resize(256),
# transforms.ColorJitter(),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.Resize(128),
# transforms.ToTensor()
# ])
def get_dataset(dataset_path: str, b_type_list: list, img_size: int):
dataset = dict()
filename_list = dict()
for data_type in DATA_TYPES:
dirname_new = os.path.join(dataset_path, data_type)
filename_list[data_type] = os.listdir(dirname_new)
for data_type in DATA_TYPES:
dir_path = os.path.join(dataset_path, data_type)
if data_type =="train":
data_transform = transforms.Compose([
transforms.Resize(img_size),
transforms.ColorJitter(),
transforms.RandomCrop(int(img_size*1)),
transforms.RandomHorizontalFlip(),
transforms.Resize(img_size),
transforms.ToTensor()
])
# catagory_data = dict()
# for b_type in ['b00', 'b01']: # b_type_list:
# # print(filename_list)
# cat_files = [tf for tf in filename_list[data_type] if b_type in tf]
# catagory_data[b_type] = BoltDataset(cat_files, dir_path, mode=data_type, transform = data_transform)
# dataset[data_type] = ConcatDataset([c for c in catagory_data.values()])
else:
data_transform = transforms.Compose([
transforms.Resize((img_size)),
transforms.RandomCrop(int(img_size*1)),
# transforms.RandomHorizontalFlip(),
transforms.Resize(img_size),
transforms.ToTensor()
])
# dataset[data_type] = BoltDataset(filename_list[data_type], dir_path, mode=data_type, transform = data_transform)
catagory_data = dict()
# for b_type in ['b00', 'b01']: # b_type_list:
# for b_type in ['b10', 'b11']: # b_type_list:
for b_type in b_type_list:
# print(filename_list)
cat_files = [tf for tf in filename_list[data_type] if b_type in tf]
catagory_data[b_type] = BoltDataset(cat_files, dir_path, mode=data_type, transform = data_transform)
dataset[data_type] = ConcatDataset([c for c in catagory_data.values()])
print(f'len({data_type}_data): {len(dataset[data_type])}')
return dataset
# In[10]:
def mmmm():
# batch_size = 2
dataloader = DataLoader(dataset["train"], batch_size = batch_size, shuffle=True, num_workers=1)
# dataloader = DataLoader(catdogs, batch_size = 32, shuffle=True, num_workers=4)
print("len dataloader", len(dataloader))
# In[30]:
show_n_images = 40
samples, labels = next(iter(dataloader))
plt.figure(figsize=(16*2,24))
grid_imgs = torchvision.utils.make_grid(samples[:show_n_images])
np_grid_imgs = grid_imgs.numpy()
# a torch image tensor is (channels, height, width), so transpose it to (height, width, channels) in numpy to show it.
plt.imshow(np.transpose(np_grid_imgs, (1,2,0)))
# In[]:
# In[]:
# In[]:
# dataiter = iter(trainloader)
# images, labels = dataiter.next()
# imshow(torchvision.utils.make_grid(images))
# In[]:
convert_4ch_to_3ch("/home/jitesh/3d/data/UE_training_results/bolt3/bolt_cropped")
# %%
```
#### File: test/src/TheLib.py
```python
from __future__ import annotations
# from _typeshed import NoneType
import copy
import glob
import os
from datetime import datetime
from shutil import Error
from sys import exit as x
from typing import List, Union
from zipfile import error
import albumentations as A
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import printj
import pyjeasy.file_utils as f
import torch
import torch.nn as nn
import torchvision
import tqdm
# import albumentations as albu
from imgaug import augmenters as iaa
from PIL import Image
# from RandAugment import RandAugment
from torch.utils.data import ConcatDataset, DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torchvision.transforms.transforms import RandomGrayscale
from jaitool.training import save_ckp, load_ckp
# writer = SummaryWriter()
# get_ipython().run_line_magic('matplotlib', 'inline')
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
def check_cuda():
print('torch.cuda.is_available(): ', torch.cuda.is_available())
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
raise EnvironmentError
print(torch.cuda.get_device_name(0))
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3, 1), 'GB')
print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3, 1), 'GB')
class BoltDataset(Dataset):
def __init__(self, file_list, dir, mode='train', aug=None,
transform=None, test_label: int = 1,
b_type_list: List[str] = ['b10', 'b11'],
img_size: int=256):
# super().__init__()
self.file_list = file_list
self.dir = dir
self.mode = mode
# self.transform = transform
self.test_label = test_label
self.b_type_list = b_type_list
self.img_size=img_size
if self.mode == 'train' or self.mode == 'val':
# print(self.file_list)
# if 'b00' in self.file_list[0]:
if b_type_list[0] in self.file_list[0]:
self.label = 0
else:
self.label = 1
if aug is None:
self.aug = BoltClassifier(img_size=img_size).get_augmentation()
self.val_aug_seq = A.Compose([
A.Resize(self.img_size, self.img_size),
# A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
img_path = os.path.join(self.dir, self.file_list[idx])
image = Image.open(img_path)
big_side = max(image.size)
# small_side = min(image.size)
# printj.red(list(image.size)[0])
# print(big_side)
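# pad the image onto a square black canvas so the later resize keeps the original aspect ratio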
new_im = Image.new("RGB", (big_side, big_side))
new_im.paste(image)
image = new_im
# x()
if self.mode == 'train':
image = self.aug(image=np.array(image))['image']
# image = self.val_aug_seq(image=np.array(image))['image']
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return torch.tensor(image, dtype=torch.float), self.label
elif self.mode == 'val':
image = self.val_aug_seq(image=np.array(image))['image']
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return torch.tensor(image, dtype=torch.float), self.label
else:
image = self.val_aug_seq(image=np.array(image))['image']
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return torch.tensor(image, dtype=torch.float), self.file_list[idx]
class BoltClassifier:
def __init__(self, device: str = 'cuda', img_size: int = 256, batch_size: int = 8, data_types: List[str] = ["train", "val", "test"],
dataset_path: str = "/home/jitesh/3d/data/UE_training_results/bolt3/bolt_cropped",
b_type_list: List[str] = ['b10', 'b11'], num_workers: int = 2):
self.device = device
self.img_size = img_size
self.batch_size = batch_size
self.data_types = data_types
self.dataset_path = dataset_path
self.b_type_list = b_type_list
self.num_workers = num_workers
self.scheduler = None
self.model = None
# self.set_model()
# self.dataloader = self.get_dataloader()
def get_val(self, model, model_path, test_dir_path, no_of_samples: int = 24, test_label: int = 1, save_csv_path: str = None):
model.load_state_dict(torch.load(model_path))
# "/home/jitesh/prj/classification/test/bolt/ckpt_densenet121_mark_correct_128_s3_5.pth"))
data_transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.ToTensor()
])
test_list = [file for file in os.listdir(test_dir_path) if os.path.isfile(
os.path.join(test_dir_path, file)) and "b1" in file]
test_list = sorted(test_list)
# print(test_list)
test_data = BoltDataset(test_list, test_dir_path, mode="test",
transform=data_transform, test_label=test_label)
testloader = DataLoader(
test_data, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)
model.eval()
fn_list = []
pred_list = []
for x, fn in testloader:
with torch.no_grad():
x = x.to(self.device)
output = model(x)
pred = torch.argmax(output, dim=1)
fn_list += [n[:-4] for n in fn]
pred_list += [p.item() for p in pred]
submission = pd.DataFrame({"id": fn_list, "label": pred_list})
if save_csv_path is None:
save_csv_path = f'preds_densenet121_dir_{self.img_size}_test_.csv'
submission.to_csv(save_csv_path,
# index=False
)
samples, _ = next(iter(testloader))
samples = samples.to(self.device)
val_sample = samples[:no_of_samples]
return val_sample
def predict(self, model=None, model_path=None, test_dir_path=None,
no_of_samples: int = 24, test_label: int = 1,
save_csv_path: str = None, write_images=None):
if write_images:
f.make_dir_if_not_exists(write_images)
f.delete_all_files_in_dir(write_images)
f.make_dir_if_not_exists(f'{write_images}/{self.b_type_list[0]}')
f.make_dir_if_not_exists(f'{write_images}/{self.b_type_list[1]}')
if model is None:
model = self.model
try:
model.load_state_dict(torch.load(model_path)['state_dict'])
except KeyError:
model.load_state_dict(torch.load(model_path))
# "/home/jitesh/prj/classification/test/bolt/ckpt_densenet121_mark_correct_128_s3_5.pth"))
data_transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
transforms.ToTensor(),
])
test_list = [file for file in os.listdir(test_dir_path) if os.path.isfile(
os.path.join(test_dir_path, file))] # and "b1" in file]
test_list = sorted(test_list)
# print(test_list)
test_data = BoltDataset(test_list, test_dir_path, mode="test",
transform=data_transform, test_label=test_label)
testloader = DataLoader(
test_data, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)
model.eval()
fn_list = []
pred_list = []
for x, fn in testloader:
with torch.no_grad():
x = x.to(self.device)
output = model(x)
pred = torch.argmax(output, dim=1)
fn_list += [n[:-4] for n in fn]
pred_list += [p.item() for p in pred]
if write_images:
for filename, prediction in zip(fn, pred):
if prediction==0:
f.copy_file(src_path=f'{test_dir_path}/{filename}',
dst_path=f'{write_images}/{self.b_type_list[0]}/{filename}')
elif prediction==1:
f.copy_file(src_path=f'{test_dir_path}/{filename}',
dst_path=f'{write_images}/{self.b_type_list[1]}/{filename}')
submission = pd.DataFrame({"id": fn_list, "label": pred_list})
if save_csv_path is None:
save_csv_path = f'preds_densenet121_dir_{self.img_size}_test_.csv'
elif '.csv' not in save_csv_path:
save_csv_path = save_csv_path + '/1.csv'
submission.to_csv(save_csv_path,
# index=False
)
# samples, _ = iter(testloader).next()
# samples = samples.to(self.device)
# val_sample = samples[:no_of_samples]
# return val_sample
@staticmethod
def convert_img_shape_aug_to_normal(img):
# unnormalize
npimg = (img / 2 + 0.5)*255
# npimg = np.clip(npimg, 0, 255)
# print((npimg))
# print((npimg.shape))
# x()
# npimg = npimg.astype(int)
return Image.fromarray(npimg.astype('uint8'), 'RGB')
@staticmethod
def convert_img_shape_tensor_to_normal(img):
# unnormalize
img = img / 2 + 0.5
npimg = img.numpy()
npimg = np.clip(npimg, 0., 1.)
return np.transpose(npimg, (1, 2, 0))
@staticmethod
def tensore_to_np(img):
# unnormalize
img = img / 2 + 0.5
npimg = img.numpy()
npimg = np.clip(npimg, 0., 1.)
return np.transpose(npimg, (1, 2, 0))
def show_img(self, img):
plt.figure(figsize=(18, 15))
plt.imshow(self.tensore_to_np(img))
plt.show()
def split_train_test_0(self, split_ratio=[0.8, 0.1, 0.1], verbose: bool = True):
import random
if self.b_type_list is None:
self.b_type_list = [dir for dir in os.listdir(
self.dataset_path) if dir not in self.data_types]
img_path_list = dict()
for data_type in self.data_types:
img_path_list[data_type] = []
for b_type in self.b_type_list:
data = glob.glob(f'{self.dataset_path}/{b_type}/*.png')
random.shuffle(data)
s1 = split_ratio[0]
s2 = split_ratio[0] + split_ratio[1]
assert 1 == split_ratio[0] + split_ratio[1] + split_ratio[2]
img_path_list["train"] += data[:int(len(data)*s1)]
img_path_list["val"] += data[int(len(data)*s1):int(len(data)*s2)]
img_path_list["test"] += data[int(len(data)*s2):]
if verbose:
print(f'len(train_data): {len(img_path_list["train"])}')
print(f'len(val_data): {len(img_path_list["val"])}')
print(f'len(test_data): {len(img_path_list["test"])}')
return img_path_list
def split_train_test(self, split_ratio=[0.8, 0.1, 0.1], verbose: bool = True):
import random
# if self.b_type_list is None:
# self.b_type_list = [dir for dir in os.listdir(
# self.dataset_path) if dir not in self.data_types]
img_path_list = dict()
for data_type in ["train", "val", "test"]:
img_path_list[data_type] = []
# for b_type in self.b_type_list:
# data = glob.glob(f'{self.dataset_path}/train/*')
data = os.listdir(f'{self.dataset_path}/train')
# print(data)
random.shuffle(data)
s1 = split_ratio[0]
s2 = split_ratio[0] + split_ratio[1]
assert 1 == split_ratio[0] + split_ratio[1] + split_ratio[2]
img_path_list["train"] += data[:int(len(data)*s1)]
img_path_list["val"] += data[int(len(data)*s1):int(len(data)*s2)]
img_path_list["test"] += data[int(len(data)*s2):]
if verbose:
print(f'len(train_data): {len(img_path_list["train"])}')
print(f'len(val_data): {len(img_path_list["val"])}')
print(f'len(test_data): {len(img_path_list["test"])}')
print(f'val_data: {img_path_list["val"]}')
self.filename_list = img_path_list
return img_path_list
def get_image_list(self):
filename_list = dict()
for data_type in self.data_types:
dirname_new = os.path.join(self.dataset_path, data_type)
filename_list[data_type] = os.listdir(dirname_new)
# printj.red(filename_list[data_type])
# printj.yellow(dirname_new)
self.filename_list = filename_list
return filename_list
# aug_iaa = iaa.Sequential([
# iaa.flip.Fliplr(p=0.5),
# iaa.flip.Flipud(p=0.5),
# iaa.GaussianBlur(sigma=(0.0, 0.1)),
# iaa.MultiplyBrightness(mul=(0.65, 1.35)),
# ])
def get_augmentation(self, save_path=None, load_path=None):
if load_path:
return A.load(load_path)
else:
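# pipeline: resize, then one transform from each OneOf group (geometry,
# colour/brightness, noise, misc distortions), then normalise to mean 0.5 / std 0.5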
aug_seq1 = A.OneOf([
A.Rotate(limit=(-90, 90), p=1.0),
A.Flip(p=1.0),
], p=1.0)
aug_seq2 = A.OneOf([
# A.ChannelDropout(always_apply=False, p=1.0, channel_drop_range=(1, 1), fill_value=0),
A.RGBShift(r_shift_limit=15, g_shift_limit=15,
b_shift_limit=15, p=1.0),
A.RandomBrightnessContrast(always_apply=False, p=1.0, brightness_limit=(
-0.2, 0.2), contrast_limit=(-0.2, 0.2), brightness_by_max=True)
], p=1.0)
aug_seq3 = A.OneOf([
A.GaussNoise(always_apply=False, p=1.0, var_limit=(10, 100)),
A.ISONoise(always_apply=False, p=1.0, intensity=(
0.1, 1.0), color_shift=(0.01, 0.3)),
A.MultiplicativeNoise(always_apply=False, p=1.0, multiplier=(
0.8, 1.6), per_channel=True, elementwise=True),
], p=1.0)
aug_seq4 = A.OneOf([
A.Equalize(always_apply=False, p=1.0,
mode='pil', by_channels=True),
A.InvertImg(always_apply=False, p=1.0),
A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
A.OpticalDistortion(always_apply=False, p=1.0, distort_limit=(-0.3, 0.3),
shift_limit=(-0.05, 0.05), interpolation=0,
border_mode=0, value=(0, 0, 0), mask_value=None),
A.RandomFog(always_apply=False, p=1.0, fog_coef_lower=0.1,
fog_coef_upper=0.45, alpha_coef=0.5)
], p=1.0)
aug_seq = A.Compose([
A.Resize(self.img_size, self.img_size),
aug_seq1,
aug_seq2,
aug_seq3,
aug_seq4,
# A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
# aug_path = '/home/jitesh/prj/classification/test/bolt/aug/aug_seq.json'
if save_path:
A.save(aug_seq, save_path)
# loaded_transform = A.load(aug_path)
return aug_seq
def view_data(self):
cat_files = [tf for tf in self.filename_list["train"]
if "10" in tf] # if b_type in tf]
dir_path = os.path.join(self.dataset_path, "train")
alb_dataset = BoltDataset(cat_files, dir_path)
alb_dataloader = DataLoader(
dataset=alb_dataset, batch_size=16, shuffle=True)
data = iter(alb_dataloader)
images, _ = next(data)
# show images
self.show_img(torchvision.utils.make_grid(images))
def get_dataset(self):
dataset = dict()
for data_type in self.data_types:
dir_path = os.path.join(self.dataset_path, "train")
# dir_path = os.path.join(self.dataset_path, data_type)
catagory_data = dict()
# for b_type in ['b00', 'b01']: # b_type_list:
# for b_type in ['b10', 'b11']: # b_type_list:
for b_type in self.b_type_list:
# print(filename_list[data_type])
cat_files = [
tf for tf in self.filename_list[data_type] if b_type in tf]
# print(cat_files)
# , transform = albu_transforms)#data_transform)
catagory_data[b_type] = BoltDataset(
cat_files, dir_path, mode=data_type,
b_type_list=self.b_type_list, img_size=self.img_size)
dataset[data_type] = ConcatDataset(
[c for c in catagory_data.values()])
print(f'len({data_type}_data): {len(dataset[data_type])}')
return dataset
def get_dataloader(self, data=None):
if data is None:
dataset = self.get_dataset()
data = dataset["train"]
dataloader = DataLoader(
data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
print("len dataloader", len(dataloader))
self.dataloader = dataloader
return dataloader
def view_data2(self, dataloader=None, show_n_images: int = 24):
if dataloader is None:
dataloader = self.get_dataloader()
# images, labels = dataloader.next()
# self.show_img(torchvision.utils.make_grid(images))
# # show_n_images = 40
samples, labels = next(iter(dataloader))
# plt.figure(figsize=(16*2, 24))
grid_imgs = torchvision.utils.make_grid(samples[:show_n_images])
# np_grid_imgs = grid_imgs.numpy()
# a torch image tensor is (channels, height, width), so transpose it to (height, width, channels) in numpy to show it.
# plt.imshow(np.transpose(np_grid_imgs, (1,2,0)))
self.show_img(grid_imgs)
# plt.savefig("augr.png")
def get_lr(self):
for param_group in self.optimizer.param_groups:
return param_group['lr']
def set_model(self, learning_rate: float = 0.001, scheduler_on :bool=False, scheduler_steps: list=[500,1000,1500], dataloader=None):
if dataloader is None:
dataloader = self.get_dataloader()
model = torchvision.models.densenet121(pretrained=False)
# model = torchvision.models.googlenet(pretrained=False)
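# replace the DenseNet-121 classifier head with a small two-class head (500 hidden units)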
self.num_ftrs = num_ftrs = model.classifier.in_features
model.classifier = torch.nn.Sequential(
torch.nn.Linear(num_ftrs, 500),
torch.nn.Linear(500, 2)
# torch.nn.Linear(500, 1)
)
# model.features.
self.model = model.to(self.device)
self.loss_criteria = nn.CrossEntropyLoss()
# loss_criteria = nn.BCELoss()
self.optimizer = torch.optim.Adam(
model.parameters(), lr=learning_rate, amsgrad=True)
if scheduler_on:
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=scheduler_steps, gamma=0.5)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[ 100*self.batch_size], gamma=0.5)
# self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
# self.optimizer, milestones=[200], gamma=0.5)
# self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.1)
# self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, len(dataloader), eta_min=learning_rate)
# return model,
# # In[33]:
# val_sample = get_val(model=model,
# model_path="/home/jitesh/prj/classification/test/bolt/ckpt_densenet121_mark_correct_128_s3_2.pth",
# test_dir_path="/home/jitesh/sekisui/bolt/cropped_hexagon_bolts/b11",
# )
# # In[33]:
def train(self, epochs: int = 5, val_check_period = None,
val_sample=None,
dataloader_train=None, dataloader_val=None,
show_tensorboard=True, tensorboard_log_dir_path=None,
show_pyplot=False, weight_dir_path="weights", writer=None,
iteration_limit=np.inf):
data_available = False
if show_tensorboard:
if tensorboard_log_dir_path:
writer = SummaryWriter(log_dir=tensorboard_log_dir_path)
else:
writer = SummaryWriter()
else:
writer = None
dataset = self.get_dataset()
if dataloader_train is None:
dataloader_train = self.get_dataloader(dataset["train"])
if dataloader_val is None:
dataloader_val = DataLoader(dataset["val"],
batch_size=self.batch_size,
# batch_size=len(dataset["val"]),
shuffle=False, num_workers=self.num_workers)
# print(len(dataloader_val))
for val_samples, val_labels in dataloader_val:
val_samples = val_samples.to(self.device)
val_labels = val_labels.to(self.device)
# epochs = 3
# epochs = 10
itr = 1
p_itr = self.batch_size # 200
self.model.train()
total_loss = 0
loss_list = []
acc_list = []
val_total_loss = 0
val_loss_list = []
val_acc_list = []
# if val_check_period is None:
# val_check_period = len(dataloader_train)
# print("len dataloader", len(dataloader))
# dataloader_train=self.get_dataloader()
for epoch in range(epochs):
if itr > iteration_limit:
break
for batch_idx, (samples, labels) in enumerate(dataloader_train):
# all_val_labels = []
# all_val_output = []
# print(labels)
# print(labels.shape)
samples, labels = samples.to(
self.device), labels.to(self.device)
self.optimizer.zero_grad()
output = self.model(samples)
loss = self.loss_criteria(output, labels)
loss.backward()
total_loss += loss.item()
###
if val_sample:
val_output = self.model(val_sample)
val_loss = self.loss_criteria(val_output, torch.ones(
8, dtype=torch.long).to(self.device))
val_total_loss += val_loss.item()
elif val_check_period:
if itr % val_check_period == 0:
val_output = self.model(val_samples)
val_loss = self.loss_criteria(val_output, val_labels)
val_total_loss += val_loss.item()
# all_val_labels += val_labels.to(self.device)
# all_val_output += val_output
else:
val_output = output
###
self.optimizer.step()
if self.scheduler:
self.scheduler.step()
if itr % p_itr == 0:
pred = torch.argmax(output, dim=1)
correct = pred.eq(labels)
acc = torch.mean(correct.float())
loss_list.append(total_loss/p_itr)
acc_list.append(acc)
###
val_acc = 0
if val_sample:
val_pred = torch.argmax(val_output, dim=1)
val_correct = val_pred.eq(torch.ones(
8, dtype=torch.long).to(self.device))
val_acc = torch.mean(val_correct.float())
val_loss_list.append(val_total_loss/p_itr)
val_acc_list.append(val_acc)
print('[Epoch {}/{}] Iteration {} -> Train Loss: {:.4f}, Accuracy: {:.3f}, Val Accuracy: {:.3f}'.format(
epoch+1, epochs, itr, total_loss/p_itr, acc, val_acc))
elif val_check_period:
if itr % val_check_period == 0:
val_pred = torch.argmax(val_output, dim=1)
val_correct = val_pred.eq(val_labels)
val_acc = torch.mean(val_correct.float())
val_loss_list.append(val_total_loss/val_check_period)
val_acc_list.append(val_acc)
print('[Epoch {}/{}] Iteration {} -> Train Loss: {:.4f}, Accuracy: {:.3f}, Val Accuracy: {:.3f}'.format(
epoch+1, epochs, itr, total_loss/p_itr, acc, val_acc))
else:
print('[Epoch {}/{}] Iteration {} -> Train Loss: {:.4f}, Accuracy: {:.3f}'.format(
epoch+1, epochs, itr, total_loss/p_itr, acc))
else:
print('[Epoch {}/{}] Iteration {} -> Train Loss: {:.4f}, Accuracy: {:.3f}'.format(
epoch+1, epochs, itr, total_loss/p_itr, acc))
# loss_list.append(total_loss/p_itr)
# acc_list.append(acc)
if show_tensorboard:
# samples = samples / 2 + 0.5
# img_grid = torchvision.utils.make_grid(samples)
# writer.add_image('training data', img_grid)
# val_img_grid = torchvision.utils.make_grid(val_samples)
# writer.add_image('val data', val_img_grid)
# writer.add_graph(self.model, samples)
# log embeddings
# features = samples.view(-1, 3*self.img_size*self.img_size)
data = samples
data_labels = labels
# data = val_samples
# data_labels = val_labels
# features = data.reshape(data.shape[0], -1)
# class_labels = [self.b_type_list[label] for label in data_labels]
# writer.add_embedding(features,
# metadata=class_labels,
# label_img=data,
# global_step=batch_idx
# # label_img=samples.unsqueeze(1),
# )
# features = val_samples.reshape(val_samples.shape[0], -1)
# class_labels = [self.b_type_list[label] for label in val_labels]
# writer.add_embedding(features,
# metadata=class_labels,
# label_img=val_samples,
# global_step=batch_idx
# # metadata=self.b_type_list,
# # label_img=samples.unsqueeze(1),
# )
writer.add_scalar('* Loss/train', total_loss/p_itr, itr)
writer.add_scalar('* Accuracy/train', acc, itr)
writer.add_scalar('Learning Rate', self.get_lr(), itr)
if show_tensorboard:
if val_sample or val_check_period:
if itr % val_check_period == 0:
# print(f"{val_total_loss}/{val_check_period} = {val_total_loss/val_check_period}")
writer.add_scalar('* Loss/val', val_total_loss/val_check_period, itr)
writer.add_scalar('* Accuracy/val', val_acc, itr)
if itr > (epochs)*len(dataloader_train) -p_itr - 2:
writer.add_hparams({
'Learning rate': self.get_lr(),
'Batch Size': self.batch_size,
'Image Size': self.img_size,
'Iterations': itr,
},
{
'* Accuracy/train': acc,
'* Accuracy/val': val_acc,
'* Loss/train': total_loss/p_itr,
'* Loss/val': val_total_loss/val_check_period,
})
# writer.add_scalar('Loss/compare',
# {'train': total_loss/p_itr,
# 'val': val_total_loss/val_check_period}, itr)
# writer.add_scalar('Accuracy/compare',
# {'train': acc,
# 'val': val_acc}, itr)
total_loss = 0
val_total_loss = 0
data_available = True
itr += 1
if show_pyplot:
plt.plot(loss_list, label='loss')
if val_sample or val_check_period:
plt.plot(val_loss_list, label='val_loss')
plt.legend()
plt.title('training and val loss')
plt.show()
###
plt.plot(acc_list, label='accuracy')
if val_sample or val_check_period:
plt.plot(val_acc_list, label='val_accuracy')
plt.legend()
plt.title('training and val accuracy')
plt.show()
''' Saving multiple weights'''
checkpoint = {
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()
}
checkpoint_dir = f'{weight_dir_path}/checkpoint'
best_model_dir = f'{weight_dir_path}/best_model_dir'
filename_pth = f'ckpt_densenet121_mark_correct_{self.img_size}_s3_{itr}.pth'
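# a single checkpoint is written via save_ckp once 10% of the total training iterations is reached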
if itr == int(epochs*len(dataloader_train)/10):
# if epoch % 10 == 0:
save_ckp(state=checkpoint,
checkpoint_dir=checkpoint_dir,
best_model_dir=best_model_dir,
filename_pth=filename_pth)
# torch.save(self.model.state_dict(), os.path.join(weight_dir_path, filename_pth))
printj.yellow.on_purple(f"Weight file saved: {os.path.join(weight_dir_path, filename_pth)}")
print("Total iterations: ", itr-1)
if show_pyplot:
plt.plot(loss_list, label='loss')
plt.plot(acc_list, label='accuracy')
plt.legend()
plt.title('training loss and accuracy')
plt.show()
if val_sample or val_check_period:
plt.plot(val_acc_list, label='Test_accuracy')
plt.plot(acc_list, label='Train_accuracy')
plt.legend()
plt.title('training loss and accuracy')
plt.show()
# filename_pth = 'ckpt_densenet121_catdog.pth'
# filename_pth = f'ckpt_densenet121_mark_exist_{self.img_size}.pth'
filename_pth = f'ckpt_densenet121_mark_correct_{self.img_size}_s3.pth'
torch.save(self.model.state_dict(), os.path.join(weight_dir_path, filename_pth))
def _get_something(self, data=None):
test_transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.ToTensor()
])
# testset = CatDogDataset(test_files, TEST_IMG_DIR_PATH, mode='test', transform = test_transform)
# testloader = DataLoader(testset, batch_size = 32, shuffle=False, num_workers=4)
if data is None:
dataset = self.get_dataset()
data = dataset["train"]
testloader = DataLoader(
data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
self.model.eval()
fn_list = []
pred_list = []
for x, fn in testloader:
with torch.no_grad():
x = x.to(self.device)
output = self.model(x)
pred = torch.argmax(output, dim=1)
fn_list += [n[:-4] for n in fn]
pred_list += [p.item() for p in pred]
submission = pd.DataFrame({"id": fn_list, "label": pred_list})
submission.to_csv(f'preds_densenet121_dir_{self.img_size}.csv',
# index=False
)
def get_pred(self, model, model_path, test_dir_path, no_of_samples: int = 24, test_label: int = 1, save_csv_path: str = None):
model.load_state_dict(torch.load(model_path))
# "/home/jitesh/prj/classification/test/bolt/ckpt_densenet121_mark_correct_128_s3_5.pth"))
data_transform = transforms.Compose([
transforms.Resize((self.img_size, self.img_size)),
transforms.ToTensor()
])
test_list = [file for file in os.listdir(test_dir_path) if os.path.isfile(
os.path.join(test_dir_path, file)) and "b1" in file]
test_list = sorted(test_list)
# print(test_list)
test_data = BoltDataset(test_list, test_dir_path, mode="test",
transform=data_transform, test_label=test_label)
testloader = DataLoader(
test_data, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)
model.eval()
fn_list = []
pred_list = []
for x, fn in testloader:
with torch.no_grad():
x = x.to(self.device)
output = model(x)
pred = torch.argmax(output, dim=1)
fn_list += [n[:-4] for n in fn]
pred_list += [p.item() for p in pred]
submission = pd.DataFrame({"id": fn_list, "label": pred_list})
if save_csv_path is None:
save_csv_path = f'preds_densenet121_dir_{self.img_size}_test_.csv'
submission.to_csv(save_csv_path,
# index=False
)
samples, _ = next(iter(testloader))
samples = samples.to(self.device)
val_sample = samples[:no_of_samples]
return val_sample
def _get_pred_(self, data=None):
if data is None:
dataset = self.get_dataset()
data = dataset["train"]
testloader = DataLoader(
data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
samples, _ = next(iter(testloader))
samples = samples.to(self.device)
fig = plt.figure(figsize=(24, 16))
ax = fig.add_subplot(1, 1, 1)
ax.set_facecolor('xkcd:salmon')
ax.set_facecolor((1.0, 0.47, 0.42))
fig.tight_layout()
output = self.model(samples[:24])
pred = torch.argmax(output, dim=1)
pred = [p.item() for p in pred]
# ad = {0:'cat', 1:'dog'}
# ad = {0:'no mark', 1:'mark'}
ad = {0: 'Incorrect', 1: 'Correct'}
# for num, sample in enumerate(samples[:24]):
for num, sample in enumerate(samples[:24]):
plt.subplot(4, 6, num+1)
plt.title(ad[pred[num]])
# plt.axis('off')
sample = sample.cpu().numpy()
plt.imshow(np.transpose(sample, (1, 2, 0)))
# ax = plt.gca()
plt.savefig('inference_mark_direction.png')
def preview_aug(self, image_path, grid=[4, 4], save_path=None):
pillow_image = Image.open(image_path)
image = np.array(pillow_image)
images = []
new_im = Image.new('RGB', (grid[1]*self.img_size,grid[0]*self.img_size))
# bolt =BoltClassifier(img_size=img_size)
transform = self.get_augmentation() #load_path='/home/jitesh/prj/classification/test/bolt_src/aug/aug_seq.json')
# rsize = self.img_size
# rsize = int(1000/max(grid[0], grid[1]))
# xp = int(1000/grid[1])
# yp = int(1000/grid[0])
for i in range(grid[0]):
for j in range(grid[1]):
transformed_image = transform(image=image)['image']
# transformed_image.thumbnail((xp,xp))
t_image = self.convert_img_shape_aug_to_normal(transformed_image)
# transformed_image.resize(xp,xp, Image.ANTIALIAS)
# print(transformed_image)
# new_im.paste(transformed_image, (i*xp,j*xp))
# t_image = t_image.resize((rsize,rsize), Image.ANTIALIAS)
# print(t_image)
new_im.paste(t_image, (j*self.img_size, i*self.img_size))  # paste box is (x, y): column index j gives x, row index i gives y
# new_im.paste(t_image)
# import sys
# sys.exit()
new_im.show()
if save_path:
new_im.save(save_path)
``` |
{
"source": "Jitesh17/jaitool",
"score": 2
} |
#### File: jaitool/annotation/change_background.py
```python
import os
import random
from datetime import datetime
from operator import itemgetter
from sys import exit as x
# from jaitool.annotation.COCO import COCO_Datase
# from jaitool.annotation.NDDS import NDDS_Dataset
# from logger import logger
import albumentations as A
import cv2
import numpy as np
import printj
# from albumentations.augmentations.functional import rotate
# from annotation_utils.coco.structs import COCO_Dataset
# from annotation_utils.ndds.structs import NDDS_Dataset
from pyjeasy.file_utils import (dir_contents_path_list_with_extension,
make_dir_if_not_exists)
from pyjeasy.image_utils.edit import get_all_colors, resize_img
from pyjeasy.image_utils.preview import show_image
from pyjeasy.image_utils import create_mask
from tqdm import tqdm
def replace_bg_wrt_transparency(
bg_dirs: list,
img_dir_name: str = "img",
output_img_dir_name: str = "img_",
aug_on: bool = False,
aug_json: str = None,
show_preview: bool = False,):
pass
def aug_flip_and_rotate(load_path=None):
if load_path:
return A.load(load_path)
else:
aug_seq = A.Compose([
A.Rotate(limit=(-90, 90), p=0.5),
A.Flip(p=0.5),
A.OpticalDistortion(
distort_limit=0.05, shift_limit=0.05,
interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101,
value=None, mask_value=None, always_apply=False,
p=0.5)
])
return aug_seq
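# A hedged usage sketch of aug_flip_and_rotate (the image variable below is a
# hypothetical BGR numpy array, and the .json path a previously saved pipeline):
# aug = aug_flip_and_rotate()                     # or aug_flip_and_rotate("aug_seq.json")
# augmented = aug(image=some_bgr_image)['image']  # albumentations returns a dict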
def image_sequence(image_path_list):
num = 0
while True:
yield cv2.imread(image_path_list[num])
if num + 1 < len(image_path_list):  # step through every image before wrapping around
num += 1
else:
num = 0
random.shuffle(image_path_list)
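# A hedged usage sketch of image_sequence: it yields backgrounds forever,
# reshuffling whenever the list wraps, so callers just pull one per frame.
# bg_paths = ["bg_a.jpg", "bg_b.jpg"]  # hypothetical files
# bg_gen_example = image_sequence(bg_paths)
# first_bg = next(bg_gen_example)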
def replace_bg_wrt_seg_ann(
coco_data_dir: str,
json_filename: str,
bg_dirs: list,
img_dir_name: str = "img",
output_img_dir_name: str = "img_",
aug_on: bool = False,
aug_json: str = None,
show_preview: bool = False,
):
coco_dataset = COCO_Dataset.load_from_path(
json_path=f"{coco_data_dir}/json/{json_filename}.json", check_paths=False)
# image_path_list = folder_list(folder1)
image_path_list = []
for bg_dir in bg_dirs:
image_path_list += dir_contents_path_list_with_extension(
dirpath=bg_dir,
extension=['.jpg', '.jpeg', '.png'])
bg_gen = image_sequence(image_path_list)
pbar = tqdm(coco_dataset.images, colour='#44aa44')
for image in pbar:
pbar.set_description("Changing background")
pbar.set_postfix({'file_name': image.file_name})
image_path_split = image.coco_url.split("/")
image_path_split[-2] = img_dir_name
image_path = "/".join(image_path_split)
for ann in coco_dataset.annotations:
if ann.image_id == image.id:
seg = ann.segmentation
background = next(bg_gen)
if aug_on:
aug = aug_flip_and_rotate(aug_json)
background = aug(image=np.array(background))['image']
orig_image = cv2.imread(image_path)
assert orig_image.shape[1] == image.width
assert orig_image.shape[0] == image.height
mask = np.zeros((image.height, image.width), np.uint8)  # numpy images are (rows, cols) = (height, width), matching the asserts above
contours = seg.to_contour()
cv2.drawContours(mask, contours, -1, (255, 255, 255), -1)
final = replace_bg_wrt_mask(orig_image, background, mask)
if show_preview:
show_image(final)
else:
output = os.path.join(coco_data_dir, output_img_dir_name)
make_dir_if_not_exists(coco_data_dir)
make_dir_if_not_exists(output)
output_path = os.path.join(output, image.file_name)
cv2.imwrite(output_path, final)
def replace_bg_wrt_mask(orig_image, background, mask):
fg = cv2.bitwise_or(orig_image, orig_image, mask=mask)
mask = cv2.bitwise_not(mask)
background = resize_img(src=background, size=(
orig_image.shape[0], orig_image.shape[1]))
bg = cv2.bitwise_or(background, background, mask=mask)
final = cv2.bitwise_or(fg, bg)
return final
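# A hedged sketch of how replace_bg_wrt_mask composes an image (file names are
# hypothetical placeholders): white mask pixels keep the foreground, black ones
# take the resized background.
# fg_img = cv2.imread("sample_fg.png")
# bg_img = cv2.imread("sample_bg.png")
# m = np.zeros(fg_img.shape[:2], np.uint8)
# cv2.circle(m, (fg_img.shape[1] // 2, fg_img.shape[0] // 2), 50, 255, -1)
# composed = replace_bg_wrt_mask(fg_img, bg_img, m)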
def replace_bg_wrt_isimg(
ndds_data_dir: str,
coco_data_dir: str,
bg_dirs: list,
json_filename: str = None,
bg_iscolor: list = None,
output_img_dir_name: str = "img_",
aug_on: bool = False,
aug_json: str = None,
show_preview: bool = False,
verbose: bool = False,
):
make_dir_if_not_exists(os.path.abspath(
os.path.join(coco_data_dir, '../..')))
make_dir_if_not_exists(os.path.abspath(os.path.join(coco_data_dir, '..')))
make_dir_if_not_exists(coco_data_dir)
# Load NDDS Dataset
# ndds_dataset = NDDS_Dataset.load_from_dir(
# json_dir=ndds_data_dir,
# show_pbar=True
# )
coco_dataset = COCO_Dataset.load_from_path(
json_path=f"{coco_data_dir}/json/{json_filename}.json", check_paths=False)
image_path_list = []
for bg_dir in bg_dirs:
image_path_list += dir_contents_path_list_with_extension(
dirpath=bg_dir,
extension=['.jpg', '.jpeg', '.png'])
bg_gen = image_sequence(image_path_list)
# pbar = tqdm(ndds_dataset.frames, colour='#44aa44')
pbar = tqdm(coco_dataset.images, colour='#44aa44',
total=len(coco_dataset.images))
bg_gen = image_sequence(image_path_list)
for image in pbar:
pbar.set_description("Changing background")
# pbar.set_postfix({'file_name': image.file_name})
is_path = ndds_data_dir + '/' + \
image.file_name.split('.')[0]+'.is.'+image.file_name.split('.')[-1]
# img_path = ndds_data_dir +
if verbose:
printj.green(image.coco_url)
printj.green(is_path)
img = cv2.imread(image.coco_url)
is_img = cv2.imread(is_path)
is_img2 = is_img.copy()
# from PIL import Image
# img0 = Image.open(is_path)
# colors = img0.convert('RGB').getcolors()
# printj.red(colors)
if bg_iscolor:
mask = create_mask(img=is_img2, color=list(
reversed(bg_iscolor)), difference=2, min_limit=0, max_limit=255)
else:
# img.convert('RGB').getcolors()
colors = get_all_colors(img_path=is_path)
colors = tuple(sorted(colors, key=itemgetter(0), reverse=True))
_bg_iscolor = list(colors[0][1])
if verbose:
printj.cyan(
f"\nAll {len(colors)} colors in the image: {colors}")
printj.yellow(f'Background color is {_bg_iscolor}')
mask = create_mask(img=is_img2, color=list(
reversed(_bg_iscolor)), difference=2, min_limit=0, max_limit=255)
background = next(bg_gen)
if aug_on:
aug = aug_flip_and_rotate(aug_json)
background = aug(image=np.array(background))['image']
background = resize_img(
src=background, size=(img.shape[1], img.shape[0]))
# while (img.shape[1] > background.shape[1]) and (img.shape[0] > background.shape[0]):
# background1 = cv2.hconcat([background, next(bg_gen)])
# background2 = cv2.hconcat([next(bg_gen), next(bg_gen)])
# background = cv2.vconcat([background1, background2])
# background = background[:img.shape[1], :img.shape[0]]
bg = cv2.bitwise_or(background, background, mask=mask)
mask = cv2.bitwise_not(mask)
fg = cv2.bitwise_or(img, img, mask=mask)
final = cv2.bitwise_or(fg, bg)
output = os.path.join(coco_data_dir, output_img_dir_name)
make_dir_if_not_exists(coco_data_dir)
make_dir_if_not_exists(output)
collaged_output = os.path.join(output, image.file_name)
if show_preview:
quit = show_image(final)
if quit:
break
else:
cv2.imwrite(collaged_output, final)
if __name__ == "__main__":
now = datetime.now()
dt_string3 = now.strftime("%Y_%m_%d_%H_%M_%S")
key = 'bolt'
# folder_name = f'b8'
# coco_data_dir = f'/home/jitesh/3d/data/coco_data/bolt/{folder_name}_coco-data'#_{dt_string3}_coco-data'
folder_name = f'bolt_3-4'
# folder_name = f'ram-bolt'
# _{dt_string3}_coco-data'
coco_data_dir = f'/home/jitesh/3d/data/coco_data/bolt/{folder_name}'
bg_dirs = ["/home/jitesh/3d/data/images_for_ndds_bg/solar_panel"]
# bg_dirs.append("/home/jitesh/3d/data/images_for_ndds_bg/collaged_images_random-size")
# bg_dirs.append("/home/jitesh/3d/data/images_for_ndds_bg/collaged_images_random-size-v")
replace_bg_wrt_seg_ann(
coco_data_dir=coco_data_dir,
json_filename=key,
bg_dirs=bg_dirs,
img_dir_name="img0",
aug_on=True)
```
#### File: jaitool/annotation/ue_to_coco_tropicana.py
```python
import os
from sys import exit as x
from datetime import datetime
import cv2
import numpy as np
import printj
from annotation_utils.coco.structs import COCO_Category_Handler, COCO_Dataset
from annotation_utils.ndds.structs import NDDS_Dataset
from logger import logger
# dataset.display_preview(show_details=True)
# import disk2points_algo_fit_center as d2p
import vis_data
# from cook_data import run as cook
def make_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def id_to_color(RGBint):
pixel_b = RGBint & 255
pixel_g = (RGBint >> 8) & 255
pixel_r = (RGBint >> 16) & 255
return [pixel_b, pixel_g, pixel_r]
def create_mask(img, color):
lower = np.array(color)-np.array([1]*3) #, dtype="uint8")
upper = np.array(color)+np.array([1]*3) #, dtype="uint8")
mask = cv2.inRange(img, lower, upper)
return mask
now = datetime.now()
dt_string = now.strftime("%Y/%m/%d %H:%M:%S")
dt_string2 = now.strftime("%Y-%m-%d")
dt_string3 = now.strftime("%Y_%m_%d_%H_%M_%S")
# folder_name = f'h5_1000'
# folder_name = f'hc1_1000'
# folder_name = f'hr1_300'
# folder_name = f'hc11_400'
folder_name = f'tropi1'
# folder_name = f'hsk2_200'
# folder_name = f'hlk2_200'
ndds_path = f'/home/jitesh/3d/data/UE_training_results/{folder_name}'
coco_data_dir = f'/home/jitesh/3d/data/coco_data/{folder_name}_coco-data'#_{dt_string3}_coco-data'
make_dir_if_not_exists(os.path.abspath(os.path.join(coco_data_dir, '..')))
make_dir_if_not_exists(coco_data_dir)
# Load NDDS Dataset
ndds_dataset = NDDS_Dataset.load_from_dir(
json_dir=ndds_path,
show_pbar=True
)
# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
for frame in ndds_dataset.frames:
# printj.red(frame.img_path)
# is_path = frame.img_path.split('.')[0]+'.is.'+frame.img_path.split('.')[-1]
# # printj.red(is_path)
# img = cv2.imread(is_path)
# # printj.cyan(img)
# img2= img.copy()
# from PIL import Image
# img0 = Image.open(is_path)
# colors = img0.convert('RGB').getcolors()
# printj.red(colors)
# x()
# short
# change1_from = [36, 51, 243] # red
# change1_from = [240, 255, 255] # white
# # change2_from = id_to_color(15938340) # pole1
# change_to = id_to_color(7626000) # pole0
# # long
# # change1_from = [240, 255, 255] # white
# # change2_from = [8, 93, 244] # pole1
# # change_to = [40, 186, 104] # pole0
# # change1_from = list(colors[0][1])[::-1] # white
# # change2_from = list(colors[1][1])[::-1] # pole1
# # change_to = list(colors[-1][1])[::-1] # pole0
# # printj.red(change1_from)
# # printj.red(change2_from)
# # printj.red(change_to)
# mask1 = create_mask(img, change1_from)
# img2[mask1==255]=change_to
# mask2 = create_mask(img, change2_from)
# img2[mask2==255]=change_to
# cv2.imshow('img', img)
# cv2.waitKey(111111)
# cv2.imshow('img2', img2)
# cv2.waitKey(11111)
# x()
# cv2.imwrite(is_path, img2)
# Fix Naming Convention
for ann_obj in frame.ndds_ann.objects:
# printj.yellow.on_black(ann_obj)
# all_keys = set().union(*(d for d in ann_obj))
# printj.yellow.bold_on_black(all_keys)
# raise Exception
if ann_obj.class_name.startswith('tropicana'):
obj_type, obj_name = 'seg', 'tropicana'
instance_name = '0' #ann_obj.class_name #.replace('hook', '')
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
# printj.yellow( ann_obj.class_name)
else:
logger.error(f'ann_obj.class_name: {ann_obj.class_name}')
# raise Exception
# Delete Duplicate Objects
frame.ndds_ann.objects.delete_duplicates(verbose=True, verbose_ref=frame.img_path)
# ndds_dataset.save_to_path(save_path=f'{coco_data_dir}/hook_fixed_ndds.json', overwrite=True)
# Convert To COCO Dataset
dataset = COCO_Dataset.from_ndds(
ndds_dataset=ndds_dataset,
# categories=COCO_Category_Handler.load_from_path(f'/home/jitesh/3d/data/categories/hook_7ckpt.json'),
categories=COCO_Category_Handler.load_from_path(f'/home/jitesh/3d/data/categories/tropicana.json'),
naming_rule='type_object_instance_contained',
ignore_unspecified_categories=True,
show_pbar=True,
bbox_area_threshold=1,
default_visibility_threshold=-1,
allow_unfound_seg=True,
)
make_dir_if_not_exists(coco_data_dir)
img_path = f'{coco_data_dir}/img'
make_dir_if_not_exists(coco_data_dir)
ann_dir = f'{coco_data_dir}/json'
make_dir_if_not_exists(ann_dir)
dataset.move_images(
dst_img_dir=img_path,
preserve_filenames=False,
update_img_paths=True,
overwrite=True,
show_pbar=True
)
# if not os.path.exists(coco_data_dir):
# os.makedirs(coco_data_dir)
key='tropicana'
dataset.save_to_path(f'{ann_dir}/{key}.json', overwrite=True)
# new_dataset = d2p.run(ann_dir)
# new_dataset.display_preview(show_details=True)
# new_dataset = cook(img_path, f'{ann_dir}/new-hook.json', key_num=11)
# new_dataset = cook(img_path, f'{ann_dir}/{key}.json', key_num=7)
vis_data.complete(
# img_dir=f'{coco_data_dir}',
img_dir=f'{img_path}',
json_path=f'{ann_dir}/{key}.json',
show_image=True,
show_video=True,
show_seg=True,
)
```
#### File: jaitool/aug/augment.py
```python
import albumentations as A
import printj, cv2
# from pyjeasy.image_utils import show_image
# def flatten(t): return [item for sublist in t for item in sublist]
# def get_ann(img, mask):
# ret, thresh = cv2.threshold(mask, 127, 255, 0)
# contours, hierarchy = cv2.findContours(
# thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# seg = [flatten(flatten(c)) for c in contours]
# x = [c[0][0] for c in flatten(contours)]
# y = [c[0][1] for c in flatten(contours)]
# xmin = min(x)
# ymin = min(y)
# xmax = max(x)
# ymax = max(y)
# bbox = [xmin, ymin, xmax, ymax]
# cv2.rectangle(img, (xmin, ymin), (xmax, ymax), [222, 111, 222], 2)
# for xi, yi in zip(x, y):
# img = cv2.circle(img, (xi, yi), radius=1,
# color=(0, 0, 255), thickness=-1)
# cv2.fillPoly(img, pts=contours, color=(11, 255, 11))
# return img, seg, bbox
def get_augmentation(save_path=None, load_path=None):
if load_path is not None:
aug_seq=A.load(load_path)
# printj.red.bold_on_green("00000000000000000000000000000000")
# img1 = cv2.imread(
# "/home/jitesh/prj/belt-hook/data/training_data/8/coco_data_out/rgb_0010.png")
# mask1 = cv2.imread(
# "/home/jitesh/prj/belt-hook/data/training_data/8/coco_data_mask/rgb_0010.png", 0)
# print(img1.shape)
# print(mask1.shape)
# oo=cv2.hconcat([img1, cv2.cvtColor(mask1.copy(),cv2.COLOR_GRAY2RGB)])
# get_ann(img1.copy(), mask1.copy())
# for i in range(50):
# a = aug_seq(image=img1.copy(), mask=mask1.copy())
# img2 = a["image"]
# mask2 = a["mask"]
# img, seg, bbox = get_ann(img2, mask2)
# ooo=cv2.hconcat([img2, cv2.cvtColor(mask2.copy(),cv2.COLOR_GRAY2RGB)])
# o=cv2.vconcat([oo, ooo])
# show_image(o, "", 1111)
return aug_seq
else:
# print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# build the default flip/rotate/colour/noise pipeline below and optionally save it to save_path
aug_seq1 = A.OneOf([
A.Rotate(limit=(-90, 90), p=1.0),
A.Flip(p=1.0),
A.OpticalDistortion(always_apply=False, p=1.0, distort_limit=(-0.3, 0.3),
shift_limit=(-0.05, 0.05), interpolation=3,
border_mode=3, value=(0, 0, 0), mask_value=None),
], p=1.0)
aug_seq2 = A.OneOf([
# A.ChannelDropout(always_apply=False, p=1.0, channel_drop_range=(1, 1), fill_value=0),
A.RGBShift(r_shift_limit=15, g_shift_limit=15,
b_shift_limit=15, p=1.0),
A.RandomBrightnessContrast(always_apply=False, p=1.0, brightness_limit=(
-0.2, 0.2), contrast_limit=(-0.2, 0.2), brightness_by_max=True)
], p=1.0)
aug_seq3 = A.OneOf([
A.GaussNoise(always_apply=False, p=1.0, var_limit=(10, 50)),
A.ISONoise(always_apply=False, p=1.0, intensity=(
0.1, 1.0), color_shift=(0.01, 0.3)),
A.MultiplicativeNoise(always_apply=False, p=1.0, multiplier=(
0.8, 1.6), per_channel=True, elementwise=True),
], p=1.0)
aug_seq4 = A.OneOf([
A.Equalize(always_apply=False, p=1.0,
mode='pil', by_channels=True),
A.InvertImg(always_apply=False, p=1.0),
A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
A.RandomFog(always_apply=False, p=1.0,
fog_coef_lower=0.01, fog_coef_upper=0.2, alpha_coef=0.2)
], p=1.0)
aug_seq = A.Compose([
# A.Resize(self.img_size, self.img_size),
aug_seq1,
aug_seq2,
aug_seq3,
aug_seq4,
# A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
# aug_path = '/home/jitesh/prj/classification/test/bolt/aug/aug_seq.json'
if save_path:
A.save(aug_seq, save_path)
# loaded_transform = A.load(aug_path)
return aug_seq
if __name__ == "__main__":
get_augmentation(
save_path="/home/jitesh/prj/SekisuiProjects/test/gosar/bolt/training_scripts/aug_seq2.json",
load_path=None)
``` |
{
"source": "Jitesh17/kaggle",
"score": 3
} |
#### File: kaggle/rock-paper-scissors/my_agent2 copy 2.py
```python
import random
import numpy as np
import xgboost as xgb
from sklearn.metrics import accuracy_score
from xgboost.sklearn import XGBClassifier
action_list = np.empty((0, 0), dtype=int)
observation_list = np.empty((0, 0), dtype=int)
result_list = np.empty((0, 0), dtype=int)
params_xgb = {
"objective": "multi:softmax",
"num_class": 3,
"eval_metric": "merror",
"max_depth": 5,
"eta": 0.08,
"tree_method": "exact"
}
def i_win(me, you):
return int((me - you + 4) % 3) - 1
# for i in range(3):
# text = ""
# for j in range(3):
# text += f'{i_win(i, j)} '
# print(f'{text}')
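# Worked check of the encoding above (0 = rock, 1 = paper, 2 = scissors):
# assert i_win(1, 0) == 1    # paper beats rock
# assert i_win(0, 1) == -1   # rock loses to paper
# assert i_win(2, 2) == 0    # draw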
def Agent(observation, configuration):
global action_list, observation_list, result_list
if observation.step == 0:
action = random.randint(0, 2)
action_list = np.append(action_list, action)
return action
# if observation.step == 1:
if observation.step == 1:
observation_list = np.append(observation_list, observation.lastOpponentAction)
result_list = np.append(result_list,
i_win(action_list[-1], observation.lastOpponentAction))
action = random.randint(0, 2)
action_list = np.append(action_list, action)
return action
# if observation.step <20:
# observation_list = np.append(observation_list, observation.lastOpponentAction)
# result_list = np.append(result_list,
# result_list[-1]+i_win(action_list[-1], observation.lastOpponentAction))
# action = random.randint(0, 2)
# action_list = np.append(action_list, action)
# return action
observation_list = np.append(observation_list, observation.lastOpponentAction)
result_list = np.append(result_list, result_list[-1]+i_win(action_list[-1], observation.lastOpponentAction))
if observation.step < 50:
start_from = 0
else:
start_from = -1*random.randint(16, 20)
X_train = np.vstack([action_list[start_from:-1],
observation_list[start_from:-1],
# result_list[start_from:-1]
]).T
y_train = np.roll(observation_list, -1)[start_from:-1].T
d_train = xgb.DMatrix(X_train, label=y_train)
model = xgb.train(params=params_xgb,
dtrain=d_train,
num_boost_round=30,
verbose_eval=0,
evals=[(d_train, "train")])
pred_train = model.predict(d_train, ntree_limit=model.best_ntree_limit)
score = accuracy_score(pred_train, y_train)
if score > 0.33:
last_data = np.array(
[action_list[-1], observation_list[-1]]).reshape(1, -1)
# [action_list[-1], observation_list[-1], result_list[-1]])
# X_test = np.array([[my_actions[-1], observation.lastOpponentAction]])
d_test = xgb.DMatrix(last_data)
pred_obs = model.predict(d_test, ntree_limit=model.best_ntree_limit)
action = int((pred_obs + 1) % 3)
else:
action = random.randint(0, 2)
# model = XGBClassifier(
# learning_rate=0.01,
# n_estimators=30,
# nthread=4,
# use_label_encoder=False)
# model.fit(X_train, y_train)
# expected_observation = model.predict(last_data.reshape(1, -1))
# if sum(result_list) < -3:
# if result_list[-1] < -3:
# if random.randint(0, 1):
# action = int((expected_observation - 1) % 3)
# else:
# action = expected_observation
# else:
# action = int((expected_observation + 1) % 3)
# action = int((expected_observation + 1) % 3)
# action = 2
action_list = np.append(action_list, action)
return action
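# A hedged local smoke test, assuming the kaggle-environments package is
# installed; it is not part of the submission file itself.
# from kaggle_environments import make
# env = make("rps")
# env.run([Agent, Agent])  # self-play just to confirm the agent returns valid moves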
``` |
{
"source": "Jitesh17/learn_rnn",
"score": 3
} |
#### File: Jitesh17/learn_rnn/gym_tutorial.py
```python
import gym
import numpy as np
class rl_memory(object):
"""Data storage and batch retrieval class for DQN"""
def __init__(self, capacity, batch_size, seed):
self.capacity = capacity
self.states = np.zeros((self.capacity, 4))
self.actions = np.zeros(self.capacity, dtype=int)  # np.int was removed from recent NumPy; the builtin int is equivalent here
self.rewards = np.zeros(self.capacity)
self.next_states = np.zeros((self.capacity, 4))
self.current = 0
def add(self, state, action, reward, next_state):
self.states[self.current] = state
self.actions[self.current] = action
self.rewards[self.current] = reward
self.next_states[self.current] = next_state
self.current = (self.current + 1) % self.capacity
def get_batch(self, batch_size):
# sample without replacement from the filled portion of the buffer
indexes = np.random.choice(min(self.capacity, self.current),
batch_size, replace=False)
return (self.states[indexes], self.actions[indexes],
self.rewards[indexes], self.next_states[indexes])
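# A hedged usage sketch of the replay buffer above, with fake CartPole-sized
# transitions (the state dimension 4 is hard-coded in __init__):
# memory = rl_memory(capacity=1000, batch_size=32, seed=0)
# for _ in range(64):
#     memory.add(np.random.randn(4), 0, 1.0, np.random.randn(4))
# states, actions, rewards, next_states = memory.get_batch(32)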
env = gym.make('CartPole-v0')
env_vis = []
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env_vis.append(env.render(mode = 'rgb_array'))
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.close()
``` |
{
"source": "Jitesh17/ortools_projects",
"score": 3
} |
#### File: ortools_projects/src/scheduler.py
```python
from __future__ import annotations
import os
import sys
import numpy as np
import openpyxl
import pandas as pd
import printj
from ortools.sat.python import cp_model
from printj import ColorText as ct
# from typing import Union
class TimeVar:
def __init__(self, hours: int, minutes: int):
while minutes > 60:
minutes -= 60
hours += 1
self.hours = hours
self.minutes = minutes
self.time_str = f'{hours}:{minutes}'
def __str__(self):
return self.time_str
def __add__(self, added_time: TimeVar):
return TimeVar(self.hours + added_time.hours, self.minutes + added_time.minutes)
@classmethod
def by_string(cls, time: str):
time_split_hour_min = time.split(":")
hours = int(time_split_hour_min[0])
minutes = int(time_split_hour_min[1])
return cls(hours, minutes)
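# A hedged usage sketch of TimeVar, mirroring how main() builds the shift grid:
# start = TimeVar.by_string("6:30")
# shift_times = [start + TimeVar(0, 20 * i) for i in range(3)]
# [str(t) for t in shift_times]  # -> ['6:30', '6:50', '7:10']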
# # function to get unique values
# def unique(list1):
# # insert the list to the set
# list_set = set(list1)
# # convert the set to the list
# unique_list = (list(list_set))
# # for x in unique_list:
# # print x,
# return unique_list
class Scheduler:
def __init__(self) -> None:
# pass
self.input_data_package = None
self.input_data_worker = None
self.input_data_location = None
self.time_shifts = None
self.num_vehicles = None
def __call__(self, input_data_package, input_data_worker, input_data_location,
time_shifts,
num_vehicles: int = 4, ):
self.input_data_package = input_data_package
self.input_data_worker = input_data_worker
self.input_data_location = input_data_location
self.time_shifts = time_shifts
self.num_vehicles = num_vehicles
self.input_data_package.dropna(subset = ["package"], inplace=True)
self.input_data_package.dropna(axis=1, how='all')
self.input_data_package_orig, self.input_data_worker_orig, self.input_data_location_orig = self.input_data_package.copy(
), self.input_data_worker.copy(), self.input_data_location.copy()
def solution_printer(self):
alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"""
data = []
for p in range(num_packages):
# print('Package %i' % p)
data_i = []
for s in range(num_shifts):
s_val = ct.white('0 ')
for w in range(num_workers):
is_working = False
for v in range(num_vehicles):
if solver.Value(self.shifts[(w, p, v, s)]) == 1:
is_working = True
# print(' Worker %i works shift %i' % (w, s))
text_worker = ct.green(
f'Worker {alphabets[w]}')
# text_shift = ct.purple(f'shift {["9:00", "10:00", "11:00", "12:00", ][s]}')
text_shift = ct.purple(f'shift {time_shifts[s]}')
# text_shift = ct.purple(f'shift {s}')
text_package = ct.cyan(f'package-{p}')
text_vehicle = ct.yellow(
f'vehicle {v+1}')
# text_keiro = ct.yellow(
# f'keiro {["Main2", "Main1", "SUB", ][v]}')
# if p in [2, 4]:
# print(
# f' {text_worker} at {text_shift} moves {text_package} using {text_vehicle}')
s_val = ct.green(f'{alphabets[w]}{v+1} ')
data_i.append(s_val)
data.append(data_i)
# data = pd.DataFrame(data, columns=time_shifts)
data = pd.DataFrame(data, columns=[ct.yellow(f' {s}') for s in time_shifts])
"""
data = []
data_moved = []
for p in range(self.num_packages):
# print('Package %i' % p)
num_packages_moved = 0
data_i = []
for s in range(self.num_shifts):
s_val = '0 '
for w in range(self.num_workers):
is_working = False
for v in range(self.num_vehicles):
# print("self.solver.Value(self.shifts[(w, p, v, s)])", self.solver.Value(self.shifts[(w, p, v, s)]))
if self.solver.Value(self.shifts[(w, p, v, s)]) == 1:
# is_working = True
# # print(' Worker %i works shift %i' % (w, s))
# text_worker = f'Worker {alphabets[w]}'
# # text_shift = ct.purple(f'shift {["9:00", "10:00", "11:00", "12:00", ][s]}')
# text_shift = f'shift {self.time_shifts[s]}'
# # text_shift = ct.purple(f'shift {s}')
# text_package = f'package-{p}'
# text_vehicle = f'vehicle {v+1}'
# # text_keiro = ct.yellow(
# # f'keiro {["Main2", "Main1", "SUB", ][v]}')
# # if p in [2, 4]:
# # print(
# # f' {text_worker} at {text_shift} moves {text_package} using {text_vehicle}')
s_val = f'{alphabets[w]}{v+1} '
num_packages_moved += 1
data_i.append(s_val)
data.append(data_i)
data_moved.append([
num_packages_moved,
self.input_data_package.quantity[p] - num_packages_moved,
self.input_data_package.yesterday[p] + num_packages_moved - self.input_data_package.decay[p]*self.num_shifts])
# data = pd.DataFrame(data, columns=time_shifts)
data = pd.DataFrame(data, columns=[f' {s}' for s in self.time_shifts])
data_moved = pd.DataFrame(data_moved, columns=['moved', 'not_moved', 'q_at_destination'])
# print(data_moved)
self.input_data_package_orig = pd.concat([
self.input_data_package_orig[['package', 'quantity', 'decay',
'location', 'vehicle', 'next', 'yesterday']],
data,
data_moved], axis=1).copy()
# data.index = [f'Package-{p}' for p in range(self.num_packages)]
# self.data = self.data.reset_index(drop=True)
# self.data.dropna(axis=1, how='any')
self.data = self.input_data_package_orig.copy()
# print()
print(self.data)
return self.data
def solution_writer(self):
output_path = 'test/xl.xlsx'
print()
print(self.input_data_package_orig)
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
# Write each dataframe to a different worksheet.
self.input_data_package_orig.to_excel(
writer, sheet_name='Sheet_package', index=False)
self.input_data_worker_orig.to_excel(writer, sheet_name='Sheet_worker', index=False)
self.input_data_location_orig.to_excel(
writer, sheet_name='Sheet_location', index=False)
# output_data.to_excel(writer, sheet_name='Sheet_schedule')
writer.save()
def run(self):
# Data.
# package_to_table = [
# [1, 0, 0, 0, 0, 0],
# [1, 1, 0, 0, 0, 0],
# [0, 0, 1, 1, 0, 0],
# [0, 0, 0, 0, 1, 0],
# [0, 0, 0, 1, 0, 1],
# [1, 1, 1, 1, 1, 1],
# ]
# workers_to_table = [
# [1, 1, 1, 1, 0, 1],
# [1, 1, 1, 1, 1, 0],
# [1, 1, 1, 1, 0, 1],
# [1, 1, 1, 1, 1, 0],
# ]
printj.yellow('::::::::::::::::::: preprocess :::::::::::::::::::')
print(self.input_data_package)
print(self.input_data_package.dtypes)
if isinstance(self.input_data_package.vehicle[0], str):
self.input_data_package.vehicle = [
[int(i) for i in v.split(",")] for v in self.input_data_package.vehicle]
self.input_data_package.next = [v if isinstance(
v, int) else None for v in self.input_data_package.next]
if isinstance(self.input_data_worker.location[0], str):
self.input_data_worker.location = [
[int(i) for i in v.split(",")] for v in self.input_data_worker.location]
self.input_data_worker.vehicle = [
[int(i) for i in v.split(",")] for v in self.input_data_worker.vehicle]
self.num_locations = len(self.input_data_location.location)
# package_to_location = pd.crosstab(
# index=self.input_data_package['package'], columns=self.input_data_package['location']).to_numpy()
package_to_location = pd.DataFrame({p: [1 if l in [location_list] else 0 for l in range(self.num_locations)]
for p, location_list in enumerate(self.input_data_package.location)}).T.to_numpy() # num_location
package_to_vehicle = pd.DataFrame({p: [1 if (v+1) in vehicles_list else 0 for v in range(self.num_vehicles)]
for p, vehicles_list in enumerate(self.input_data_package.vehicle)}).T.to_numpy() # num_vehicle = 4
worker_to_vehicle = pd.DataFrame({p: [1 if (v+1) in vehicles_list else 0 for v in range(self.num_vehicles)]
for p, vehicles_list in enumerate(self.input_data_worker.vehicle)}).T.to_numpy() # num_vehicle = 4
location_to_worker = pd.DataFrame({p: [1 if v in worker_list else 0 for v in range(
self.num_locations)] for p, worker_list in enumerate(self.input_data_worker.location)}).to_numpy() # num_keiro = 6
package_orders = [[i, int(next_i)] for (i, next_i) in zip(
self.input_data_package.package, self.input_data_package.next) if pd.notna(next_i)]
print("package_to_vehicle\n", package_to_vehicle)
print("worker_to_vehicle\n", worker_to_vehicle)
print("package_to_location\n", package_to_location)
print("location_to_worker\n", location_to_worker)
print("package_orders\n", package_orders)
print()
print()
# print(package_to_location.to_numpy())
# sys.exit()
# package_orders = [[0, 1], [1, 2], ]
# main2, main1, sub
# package_to_vehicle = np.array([
# [1, 1, 1, 1],
# [1, 0, 0, 0],
# [1, 0, 0, 0],
# [0, 1, 1, 0],
# [0, 0, 1, 1],
# [0, 0, 1, 1],
# ])
# package_to_location = np.array([
# [1, 0, 0],
# [1, 0, 0],
# [1, 0, 0],
# [0, 1, 0],
# [1, 0, 0],
# [0, 0, 1],
# ])
# workers_to_keiro = np.array([
# [1, 0, 1],
# [1, 1, 0],
# [1, 0, 1],
# [1, 1, 0],
# ])
self.num_workers = len(self.input_data_worker.worker) # 4
self.num_packages = len(self.input_data_package.package) # 5
self.num_shifts = len(self.time_shifts)
# num_tables = 6
all_workers = range(self.num_workers)
all_packages = range(self.num_packages)
all_shifts = range(self.num_shifts)
all_vehicles = range(self.num_vehicles)
all_locations = range(self.num_locations)
# print(all_vehicles)
print(
f'\nNo. of package {self.num_packages}, No. of workers {self.num_workers}')
alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"""
available_workers_per_package = []
for i, item in enumerate(package_to_vehicle):
available_workers_list = []
for j, table in enumerate(item):
if table == 1:
available_workers_list += [k for k in range(len(workers_to_keiro)) if workers_to_keiro[k][j]==1]
available_workers_list = unique(available_workers_list)
print(f'Package-{i} can be moved by workers {"".join(alphabets[l] for l in available_workers_list)}')
available_workers_per_package.append(available_workers_list)
print(available_workers_per_package)
print(np.array(available_workers_per_package))
"""
# package_to_worker = np.matmul(package_to_location, workers_to_keiro.T)
# print(package_to_location.shape, location_to_worker.shape)
package_to_worker = np.matmul(package_to_location, location_to_worker)
available_workers_per_package = [
[i for i, ll in enumerate(l) if ll == 1] for l in package_to_worker]
available_vehicles_per_package = [
[i for i, ll in enumerate(l) if ll == 1] for l in package_to_vehicle]
available_packages_per_location = [
[i for i, ll in enumerate(l) if ll == 1] for l in package_to_location.T]
available_vehicles_per_worker = [
[i for i, ll in enumerate(l) if ll == 1] for l in worker_to_vehicle]
# print()
# for p, item in enumerate(available_workers_per_package):
# text_worker = ct.green(
# f'workers {"".join(alphabets[l] for l in item)}')
# text_package = ct.cyan(f'Package-{p}')
# print(f'{text_package} can be moved by {text_worker}')
print()
for w, item in enumerate(available_vehicles_per_worker):
text_vehicle = ct.green(
f'vehicle {", ".join(f"{l+1}" for l in item)}')
text_worker = ct.cyan(f'worker {alphabets[w]}')
print(f'{text_worker} can use {text_vehicle}')
print()
# for p, item in enumerate(available_vehicles_per_package):
# text_vehicle = ct.yellow(
# f'vehicle {" ".join(["Main2", "Main1", "SUB", ][l] for l in item)}')
# text_package = ct.cyan(f'Package-{p}')
# print(f'{text_package} can be moved to {text_vehicle}')
# print()
for p, (workers, vehicles) in enumerate(zip(available_workers_per_package, available_vehicles_per_package)):
text_worker = ct.green(
f'workers {", ".join(alphabets[l] for l in workers)}')
text_vehicle = ct.yellow(
f'vehicle {", ".join(str(v) for v in vehicles)}')
text_package = ct.cyan(f'Package-{p}')
print(f'{text_package} can be moved by \t{text_worker}\tusing {text_vehicle}')
print()
for l, item in enumerate(available_packages_per_location):
text_package = ct.cyan(f'package {", ".join(f"{i}" for i in item)}')
text_location = ct.green(
f'location {l}')
print(f'{text_location} carries {text_package}')
print()
# vehicle_to_worker = np.matmul(package_to_vehicle.T, package_to_worker)
# sys.exit()
# Creates the model.
model = cp_model.CpModel()
# Creates shift variables.
# self.shifts[(w, p, v, s)]: nurse 'n' works shift 's' on package 'd'.
self.shifts = {}
for w in all_workers:
for p in all_packages:
for v in all_vehicles:
for s in all_shifts:
self.shifts[(w, p, v, s)] = model.NewBoolVar(
'shift_w%ip%iv%is%i' % (w, p, v, s))
package_quantity = 1
for pi, p in enumerate(all_packages):
package_quantity = self.input_data_package.quantity[pi]
# package_quantity = min(package_quantity, )
# 1 worker needed per package
model.Add(sum(sum(sum(self.shifts[(w, p, v, s)] for v in all_vehicles)
for s in all_shifts) for w in all_workers) <= package_quantity)
# 1 available worker per package
model.Add(sum(sum(sum(self.shifts[(w, p, v, s)] for v in all_vehicles)
for s in all_shifts) for w in available_workers_per_package[p]) <= package_quantity)
# 1 available vehicle per package
model.Add(sum(sum(sum(self.shifts[(w, p, v, s)] for w in all_workers)
for s in all_shifts) for v in available_vehicles_per_package[p]) <= package_quantity)
for s in all_shifts:
model.Add(sum(sum(self.shifts[(w, p, v, s)]
for v in all_vehicles) for w in all_workers) <= 1)
# Capacity constraints
# location_filled = dict.fromkeys(self.input_data_location.location, 0)
for l in all_locations:
# total_quantity = sum(self.input_data_package.quantity[p] for p in available_packages_per_location[l])
# print(total_quantity)
# location_filled[l] += sum(sum(sum(sum(self.shifts[(w, p, v, s)]for v in all_vehicles) for s in all_shifts) for w in all_workers) for p in available_packages_per_location[l])
capacity = self.input_data_location.capacity[l]
# decay = self.input_data_location.decay[l]
# current_empty_space = capacity # 10 = 3 nimotsu + 7 empty_space = 2 nimotsu + 8 empty_space = 10 empty_space
# empty_space = max(current_empty_space + decay*1, capacity) # using max: empty space can't be more than the capacity of the shelf/location
# empty_space = min(total_quantity, empty_space) # using min:
# model.Add(location_filled[l]==empty_space)
for si in all_shifts:
for p in available_packages_per_location[l]:
constant = 4 # Use/ change when decay is a fraction like 0.5
decay = self.input_data_package.decay[p]*constant
# sum_package = sum(sum(sum(sum(self.shifts[(w, p, v, s)]for v in all_vehicles) for w in all_workers) for s in range(si+1)))
sum_package = sum(sum(sum(self.shifts[(
w, p, v, s)]for v in all_vehicles) for w in all_workers) for s in range(si+1))
sum_package += self.input_data_package.yesterday[p]
model.Add(sum_package*constant-int(decay)*(si+1) <= capacity*constant)
model.Add(sum_package-decay*(si+1)*constant >= 0)
# print(capacity, sum_package, decay*(si+1))
# print()
# 1 W, V, S for 1 package
for s in all_shifts:
for w in all_workers:
for v in all_vehicles:
model.Add(sum(self.shifts[(w, p, v, s)] for p in all_packages) <= 1)
printj.red(f'all_vehicles: {list(all_vehicles)}')
printj.red(
f'available_vehicles_per_worker: {available_vehicles_per_worker}')
for w in all_workers:
for v in all_vehicles:
# 1 available vehicle per worker
if v in available_vehicles_per_worker[w]:
model.Add(sum(sum(self.shifts[(w, p, v, s)] for p in all_packages)
for s in all_shifts) >= 0)
else:
model.Add(sum(sum(self.shifts[(w, p, v, s)] for p in all_packages)
for s in all_shifts) == 0)
# package_order # s(p=2) < s(p=4)
for package_order in package_orders:
shift_before = 0
for s in all_shifts:
for w in all_workers:
for v in all_vehicles:
# s = {0, 1, 2, 3}
shift_before += self.shifts[(w, package_order[0], v, s)]
shift_after = 0
# for s2 in range(s, num_shifts):
for s2 in range(s+2):
if s2 < self.num_shifts:
for w2 in all_workers:
for v2 in all_vehicles:
# (4 - {0, 1, 2, 3})
shift_after += self.shifts[(w2,
package_order[1], v2, s2)]
# model.Add(shift_before <= shift_after)
model.Add(shift_before == shift_after).OnlyEnforceIf(
self.shifts[(w, package_order[0], v, s)])
model.Add(shift_before == shift_after).OnlyEnforceIf(
self.shifts[(w, package_order[1], v, s)])
# # pylint: disable=g-complex-comprehension
objective = sum(sum(sum(sum(sum(self.shifts[(w, p, v, s)] for v in all_vehicles) for w in all_workers) for s in range(si+1)) for p in all_packages) for s in all_shifts)
model.Maximize(objective)
printj.yellow('::::::::::::::::::::: Output :::::::::::::::::::::')
# Creates the solver and solve.
self.solver = cp_model.CpSolver()
self.status = self.solver.Solve(model)
# if self.status == cp_model.OPTIMAL:
# self.output_data = self.solution_printer()
# self.solution_writer()
# else:
# print("No solutions")
# Statistics.
print()
print('Statistics')
# print(' - Number of shift requests met = %i' % solver.ObjectiveValue(),
# '(out of', num_nurses * min_shifts_per_nurse, ')')
print(' - wall time : %f s' % self.solver.WallTime())
# assert solution_printer.solution_count() == 5
def bool2int(x):
y = 0
for i, j in enumerate(x):
y += j << i
return y
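# Worked example of bool2int above: flags are read least-significant bit first,
# so bool2int([1, 0, 1]) == 1 + 0*2 + 1*4 == 5.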
def main(path = "test/xl.xlsx"):
printj.yellow('::::::::::::::::::::: Input :::::::::::::::::::::')
path = "test/xl.xlsx"
input_data_package = pd.read_excel(open(path, 'rb'),
sheet_name='Sheet_package')
input_data_worker = pd.read_excel(open(path, 'rb'),
sheet_name='Sheet_worker')
input_data_location = pd.read_excel(open(path, 'rb'),
sheet_name='Sheet_location')
num_vehicles = 4
num_shifts = 20
time_shifts = [TimeVar(6, 30) + TimeVar(0, 20*i)
for i in range(num_shifts)]
"""
input_data_package = pd.DataFrame({
"package": [0, 1],
"quantity": [2, 2],
"location": [0, 0],
"vehicle": [[1, 2, 3, 4], [1]],
# "next": [None, 2, 3, 4, 5, None],
"next": [None, None], # Only work if the quantity is same,
"yesterday": [1, 2],
})
input_data_worker = pd.DataFrame({
"worker": list("ABCD"),
"location": [[0, 2], [0, 1], [0, 2], [0, 1]],
"vehicle": [[1 ], [1, 2, 3, 4], [1], [1, 2, 3, 4]],
})
input_data_location = pd.DataFrame({
"location": list(range(3)),
# "decay": [1, 1, 1], # per shift
"capacity": [4, 3, 3],
}) # 4 - num_pack_loc0_shift + f(decay*(0, shift))
"""
"""
input_data_package = pd.DataFrame({
"package": [0, 1, 2, 3, 4, 5],
"quantity": [20000, 20000, 20000, 20000, 20000, 200000],
"decay": [1, 1, 1, 1, 1, 1],
"location": [0, 0, 0, 1, 0, 2],
"vehicle": [[1, 2, 3, 4], [1], [1], [2, 3], [3, 4], [1]],
# "next": [None, 2, 3, 4, 5, None],
"next": [None, None, None, None, None, None], # Only work if the quantity is same,
"yesterday": [0, 0, 0, 0, 0, 0],
})
input_data_worker = pd.DataFrame({
"worker": list("ABCD"),
"location": [[0, 2], [0, 1], [0, 2], [0, 1]],
"vehicle": [[1 ], [1, 2, 3, 4], [1], [1, 2, 3, 4]],
})
input_data_location = pd.DataFrame({
"location": list(range(3)),
# "decay": [1, 1, 1], # per shift
"capacity": [10, 10, 10],
}) # 4 - num_pack_loc0_shift + f(decay*(0, shift))
"""
"""
input_data_package = pd.DataFrame({
"package": [0],
"quantity": [6],
"location": [0],
"vehicle": [[1, 2, 3, 4]],
# "next": [None, 2, 3, 4, 5, None],
"next": [None], # Only work if the quantity is same
"yesterday": [1],
})
input_data_worker = pd.DataFrame({
"worker": list("ABCD"),
"location": [[0], [0], [0], [0]],
"vehicle": [[1 ], [1, 2, 3, 4], [1], [1, 2, 3, 4]],
})
input_data_location = pd.DataFrame({
"location": list(range(1)),
# "decay_rate": [1, 1, 1], # per shift
"capacity": [1],
}) # 4 - num_pack_loc0_shift + f(decay*(0, shift))
"""
# """
print(input_data_package)
print(input_data_worker)
print(input_data_location)
print()
# Scheduler.__init__ takes no arguments; the input tables are passed through __call__
scheduler = Scheduler()
scheduler(input_data_package, input_data_worker, input_data_location,
time_shifts,
num_vehicles)
scheduler.run()
if scheduler.status == cp_model.OPTIMAL:
scheduler.output_data = scheduler.solution_printer()
scheduler.solution_writer()
else:
print("No solutions")
# """
"""
wb = openpyxl.Workbook()
sheet = wb.active
sheet_title = sheet.title
wb.save(path)
print("active sheet title: " + sheet_title)
"""
if __name__ == '__main__':
main(path = "test/xl.xlsx")
```
#### File: ortools_projects/test/t1.py
```python
from __future__ import annotations
# from typing import Union
class TimeVar:
def __init__(self, hours:int, minutes:int):
while minutes > 60:
minutes -= 60
hours += 1
self.hours = hours
self.minutes = minutes
self.time_str = f'{hours}:{minutes}'
def __str__(self):
return self.time_str
def __add__(self, added_time:TimeVar):
hours = self.hours + added_time.hours
minutes = self.minutes + added_time.minutes
return TimeVar(hours, minutes)
@classmethod
def by_string(cls, time:str):
time_split_hour_min = time.split(":")
hours = int(time_split_hour_min[0])
minutes = int(time_split_hour_min[1])
return cls(hours, minutes)
# t = TimeVar.by_string("8:30")
# r = TimeVar(2, 40)
# e = t + r
# print(f'{t} + {r} = {e}')
print(list("ABCD"))
print([(i , j) for i in range(3) for j in range(2)])
``` |
{
"source": "jitesh1922/HITR",
"score": 2
} |
#### File: jitesh1922/HITR/Stats.py
```python
from CollectionLM import CollectionLM
from DocumentLM import DocumentLM
from ParsimoniousLM import ParsimoniousLM
from DR import DR
from TM import TM
from TR import TR
from TAR import TAR
import logging
import os
import gensim
from operator import truediv
import pickle
logger = logging.getLogger(__name__)
# we want to log the process
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
class Stats(object):
def __init__(self, corpus, dictionary, outPutPath):
#logger.info("Running HiTR")
#self.documentsPath = documentsPath
self.corpus = corpus
#self.ldaPath = ldaPath
self.outPutPath = outPutPath
#self.numTopics = numTopics
#self.mu = mu
#self.threshold = threshold
#self.numIteration = numIteration
self.dictionary = dictionary
def calcStats(self):
logger.info("Running DR")
mu = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#mu = [0.8]
threshold = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2]
stats = {}
vocabFile = open(os.path.join(self.outPutPath, "docVocabSizes-all.txt"), 'w')
typeTokenFile = open(os.path.join(self.outPutPath, "docTypeTokenRatios-all.txt"), 'w')
p_movedFile = open(os.path.join(self.outPutPath, "p_moved-all.txt"), 'w')
for m in mu:
stats[m] = {}
for th in threshold:
if os.path.isfile("mtsamples-tmp.mm"):
os.remove("mtsamples-tmp.mm")
if os.path.isfile("mtsamples-tmp.mm.index"):
os.remove("mtsamples-tmp.mm.index")
modelPath = "/Users/admin/Downloads/20_newsgroups/Preprocessed-lemmas-shortened-models"
dictionary = gensim.corpora.Dictionary.load(os.path.join(modelPath,"mtsamples.dict"))
corpus = gensim.corpora.MmCorpus(os.path.join(modelPath,"mtsamples.mm"))
print("DR for: " + str(m) + " " + str(th))
dr = DR(corpus, dictionary, m, th, 20)
dr.runDR()
vocabSize = self.calcVocabSize(dr.corpus)
avgDocVocabSize, docVocabSizes = self.calcDocVocabSize(dr.corpus)
avgTypeTokenRatio, docTypeTokenRatios = self.calcTypeTokenRatio(dr.corpus)
avgp_moved, p_moved = self.calcp_moved(corpus, dr.corpus)
print(str(avgDocVocabSize) + " " + str(avgTypeTokenRatio) + " " + str(avgp_moved))
del dr
line = ""
line += str(m) + " " + str(th) + " "
for l in docVocabSizes:
line += str(l) + " "
vocabFile.write(line + "\n")
line = ""
line += str(m) + " " + str(th) + " "
for l in docTypeTokenRatios:
line += str(l) + " "
typeTokenFile.write(line + "\n")
line = ""
line += str(m) + " " + str(th) + " "
for l in p_moved:
line += str(l) + " "
p_movedFile.write(line + "\n")
stats[m][th] = []
stats[m][th].append(vocabSize)
stats[m][th].append(avgDocVocabSize)
stats[m][th].append(avgTypeTokenRatio)
stats[m][th].append(avgp_moved)
del corpus
del dictionary
with open('res-all.txt', 'wb') as handle:
pickle.dump(stats, handle)
vocabFile.close()
typeTokenFile.close()
p_movedFile.close()
def calcVocabSize(self, corpus):
numTerms = 0
uniqueTerms = {}
for doc in corpus:
for token in doc:
if token[0] not in uniqueTerms:
uniqueTerms[token[0]] = 1
numTerms += 1
return numTerms
def calcDocVocabSize(self, corpus):
docVocabSizes = [len(doc) for doc in corpus]
avgDocVocabSize = float(sum(docVocabSizes)) / len(corpus)
return avgDocVocabSize, docVocabSizes
def calcTypeTokenRatio(self, corpus):
uniqueTerms = {}
corpusSize = 0
numTerms = 0
for doc in corpus:
for token in doc:
if token[0] not in uniqueTerms:
uniqueTerms[token[0]] = 1
numTerms += 1
corpusSize += sum([t[1] for t in doc])
avgTypeTokenRatio = 0
if corpusSize > 0:
avgTypeTokenRatio = float(len(uniqueTerms)) / corpusSize
#docTypeTokenRatios = map(truediv, [len(doc) for doc in corpus], [sum([t[1] for t in doc]) for doc in corpus])
a = [len(doc) for doc in corpus]
b = [sum([t[1] for t in doc]) for doc in corpus]
docTypeTokenRatios = [float(x) / y if y else 0 for x, y in zip(a, b)]  # float() keeps this a true division under Python 2 as well
return avgTypeTokenRatio, docTypeTokenRatios
def calcp_moved(self, corpus1, corpus2):
docId = 0
p_moved = []
for doc in corpus1:
words = {}
for token in corpus2[docId]:
words[token[0]] = token[1]
sumFreq = 0
docLen = 0
for token in doc:
docLen += token[1]
if token[0] not in words:
sumFreq += token[1]
p = 0
if docLen > 0:
p = float(sumFreq) / docLen
#p_moved.append(float(sumFreq) / docLen)
p_moved.append(p)
docId += 1
avgp_moved = sum(p_moved) / len(p_moved)
return avgp_moved, p_moved
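# Worked example of calcp_moved above: if a document originally holds 4 tokens of
# term A and 2 of term B, and the pruned corpus keeps only term A, then
# p_moved = 2 / 6, about 0.33, for that document (removed mass over document length).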
if __name__ == "__main__":
modelPath = "/Users/admin/Downloads/20_newsgroups/Preprocessed-lemmas-shortened-models"
dictionary = gensim.corpora.Dictionary.load(os.path.join(modelPath,"mtsamples.dict"))
corpus = gensim.corpora.MmCorpus(os.path.join(modelPath,"mtsamples.mm"))
outPutPath = "/Users/admin/Downloads/20_newsgroups/stats"
stat = Stats(corpus, dictionary, outPutPath)
stat.calcStats()
``` |
{
"source": "jiteshk23/JupterWorkflow",
"score": 2
} |
#### File: jupyterworkflow/tests/test_data.py
```python
import jupyterworkflow.data as jwd
import pandas as pd
import numpy as np
def test_fremont_data():
data = jwd.get_fremont_data()
assert all(data.columns == [u'East', u'West', u'Total'])
assert isinstance(data.index, pd.DatetimeIndex)
assert len(np.unique(data.index.time)) == 24
``` |
{
"source": "jiteshm17/mcan-vqa",
"score": 2
} |
#### File: core/model/net.py
```python
from core.model.net_utils import FC, MLP, LayerNorm
from core.model.mca import MCA_ED
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import random
from math import sqrt
from block import fusions
def sum_attention(nnet, query, value, mask=None, dropout=None, mode='1D'):
if mode == '2D':
batch, dim = query.size(0), query.size(1)
query = query.permute(0, 2, 3, 1).view(batch, -1, dim)
value = value.permute(0, 2, 3, 1).view(batch, -1, dim)
mask = mask.view(batch, 1, -1)
scores = nnet(query).transpose(-2, -1)
if mask is not None:
scores.data.masked_fill_(mask.eq(0), -65504.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
weighted = torch.matmul(p_attn, value)
return weighted, p_attn
class SummaryAttn(nn.Module):
def __init__(self, dim, num_attn, dropout, is_multi_head=False, mode='1D'):
super(SummaryAttn, self).__init__()
self.linear = nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU(inplace=True),
nn.Linear(dim, num_attn),
)
self.h = num_attn
self.is_multi_head = is_multi_head
self.attn = None
self.dropout = nn.Dropout(p=dropout) if dropout else None
self.mode = mode
def forward(self, query, value, mask=None):
if mask is not None:
mask = mask.unsqueeze(1)
batch = query.size(0)
weighted, self.attn = sum_attention(
self.linear, query, value, mask=mask, dropout=self.dropout, mode=self.mode)
weighted = weighted.view(
batch, -1) if self.is_multi_head else weighted.mean(dim=-2)
return weighted
class PredictLayer(nn.Module):
def __init__(self, dim1, dim2, num_attn, num_ans, dropout, dropattn=0):
super(PredictLayer, self).__init__()
self.summaries = nn.ModuleList([
SummaryAttn(dim1, num_attn, dropattn, is_multi_head=False),
SummaryAttn(dim2, num_attn, dropattn, is_multi_head=False),
])
self.predict = nn.Sequential(
nn.Linear(dim1 + dim2, (dim1 + dim2) // 2),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear((dim1 + dim2) // 2, num_ans),
)
def forward(self, data1, data2, mask1, mask2):
weighted1 = self.summaries[0](data1, data1, mask1)
weighted2 = self.summaries[1](data2, data2, mask2)
# weighted = torch.cat([weighted1, weighted2], dim=1)
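# FIXME: this Tucker fusion is re-created with fresh weights on every forward
# pass, so its parameters are never registered with the optimizer; it would
# normally be built once in __init__.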
mm = fusions.Tucker([2048, 1024], 3072).cuda()
proj_feat = mm([weighted1, weighted2])
# return proj_feat
# feat = self.predict(weighted)
feat = self.predict(proj_feat)
return feat
def qkv_attention(query, key, value, mask=None, dropout=None):
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / sqrt(d_k)
if mask is not None:
scores.data.masked_fill_(mask.eq(0), -65504.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
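# A hedged shape sketch for qkv_attention above (not part of the original model):
# with query (B, H, Lq, Dk) and key/value (B, H, Lk, Dk), plus a mask that
# broadcasts to (B, 1, 1, Lk), the output is (B, H, Lq, Dk):
# q = torch.randn(2, 4, 7, 64); k = torch.randn(2, 4, 9, 64); v = torch.randn(2, 4, 9, 64)
# out, attn = qkv_attention(q, k, v)  # out: (2, 4, 7, 64), attn: (2, 4, 7, 9)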
class DenseCoAttn(nn.Module):
def __init__(self, dim1, dim2, num_attn, num_none, dropout, is_multi_head=False):
super(DenseCoAttn, self).__init__()
dim = min(dim1, dim2)
self.linears = nn.ModuleList([nn.Linear(dim1, dim, bias=False),
nn.Linear(dim2, dim, bias=False)])
self.nones = nn.ParameterList([nn.Parameter(nn.init.xavier_uniform_(torch.empty(num_none, dim1))),
nn.Parameter(nn.init.xavier_uniform_(torch.empty(num_none, dim2)))])
self.d_k = dim // num_attn
self.h = num_attn
self.num_none = num_none
self.is_multi_head = is_multi_head
self.attn = None
self.dropouts = nn.ModuleList(
[nn.Dropout(p=dropout) for _ in range(2)])
def forward(self, value1, value2, mask1=None, mask2=None):
batch = value1.size(0)
dim1, dim2 = value1.size(-1), value2.size(-1)
value1 = torch.cat([self.nones[0].unsqueeze(0).expand(
batch, self.num_none, dim1), value1], dim=1)
value2 = torch.cat([self.nones[1].unsqueeze(0).expand(
batch, self.num_none, dim2), value2], dim=1)
none_mask = value1.new_ones((batch, self.num_none))
if mask1 is not None:
mask1 = torch.cat([none_mask, mask1], dim=1)
mask1 = mask1.unsqueeze(1).unsqueeze(2)
if mask2 is not None:
mask2 = torch.cat([none_mask, mask2], dim=1)
mask2 = mask2.unsqueeze(1).unsqueeze(2)
query1, query2 = [l(x).view(batch, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (value1, value2))]
if self.is_multi_head:
weighted1, attn1 = qkv_attention(
query2, query1, query1, mask=mask1, dropout=self.dropouts[0])
weighted1 = weighted1.transpose(1, 2).contiguous()[
:, self.num_none:, :]
weighted2, attn2 = qkv_attention(
query1, query2, query2, mask=mask2, dropout=self.dropouts[1])
weighted2 = weighted2.transpose(1, 2).contiguous()[
:, self.num_none:, :]
else:
weighted1, attn1 = qkv_attention(query2, query1, value1.unsqueeze(1), mask=mask1,
dropout=self.dropouts[0])
weighted1 = weighted1.mean(dim=1)[:, self.num_none:, :]
weighted2, attn2 = qkv_attention(query1, query2, value2.unsqueeze(1), mask=mask2,
dropout=self.dropouts[1])
weighted2 = weighted2.mean(dim=1)[:, self.num_none:, :]
self.attn = [attn1[:, :, self.num_none:, self.num_none:],
attn2[:, :, self.num_none:, self.num_none:]]
return weighted1, weighted2
class NormalSubLayer(nn.Module):
def __init__(self, dim1, dim2, num_attn, num_none, dropout, dropattn=0):
super(NormalSubLayer, self).__init__()
self.dense_coattn = DenseCoAttn(
dim1, dim2, num_attn, num_none, dropattn)
self.linears = nn.ModuleList([
nn.Sequential(
nn.Linear(dim1 + dim2, dim1),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
),
nn.Sequential(
nn.Linear(dim1 + dim2, dim2),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
)
])
def forward(self, data1, data2, mask1, mask2):
weighted1, weighted2 = self.dense_coattn(data1, data2, mask1, mask2)
data1 = data1 + self.linears[0](torch.cat([data1, weighted2], dim=2))
data2 = data2 + self.linears[1](torch.cat([data2, weighted1], dim=2))
return data1, data2
class DCNLayer(nn.Module):
def __init__(self, dim1, dim2, num_attn, num_none, num_seq, dropout, dropattn=0):
super(DCNLayer, self).__init__()
self.dcn_layers = nn.ModuleList([NormalSubLayer(dim1, dim2, num_attn, num_none,
dropout, dropattn) for _ in range(num_seq)])
def forward(self, data1, data2, mask1, mask2):
for dense_coattn in self.dcn_layers:
data1, data2 = dense_coattn(data1, data2, mask1, mask2)
return data1, data2
class Initializer(object):
@staticmethod
def manual_seed(seed):
"""
Seed every random number generator (random, NumPy, PyTorch CPU and CUDA) with the given value.
--------------------
Arguments:
seed (int): seed number.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@staticmethod
def xavier_normal(module, lstm_forget_bias_init=2):
"""
Xavier Gaussian initialization.
"""
lstm_forget_bias_init = float(lstm_forget_bias_init) / 2
normal_classes = (nn.Conv2d, nn.Linear, nn.Embedding)
recurrent_classes = (nn.RNN, nn.LSTM, nn.GRU)
if any([isinstance(module, cl) for cl in normal_classes]):
nn.init.xavier_normal_(
module.weight.data) if module.weight.requires_grad else None
try:
module.bias.data.fill_(
0) if module.bias.requires_grad else None
except AttributeError:
pass
elif any([isinstance(module, cl) for cl in recurrent_classes]):
for name, param in module.named_parameters():
if name.startswith("weight"):
nn.init.xavier_normal_(
param.data) if param.requires_grad else None
elif name.startswith("bias"):
if param.requires_grad:
hidden_size = param.size(0)
param.data.fill_(0)
param.data[hidden_size//4:hidden_size //
2] = lstm_forget_bias_init
@staticmethod
def xavier_uniform(module, lstm_forget_bias_init=2):
"""
Xavier Uniform initialization.
"""
lstm_forget_bias_init = float(lstm_forget_bias_init) / 2
normal_classes = (nn.Conv2d, nn.Linear, nn.Embedding)
recurrent_classes = (nn.RNN, nn.LSTM, nn.GRU)
if any([isinstance(module, cl) for cl in normal_classes]):
nn.init.xavier_uniform_(
module.weight.data) if module.weight.requires_grad else None
try:
module.bias.data.fill_(
0) if module.bias.requires_grad else None
except AttributeError:
pass
elif any([isinstance(module, cl) for cl in recurrent_classes]):
for name, param in module.named_parameters():
if name.startswith("weight"):
nn.init.xavier_uniform_(
param.data) if param.requires_grad else None
elif name.startswith("bias"):
if param.requires_grad:
hidden_size = param.size(0)
param.data.fill_(0)
param.data[hidden_size//4:hidden_size //
2] = lstm_forget_bias_init
# ------------------------------
# ---- Flatten the sequence ----
# ------------------------------
class AttFlat(nn.Module):
# mlp_model = None
def __init__(self, __C):
super(AttFlat, self).__init__()
self.__C = __C
self.mlp = MLP(
in_size=__C.HIDDEN_SIZE,
mid_size=__C.FLAT_MLP_SIZE,
out_size=__C.FLAT_GLIMPSES,
dropout_r=__C.DROPOUT_R,
use_relu=True
)
# mlp_model = self.mlp
self.linear_merge = nn.Linear(
__C.HIDDEN_SIZE * __C.FLAT_GLIMPSES,
__C.FLAT_OUT_SIZE
)
def forward(self, x, x_mask):
att = self.mlp(x)
att = att.masked_fill(
x_mask.squeeze(1).squeeze(1).unsqueeze(2),
-1e9
)
att = F.softmax(att, dim=1)
att_list = []
for i in range(self.__C.FLAT_GLIMPSES):
att_list.append(
torch.sum(att[:, :, i: i + 1] * x, dim=1)
)
x_atted = torch.cat(att_list, dim=1)
x_atted = self.linear_merge(x_atted)
return x_atted
# -------------------------
# ---- Main MCAN Model ----
# -------------------------
class Net(nn.Module):
def __init__(self, __C, pretrained_emb, token_size, answer_size):
super(Net, self).__init__()
copy_data = __C
self.embedding = nn.Embedding(
num_embeddings=token_size,
embedding_dim=__C.WORD_EMBED_SIZE
)
self.mlp = MLP(
in_size=__C.HIDDEN_SIZE,
mid_size=__C.FLAT_MLP_SIZE,
out_size=__C.FLAT_GLIMPSES,
dropout_r=__C.DROPOUT_R,
use_relu=True
)
# Loading the GloVe embedding weights
if __C.USE_GLOVE:
self.embedding.weight.data.copy_(torch.from_numpy(pretrained_emb))
self.lstm = nn.LSTM(
input_size=__C.WORD_EMBED_SIZE,
hidden_size=__C.HIDDEN_SIZE,
num_layers=1,
batch_first=True
)
self.img_feat_linear = nn.Linear(
__C.IMG_FEAT_SIZE,
2048
)
self.backbone = MCA_ED(__C)
self.attflat_img = AttFlat(__C)
self.attflat_lang = AttFlat(__C)
self.proj_norm = LayerNorm(1024)
self.proj = nn.Linear(1024, answer_size)
self.dense_coattn = DCNLayer(2048, 1024, 4, 3, 5, 0.3)
self.predict = PredictLayer(2048, 1024, 4, 3129, 0.3)
self.apply(Initializer.xavier_normal)
def forward(self, img_feat, ques_ix):
# Make mask
lang_feat_mask = self.make_mask(ques_ix.unsqueeze(2))
img_feat_mask = self.make_mask(img_feat)
# Pre-process Language Feature
lang_feat = self.embedding(ques_ix)
lang_feat, _ = self.lstm(lang_feat)
# Pre-process Image Feature
img_feat = self.img_feat_linear(img_feat)
img_feat, lang_feat = self.dense_coattn(
img_feat, lang_feat, None, None)
proj_feat = self.predict(img_feat, lang_feat, None, None)
# img_att = self.mlp(img_feat)
# lang_att = self.mlp(lang_feat)
# img_feat = img_feat.reshape((-1,img_feat.size(1)*img_feat.size(2)))
        # lang_feat = lang_feat.reshape((-1,lang_feat.size(1)*lang_feat.size(2)))
# Backbone Framework
# lang_feat, img_feat = self.backbone(
# lang_feat,
# img_feat,
# lang_feat_mask,
# img_feat_mask
# )
# lang_feat = self.attflat_lang(
# lang_feat,
# lang_feat_mask
# )
# img_feat = self.attflat_img(
# img_feat,
# img_feat_mask
# )
# NUM_LAYERS = 3
# conv_layer_1 = nn.Linear(1024,1024).cuda()
# conv_layer_2 = nn.ModuleList([
# nn.Linear(1024, 1024)
# for i in range(NUM_LAYERS)]).cuda()
# img_feat = conv_layer_1(img_feat)
# lang_feat = conv_layer_1(lang_feat)
# feat1 = nn.Dropout(0.25)(feat1)
# feat2 = nn.Dropout(0.25)(feat2)
# x_mm = []
# for i in range(NUM_LAYERS):
# x1 = conv_layer_2[i](img_feat)
# # x1 = nn.Tanh()(x1)
# x2 = conv_layer_2[i](lang_feat)
# # x2 = nn.Tanh()(x2)
# x_mm.append(torch.mul(x1,x2))
# x_mm = torch.stack(x_mm,dim=1)
# batch_size = x_mm.size(0)
# nc,w,h = x_mm.shape[2],x_mm.shape[3],x_mm.shape[4]
# proj_feat = torch.sum(x_mm,dim=1)
# mm = fusions.LinearSum([1024,1024],3129).cuda()
# proj_feat = mm([img_feat,lang_feat])
# mul_feat = lang_feat * img_feat
# add_feat = lang_feat + img_feat
# proj_feat = mul_feat + add_feat
# proj_feat = lang_feat + img_feat
# proj_feat = F.softmax(proj_feat, dim=1)
# proj_feat = self.proj_norm(proj_feat)
# proj_feat = torch.sigmoid(proj_feat)
# proj_feat = torch.sigmoid(self.proj(proj_feat))
# proj_feat = self.proj(proj_feat)
# return proj_feat
return torch.sigmoid(proj_feat)
# Masking
def make_mask(self, feature):
return (torch.sum(
torch.abs(feature),
dim=-1
) == 0).unsqueeze(1).unsqueeze(2)
``` |
{
"source": "jiteshmohite/Python-Page-Object-Model-Framework",
"score": 3
} |
#### File: functional/android/chrome_tests.py
```python
import unittest
from appium import webdriver
"""
TODO: In-Progress, Refer page object model
"""
class ChromeTests(unittest.TestCase):
def setUp(self):
desired_caps = {
'platformName': 'Android',
'platformVersion': '7.0',
'deviceName': 'Galaxy Tab S3',
'browserName': 'Chrome'
}
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_find_single_element(self):
self.driver.get('http://10.0.2.2:4723/test/guinea-pig')
self.driver.find_element_by_link_text('i am a link').click()
self.assertTrue('I am some other page content' in self.driver.page_source)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(ChromeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: functional/android/find_by_image_tests.py
```python
import unittest
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import desired_capabilities
"""
TODO: In-Progress, Refer page object model
"""
class FindByImageTests(unittest.TestCase):
def setUp(self):
desired_caps = desired_capabilities.get_desired_capabilities('ApiDemos-debug.apk')
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_find_based_on_image_template(self):
image_path = desired_capabilities.PATH('find_by_image_success.png')
el = WebDriverWait(self.driver, 3).until(
EC.presence_of_element_located((By.IMAGE, image_path))
)
size = el.size
self.assertIsNotNone(size['width'])
self.assertIsNotNone(size['height'])
loc = el.location
self.assertIsNotNone(loc['x'])
self.assertIsNotNone(loc['y'])
rect = el.rect
self.assertIsNotNone(rect['width'])
self.assertIsNotNone(rect['height'])
self.assertIsNotNone(rect['x'])
self.assertIsNotNone(rect['y'])
self.assertTrue(el.is_displayed())
el.click()
self.driver.find_element_by_accessibility_id("Alarm")
def test_find_multiple_elements_by_image_just_returns_one(self):
WebDriverWait(self.driver, 3).until(
EC.presence_of_element_located((By.ACCESSIBILITY_ID, "App"))
)
image_path = desired_capabilities.PATH('find_by_image_success.png')
els = self.driver.find_elements_by_image(image_path)
els[0].click()
self.driver.find_element_by_accessibility_id("Alarm")
def test_find_throws_no_such_element(self):
image_path = desired_capabilities.PATH('find_by_image_failure.png')
with self.assertRaises(TimeoutException):
WebDriverWait(self.driver, 3).until(
EC.presence_of_element_located((By.IMAGE, image_path))
)
with self.assertRaises(NoSuchElementException):
self.driver.find_element_by_image(image_path)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(FindByImageTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: functional/android/ime_tests.py
```python
import unittest
from time import sleep
from selenium.common.exceptions import NoSuchElementException
from appium import webdriver
import desired_capabilities
# the emulator is sometimes slow and needs time to think
SLEEPY_TIME = 1
LATIN_IME = u'com.android.inputmethod.latin/.LatinIME'
"""
TODO: In-Progress, Refer page object model
"""
class IMETests(unittest.TestCase):
def setUp(self):
desired_caps = desired_capabilities.get_desired_capabilities('ApiDemos-debug.apk')
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_available_ime_engines(self):
engines = self.driver.available_ime_engines
self.assertIsInstance(engines, list)
self.assertTrue(LATIN_IME in engines)
def test_is_ime_active(self):
self.assertTrue(self.driver.is_ime_active())
def test_active_ime_engine(self):
self.assertIsInstance(self.driver.active_ime_engine, unicode)
def test_activate_ime_engine(self):
engines = self.driver.available_ime_engines
active_engine = self.driver.active_ime_engine
self.driver.activate_ime_engine(engines[-1])
self.assertEqual(self.driver.active_ime_engine, engines[-1])
def test_deactivate_ime_engine(self):
engines = self.driver.available_ime_engines
self.driver.activate_ime_engine(engines[-1])
self.assertEqual(self.driver.active_ime_engine, engines[-1])
self.driver.deactivate_ime_engine()
sleep(1)
self.assertNotEqual(self.driver.active_ime_engine, engines[-1])
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(IMETests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: functional/android/selendroid_tests.py
```python
import unittest
from time import sleep
import desired_capabilities
from selenium.webdriver.common.touch_actions import TouchActions
from appium import webdriver
"""
TODO: In-Progress, Refer page object model
"""
class SelendroidTests(unittest.TestCase):
def setUp(self):
desired_caps = desired_capabilities.get_desired_capabilities('ApiDemos-debug.apk')
desired_caps['automationName'] = 'Selendroid'
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def test_contexts_list(self):
el = self.driver.find_element_by_class_name('android.widget.ListView')
els = el.find_elements_by_class_name('android.widget.TextView')
ta = TouchActions(self.driver).flick_element(el, 0, -300, 0)
ta.perform()
sleep(5)
def tearDown(self):
self.driver.quit()
def _enter_webview(self):
btn = self.driver.find_element_by_name('buttonStartWebviewCD')
btn.click()
self.driver.switch_to.context('WEBVIEW')
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(SelendroidTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: functional/android/touch_action_tests.py
```python
import unittest
from time import sleep
import desired_capabilities
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException
from webdriver.util.touch_action import TouchAction
# the emulator is sometimes slow
SLEEPY_TIME = 2
"""
TODO: In-Progress, Refer page object model
"""
class TouchActionTests(unittest.TestCase):
def setUp(self):
desired_caps = desired_capabilities.get_desired_capabilities('ApiDemos-debug.apk')
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
self.driver.quit()
def test_tap(self):
el = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.tap(el).perform()
el = self.driver.find_element_by_accessibility_id('Bouncing Balls')
self.assertIsNotNone(el)
def test_tap_x_y(self):
el = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.tap(el, 100, 10).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_accessibility_id('Bouncing Balls')
self.assertIsNotNone(el)
def test_tap_twice(self):
el = self.driver.find_element_by_name('Text')
action = TouchAction(self.driver)
action.tap(el).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_name('LogTextBox')
action.tap(el).perform()
el = self.driver.find_element_by_name('Add')
action.tap(el, count=2).perform()
els = self.driver.find_elements_by_class_name('android.widget.TextView')
self.assertEqual('This is a test\nThis is a test\n', els[1].get_attribute("text"))
def test_press_and_immediately_release(self):
el = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.press(el).release().perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_accessibility_id('Bouncing Balls')
self.assertIsNotNone(el)
def test_press_and_immediately_release_x_y(self):
el = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.press(el, 100, 10).release().perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_accessibility_id('Bouncing Balls')
self.assertIsNotNone(el)
def test_press_and_wait(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.press(el1).move_to(el2).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_accessibility_id('Views')
# self.assertIsNotNone(el)
action.tap(el).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_accessibility_id('Expandable Lists')
# self.assertIsNotNone(el)
action.tap(el).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_accessibility_id('1. Custom Adapter')
# self.assertIsNotNone(el)
action.tap(el).perform()
sleep(SLEEPY_TIME)
el = self.driver.find_element_by_name('People Names')
# self.assertIsNotNone(el)
action.press(el).wait(2000).perform()
sleep(SLEEPY_TIME)
# 'Sample menu' only comes up with a long press, not a press
el = self.driver.find_element_by_name('Sample menu')
self.assertIsNotNone(el)
def test_press_and_moveto(self):
el1 = self.driver.find_element_by_accessibility_id('Content')
el2 = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.press(el1).move_to(el2).release().perform()
el = self.driver.find_element_by_accessibility_id('Views')
self.assertIsNotNone(el)
def test_press_and_moveto_x_y(self):
el1 = self.driver.find_element_by_accessibility_id('Content')
el2 = self.driver.find_element_by_accessibility_id('App')
action = TouchAction(self.driver)
action.press(el1).move_to(el2, 100, 100).release().perform()
el = self.driver.find_element_by_accessibility_id('Views')
self.assertIsNotNone(el)
def test_long_press(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.press(el1).move_to(el2).perform()
el = self.driver.find_element_by_accessibility_id('Views')
# self.assertIsNotNone(el)
action.tap(el).perform()
el = self.driver.find_element_by_accessibility_id('Expandable Lists')
# self.assertIsNotNone(el)
action.tap(el).perform()
el = self.driver.find_element_by_accessibility_id('1. Custom Adapter')
# self.assertIsNotNone(el)
action.tap(el).perform()
el = self.driver.find_element_by_name('People Names')
# self.assertIsNotNone(el)
action.long_press(el).perform()
# 'Sample menu' only comes up with a long press, not a tap
el = self.driver.find_element_by_name('Sample menu')
self.assertIsNotNone(el)
def test_long_press_x_y(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_accessibility_id('Animation')
action = TouchAction(self.driver)
action.press(el1).move_to(el2).perform()
el = self.driver.find_element_by_accessibility_id('Views')
# self.assertIsNotNone(el)
action.tap(el).perform()
el = self.driver.find_element_by_accessibility_id('Expandable Lists')
# self.assertIsNotNone(el)
action.tap(el).perform()
el = self.driver.find_element_by_accessibility_id('1. Custom Adapter')
# self.assertIsNotNone(el)
action.tap(el).perform()
# the element "People Names" is located at 0:110 (top left corner)
action.long_press(x=10, y=120).perform()
# 'Sample menu' only comes up with a long press, not a tap
el = self.driver.find_element_by_name('Sample menu')
self.assertIsNotNone(el)
def test_drag_and_drop(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_name('Animation')
self.driver.scroll(el1, el2)
el = self.driver.find_element_by_name('Views')
action = TouchAction(self.driver)
action.tap(el).perform()
el = self.driver.find_element_by_name('Drag and Drop')
action.tap(el).perform()
dd3 = self.driver.find_element_by_id('com.example.android.apis:id/drag_dot_3')
dd2 = self.driver.find_element_by_id('com.example.android.apis:id/drag_dot_2')
# dnd is stimulated by longpress-move_to-release
action.long_press(dd3).move_to(dd2).release().perform()
el = self.driver.find_element_by_id('com.example.android.apis:id/drag_result_text')
self.assertEqual('Dropped!', el.get_attribute('text'))
def test_driver_drag_and_drop(self):
el1 = self.driver.find_element_by_name('Content')
el2 = self.driver.find_element_by_name('Animation')
self.driver.scroll(el1, el2)
el = self.driver.find_element_by_name('Views')
action = TouchAction(self.driver)
action.tap(el).perform()
el = self.driver.find_element_by_name('Drag and Drop')
action.tap(el).perform()
dd3 = self.driver.find_element_by_id('com.example.android.apis:id/drag_dot_3')
dd2 = self.driver.find_element_by_id('com.example.android.apis:id/drag_dot_2')
self.driver.drag_and_drop(dd3, dd2)
el = self.driver.find_element_by_id('com.example.android.apis:id/drag_result_text')
self.assertEqual('Dropped!', el.get_attribute('text'))
def test_driver_swipe(self):
self.assertRaises(NoSuchElementException, self.driver.find_element_by_name, 'Views')
self.driver.swipe(100, 500, 100, 100, 800)
el = self.driver.find_element_by_name('Views')
self.assertIsNotNone(el)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TouchActionTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: pageobjectmodel/pageobject/mainactivity.py
```python
from test.functional.pageobjectmodel.locators.main_activity_locator import MainActivityLocators
from test.functional.pageobjectmodel.pageobject import *
"""
This class contains all the common methods required while executing the main activity tests
"""
class MainActivity(BasePage):
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def go_to_login(self):
return self.click(MainActivityLocators.LOGIN_ID)
def go_to_movies_list(self):
return self.click(MainActivityLocators.MOVIES_LIST_ID)
def go_to_photo_viewer(self):
return self.click(MainActivityLocators.PHOTO_VIEWER_ID)
def go_to_view_pager(self):
return self.click(MainActivityLocators.VIEW_PAGER_ID)
```
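A minimal usage sketch for the `MainActivity` page object above. The driver setup mirrors the functional tests earlier in this entry; the import path, the capabilities/app under test, and the behaviour of `BasePage.click` are assumptions rather than code shown here.
```python
import unittest

from appium import webdriver

# Assumed import path, inferred from the file layout above
from test.functional.pageobjectmodel.pageobject.mainactivity import MainActivity


class MainActivityTest(unittest.TestCase):
    def setUp(self):
        # Placeholder capabilities; a real test would point at the app this page models
        desired_caps = {
            'platformName': 'Android',
            'deviceName': 'emulator-5554',
            'app': '/path/to/app-under-test.apk'
        }
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
        self.main_activity = MainActivity(self.driver)

    def tearDown(self):
        self.driver.quit()

    def test_open_login_screen(self):
        # Delegates to BasePage.click with MainActivityLocators.LOGIN_ID
        self.main_activity.go_to_login()
```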
#### File: pageobjectmodel/pageobject/photoviewpage.py
```python
from test.functional.pageobjectmodel.locators.photo_viewer_locator import PhotoViewerLocator
from test.functional.pageobjectmodel.pageobject import *
"""
This class contains all the common methods required while executing the photo viewer tests
"""
class PhotoViewer(BasePage):
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
def zoom_photo(self):
self.zoom_view(PhotoViewerLocator.PHOTO_VIEW_ID)
``` |
{
"source": "jiteshnambiar/robinhood-portfolio",
"score": 4
} |
#### File: robinhood-portfolio/backend/excel_writer.py
```python
import pandas as pd
import xlsxwriter
"""
Creates New Excel Workbook
Usage:
>> xls = ExcelWriter(filename)
>> xls.add_sheet('sheet1', data_frame1)
>> xls.add_sheet('sheet2', data_frame2)
>> xls.default_formatting('sheet2')
>> xls.save()
"""
class ExcelWriter(object):
def __init__(self, filename):
"""
https://xlsxwriter.readthedocs.io/working_with_pandas.html
# Create a Pandas dataframe from the data.
df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('pandas_simple.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
"""
self.filename = filename + '.xlsx'
self.writer = pd.ExcelWriter(self.filename)
self.workbook = self.writer.book
self.sheets = {}
def add_sheet(self, sheetname, data_frame):
if sheetname in self.sheets:
raise Exception("Sheet already exists: [%s]" % sheetname)
data_frame.to_excel(self.writer, sheet_name=sheetname)
self.sheets[sheetname] = self.writer.sheets[sheetname]
def writesheet(self, sheetname, data_frame):
        data_frame.to_excel(self.writer, sheet_name=sheetname)
def add_cell_format(self, bg_color, font_color):
# Add a format with bg_color background fill and font_color text.
return self.workbook.add_format({'bg_color': bg_color,
'font_color': font_color})
def freeze_top_row(self, sheetname):
self.sheets[sheetname].freeze_panes(1, 0)
def freeze_first_column(self, sheetname):
self.sheets[sheetname].freeze_panes(0, 1)
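    # NOTE: adjust_column_width below assumes self.columnwidth and self.headerindex
    # mappings are populated elsewhere; they are never set in this class, which is
    # presumably why the call to it in default_formatting is commented out.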
def adjust_column_width(self, sheetname):
for colname, width in self.columnwidth[sheetname].items():
self.sheets[sheetname].set_column(self.headerindex[sheetname][colname],
self.headerindex[sheetname][colname], width)
def default_formatting(self, sheetname):
if sheetname not in self.sheets:
raise Exception("Sheet doesn't exist: [%s]" % sheetname)
# Add a format. Light red fill with dark red text.
format_red = self.workbook.add_format({'bg_color': '#FFC7CE',
'font_color': '#9C0006'})
# Add a format. Green fill with dark green text.
format_green = self.workbook.add_format({'bg_color': '#C6EFCE',
'font_color': '#006100'})
# self.adjust_column_width(sheetname)
self.freeze_top_row(sheetname)
self.freeze_first_column(sheetname)
# Highlight Cells greater than 0.6 to show green
row_count = self.sheets[sheetname].dim_rowmax
column_count = self.sheets[sheetname].dim_colmax
first_col = 'B'
first_row = 2
last_col = xlsxwriter.utility.xl_col_to_name(column_count)
last_row = row_count + 1
range = first_col + str(first_row) + ':' + last_col + str(last_row)
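        # e.g. for a sheet written from a 10-row x 5-column DataFrame this builds the
        # A1-style range "B2:F11", i.e. the data cells only (the header row and the
        # index column are skipped).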
self.sheets[sheetname].conditional_format(range, {'type': 'cell',
'criteria': '>=',
'value': 0.6,
'format': format_green})
# Highlight Cells less than -0.6 to show red
self.sheets[sheetname].conditional_format(range, {'type': 'cell',
'criteria': '<',
'value': -0.6,
'format': format_red})
#########
# Close #
#########
def save(self):
# self.workbook.save()
self.workbook.close()
def close(self):
self.save()
``` |
{
"source": "JiteshRawat/Text_Summarization",
"score": 3
} |
#### File: JiteshRawat/Text_Summarization/main.py
```python
from os import write
import streamlit as st
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx
#from transformers import pipeline
header = st.beta_container()
body = st.beta_container()
summary_container = st.beta_container()
######################## Summarization code ########################################
def sentence_similarity(sent1, sent2, stopwords=None):
if stopwords is None:
stopwords = []
sent1 = [w.lower() for w in sent1]
sent2 = [w.lower() for w in sent2]
all_words = list(set(sent1 + sent2))
vector1 = [0] * len(all_words)
vector2 = [0] * len(all_words)
# build the vector for the first sentence
for w in sent1:
if w in stopwords:
continue
vector1[all_words.index(w)] += 1
# build the vector for the second sentence
for w in sent2:
if w in stopwords:
continue
vector2[all_words.index(w)] += 1
return 1 - cosine_distance(vector1, vector2)
def build_similarity_matrix(sentences, stop_words):
# Create an empty similarity matrix
similarity_matrix = np.zeros((len(sentences), len(sentences)))
for idx1 in range(len(sentences)):
for idx2 in range(len(sentences)):
if idx1 == idx2: # ignore if both are same sentences
continue
similarity_matrix[idx1][idx2] = sentence_similarity(
sentences[idx1], sentences[idx2], stop_words)
return similarity_matrix
def generate_summary(rawtext, top_n=5):
stop_words = stopwords.words('english')
summarize_text = []
    # Step 1 - Read text and split it
article = rawtext.split(". ")
sentences = []
for sentence in article:
sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    # Step 2 - Generate similarity matrix across sentences
    sentence_similarity_matrix = build_similarity_matrix(sentences, stop_words)
    # Step 3 - Rank sentences in similarity matrix
    sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_matrix)
scores = nx.pagerank(sentence_similarity_graph)
# Step 4 - Sort the rank and pick top sentences
ranked_sentence = sorted(
((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
for i in range(top_n):
summarize_text.append(" ".join(ranked_sentence[i][1]))
    # Step 5 - Finally, output the summarized text
#print("Summarize Text: \n", ". ".join(summarize_text))
return summarize_text
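# Example (sketch): exercising generate_summary outside the Streamlit UI;
# the sentences below are placeholders.
#   summary = generate_summary("Dogs are loyal animals. Dogs love to play. "
#                              "Cats are independent animals. Birds can fly.", top_n=2)
#   print(". ".join(summary))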
# This was a trial for abstractive summarization using transformers which works well but too slow
# def abstractive(rawtext):
# summarizer = pipeline("summarization")
# summary = summarizer(rawtext, max_length=300,
# min_length=200, do_sample=False)
# summ = summary[0]
# return summ['summary_text']
######################## Frontend code ##############################################
with header:
st.title('Text Summarization using NLTK')
with body:
st.header('Extractive Summarization')
rawtext = st.text_area('Enter Text Here')
sample_col, upload_col = st.beta_columns(2)
sample_col.header('Or select a sample file from below')
sample = sample_col.selectbox('Or select a sample file',
('kalam_speech.txt', 'Stocks_ FRI_ JUN _8.txt', 'microsoft.txt', 'None'), index=3)
if sample != 'None':
file = open(sample, "r", encoding= 'cp1252')
#st.write(file)
rawtext = file.read()
upload_col.header('Or upload text file here')
uploaded_file = upload_col.file_uploader(
'Choose your .txt file', type="txt")
if uploaded_file is not None:
rawtext = str(uploaded_file.read(), 'cp1252')
no_of_lines = st.slider("Select number of lines in summary", 1, 5, 3)
if st.button('Get Summary'):
with summary_container:
if rawtext == "":
st.header('Summary :)')
st.write('Please enter text to see summary')
else:
result = generate_summary(rawtext, no_of_lines)
st.header('Summary :)')
for i in range(no_of_lines):
st.write(result[i])
# Abstractive summary
#st.header('Abstractive method')
#abstract = abstractive(rawtext)
# st.write(abstract)
st.header('Actual article')
st.write(rawtext)
``` |
{
"source": "jith47/Urlshortener",
"score": 2
} |
#### File: management/command/refreshcode.py
```python
from django.core.management.base import BaseCommand, CommandError
from shortener.models import Kirurl
class Command(BaseCommand):
help = 'Refresh Kirurl'
def add_arguments(self, parser):
parser.add_argument('--items', type=int)
def handle(self, *args, **options):
return Kirurl.objects.refresh_shortcodes(items=options['items'])
```
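A brief usage sketch for the management command above, assuming a configured Django project with the shortener app installed; the command name comes from the module file name, and `--items` maps to the argument registered in `add_arguments`.
```python
# Typical shell invocation (sketch):
#   $ python manage.py refreshcode --items 25
# or programmatically from within the project:
from django.core.management import call_command

call_command("refreshcode", items=25)
```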
#### File: src/shortener/views.py
```python
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.views import View
from analytics.models import ClickEvent
from .forms import SubmitUrlForm
from .models import Kirurl
# Create your views here.
def home_view_fbv(request, *args, **kwargs):
if request.method == "POST":
print(request.POST)
return render(request, "shortener/home.html", {})
class HomeView(View):
def get(self, request, *args, **kwargs):
the_form = SubmitUrlForm()
context = {
"title": "Kirr.co",
"form": the_form
}
return render(request, "shortener/home.html", context) # Try Django 1.8 & 1.9 http://joincfe.com/youtube
def post(self, request, *args, **kwargs):
form = SubmitUrlForm(request.POST)
context = {
"title": "Kirr.co",
"form": form
}
template = "shortener/home.html"
if form.is_valid():
new_url = form.cleaned_data.get("url")
obj, created = Kirurl.objects.get_or_create(url=new_url)
context = {
"object": obj,
"created": created,
}
if created:
template = "shortener/success.html"
else:
template = "shortener/already-exists.html"
return render(request, template ,context)
class URLRedirectView(View):
def get(self, request, shortcode=None, *args, **kwargs):
qs = Kirurl.objects.filter(shortcode__iexact=shortcode)
if qs.count() != 1 and not qs.exists():
raise Http404
obj = qs.first()
actual_url = obj.url
if not actual_url.startswith("http"):
actual_url = "http://" + actual_url
return HttpResponseRedirect(actual_url)
``` |
{
"source": "jith4j/covid-vaccine-alert-bot",
"score": 3
} |
#### File: jith4j/covid-vaccine-alert-bot/cowid_bot.py
```python
import requests
from datetime import datetime
base_cowin_url = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict"
api_url_telegram = "https://api.telegram.org/botenteryourbotidhere/sendMessage?chat_id=@__groupid__&text="
now = datetime.now()
today_date = now.strftime("%d-%m-%Y")
group_id = ""
name_list = []
def fetch_data_from_cowin(district_id):
querry_params = "?district_id={}&date={}".format(district_id, today_date)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'}
final_url = base_cowin_url + querry_params
response = requests.get(final_url, headers=headers)
extract_availability_data(response)
def extract_availability_data(response):
response_json = response.json()
for center in response_json["centers"]:
for session in center["sessions"]:
if session["available_capacity"] > 0 and session["min_age_limit"] == 18: # for 18+ vaccine
if int(center["center_id"]) not in name_list:
message = "Vaccination centers for {}+ age group: \n{} ({})- Pin: {}. \nVaccine: {} \nFee Type: {} \nTotal {} slots are available on {} \n(Dose 1: {}, Dose 2: {})".format(
session["min_age_limit"],
center["name"],
center["block_name"],
center["pincode"],
session["vaccine"],
center["fee_type"],
session["available_capacity"],
session["date"],
session["available_capacity_dose1"],
session["available_capacity_dose2"]
)
name_list.append(int(center["center_id"]))
print(name_list)
message = message + "\n\nCoWin: https://selfregistration.cowin.gov.in"
send_message_telegram(message)
def send_message_telegram(message):
final_telegram_url = api_url_telegram.replace("__groupid__", group_id)
final_telegram_url = final_telegram_url + message
response = requests.get(final_telegram_url)
print(response)
while(True):
fetch_data_from_cowin(296)
# 296 is of Trivandrum district
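    # Note: this polls the CoWIN API continuously with no delay; in practice a pause
    # (e.g. time.sleep between iterations) is usually added so the endpoint is not hammered.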
``` |
{
"source": "jith4j/Tuberculosis-Classification",
"score": 3
} |
#### File: jith4j/Tuberculosis-Classification/web.py
```python
import streamlit as st
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
import tensorflow as tf
from tempfile import NamedTemporaryFile
from tensorflow.keras.preprocessing import image
st.set_option('deprecation.showfileUploaderEncoding', False)
@st.cache(allow_output_mutation=True)
def loading_model():
fp = "./model/model.h5"
model_loader = load_model(fp)
return model_loader
cnn = loading_model()
st.write("""
# X-Ray Classification [Tuberculosis/Normal]
by AJAS
""")
temp = st.file_uploader("Upload X-Ray Image")
#temp = temp.decode()
buffer = temp
temp_file = NamedTemporaryFile(delete=False)
if buffer:
temp_file.write(buffer.getvalue())
st.write(image.load_img(temp_file.name))
if buffer is None:
st.text("Oops! that doesn't look like an image. Try again.")
else:
img = image.load_img(temp_file.name, target_size=(
500, 500), color_mode='grayscale')
# Preprocessing the image
pp_img = image.img_to_array(img)
pp_img = pp_img/255
pp_img = np.expand_dims(pp_img, axis=0)
# predict
preds = cnn.predict(pp_img)
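    # The single sigmoid output is treated as the probability of Tuberculosis:
    # values >= 0.5 are reported as TB (confidence preds[0][0]), otherwise as
    # Normal (confidence 1 - preds[0][0]).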
if preds >= 0.5:
out = ('I am {:.2%} percent confirmed that this is a Tuberculosis case'.format(
preds[0][0]))
else:
out = ('I am {:.2%} percent confirmed that this is a Normal case'.format(
1-preds[0][0]))
st.success(out)
image = Image.open(temp)
st.image(image, use_column_width=True)
``` |
{
"source": "Jithendra-k/AshTech-AI_Personal_Voice_Assistant",
"score": 3
} |
#### File: Jithendra-k/AshTech-AI_Personal_Voice_Assistant/Voice Assistant.py
```python
import speech_recognition as sr
import pyttsx3
import datetime
import wikipedia
import webbrowser
import os
import time
import subprocess
from ecapture import ecapture as ec
import wolframalpha
import json
import requests
import pyaudio
print("Loading your AI personal assistant - Ashtech ")
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(text):
engine.say(text)
engine.runAndWait()
def wishMe():
hour=datetime.datetime.now().hour
if hour>=0 and hour<12:
speak("Hello,Good Morning")
print("Hello,Good Morning")
elif hour>=12 and hour<18:
speak("Hello,Good Afternoon")
print("Hello,Good Afternoon")
else:
speak("Hello,Good Evening")
print("Hello,Good Evening")
def takeCommand():
r=sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
audio=r.listen(source)
try:
statement=r.recognize_google(audio,language='en-in')
print(f"user said:{statement}\n")
except Exception as e:
speak("Pardon me, please say that again")
return "None"
return statement
speak("Loading your AI personal assistant AshTech")
wishMe()
if __name__=='__main__':
while True:
speak("Tell me how can I help you now?")
statement = takeCommand().lower()
        if statement == "none":
            continue
if "good bye" in statement or "ok bye" in statement or "stop" in statement:
speak('your personal assistant Ashtech is shutting down,Good bye')
print('your personal assistant Ashtech is shutting down,Good bye')
break
if 'wikipedia' in statement:
speak('Searching Wikipedia...')
statement =statement.replace("wikipedia", "")
results = wikipedia.summary(statement, sentences=3)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in statement:
webbrowser.open_new_tab("https://www.youtube.com")
speak("youtube is open now")
time.sleep(5)
elif 'open google' in statement:
webbrowser.open_new_tab("https://www.google.com")
speak("Google chrome is open now")
time.sleep(5)
elif 'open gmail' in statement:
webbrowser.open_new_tab("gmail.com")
speak("Google Mail open now")
time.sleep(5)
elif "shoping" in statement:
speak("nice mood sir")
webbrowser.open("amazon.com")
speak("here you are sir")
elif "weather" in statement:
api_key = "8ef61edcf1c576d65d836254e11ea420"
base_url = "https://api.openweathermap.org/data/2.5/weather?"
speak("whats the city name")
city_name = takeCommand()
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
response = requests.get(complete_url)
x = response.json()
if x["cod"] != "404":
y = x["main"]
current_temperature = y["temp"]
current_humidiy = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
speak(" Temperature in kelvin unit is " +
str(current_temperature) +
"\n humidity in percentage is " +
str(current_humidiy) +
"\n description " +
str(weather_description))
print(" Temperature in kelvin unit = " +
str(current_temperature) +
"\n humidity (in percentage) = " +
str(current_humidiy) +
"\n description = " +
str(weather_description))
elif 'time' in statement:
strTime=datetime.datetime.now().strftime("%H:%M:%S")
speak(f"the time is {strTime}")
elif 'who are you' in statement or 'what can you do' in statement:
            speak("I am Ashwin's friend, Ashtech version 1 point O, your personal assistant. I am programmed to do minor tasks like "
                  "opening youtube, google chrome, gmail and stackoverflow, predicting time, taking a photo, searching wikipedia, predicting weather "
                  "in different cities, getting top headline news from times of india, and you can ask me computational or geographical questions too!")
elif "who made you" in statement or "who created you" in statement or "who discovered you" in statement:
speak("I was built by <NAME>")
print("I was built by <NAME>")
elif "open stackoverflow" in statement:
webbrowser.open_new_tab("https://stackoverflow.com/login")
speak("Here is stackoverflow")
elif 'news' in statement:
news = webbrowser.open_new_tab("https://timesofindia.indiatimes.com/home/headlines")
speak('Here are some headlines from the Times of India,Happy reading')
time.sleep(6)
elif "camera" in statement or "take a photo" in statement:
ec.capture(0,"robo camera","img.jpg")
elif 'search' in statement:
statement = statement.replace("search", "")
webbrowser.open_new_tab(statement)
time.sleep(5)
elif 'ask' in statement:
speak('I can answer to computational and geographical questions and what question do you want to ask now')
question=takeCommand()
app_id="R2K75H-7ELALHR35X"
client = wolframalpha.Client('R2K75H-7ELALHR35X')
res = client.query(question)
answer = next(res.results).text
speak(answer)
print(answer)
elif "log off" in statement or "sign out" in statement:
speak("Ok , your pc will log off in 10 sec make sure you exit from all applications")
subprocess.call(["shutdown", "/l"])
time.sleep(3)
``` |
{
"source": "jithesh82/noms",
"score": 3
} |
#### File: noms/client/dict_parse.py
```python
import operator
from ..objects.food import Food
def search_parse(search_results):
""" Return a simplified version of the json object returned from the USDA API.
This deletes extraneous pieces of information that are not important for providing
context on the search results.
"""
if 'errors' in search_results.keys():
return None
# Store the search term that was used to produce these results
search_term = search_results['foodSearchCriteria']['query']
if search_results['foods'] == []:
return None
else:
return dict(search_term=search_term, items=search_results['foods'])
def food_parse(food_results, nutrient_dict, values):
""" Return a simplified version of the json object returned from the USDA API.
This deletes extraneous pieces of information, including nutrients that are
not tracked. It also exchanges nutrient names for their more common names, or "nicknames",
as defined in noms.objects.nutrient_dict
"""
if len(food_results) == 0:
return None
food_arr = []
tracked_nutrients = []
nutrient_nicknames = []
# nutrient_dict is a global variable; some of the
# assignments below alters it value across modules
# thus making a shallow copy it
nutrient_dict = nutrient_dict()()
for nutrient in nutrient_dict:
if "nickname" in nutrient.keys():
nutrient_nicknames.append(nutrient["nickname"])
else:
nutrient_nicknames.append(None)
#nutrient['nutrient_id'] = str(nutrient['nutrient_id'])
tracked_nutrients.append(nutrient["nutrient_id"])
# Iterate through each food and simplify names
f = 0
for food in food_results:
# create a 'value' key and equate it to 'amount'
# to take into account the changes in the new
# api results
for nutrient in food["foodNutrients"]:
if 'amount' in nutrient.keys():
nutrient['value'] = nutrient['amount']
else:
nutrient['value'] = 0
nutrient['nutrient']['number'] = float(nutrient['nutrient']['number'])
nutrient['name'] = nutrient['nutrient']['name']
nutrient['unit'] = nutrient['nutrient']['unitName']
nutrient['nutrient_id'] = nutrient['nutrient']['number']
# sort nutrients by id if not already
n_list = food["foodNutrients"]
n_list.sort(key=lambda x: x['nutrient']['number'])
# end sort
n = 0
for nutrient in food["foodNutrients"]:
if n == len(tracked_nutrients):
break
# check if this is a nutrient we should record
if (nutrient["nutrient"]['number']) == (tracked_nutrients[n]):
potential_name = nutrient_nicknames[n]
if potential_name != None:
nutrient["nutrient"]["name"] = potential_name
n += 1
# check if the food doesn't contain a tracked nutrient
while n < len(tracked_nutrients) and (nutrient["nutrient"]["number"]) > tracked_nutrients[n]:
to_insert = nutrient_dict[n].copy()
to_insert.update(value=0)
to_insert["nutrient"] = {"number":to_insert["nutrient_id"]}
food["foodNutrients"].insert(n,to_insert)
n += 1
while n < len(tracked_nutrients) and food["foodNutrients"][-1]["nutrient"]["number"] < tracked_nutrients[-1]:
to_insert = nutrient_dict[n].copy()
to_insert.update(value=0)
to_insert["nutrient"] = {"number":to_insert["nutrient_id"]}
food["foodNutrients"].insert(n,to_insert)
n += 1
n = 0
n_to_del = []
for nutrient in food["foodNutrients"]:
# check if this is a nutrient we should delete
if (nutrient["nutrient"]["number"]) not in tracked_nutrients:
n_to_del.append(n)
n += 1
offset = 0
for del_n in n_to_del:
del food["foodNutrients"][del_n - offset]
offset += 1
# sort nutrients by id if not already
n_list = food["foodNutrients"]
n_list.sort(key=lambda x: x['nutrient']['number'])
# end sort
n = 0
for nutrient in food["foodNutrients"]:
if nutrient_nicknames[n] != None:
nutrient["name"] = nutrient_nicknames[n]
nutrient["value"] = nutrient["value"] * (values[f]/100)
n += 1
# deleting keys except that in keys_to_keep
keys_to_keep = ['nutrient_id', 'name', 'group', 'unit', 'value']
for nutrient in food['foodNutrients']:
nutrient_copy = nutrient.copy()
for key in nutrient_copy:
if key not in keys_to_keep:
del nutrient[key]
f += 1
food_arr.append(Food(food))
return food_arr
``` |
{
"source": "jithil-kore/angular",
"score": 2
} |
#### File: browsers/chromium/chromium.bzl
```python
load("//dev-infra/browsers:platform_http_file.bzl", "platform_http_file")
"""
Defines repositories for Chromium that can be used inside Karma unit tests
and Protractor e2e tests with Bazel.
"""
def define_chromium_repositories():
# To update to a newer version of Chromium see instructions in
# https://github.com/angular/angular/blob/master/dev-infra/browsers/README.md.
platform_http_file(
name = "org_chromium_chromium_amd64",
licenses = ["notice"], # BSD 3-clause (maybe more?)
sha256 = "36759ed6d151645d00a3a015200334edc70188b422eec51bcaa5790c8e906e27",
# 87.0.4280
urls = ["https://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux_x64/812847/chrome-linux.zip"],
)
platform_http_file(
name = "org_chromium_chromium_macos",
licenses = ["notice"], # BSD 3-clause (maybe more?)
sha256 = "e10533c84ef57232975d6bde9cd28fd0354371e9556dda85e01178e6dcd56b93",
# 87.0.4280
urls = ["https://commondatastorage.googleapis.com/chromium-browser-snapshots/Mac/812851/chrome-mac.zip"],
)
platform_http_file(
name = "org_chromium_chromium_windows",
licenses = ["notice"], # BSD 3-clause (maybe more?)
sha256 = "40d0dec1892d729db2f7d8f27feff762b070a02f04d4e14f4e37b97d6b7c3c8f",
# 87.0.4280
urls = ["https://commondatastorage.googleapis.com/chromium-browser-snapshots/Win/812822/chrome-win.zip"],
)
platform_http_file(
name = "org_chromium_chromedriver_amd64",
licenses = ["reciprocal"], # BSD 3-clause, ICU, MPL 1.1, libpng (BSD/MIT-like), Academic Free License v. 2.0, BSD 2-clause, MIT
sha256 = "d859f8ecb21e26d3ddaf3f229da695bc86512f4e6c9fe32533af7a8b36783ec5",
# 87.0.4280
urls = ["https://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux_x64/812847/chromedriver_linux64.zip"],
)
platform_http_file(
name = "org_chromium_chromedriver_macos",
licenses = ["reciprocal"], # BSD 3-clause, ICU, MPL 1.1, libpng (BSD/MIT-like), Academic Free License v. 2.0, BSD 2-clause, MIT
sha256 = "aa7a99fa23287725d7108cc07baa94e6f0ef4171ff7b134018387a939a67d93d",
# 87.0.4280
urls = ["https://commondatastorage.googleapis.com/chromium-browser-snapshots/Mac/812851/chromedriver_mac64.zip"],
)
platform_http_file(
name = "org_chromium_chromedriver_windows",
licenses = ["reciprocal"], # BSD 3-clause, ICU, MPL 1.1, libpng (BSD/MIT-like), Academic Free License v. 2.0, BSD 2-clause, MIT
sha256 = "826f2bd0c50b823e7642860ed08cacf69d3756002a71ac30cdd77c68f31d2d24",
# 87.0.4280
urls = ["https://commondatastorage.googleapis.com/chromium-browser-snapshots/Win/812822/chromedriver_win32.zip"],
)
``` |
{
"source": "Jithin0801/Code-Breaker-Console-Game",
"score": 4
} |
#### File: Jithin0801/Code-Breaker-Console-Game/breaker.py
```python
import random
MATCH = "One of the digits is a match!"
CLOSE = "You are close! One of the digit is a correct but in wrong position!"
NOT_CLOSE = "Not even close!"
CORRECT = "Oh! you got me correctly!"
guess_flag = 0
def get_guess():
global guess_flag
if guess_flag == 0:
return list(input("What is you guess? "))
else:
return list(input("Guess again! "))
def generate_code():
l = [str(i) for i in range(10)]
random.shuffle(l)
return l[:3]
def generate_result(clue):
return MATCH if MATCH in clue else CLOSE if CLOSE in clue else NOT_CLOSE
def find_guess(code, user_code):
global guess_flag
clue = []
if code == user_code:
return CORRECT
else:
for index, num in enumerate(user_code):
guess_flag = 1
if num == code[index]:
clue.append(MATCH)
elif num in code:
clue.append(CLOSE)
else:
clue.append(NOT_CLOSE)
result = generate_result(clue)
return result
def game():
guess = []
code = generate_code()
f = open("code.txt", "w")
st = "".join(code)
f.write(st)
f.close()
while guess != CORRECT:
user_code = get_guess()
guess = find_guess(code, user_code)
print(guess)
#Driver code
flag = 0
def main():
ans = "Y"
global flag
while ans == "Y" or ans == "y":
if flag == 1:
print("Continue the game? (Y-Yes/N-No)")
else:
print("Hello, Code Breaker!")
print("Start the game? (Y-Yes/N-No)")
ans = input()
if ans == "Y" or ans == "y":
flag = 1
game()
else:
break
main()
``` |
{
"source": "jithin-c/screenshot-paste-and-upload-django",
"score": 2
} |
#### File: screenshot-paste-and-upload-django/file_upload/views.py
```python
import os
import uuid
from django.conf import settings
from django.http.response import JsonResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.views.decorators.http import require_POST
from file_upload.models import Screenshot
def list_screenshots(request):
screenshots = Screenshot.objects.all()
return render_to_response('landing_page.html',{'screenshots':screenshots},context_instance=RequestContext(request))
@require_POST
def upload_screenshot(request):
unique_filename = 'screenshot_' + str(uuid.uuid4()) + '.png'
file_upload_path = os.path.join('screenshots', unique_filename)
try:
content = request.POST.get('data').replace('data:image/png;base64,', '')
fh = open(os.path.join(settings.MEDIA_ROOT,file_upload_path), "wb")
fh.write(content.decode('base64'))
fh.close()
file = Screenshot.objects.create(attachment = file_upload_path)
return JsonResponse({
'status': 'success',
# 'delete_url': reverse('delete_file', kwargs={'pk':file.id}),
'fileName': unique_filename,
'url': file.attachment.url
})
except Exception,e:
print e
return JsonResponse({'status': 'error'})
``` |
{
"source": "jithinjustin/ExploringSpark",
"score": 3
} |
#### File: spark/wordcount/WordCountTextFile.py
```python
__author__ = 'jithinjustin'
import sys
sys.path.append("/Users/jithinjustin/spark-1.4.1-bin-hadoop2.6/python/")
from operator import add
from pyspark import SparkContext
import re
def wordCount(wordListRDD):
wordCountRDD=wordListRDD.map(lambda x:(x,1)).reduceByKey(add)
return wordCountRDD
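# Example: wordCount(sc.parallelize(['cat', 'dog', 'cat'])).collect() contains
# the pairs ('cat', 2) and ('dog', 1).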
def removePunctuation(text):
return re.sub(r'[^a-z0-9\s]','',text.lower()).strip()
if __name__ == '__main__':
sc=SparkContext()
shakespeareRDD = (sc
.textFile("shakesphere.txt", 8)
.map(removePunctuation))
shakespeareRDD.cache()
#zip index with line
print '\n'.join(shakespeareRDD
.zipWithIndex() # to (line, lineNum)
.map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line'
.take(15))
#convert the sentences to individual words
shakespeareWordsRDD = shakespeareRDD.flatMap(lambda x: x.split())
shakespeareWordCount = shakespeareWordsRDD.count()
print shakespeareWordsRDD.top(5)
print shakespeareWordCount
#prune out empty words
shakeWordsRDD = shakespeareWordsRDD.filter(lambda x : (x!=''))
shakeWordCount = shakeWordsRDD.count()
print shakeWordCount
#give top 20 words with count
top20WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(20, lambda (k, v): -v)
print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top20WordsAndCounts))
``` |
{
"source": "JithinKS97/dune-analytics",
"score": 3
} |
#### File: src/dune_analytics/__init__.py
```python
from gql import gql, Client
from gql.transport.requests import RequestsHTTPTransport
import time
class Dune:
    def __init__(self, username=None, password=None, transport=None):
if transport is None:
transport = RequestsHTTPTransport(url="https://core-hsr.duneanalytics.com/v1/graphql")
self.client = Client(transport=transport, fetch_schema_from_transport=True)
if username and password:
self.authenticate(username, password)
def authenticate(self, username, password):
transport = self.client.transport
csrf = transport.session.post("https://duneanalytics.com/api/auth/csrf").json()['csrf']
r_auth = transport.session.post("https://duneanalytics.com/api/auth", data={
"csrf": csrf,
"action": "login",
"username": username,
"password": password
}, headers={
"Origin": "https://duneanalytics.com",
"Referer": "https://duneanalytics.com/auth/login"
}, allow_redirects=False)
self.refresh_session()
def refresh_session(self):
r_session = self.client.transport.session.post("https://duneanalytics.com/api/auth/session")
dune_session = r_session.json()
if 'token' in dune_session:
self.sub = dune_session['sub']
self.client.transport.session.headers['Authorization'] = "Bearer " + dune_session['token']
self.user = self.find_session_user()
def find_session_user(self):
gql_query = gql("""
query FindSessionUser($sub: uuid!) {
users(where: {
private_info: {
cognito_id: {_eq: $sub}
}
}) { ...SessionUser __typename }
}
fragment SessionUser on users {
id name profile_image_url memberships {
group { ...Group __typename }
__typename
} __typename
}
fragment Group on groups { id type permissions __typename }
"""
)
result = self.client.execute(gql_query, operation_name="FindSessionUser", variable_values={"sub": self.sub})
return result['users'][0]
def query(self, query):
self.refresh_session()
new_query = self.upsert_query(query)
job_id = self.execute_query(new_query['id'])
self.wait_for_job(job_id)
return self.find_result_data_by_job(job_id)
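    # Example usage (sketch; credentials are placeholders):
    #   dune = Dune(username="user@example.com", password="...")
    #   for row in dune.query("SELECT 1 AS x"):
    #       print(row)
    # query() yields one dict per result row, as produced by find_result_data_by_job.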
def list_tables(self, name_filter=None, dataset_id=4, limit=50):
gql_query = gql('''
query ListSchemas($dataset_id: Int!, $query: [blockchain_schemas_bool_exp], $offset: Int!, $limit: Int!) {
blockchain_schemas(
where: {dataset_id: {_eq: $dataset_id}, _and: $query}
order_by: [{schema: asc}, {table: asc}]
distinct_on: [schema, table]
offset: $offset
limit: $limit
) {
schema
table
__typename
}
}
''')
query = []
offset = 0
if limit > 10000:
raise Exception("Please don't abuse Dune's free service")
if name_filter is not None:
query.append({
"full_name": {
"_ilike": '%' + name_filter + '%'
}
})
while True:
result = self.client.execute(gql_query, variable_values={
"dataset_id": dataset_id,
"limit": limit,
"query": query,
"offset": offset
})
if len(result['blockchain_schemas']) == 0:
break
for table in result['blockchain_schemas']:
yield {"schema": table['schema'], "table": table['table']}
offset += limit
time.sleep(1) # Let's be gentle. Dune is growing
def list_columns(self, schema, table, dataset_id=4, limit=50):
gql_query = gql('''
query ListColumns($dataset_id: Int!, $schema: String!, $table: String!, $limit: Int!) {
blockchain_schemas(
where: {dataset_id: {_eq: $dataset_id}, schema: {_eq: $schema}, table: {_eq: $table}}
order_by: {column_name: asc}
limit: $limit
) {
column_name
data_type
__typename
}
}
''')
result = self.client.execute(gql_query, variable_values={
"schema": schema,
"table": table,
"dataset_id": dataset_id,
"limit": limit
})
for column in result['blockchain_schemas']:
yield { "name": columns['column_name'], "data_type": column['data_type'] }
def upsert_query(self, query, dataset_id=4):
gql_query = gql("""
mutation UpsertQuery($session_id: Int!, $object: queries_insert_input!, $on_conflict: queries_on_conflict!, $favs_last_24h: Boolean! = false, $favs_last_7d: Boolean! = false, $favs_last_30d: Boolean! = false, $favs_all_time: Boolean! = true) {
insert_queries_one(object: $object, on_conflict: $on_conflict) {
...Query
favorite_queries(where: {user_id: {_eq: $session_id}}, limit: 1) {
created_at
__typename
}
__typename
}
}
fragment Query on queries {
id
dataset_id
name
description
query
private_to_group_id
is_temp
is_archived
created_at
updated_at
schedule
tags
parameters
user {
...User
__typename
}
visualizations {
id
type
name
options
created_at
__typename
}
favorite_queries_aggregate @include(if: $favs_all_time) {
aggregate {
count
__typename
}
__typename
}
query_favorite_count_last_24h @include(if: $favs_last_24h) {
favorite_count
__typename
}
query_favorite_count_last_7d @include(if: $favs_last_7d) {
favorite_count
__typename
}
query_favorite_count_last_30d @include(if: $favs_last_30d) {
favorite_count
__typename
}
__typename
}
fragment User on users {
id
name
profile_image_url
__typename
}
"""
)
result = self.client.execute(gql_query, operation_name="UpsertQuery", variable_values={
"favs_last_24h": False,
"favs_last_7d": False,
"favs_last_30d": False,
"favs_all_time": False,
"object": {
"schedule": None,
"dataset_id": dataset_id,
"name": "Jupyter Temporary Query",
"query": query,
"user_id": self.user['id'],
"description": "",
"is_archived": False,
"is_temp": True,
"parameters": [],
"visualizations": {
"data": [
{
"type": "table",
"name": "Query results",
"options": {}
}
],
"on_conflict": {
"constraint": "visualizations_pkey",
"update_columns": [
"name",
"options"
]
}
}
},
"on_conflict": {
"constraint": "queries_pkey",
"update_columns": [
"dataset_id",
"name",
"description",
"query",
"schedule",
"is_archived",
"is_temp",
"tags",
"parameters"
]
},
"session_id": self.user['id']
})
return result['insert_queries_one']
def execute_query(self, query_id, **kwargs):
gql_query = gql("""
mutation ExecuteQuery($query_id: Int!, $parameters: [Parameter!]!) {
execute_query(query_id: $query_id, parameters: $parameters) {
job_id
__typename
}
}
""")
result = self.client.execute(gql_query, operation_name="ExecuteQuery", variable_values={
"query_id": query_id,
"parameters": [ { "key": key, "type": "text", "value": value} for key, value in kwargs.items() ]
})
return result['execute_query']['job_id']
def find_result_job(self, job_id):
gql_query = gql("""
query FindResultJob($job_id: uuid) {
jobs(where: {id: {_eq: $job_id}}) {
id
user_id
locked_until
created_at
category
__typename
}
view_queue_positions(where: {id: {_eq: $job_id}}) {
pos
__typename
}
}
""")
result = self.client.execute(gql_query, operation_name="FindResultJob", variable_values={
"job_id": job_id
})
return result['jobs']
def wait_for_job(self, job_id):
while True:
jobs = self.find_result_job(job_id)
if len(jobs) == 0:
break
else:
time.sleep(1)
def find_result_data_by_job(self, job_id):
gql_query = gql("""
query FindResultDataByJob($job_id: uuid!) {
query_results(where: {job_id: {_eq: $job_id}}) {
id
job_id
error
runtime
generated_at
columns
__typename
}
get_result_by_job_id(args: {want_job_id: $job_id}) {
data
__typename
}
}
""")
result = self.client.execute(gql_query, operation_name="FindResultDataByJob", variable_values={
"job_id": job_id
})
for item in result['get_result_by_job_id']:
yield item['data']
``` |
{
"source": "jithin-k-sreedharan/fourier_learning",
"score": 2
} |
#### File: fourier_learning/src/setup.py
```python
from distutils.core import setup
# from distutils.extension import Extension
from Cython.Build import cythonize, build_ext
import os
import sysconfig
import numpy as np
def get_ext_filename_without_platform_suffix(filename):
name, ext = os.path.splitext(filename)
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix == ext:
return filename
ext_suffix = ext_suffix.replace(ext, '')
idx = name.find(ext_suffix)
if idx == -1:
return filename
else:
return name[:idx] + ext
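# e.g. turns "fourier.cpython-38-x86_64-linux-gnu.so" into "fourier.so"
# (the exact platform suffix depends on the local EXT_SUFFIX sysconfig value)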
class BuildExtWithoutPlatformSuffix(build_ext):
def get_ext_filename(self, ext_name):
filename = super().get_ext_filename(ext_name)
return get_ext_filename_without_platform_suffix(filename)
setup(name='Fourier coefficient',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize("*.pyx", annotate=True),
include_dirs=[np.get_include()],
)
``` |
{
"source": "jithin-mathew/Python-Autocomplete",
"score": 3
} |
#### File: jithin-mathew/Python-Autocomplete/extract_code.py
```python
import os
from pathlib import Path
from typing import List, NamedTuple
from lab.logger import Logger
from parser import tokenizer
from parser.tokenizer import encode, parse_string
COMMENT = '#'
MULTI_COMMENT = '"""'
_logger = Logger()
class _PythonFile(NamedTuple):
relative_path: str
project: str
path: Path
class _GetPythonFiles:
"""
Get list of python files and their paths inside `data/source` folder
"""
def __init__(self):
self.source_path = Path(os.getcwd()) / 'data' / 'source'
self.files: List[_PythonFile] = []
self.get_python_files(self.source_path)
def add_file(self, path: Path):
"""
Add a file to the list of tiles
"""
project = path.relative_to(self.source_path).parents
project = project[len(project) - 2]
relative_path = path.relative_to(self.source_path / project)
self.files.append(_PythonFile(relative_path=str(relative_path),
project=str(project),
path=path))
def get_python_files(self, path: Path):
"""
Recursively collect files
"""
for p in path.iterdir():
if p.is_dir():
self.get_python_files(p)
else:
if p.suffix == '.py':
self.add_file(p)
def _fix_indentation(parsed: List[tokenizer.ParsedToken]) -> List[tokenizer.ParsedToken]:
"""
Change indentation tokens. Remove `DEDENT` tokens and
add `INDENT` tokens to each line.
This is easier for prediction.
"""
res: List[tokenizer.ParsedToken] = []
indentation = 0
indented = False
for t in parsed:
if t.type == tokenizer.TokenType.indent:
indentation += 1
elif t.type == tokenizer.TokenType.dedent:
indentation -= 1
elif t.type in [tokenizer.TokenType.new_line,
tokenizer.TokenType.eof]:
indented = False
res.append(t)
else:
if not indented:
for _ in range(indentation):
res.append(tokenizer.ParsedToken(tokenizer.TokenType.indent, 0))
indented = True
res.append(t)
return res
def _remove_comments(parsed: List[tokenizer.ParsedToken]) -> List[tokenizer.ParsedToken]:
"""
Remove comment tokens
"""
res = []
for p in parsed:
if p.type == tokenizer.TokenType.comment:
continue
else:
res.append(p)
return res
def _remove_empty_lines(parsed: List[tokenizer.ParsedToken]) -> List[tokenizer.ParsedToken]:
"""
Remove empty lines
"""
tokens = [tokenizer.TokenType.new_line, tokenizer.TokenType.new_line]
res = []
for p in parsed:
for i in range(1):
tokens[i] = tokens[i + 1]
tokens[-1] = p.type
all_new_line = True
for t in tokens:
if t != tokenizer.TokenType.new_line:
all_new_line = False
if all_new_line:
continue
else:
res.append(p)
return res
def _read_file(path: Path) -> List[int]:
"""
Read and encode a file
"""
with open(str(path)) as f:
content = f.read()
parsed = parse_string(content)
parsed = _remove_comments(parsed)
parsed = _remove_empty_lines(parsed)
parsed = _fix_indentation(parsed)
serialized = encode(parsed)
# deserialized = tokenizer.deserialize(serialized)
# for i in range(len(serialized)):
# assert deserialized[i] == parsed[i]
#
# res = to_text(deserialized)
# print(res)
return serialized
def main():
source_files = _GetPythonFiles().files
_logger.info(files=len(source_files))
with open(str(Path(os.getcwd()) / 'data' / 'all.py'), 'w') as f:
with _logger.section("Parse", total_steps=len(source_files)):
for i, source in enumerate(source_files):
try:
serialized = _read_file(source.path)
except Exception as e:
print(source.path)
print(e)
continue
serialized = [str(t) for t in serialized]
f.write(f"{str(source.path)}\n")
f.write(" ".join(serialized) + "\n")
f.flush()
_logger.progress(i + 1)
if __name__ == '__main__':
main()
```
#### File: Python-Autocomplete/parser/merge_tokens.py
```python
from typing import List, Dict, Optional
import numpy as np
from lab.logger import Logger
from parser import tokenizer
from parser.load import EncodedFile
class InputProcessor:
"""
TODO: We should do this at tokenizer level
"""
TYPE_MASK_BASE = 1 << 20
MAX_LENGTH = [1, 80, 25]
def __init__(self, logger: Logger):
self.logger = logger
self.infos: List[List[IdentifierInfo]] = [[], []]
self.dictionaries: List[Dict[str, int]] = [{} for _ in self.infos]
self.arrays: List[np.ndarray] = [np.array([], dtype=np.uint8) for _ in self.infos]
self.counts: List[int] = [0 for _ in self.infos]
types = [tokenizer.TokenType.name, tokenizer.TokenType.number]
# -1 because this is used right now for decoding,
# and we've added 1 since 0 is used for padding
self.offsets: List[int] = [0] + [tokenizer.get_vocab_offset(t) - 1 for t in types]
def _add_to(self, type_idx: int, key: str, arr: np.ndarray):
idx = self.dictionaries[type_idx]
infos: List[IdentifierInfo] = self.infos[type_idx]
data_array = self.arrays[type_idx]
if key in idx:
infos[idx[key]].count += 1
return
idx[key] = len(infos)
infos.append(IdentifierInfo(len(infos), len(data_array), len(arr), key))
self.arrays[type_idx] = np.concatenate((data_array, arr), axis=0)
def gather(self, input_codes: np.ndarray):
types = [tokenizer.TokenType.name, tokenizer.TokenType.number]
offsets: List[int] = [tokenizer.get_vocab_offset(t) for t in types]
strings: List[Optional[str]] = [None for _ in types]
arrays: List[List[int]] = [[] for _ in types]
for c in input_codes:
t = tokenizer.DESERIALIZE[c]
for type_idx, token_type in enumerate(types):
if t.type != token_type:
if strings[type_idx] is not None:
self._add_to(type_idx, strings[type_idx],
np.array(arrays[type_idx], dtype=np.uint8))
strings[type_idx] = None
arrays[type_idx] = []
else:
ch = tokenizer.DECODE[c][0]
# add one because 0 is for padding
arrays[type_idx].append(c + 1 - offsets[type_idx])
if strings[type_idx] is None:
strings[type_idx] = ch
else:
strings[type_idx] += ch
for type_idx, _ in enumerate(types):
if strings[type_idx] is not None:
self._add_to(type_idx, strings[type_idx],
np.array(arrays[type_idx], dtype=np.uint8))
def gather_files(self, files: List[EncodedFile]):
for f in self.logger.iterator("Counting", files):
self.gather(f.codes)
def transform(self, input_codes: np.ndarray):
types = [tokenizer.TokenType.name, tokenizer.TokenType.number]
strings: List[Optional[str]] = [None for _ in types]
type_mask = []
codes = []
for c in input_codes:
t = tokenizer.DESERIALIZE[c]
skip = False
for type_idx, token_type in enumerate(types):
if t.type != token_type:
if strings[type_idx] is not None:
type_mask.append(type_idx + 1)
idx = self.dictionaries[type_idx][strings[type_idx]]
codes.append(self.infos[type_idx][idx].code)
strings[type_idx] = None
else:
ch = tokenizer.DECODE[c][0]
# add one because 0 is for padding
if strings[type_idx] is None:
strings[type_idx] = ch
else:
strings[type_idx] += ch
skip = True
if skip:
continue
type_mask.append(0)
codes.append(c)
for type_idx, token_type in enumerate(types):
if strings[type_idx] is not None:
type_mask.append(type_idx + 1)
idx = self.dictionaries[type_idx][strings[type_idx]]
codes.append(self.infos[type_idx][idx].code)
strings[type_idx] = None
codes = np.array(codes, dtype=np.int32)
type_mask = np.array(type_mask, dtype=np.int32)
codes = type_mask * self.TYPE_MASK_BASE + codes
return codes
def transform_files(self, files: List[EncodedFile]) -> List[EncodedFile]:
transformed = []
for f in self.logger.iterator("Transforming", files):
transformed.append(EncodedFile(f.path, self.transform(f.codes)))
return transformed
class IdentifierInfo:
code: int
count: int
offset: int
length: int
string: str
def __init__(self, code, offset, length, string):
self.code = code
self.count = 1
self.offset = offset
self.length = length
self.string = string
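# Sketch (an illustrative addition, not part of the original file): transform() packs the
# token type and the identifier/number index into one integer, so a combined code can be
# unpacked again with divmod, assuming the per-type index stays below TYPE_MASK_BASE:
#   type_idx, base_code = divmod(combined_code, InputProcessor.TYPE_MASK_BASE)
#   # type_idx == 0 -> plain token, 1 -> name identifier, 2 -> number literal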
``` |
{
"source": "jithin-qis/face-tagging",
"score": 3
} |
#### File: face-tagging/core/code.py
```python
import cv2
import sys
import face_recognition
import os
# from tkinter.filedialog import *
def blur_img(post, pro_pic):
imagePath = post
pro = pro_pic
# print(pro)
pro = cv2.imread(pro)
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.3,
minNeighbors=3,
minSize=(30, 30)
)
# print("[INFO] Found {0} Faces!".format(len(faces)))
images = os.listdir(os.path.join(os.getcwd(),'core','data'))
for i in images:
os.remove(os.path.join(os.getcwd(),'core','data',i))
cv2.imwrite(os.path.join(os.getcwd(),'core','data','1.jpg'),pro)
images = ['1.jpg'] # refresh the listing: data/ now only contains the reference profile picture
m=0
im=os.listdir(os.path.join(os.getcwd(),'core','face'))
for i in im:
os.remove(os.path.join(os.getcwd(),'core','face',i))
for (x, y, w, h) in faces:
m=m+1
name=str(m)+'.jpg'
path=os.path.join(os.path.join(os.getcwd(),'core','face'),name)
# print(path)
# cv2.imshow('faces',image[y:y+h,x:x+w])
# cv2.waitKey(1000)
cv2.imwrite(path,image[y:y+h,x:x+w])
# cv2.imwrite(images[0],image[y:y+h,x:x+w])
# cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 6)
image_to_be_matched = face_recognition.load_image_file(path)
try:
img_encoded = face_recognition.face_encodings(image_to_be_matched)[0]
except IndexError:
continue
for face in images:
## print(face)
# load the image
current_image = face_recognition.load_image_file(os.path.join(os.getcwd(),'core','data',face))
# encode the loaded image into a feature vector
current_image_encoded = face_recognition.face_encodings(current_image)[0]
# match your image with the image and check if it matches
result = face_recognition.compare_faces(
[img_encoded], current_image_encoded)
if result ==[True]:
print('+++++++++++++++++Found this',face)
aaa=image[y:y+h,x:x+w]
blurImg = cv2.blur(aaa,(70,70))
blurImg = cv2.blur(blurImg,(70,70))
image[y:y+h,x:x+w]=blurImg
# cv2.imshow('image',cv2.resize(image,(256,256)))
# cv2.waitKey(4000)
# cv2.imshow('image',cv2.resize(image,(256,256)))
# cv2.waitKey(1000)
cv2.imwrite(imagePath,image)
def check_img(post, pro_pic):
imagePath = post
pro = pro_pic
# print(pro)
pro = cv2.imread(pro)
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.3,
minNeighbors=3,
minSize=(30, 30)
)
# print("[INFO] Found {0} Faces!".format(len(faces)))
images = os.listdir(os.path.join(os.getcwd(),'core','data'))
for i in images:
os.remove(os.path.join(os.getcwd(),'core','data',i))
cv2.imwrite(os.path.join(os.getcwd(),'core','data','1.jpg'),pro)
images = ['1.jpg'] # refresh the listing: data/ now only contains the reference profile picture
m=0
im=os.listdir(os.path.join(os.getcwd(),'core','face'))
for i in im:
os.remove(os.path.join(os.getcwd(),'core','face',i))
for (x, y, w, h) in faces:
m=m+1
name=str(m)+'.jpg'
path=os.path.join(os.path.join(os.getcwd(),'core','face'),name)
# print(path)
# cv2.imshow('faces',image[y:y+h,x:x+w])
# cv2.waitKey(1000)
cv2.imwrite(path,image[y:y+h,x:x+w])
# cv2.imwrite(images[0],image[y:y+h,x:x+w])
# cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 6)
image_to_be_matched = face_recognition.load_image_file(path)
try:
img_encoded = face_recognition.face_encodings(image_to_be_matched)[0]
except IndexError:
continue
for face in images:
## print(face)
# load the image
current_image = face_recognition.load_image_file(os.path.join(os.getcwd(),'core','data',face))
# encode the loaded image into a feature vector
current_image_encoded = face_recognition.face_encodings(current_image)[0]
# match your image with the image and check if it matches
result = face_recognition.compare_faces(
[img_encoded], current_image_encoded)
if result ==[True]:
return pro_pic
``` |
{
"source": "jithinrkoshy/dhruma-django-website",
"score": 2
} |
#### File: dhruma/dh_app/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from dh_app.models import TreeModel
from dh_app.forms import messageForm
# Create your views here.
def index(request):
tree = TreeModel.objects.all()
form = messageForm()
if request.method == 'POST':
form = messageForm(request.POST)
if form.is_valid():
form.save(commit=True)
form = messageForm()
else:
print('ERROR')
return render(request,'dh_app/index.html',context={'tree':tree,'form':form})
def tree(request,uuid):
forest = TreeModel.objects.all()
tree_obj = None
for tree in forest:
if(tree.tree_class == uuid):
tree_obj = tree
break
return render(request,'dh_app/treeinfo.html',context = {'tree':tree_obj})
``` |
{
"source": "jithinsarath/python3notes",
"score": 5
} |
#### File: jithinsarath/python3notes/decorators.py
```python
def div(a, b):
return a / b
print(div(4, 2))
print(div(2, 4))
# Let's say that I have a requirement to divide the largest by smallest, no matter what order it is
# How can I implement without touching the original code
# We define a smarter div function that takes another function as an input
def smart_div(function):
# Within this function we define another function that takes the same number of arguments as the original function to which
# We are adding features
def worker(a, b):
# Now we implement the features
if a < b:
a, b = b, a
# and then call the original function. Note that the values passed have already been manipulated.
return function(a, b)
# Finally, we return the inner function
return worker
# We are now re-defining the div() function in our code.
div = smart_div(div)
# And when you now call, it uses the new feature to calculate.
print(div(4, 2))
print(div(2, 4))
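# The same wrapping is usually written with Python's decorator syntax; this short
# sketch is an illustrative addition, not part of the original notes.
@smart_div
def div2(a, b):
    return a / b
print(div2(2, 4))  # prints 2.0, because smart_div swaps the arguments first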
``` |
{
"source": "jithinsunnyofficial/TeleVital-MicrosoftHack",
"score": 3
} |
#### File: jithinsunnyofficial/TeleVital-MicrosoftHack/patientPrioritizer.py
```python
import pyrebase
from flask import Flask, request
from flask_restful import Resource, Api
import pickle
app = Flask(__name__)
api = Api(app)
class testabusive(Resource):
def get(self, user_index):
credentials_json = open("credentials.json","rb")
config = pickle.load(credentials_json)
credentials_json.close()
firebase = pyrebase.initialize_app(config)
db = firebase.database()
nm = db.child("Appointments/").child(user_index).get()
res = nm.val()
##print(res)
try:
age = int(res['age'])
except:
age = 44.0
try:
cough = int(res['cough'])
except:
cough = 0.0
try:
gender = res['gender']
except:
gender = 'male'
try:
height = int(res['height'])
except:
height = '170'
try:
pregnant = res['pregnant']
except:
pregnant = 'no'
try:
spo2 = int(res['spo2'])
except:
spo2 = 96
try:
weight = int(res['weight'])
except:
weight = 68
try:
tired = int(res['tiredness'])
except:
tired = 0.0
try:
temp = int(res['temp'])
except:
temp = 98
try:
sorethroat = int(res['sorethroat'])
except:
sorethroat = 0.0
try:
resp = int(res['rr'])
except:
resp = 13.0
try:
precond = res['precon']
except:
precond = 'no'
try:
heart = int(res['hr'])
except:
heart = 50.0
try:
breathing = int(res['breathingdiff'])
except:
breathing = 0.0
body_mass_index = 0.0
death_rate_age = 0.0
bmi_weight = 0.0
death_rate_gender = 0.0
death_rate_heart = 0.0
death_rate_resp = 0.0
if pregnant.lower() == "yes":
death_rate_preg = 6.0
else:
death_rate_preg = 0.0
if precond.lower() == "yes":
death_rate_preconding = 6.0
else:
death_rate_preconding = 0.0
height = int(height) * 0.393701
weight = int(weight) * 2.20462
body_mass_index = (int(weight) * 703) / (int(height) ** 2)
if body_mass_index < 18.5:
bmi_weight=.05
elif body_mass_index < 24.9:
bmi_weight=0
elif body_mass_index > 25.0 and body_mass_index < 28.9:
bmi_weight=.07
elif body_mass_index > 28.9 and body_mass_index < 34.9:
bmi_weight=.08
elif body_mass_index > 34.9 and body_mass_index < 39.9:
bmi_weight=.09
else:
bmi_weight=1.0
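# Worked example (illustration only): 170 cm and 68 kg convert to about 66.9 in and
# 149.9 lb, giving a BMI of roughly 23.5; that falls in the 18.5-24.9 band, so bmi_weight = 0.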
#print("The death_rate_preg is " + str(death_rate_preg))
#print("The death_rate_preexisting is " + str(death_rate_preexisting))
#print("A person with a BMI of " + str(body_mass_index ))
if int(age) < 9:
death_rate_age=0.0
elif int(age) < 39:
death_rate_age=1.0
elif int(age) < 49:
death_rate_age=3.0
elif int(age) < 59:
death_rate_age=3.0
elif int(age) < 69:
death_rate_age=6.0
elif int(age) < 79:
death_rate_age=7.0
else:
death_rate_age=10.0
#print("The death_rate_age is " + str(death_rate_age))
if gender.lower() == "male":
death_rate_gender=6.0
else:
death_rate_gender=4.0
#print("The death_rate_gender is " + str(death_rate_gender))
#athletes have heart rate of 40(implies good functioning of heart)
#less than 40 is abnormal
if int(heart) < 40:
death_rate_heart=5.0
elif int(heart) <60 and heart > 40:
death_rate_heart=0.0
elif int(heart) > 60 and heart < 100 :
death_rate_heart=0.0
else:
death_rate_heart=5.0
#print("The death_rate_heart is " + str(death_rate_heart))
#given more imporance to respiratory rate
if int(resp) < 10:
death_rate_resp=10.0
elif int(resp) < 12:
death_rate_resp=9.0
elif int(resp) > 12 and int(resp) < 20 :
death_rate_resp=0.0
elif int(resp) > 20 and int(resp) < 25 :
death_rate_resp=5.0
elif int(resp) > 25 and int(resp) < 27 :
death_rate_resp=9.0
elif int(resp) > 27 :
death_rate_resp=10.0
#print("The death_rate_resp is " + str(death_rate_resp))
if int(spo2) < 90:
death_rate_spo=7.0
elif int(spo2) >90 and int(spo2) <100:
death_rate_spo=0.0
else:
death_rate_spo=7.0
#print("The death_rate_spo is " + str(death_rate_spo))
if int(temp) < 97:
death_rate_temp = 1.0
elif int(temp) > 97 and int(temp) < 97.7:
death_rate_temp = 0.0
elif int(temp) > 97.7 and int(temp) < 100.5:
death_rate_temp = 3.0
else:
death_rate_temp = 6.0
#print("The death_rate_temp is " + str(death_rate_temp))
if int(cough) == 0:
death_rate_cough = 0.0
elif int(cough) == 1:
death_rate_cough = 2.0
elif int(cough) == 2:
death_rate_cough = 5.0
else:
death_rate_cough = 8.0
#print("The death_rate_cough is " + str(death_rate_cough))
if int(sorethroat) == 0:
death_rate_throat = 0.0
elif int(sorethroat) == 1:
death_rate_throat = 2.0
elif int(sorethroat) == 2:
death_rate_throat = 5.0
else:
death_rate_throat = 8.0
#print("The death_rate_throat is " + str(death_rate_throat))
if int(breathing) == 0:
death_rate_breath = 0.0
elif int(breathing) == 1:
death_rate_breath = 2.0
elif int(breathing) == 2:
death_rate_breath = 5.0
else:
death_rate_breath = 8.0
if int(tired) == 0:
death_rate_tired = 0.0
elif int(tired) == 1:
death_rate_tired = 2.0
elif int(tired) == 2:
death_rate_tired = 5.0
else:
death_rate_tired = 8.0
score = death_rate_tired+death_rate_breath+death_rate_throat+death_rate_cough+death_rate_temp+death_rate_spo+death_rate_resp+death_rate_heart+death_rate_gender+death_rate_age+bmi_weight+death_rate_preconding+death_rate_preg
print(score)
db.child("Appointments/").child(user_index).update({"score":score})
db.child("Consultation/").child(user_index).update({"gscore":score})
return (1)
api.add_resource(testabusive, '/score/<user_index>')
if __name__ == '__main__':
app.run()
```
#### File: jithinsunnyofficial/TeleVital-MicrosoftHack/televitalKiosk.py
```python
from application.face_recognition import Recog
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
import threading
import argparse
import datetime
import imutils
import time
import cv2
from tts import convert_and_play
from stt import generate_text
import concurrent.futures
from flow import flow
from playsound import playsound
from dialogflow_api import analyze_text
from keras.models import load_model
from label_detect import classify_face
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "Cascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX
names = ['Joel', 'Farhan', 'Jithin']
id = 0 #facearray index
# initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
outputFrame = None
lock = threading.Lock()
# initialize a flask object
app = Flask(__name__)
# initialize the video stream and allow the camera sensor to
# warmup
#vs = VideoStream(usePiCamera=1).start()
vs = cv2.VideoCapture(0)
vs.set(3, 640) # set video width
vs.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1*vs.get(3)
minH = 0.1*vs.get(4)
time.sleep(2.0)
def apicaller(text, session_id):
final_text = analyze_text(text, session_id)
if final_text != 'end':
convert_and_play(final_text)
apicaller(generate_text(), session_id)  # assumes generate_text() captures and returns the next spoken input as text
def detect_face(frameCount):
# grab global references to the video stream, output frame, and
# lock variables
global vs, outputFrame, lock
# initialize the motion detector and the total number of frames
# read thus far
md = Recog(accumWeight=0.1)
total = 0
# loop over frames from the video stream
frame_counter = 0
face_frame_counter = 0
previous_state = 0
previous_id = 'unknown'
counter_id = 0
flag = False
mainFlag = False
flagTemp = 0
finalFlag = True
while True:
ret, frame = vs.read()
height,width = frame.shape[:2]
label = classify_face(frame)
if(label == 'with_mask'):
print("Mask detected")
else:
print("No mask detected")
cv2.putText(frame,str(label),(100,height-20), font, 1,(255,255,255),1,cv2.LINE_AA)
try:
x = th.is_alive()
if x == False and flagTemp == 0:
flagTemp = 1
frame_counter = 0
face_frame_counter = 0
previous_state = 0
previous_id = 'unknown'
counter_id = 0
flag = False
mainFlag = False
except:
pass
if finalFlag:
# read the next frame from the video stream, resize it,
# convert the frame to grayscale, and blur it
frame_counter+=1
msg = ""
#frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# grab the current timestamp and draw it on the frame
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime(
"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# if the total number of frames has reached a sufficient
# number to construct a reasonable background model, then
# continue to process the frame
if total > frameCount:
# detect motion in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(int(minW), int(minH)),
)
#print(faces)
if len(faces)>0:
# print("face detected")
if previous_state == 1:
face_frame_counter+=1
previous_state = 1
else:
previous_state = 0
face_frame_counter = 0
if face_frame_counter>70:
#start seq.
print('face recognized')
for(x,y,w,h) in faces:
cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)
id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
if counter_id>10:
print(previous_id)
tempstr = 'Hello '+names[id]
finalFlag = False
mainFlag = True
flagTemp = 0
a = 0
th = threading.Thread(target = flow, args= (names[id],))
th.start()
#print(th.is_alive())
flag = True
if(confidence<100):
id = names[id]
if (id==previous_id):
counter_id+=1
previous_id = id
else:
id = 'unknown'
previous_id = id
counter_id = 0
#face_frame_counter = 0
total += 1
# acquire the lock, set the output frame, and release the
# lock
with lock:
outputFrame = frame.copy()
@app.route("/")
def index():
t = threading.Thread(target=detect_face, args=(
args["frame_count"],))
t.daemon = True
t.start()
# return the rendered template
return render_template("index.html")
def generate():
# grab global references to the output frame and lock variables
global outputFrame, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if outputFrame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(generate(),
mimetype = "multipart/x-mixed-replace; boundary=frame")
# check to see if this is the main thread of execution
if __name__ == '__main__':
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--ip", type=str, default='127.0.0.1',
help="ip address of the device")
ap.add_argument("-o", "--port", type=int, default=8000,
help="ephemeral port number of the server (1024 to 65535)")
ap.add_argument("-f", "--frame-count", type=int, default=32,
help="# of frames used to construct the background model")
args = vars(ap.parse_args())
# start a thread that will perform motion detection
# start the flask app
app.run(host=args["ip"], port=args["port"], debug=True,
threaded=True, use_reloader=False)
# release the video stream pointer
vs.release()
``` |
{
"source": "jithinvijayan007/django-sendgrid-v5",
"score": 2
} |
#### File: django-sendgrid-v5/test/test_backend_init.py
```python
from django.core.exceptions import ImproperlyConfigured
from django.test import override_settings
from django.test.testcases import SimpleTestCase
from sendgrid_backend.mail import SendgridBackend
class TestBackendInit(SimpleTestCase):
@override_settings(EMAIL_BACKEND="sendgrid_backend.SendgridBackend", SENDGRID_API_KEY=None)
def test_init_no_setting(self):
backend = SendgridBackend(api_key="DUMMY_API_KEY")
with self.assertRaises(ImproperlyConfigured):
backend = SendgridBackend() # noqa
``` |
{
"source": "JithLord/BlackOutPoetry",
"score": 3
} |
#### File: JithLord/BlackOutPoetry/main.py
```python
import tkinter
from tkinter import filedialog
from PIL import ImageTk,Image
from code1 import encode,decode
from code2 import encodeImage,decodeImage
window = tkinter.Tk()
window.title("Blackout Poetry")
w = window.winfo_screenwidth()
h = window.winfo_screenheight()
window.minsize(w,h)
text = ""
text_label = ""
image = [[]]
img = [[]]
img2 = [[]]
up_image = [[]]
txt_box=[]
txt_lavel=""
file=""
file2=""
canvas = tkinter.Canvas(window,width=w,height=h,highlightthickness=0)
canvas.place(x=0,y=0)
back=ImageTk.PhotoImage(Image.open('back1.png').resize((w,h), Image.ANTIALIAS))
canvas.create_image(int(w/2),int(h/2), anchor="center", image=back)
canvas.back=back
canvas1 = tkinter.Canvas(window,width=480, height=480,bg="gray50")
canvas1.place(x=300,y=150)
canvas1_label_bg = '#%02x%02x%02x' % (44,37,31)
canvas1_label = tkinter.Label(window, text='Original Image', fg='white', bg=canvas1_label_bg, font=(None,12))
canvas1_label.place(x=500,y=120)
canvas2 = tkinter.Canvas(window,width=480, height=480,bg="gray50")
canvas2.place(x=850,y=150)
canvas2_label_bg = '#%02x%02x%02x' % (46,38,35)
canvas2_label = tkinter.Label(window, text='Updated Image', fg='white', bg=canvas2_label_bg, font=(None,12))
canvas2_label.place(x=1050,y=120)
#adding image buttons :
upload_image_button=ImageTk.PhotoImage(Image.open('Icons/upload.png').resize((175,40), Image.ANTIALIAS)) #1.2
encode_image_button=ImageTk.PhotoImage(Image.open('Icons/encode.png').resize((118,33), Image.ANTIALIAS)) #1.3
decode_image_button=ImageTk.PhotoImage(Image.open('Icons/decode.png').resize((118,33), Image.ANTIALIAS)) #1.3
proceed_image_button=ImageTk.PhotoImage(Image.open('Icons/proceed.png').resize((128,38), Image.ANTIALIAS)) #1.2
clear_text_image_button=ImageTk.PhotoImage(Image.open('Icons/clear_text.png').resize((93,29), Image.ANTIALIAS))
clear_images_image_button=ImageTk.PhotoImage(Image.open('Icons/clear_images.png').resize((110,29), Image.ANTIALIAS))
save_image_button=ImageTk.PhotoImage(Image.open('Icons/save.png').resize((106,38), Image.ANTIALIAS))
exit_image_button=ImageTk.PhotoImage(Image.open('Icons/exit.png').resize((87,39), Image.ANTIALIAS)) #1.3
encode_set_image=ImageTk.PhotoImage(Image.open('Icons/encode_set.png').resize((118,33), Image.ANTIALIAS)) #1.3
decode_set_image=ImageTk.PhotoImage(Image.open('Icons/decode_set.png').resize((118,33), Image.ANTIALIAS)) #1.3
upload_image2_button=ImageTk.PhotoImage(Image.open('Icons/upload.png').resize((140,33), Image.ANTIALIAS)) #1.5
enter_text_button=ImageTk.PhotoImage(Image.open('Icons/text.png').resize((76,32), Image.ANTIALIAS)) #1.5
#adding upload image box
def upload_image() :
global file
file = filedialog.askopenfilename(initialdir = "/",
title="Select An Image",
filetype=(("PNG","*.png"),("JPEG","*.jpg"),("Bitmap","*.bmp")))
label = tkinter.Label(window,text=file[:14]+str("...")+file[-12:],font="Helvetica 8 italic")
label.place(x=30,y=200)
disp_image(file)
uploadbg = '#%02x%02x%02x' % (50, 39, 35)
upload = tkinter.Button(window,
image=upload_image_button,
command=upload_image,
borderwidth=0,
bg=uploadbg,
cursor='hand2',
activebackground=uploadbg,
highlightthickness=0)
upload.place(x=30,y=150)
#displaying uploaded image
def disp_image(file) :
global image
global img
img = Image.open(file)
img = img.convert('RGB')
r_img = img #resized image for canvas
width,height = img.size
ratio = width/height
if (ratio>1) : # landscape image
width = 480 # max possible width in canvas
height = (int)(width/ratio)
r_img = img.resize((width,height), Image.ANTIALIAS)
elif (ratio<1) : #portrait image
height = 480 #max possible height in canvas
width = (int)(height*ratio)
r_img = img.resize((width,height), Image.ANTIALIAS)
else : #square image
height = 480
width = 480
r_img = img.resize((width,height), Image.ANTIALIAS)
image = ImageTk.PhotoImage(r_img) #converting to image type
canvas1.create_image(240,240,image=image)
canvas1.image = image
# radio buttons and functions
c = tkinter.IntVar()
d = tkinter.IntVar()
def clicked():
def clicked_2() :
def extract_data() :
#calling the functions
global up_image
global modified_image # shared with save_result(), which reads the processed image
if (c.get()==1 and d.get()==1) : # encoding with text
global text
text = txt_box.get()
#txt_box.delete(0,'end') #clear content once entered
#txt_button['state'] = tkinter.DISABLED #freeze entry button
text_label = tkinter.Label(window,text=text,font="Times 12")
modified_image = encode(img,text) #modified image is only attributes
elif (c.get()==2 and d.get()==1) : # decoding with text
text,modified_image = decode(img)
decode_text_bg = '#%02x%02x%02x' % (97,80,70)
decoded_text_label = tkinter.Label(window,text="Decoded Text : "+text,fg='white',bg=decode_text_bg,font=("Helvatica","12","bold"))
decoded_text_label.place(x=850,y=650)
elif (c.get()==1 and d.get()==2) : # encoding with images
modified_image = encodeImage(img,img2)
elif (c.get()==2 and d.get()==2) : # decoding with images
modified_image = decodeImage(img)
#modified_image = crop_black(modified_image)
update_image(modified_image) # display updated image
if (d.get()==1) :
if (c.get() == 1) : # enter text box only for encode
global txt_label
global txt_box
message = "Enter Text :" #enter text to encode
txt_box = tkinter.Entry(window)
txt_label_bg = '#%02x%02x%02x' % (56,47,40)
txt_label = tkinter.Label(window,text="(alphanumeric values and spaces)",font=(None,7),fg='white',bg=txt_label_bg)
txt_box.place (x=130,y=335)
txt_label.place (x=130,y=360)
elif (d.get()==2 and c.get()==1) : #upload image only for encode
global file2
global img2
file2 = filedialog.askopenfilename(initialdir = "/",
title="Select An Image",
filetype=(("PNG","*.png"),("JPEG","*.jpg"),("Bitmap","*.bmp")))
img2 = Image.open(file2)
img2 = img2.convert('RGB')
proceedbg = '#%02x%02x%02x' % (70,58,51)
proceed_button = tkinter.Button(window,
image=proceed_image_button,
command=extract_data,
cursor='hand2',
bg=proceedbg,
activebackground=proceedbg,
border=0,
highlightthickness=0)
proceed_button.place(x=20,y=480)
rad3 = tkinter.Radiobutton(window,
image=enter_text_button,
variable=d,
value=1,
indicator=0, #to change style
background=encodebg,
border=0,
command=clicked_2,
cursor='hand2',
activebackground=encodebg,
highlightthickness=0)
rad4 = tkinter.Radiobutton(window,
image=upload_image2_button,
variable=d,
value=2,
indicator=0, #to change style
background=encodebg,
border=0,
command=clicked_2,
cursor='hand2',
activebackground=encodebg,
highlightthickness=0)
rad3.place (x=20,y=330)
rad4.place (x=20,y=380)
messagebg = '#%02x%02x%02x' % (54,45,37)
message = tkinter.Label(window,text="Select one of the following :",font="Helvetica 10 italic",fg='white',bg=messagebg)
message.place(x=20,y=290)
decodebg = '#%02x%02x%02x' % (54,45,37)
encodebg = '#%02x%02x%02x' % (52,41,35)
rad1 = tkinter.Radiobutton(window,
image=encode_image_button,
variable=c,
value=1,
indicator=0, #to change style
background=encodebg,
border=0,
command=clicked,
cursor='hand2',
activebackground=encodebg,
highlightthickness=0,
selectimage=encode_set_image)
rad2 = tkinter.Radiobutton(window,
image=decode_image_button,
variable=c,
value=2,
indicator=0, #to change style
background=decodebg,
border=0,
command=clicked,
cursor='hand2',
activebackground=decodebg,
highlightthickness=0,
selectimage=decode_set_image)
rad1.place(x=20, y=240)
rad2.place(x=155, y=240)
def update_image(modified_image) : #display updated image
width,height = modified_image.size
ratio = width/height
r_img = modified_image
if (ratio>1) : # landscape image
width = 480 # max possible width in canvas
height = (int)(width/ratio)
#print(width,height)
r_img = modified_image.resize((width,height), Image.ANTIALIAS)
elif (ratio<1): #portrait image
height = 480 #max possible height in canvas
width = (int)(height*ratio)
#print(width,height)
r_img = modified_image.resize((width,height), Image.ANTIALIAS)
else : #square image
height = 480
width = 480
r_img = modified_image.resize((width,height), Image.ANTIALIAS)
up_image = ImageTk.PhotoImage(r_img) #converting to image type
canvas2.create_image(240,240, image=up_image)
canvas2.up_image=up_image
# saving the result
def save_result() :
x=file.rfind('/') # last occurance of / in file name
y=file.rfind('.') # last occurance of . in file name
image_name=file[x+1:y:1] # to extract only name of the image and not entire path
result_img = modified_image.save("Saved Images/"+image_name+" result.png")
savebg = '#%02x%02x%02x' % (100,92,80)
save = tkinter.Button(window,
image=save_image_button,
command=save_result,
cursor='hand2',
border=0,
bg=savebg,
activebackground=savebg,
highlightthickness=0)
save.place(x=1225,y=650)
#reset and exit buttons
def clear_img() :
canvas1.delete("all")
canvas2.delete("all")
def clear_text() :
text=''
txt_box.delete(0,'end')
clearimgbg = '#%02x%02x%02x' % (70,57,51)
cleartextbg = '#%02x%02x%02x' % (65,52,45)
quitbg = '#%02x%02x%02x' % (80,67,61)
clear_images = tkinter.Button(window,image=clear_images_image_button,command=clear_img,border=0,activebackground=clearimgbg,bg=clearimgbg,highlightthickness=0)
clear_images.place(x=20,y=580)
clear_text = tkinter.Button(window,image=clear_text_image_button,command=clear_text,border=0,activebackground=cleartextbg,bg=cleartextbg,highlightthickness=0)
clear_text.place(x=150,y=580)
quit_button = tkinter.Button(window,
image=exit_image_button,
command=window.quit,
border=0,
highlightthickness=0,
bg=quitbg,
activebackground=quitbg)
quit_button.place(x=20,y=630)
window.mainloop()
``` |
{
"source": "JithLord/Jith",
"score": 4
} |
#### File: JithLord/Jith/Main.py
```python
import pygame, sys, random, time
from pygame.locals import *
# BLOCK_SIZE is the size of each cell in the grid. Used for position calculations.
BLOCK_SIZE = 600//10
# RGB Colors for players - Black, Red, Green, Blue
COLORS = ((0, 0, 0), (255, 0, 0), (0, 100, 0), (0, 0, 255))
# Snakes' and ladders' positions where key is from and value is to.
SNAKES = {93:28, 84:41, 59:38, 55:12, 23:3}
LADDERS = {4:36, 10:49, 26:85, 40:79, 70:92}
# Sample players
players = [ {"name": "Player 1", "pos": 1, "color": COLORS[0], "bot": True},
{"name": "Player 2", "pos": 1, "color": COLORS[1], "bot": True},
{"name": "Player 3", "pos": 1, "color": COLORS[2], "bot": False},
{"name": "Player 4", "pos": 1, "color": COLORS[3], "bot": False} ]
# game_message holds value of game status to display
game_message = ""
# If game is won or not
game_won = False
# Current player number (index of player in players)
player_index = 0
# Used to get input (space) when required
user_input = None
wait_for_user_input = False
def convert_num_to_xy(num, offset=(0,0)):
'''This function converts a given cell number into its (x,y) coordinates. '''
if num%10 == 0:
y = 10 - num//10
else:
y = 10 - num//10 - 1
if y % 2 == 0:
if num%10 == 0:
x = 10 - num%10 - 10
else:
x = 10 - num%10
else:
if num%10 == 0:
x = num%10 - 1 + 10
else:
x = num%10 - 1
# print(num, (x,y))
return (x*BLOCK_SIZE+offset[0], y*BLOCK_SIZE+offset[1])
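# Worked example (illustration only, offset (0,0)): with BLOCK_SIZE = 60, cell 1 maps to
# grid (0, 9) -> pixel (0, 540) at the bottom-left, cell 10 to (9, 9), cell 11 to (9, 8)
# on the row above (the path snakes back), and cell 100 to (0, 0) at the top-left.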
dice_num=0
def draw_players_on_map():
'''This function runs every frame and redraws the board and all players on it.'''
screen.fill((65,186,235)) # Blue
# Draw board background
board_img = pygame.image.load("board3.jpg")
board_img = pygame.transform.scale(board_img, (600, 600))
screen.blit(board_img, (0,0))
# Draw logo
logo_img = pygame.image.load("logo2.png")
logo_img = pygame.transform.scale(logo_img, (240, 120))
screen.blit(logo_img, (600,6))
# Draw players' circle dots
for player in players:
index = players.index(player)
if index == 0:
offset = (BLOCK_SIZE//4, BLOCK_SIZE//4)
elif index == 1:
offset = (BLOCK_SIZE*3//4, BLOCK_SIZE//4)
elif index == 2:
offset = (BLOCK_SIZE//4, BLOCK_SIZE*3//4)
elif index == 3:
offset = (BLOCK_SIZE*3//4, BLOCK_SIZE*3//4)
pygame.draw.circle(screen, player["color"], convert_num_to_xy(player["pos"], offset), BLOCK_SIZE//4, 0)
def draw_scoreboard():
'''This function redraws the right sidebar of the game, including scoreboard and text.'''
global game_won
pygame.draw.rect(screen, (65,186,235), (600,150,300,260)) # Draw a blue rectangle to "clear" the right side.
headingtext = heading_head.render("Players", 1, (0,0,0)) # Draw the word "Players"
screen.blit(headingtext, (675, 125))
for player in players: # For each player:
if player["pos"] == 100:
game_won = True
index = players.index(player)
score = player["name"] + ": " + str(player["pos"])
scoretext = score_font.render(score, 1, (0,0,0)) # Draw player name and score
screen.blit(scoretext, (650, 200+50*index))
pygame.draw.circle(screen, player["color"], (638, 210+50*index), 6, 0) # Draw the small color circle dot
a=players[player_index]["name"]+"'s turn"
playerturntext = heading_font.render(a.center(30," "), 1, (0,0,0)) # Draw player name of who should play
screen.blit(playerturntext, (620, 500))
b=game_message
gamemsgtext = score_font.render(b.center(40," "), 1, (0,0,0)) # Draw game message
screen.blit(gamemsgtext, (620, 540))
def draw_die():
'''Draw die.'''
if dice_num:
die_img = pygame.image.load("die/die_"+str(dice_num)+".jpg")
die_img = pygame.transform.scale(die_img, (75, 75))
screen.blit(die_img, (712,410))
def play_turn(player_index):
'''This function actually "plays" and rolls the die.'''
global user_input, wait_for_user_input, game_message, dice_num
if players[player_index]["bot"]: # If player is a computer:
print("play_turn", player_index, "is bot. Playing.")
dice_num = random.randint(1,6) # Generate random number
game_message = players[player_index]["name"] + " (bot) got " + str(dice_num)
else: # If player is human:
print("play_turn", player_index, "is user.")
if user_input: # Check if player has pressed space
print("found user input. Setting", user_input, "for", player_index)
dice_num = random.randint(1,6) # Roll a die
user_input = None # Reset player input
game_message = players[player_index]["name"] + " played " + str(dice_num)
else: # If no input from player:
print("no user input. Setting wait for", player_index)
wait_for_user_input = True # Keep waiting, stop function midway here
return
draw_die()
# Check if player crosses 100
if players[player_index]["pos"]+dice_num > 100:
game_message = "Can only proceed with " + str(100-players[player_index]["pos"]) + ", not " + str(dice_num)
return # Do not increment player's position
# Check if player won the game
elif players[player_index]["pos"]+dice_num == 100:
game_message = players[player_index]["name"] + " WON!"
players[player_index]["pos"] += dice_num # Add die number to player's position
print("new pos for", player_index, "is", players[player_index]["pos"])
def check_and_teleport(player_index):
'''Check if a player landed at a snake's head or ladder foot. Teleport when necessary.'''
global game_message
# Check for snakes
if players[player_index]["pos"] in SNAKES:
print(players[player_index]["name"], "was swallowed by a snake :(")
game_message = players[player_index]["name"] + " was swallowed by a snake :("
players[player_index]["pos"] = SNAKES[players[player_index]["pos"]]
# Check for ladders
elif players[player_index]["pos"] in LADDERS:
print(players[player_index]["name"], "climbed a ladder :)")
game_message = players[player_index]["name"] + " climbed a ladder :)"
players[player_index]["pos"] = LADDERS[players[player_index]["pos"]]
#Ask for player details
players = []
try:
n = int(input("Number of players: "))
except ValueError:
print("Invalid input. Exiting.")
exit()
if n not in range(2,5):
print("Number of players must be 2-4.")
exit()
for i in range(n):
name = input("Name: ")
b = input("Computer Bot? (yes/no): ").lower()
if b in ("yes", "y", "true","t"):
bot=True
elif b in ("no", "n", "false","f"):
bot=False
else:
print("Invalid input. Exiting.")
exit()
players.append({"name": name, "bot": bot, "pos": 1, "color": COLORS[i] })
# Initialize pygame
pygame.init()
# Create fonts to use
heading_head = pygame.font.SysFont("bauhaus93",34)
heading_font = pygame.font.SysFont("comicsansms", 22)
score_font = pygame.font.SysFont("comicsansms", 18)
# Create a new screen of size: 900x600
screen = pygame.display.set_mode([900,600])
pygame.display.set_caption("Snake And Ladder")
# Start a "clock" for the game
fpsClock = pygame.time.Clock()
# Main game loop
while True:
for i in pygame.event.get():
if i.type == pygame.QUIT: # If window close, exit properly
pygame.quit()
sys.exit()
if i.type == pygame.KEYDOWN: # If user input:
key = pygame.key.name(i.key)
print(key)
if key == 'space': # If key is space:
user_input = True
wait_for_user_input = False
player_index -= 1
draw_players_on_map() # Draw players on map
draw_scoreboard() # Draw scoreboard
# Do not go ahead with game if game expects input, but none is given
if wait_for_user_input and not user_input:
continue
play_turn(player_index) # Play turn
draw_scoreboard() # Draw scoreboard
draw_die()
pygame.display.update() # Apply all changes made to screen
# Stop if game won
if game_won:
time.sleep(6)
pygame.quit()
sys.exit()
time.sleep(0.7) # Wait so people can see what's happening
check_and_teleport(player_index) # Check for snakes and ladders
# Go to next player. Cycle over all players.
game_message = "" # Reset game message
if player_index == len(players)-1:
player_index = 0
else:
player_index += 1
pygame.display.update() # Apply all changes made to screen
fpsClock.tick(1)
``` |
{
"source": "Jithra/JithBot",
"score": 2
} |
#### File: JithBot/plugins/Echo.py
```python
from util import Events
import discord
import re
class Plugin(object):
def __init__(self, pm):
self.pm = pm
@staticmethod
def register_events():
return [Events.Command('echo', desc = 'Make JithBot say something.')]
async def handle_command(self, message_object, command, args):
if command == 'echo':
await self.echo_test(message_object, command)
async def echo_test(self, message_object,command):
message = re.split(command, message_object.content)
message = re.split('`', message[1])
channel = re.sub(r'\W', '', message[0])
await self.pm.client.delete_message(message_object)
await self.pm.client.send_message(discord.Object(id = str(channel)), str(message[1]))
return()
```
#### File: JithBot/plugins/MusicBot.py
```python
from util import Events
from util.Ranks import Ranks
from discord import Channel
class Plugin(object):
def __init__(self, pm):
self.pm = pm
self.player = None
@staticmethod
def register_events():
return [Events.Command("play", desc="Play a song in voice chat"),
Events.Command("stop", Ranks.Mod, desc="Stop the Music bot")]
async def handle_command(self, message_object, command, args):
if command == "play":
await self.play(message_object, args[1])
if command == "stop":
await self.stop(message_object)
async def play(self, message_object, url):
await self.pm.client.delete_message(message_object)
# Kill all playing connections before starting a new one
if self.player is not None:
self.player.stop()
# Check if the user requesting is in a voice channel
channel = message_object.author.voice.voice_channel
if channel is not None and type(channel) is Channel:
# Disconnect if we're connected without playing anything.
if self.player is None:
chan = self.pm.client.voice_client_in(message_object.server)
if chan is not None:
await chan.disconnect()
# Get current joined channel, if not available join user channel
if len(self.pm.client.voice_clients) == 0:
voice = await self.pm.client.join_voice_channel(channel)
else:
voice = self.pm.client.voice_client_in(message_object.server)
self.player = await voice.create_ytdl_player(url, ytdl_options={"default_search": "ytsearch"})
self.player.start()
# Format stream duration
m, s = divmod(self.player.duration, 60)
h, m = divmod(m, 60)
if h == 0:
duration = str(m) + ":" + str(s)
else:
duration = str(h) + ":" + str(m) + ":" + str(s)
await self.pm.client.send_message(message_object.channel, "Now playing **" + self.player.title +
"** (" + duration + ") in " + channel.name)
else:
await self.pm.client.send_message(message_object.channel, message_object.author.mention +
" please join a voice channel in order to start the bot!")
async def stop(self, message_object):
# Kill all playing connections
if self.player is not None:
self.player.stop()
# Disconnect from voice
chan = self.pm.client.voice_client_in(message_object.server)
if chan is not None:
await chan.disconnect()
```
#### File: JithBot/plugins/rate.py
```python
from util import Events
import random
class Plugin(object):
def __init__(self, pm):
self.pm = pm
@staticmethod
def register_events():
return [Events.Command("rate",desc="Rate someone or something between 0 and 100")]
async def handle_command(self, message_object, command, args):
if command == "rate":
await self.rate(message_object, args[1])
async def rate(self, message_object, user):
'''
# totally not rigged or something
def isDevMentioned():
for u in message_object.mentions:
if u.name == "Theraga" or u.name == "Dynista":
return True
return False
if user == "theraga" or user == "Theraga" or user == "dynista" or user == "Dynista" or isDevMentioned():
await self.pm.client.send_message(message_object.channel, "I would rate **" + user + "** 100.00/100")
else:
'''
number = round(random.uniform(1, 100), 2)
print(message_object.mentions)
await self.pm.client.send_message(message_object.channel,
"I would rate " + "**" + user + "** " + str(number) + "/100")
```
#### File: JithBot/plugins/sqlservlog.py
```python
from PluginManager import PluginManager
from util import Jithsql
from util import Events
import discord
import re
import pyodbc
class Plugin(object):
def __init__(self, pm):
self.pm = pm
@staticmethod
def register_events():
return [Events.Message("logging")]
async def handle_message(self, message_object):
if message_object.content.startswith(self.pm.botPreferences.commandPrefix) is False:
#format server name, user name, and message contents to make them all SQL friendly.
ServerName = Jithsql.sanitize_inputs(message_object, 'server')
UserName = Jithsql.sanitize_inputs(message_object, 'author')
UserMessage = Jithsql.sanitize_inputs(message_object, 'content')
ServerID = str(message_object.server.id)
UserID = str(message_object.author.id)
UserChannel = '#' + str(message_object.channel)
sql = "EXEC insert_into_table '" + ServerName + "','" + ServerID + "','" + UserName + "','" + UserMessage + "','" + UserChannel + "','" + UserID + "';"
connection = Jithsql.parse_connection()
cursor = connection.cursor()
cursor.execute(sql)
cursor.execute("COMMIT;")
del cursor
connection.close()
``` |
{
"source": "jithuraju1290/gotmtool",
"score": 2
} |
#### File: jithuraju1290/gotmtool/gotm_build.py
```python
import os
from gotmtool.model import Model
def main():
"""Build GOTM
"""
# create a model instance for build
m = Model(
# an arbitrary name, not used in the build step
name='build',
# if necessary, change to the GOTM environment file set up
# by gotm_env_init.py
environ='.gotm_env.yaml',
)
# build the model
print('-'*64)
print('Building GOTM source code')
print('-'*64)
m.build(
# do a clean build
clean=True,
# build with CVMix
use_cvmix=True,
# build with FABM
use_fabm=False,
# build with STIM
use_stim=False,
# build with NetCDF
use_netcdf=True,
# output extra output for turbulence variables
extra_output=False,
# debug mode
debug=False,
)
if __name__ == "__main__":
main()
```
#### File: gotmtool/gotmtool/stokesdrift.py
```python
import numpy as np
from scipy import special
from .constants import gravity
def stokes_drift_dhh85(
z,
wind_speed,
wave_age,
omega_min=0.1,
omega_max=20.,
n_omega=1000,
):
"""Compute Stokes drift from Donelan et al., 1985 spectrum
:z: (array-like) depth < 0 (m)
:wind_speed: (float) 10-meter wind speed (m/s)
:wave_age: (float) wave age (unitless)
:omega_min: (float) minimum frequency (2*pi*f) for integration
:omega_max: (float) maximum frequency (2*pi*f) for integration
:n_omega: (int) number of frequency bins for integration
:returns: (array-like) Stokes drift at z
"""
omega = np.linspace(omega_min, omega_max, n_omega)
domega = omega[1]-omega[0]
z = np.array(z)
dz, _ = _get_grid(z)
us = np.zeros_like(z)
for i in np.arange(n_omega):
us += domega * _stokes_drift_kernel_dhh85(omega[i],z,dz,wind_speed,wave_age)
return us
def _stokes_drift_kernel_dhh85(
omega,
z,
dz,
wind_speed,
wave_age,
):
"""Kernel of the Stokese
:omega: (float) frequency (2*pi*f)
:z: (array-like) depth < 0 (m)
:dz: (array-like) layer thickness (m)
:wind_speed: (float) 10-meter wind speed (m/s)
:wave_age: (float) wave age (unitless)
:return: (array-like) Stokes drift kernel at omega and z
"""
iwa = 1./wave_age
omega_p = gravity * iwa / wind_speed
alpha = 0.006 * iwa**(0.55)
sigma = 0.08 * (1. + 4. * wave_age**3)
if iwa <= 1.:
gamma1 = 1.7
else:
gamma1 = 1.7 + 6. * np.log10(iwa)
gamma2 = np.exp(-0.5 * (omega - omega_p)**2 / sigma**2 / omega_p**2)
spec = alpha * gravity**2 / (omega_p * omega**4) * np.exp(-(omega_p/omega)**4) * gamma1**gamma2
kdz = omega**2 * dz / gravity
zfilter = np.where(kdz < 10., np.sinh(kdz)/kdz, 1.)
return 2. * (spec * omega**3) * zfilter * np.exp(2. * omega**2 * z / gravity) / gravity
def stokes_drift_spec(
z,
spec,
xcmp,
ycmp,
freq,
dfreq,
tail_fm5=False,
):
"""Compute Stokes drift profile from wave spectrum
:z: (array-like) depth < 0 (m)
:spec: (array-like) band wave energy density (m^2 s)
:xcmp: (array-like) fraction of x-component (0-1)
:ycmp: (array-like) fraction of y-component (0-1)
:freq: (array-like) band center wave frequency (Hz)
:dfreq: (array-like) band width of wave frequency (Hz)
:tail_fm5: (bool, optional) add contribution from a f^-5 tail
:returns: (array-like) Stokes drift at z (x- and y-components)
"""
z = np.array(z)
spec = np.array(spec)
xcmp = np.array(xcmp)
ycmp = np.array(ycmp)
freq = np.array(freq)
dfreq = np.array(dfreq)
us = np.zeros_like(z)
vs = np.zeros_like(z)
nfreq = freq.size
nz = z.size
const = 8. * np.pi**2 / gravity
factor2 = const * freq**2
factor = 2. * np.pi * freq *factor2 * dfreq
# cutoff frequency
freqc = freq[-1] + 0.5 * dfreq[-1]
dfreqc = dfreq[-1]
# get vertical grid
dz, zi = _get_grid(z)
# Stokes drift averaged over the grid cell
for i in np.arange(nz):
for j in np.arange(nfreq):
kdz = factor2[j] * dz[i] / 2.
if kdz < 100.:
tmp = np.sinh(kdz) / kdz * factor[j] * spec[j] * np.exp(factor2[j]*z[i])
else:
tmp = factor[j] * spec[j] * np.exp(factor2[j]*z[i])
us[i] += tmp * xcmp[j]
vs[i] += tmp * ycmp[j]
# contribution from a f^-5 tail
if tail_fm5:
us_t, vs_t = stokes_drift_tail_fm5(z, spec[-1], xcmp[-1], ycmp[-1], freqc)
us += us_t
vs += vs_t
return us, vs
def stokes_drift_tail_fm5(
z,
spec,
xcmp,
ycmp,
freq,
):
"""Contribution of a f^-5 spectral tail to Stokes drift
see <NAME> Harcourt and D'Asaro 2008
:z: (array-like) depth < 0 (m)
:spec: (float)
:xcmp: (float) fraction of x-component (0-1)
:ycmp: (float) fraction of y-component (0-1)
:freq: (float) cutoff frequency
:returns: (array-like) Stokes drift at z (x- and y-components)
"""
# initialize arrays
nz = z.size
us = np.zeros_like(z)
vs = np.zeros_like(z)
# constants
const = 8. * np.pi**2 / gravity
# get vertical grid
dz, zi = _get_grid(z)
for i in np.arange(nz):
aplus = np.maximum(1.e-8, -const * freq**2 * zi[i])
aminus = -const * freq**2 * zi[i+1]
iplus = 2. * aplus / 3. * (np.sqrt(np.pi * aplus) * special.erfc(np.sqrt(aplus)) - (1. - 0.5 / aplus) * np.exp(-aplus))
iminus = 2. * aminus / 3. * (np.sqrt(np.pi * aminus) * special.erfc(np.sqrt(aminus)) - (1. - 0.5 / aminus) * np.exp(-aminus))
tmp = 2. * np.pi * freq**2 / dz[i] * spec * (iplus - iminus)
us[i] = tmp * xcmp
vs[i] = tmp * ycmp
return us, vs
def _get_grid(z):
# get vertical grid thickness
z = np.array(z)
nz = z.size
if nz == 1:
dz = 1.e6 # an arbitrarily large number
zi = z
else:
dz = np.zeros_like(z)
zi = np.zeros(nz+1)
dz[1:-1] = 0.5 * (z[0:-2] - z[2:])
dz[0] = -z[0] + 0.5 * (z[0] - z[1])
dz[-1] = dz[-2]
zi[1:] = -np.cumsum(dz)
return dz, zi
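# Minimal usage sketch (an addition, not part of the original module): evaluate the
# DHH85 Stokes drift for a 10 m/s wind and a wave age of 1.2 at a few depths.
if __name__ == '__main__':
    example_z = -np.linspace(0.5, 10., 5)   # depths (m), surface-most point first
    print(stokes_drift_dhh85(example_z, wind_speed=10., wave_age=1.2))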
``` |
{
"source": "jithurjacob/Recommendation-Engine-Algorithms",
"score": 2
} |
#### File: models/DeepLearning/DL_recommender.py
```python
import json
# import pickle
"""
hm_epochs = 150
batch_size = 500
num_steps = 10
state_size = 200
vocab_size = 11207
# Placeholders
x = tf.placeholder(tf.int32, [None, num_steps]) # [batch_size, num_steps]
seqlen = tf.placeholder(tf.int32, [None])
y = tf.placeholder(tf.int32, [None, num_steps])
model_path = "./models/DeepLearning/model_data/"
model_path = "./model_data/"
sess=tf.Session()
#First let's load meta graph and restore weights
saver = tf.train.import_meta_graph(model_path+'model.meta')
saver.restore(sess,tf.train.latest_checkpoint(model_path))
sess.run(tf.global_variables_initializer())
graph = tf.get_default_graph()
#context = graph.get_tensor_by_name("context:0")
#utterance = graph.get_tensor_by_name("utterance:0")
#target = graph.get_tensor_by_name("target:0")
op_to_restore = graph.get_tensor_by_name("logits:0")
#qe,an,lbl = test_batch(0)
"""
def get_recommendations(cart_items):
cart_items = [json.loads(cart_items)]
"""
with open(model_path+'embeddings.txt','rb') as fp:
embeddings = pickle.load(fp)
with open (model_path+'words.txt', 'rb') as fp:
wordlist = pickle.load(fp)
wordlist.append('UNKNOWN_TOKEN')
embeddings = np.vstack([embeddings, np.zeros(200)])
word_to_index = dict([(w,i) for i,w in enumerate(wordlist)])
test_prod_embeds = [[word_to_index[prod] for prod in bucket] for bucket in cart_items ]
test_x_padded = []
max_length = 10
for row in test_prod_embeds:
print(row)
if len(row) <= max_length:
test_x_padded.append(row + [word_to_index['UNKNOWN_TOKEN']] * (max_length - len(row)))
else :
test_x_padded.append(test_prod_embeds[:10])
feed_dict={x: test_x_padded}
#feed_dict={x: tt}
results = sess.run(op_to_restore,feed_dict)
print("These are results:", results)
results = np.reshape(results, [len(test_x_padded), num_steps, vocab_size])
r1= results.argmax(axis=2)
for pre in (r1[:10]):
print("Starts")
print("\ny': These are the predictions made \n%s" % (wordlist[pre[-1]] ))
#print("\ny': These are the predictions made \n%s" % (",".join([wordlist[x] for x in pre])))
#print("\ny These are the ground truths :\n%s" % (",".join([wordlist[x] for x in the])))
print("End\n")
"""
return ["placeholder1","placeholder2"]
#print(get_recommendations('["Milk"]'))
``` |
{
"source": "JitindraFartiyal/odmetrics",
"score": 3
} |
#### File: odmetrics/metrics/iou.py
```python
import numpy
def iou(bboxA, bboxB):
'''
Input :
bboxA : [left, top, right, bottom]
bboxB : [left, top, right, bottom]
Output:
Intersection over union(iou) between bboxA and bboxB
Range of iou [0-1]
'''
xA = max(bboxA[0], bboxB[0])
yA = max(bboxA[1], bboxB[1])
xB = min(bboxA[2], bboxB[2])
yB = min(bboxA[3], bboxB[3])
intersected_area = max(0, xB - xA) * max(0, yB - yA)  # clamp so disjoint boxes give zero overlap
bboxA_area = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1])
bboxB_area = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1])
if (bboxA_area + bboxB_area - intersected_area) > 0:
intersection_over_union = intersected_area / (bboxA_area + bboxB_area - intersected_area)
else:
intersection_over_union = 0
return round(intersection_over_union, 3)
if __name__ == '__main__':
bboxA = [10, 20, 30, 40]
bboxB = [100, 200, 300, 541]
print('IOU : ', iou(bboxA, bboxB))
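    # Additional sanity checks (illustration only): identical boxes give an IoU of 1.0,
    # and with the clamped intersection the disjoint pair above now prints 0.0.
    print('IOU (identical boxes): ', iou(bboxA, bboxA))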
``` |
{
"source": "jitinkrishnan/Diversity-Based-Generalization",
"score": 3
} |
#### File: jitinkrishnan/Diversity-Based-Generalization/dataset_utils.py
```python
from preprocess import *
from wordvec_utils import *
import numpy as np
import nltk, random
def create_one_training_example(full_text_example, max_len, wv_dict):
text = preprocess_1(full_text_example)
words = nltk.word_tokenize(text)
bag = []
mywords = []
count = 0
for word in words:
if count == max_len:
break
if word in wv_dict.vocab.keys():
v = get_wordvector(word,wv_dict,300)
if v is not None:
count += 1
bag.append(list(v))
mywords.append(word)
for i in range(max_len-count):
bag.append(list(np.zeros(300)))
return mywords, np.asarray(bag)
def inplace_shuffle(a,b):
c = list(zip(a, b))
random.shuffle(c)
a, b = zip(*c)
return a,b
def create_data4lstm(train_category, test_category, wv_dict, Tx=75, Ty=1):
# TRAIN
f_bags_pos = open("raw_data/"+train_category+"/review_positive")
f_bags_neg = open("raw_data/"+train_category+"/review_negative")
pos = f_bags_pos.readlines()[:10]
neg = f_bags_neg.readlines()[:10]
bags = pos + neg
f_bags_pos.close()
f_bags_neg.close()
min_num = min(len(pos), len(neg))
bag_pos = []
for text in pos[:min_num]:
bag_pos.append(create_one_training_example(text, Tx, wv_dict)[1])
bag_neg = []
for text in neg[:min_num]:
bag_neg.append(create_one_training_example(text, Tx, wv_dict)[1])
pos_labels = []
for i in range(len(bag_pos)):
pos_labels.append([1,0])
neg_labels = []
for i in range(len(bag_neg)):
neg_labels.append([0,1])
X_train = bag_pos + bag_neg
Y_train = pos_labels + neg_labels
(X_train,Y_train) = inplace_shuffle(X_train,Y_train)
Xoh = np.asarray(X_train)
Yoh = np.asarray(Y_train)
Yoh = np.reshape(Yoh, (Yoh.shape[0],1,2))
# TEST
f_bags_pos = open("raw_data/"+test_category+"/review_positive")
f_bags_neg = open("raw_data/"+test_category+"/review_negative")
pos = f_bags_pos.readlines()[:10]
neg = f_bags_neg.readlines()[:10]
bags = pos + neg
f_bags_pos.close()
f_bags_neg.close()
min_num = min(len(pos), len(neg))
bag_pos = []
for text in pos[:min_num]:
bag_pos.append(create_one_training_example(text, Tx, wv_dict)[1])
bag_neg = []
for text in neg[:min_num]:
bag_neg.append(create_one_training_example(text, Tx, wv_dict)[1])
pos_labels = []
for i in range(len(bag_pos)):
pos_labels.append([1,0])
neg_labels = []
for i in range(len(bag_neg)):
neg_labels.append([0,1])
X_test = bag_pos + bag_neg
Y_test = pos_labels + neg_labels
(X_test,Y_test) = inplace_shuffle(X_test,Y_test)
Xoh_test = np.asarray(X_test)
Yoh_test = np.asarray(Y_test)
return Xoh, Yoh, Xoh_test, Yoh_test
def create_data4lstm_DA_oneclass(domain_A, wv_dict, Tx=75, Ty=1):
# TRAIN
f_bags_pos = open("raw_data/"+domain_A+"/review_unlabeled")
pos = f_bags_pos.readlines()[:10]
f_bags_pos.close()
bag_pos = []
for text in pos:
bag_pos.append(create_one_training_example(text, Tx, wv_dict)[1])
Xoh = np.asarray(bag_pos)
return Xoh
```
#### File: jitinkrishnan/Diversity-Based-Generalization/wordvec_utils.py
```python
import numpy as np
from nltk.stem import SnowballStemmer
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from gensim.models.keyedvectors import KeyedVectors
snowball_stemmer = SnowballStemmer('english')
porter_stemmer = PorterStemmer()
wordnet_lemmatizer = WordNetLemmatizer()
tknzr = TweetTokenizer()
def wordvec_dict(bin_file):
model = KeyedVectors.load_word2vec_format(bin_file, binary=True)
return model.wv
def lenOfdict(wv_dict):
return len(wv_dict.vocab.keys())
def get_wordvector(word, model_wv,dim):
vocab_keys = model_wv.vocab.keys()
if word in vocab_keys:
return model_wv[word]
elif word.lower() in vocab_keys:
return model_wv[word.lower()]
elif word.upper() in vocab_keys:
return model_wv[word.upper()]
elif snowball_stemmer.stem(word) in vocab_keys:
return model_wv[snowball_stemmer.stem(word)]
elif wordnet_lemmatizer.lemmatize(word) in vocab_keys:
return model_wv[wordnet_lemmatizer.lemmatize(word)]
return None
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.