Dataset schema (each pipe-separated row below follows this column order):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5–283 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–41 |
| license_type | string | 2 classes |
| repo_name | string | length 7–96 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k–662M, nullable (⌀) |
| star_events_count | int64 | 0–35.5k |
| fork_events_count | int64 | 0–20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7–5.88M |
| extension | string | 30 classes |
| content | string | length 7–5.88M |
| authors | sequence | length 1 |
| author | string | length 0–73 |
345bdaab2b38aae1910c3a58274b6e2998041641 | 68fa4dca860bdacdf34bd86e1aed23b61efbadde | /perception/fabric/train.py | 8b453a77060c2f750adb858ee0859400290046eb | [] | no_license | nehasunil/deformable_following | 161013489ad6a2fccd843984ca260ea6beb612e5 | 5bbb56e3136fc2195966f6f2926fe956e2c94311 | refs/heads/master | 2023-07-22T01:39:12.576350 | 2021-08-30T20:54:34 | 2021-08-30T20:54:34 | 361,874,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,882 | py | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import json
import time
import random
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime
from unet import unet
from data_loader import TowelDataset
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader
from utils import weights_init, compute_map, compute_iou, compute_auc, preprocessHeatMap
from tensorboardX import SummaryWriter
from PIL import Image
class BaseTrain:
def __init__(self, cfgs):
self.cfgs = cfgs
self.init_dirs()
self.init_datasets()
self.init_tboard()
def init_dirs(self):
runspath = self.cfgs["runspath"]
self.run_id = str(
max([int(run_id) for run_id in os.listdir(runspath) if run_id.isdecimal()])
+ 1
)
self.model_path = os.path.join(runspath, self.run_id)
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
with open(os.path.join(self.model_path, "cfgs.json"), "w") as f:
json.dump(self.cfgs, f, sort_keys=True, indent=2)
self.chkpnts_path = os.path.join(self.model_path, "chkpnts")
if not os.path.exists(self.chkpnts_path):
os.makedirs(self.chkpnts_path)
print(self.model_path)
def init_datasets(self):
self.datasize = self.cfgs["datasize"]
if self.datasize == "":
self.datasize = None
print("self.datasize", self.datasize)
train_data = TowelDataset(
root_dir=self.cfgs["datapath"],
phase="train",
use_transform=self.cfgs["transform"],
datasize=self.datasize,
)
self.train_loader = DataLoader(
train_data, batch_size=self.cfgs["batch_size"], shuffle=True, num_workers=16
)
if self.datasize != None and self.datasize <= 8:
return
val_data = TowelDataset(
root_dir=self.cfgs["datapath"],
phase="val",
use_transform=False,
datasize=self.datasize,
)
self.val_loader = DataLoader(
val_data, batch_size=self.cfgs["batch_size"], num_workers=16
)
def init_model(self):
self.model = unet(n_classes=self.cfgs["n_feature"], in_channels=1)
self.use_gpu = torch.cuda.is_available()
def init_tboard(self):
self.score_dir = os.path.join(self.model_path, "scores")
os.mkdir(self.score_dir)
self.n_class = self.cfgs["n_class"]
self.iou_scores = np.zeros((self.cfgs["epochs"], self.n_class))
self.pixel_scores = np.zeros(self.cfgs["epochs"])
train_sum_path = os.path.join(
self.model_path, "summaries", "train", self.run_id
)
os.makedirs(train_sum_path)
self.train_writer = SummaryWriter(train_sum_path)
val_sum_path = os.path.join(self.model_path, "summaries", "val", self.run_id)
self.val_writer = SummaryWriter(val_sum_path)
# if self.datasize != None and self.datasize <= 8: return
# os.makedirs(val_sum_path)
def loss(self, outputs, labels):
return self.criterion(outputs[:, : self.n_class, :, :], labels)
def tboard(self, writer, names, vals, epoch):
for i in range(len(names)):
writer.add_scalar(names[i], vals[i], epoch)
def metrics(self, outputs, labels):
output = torch.sigmoid(outputs[:, : self.n_class, :, :])
output = output.data.cpu().numpy()
pred = output.transpose(0, 2, 3, 1)
gt = labels.cpu().numpy().transpose(0, 2, 3, 1)
maps = []
ious = []
aucs = []
for g, p in zip(gt, pred):
maps.append(compute_map(g, p, self.n_class))
ious.append(compute_iou(g, p, self.n_class))
aucs.append(compute_auc(g, p, self.n_class))
return maps, ious, aucs
class TrainSeg(BaseTrain):
def __init__(self, cfgs):
super().__init__(cfgs)
self.init_model()
def init_model(self):
super().init_model()
weights_init(self.model)
if self.use_gpu:
            torch.cuda.set_device(0)
self.model = self.model.cuda()
self.criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(20.0).to("cuda"))
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.cfgs["lr"],
weight_decay=self.cfgs["w_decay"],
)
self.scheduler = lr_scheduler.StepLR(
self.optimizer, step_size=self.cfgs["step_size"], gamma=self.cfgs["gamma"]
) # decay LR by a factor of 0.5 every 30 epochs
def get_batch(self, batch):
inputs = Variable(batch["X"].cuda()) if self.use_gpu else Variable(batch["X"])
labels = Variable(batch["Y"].cuda()) if self.use_gpu else Variable(batch["Y"])
return inputs, labels
def train(self):
ts = time.time()
for epoch in range(self.cfgs["epochs"]):
self.model.train()
self.scheduler.step()
losses = []
maps, ious, aucs = ([] for i in range(3))
for iter, batch in enumerate(self.train_loader):
self.optimizer.zero_grad()
inputs, labels = self.get_batch(batch)
outputs = self.model(inputs)
# Compute losses
loss = self.loss(outputs, labels)
losses.append(loss.item())
loss.backward()
self.optimizer.step()
if iter % 10 == 0:
print("epoch%d, iter%d, loss: %0.5f" % (epoch, iter, loss))
batch_maps, batch_ious, batch_aucs = self.metrics(outputs, labels)
maps += batch_maps
ious += batch_ious
aucs += batch_aucs
ious = np.nanmean(ious, axis=1)
pixel_map = np.nanmean(maps)
mean_auc = np.nanmean(aucs)
# Write to tensorboard
names = ["loss", "MAP", "meanIOU", "meanAUC"]
values = [np.nanmean(losses), pixel_map, np.nanmean(ious), mean_auc]
self.tboard(self.train_writer, names, values, epoch)
print("summary writer add train loss: " + str(np.nanmean(losses)))
print("Finish epoch {}, time elapsed {}".format(epoch, time.time() - ts))
if epoch % 1 == 0:
if self.datasize == None or self.datasize > 8:
self.val(epoch)
torch.save(
self.model.state_dict(),
os.path.join(
self.chkpnts_path, "%s_epoch%d" % (self.run_id, epoch)
),
)
self.train_writer.close()
self.val_writer.close()
def val(self, epoch):
self.model.eval()
num_batches = len(self.val_loader)
losses = []
maps, ious, aucs = ([] for i in range(3))
for iter, batch in enumerate(self.val_loader):
inputs, labels = self.get_batch(batch)
outputs = self.model(inputs)
with torch.no_grad():
loss = self.loss(outputs, labels)
losses.append(loss.item())
batch_maps, batch_ious, batch_aucs = self.metrics(outputs, labels)
maps += batch_maps
ious += batch_ious
aucs += batch_aucs
if epoch % 50 == 0 and iter == 0:
hm = torch.sigmoid(outputs[0, :, :, :])
self.val_writer.add_image("hm_%d" % epoch, hm, epoch)
# Calculate average
ious = np.array(ious).T # n_class * val_len
ious = np.nanmean(ious, axis=1)
pixel_map = np.nanmean(maps)
mean_auc = np.nanmean(aucs)
print(
"epoch{}, pix_map: {}, meanAUC: {}, meanIoU: {}, IoUs: {}".format(
epoch, pixel_map, mean_auc, np.nanmean(ious), ious
)
)
self.iou_scores[epoch] = ious
np.save(os.path.join(self.score_dir, "meanIOU"), self.iou_scores)
self.pixel_scores[epoch] = pixel_map
np.save(os.path.join(self.score_dir, "PixelMAP"), self.pixel_scores)
names = ["loss", "MAP", "meanIOU", "meanAUC"]
values = [np.nanmean(losses), pixel_map, np.nanmean(ious), mean_auc]
self.tboard(self.val_writer, names, values, epoch)
if __name__ == "__main__":
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
np.random.seed(1337)
random.seed(1337)
with open("configs/segmentation.json", "r") as f:
cfgs = json.loads(f.read())
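    # Illustrative shape of configs/segmentation.json (keys inferred from the
    # cfgs[...] lookups in this file; the values shown are hypothetical):
    # {
    #   "runspath": "runs/", "datapath": "data/", "datasize": "",
    #   "transform": true, "batch_size": 8, "epochs": 200,
    #   "n_feature": 4, "n_class": 3, "lr": 1e-4, "w_decay": 1e-5,
    #   "step_size": 30, "gamma": 0.5
    # }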
print(json.dumps(cfgs, sort_keys=True, indent=1))
t = TrainSeg(cfgs)
t.train()
| [
"[email protected]"
] | |
f488148590db023618748efe38e9ebc10eb86b47 | 595c75b36a8efc5cefd13fa2261f2db7476e4af2 | /server_2357.py | 737b1bf21f967d5c19b7fdc5d43a99ad01e79976 | [] | no_license | rightpeter/ssdutnews | 0e3619c207c715fe26d4c206b3a834fa6513dfeb | a1804ddfa6d5aeec3ecd4709824c700cc3abde9c | refs/heads/master | 2021-01-20T02:19:08.325955 | 2015-12-01T10:22:39 | 2015-12-01T10:22:39 | 15,448,369 | 0 | 0 | null | 2014-01-03T02:25:44 | 2013-12-26T07:47:26 | Python | UTF-8 | Python | false | false | 18,240 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import os
import re
import time
import json
import tornado.web
import tornado.ioloop
import tornado.httpclient
# tornado 3.x no longer has this; use torndb instead
#import tornado.database
import torndb
import math
import httplib
import json
import pickle
import datetime
import threading
from config import *
from db import *
from myTools import *
import uimodules
reload(sys)
sys.setdefaultencoding('utf-8')
from tornado.options import define, options
# define("port", default=80, help="run on the given port", type=int)
define("port", default=2357, help="run on the given port", type=int)
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
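# Illustrative use of the encoder above (a sketch; TucaoHandler.get below passes
# it to json.dumps the same way):
#
#   json.dumps({"visited": datetime.datetime(2015, 12, 1, 10, 22, 39)},
#              cls=CJsonEncoder)
#   # -> '{"visited": "2015-12-01 10:22:39"}'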
class Application(tornado.web.Application):
def __init__(self):
#self.max_comm = 5000
handlers = [
(r'/', MainHandler),
# API -----------------
(r'/api', APIHandler),
(r'/api/follow', FllwHandler),
(r'/api/subscribed', SbscHandler),
(r'/api/check', CheckHandler),
(r'/api/cgpasswd', ChangePasswdHandler),
(r'/login', LoginHandler),
(r'/logout', LogoutHandler),
(r'/signup', SignupHandler),
(r'/id/(\d+)$', NewsHandler),
(r'/renrencallback', RenrenCallBackHandler),
(r'/renrengettoken', RenrenGetTokenHandler),
(r'/404', Error404Handler),
(r'/index', TucaoIndexHandler),
(r'/about', AboutHandler),
#(r'/tucao', TucaoHandler),
#(r'/tucao/(\d+)$', TucaoHandler),
(r'/news', TucaoCommHandler),
(r'/news/(\d+)$', TucaoCommHandler),
(r'/home/(\d+)$', HomeHandler),
#(r'/blacklist', BlackListHandler),
]
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"ui_modules": uimodules,
"cookie_secret": "#De1rFq@oyW^!kc3MI@74LY*^TPG6J8fkiG@xidDBF",
"login_url": "/login",
"xsrf_cookies": True,
}
tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("name")
class MainHandler(BaseHandler):
#@tornado.web.authenticated
def get(self):
slides = [{}, {}, {}]
slides[0]['image'] = "pedestal.jpg"
slides[0]['name'] = "周知吐槽"
slides[0]['descript'] = "尽情地为自己呐喊"
slides[0]['href'] = "/about"
slides[0]['button'] = "关于我们"
slides[1]['image'] = "tucao_slide.jpg"
slides[1]['name'] = "周知吐槽"
slides[1]['descript'] = "尽情地为自己呐喊"
slides[1]['href'] = "/index"
slides[1]['button'] = "开始吐槽"
slides[2]['image'] = "pedestal_welcome.jpg"
slides[2]['name'] = "这只是一个大标题"
slides[2]['descript'] = "用来测试轮播插件这个复杂的东西"
slides[2]['href'] = "/signup"
slides[2]['button'] = "加入我们"
user = myTools.get_current_user(self)
self.set_cookie("url", self.request.uri)
url = self.request.uri
self.render("index.html", slides=slides, user=user, url=url)
class APIHandler(BaseHandler):
def get(self):
jsonDict = {}
api_type = self.get_argument('type')
value = self.get_argument('value')
call_back = self.get_argument('callback')
jsonDict['value'] = value
if api_type == 'EMAIL':
jsonDict['type'] = "EMAIL"
if myTools.is_email_exist(value):
jsonDict['status'] = "UNIQUE"
else:
jsonDict['status'] = "REPEATED"
if api_type == 'NAME':
jsonDict['type'] = "NAME"
if myTools.is_name_exist(value):
jsonDict['status'] = "UNIQUE"
else:
jsonDict['status'] = "REPEATED"
encoded_json = json.dumps(jsonDict)
call_back_json = '%s(%s)' % (call_back, encoded_json)
self.write(call_back_json)
class Error404Handler(BaseHandler):
def get(self):
user = myTools.get_current_user(self)
url = self.request.uri
self.render('404.html', user=user, url=url)
class HomeHandler(BaseHandler):
@tornado.web.authenticated
def get(self, home_id):
home_name = myTools.get_name_by_id(home_id)
user = myTools.get_current_user(self)
url = self.request.uri
self.set_cookie('url', url)
self.render('home.html', home_name=home_name, user=user, url=url)
class AboutHandler(BaseHandler):
def get(self):
user = myTools.get_current_user(self)
url = self.request.uri
self.set_cookie('url', url)
self.render('about.html', user=user, url=url)
class LoginHandler(BaseHandler):
def get(self):
self.render("login.html")
def post(self):
if ( myTools.is_a_attack(self) ):
return
self.set_header("Content-Type", "text/plain")
email = self.get_argument('email')
password = self.get_argument('password')
if myTools.login(email, password):
name = myTools.get_name_by_email(email)
if myTools.is_user_checked(email):
self.set_secure_cookie('name', name)
url = self.get_cookie('url')
self.redirect(url)
else:
self.set_secure_cookie('guest', name)
self.redirect('/signup')
else:
self.write("Login Failed!")
class LogoutHandler(BaseHandler):
def get(self):
url = self.get_cookie('url')
self.clear_cookie('name')
self.clear_cookie('guest')
self.redirect(url)
class SignupHandler(BaseHandler):
def get(self):
user = myTools.get_current_user(self)
#if self.get_secure_cookie('guest'):
# user['name'] = self.get_secure_cookie('guest')
# user['id'] = myTools.get_id_by_name(user['name'])
# guest = self.get_secure_cookie('guest')
#elif self.get_current_user():
# user['name'] = self.get_current_user()
# user['id'] = myTools.get_id_by_name(user['name'])
self.render('signup.html', user=user, url='/')
def post(self):
if ( myTools.is_a_attack(self) ):
return
user = {}
user['email'] = self.get_argument('email')
user['name'] = self.get_argument('name')
user['password'] = self.get_argument('password')
re_password = self.get_argument('repassword')
try:
if self.get_argument('is_subscribed'):
user['subscribed'] = 1
except:
user['subscribed'] = 0
if user['password'] == re_password:
if myTools.is_email_exist(user['email']) and myTools.is_name_exist(user['name']):
if myTools.insert_a_user(user):
myTools.send_check_email(user['email'])
if myTools.login(user['email'], user['password']):
self.set_secure_cookie('guest', user['name'])
self.redirect('/signup')
self.write('Signup Failed!')
class FllwHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
pid = int(self.get_argument('pid'))
fname = self.get_current_user()
if myTools.follow(pid, fname):
self.write("Succeed Following!")
else:
self.write("Followed Error!")
class SbscHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
subscribed = int(self.get_argument('subscribed'))
name = self.get_current_user()
if myTools.subscribe(name, subscribed):
self.write("Succeed Subscribing!")
else:
self.write("Subscribed Error!")
class CheckHandler(BaseHandler):
def get(self):
try:
code = self.get_argument('code')
email = self.get_argument('email')
name = myTools.get_name_by_email(email)
if myTools.check_email(email, code):
self.set_secure_cookie('name', name)
self.redirect('/signup')
else:
self.write('Checked Failed!')
except:
name = self.get_argument('name')
email = myTools.get_email_by_name(name)
myTools.send_check_email(email)
class ChangePasswdHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
name = self.get_current_user()
email = myTools.get_email_by_name(name)
passwd = self.get_argument('passwd')
new_passwd = self.get_argument('new_passwd')
re_new_passwd = self.get_argument('re_new_passwd')
if myTools.change_passwd(email, passwd, new_passwd, re_new_passwd):
self.write('Passwd Changed!')
else:
self.write('Changing Passwd Fail!')
class RenrenCallBackHandler(BaseHandler):
def get(self):
code = self.get_argument('code')
print code
url = "https://graph.renren.com/oauth/token" +\
"?grant_type=authorization_code" + \
"&client_id=a5cd69597ccf4b369057f919928cbfce"+\
"&redirect_uri=http://tucao.pedestal.cn/renrencallback"+\
"&client_secret=ce6f56e203524cfc9c3bb61523009b6e"+\
"&code=" + code
#print url
http_client = tornado.httpclient.HTTPClient()
response = http_client.fetch(url)
print response.body
jsonDic = json.loads(response.body)
print jsonDic['access_token']
http_client.close()
class RenrenGetTokenHandler(BaseHandler):
def get(self):
token = self.get_argument('code')
print token
class NewsHandler(BaseHandler):
def get(self, nnid):
nid = int(nnid)
news = myTools.get_a_news(nid)
news['id'] = news['nid']
news.pop('nid')
news_json = json.dumps(news)
self.write(news_json)
class TucaoIndexHandler(BaseHandler):
def get(self):
try:
page = int(self.get_argument('page'))
except:
page = 1
page_size = 20
latest_id = myTools.get_latest_news_id()
oldest_id = myTools.get_oldest_news_id()
total_pages = (myTools.get_total_news_num()-1) / page_size + 1
if page>total_pages or page<1:
page = 1
max_id = latest_id - (page-1)*page_size
if page == total_pages:
min_id = myTools.get_oldest_news_id()
else:
min_id = max_id - page_size + 1
newsList = myTools.get_news_list(min_id, max_id)
visible_pages = 10
user = myTools.get_current_user(self)
self.set_cookie("url", self.request.uri)
url = self.request.uri
self.render("tucao_index.html", newsList=newsList, total_pages=total_pages, current_page=page,
visible_pages=visible_pages, user=user, url=url)
class TucaoHandler(BaseHandler):
def get(self, nnid):
if ( myTools.is_a_attack(self) ):
return
NewsDatabase.reconnect()
nid = int(nnid)
comm = NewsDatabase.query("""SELECT * FROM commTable WHERE id=%r ORDER
BY level DESC, tolevel""", nid)
# print comm
reply = json.dumps(comm, cls=CJsonEncoder)
# print reply
self.write(reply)
def post(self):
if ( myTools.is_a_attack(self) ):
return
print ("In post")
NewsDatabase.reconnect()
remote_ip = self.request.remote_ip
if ( restrict.has_key(remote_ip) ):
if ( time.time() - restrict[remote_ip][0] < 5 ):
self.write("less than 5 second")
print restrict[remote_ip][0]
print time.time()
print "less than 5 second"
return
if ( restrict[remote_ip][1] > 1000 ):
self.write("too much")
NewsDatabase.execute(u"""INSERT blackList(ip) VALUES(%s)""", remote_ip)
blacklist.append(remote_ip)
print blacklist
print restrict[remote_ip][1]
print "too much"
return
else:
restrict[remote_ip] = [time.time(), 0]
print self.request
raw_body = str(self.request.body)
# print raw_body
jsonDic = json.loads(raw_body)
# print jsonDic
nid = int(jsonDic['id'])
content = jsonDic['content']
r = r"^@(\d+):([\s\S]+)$"
LEVEL = re.findall(r, content)
if LEVEL:
level = int(LEVEL[0][0])
TOLEVEL = NewsDatabase.query("""SELECT COUNT(*) AS tolevel FROM commTable WHERE id=%r AND level=%r""", nid, level)
if ( int(TOLEVEL[0]['tolevel']) == 0 ):
print "no such level"
self.write("no such level")
return
else:
tolevel = int(TOLEVEL[0]['tolevel']) + 1
content = LEVEL[0][1]
else:
tolevel = 1
LEVEL = NewsDatabase.query("""SELECT COUNT(DISTINCT(level)) AS level FROM commTable WHERE id=%r""", nid)
level = int(LEVEL[0]['level']) + 1
# print content
if (content == 'water'):
NewsDatabase.execute(u"""INSERT blackList(ip) VALUES(%s)""", remote_ip)
blacklist.append(remote_ip)
print blacklist
myTools.is_a_attack(self)
NewsDatabase.execute(u"""INSERT commTable(id, level, tolevel,
content) VALUES(%r, %r, %r, %s)""", nid, level, tolevel,
content)
restrict[remote_ip][1] += 1
print ("Insert comm")
print restrict[remote_ip][1]
self.write("success")
class TucaoCommHandler(BaseHandler):
def get(self, nnid):
NewsDatabase.reconnect()
nid = int(nnid)
news = myTools.get_a_news(nid)
news['body'] = news['body'].replace('href="/Attachments/file', 'href="http://ssdut.dlut.edu.cn/Attachments/file')
comm = NewsDatabase.query("""SELECT * FROM commTable WHERE id=%r ORDER
BY level DESC, tolevel""", nid)
latest = myTools.get_latest_news_id()
total = myTools.get_total_news_num()
# print comm
user = myTools.get_current_user(self)
self.set_cookie("url", self.request.uri)
url = self.request.uri
self.render('TucaoComm.html', title=news['title'],\
body=news['body'], publisher=news['publisher'],\
date=news['date'], clean_body=news['clean_body'],\
commList=comm, nid=nid, latest=latest, total=total,
user=user, url=url)
def post(self):
if ( myTools.is_a_attack(self) ):
self.redirect("/blacklist")
return
print ("In post")
NewsDatabase.reconnect()
raw_body = str(self.request.body)
print self.request.remote_ip
print raw_body
nid = int(self.get_argument('id'))
content = self.get_argument('content')
r = r"^@(\d+):([\s\S]+)$"
LEVEL = re.findall(r, content)
if LEVEL:
level = int(LEVEL[0][0])
TOLEVEL = NewsDatabase.query("""SELECT COUNT(*) AS tolevel FROM commTable WHERE id=%r AND level=%r""", nid, level)
if ( int(TOLEVEL[0]['tolevel']) == 0 ):
print "no such level"
self.write("no such level")
return
else:
tolevel = int(TOLEVEL[0]['tolevel']) + 1
content = LEVEL[0][1]
else:
tolevel = 1
LEVEL = NewsDatabase.query("""SELECT COUNT(DISTINCT(level)) AS level FROM commTable WHERE id=%r""", nid)
level = int(LEVEL[0]['level']) + 1
# print content
        if (content == 'water'):
            remote_ip = self.request.remote_ip
            NewsDatabase.execute(u"""INSERT blackList(ip) VALUES(%s)""", remote_ip)
            ENV_DICT['blacklist'].append(remote_ip)
myTools.is_a_attack(self)
NewsDatabase.execute(u"""INSERT commTable(id, level, tolevel,
content) VALUES(%r, %r, %r, %s)""", nid, level, tolevel,
content)
myTools.post_once(self)
self.redirect("/news/%d" % nid)
class BlackListHandler(BaseHandler):
def get(self):
self.write("U are in blacklist!<br>联系人人网“学生周知”")
def main():
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
def init():
ENV_DICT['latest'] = myTools.get_latest_news_id()
ENV_DICT['total'] = myTools.get_total_news_num()
print ENV_DICT['total']
BLACKLIST = NewsDatabase.query("""SELECT * FROM blackList""")
ENV_DICT['blacklist'] = []
ENV_DICT['restrict'] = {}
for blackdict in BLACKLIST:
ENV_DICT['blacklist'].append(blackdict['ip'])
print ENV_DICT['blacklist']
if __name__ == "__main__":
init()
main()
| [
"[email protected]"
] | |
5196aa80c752adb16bb8cd345d8db75574e812f7 | e334a81be3960876a8f918f592f6d8de6bfe9926 | /Deep Learning approach/Inference.py | ca166b5f65a760a4b8cb513e9b8e66b4f5204693 | [] | no_license | akshatbjain/Object-Detection-using-Computer-Vision | a03432243820cc4ad5cc4bfb09035b7468bc0c01 | 44f27ffcf55641470a791ec39b825aa1f82ea564 | refs/heads/master | 2021-06-17T12:52:00.624763 | 2017-04-23T21:32:10 | 2017-04-23T21:32:10 | 74,620,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
from keras.optimizers import SGD
import numpy as np
import os
import cv2
# load json and create model
json_file = open('VGG16_Model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("VGG16_Model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
loaded_model.compile(loss='mse',
optimizer=sgd,
metrics=['accuracy'])
#loaded_model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
# Code to load the image from webcam
cam = cv2.VideoCapture(0)
print 'Press <q> to exit this code!'
while(1):
ret, img = cam.read()
# Code to load the image from local directory
#im = cv2.imread('test_images/5.jpeg')
im = img.copy()
im = cv2.resize(im, (224, 224)).astype(np.float32)
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
out = loaded_model.predict(im)
print out[0]
if out[0][0] > 0.8 and out[0][1] > 0.022:
font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, 'Object Detected', (20, 150), font, 2, (255, 255, 255), 2, cv2.LINE_AA)  # LINE_AA replaces the old CV_AA constant
cv2.imshow('Input', img)
k = cv2.waitKey(33)
if k == 1048689:
cam.release()
break
| [
"[email protected]"
] | |
bb034ab1792689bad0fe60d59c014703462e053b | a01633825cb77ce83320e9d99fba73b20a08ffab | /ycm_extra_conf.py | aab6f667988882458a1d5e6bdac43d3507ab8ac9 | [] | no_license | lucckb/sysscripts | e9c6b116329963e5e52cbad2becbe72dc2f400eb | d5d191981509cc71520076aa87f56c84e3667c49 | refs/heads/master | 2023-03-20T06:44:51.291742 | 2023-03-08T21:23:43 | 2023-03-08T21:23:43 | 186,891,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,017 | py | import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
def find_build_folder():
current = '.build.json'
for i in range(10):
if os.path.exists(current):
return current
current = '../' + current
return ''
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = find_build_folder()
# These are the compilation flags that will be used in case there's no
# compilation database set.
flags = [
'-Wall',
'-Wextra',
'-Wno-variadic-macros',
'-fexceptions',
'-nostdinc',
'-std=c++17',
'-x', 'c++',
]
# To check the compiler paths, call `gcc -print-prog-name=cc1plus` -v
# Compatibility with clang complete
if os.path.isfile('.clang_complete'):
with open('.clang_complete') as clf:
content = clf.read().splitlines()
flags += content
else:
flags += [
'-I',
        '/usr/include',
'-isystem',
        '/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.1/../../../../include/c++/8.2.1',
'-isystem',
'/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.1/../../../../include/c++/8.2.1/x86_64-pc-linux-gnu',
'-isystem',
'/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.1/../../../../include/c++/8.2.1/backward',
'-isystem',
'/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.1/include',
'-isystem',
'/usr/lib/gcc/x86_64-pc-linux-gnu/8.2.1/include-fixed',
'-I',
'/usr/local/include',
]
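# Illustrative .clang_complete file that the branch above would read
# (one compiler flag per line; the paths are hypothetical):
#
#   -Iinclude
#   -I/opt/mylib/include
#   -DDEBUG=1
#   -std=c++17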
if compilation_database_folder:
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
#def DirectoryOfThisScript():
# return os.path.dirname(os.path.abspath(__file__))
def DirectoryOfThisScript():
return os.path.abspath(os.getcwd())
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FlagsForFile(filename):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile(filename)
final_flags = PrepareClangFlags(
MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_),
filename)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True}
| [
"[email protected]"
] | |
223e1702edfa023a4ea7e8553dae1934294aa7ea | 1875c16dee46ab528b5a227c09c5743d8d10e982 | /pyacs/lib/strain.py | 286a4eea8f7c2d32302bdc8043281aba51f4d7f6 | [] | no_license | CorentinPeutin/pyacs | 56a8d2b3941bb7f921c447167c40d89157a502ed | c82aecd2c95a5d635170ed6750131cb49e28e570 | refs/heads/main | 2023-04-16T13:41:53.042830 | 2021-05-03T14:56:28 | 2021-05-03T14:56:28 | 363,963,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,658 | py | """
Strain rate calculation library
"""
###############################################################################
def vgrad(X,Y,VE,VN,SVE=None,SVN=None,CVEN=None,CODE=None, method='WLS',verbose=False):
###############################################################################
"""
Linear estimates of a horizontal velocity gradient
:param X,Y: 1D numpy array of longitudes and latitudes
:param VE,VN: 1D numpy array of east and north velocity. Unit is assummed to be mm/yr
:param SVE,SVN,CVEN: 1D numpy array of east and north velocity standard deviation (mm/yr) and correlation.
:param CODE: 1D numpy of string with site codes
:param method: estimator in 'L1','WLS' (default: weighted least-squares 'WLS')
:return: DV, the velocity gradient tensor as a 2x2 2D-numpy array and a 2-columns (East and North) residuals 2D numpy array
"""
import numpy as np
###########################################################################
def __vgrad_obs_eq__(l0,p0,l,p,ve,vn,sve,svn,cven):
###########################################################################
"""
observation equation for the horizontal velocity gradient
we assume that observation are in mm/yr and result will be in nstrain / yr
"""
Rt = 6371.E3 # Earth's radius in metres
Rt3nstrain = Rt / 1.E6
import pyacs.lib.glinalg
Ai = np.zeros((2,6))
Bi = np.zeros(2)
Corri = np.zeros((2,2))
# velocity at the barycenter
Ai[0,0] = 1.
Ai[1,1] = 1.
# ve
Ai[0,2] = np.radians(l-l0) * Rt3nstrain * np.cos( np.radians( (p+p0)/2. ) )
Ai[0,3] = np.radians(p-p0) * Rt3nstrain
# vn
Ai[1,4] = np.radians(l-l0) * Rt3nstrain * np.cos( np.radians( (p+p0)/2. ) )
Ai[1,5] = np.radians(p-p0) * Rt3nstrain
# Observation vector
Bi[0] = ve
Bi[1] = vn
# Covariance matrix
Corri[0,0] = 1.
Corri[1,1] = 1.
Corri[1,0] = cven
Corri[0,1] = cven
Cvi = pyacs.lib.glinalg.corr_to_cov( Corri,np.array([sve,svn]) )
return(Ai,Bi,Cvi)
###########################################################################
def __barycenter__(X,Y):
###########################################################################
"""
the barycenter of a set of point given by geographical coordinates
"""
import pyacs.lib.coordinates
XYZ = np.array( list(map(pyacs.lib.coordinates.geo2xyz,np.radians(X),np.radians(Y),np.zeros(X.size)) ))
[mx,my,mz] = np.mean( XYZ, axis=0 )
[mlong,mlat, _ ] = pyacs.lib.coordinates.xyz2geo(mx, my, mz, unit='dec_deg')
return(mlong,mlat)
# MAIN
# barycenter of network
(l0,p0) = __barycenter__(X,Y)
if verbose:
print("-- Barycenter at (%10.5lf , %10.5lf )" % (l0,p0))
# build the linear system
A=np.zeros((2*X.size,6))
B=np.zeros((2*X.size))
CV=np.zeros((2*X.size,2*X.size))
for i in np.arange(X.size):
(Ai,Bi,Cvi) = __vgrad_obs_eq__(l0,p0,X[i],Y[i],VE[i],VN[i],SVE[i],SVN[i],CVEN[i])
A[2*i:2*i+2,:] = Ai
B[2*i:2*i+2] = Bi
CV[2*i:2*i+2,2*i:2*i+2] = Cvi
# Solve
import pyacs.lib.glinalg
SOL,COV,RESIDUALS, chi2 = pyacs.lib.glinalg.lscov_full(A,B,CV)
return(l0,p0,SOL,COV,RESIDUALS, chi2)
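###############################################################################
# Illustrative usage sketch (not part of the original module). It feeds four
# synthetic stations with made-up velocities through vgrad(); running it
# requires the pyacs package that the function imports internally.
###############################################################################
if __name__ == '__main__':
    import numpy as np
    X = np.array([0.0, 0.5, 0.5, 0.0])        # longitudes (deg)
    Y = np.array([45.0, 45.0, 45.5, 45.5])    # latitudes (deg)
    VE = np.array([1.0, 2.0, 2.5, 1.5])       # east velocities (mm/yr)
    VN = np.array([0.5, 0.4, 0.3, 0.6])       # north velocities (mm/yr)
    SVE = np.full(4, 0.1)                     # east sigmas (mm/yr)
    SVN = np.full(4, 0.1)                     # north sigmas (mm/yr)
    CVEN = np.zeros(4)                        # east/north correlations
    l0, p0, sol, cov, res, chi2 = vgrad(X, Y, VE, VN, SVE, SVN, CVEN, verbose=True)
    # sol = [ve0, vn0, dVe/dE, dVe/dN, dVn/dE, dVn/dN]; gradient terms in nstrain/yr
    print(sol)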
| [
"[email protected]"
] | |
2687452fb54b846ce10ed9522ff3792242e23613 | dbbe307491d3d4b00c077d502c52ee45fc7944f9 | /Spam_Filter.py | a98714b8d2eb9ad0ae9e69c3a1282ec848c713a6 | [] | no_license | Prais3/Artificial-Intelligence | d0c2a180b265fd76447fe08bb3b6eb6eec1f26db | 0c60700a5b03401fbe04d4ddf5b4fe63f8764669 | refs/heads/master | 2023-02-02T17:56:48.791188 | 2020-12-03T14:53:23 | 2020-12-03T14:53:23 | 167,734,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,946 | py | ############################################################
# CMPSC442: Homework_4
############################################################
# student_name = "Varun Jani"
############################################################
# Imports
############################################################
# Include your imports here, if any are used.
import email
import os
from math import log, exp
# from queue import PriorityQueue
############################################################
# Section 1: Spam Filter
############################################################
# Function to read the emails once the path is specified, and it extracts the tokens and returns it as a list
def load_tokens(email_path):
# To open the file using the specified path
file_obj = open(email_path, encoding="utf8")
message = email.message_from_file(file_obj)
token_list = []
for line in email.iterators.body_line_iterator(message):
token_list += line.split()
# Simply close the file
file_obj.close()
return token_list
# Function to return a dictionary from the words contained in the email to their Laplace-smoothed probabilities
def log_probs(email_paths, smoothing):
p_dict = {}
# For loop to check the path of the email iterating over the paths
for e in email_paths:
values = load_tokens(e)
# From the values, we are looking for a word specifically using for loop
for x in values:
# To check if the word is in the dictionary or not
if x in p_dict:
p_dict[x] += 1
else:
p_dict[x] = 1
final = sum(p_dict.values())
# To check the word and its frequency in the dictionary using for loop and items() for dictionary
for term, w in p_dict.items():
temp = final + smoothing * (len(p_dict) + 1)
p_dict[term] = log((w + smoothing) / temp)
p_dict.update({'<UNK>': log(smoothing / (final + smoothing * (len(p_dict) + 1)))})
return p_dict
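# The smoothed estimates above follow Laplace smoothing:
#   P(w) = (count(w) + alpha) / (N + alpha * (V + 1))
# where N is the total token count, V the vocabulary size, alpha the smoothing
# constant, and the extra "+1" slot is reserved for the <UNK> pseudo-word.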
class SpamFilter(object):
# Simple init function create two log probability dictionaries
def __init__(self, spam_dir, ham_dir, smoothing):
spam_path, ham_path = [], []
# Using os.walk to access the file information
for dir_path, dir_name, file_name in os.walk(spam_dir):
spam_path = [dir_path + '/' + f for f in file_name]
for dir_path, dir_name, file_name in os.walk(ham_dir):
ham_path = [dir_path + '/' + f for f in file_name]
self.spam_prob = log_probs(spam_path, smoothing)
self.ham_prob = log_probs(ham_path, smoothing)
total_len = len(spam_path) + len(ham_path)
self.p_ham = log(len(ham_path) / total_len)
self.p_spam = log(1 - self.p_ham)
# Function to return whether the email in the given file path is spam or not (returns True or False)
def is_spam(self, email_path):
spam_p, ham_p = self.p_spam, self.p_ham
dict_words = {}
# Check for the word in the given file, which is accessed using load_tokens function
for w in load_tokens(email_path):
if w in dict_words:
dict_words[w] += 1
else:
dict_words[w] = 1
# Update the ham and spam accordingly after iterating through the dictionary
for w, count in dict_words.items():
# To check if the word is in there or not (for spam and ham)
if w in self.ham_prob:
ham_p += self.ham_prob[w]
else:
ham_p += self.ham_prob["<UNK>"]
if w in self.spam_prob:
spam_p += self.spam_prob[w]
else:
spam_p += self.spam_prob["<UNK>"]
# I started these valued with the calculated value and not zero
if spam_p > ham_p:
return True
return False
# Next two function return the n most indicative words for each category sorted in descending order
def most_indicative_spam(self, n):
v = set(self.spam_prob.keys()) & set(self.ham_prob.keys())
spam_res = {i: log(exp(self.spam_prob[i]) / (exp(self.ham_prob[i]) +
exp(self.spam_prob[i]))) for i in v}
ans = sorted([(i, spam_res[i]) for i in spam_res], key=lambda x: x[1], reverse=True)
return [i[0] for i in ans[:n]]
def most_indicative_ham(self, n):
v = set(self.spam_prob.keys()) & set(self.ham_prob.keys())
ham_res = {i: log(exp(self.ham_prob[i]) / (exp(self.spam_prob[i]) +
exp(self.ham_prob[i]))) for i in v}
ans = sorted([(i, ham_res[i]) for i in ham_res], key=lambda x: x[1], reverse=True)
return [i[0] for i in ans[:n]]
# P.S: I used priority queue also to implement the two most indicative function here. It was slightly slower than the
# sorting method so I decided not to use it. Based on my test cases, this is a faster method. I hope I am not wrong.
# I would love to change it if it isn't because the auto-grader test cases might be different and so I cannot
# be completely sure.
# Best time overall using unittest should be in the range 3.5 to 4.0 seconds
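# Illustrative usage sketch (directory and file names are hypothetical, not
# part of the assignment data):
#
#   sf = SpamFilter("data/train/spam", "data/train/ham", 1e-5)
#   sf.is_spam("data/dev/spam/some_email.txt")   # -> True or False
#   sf.most_indicative_spam(5)   # five words most indicative of spam
#   sf.most_indicative_ham(5)    # five words most indicative of ham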
############################################################
# Section 2: Feedback
############################################################
feedback_question_1 = """
Around 8-9 hours mainly because of last timing part
"""
feedback_question_2 = """
The second function was challenging at first, and the most indicative part was a whole lot time consuming for me
because of the time issue. I had to solve a lot of issues there. Please look at the P.S. above, as I have a better
solution if the current one isn't satisfactory. Overall otherwise I had not many stumbling blocks.
"""
feedback_question_3 = """
Assignment was alright. It was a good assignment and helped me learn something new.
"""
| [
"[email protected]"
] | |
a1884220878bb23e0717618fb735566fe0dbc2ee | 29597b67f10d456bdcc90a693ac93f571635ae34 | /starter/fact_gen.py | 5c18200c17517a2188492ff21fe5285875adda1a | [] | no_license | sh-tatsuno/python-algorithm | 67d50f24604550c115f957ed74e81483566c560d | 2800050077562eef50b6f0bd8ba6733068469c4c | refs/heads/master | 2020-05-17T09:46:05.714449 | 2019-07-25T15:43:20 | 2019-07-25T15:43:20 | 183,641,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | def prod(a,b):
# TODO change output to the product of a and b
output = a * b
return output
def fact_gen():
i = 1
n = i
while True:
output = prod(n, i)
yield output
n = output
i += 1
# TODO: update i and n
# Hint: i is a successive integer and n is the previous product
# Test block
my_gen = fact_gen()
num = 5
for i in range(num):
print(next(my_gen))
# Correct result when num = 5:
# 1
# 2
# 6
# 24
# 120
| [
"[email protected]"
] | |
ad7d6b93bd2c0c9188bc6788a1c2cd4f6d68c6b4 | aec1201dad3f0aeda161c4e927ba05c04377eae9 | /cogs/commands/userinfo.py | 4fa88b6b6c214c9db0db2bddc83083cce150529a | [] | no_license | asdiasjiodasj/ahsidusdfdsfs | aa8455915633f7187d6756291e61db7e97e5303a | 2214c6318147fa397d6d01b87086250edeec5ff4 | refs/heads/main | 2023-02-24T11:48:31.811629 | 2021-01-29T12:38:28 | 2021-01-29T12:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | import discord
from discord.ext import commands
from datetime import datetime
class Userinfo(commands.Cog):
    def __init__(self, bot):  # setup() below passes the bot instance in
        self.bot = bot
@commands.command(aliases=["whois"])
async def userinfo(self, ctx, *, target: discord.Member=None):
if target is None:
target = ctx.author
roles = [role for role in target.roles if role != ctx.guild.default_role]
embed = discord.Embed(title="User information", colour=discord.Color.gold(), timestamp=datetime.utcnow())
embed.set_author(name=target.name, icon_url=target.avatar_url)
embed.set_thumbnail(url=target.avatar_url)
fields = [("Name", str(target), False),
("ID", target.id, False),
("Status", str(target.status).title(), False),
(f"Roles ({len(roles)})", " ".join([role.mention for role in roles]), False),
("Created at", target.created_at.strftime("%d/%m/%Y %H:%M:%S"), False),
("Joined at", target.joined_at.strftime("%d/%m/%Y %H:%M:%S"), False)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Userinfo(bot)) | [
"[email protected]"
] | |
8d4552c6674aa3ccb96e1d03b40f4a53cc32e0d2 | 7c8957401e1991ea8fa84ba7ac8462ad11bd5502 | /train.py | 733cee5d9861b3de998afeb6b1d48b16ce65e99e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | y33-j3T/DeepMusicvStyle | cfec508078832ce9032008e2c9ce8fe15615fcc6 | f1a6b149d8412ad480952e6820708b2b6eaf4b96 | refs/heads/master | 2021-04-02T12:49:52.828167 | 2020-03-23T15:44:06 | 2020-03-23T15:44:06 | 248,276,853 | 0 | 0 | BSD-3-Clause | 2020-03-23T15:44:08 | 2020-03-18T15:55:41 | Python | UTF-8 | Python | false | false | 5,792 | py | from model import MusicVAE
from loader import load_noteseqs
import numpy as np
import tensorflow as tf
import argparse
tf.reset_default_graph()
ap = argparse.ArgumentParser()
ap.add_argument("-bs", "--batch_size", default=32, type=int)
ap.add_argument("-s", "--save_path", default="vae/", type=str)
ap.add_argument("-e", "--epochs", default=100, type=int)
ap.add_argument("--train_set", default="data/Jsbtr.pkl data/Nmdtr.pkl", type=str)
ap.add_argument("--test_set", default="data/Jsbte.pkl data/Nmdte.pkl", type=str)
ap.add_argument("--x_depth", default="89 33 33", type=str)
ap.add_argument("--enc_rnn", default="hyperlstm", type=str)
ap.add_argument("--enc_rnn_dim", default=512, type=int)
ap.add_argument("--enc_hyper_unit", default=256, type=int)
ap.add_argument("--enc_dropout", default=0.25, type=float)
ap.add_argument("--enc_rnn_layer", default=1, type=int)
ap.add_argument("--dec_rnn", default="hyperlstm", type=str)
ap.add_argument("--dec_rnn_dim", default=512, type=int)
ap.add_argument("--dec_hyper_unit", default=256, type=int)
ap.add_argument("--dec_dropout", default=0.25, type=float)
ap.add_argument("--dec_rnn_layer", default=1, type=int)
ap.add_argument("--attention", default=128, type=int)
ap.add_argument("--cont_dim", default=100, type=int)
ap.add_argument("--cat_dim", default=2, type=int)
ap.add_argument("--style_embed_dim", default=100, type=int)
ap.add_argument("--mu_force", default=2.0, type=float)
ap.add_argument("--gumbel", default=0.67, type=float)
ap.add_argument("--l2_reg", default=0.002, type=float)
ap.add_argument("--kl_reg", default=1.0, type=float)
ap.add_argument("--kl_anneal", default=1000, type=int)
ap.add_argument("--restore_path", default=None, type=str)
args = ap.parse_args()
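# Example invocation (illustrative; every flag shown simply restates a default
# defined above):
#   python train.py --batch_size 32 --epochs 100 \
#       --train_set "data/Jsbtr.pkl data/Nmdtr.pkl" \
#       --test_set "data/Jsbte.pkl data/Nmdte.pkl" \
#       --save_path vae/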
x_depth = args.x_depth.split()
x_depth = [int(i) for i in x_depth]
train_set = args.train_set.split()
test_set = args.test_set.split()
train_graph = tf.Graph()
val_graph = tf.Graph()
with train_graph.as_default():
t_it, t_x, t_s, t_l = load_noteseqs(train_set, x_depth,
batch_size=args.batch_size, augment=True).get_iterator()
m = MusicVAE(x_depth=x_depth,
enc_rnn_dim=args.enc_rnn_dim, enc_hyper_unit=args.enc_hyper_unit, enc_dropout=args.enc_dropout,
dec_rnn_dim=args.dec_rnn_dim, dec_hyper_unit=args.dec_hyper_unit, dec_dropout=args.dec_dropout,
enc_rnn_layer=args.enc_rnn_layer, dec_rnn_layer=args.dec_rnn_layer,
enc_rnn=args.enc_rnn, dec_rnn=args.dec_rnn,
attention=args.attention,
cont_dim=args.cont_dim, cat_dim=args.cat_dim, mu_force=args.mu_force,
gumbel=args.gumbel, style_embed_dim=args.style_embed_dim,
l2_reg=args.l2_reg, kl_reg=args.kl_reg,
training=True, beta_anneal_steps=args.kl_anneal)
m.build(t_x, t_s, t_l, None)
with val_graph.as_default():
v_it, v_x, v_s, v_l = load_noteseqs(test_set, x_depth,
batch_size=20).get_iterator()
n = MusicVAE(x_depth=x_depth,
enc_rnn_dim=args.enc_rnn_dim, enc_hyper_unit=args.enc_hyper_unit, enc_dropout=0.0,
dec_rnn_dim=args.dec_rnn_dim, dec_hyper_unit=args.dec_hyper_unit, dec_dropout=0.0,
enc_rnn_layer=args.enc_rnn_layer, dec_rnn_layer=args.dec_rnn_layer,
enc_rnn=args.enc_rnn, dec_rnn=args.dec_rnn,
attention=args.attention,
cont_dim=args.cont_dim, cat_dim=args.cat_dim, mu_force=args.mu_force,
gumbel=args.gumbel, style_embed_dim=args.style_embed_dim,
l2_reg=args.l2_reg, kl_reg=args.kl_reg,
training=False, beta_anneal_steps=args.kl_anneal)
n.build(v_x, v_s, v_l, None)
tf_config = tf.ConfigProto()
tf_config.allow_soft_placement = True
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config, graph=train_graph)
ss = tf.Session(config=tf_config, graph=val_graph)
if args.restore_path:
print("[INFO] Restoring from checkpoint {}".format(args.restore_path))
m.saver.restore(sess, args.restore_path)
else:
sess.run(m.init)
step = 0
tw = tf.summary.FileWriter(args.save_path+"train", sess.graph)
vw = tf.summary.FileWriter(args.save_path+"val", ss.graph)
print("[INFO] Start training...")
for epoch in range(args.epochs):
sess.run(t_it.initializer)
train_loss = []
train_kl = []
while True:
try:
if (step+1)%20 == 0 or step == 0:
_, tmp_loss, tmp_kl, step, summ = sess.run([m.op, m.recon_loss, m.kl_loss, m.step, m.summ_op])
tw.add_summary(summ, step)
else:
_, tmp_loss, tmp_kl, step = sess.run([m.op, m.recon_loss, m.kl_loss, m.step])
train_loss.append(tmp_loss)
train_kl.append(tmp_kl)
except tf.errors.OutOfRangeError:
break
m.saver.save(sess, args.save_path + "vae-epoch{}".format(epoch+1))
n.saver.restore(ss, args.save_path + "vae-epoch{}".format(epoch+1))
val_loss = []
val_kl = []
ss.run(v_it.initializer)
while True:
try:
tmp_loss, tmp_kl, summ = ss.run([n.recon_loss, n.kl_loss, n.summ_op])
val_loss.append(tmp_loss)
val_kl.append(tmp_kl)
except tf.errors.OutOfRangeError:
vw.add_summary(summ, step)
break
train_loss = np.mean(train_loss)
train_kl = np.mean(train_kl)
val_loss = np.mean(val_loss)
val_kl = np.mean(val_kl)
print("{} Train Loss: {:.4f} Train KL: {:.2f} Val Loss: {:.4f} Val KL: {:.2f}".format(epoch+1, train_loss, train_kl, val_loss, val_kl))
| [
"[email protected]"
] | |
7f65ad75e2696a0df66c6ff8d05e1997399a2474 | c7cb7b9583859147cdc13811e2d25c15fae059d8 | /Problem Sets/Only A Little Lucky.py | 79390ee8c07a4c8a885d1f4387a512d43a040c17 | [] | no_license | rhmiller47/Udacity-cs101-building-a-search-engine | f83e2ecadd34d29c48ff16aa711b2c6e616e6d16 | 661c7253e338f7ad15f317be9205e70f7b81f087 | refs/heads/master | 2021-12-13T21:17:27.254062 | 2017-04-06T03:11:40 | 2017-04-06T03:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,144 | py | # Triple Gold Star
# Only A Little Lucky
# The Feeling Lucky question (from the regular homework) assumed it was enough
# to find the best-ranked page for a given query. For most queries, though, we
# don't just want the best page (according to the page ranking algorithm), we
# want a list of many pages that match the query, ordered from the most likely
# to be useful to the least likely.
# Your goal for this question is to define a procedure, ordered_search(index,
# ranks, keyword), that takes the same inputs as lucky_search from Question 5,
# but returns an ordered list of all the URLs that match the query.
# To order the pages, use the quicksort algorithm, invented by Sir Tony Hoare in
# 1959. Quicksort provides a way to sort any list of data, using an expected
# number of comparisons that scales as n log n where n is the number of elements
# in the list.
# The idea of quicksort is quite simple:
# If the list has zero or one elements, it is already sorted.
# Otherwise, pick a pivot element, and split the list into two partitions: one
# contains all the elements equal to or lower than the value of the pivot
# element, and the other contains all the elements that are greater than the
# pivot element. Recursively sort each of the sub-lists, and then return the
# result of concatenating the sorted left sub-list, the pivot element, and the
# sorted right sub-list.
# For simplicity, use the first element in the list as your pivot element (this
# is not usually a good choice, since it means if the input list is already
# nearly sorted, the actual work will be much worse than expected).
def qsort(pages):
if pages == []:
return []
else:
pivot = pages[0]
lesser = qsort([x for x in pages[1:] if x < pivot])
greater = qsort([x for x in pages[1:] if x >= pivot])
return lesser + [pivot] + greater
def ordered_search(index, ranks, keyword):
pages = lookup(index,keyword)
if not pages:
return None
else:
return qsort(pages)
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def get_page(url):
if url in cache:
return cache[url]
return ""
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def add_to_index(index, keyword, url):
if keyword in index:
index[keyword].append(url)
else:
index[keyword] = [url]
def lookup(index, keyword):
if keyword in index:
return index[keyword]
else:
return None
def crawl_web(seed): # returns index, graph of inlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>, [list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
def compute_ranks(graph):
d = 0.8 # damping factor
numloops = 10
ranks = {}
npages = len(graph)
for page in graph:
ranks[page] = 1.0 / npages
for i in range(0, numloops):
newranks = {}
for page in graph:
newrank = (1 - d) / npages
for node in graph:
if page in graph[node]:
newrank = newrank + d * (ranks[node] / len(graph[node]))
newranks[page] = newrank
ranks = newranks
return ranks
# Here are some example showing what ordered_search should do:
# Observe that the result list is sorted so the highest-ranking site is at the
# beginning of the list.
# Note: the intent of this question is for students to write their own sorting
# code, not to use the built-in sort procedure.
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print ordered_search(index, ranks, 'Hummus')
#>>> ['http://udacity.com/cs101x/urank/kathleen.html',
# 'http://udacity.com/cs101x/urank/nickel.html',
# 'http://udacity.com/cs101x/urank/arsenic.html',
# 'http://udacity.com/cs101x/urank/hummus.html',
# 'http://udacity.com/cs101x/urank/index.html']
print ordered_search(index, ranks, 'the')
#>>> ['http://udacity.com/cs101x/urank/nickel.html',
# 'http://udacity.com/cs101x/urank/arsenic.html',
# 'http://udacity.com/cs101x/urank/hummus.html',
# 'http://udacity.com/cs101x/urank/index.html']
print ordered_search(index, ranks, 'babaganoush')
#>>> None
| [
"[email protected]"
] | |
6e3219373e0597dbbe517953e3878c45f03103a8 | e16aba29c27fa7a0bdabd9d30b91c4ddafbc5c11 | /mainsite/migrations/0004_order_confirmed.py | 2e3dcf59c809a29cdce06b938c4f0617feecb9b2 | [] | no_license | ansh-saini/GraphicDesignShop | ebecd0ba039c135de73e4485e0e454c7e891b958 | 047d1af93982c5cfc696340411d27cc27d00782f | refs/heads/master | 2020-04-20T16:12:35.106247 | 2019-03-12T17:45:46 | 2019-03-12T17:45:46 | 168,952,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 2.1.1 on 2018-11-16 09:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0003_auto_20181103_1322'),
]
operations = [
migrations.AddField(
model_name='order',
name='confirmed',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
342c17d9683d247865ce5a972ee907442a24a85c | f68c4cf56cf68c4d025a107804c5b2049baf4484 | /eventregistry/tests/TestAnalytics.py | 40a7e46b6c844950c5284806591a54577b7cca0c | [
"MIT"
] | permissive | philip928lin/event-registry-python | 6688aa946e71292518951e67084530f83bf55cf8 | a031cf6103b68f388e3035c2a7c1e311208c3ff3 | refs/heads/master | 2021-05-04T00:17:18.121273 | 2017-12-19T07:50:46 | 2017-12-19T07:50:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | import unittest
import eventregistry as ER
from DataValidator import DataValidator
class TestAnalytics(DataValidator):
def testConcepts(self):
analytics = ER.Analytics(self.er)
annInfo = analytics.annotate("Microsoft released a new version of Windows OS.")
self.assertTrue("annotations" in annInfo, "Annotations were not provided for the given text")
anns = annInfo["annotations"]
self.assertTrue(len(anns) == 2)
self.assertTrue("url" in anns[0])
self.assertTrue("title" in anns[0])
self.assertTrue("lang" in anns[0])
self.assertTrue("secLang" in anns[0])
self.assertTrue("secUrl" in anns[0])
self.assertTrue("secTitle" in anns[0])
self.assertTrue("wgt" in anns[0])
self.assertTrue("wikiDataItemId" in anns[0])
self.assertTrue("adverbs" in annInfo)
self.assertTrue("adjectives" in annInfo)
self.assertTrue("verbs" in annInfo)
self.assertTrue("nouns" in annInfo)
self.assertTrue("ranges" in annInfo)
self.assertTrue("language" in annInfo)
def testCategories(self):
analytics = ER.Analytics(self.er)
cats = analytics.categorize("Microsoft released a new version of Windows OS.")
self.assertTrue("dmoz" in cats)
self.assertTrue("categories" in cats.get("dmoz"))
self.assertTrue("keywords" in cats.get("dmoz"))
cat = cats.get("dmoz").get("categories")[0]
self.assertTrue("label" in cat)
self.assertTrue("score" in cat)
kw = cats.get("dmoz").get("keywords")[0]
self.assertTrue("keyword" in kw)
self.assertTrue("wgt" in kw)
def testLanguage(self):
analytics = ER.Analytics(self.er)
langInfo = analytics.detectLanguage("Microsoft released a new version of Windows OS.")
print langInfo
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestAnalytics)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"[email protected]"
] | |
95b78133c0068f053357affee18b2b8a8cf13311 | 3c89509e8f04fe5b12d6a447edca1de61b670514 | /OIA_Automation/Resuables/test_logging.py | 192c8f768a759cf087fd4e21a458ceb6d42e55dc | [] | no_license | waytoabhay99/OIA_Automation_ | 8413ef913d3b353a3c2fea55a9bf7ebaf51b77eb | 0e2c9046230b81c932d4548fba09cdc865d626a2 | refs/heads/master | 2023-05-21T13:43:25.598755 | 2021-06-09T12:57:30 | 2021-06-09T12:57:30 | 375,355,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import inspect
import logging
def get_logger():
loggerName = inspect.stack()[1][3]
logger = logging.getLogger(loggerName)
fileHandler = logging.FileHandler('../Execution_log/OIA_Admin_test_execution_report_log.log')
formatter = logging.Formatter("%(asctime)s :%(levelname)s : %(name)s : %(message)s")
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler) # filehandler object
logger.setLevel(logging.DEBUG)
return logger
| [
"[email protected]"
] | |
58dd6b5779438026c8c04a401030f1fdf2bd4a76 | 82debe3cc9f8fa33a60f03119f8a9343f959ceed | /sensor_logger.py | 70fc6cbc02449977ec4f1a743210d9120b04caa2 | [] | no_license | robotoilet/sensor_logger | 68bd831a37c2b53307b1bbbb5a57231ce02f97cc | 611375bce89dbed380f2f8b4e14843f5e65f5225 | refs/heads/master | 2021-01-22T11:20:44.202166 | 2016-10-25T22:41:13 | 2016-10-25T22:41:13 | 32,469,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | from datetime import datetime
import os.path
import re
import time
import sensor_config
from transmit import send_data
SENSOR_CFG = sensor_config.CONFIG
def create_logfilepath(datapoint, logdir):
"""
Find the first occurrence of a 10digit unix timestamp in a string,
return a filepath (string) of format "<logdir>/L<timestamp>"
"""
timestamp = re.findall('\d{10}', datapoint)[0]
return os.path.join(logdir, 'L%s' % timestamp)
def tracked(log_function):
"""
Decorator to start writing to a new logfile after max_datapoints
"""
track = {
'count': 0,
'logfile': None,
}
def tracked_log(datapoint, max_datapoints=5, logdir='.'):
lf = track.get('logfile')
if not lf or logdir.split('/') != lf.split('/')[:-1]:
track['logfile'] = create_logfilepath(datapoint, logdir)
track['count'] = 0
log_function(datapoint, track['logfile'])
count = track['count'] + 1
track['count'] = (count < max_datapoints) and count or 0
if not track['count']:
print("closing logfile %s.." % track['logfile'])
os.rename(track['logfile'],
re.sub('L(?=\d{10})', 'C', track['logfile']))
track['logfile'] = None
return tracked_log
@tracked
def log_datapoint(datapoint, logfile):
print('logging datapoint %s to file %s' % (datapoint, logfile))
with open(logfile, 'a') as f:
f.write(datapoint)
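# Illustrative usage sketch (hypothetical sensor name and timestamp, not from the
# original config): each datapoint embeds a 10-digit unix timestamp, so the call
# below would append to "./L1420070400"; after max_datapoints writes the tracker
# renames the file to "./C1420070400", the prefix run() scans for when sending.
#   log_datapoint('(temp1 1420070400 21.5)', max_datapoints=5, logdir='.')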
def run():
sensors = [{'name': sr['name'], 'res': sr['res'],
'kwargs': sr['kwargs'], 'sense': sr['sensor'].setup()}
for sr in SENSOR_CFG['sensors']]
prev = None
logdir = SENSOR_CFG['logdir']
closed_logfiles = (f for f in os.listdir(logdir) if f.startswith('C'))
while True:
ts = datetime.now()
if ts.second == prev:
continue
#if ts.second % SENSOR_CFG['collect-res'] == 0:
# for sr in sensors:
# if ts.second % sr['res'] == 0:
# dp = '(%s %s %s)' % (sr['name'], ts.strftime('%s'),
# sr['sense'](**sr['kwargs']))
# try:
# log_datapoint(dp, SENSOR_CFG['max_dp'], logdir)
# except Exception as e:
# msg = "could not get data for sensor {}, exeption was {}"
# print(msg.format(sr['name'], e))
if ts.second % SENSOR_CFG['send-res'] == 0:
try:
                next_file = next(closed_logfiles)
except StopIteration: # last filelist used? try to create new one
closed_logfiles = (f for f in os.listdir(logdir)
if f.startswith('C'))
try:
                    next_file = next(closed_logfiles)
except StopIteration:
continue # nothing to send
try:
send_data(next_file)
except Exception as e:
msg = "Could not send data, exception was: {}"
print(msg.format(e))
prev = ts.second
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
f147dd355e000a73b9f724fdbe9401418cbdb0b3 | ac9f014b138003ad288732688bf08b085f6b18b6 | /prac_03/password_check.py | d4e1169c887fa0d1a24e24203f59853bc50bda59 | [] | no_license | rdaumuller1/CP1404_practicals | 8b97a78d10861eb5baffd77de8ca8ea19e0d8fd2 | f12b740a0be4382bd32f602885505ca0afcb6e93 | refs/heads/master | 2020-04-30T13:46:10.677487 | 2019-05-31T00:00:13 | 2019-05-31T00:00:13 | 176,868,372 | 0 | 0 | null | 2019-04-04T03:46:07 | 2019-03-21T04:12:33 | Python | UTF-8 | Python | false | false | 249 | py | MIN_LENGTH = 3
def main():
password = get_password()
while len(password) < MIN_LENGTH:
password = get_password()
print(len(password) * '*')
def get_password():
password = input("Password: ")
return password
main()
| [
"[email protected]"
] | |
7b04ca1d2f2e688491f33a4fb76efb298e938f59 | 194848d309f1b5ea1e7749d3560d99f9aa158431 | /plot_and_analyze_the_climatology.py | 6b7197972e5dc486d22f9997b3b936e4b6ccad14 | [] | no_license | siirias/nemo_analysis | 967d7012a1b963e46290b7d83a7d23293f0d168c | d20181750406445a259c2c0de02f5c1eabd51d0d | refs/heads/master | 2023-03-16T08:16:21.595165 | 2023-03-09T16:17:45 | 2023-03-09T16:17:45 | 177,143,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,553 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 14:11:45 2018
@author: siirias
"""
import datetime
import matplotlib as mp
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import netcdf
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import Basemap, shiftgrid, cm
from netCDF4 import Dataset
from smartseahelper import smh
import os
import cmocean
import re
import importlib
import param_sets_for_plotting
months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
param_sets =[\
{ 'var1':'icecon',\
'name_marker':'A005',\
'other_name_marker':'A001',\
'slice_wanted':-1,\
'compare_two':True,\
'output_dir_end':"/tmp/images/ice/",\
'cludge':True\
}
] # actually parameters are set in next line. this is just an example.
importlib.reload(param_sets_for_plotting)
param_sets = param_sets_for_plotting.temperature_param_sets
for param_set in param_sets:
# param_set['name_marker'] = param_set['name_marker'].replace('A','C') # a cludge to quickly change the set.
# param_set['other_name_marker'] = param_set['other_name_marker'].replace('A','C') # a cludge to quickly change the set.
print(param_set)
name_marker=param_set['name_marker'] #this one tells which dataseries is handled.
compare_two=param_set['compare_two']
other_name_marker=param_set['other_name_marker']
land_color = (0.8, 0.83, 0.8)
sea_color = (0.5, 0.5, 0.7) # meaningful only on sea-area where no data present
var1 = param_set['var1'] #'VEL', 'SST', 'SSS', 'votemper', 'vosaline'
slice_wanted = param_set['slice_wanted'] # if 3d grid, which layer to take. if -1, bottommost.
ss=smh()
ss.grid_type='T'
ss.interval='m'
ss.main_data_folder= ss.root_data_in+"/OUTPUT{}/".format(name_marker)
    datadir = ss.root_data_out+param_set['output_dir_end'] # where every output is stored
extra =""
other_extra = ""
cludge = param_set['cludge']
if(not '1' in name_marker):
extra = 'c20v_'
if(not '1' in other_name_marker):
other_extra = 'c20v_'
climatology_file=ss.root_data_out+'derived_data/test/{}climatology_{}_{}_{}.nc'.format(\
extra,name_marker,ss.interval,var1) #this one tells which dataseries is handled.
series_name='climatology{}'.format(name_marker)
if(compare_two):
other_climatology_file=ss.root_data_out+\
'derived_data/test/{}climatology_{}_{}_{}.nc'.\
format(other_extra,other_name_marker,ss.interval,var1) #this one tells which dataseries is handled.
series_name='climatology{}vs{}'.format(name_marker,other_name_marker)
if cludge:
climatology_file=ss.root_data_out+\
'derived_data/test/monthly_climatology_{}_d_icecon.nc'.format(name_marker) #GLUDGE!
other_climatology_file=ss.root_data_out+\
'derived_data/test/monthly_climatology_{}_d_icecon.nc'.format(other_name_marker) #GLUDGE!
def get_bottom(grid):
# grid is supposed to be masked array, Time, D,Lat,Lon
# The idea in this is to shifht the mask one layer up,
# and find the values which are masked in one (and only one) of
# these masks.
full_shape = grid.shape
bottom_layers = np.zeros(( full_shape[0],\
full_shape[2],
full_shape[3]))
bottom_layers = np.ma.masked_array(bottom_layers,False)
mask_roll = np.roll(grid.mask,-1,1) # move mask values one up.
mask_roll[:,-1,:,:] = True # And mark bottom most mask as True.
# This to get bottom values if there are no mask at end
grid.mask = ~(grid.mask ^ mask_roll)
bottom_layers = np.sum(grid,1)
values = np.array(np.sum(~grid.mask,1),bool) # used to get the mask
bottom_layers.mask = ~values
return bottom_layers
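    # Illustrative note on get_bottom (toy single-column mask, assumed values): for a
    # depth mask [False, False, True, True] (two wet cells over land), rolling it up
    # one level and forcing the last entry True gives [False, True, True, True]; the
    # XOR of the two masks is True only at index 1, the deepest wet cell, which is
    # the single unmasked value left for the sum over the depth axis.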
    #Let's try to plot something to start with:
lon_min=16;lat_min=60;lon_max=26.01;lat_max=66.01;
running_number=0
show_contours=True
contour_step = 0.5
plotted_depth=0. #meters
var2 = None #None, 'icecon'
var2_min=0.0
var2_max=1.0
var2_cm='gray'
if var1 in ['VEL']:
var_min=0.0
var_max=0.15
var1_cm=cmocean.cm.speed
ss.grid_type='U' #U,V,T
plotted_depth=-10000. #meters
var2=None
show_contours=False
if var1 in ['SST','votemper']:
var_min=-0.5
var_max=18.0
var1_cm=cmocean.cm.thermal
ss.grid_type='T' #U,V,T
if(compare_two):
var_min=-2.
var_max=2.
var1_cm='coolwarm'
if var1 in ['SSS','vosaline']:
var_min=0.0
var_max=8.0
var1_cm=cmocean.cm.haline
ss.grid_type='T' #U,V,T
if(compare_two):
var_min=-2.
var_max=2.
var1_cm='RdGy'
if var1 in ['SSH']:
var_min=1.0
var_max=6.0
var1_cm=cmocean.cm.haline
ss.grid_type='T' #U,V,T
if var1 in ['icecon']:
var_min=0.0
var_max=1.0
var1_cm=cmocean.cm.ice
ss.grid_type='T' #U,V,T
if(compare_two):
var_min=-1.
var_max=1.
var1_cm='RdGy'
font_size=10.0
resolution='h'
projection='laea'
just_one=False
#first setup the main map:
fig=plt.figure(figsize=(6,6))
if projection in ['laea']:
lat_0=0.5*(lat_min+lat_max)
lon_0=0.5*(lon_min+lon_max)
bmap = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, \
lat_0=lat_0, lon_0=lon_0,resolution = resolution, \
projection=projection,fix_aspect=False) #resolution c,l,i,h,f
elif projection in ['merc','cyl']:
bmap = Basemap(llcrnrlon=lon_min,llcrnrlat=lat_min,urcrnrlon=lon_max,urcrnrlat=lat_max, \
resolution = resolution,
projection=projection,fix_aspect=False) #resolution c,l,i,h,f
bmap.drawmapboundary(fill_color=sea_color, zorder = -10)
bmap.fillcontinents(color=land_color, lake_color=sea_color,zorder = -9)
bmap.drawcoastlines(zorder=21,linewidth=0.5,color='gray')
#bmap.fillcontinents([0.9,0.9,0.9],lake_color=[0.85,0.85,0.85],zorder=20)
bmap.drawparallels(np.arange(lat_min,lat_max,1.),linewidth=1,zorder=50,\
labels=[True,False,False,False],dashes=[1,0],color="#00000020",fontsize=10)
bmap.drawmeridians(np.arange(lon_min,lon_max,2.),linewidth=1,zorder=50,\
labels=[False,False,False,True],dashes=[1,0],color="#00000020",fontsize=10)
is_first=True
data=Dataset(climatology_file)
if(compare_two):
other_data=Dataset(other_climatology_file)
if(var1 in ['SST','SSH','SSS','votemper','vosaline','icecon','ivevolume']):
d=data.variables['{}_mean'.format(var1)][:]
if(len(d.shape)>3): # means this is 3d data, we only want one slice
if(slice_wanted>=0): # Just a regular slice
d = d[:,slice_wanted,:,:]
else: #Now we want the bottom-most
d = get_bottom(d)
if(compare_two):
other_d=other_data.variables['{}_mean'.format(var1)][:]
if(len(other_d.shape)>3): # means this is 3d data, we only want one slice
if(slice_wanted>=0): # Just a regular slice
other_d = other_d[:,slice_wanted,:,:]
else:
other_d = get_bottom(other_d)
#fix to get values fromplaces where one is masked, and the other is not
mask_changed = d.mask ^ other_d.mask
other_d.data[mask_changed & other_d.mask == True] = 0.0
d.data[mask_changed & d.mask == True] = 0.0
d.mask[mask_changed] = False # set the mask value of these off.
other_d.mask[mask_changed] = False # set the mask value of these off.
d=d-other_d
# d=np.ma.masked_where(d==0.0,d)
if(var1 in ['VEL']):
data2=Dataset(ss.main_data_folder+re.sub("_grid_.","_grid_V",f))
if(plotted_depth>=0):
depth_index=np.abs(data.variables['depthu'][:]-plotted_depth).argmin()
d=data.variables['uos'][:,depth_index,:,:]
d2=data2.variables['vos'][:,depth_index,:,:]
        else: #negative plotted depth means bottom.
d=ss.give_bottom_values(data.variables['uos'][:,:,:,:])
d2=data2.variables['vos'][:,:,:]
d=np.ma.masked_where(d==0.0,d)
#this one requires that we take another file too:
d2=np.ma.masked_where(d==0.0,d)
d=np.sqrt(d*d+d2*d2)
data2.close()
if(var2 is not None):
ice_d=data.variables[var2][:]
ice_d=np.ma.masked_where(ice_d<0.2,ice_d)
lons = data.variables['nav_lon'][:]
lats = data.variables['nav_lat'][:]
lats,lons=ss.fix_latslons(lats,lons)
if(ss.interval == 'd'):
times=data.variables['day_of_year'][:].data
else:
times=data.variables['month_of_year'][:].data
times = times[:12] # gludge to get over a bug. figure out later.
data.close()
if compare_two:
other_data.close()
if(just_one):
times=[times[0]]
for time_frame in range(len(times)):
time=time_frame
tmp_lon,tmp_lat=bmap(lons,lats)
#cb=plt.colorbar(fraction=0.027, pad=0.01)
value_range = np.max(d[time_frame,:,:])-np.min(d[time_frame,:,:])
print(time_frame, value_range)
colors_fig=bmap.pcolormesh(tmp_lon,tmp_lat,d[time_frame,:,:],vmin=var_min,vmax=var_max,zorder=-3,cmap=var1_cm)
if is_first:
cb=plt.colorbar()
cb.set_clim(vmin=var_min,vmax=var_max)
cb.ax.tick_params(labelsize=font_size)
is_first=False
if(var2 is not None):
ice_fig=bmap.pcolormesh(tmp_lon,tmp_lat,ice_d[time_frame,:,:],vmin=var2_min,vmax=var2_max,zorder=16,cmap=var2_cm)
if show_contours and value_range>0.0001:
cont_fig=bmap.contour(tmp_lon,tmp_lat,d[time_frame,:,:],\
levels = np.arange(var_min, var_max, contour_step),\
vmin=var_min,vmax=var_max,\
zorder=15,colors='black',linewidths=0.5,alpha=0.5)
cont_labels=plt.clabel(cont_fig,inline=1,fontsize=5,fmt="%1.1f")
if(ss.interval == 'd'):
annotation=plt.annotate("Year day: {}".format(time_frame+1),\
xy=(0.25, 0.95), xycoords='axes fraction',zorder=100)
if(ss.interval == 'm'):
annotation=plt.annotate("{}".format(months[time_frame%12]),\
xy=(0.25, 0.95), xycoords='axes fraction',zorder=100)
layer_info = ""
if(slice_wanted > 0):
layer_info = "_D{}_".format(slice_wanted)
if(slice_wanted<0):
layer_info = "_bottom_"
plt.savefig("{}{}{}{}{:05d}.png".format( datadir,\
var1,\
series_name,\
layer_info,\
running_number),\
facecolor='w',dpi=300)
if(not just_one):
#clean up the changing things, so we don't have to do everything again, just these:
colors_fig.remove()
if(var2 is not None):
ice_fig.remove()
annotation.remove()
if show_contours and value_range > 0.0001:
for i in cont_labels:
i.remove()
for i in cont_fig.collections:
i.remove()
# plt.close(fig)
running_number+=1
print("Image {} done.".format(running_number))
plt.close('all')
| [
"[email protected]"
] | |
19c20999a51c16f5b201f8d6685836bf58b4c980 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnsentenc.py | e85c518a0706e02ee13728df2b638a1d8d85cb7d | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 2,387 | py | ii = [('BentJDO2.py', 3), ('EmerRN.py', 3), ('CookGHP3.py', 13), ('MarrFDI.py', 4), ('RogePAV2.py', 1), ('CoolWHM2.py', 8), ('KembFFF.py', 3), ('GodwWSL2.py', 3), ('ChanWS.py', 3), ('SadlMLP.py', 9), ('FerrSDO3.py', 1), ('WilbRLW.py', 8), ('WilbRLW4.py', 7), ('AubePRP2.py', 4), ('CookGHP.py', 23), ('MartHSI2.py', 2), ('LeakWTI2.py', 2), ('KembFJ1.py', 2), ('WilkJMC3.py', 3), ('WilbRLW5.py', 6), ('PettTHE.py', 3), ('MarrFDI3.py', 2), ('PeckJNG.py', 1), ('AubePRP.py', 7), ('ChalTPW2.py', 6), ('GellWPT.py', 1), ('FitzRNS3.py', 1), ('WilbRLW2.py', 3), ('ClarGE2.py', 44), ('GellWPT2.py', 1), ('WilkJMC2.py', 12), ('CarlTFR.py', 23), ('SeniNSP.py', 9), ('LyttELD.py', 3), ('CoopJBT2.py', 2), ('TalfTAC.py', 5), ('GrimSLE.py', 1), ('RoscTTI3.py', 4), ('AinsWRR3.py', 3), ('CookGHP2.py', 13), ('AdamHMM.py', 4), ('BailJD1.py', 5), ('RoscTTI2.py', 5), ('CoolWHM.py', 3), ('MarrFDI2.py', 1), ('CrokTPS.py', 3), ('ClarGE.py', 60), ('LandWPA.py', 7), ('IrviWVD.py', 4), ('LyelCPG.py', 1), ('GilmCRS.py', 3), ('DaltJMA.py', 15), ('WestJIT2.py', 1), ('DibdTRL2.py', 6), ('CrocDNL.py', 2), ('MedwTAI.py', 5), ('LandWPA2.py', 9), ('WadeJEB.py', 5), ('FerrSDO2.py', 2), ('TalfTIT.py', 2), ('NewmJLP.py', 15), ('GodwWLN.py', 14), ('CoopJBT.py', 3), ('KirbWPW2.py', 2), ('SoutRD2.py', 8), ('MedwTAI2.py', 1), ('BachARE.py', 57), ('SoutRD.py', 9), ('WheeJPT.py', 24), ('MereHHB3.py', 5), ('BailJD3.py', 15), ('MereHHB.py', 1), ('WilkJMC.py', 2), ('HogaGMM.py', 8), ('MartHRW.py', 8), ('MackCNH.py', 3), ('BabbCEM.py', 2), ('FitzRNS4.py', 3), ('CoolWHM3.py', 3), ('DequTKM.py', 2), ('FitzRNS.py', 1), ('BentJRP.py', 36), ('EdgeMHT.py', 2), ('BowrJMM.py', 2), ('LyttELD3.py', 13), ('FerrSDO.py', 1), ('RoscTTI.py', 5), ('ThomGLG.py', 7), ('StorJCC.py', 15), ('KembFJ2.py', 1), ('LewiMJW.py', 12), ('MackCNH2.py', 6), ('BellCHM.py', 2), ('JacoWHI2.py', 4), ('HaliTBC.py', 3), ('WilbRLW3.py', 9), ('AinsWRR2.py', 1), ('MereHHB2.py', 4), ('JacoWHI.py', 3), ('ClarGE3.py', 18), ('RogeSIP.py', 5), ('MartHRW2.py', 8), ('DibdTRL.py', 12), ('FitzRNS2.py', 8), ('HogaGMM2.py', 4), ('MartHSI.py', 8), ('EvarJSP.py', 9), ('DwigTHH.py', 13), ('SadlMLP2.py', 6), ('BowrJMM2.py', 2), ('BowrJMM3.py', 4), ('TaylIF.py', 8), ('WordWYR.py', 1), ('DibdTBR.py', 1), ('ChalTPW.py', 11), ('ThomWEC.py', 10), ('KeigTSS.py', 48), ('KirbWPW.py', 4), ('BentJDO.py', 7), ('ClarGE4.py', 10), ('AdamJOA.py', 1), ('HowiWRL.py', 2)] | [
"[email protected]"
] | |
101894a2704c43da6f94ad22f0bc531725b70904 | 6b2e77aa6a6d03d6fa12eab2ea8ba22aa8d70d4a | /run.py | 0741a664f6d2fee6e1f4a8f96546cf1d06f0e270 | [
"MIT"
] | permissive | francisdbillones/LearnNet | 05aa38fc12bb2f913e4dca58fbdccf5fb3307a11 | 2c1561fc85df853c78e21c6ba0dd10d4f173ee0a | refs/heads/main | 2023-04-29T22:45:56.446266 | 2021-05-17T07:20:05 | 2021-05-17T07:20:05 | 334,654,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from learn_net import app
if __name__ == '__main__':
import os
dev = os.environ.get('DEV')
# if DEV environment variable is set, run in debug mode
if dev:
app.run(debug=True)
else:
from gevent.pywsgi import WSGIServer
server = WSGIServer(('', 5000), app)
server.serve_forever()
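# Illustrative invocation (assumed shell commands, not part of the app itself):
#   DEV=1 python run.py   -> Flask development server with debug/auto-reload
#   python run.py         -> gevent WSGIServer serving the app on port 5000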
| [
"[email protected]"
] | |
9bd11828a0b8e41003f03f87a0792ddb26fea227 | 7e769c53213d5de6220616d825c86cf539c7abab | /rush00/moviemon/moviemon/model/Player.py | 44b7f2d4742536ef316b70c92659d87cc57f7547 | [] | no_license | romanets-s/cisco_rest_api | 473f8a001e36086e37bfedc7d7feff9fe6c11651 | 6304ec9a802a406a81ee96141f6a909c95a5f8e5 | refs/heads/master | 2020-05-16T11:45:46.727550 | 2020-02-25T14:06:20 | 2020-02-25T14:06:20 | 116,568,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | class Player:
def __init__(self, x, y, power=0):
self.x = x
self.y = y
self.power = power
self.boll = 1
self.lucky = 5
self.allMonsters = 0
self.bollHere = 0
self.enemy = {}
| [
"[email protected]"
] | |
55412234dfa65f0d0446c2f25686141214596ab8 | d0869d4a6b782527313700d8167c431fbbd6fd10 | /scidownl/core/updater.py | b8adc631ff3ca8d2d1d86b2f8d6301e5feb64253 | [
"MIT"
] | permissive | Tishacy/SciDownl | 7148cb15c5881fb816cab6653c800897e9fbd592 | 0fe77d2f21d4c840d60a907f2a0682645a5e6fe0 | refs/heads/v1.0 | 2023-04-09T03:41:35.350645 | 2023-04-02T03:17:03 | 2023-04-02T03:17:03 | 180,743,987 | 149 | 40 | MIT | 2023-04-02T03:17:04 | 2019-04-11T08:02:29 | Python | UTF-8 | Python | false | false | 5,157 | py | # -*- coding: utf-8 -*-
"""Implementations of DomainUpdater"""
import re
import string
from typing import Iterable, Union, List
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from bs4 import BeautifulSoup
from .base import DomainUpdater
from ..log import get_logger
from ..config import get_config
from ..db.entities import ScihubUrl
from ..db.service import ScihubUrlService
logger = get_logger()
configs = get_config()
class CrawlingScihubDomainUpdater(DomainUpdater):
"""Updater of Scihub domains by crawling a domain source."""
def __init__(self, domain_source_url: str = None):
super().__init__()
self.service = ScihubUrlService()
# use strings defined in configs.
self.domain_source_url = domain_source_url or configs['scihub.domain.updater.crawl']['scihub_domain_source']
self._domain_url_pattern = configs['scihub.domain.updater.crawl']['scihub_url_pattern']
self._exclude_url_pattern = configs['scihub.domain.updater.crawl']['exclude_url_pattern']
def update_domains(self) -> Union[List, Iterable[ScihubUrl]]:
html = requests.get(self.domain_source_url).text
domain_urls = re.findall(self._domain_url_pattern, html)
# Drop duplicates.
domain_urls = list(set(domain_urls))
# Exclude invalid urls.
available_domain_urls = self._exclude_domain_urls(domain_urls)
logger.info(f"Found {len(available_domain_urls)} valid SciHub domains in total: {available_domain_urls}")
# Save to db.
urls_to_save = [ScihubUrl(url=url) for url in available_domain_urls]
self.service.add_urls(urls_to_save)
logger.info(f"Saved {len(urls_to_save)} SciHub domains to local db.")
return available_domain_urls
def _exclude_domain_urls(self, domain_urls, exclude_url_pattern: str = None):
exclude_url_pattern = exclude_url_pattern or self._exclude_url_pattern
remain_urls = []
for url in domain_urls:
if not re.search(exclude_url_pattern, url):
remain_urls.append(url)
return remain_urls
class SearchScihubDomainUpdater(DomainUpdater):
"""Updater of Scihub domains by brute force search."""
OK_STATUS_CODES = [200]
def __init__(self, title_keyword_pattern: str = None, num_workers: int = None,
timeout: int = None):
super().__init__()
self.service = ScihubUrlService()
# read from configs
self._domain_prefixes = ["http://sci-hub.", "https://sci-hub."]
self._keyword_pattern = title_keyword_pattern or \
configs['scihub.domain.updater.search']['scihub_title_keyword_pattern']
self._num_workers = num_workers or configs['scihub.domain.updater.search'].getint('num_workers')
self._timeout = timeout or configs['scihub.domain.updater.search'].getint('check_timeout')
def update_domains(self) -> Union[List, Iterable[str]]:
search_urls = self._get_search_urls()
logger.info(f"# Search valid SciHub domains from {len(search_urls)} urls")
valid_urls = []
with ThreadPoolExecutor(max_workers=self._num_workers) as executor:
future_to_url = {
executor.submit(self._check_valid_url, url, self._timeout): url
for url in search_urls
}
for future in as_completed(future_to_url):
url = future_to_url[future]
try:
valid = future.result()
if valid:
valid_urls.append(url)
except Exception as exc:
logger.error('%r generated an exception: %s' % (url, exc))
logger.info(f"Found {len(valid_urls)} valid SciHub domains in total: {valid_urls}")
# Save to db.
urls_to_save = [ScihubUrl(url=url) for url in valid_urls]
self.service.add_urls(urls_to_save)
logger.info(f"Saved {len(urls_to_save)} SciHub domains to local db.")
return valid_urls
def _get_search_urls(self):
letters = string.ascii_lowercase
search_urls = []
for first_letter in letters:
for second_letter in letters:
for prefix in self._domain_prefixes:
search_urls.append(prefix + first_letter + second_letter)
return search_urls
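    # Illustrative note: _get_search_urls enumerates every two-letter suffix for both
    # prefixes, i.e. 2 * 26 * 26 = 1352 candidates such as "http://sci-hub.aa" ...
    # "https://sci-hub.zz", which update_domains() then probes concurrently through
    # the thread pool above.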
def _check_valid_url(self, url: str, timeout: int = 60) -> bool:
try:
res = requests.get(url, timeout=timeout)
except Exception as e:
# Cannot connect the specified url, skip it.
return False
if res.status_code not in SearchScihubDomainUpdater.OK_STATUS_CODES:
return False
content = res.content.decode()
soup = BeautifulSoup(content, 'html.parser')
if soup.title is not None and re.search(self._keyword_pattern, soup.title.text) is not None:
logger.info(f"# Found a SciHub domain url: {url}")
return True
return False
scihub_domain_updaters = {
'crawl': CrawlingScihubDomainUpdater,
'search': SearchScihubDomainUpdater
}
| [
"[email protected]"
] | |
03245af4d15f65c90d827859b1a2760c5454a6df | ef415f543a41a45a181c4b24ef8172c6bf669317 | /IPR_Website/Skits/apps.py | af729b72c711201d813bef5f4e14f4fd497b21ce | [] | no_license | ujwalkpl/IPR_Website | 1e6ff77d075b2b6b5a287dc6e5d1c1c8b759cfa0 | 04685f52aa06187a8cb412d4c8d6dd72922b47b4 | refs/heads/master | 2022-04-10T16:30:39.285414 | 2020-03-29T13:00:19 | 2020-03-29T13:00:19 | 222,711,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class SkitsConfig(AppConfig):
name = 'Skits'
| [
"[email protected]"
] | |
d08ee0b6fd6e3be4deec4400e41fd332d03eeee6 | e5de6c8470e1019d1ea8faff8b9d9e8c5207885b | /Iterables and Iterators.py | baf90209ede4be0ef1cac68d2dfc08554913d718 | [] | no_license | gordonmhy/HackerRank-Python | 87233dcc0d1658c1494b75e8cbde98a89db5751d | ba1e1506aa2636ffe0ca97f583355b2b4415c0c9 | refs/heads/main | 2023-06-29T05:57:07.097954 | 2021-08-04T09:46:21 | 2021-08-04T09:46:21 | 392,407,705 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Iterables and Iterators
import itertools
n = int(input())
letters = str(input()).split(' ')
permutations = iter(itertools.permutations(letters, int(input())))
nominator = 0
denominator = 0
i = next(permutations, None)
while i is not None:
if 'a' in i:
nominator += 1
denominator += 1
i = next(permutations, None)
print('{:.3f}'.format(nominator/denominator)) | [
"[email protected]"
] | |
48a368af4aa96cc89abf65e929c329bb1bf267fa | bbee658bffae55ee62381ccba6b0b64c36815643 | /examples/AtmosphericFlight/HypersonicNose/HypersonicNose.py | 89168323515006f491c9178858defdbda6d59c3b | [
"MIT"
] | permissive | regular-oscillator/beluga | c770d23715ebfe26e59422c0a1e6e1e68e8f1615 | 2bda36fddb674b70a72e28178212fb4f56be5cc4 | refs/heads/master | 2020-11-27T22:36:40.294786 | 2020-01-01T23:25:31 | 2020-01-01T23:25:31 | 229,630,669 | 0 | 0 | MIT | 2019-12-22T21:03:15 | 2019-12-22T21:03:14 | null | UTF-8 | Python | false | false | 1,966 | py | """
References
----------
.. [1] Buttazzo, Giuseppe, and Bernhard Kawohl. "On Newton’s problem of minimal resistance."
The Mathematical Intelligencer 15.4 (1993): 7-12.
"""
import beluga
import logging
import matplotlib.pyplot as plt
ocp = beluga.OCP()
# Define independent variables
ocp.independent('l', 'm')
# Define equations of motion
ocp.state('r', '-u', 'm')
# Define controls
ocp.control('u', 'm/s')
# Define constants
ocp.constant('r_0', 1, 'm')
ocp.constant('eps1', 1, '1')
# Define costs
ocp.path_cost('4*r*u**3/(1+u**2)', 'm')
# Define constraints
ocp.constraints() \
.initial('r - r_0', 'm') \
.initial('l - 0', 'm') \
.terminal('r', 'm') \
.terminal('l - 2', 'm')
ocp.path_constraint('u', 'm/s', lower='-5', upper='5', method='utm', activator='eps1')
ocp.scale(m='x', rad=1)
guess_maker = beluga.guess_generator('ones',
start=[1.0], # Starting values for states in order
costate_guess = 0.1,
control_guess=[0.35],
use_control_guess=True
)
continuation_steps = beluga.init_continuation()
continuation_steps.add_step('bisection') \
.num_cases(10, 'log') \
.const('eps1', 2e-1)
beluga.add_logger(logging_level=logging.DEBUG, display_level=logging.INFO)
bvp_solver = beluga.bvp_algorithm('spbvp')
beluga.solve(ocp=ocp,
method='indirect',
optim_options={'control_method': 'icrm', 'analytical_jacobian': True},
bvp_algorithm=bvp_solver,
steps=continuation_steps,
guess_generator=guess_maker,
autoscale=False,
save='indirect_data.blg')
bvp_solver = beluga.bvp_algorithm('Collocation', num_nodes=60)
beluga.solve(ocp=ocp,
method='direct',
bvp_algorithm=bvp_solver,
steps=None,
guess_generator=guess_maker,
autoscale=False,
save='direct_data.blg')
| [
"[email protected]"
] | |
70ea25d148ecf96291a8297391d0fc5c9f5f8b9b | f035389665420ac94118130afa2665cee9e1d681 | /blog/models.py | a991ce291ad8c63dd0fac91d7b6289425738cb0a | [] | no_license | Juanno/gmr_site | 02f5e411784ba357b4585707226b80dd92c7c0aa | 1f918e45f34029ae1754834354065700530e4ea6 | refs/heads/main | 2023-01-11T10:02:34.879713 | 2020-11-08T15:05:28 | 2020-11-08T15:05:28 | 311,087,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | from django.db import models
from tinymce.models import HTMLField
class Articles(models.Model):
titre = models.CharField(max_length=200)
pub_date = models.DateField('date published')
content = HTMLField()
def __str__(self):
return self.titre
class Infos(models.Model):
titre = models.CharField(max_length=200)
pub_date = models.DateField('date published')
content = HTMLField()
INFO_TYPE = [
('gr', 'general'),
('ra', 'randonnees'),
('ma', 'materiel'),
]
type_infos = models.CharField(max_length=30, choices=INFO_TYPE, default='gr')
def __str__(self):
return self.titre
class QuiSommmesNous(models.Model):
titre = models.CharField(max_length=200)
pub_date = models.DateField('date published')
content = HTMLField()
def __str__(self):
return self.titre
class Calendrier(models.Model):
titre = models.CharField(max_length=200)
pub_date = models.DateField('date published')
image = models.ImageField(upload_to="media", null=False)
content = HTMLField()
    # three files, one per group:
# chamoix - marmotte - rando santé
def __str__(self):
return self.titre
# One trips (séjours) entry for summer and one for winter.
# Photo gallery to plan, X photos. | [
"[email protected]"
] | |
0652436f3f2ea25130dee16bb63eab2e47356b04 | 3ba76ce98cdc5578f1ce38fba41da152404224fa | /python/test/grammar_translator/kernelgen/testAccKernel.py | b41d4728c3d8e16fdd1e88175e315b3f37ba7594 | [
"MIT"
] | permissive | DiaosiDev/gpufort | 258a4d3f8e8a8d53428f34f377ad8ff662369a53 | e60e99dfa3b17306ad65a01d56a764aac471eaba | refs/heads/main | 2023-08-14T13:15:08.266532 | 2021-10-05T09:51:23 | 2021-10-05T09:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,607 | py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
import addtoplevelpath
import translator.translator as translator
testdata=[]
testdata.append("""!$acc kernels if(2>3) self num_workers(64) num_gangs(8) async(0) wait(1,2) copy(a,b,c) default(PRESENT) private(a,b) reduction(+:a) detach(aaim,a) collapse(3) bind(a) seq tile(4,4)
do i= 1, N
a(i) = 10
b(i) = 5
end do
!$acc end kernels""")
testdata.append("""!$acc parallel num_gangs(8)
do i= 1, N
a(i) = 10
b(i) = 5
end do
!$acc end parallel""")
testdata.append("""!$acc wait async(0)""")
testdata.append("""!$acc host_data use_device(WS)
do i= 1, N
a(i) = 10
b(i) = 5
end do
!$acc end host_data""")
testdata.append("""!$acc data create(IWKX)
do i= 1, N
a(i) = 10
b(i) = 5
end do
!$acc end data""")
testdata.append("""!$acc enter data copyin(ICELNOD)""")
testdata.append("""!$acc exit data copyout(X)""")
testdata.append("""!$acc loop independent reduction(+:a)""")
testdata.append("""!$acc routine(JACOBI) seq""")
testdata.append("""!$acc loop independent
!$acc& private(DETJ,PNX,PNY,PNZ)
!$acc& private(X1,X2,X3,X4,X5,X6,X7,X8)
!$acc& private(Y1,Y2,Y3,Y4,Y5,Y6,Y7,Y8)
!$acc& private(Z1,Z2,Z3,Z4,Z5,Z6,Z7,Z8)
!$acc& private(NODLOCAL)""")
testdata.append("""!$acc atomic read
x=3
!$acc end atomic""")
testdata.append("""!$acc atomic
do i = 1,N
do j = 1,M
b(indx(i)) = b(indx(i)) + a(i,j)
end do
end do
!$acc end atomic""")
testdata.append("""!$acc update """)
testdata.append("""!$acc serial create(IWKX)
do i= 1, N
a(i) = 10
b(i) = 5
end do
!$acc end serial""")
testdata.append("""!$acc cache (a,b)""")
testdata.append("""!$acc declare link(a)""")
testdata.append("""!$acc kernels
!$acc loop independent gang vector(16)
do j = jS, jE
jsL= indexL(j-1)+1
jeL= indexL(j)
enddo
!$acc end kernels""")
for i in range(len(testdata)):
#print(str(i)+".",accKernels.parseString(testdata[i]))
#results = accKernels.parseString(testdata[i])
#print(str(i)+".",accClauses.parseString(testdata[i]))
#results = accClauses.parseString(testdata[i])
#print(str(i)+".",accConstruct.parseString(testdata[i]))
results = translator.accConstruct.parseString(testdata[i])
print(results)
results[0].printTokens()
print(results[0].c_str()) | [
"[email protected]"
] | |
708696ce2c8856f55c9f5f8af1bc5b00af04bb1a | 2bba023206664e137cc9e02f1f76e8d326019182 | /frame_2D_alg/intra_comp_a.py | 932577d4be2874bfbd85617e2540e8720504ad6a | [
"MIT"
] | permissive | gargoyle-ltd/CogAlg | 316793f9ac3f26a001eb4c91bfff5e5f1fac75cc | cce62a16167db7c7f842c235e8f2a833afd4261b | refs/heads/master | 2021-05-17T02:25:36.672667 | 2020-05-22T13:18:39 | 2020-05-22T13:18:39 | 250,550,869 | 2 | 0 | null | 2020-03-27T14:07:57 | 2020-03-27T14:07:56 | null | UTF-8 | Python | false | false | 17,994 | py | """
Cross-comparison of pixels, angles, or gradients, in 2x2 or 3x3 kernels
"""
import numpy as np
import numpy.ma as ma
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Sobel coefficients to decompose ds into dy and dx:
YCOEFs = np.array([-1, -2, -1, 0, 1, 2, 1, 0])
XCOEFs = np.array([-1, 0, 1, 2, 1, 0, -1, -2])
'''
|--(clockwise)--+ |--(clockwise)--+
YCOEF: -1 -2 -1 ¦ XCOEF: -1 0 1 ¦
0 0 ¦ -2 2 ¦
1 2 1 ¦ -1 0 1 ¦
'''
# ------------------------------------------------------------------------------------
# Functions
def comp_r(dert__, fig, root_fcr):
"""
Cross-comparison of input param (dert[0]) over rng passed from intra_blob.
This fork is selective for blobs with below-average gradient,
where input intensity didn't vary much in shorter-range cross-comparison.
Such input is predictable enough for selective sampling: skipping current
rim derts as kernel-central derts in following comparison kernels.
Skipping forms increasingly sparse output dert__ for greater-range cross-comp, hence
rng (distance between centers of compared derts) increases as 2^n, starting at 0:
rng = 1: 3x3 kernel,
rng = 2: 5x5 kernel,
rng = 4: 9x9 kernel,
...
Due to skipping, configuration of input derts in next-rng kernel will always be 3x3, see:
https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_comp_diagrams.png
"""
i__ = dert__[0] # i is ig if fig else pixel
'''
sparse aligned i__center and i__rim arrays:
'''
i__center = i__[1:-1:2, 1:-1:2]
i__topleft = i__[:-2:2, :-2:2]
i__top = i__[:-2:2, 1:-1:2]
i__topright = i__[:-2:2, 2::2]
i__right = i__[1:-1:2, 2::2]
i__bottomright = i__[2::2, 2::2]
i__bottom = i__[2::2, 1:-1:2]
i__bottomleft = i__[2::2, :-2:2]
i__left = i__[1:-1:2, :-2:2]
if root_fcr: # root fork is comp_r, all params are present in the input:
idy__, idx__, m__ = dert__[[2, 3, 4]] # skip g: recomputed, output for Dert only
dy__ = idy__[1:-1:2, 1:-1:2] # sparse to align with i__center
dx__ = idx__[1:-1:2, 1:-1:2]
m__ = m__[1:-1:2, 1:-1:2]
else: # root fork is comp_g or comp_pixel, initialize sparse derivatives:
dy__ = np.zeros((i__center.shape[0], i__center.shape[1])) # row, column
dx__ = np.zeros((i__center.shape[0], i__center.shape[1]))
m__ = np.zeros((i__center.shape[0], i__center.shape[1]))
if not fig: # compare four diametrically opposed pairs of rim pixels:
dt__ = np.stack((i__topleft - i__bottomright,
i__top - i__bottom,
i__topright - i__bottomleft,
i__right - i__left
))
for d__, YCOEF, XCOEF in zip(dt__, YCOEFs[:4], XCOEFs[:4]):
dy__ += d__ * YCOEF # decompose differences into dy and dx,
dx__ += d__ * XCOEF # accumulate with prior-rng dy, dx
g__ = np.hypot(dy__, dx__) # gradient
'''
inverse match = SAD, more precise measure of variation than g, direction doesn't matter:
(all diagonal derivatives can be imported from prior 2x2 comp)
'''
m__ += (abs(i__center - i__topleft)
+ abs(i__center - i__top)
+ abs(i__center - i__topright)
+ abs(i__center - i__right)
+ abs(i__center - i__bottomright)
+ abs(i__center - i__bottom)
+ abs(i__center - i__bottomleft)
+ abs(i__center - i__left)
)
else: # fig is TRUE, compare angle and then magnitude of 8 center-rim pairs
if not root_fcr:
idy__, idx__ = dert__[[-2, -1]] # root fork is comp_g, not sparse
a__ = [idy__, idx__] / i__ # i = ig
'''
sparse aligned a__center and a__rim arrays:
'''
a__center = a__[:, 1:-1:2, 1:-1:2]
a__topleft = a__[:, :-2:2, :-2:2]
a__top = a__[:, :-2:2, 1:-1: 2]
a__topright = a__[:, :-2:2, 2::2]
a__right = a__[:, 1:-1:2, 2::2]
a__bottomright = a__[:, 2::2, 2::2]
a__bottom = a__[:, 2::2, 1:-1:2]
a__bottomleft = a__[:, 2::2, :-2:2]
a__left = a__[:, 1:-1:2, :-2:2]
'''
8-tuple of differences between center dert angle and rim dert angle:
'''
dat__ = np.stack((angle_diff(a__center, a__topleft, 0),
angle_diff(a__center, a__top, 0),
angle_diff(a__center, a__topright, 0),
angle_diff(a__center, a__right, 0),
angle_diff(a__center, a__bottomright, 0),
angle_diff(a__center, a__bottom, 0),
angle_diff(a__center, a__bottomleft, 0),
angle_diff(a__center, a__left, 0)
))
if root_fcr:
m__, day__, dax__ = dert__[[-4, -2, -1]] # skip ga: recomputed, output for summation only?
m__ = m__[1:-1:2, 1:-1:2] # sparse to align with i__center
day__ = day__[1:-1:2, 1:-1:2]
dax__ = dax__[1:-1:2, 1:-1:2]
else:
m__ = np.zeros((i__center.shape[0], i__center.shape[1])) # row, column
day__ = np.zeros((a__center.shape[0], a__center.shape[1], a__center.shape[2]))
dax__ = np.zeros((a__center.shape[0], a__center.shape[1], a__center.shape[2]))
for dat_, YCOEF, XCOEF in zip(dat__, YCOEFs, XCOEFs):
'''
accumulate in prior-rng (3x3 -> 5x5 -> 9x9) day, dax:
'''
day__ += dat_ * YCOEF # decomposed differences of angle,
            dax__ += dat_ * XCOEF # accumulate in prior-rng day, dax
'''
gradient of angle: not needed in comp_r?
'''
ga__ = np.hypot(np.arctan2(*day__), np.arctan2(*dax__))
'''
accumulate match (cosine similarity) in prior-rng (3x3 -> 5x5 -> 9x9) m:
'''
m__ += (np.minimum(i__center, (i__topleft * dat__[0][1]))
+ np.minimum(i__center, (i__top * dat__[1][1]))
+ np.minimum(i__center, (i__topright * dat__[2][1]))
+ np.minimum(i__center, (i__right * dat__[3][1]))
+ np.minimum(i__center, (i__bottomright * dat__[4][1]))
+ np.minimum(i__center, (i__bottom * dat__[5][1]))
+ np.minimum(i__center, (i__bottomleft * dat__[6][1]))
+ np.minimum(i__center, (i__left * dat__[7][1]))
)
'''
8-tuple of cosine differences per direction:
'''
dt__ = np.stack(((i__center - i__topleft * dat__[0][1]),
(i__center - i__top * dat__[1][1]),
(i__center - i__topright * dat__[2][1]),
(i__center - i__right * dat__[3][1]),
(i__center - i__bottomright * dat__[4][1]),
(i__center - i__bottom * dat__[5][1]),
(i__center - i__bottomleft * dat__[6][1]),
(i__center - i__left * dat__[7][1])
))
for d__, YCOEF, XCOEF in zip(dt__, YCOEFs, XCOEFs):
dy__ += d__ * YCOEF # y-decomposed center-to-rim difference
dx__ += d__ * XCOEF # x-decomposed center-to-rim difference
'''
accumulate in prior-rng (3x3 -> 5x5 -> 9x9) dy, dx
'''
g__ = np.hypot(dy__, dx__)
if fig:
rdert = ma.stack((i__center, g__, dy__, dx__, m__, ga__, *day__, *dax__))
else:
rdert = ma.stack((i__center, g__, dy__, dx__, m__))
'''
return input dert__ with accumulated derivatives,
next comp_r will use full dert # comp_rr
next comp_a will use g__, dy__, dx__ # comp_agr, preserve dy, dx as idy, idx
'''
return rdert
def comp_a(dert__, fga): # cross-comp of a or aga in 2x2 kernels
'''
if fga: dert = (g, gg, dgy, dgx, gm, ?(iga, iday, idax)
else: dert = (i, g, dy, dx, ?m)
'''
dert__ = shape_check(dert__) # remove derts of incomplete kernels
i__, g__, dy__, dx__, = dert__[0:4]
if fga: # input is adert
ga__, day__, dax__ = dert__[4], dert__[5:7], dert__[7:9]
a__ = [day__[0], day__[1], dax__[0], dax__[1]] / ga__
else:
a__ = [dy__, dx__] / g__ # similar to calc_a
# each shifted a in 2x2 kernel
a__topleft = a__[:, :-1, :-1]
a__topright = a__[:, :-1, 1:]
a__botright = a__[:, 1:, 1:]
a__botleft = a__[:, 1:, :-1]
# diagonal angle differences:
sin_da0__, cos_da0__ = angle_diff(a__topleft, a__botright, fga)
sin_da1__, cos_da1__ = angle_diff(a__topright, a__botleft, fga)
ma__ = np.hypot(sin_da0__ + 1, cos_da0__ + 1) + np.hypot(sin_da1__ + 1, cos_da1__ + 1)
# ma = inverse angle match = SAD: covert sin and cos da to 0->2 range
day__ = (-sin_da0__ - sin_da1__), (cos_da0__ + cos_da1__)
# angle change in y, sines are sign-reversed because da0 and da1 are top-down, no reversal in cosines
dax__ = (-sin_da0__ + sin_da1__), (cos_da0__ + cos_da1__)
# angle change in x, positive sign is right-to-left, so only sin_da0__ is sign-reversed
'''
sin(-θ) = -sin(θ), cos(-θ) = cos(θ):
sin(da) = -sin(-da), cos(da) = cos(-da) => (sin(-da), cos(-da)) = (-sin(da), cos(da))
'''
ga__ = np.hypot(np.arctan2(*day__), np.arctan2(*dax__))
# angle gradient, a scalar, to evaluate for comp_aga
adert__ = ma.stack((i__[:-1, :-1], # for summation in Dert
g__[:-1, :-1], # for summation in Dert
dy__[:-1, :-1], # passed on as idy
dx__[:-1, :-1], # passed on as idx # no use for m__[:-1, :-1]?
ga__,
*day__,
*dax__,
ma__,
cos_da0__,
cos_da1__
))
'''
next comp_g will use g, cos_da0__, cos_da1__, dy, dx (passed to comp_rg as idy, idx)
next comp_a will use ga, day, dax # comp_aga
'''
return adert__
def angle_diff(a2, a1, fga): # compare angle_1 to angle_2
if fga:
sin_11, cos_11, sin_12, cos_12 = a1[:] # dyy1, dxy1, dyx1, dxx1 = a1[:]
sin_21, cos_21, sin_22, cos_22 = a2[:] # dyy2, dxy2, dyx2, dxx2 = a2[:]
sin_da = np.subtract(np.multiply([sin_12, cos_12], [sin_21, cos_21]),
np.multiply([sin_11, cos_11], [sin_22, cos_22]))
cos_da = np.add(np.multiply([sin_11, cos_11], [sin_12, cos_12]),
np.multiply([sin_21, cos_21], [sin_22, cos_22]))
# =
# sin_da = ( [sin_12,cos_12] * [sin_21,cos_21]) - ([sin_11,cos_11] * [sin_22,cos_22])
# cos_da = ( [sin_11,cos_11] * [sin_12,cos_12]) + ([sin_21,cos_21] * [sin_22,cos_22])
# right now each sin_da and cos_da is having sin and cos components
# we need to find a way to reduce the sin & cos components into 1 component only
# probably take the sine part of sin_da (since sine = direction info) , and cosine part of cos_da (cosine = magnitude info)?
sin_da = sin_da[0]
cos_da = cos_da[1]
else:
sin_1, cos_1 = a1[:]
sin_2, cos_2 = a2[:]
# sine and cosine of difference between angles:
sin_da = (cos_1 * sin_2) - (sin_1 * cos_2)
        cos_da = (sin_1 * sin_2) + (cos_1 * cos_2)
return ma.array([sin_da, cos_da])
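# Quick self-check of the scalar branch (hypothetical angles, not from the original):
# with a1 = (sin 30, cos 30) and a2 = (sin 90, cos 90) the returned pair is
# (sin 60, cos 60) ~ (0.866, 0.5), i.e. the sine and cosine of the angle between
# the two unit vectors.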
def comp_g(dert__): # add fga if processing in comp_ga is different?
"""
Cross-comp of g or ga in 2x2 kernels, between derts in ma.stack dert__:
input dert = (i, g, dy, dx, ga, dyy, dxy, dyx, dxx, ma, cos_da0, cos_da1)
output dert = (g, gg, dgy, dgx, gm, ga, day, dax, dy, dx)
"""
dert__ = shape_check(dert__) # remove derts of incomplete kernels
g__, cos_da0__, cos_da1__ = dert__[[1, -2, -1]] # top dimension of numpy stack must be a list
cos_da0__ = cos_da0__[:-1, :-1]
cos_da1__ = cos_da1__[:-1, :-1]
g_topleft__ = g__[:-1, :-1]
g_topright__ = g__[:-1, 1:]
g_bottomleft__ = g__[1:, :-1]
g_bottomright__ = g__[1:, 1:]
dgy__ = ((g_bottomleft__ + g_bottomright__) -
(g_topleft__ * cos_da0__ + g_topright__ * cos_da1__))
# y-decomposed cosine difference between gs
dgx__ = ((g_topright__ + g_bottomright__) -
(g_topleft__ * cos_da0__ + g_bottomleft__ * cos_da1__))
# x-decomposed cosine difference between gs
gg__ = np.hypot(dgy__, dgx__) # gradient of gradient
mg0__ = np.minimum(g_topleft__, (g_bottomright__ * cos_da0__)) # g match = min(g, _g*cos(da))
mg1__ = np.minimum(g_topright__, (g_bottomleft__ * cos_da1__))
mg__ = mg0__ + mg1__
gdert = ma.stack((g__[:-1, :-1], # remove last row and column to align with derived params
gg__,
dgy__,
dgx__,
mg__,
dert__[4][:-1, :-1], # ga__
dert__[5][:-1, :-1], # dayy
dert__[6][:-1, :-1], # daxy
dert__[7][:-1, :-1], # dayx
dert__[8][:-1, :-1], # daxx
dert__[9][:-1, :-1], # ma__
dert__[2][:-1, :-1], # idy__
dert__[3][:-1, :-1] # idx__
))
'''
next comp_r will use g, idy, idx # comp_rg
next comp_a will use ga, day, dax # comp_agg, also dgy__, dgx__ as idy, idx?
'''
return gdert
def shape_check(dert__):
# remove derts of 2x2 kernels that are missing some other derts
if dert__[0].shape[0] % 2 != 0:
dert__ = dert__[:, :-1, :]
if dert__[0].shape[1] % 2 != 0:
dert__ = dert__[:, :, :-1]
return dert__
def calc_a(dert__):
"""
Compute vector representation of angle of gradient by normalizing (dy, dx).
Numpy-broadcasted, first dimension of dert__ is a list of parameters: g, dy, dx
Example
-------
>>> dert1 = np.array([0, 5, 3, 4])
>>> a1 = calc_a(dert1)
>>> print(a1)
array([0.6, 0.8])
>>> # 45 degrees angle
>>> dert2 = np.array([0, 450**0.5, 15, 15])
>>> a2 = calc_a(dert2)
>>> print(a2)
array([0.70710678, 0.70710678])
>>> print(np.degrees(np.arctan2(*a2)))
45.0
>>> # -30 (or 330) degrees angle
>>> dert3 = np.array([0, 10, -5, 75**0.5])
>>> a3 = calc_a(dert3)
>>> print(a3)
array([-0.5 , 0.8660254])
>>> print(np.rad2deg(np.arctan2(*a3)))
-29.999999999999996
"""
return dert__[[2, 3]] / dert__[1] # np.array([dy, dx]) / g
''' old comp_a:
day__ = ( Y_COEFFS[0][0] * angle_diff(a__topleft, a__bottomright) +
Y_COEFFS[0][1] * angle_diff(a__topright, a__bottomleft)
)
dax__ = ( X_COEFFS[0][0] * angle_diff(a__topleft, a__bottomright) +
X_COEFFS[0][1] * angle_diff(a__topright, a__bottomleft)
)
2x2 COEFFS:
Y: np.array([-2, -2, 2, 2])
X: np.array([-2, 2, 2, -2])
roll axis to align COEFFs with dat__: move 1st axis to 4th axis,
for broadcasting 4 pairs of 8 directionals with coefficients:
dat__ = np.rollaxis(dat__, 0, 4)
day__ = (dat__ * YCOEFs).sum(axis=-1)
dax__ = (dat__ * XCOEFs).sum(axis=-1)
dt__ = np.rollaxis(dt__, 0, 3)
gy__ += (dt__ * YCOEFs).sum(axis=-1)
gx__ += (dt__ * XCOEFs).sum(axis=-1)
if isinstance(a__, ma.masked_array):
a__.data[a__.mask] = np.nan
a__.mask = ma.nomask
'''
def intra_blob_a(blob, rdn, rng, fig, fca, fcr, fga):
# recursive input rng+ | der+ | angle cross-comp within a blob
# flags: fca: comp angle, fga: comp angle of ga, fig: input is g, fcr: comp over rng+
if fca:
dert__ = comp_a(blob['dert__'], fga) # -> ma sub_blobs evaluate for comp_g | comp_aga:
cluster_derts(blob, dert__, ave * rdn, fca, fcr, fig=0)
for sub_blob in blob['blob_']: # eval intra_blob: if disoriented g: comp_aga, else comp_g
if sub_blob['sign']:
if sub_blob['Dert']['Ma'] > aveB * rdn:
# +Ma -> comp_g -> dert = g, gg, gdy, gdx, gm:
intra_blob(sub_blob, rdn + 1, rng=1, fig=1, fca=0, fcr=0, fga=1) # fga for comp_agg
elif sub_blob['Dert']['Ga'] > aveB * rdn:
# +Ga -> comp_aga -> dert + gaga, ga_day, ga_dax:
intra_blob(sub_blob, rdn + 1, rng=1, fig=1, fca=1, fcr=0, fga=1)
else:
if fcr:
dert__ = comp_r(blob['dert__'], fig, blob['root']['fcr']) # -> m sub_blobs
else:
dert__ = comp_g(blob['dert__']) # -> g sub_blobs:
cluster_derts(blob, dert__, ave * rdn, fca, fcr, fig)
# feedback: root['layer_'] += [[(lL, fig, fcr, rdn, rng, blob['sub_blob_'])]] # 1st sub_layer
for sub_blob in blob['blob_']: # eval intra_blob comp_a | comp_rng if low gradient
if sub_blob['sign']:
if sub_blob['Dert']['M'] > aveB * rdn:
# +M -> comp_r -> dert with accumulated derivatives:
intra_blob(sub_blob, rdn + 1, rng + 1, fig=fig, fca=0, fcr=1, fga=0) # fga for comp_agr
elif sub_blob['Dert']['G'] > aveB * rdn:
# +G -> comp_a -> dert + a, ga=0, day=0, dax=0:
intra_blob(sub_blob, rdn + 1, rng=1, fig=1, fca=1, fcr=0, fga=0)
'''
fca, # flag comp angle, clustering by ga: gradient of angle?
fga, # flag comp angle of ga vs. angle of g
also cluster_derts(crit=gi): abs_gg (no * cos(da)) -> abs_gblobs, no eval by Gi?
with feedback:
for sub_blob in blob['blob_']:
blob['layer_'] += intra_blob(sub_blob, rdn + 1 + 1 / lL, rng, fig, fca) # redundant to sub_blob
''' | [
"[email protected]"
] | |
9db6c4629ec3ba1451b1569a803d3370d83954d7 | d25734585fc616d1a92eeed4ced78796af877d70 | /test/functions/python/helloworld.py | 5d5c288a17dae1f11ee851fef3e3b1f00a4939f5 | [
"MIT"
] | permissive | swiftycloud/swifty | 9c1db8285311f8e9464b7353023dbaf38c501507 | ac9ea8b8919f797c0d92ab70db13459ec0148dd3 | refs/heads/master | 2022-02-21T19:56:41.976335 | 2022-01-31T09:06:43 | 2022-01-31T09:06:43 | 181,493,509 | 36 | 3 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | def Main(req):
return {"message": "hw:python:%s" % req.args['name']}, None
| [
"[email protected]"
] | |
162bbbab50f98d221282c53b09bb56049dae0155 | db221615b13a27b9b34901fe52500f4b7ee58473 | /rememerme/cards/cassa.py | ccff6cbe63bb8d148dbf5141f161011e82c0b118 | [
"Apache-2.0"
] | permissive | rememerme/marketplace-model | ea4ee6a1238682e86bf455bf80ea3aadc0ff6927 | 7a5199037ab863237778849be771edb80ba975fd | refs/heads/master | 2016-09-06T09:47:29.177398 | 2014-01-27T20:56:08 | 2014-01-27T20:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | from django.db import models
'''
Created on Jan 7, 2014
@author: Andrew Oberlin
'''
'''
Model that we can use to get rid of the Django stuff, but still use the model
concept while coding.
'''
class CassaModel(models.Model):
'''
Overriding the default save method to remove Django operation.
This save will do nothing and will not be used.
'''
def save(self):
pass
'''
Updates this user with the values put into the map.
@param other: The map of values to use for updating the model.
'''
def update(self, other):
for attrKey in self.__dict__.keys():
if attrKey in other:
setattr(self, attrKey, other[attrKey])
'''
Overriding the default delete method to remove Django operation.
This delete will do nothing and will not be used.
'''
def delete(self):
pass
class Meta:
app_label = u'friends' | [
"[email protected]"
] | |
50a54e1743e2d479801e432af50f14d10d6acdaf | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2I/2I-3FN_MD_NVT_rerun/set_1.py | 900f01cb309373c1957f494591d94947da0ea1b2 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L2I/MD_NVT_rerun/ti_one-step/2I_3FN/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_1.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
07dec2dc5f395a39cb3c3643e0b760b89b6f9576 | 584bbbd6936222a67ed88bd36446f0f35bc89432 | /rango/migrations/0001_initial.py | 93ae0af67463ef73b8f3889d9f9a990c697de05a | [] | no_license | jianhuayou/tango_with_django_project1 | 2645115af5813cf2ada5e2666dc0a45090d80cbd | fa9916881e2d0baf571fb1f02071d11ac3dc3e4a | refs/heads/master | 2023-03-07T12:36:45.479855 | 2021-02-12T10:24:12 | 2021-02-12T10:24:12 | 335,925,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | # Generated by Django 2.2.17 on 2021-02-04 08:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
],
),
migrations.CreateModel(
name='page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rango.Category')),
],
),
]
| [
"[email protected]"
] | |
69133b3c2ade788942c380de5ecfc040e34c476c | 16dfc690007b3dec2761e80712f83ab3e7a8b33f | /FIO/fusion_io/fpy.py | 47c11fae827752c1d225456ebe6a78d683f5c072 | [] | no_license | usczpw/KORC | b07475e50fd8795bb221f67d73bd007e8baf5232 | 59c2428e02a92c982addb34927163d3805b98ae2 | refs/heads/master | 2023-04-11T22:32:25.647314 | 2021-03-24T02:12:16 | 2021-03-24T02:12:16 | 363,285,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,533 | py | # fpy.py: wrapper functions for the sim_data library to make accessing
# simulation output more pythonic.
#
#
# coded by Christopher Berg Smiet on 18 January 2019
# Edited by Ralf Mackenbach on the 14th of August 2019
# Mesh added by Andreas Kleiner on 20 August 2019
# [email protected]
# [email protected]
# [email protected]
import fio_py
import numpy as np
import h5py
class sim_data:
"""
Class that accesses the fusion-io functions in a more pythonic way.
Invoke an element of the class as follows:
sim = sim_data(filename='../C1.h5')
This creates a simulation object. This object has four attributes:
- Fields
- Time traces
- Mesh
- Diagnostics
- Constants
--Fields--
Fields objects can be created by, for example,
magnetic_field = sim.get_field('B',time=10)
This field can in turn be evaluated using
magnetic_field.evaluate((R,phi,Z)).
!! Fields are given in SI units !!
**Technical info**
A list of all strings allowed to call on fields can be
found in sim.available_fields. This dictionary is made so that
one string contains all information on the type of field
(scalar/vector), and the species. The fields make use of
the sim_data library to interpolate fields in a consistent
way.
-- Time traces--
Time trace objects can be created by, for example,
dt = sim.get_time_traces('dt').
The object dt then has two attributes:
dt.time, this is an array of all the times where dt is evaluated
dt.values, this is an array of all values of dt.
To plot some arbitrary time trace plt.plot(dt.time,dt.values) is enough.
A list of all callable traces can be found by invoking
sim.available_traces.
!! Time series are given in M3D-C1 units !!
**Technical info**
The time traces are read directly from the C1.h5 file. This is different
from how the fields are evaluated (which use the sim_data library).
--Mesh--
Mesh object can be created by, for example
mesh = sim.get_mesh(time=1).
This object has a few attributes:
mesh.nplanes shows how many toroidal planes were used
mesh.elements contains info on the actual locations. The specifics
of this are explained in S. Jardin's 2004 paper. But useful info is:
mesh.elements[:,4] are the R locations of the mesh-points
mesh.elements[:,5] are the Z locations of the mesh-points
**Technical info**
The mesh is read directly from the time_xxx.h5 files.
--Diagnostics--
The diagnostic object can be created by, for example
        diagnostic = sim.get_diagnostic(diagnostic)
The diagnostic object is meant for non-physical parameters such as
        number of iterations, timings for each solve, and time_slice - time correspondences.
        The diagnostic object has two attributes:
        !! Diagnostics are given in M3D-C1 units !!
diagnostic.diagnostic - contains the value of the diagnostic. Can be a simple array (i.e. for
'time slices', or several objects i.e. to see a breakdown for timings)
diagnostic.x_axis - contains the values of the x-axis of the diagnostic. This is alfven times
for 'time slices' and iteration number for 'timings'
To see all available diagnostics, check sim.available_diagnostics
**Technical info**
The diagnostic object is a bit of a hodge-podge of different methods. They all call on the
C1.h5 file, and some make use of string manipulations.
--Constants--
        One subclass containing the constants can be created by,
for example,
constants = sim.get_constants()
In this subclass, stuff like gamma, R0, and the version
number of the M3DC1 simulation are stored.
"""
def __init__(self, filename='C1.h5', filetype='m3dc1', verbose=False, time = 0):
"""
Initializes the fusion-io bindings to a file.
Keyworded arguments:
**filename**
the name of the file which is to be read. Can include path.
**filetype**
the type of file.
**verbose**
if true, gives extra info on file read. the fio source does some printing anyways.
**time**
sets the time variable during initialization. Not necessarily needed as is also set
when reading a field
"""
if filetype == 'm3dc1':
ifiletype = fio_py.FIO_M3DC1_SOURCE
else:
print('Sorry, cannot do that Dave. Only supports M3DC1 for now. \n'
'Please feel free to add to my functionality and add to the sim_data library!')
# Dictionary contains the field abbreviation, the type of field, and the species
self.typedict = {'j' : ('current density', 'vector', None ),
'ni': ('density', 'scalar', 'main ion'),
'ne': ('density', 'scalar', 'electron'),
'v' : ('fluid velocity', 'vector', None ),
'B' : ('magnetic field', 'vector', None ),
'p' : ('total pressure', 'scalar', None ),
'pi': ('pressure', 'scalar', 'main ion'),
'pe': ('pressure', 'scalar', 'electron'),
'alpha': ('alpha', 'scalar', None),
'ti': ('temperature', 'scalar', 'main ion'),
'te': ('temperature', 'scalar', 'electron'),
'A' : ('vector potential', 'vector', None ),
'gradA' : ('grad vector potential', 'tensor', None ),
'E' : ('electric field', 'vector', None )}
self.available_fields = self.typedict
self._all_attrs = h5py.File(filename, 'r')
self._all_attrs_list = list(h5py.File(filename, 'r').keys())
self._all_traces = h5py.File(filename, 'r')['scalars']
self.available_traces = list(self._all_traces.keys())
self.available_traces.extend(['bharmonics','keharmonics'])
self.available_diagnostics = ['slice times','timings','iterations']
self.fields = []
self.seriess = []
self._isrc = fio_py.open_source(ifiletype, filename)
self.hint = fio_py.allocate_hint(self._isrc)
fio_py.get_options(self._isrc)
#self.ntime = fio_py.get_int_parameter(self._isrc, fio_py.FIO_NUM_TIMESLICES)
self.ntime = self._all_attrs.attrs["ntime"]
if time == 'last':
time = self.ntime - 1
print('last time slice = '+str(time))
fio_py.set_int_option(fio_py.FIO_TIMESLICE, int(time))
self._imag = fio_py.get_field(self._isrc, fio_py.FIO_MAGNETIC_FIELD)
self.fields.append(self._imag)
self._cs = fio_py.get_int_parameter(self._isrc, fio_py.FIO_GEOMETRY)
self._iavailable_fields = fio_py.get_available_fields(self._isrc)
#available fields is a dictionary from names to assigned integers
self._available_fields = dict(zip([fio_py.get_field_name(nr) for nr in self._iavailable_fields], self._iavailable_fields))
self.ntor = fio_py.get_int_parameter(self._isrc, fio_py.FIO_TOROIDAL_MODE)
self.period = fio_py.get_real_parameter(self._isrc, fio_py.FIO_PERIOD)
        self.fc = None #used to store flux coordinate object upon calculation of flux coordinates (see class definition below and flux_coordinates.py)
#if time == -1:
# self.timeslice = int(0)
#else:
self.timeslice = int(time)
self.time = fio_py.get_real_field_parameter(self._imag, fio_py.FIO_TIME)
if verbose:
print('Available fields:')
print(list(self._available_fields.keys()))
if self._cs == fio_py.FIO_CYLINDRICAL:
print('Using CYLINDRICAL coordinate system')
else :
print('Using CARTESIAN coordinate system')
print('Number of time slices: ', self.ntime)
print('Toroidal period = ', self.period)
def __del__(self):
print('deleting simulation object and closing {} fields'.format(
len(self.fields)+len(self.seriess)))
for ifield in self.fields:
fio_py.close_field(ifield)
for iseries in self.seriess:
fio_py.close_series(iseries)
fio_py.close_source(self._isrc)
def get_time_traces(self,scalar):
"""
Makes an object containing a time-array, and an
array with the corresponding values of the physical
quantity. Call on them using
trace.time
trace.values
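A short illustrative sketch (the scalar name is a placeholder; valid names
are listed in sim.available_traces):
trace = sim.get_time_traces('some_scalar')
t, vals = trace.time, trace.values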
"""
return self.time_traces(self,scalar)
def get_diagnostic(self, diagnostic):
"""
returns a diagnostics object.
Available diagnostics:
*slice times*
relation between slice numbers and time in M3DC1 Alfven units
*timings*
Clock time needed per iteration of the sim
*iterations*
Number of iterations needed per time advance
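Illustrative example ('t_onestep' is the key used by the diagnostic class
below for the 'timings' case):
timings = sim.get_diagnostic('timings')
steps, walltime = timings.x_axis, timings.diagnostic['t_onestep']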
"""
return self.diagnostic(self,diagnostic)
def get_field(self, field, time):
"""
Returns a field object.
Keyworded arguments:
**field**
Contains the field name. Allowed field names are the keys of mysim.typedict
**time**
Timeslice for field to be read out
"""
return self.field(self, field=self.typedict[field][0], time=time, species=self.typedict[field][2], ftype=self.typedict[field][1])
def get_mesh(self, time=0):
"""
Returns a mesh object.
Keyworded arguments:
**time**
Timeslice for field to be read out
"""
return self.mesh(self, time)
def get_signal(self,filename,signame):
"""
Return a signal object
"""
return self.signal(self,filename,signame)
def get_constants(self):
return self.constants(self)
class field:
"""
Field class: the init sets up the data access, and its bound methods
return to you what you want. ex:
myfield = mysim.get_field('B', time=0)
Allowed strings are:
'j' - current density
'ni' - ion density
'ne' - electron density
'v' - fluid velocity
'B' - magnetic field
'p' - total pressure
'pi' - ion pressure
'pe' - electron pressure
'ti' - ion temperature
'te' - electron temperature
'A' - vector potential
'E' - electric field
Fields are evaluated using:
myfield.evaluate((R, phi, Z))
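Illustrative sketch (the point is arbitrary; evaluate() expects an (R, phi, Z)
tuple and returns a tuple of Nones if the evaluation fails, e.g. outside the domain):
B = mysim.get_field('B', time=0)
bvec = B.evaluate((1.8, 0.0, 0.1))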
"""
def __init__(self, sim_data, field, time, species, ftype):
self.sim_data = sim_data
self.field = field
fio_py.set_int_option(fio_py.FIO_TIMESLICE, time)
if species == 'electron':
fio_py.set_int_option(fio_py.FIO_SPECIES, fio_py.FIO_ELECTRON)
elif species == 'main ion':
fio_py.set_int_option(fio_py.FIO_SPECIES, fio_py.FIO_MAIN_ION)
itype = sim_data._available_fields[field]
self._ifield = fio_py.get_field(sim_data._isrc, itype)
self.sim_data.fields.append(self._ifield)
self.time = fio_py.get_real_field_parameter(self._ifield, fio_py.FIO_TIME)
self.ftype = ftype
def __del__(self):
fio_py.close_field(self._ifield)
def evaluate(self, x):
"""
Evaluates the required field, returns a one-tuple (scalar quantity required)
or a three-tuple (vector) evaluated at the location x
Arguments:
**x**
Three-tuple with the R, phi, z coordinate where the field is to be evaluated.
"""
if self.ftype == 'vector':
try:
return fio_py.eval_vector_field(self._ifield, x, self.sim_data.hint)
except:
return (None,None,None)
if self.ftype == 'tensor':
try:
return fio_py.eval_tensor_field(self._ifield, x, self.sim_data.hint)
except:
return (None,None,None,None,None,None,None,None,None)
elif self.ftype == 'scalar':
try:
return (fio_py.eval_scalar_field(self._ifield, x, self.sim_data.hint),)
except:
return (None,)
else:
print('ftype not recognized!')
class mesh:
"""
Mesh class: init routine calls read_mesh() that reads the mesh from time slice file
and stores the elements in an array. It also reads the output version and nplanes.
The latter variable is needed for plotting 3D meshes.
"""
def __init__(self, sim_data, time=0):
self.sim_data = sim_data
fio_py.set_int_option(fio_py.FIO_TIMESLICE, int(time))
itype = sim_data._available_fields['magnetic field']
#'magnetic field' is just used as a dummy field to read time
self.time = fio_py.get_real_field_parameter(fio_py.get_field(sim_data._isrc, itype), fio_py.FIO_TIME)
self.elements, self.version, self.nplanes = self.read_mesh(sim_data, time)
def read_mesh(self, sim_data, time):
self.sim_data = sim_data
timestr = str(time)
if timestr == '-1':
fname = 'equilibrium.h5'
else:
fname = "time_"+timestr.zfill(3)+'.h5'
f = h5py.File(fname, 'r')
mesh = np.asarray(f['mesh/elements'])
version = f.attrs["version"]
print('Output version: '+str(version))
meshshape = mesh.shape
print('Mesh shape: '+str(meshshape))
dset = f["mesh"]
nplanes = dset.attrs["nplanes"]
return mesh, version, nplanes
class signal:
"""
Signal class: for diagnostic signals from magnetic probes and flux loops
"""
def __init__(self, sim_data, filename, signame):
self.sim_data = sim_data
self._all_attrs = h5py.File(filename, 'r')
self._all_attrs_list = list(h5py.File(filename, 'r').keys())
self.sigvalues = h5py.File(filename, 'r')[signame+'/value']
class time_traces:
"""
time_trace class: the init reads the requested scalar trace (and its time base)
from the C1.h5 file.
"""
def __init__(self, sim_data, scalar):
self.sim_data = sim_data
self.time = self.sim_data._all_traces['time'][()]
if scalar != 'bharmonics' and scalar != 'keharmonics':
self.values = self.sim_data._all_traces[scalar][()]
if scalar == 'bharmonics':
self.values = self.sim_data._all_attrs['bharmonics/bharmonics'][()]
if scalar == 'keharmonics':
self.values = self.sim_data._all_attrs['keharmonics/keharmonics'][()]
class diagnostic:
def __init__(self, sim_data, diagnostic):
self.sim_data = sim_data
# Multiple cases depending on the diagnostic
# Case corresponding to knowing the alfven times of the time slices
if diagnostic == 'slice times':
times = []
for string in sim_data._all_attrs_list:
if string.startswith('time_') == True:
times.append(string)
times.sort()
alfven_times = []
for time in times:
# Retrieve the time attribute
alfven_times.append(sim_data._all_attrs[time].attrs['time'])
for (idx,time) in enumerate(times):
times[idx] = int(time.replace('time_',''))
self.diagnostic = times
self.x_axis = alfven_times
# Case corresponding to the timings of each iteration
# self.diagnostic holds several objects, so you can see a breakdown of the
# timings as well.
# x_axis contains the iteration number
if diagnostic == 'timings':
self.diagnostic = sim_data._all_attrs['timings']
self.x_axis = np.arange(self.diagnostic['t_onestep'].shape[0])+1
# Case corresponding to the iterations of each step
# self.diagnostic is a set of arrays for different iterations
# x_axis contains iteration number
if diagnostic =='iterations':
self.diagnostic = sim_data._all_attrs['kspits/kspits'][()]
self.x_axis = np.arange(np.shape(self.diagnostic[:,0])[0])+1
class constants:
def __init__(self, sim_data):
self.sim_data = sim_data
self.R0 = sim_data._all_attrs.attrs['rzero']
self.B0 = sim_data._all_attrs.attrs['bzero']
self.gamma = sim_data._all_attrs.attrs['gam']
self.amupar = sim_data._all_attrs.attrs['amupar']
self.version = sim_data._all_attrs.attrs['version']
self.numvar = sim_data._all_attrs.attrs['numvar']
self.itor = sim_data._all_attrs.attrs['itor']
self.is3D = sim_data._all_attrs.attrs['3d']
class flux_coordinates:
"""
Class that represents a flux surface coordinate system, e.g. PEST, Boozer or Hamada coordinates.
"""
def __init__(self, m,n,rpath,zpath,axis,omega,psi,psin,period,theta,jac,q,area,dV,fcoords,V,phi,itor,r0,current,dpsi_dpsin):
"""
Initializes the flux_coordinates.
"""
self.m = m
self.n = n
self.rpath = rpath
self.zpath = zpath
self.rma = axis[0]
self.zma = axis[1]
self.omega = omega
self.psi = psi
self.psi_norm = psin
self.flux_pol = -period*(psi-psi[0])
self.theta = theta
self.j = jac
self.q = q
self.area = area
self.dV_dchi = dV
self.fcoords = fcoords
#self.pest =
#self.boozer =
#self.hamada =
self.V = V
self.flux_tor = phi
self.phi_norm = phi/phi[n-1]
self.rho = np.sqrt(phi/phi[n-1])
self.period = period
self.itor = itor
self.r0 = r0
self.current = current
self.dpsi_dchi = dpsi_dpsin
| [
"[email protected]"
] | |
729720d4d212e2b7d382262b644bcbadcc9816a3 | 555b2a10e3c788704b876a2885a030af94852a19 | /Vertex.py | abcbf0a80d8469e9ba4da15e35966981db2609b8 | [] | no_license | swetlika/Python-Web-Scraper | 196274e0355144ff680e3bad840a535db4d59b17 | 880151544b8facc2677ba46a350188b2d4820df6 | refs/heads/master | 2021-09-01T11:41:54.161236 | 2017-12-26T19:10:00 | 2017-12-26T19:10:00 | 115,448,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | """
A vertex of the graph represents a single node (an actor or a movie). It holds information
about the actor/movie, including the name, info (the release year for a movie, or the age
for an actor), and the type, which is True for a movie and False for an actor.
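Illustrative usage (the names and values below are made up):
movie = Vertex("The Matrix", 1999, True)
actor = Vertex("Keanu Reeves", 56, False)
actor.add_neighbor(movie, weight=1)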
"""
class Vertex:
def __init__(self, node, info, type_is_movie):
self.id = node
self.type = type_is_movie
# if vertex is a movie, info = year the movie was released
# if vertex is an actor, info = age of actor
self.info = info
self.neighbors = {}
def __str__(self):
return str(self.id) + ' adjacent: ' + str([x.id for x in self.neighbors])
# while adding an edge between a vertex and another, add the other one to this vertex's neighbor list
# edges are weighted
def add_neighbor(self, neighbor, weight=0):
self.neighbors[neighbor] = weight
# returns a list of all the adjacent vertices of the current
def get_neighbors(self):
return self.neighbors.keys()
# returns movie title if type movie
# returns actor name if type actor
def get_id(self):
return self.id
# returns the weight of the edge
def get_weight(self, neighbor):
return self.neighbors[neighbor]
# returns release year if movie
# returns age of actor if actor
def get_info(self):
return self.info
# returns True if type movie
# returns False if type actor
def get_type(self):
return self.type | [
"[email protected]"
] | |
dd97e1f73c7a3bd0e9ca1d944071be2ad3b1bab3 | 8b12560a1792ef7a15453a603dc038da3ee3eb4d | /server/optimization.py | 350b2e9321397f166e61c5d99c46974f1fd49507 | [] | no_license | LoveIace/GASI | edd1cee32f1fe4f368a94c1012bf3105ed046965 | 363c84f02f3bfc3cd61e5b1ca7c4edc559e0075a | refs/heads/master | 2023-01-28T02:33:10.455472 | 2020-06-07T12:11:41 | 2020-06-07T12:11:41 | 246,835,683 | 0 | 0 | null | 2023-01-06T17:21:32 | 2020-03-12T13:02:27 | Python | UTF-8 | Python | false | false | 4,464 | py | import pandas
from selection import roulette, tournament, uniform
from numpy import exp
from copy import copy
def firefly_algo(problem, population_size = 20, iteration_ceiling = 50, alpha = 1.5, beta = 0.6, beta_min = 0.2, gamma = 0.005, delta=0.97, out_path='../documentation/FA_out.csv'):
data = []
iter_count = 1
fireflies = problem.fireflies(population_size)
for firefly in fireflies:
firefly.brightness = problem.evaluate(firefly.position)
data.append([iter_count] + firefly.values())
# find lowest brightness
min_brightness = min(fireflies, key=lambda f: f.brightness).brightness
# iterate
while iter_count <= iteration_ceiling:
for firefly in fireflies:
moved = False
for mate in fireflies:
# don't compare to itself
if firefly == mate:
continue
# compute light intensity based on distance
firefly_intensity = firefly.brightness + abs(min_brightness)
mate_intensity = (mate.brightness + abs(min_brightness)) * exp(-gamma * firefly.distance(mate) ** 2)
if firefly_intensity < mate_intensity:
# move firefly
firefly.move_towards(mate, alpha, beta, gamma)
# update brightness
firefly.brightness = problem.evaluate(firefly.position)
# update min_brightness
if firefly.brightness < min_brightness:
min_brightness = firefly.brightness
moved = True
# if firefly hasn't moved, do random local search
# if not moved:
# firefly.random_walk(alpha)
data.append([iter_count] + firefly.values())
best = max(fireflies, key=lambda f: f.brightness)
if problem.is_solved(best.brightness):
iter_count += 1
break  # stop early; the collected data is still exported and returned below
# best firefly searches locally
best.random_walk(alpha)
# update alpha value, increase iteration counter
alpha *= delta
iter_count += 1
if out_path is not False:
print("Exporting dataset to", out_path,"...")
df = pandas.DataFrame(data)
df.to_csv(out_path, index=False, header=['generation']+problem.values()+['fitness'])
print("Dataset exported")
return pandas.DataFrame(data), data, iter_count-1
def genetic_algo(problem, population_size = 50, generation_ceiling = 25, select = tournament, mutation_rate = 0.25, elitism = 0.02, out_path='../documentation/GA_out.csv'):
population = problem.population(population_size)
generation_num = 1
data = []
# iterate generations
while generation_num <= generation_ceiling:
min_val = float('inf')
# evaluate fitness of each individual in current population, add to dataset
for individual in population:
individual.fitness = problem.evaluate(individual.genotype)
data.append([generation_num] + individual.values())
if individual.fitness is None:
continue
# move lower boundary
if individual.fitness < min_val:
min_val = individual.fitness
# sort population by fitness
population.sort(key=lambda individual: individual.fitness, reverse=True)
# if population contains the problem solution break cycle
if problem.is_solved(population[0].fitness):
generation_num+=1
break
# account for elitism
new_generation = [
copy(population[i])
for i in range(int(elitism * population_size))
]
# select 2 parents, create offspring, repeat until new generation is complete
selector = select(population)
while len(new_generation) < population_size:
new_generation.append(
next(selector).crossover(next(selector), mutation_rate)
)
# introduce new population
population = new_generation[:]
generation_num+=1
if out_path is not False:
print("Exporting dataset to", out_path,"...")
df = pandas.DataFrame(data)
df.to_csv(out_path, index=False, header=['generation']+problem.df_headers()+['fitness'])
print("Dataset exported")
return pandas.DataFrame(data), data, generation_num-1 | [
"[email protected]"
] | |
3f7954d3be9e18502554f59eebb8efc3ed37feaa | 8a2f62f9ea9c6520b2cfa5a94967eb824e92bbd6 | /src/server.py | f6fd66516bed87adf955b100bdf2dffab03bcc00 | [
"MIT"
] | permissive | zarkle/sockets | 66a8654904e767df6cc1da47cb99bad8569a17bb | b025ac208830bb7a6b686d686b15482888321676 | refs/heads/master | 2020-03-08T00:42:38.897980 | 2018-04-03T02:47:37 | 2018-04-03T02:47:37 | 127,812,073 | 0 | 0 | MIT | 2018-04-03T02:47:37 | 2018-04-02T21:01:59 | null | UTF-8 | Python | false | false | 1,121 | py | from datetime import datetime
import socket
sock = socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
PORT = 3000
address = ('127.0.0.1', PORT)
sock.bind(address)
try:
sock.listen(1)
print('--- Starting server on port {} at {} ---'.format(PORT, datetime.now().strftime('%H:%M:%S %d-%m-%y')))
conn, addr = sock.accept()
buffer_length = 8
message_complete = False
message = b''
while not message_complete:
part = conn.recv(buffer_length)
message += part
if len(part) < buffer_length:
break
message = message.decode('utf8')
print('{} Echoed: {}'.format(datetime.now().strftime('%H:%M:%S %d-%m-%y'), message))
conn.sendall(message.encode('utf8'))
except KeyboardInterrupt:
try:
conn.close()
except NameError:
pass
sock.close()
print('--- Stopping server on port {} at {} ---'.format(PORT, datetime.now().strftime('%H:%M:%S %d-%m-%y')))
conn.close()
sock.close()
print('--- Stopping server on port {} at {} ---'.format(PORT, datetime.now().strftime('%H:%M:%S %d-%m-%y')))
| [
"[email protected]"
] | |
f0944519b2dcc61fa45505a45074964dab08301f | e90e83dea550fd2e36f2c20bf9c0f2354bfc1778 | /movies/settings.py | 1e7e6bb232b52a6d0eca12429af8490a747b764d | [] | no_license | vaishakh1990/MovieApp | efa0b66acf17ca3f8eda3c7674d47bda73dc4b04 | 9d372ae1a453135c14ebfeec5f81da98da7454f6 | refs/heads/master | 2022-12-15T13:28:05.550344 | 2019-05-30T05:01:30 | 2019-05-30T05:01:30 | 188,631,851 | 0 | 0 | null | 2022-12-08T05:11:43 | 2019-05-26T02:29:11 | Python | UTF-8 | Python | false | false | 3,366 | py | """
Django settings for movies project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm&m1zai8!#hj-6=_()g^35vkxkgu^aur67)ljp&t5x=06ahs7b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'users.apps.UsersConfig',
'movieapp.apps.MovieappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'movies.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'movies.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
OMDB_URL = 'http://www.omdbapi.com/'
OMDB_API_KEY = 'a43a80f'
LOGIN_REDIRECT_URL = 'movies-search'
LOGIN_URL = 'login'
| [
"[email protected]"
] | |
7cdbdc2209859155ca0f435744e95486cb3840ac | 949a91a5fbe7b1f8968a68d86657503a75690e99 | /cloud_native_app/microservices/b/b.py | 3f7370a6ec7655788c8d45c3d65fb50912ea0f4b | [] | no_license | Jeanbouvatt/crepeausucre | 1114f7deb62020e7cca59a10da89538b5fec6fef | b3df534792e1fd1ecc980539f3524b85f30aef33 | refs/heads/master | 2021-01-12T01:50:40.943863 | 2017-01-26T18:00:10 | 2017-01-26T18:00:10 | 78,437,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Service B """
import base64
import logging
from logging.handlers import RotatingFileHandler
import pprint
import time
import sys
import os
from flask import Flask
from flask import jsonify
from flask import request
import config
# Initialise Flask
app = Flask(__name__)
app.debug = True
# Affect app logger to a global variable so logger can be used elsewhere.
config.logger = app.logger
@app.route("/get_button/<id>")
def get_button(id):
try:
config.logger.info("*** Start processing id %s ***", id)
data = {"msg": "ok", "html":config.b.conf_file.get_w_protocol()+"://"+config.b.conf_file.get_w_hostname()+\
":" + config.b.conf_file.get_w_port() + "/" + config.b.conf_file.get_w_route() + "/" + str(id)}
resp = jsonify(data)
resp.status_code = 200
config.logger.info("*** End processing id %s ***", id)
add_headers(resp)
return resp
except:
data = {"msg": "bug"}
resp = jsonify(data)
resp.status_code = 500
return resp
@app.route("/shutdown", methods=["POST"])
def shutdown():
"""Shutdown server"""
shutdown_server()
config.logger.info("Stopping %s...", config.b.NAME)
return "Server shutting down..."
@app.route("/", methods=["GET"])
def api_root():
"""Root url, provide service name and version"""
data = {
"Service": config.b.NAME,
"Version": config.b.VERSION
}
resp = jsonify(data)
resp.status_code = 200
resp.headers["AuthorSite"] = "https://github.com/Jeanbouvatt/crepeausucre"
add_headers(resp)
return resp
def shutdown_server():
"""shutdown server"""
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
def configure_logger(logger, logfile):
"""Configure logger"""
formatter = logging.Formatter(
"%(asctime)s :: %(levelname)s :: %(message)s")
file_handler = RotatingFileHandler(logfile, "a", 1000000, 1)
# Add logger to file
if (config.b.conf_file.get_b_debug().title() == 'True'):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def add_headers(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers',
'Content-Type,Authorization')
if __name__ == "__main__":
# Vars
app_logfile = "b.log"
# Change diretory to script one
try:
os.chdir(os.path.dirname(sys.argv[0]))
except FileNotFoundError:
pass
# Define a PrettyPrinter for debugging.
pp = pprint.PrettyPrinter(indent=4)
# Initialise apps
config.initialise_b()
# Configure Flask logger
configure_logger(app.logger, app_logfile)
config.logger.info("Starting %s", config.b.NAME)
app.run(port=int(config.b.conf_file.get_b_port()), host='0.0.0.0')
| [
"[email protected]"
] | |
301cc7c35e53e0f27f8e1983cabc3ed68f3b0eb7 | 58cc56aebd1d0a23a5bdd531774c139a74b58ea7 | /cl_meats/colors.py | aa07166269eb19d468faabeeaff1803ef1030f8a | [] | no_license | ashleygwilliams/cl-meats | 1fb36b9e5fe6e739b9468eb0e38da7de35ccab3f | 8ec2aa736cd9ed63783b365ba39e049fe0038665 | refs/heads/master | 2020-12-11T09:08:44.856784 | 2013-11-28T01:23:12 | 2013-11-28T01:23:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from __future__ import print_function
class pretty_output():
'''
Context manager for pretty terminal prints
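Illustrative usage (style names must match keys of self.opts, case-insensitively):
with pretty_output('BOLD', 'FG_RED') as out:
    out.write('hello')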
'''
def __init__(self, *attr):
self.END = '0e8ed89a-47ba-4cdb-938e-b8af8e084d5c'
self.ALL_OFF = '\033[0m'
self.attributes = attr
self.opts = dict(
BOLD = '\033[1m',
UNDERSCORE = '\033[4m',
BLINK = '\033[5m',
REVERSE = '\033[7m',
CONCEALED = '\033[7m',
FG_BLACK = '\033[30m',
FG_RED = '\033[31m',
FG_GREEN = '\033[32m',
FG_YELLOW = '\033[33m',
FG_BLUE = '\033[34m',
FG_MAGENTA = '\033[35m',
FG_CYAN = '\033[36m',
FG_WHITE = '\033[37m',
BG_BLACK = '\033[40m',
BG_RED = '\033[41m',
BG_GREEN = '\033[42m',
BG_YELLOW = '\033[43m',
BG_BLUE = '\033[44m',
BG_MAGENTA = '\033[45m',
BG_CYAN = '\033[46m',
BG_WHITE = '\033[47m'
)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def write(self, msg):
style = ''
for a in self.attributes:
a = a.upper()
if a in self.opts.keys():
style += self.opts[a]
else:
raise ValueError("No Such Style!")
print('{}{}{}'.format(style, msg.replace(self.END, self.ALL_OFF + style), self.ALL_OFF))
| [
"[email protected]"
] | |
e87d4ea35bb7cf4d21fc6a18af3ecc493ddf16fd | 5f5813eedd7984410ebfe71ccd7d288fccfc8475 | /towersOfHanoi.py | d36ba7a853342d4b5f927c01cc868e013275a4be | [] | no_license | helios2k6/python3_interview_questions | 8a1956d013e184f6c5c12331d57e0beb6760ee19 | d0a0334c090206083d8915e8c3ebadb86d35ad12 | refs/heads/master | 2020-04-16T09:07:39.267659 | 2019-09-14T16:45:22 | 2019-09-14T16:45:22 | 165,451,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def th(n, start, aux, dest):
if n > 0:
th(n - 1, start, dest, aux)
dest.append(start.pop())
th(n - 1, aux, start, dest)
def test1():
start = [3, 2, 1]
aux = []
dest = []
th(3, start, aux, dest)
print(start)
print(aux)
print(dest)
test1() | [
"[email protected]"
] | |
f6c1c585e692663e47ba39c37d0b18e23dc3bee6 | e05c284894322f42eeb4020509f43aee7ad2b503 | /pysync.py | 19582785128ccb31df4c48c417ca54a649adc73e | [
"MIT"
] | permissive | plinecom/pysync | 27426153bc125d50a66d5376201bda3481ec6b00 | c0b5c5ccb38b214be2b5cf5d4686eabdbadd8553 | refs/heads/master | 2021-01-19T06:10:20.660240 | 2016-06-10T08:12:43 | 2016-06-10T08:12:43 | 60,820,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,044 | py | # -*- coding: utf-8 -*-
import sys
import os
import os.path
import time
import shutil
def listup(srcRoot, destRoot, oldRoot, relativePath):
srcPath = os.path.join(srcRoot, relativePath)
destPath = os.path.join(destRoot, relativePath)
oldPath = os.path.join(oldRoot, relativePath)
# System::String ^ srcPath = System::IO::Path::Combine(srcRoot, relativePath);
# array < System::String ^ > ^ srcDirItems = System::IO::Directory::GetDirectories(srcPath);
items = os.listdir(srcPath)
dir_list=[]
symlink_list=[]
file_list=[]
for item in items:
item_path = os.path.join(srcPath, item)
if os.path.isdir(item_path):
dir_list.append(item)
elif os.path.islink(item_path):
symlink_list.append(item)
elif os.path.isfile(item_path):
file_list.append(item)
# print dir_list
# print file_list
for directory in dir_list:
src_dir = os.path.join(srcPath, directory)
dest_dir = os.path.join(destPath, directory)
print src_dir
print u"->"+dest_dir
# exception?
os.makedirs(dest_dir)
listup(srcRoot, destRoot, oldRoot, os.path.join(relativePath, directory))
for file_item in file_list:
hardlinked = False
src_file = os.path.join(srcPath, file_item)
dest_file = os.path.join(destPath, file_item)
old_file = os.path.join(oldPath, file_item)
print src_file
print u"->" + dest_file
if os.path.exists(old_file):
old_file_size = os.path.getsize(old_file)
src_file_size = os.path.getsize(src_file)
# the old dest file has (nearly) the same timestamp and the same size
if old_file_size == src_file_size:
old_file_last_write_time = time.gmtime(os.path.getmtime(old_file))
src_file_last_write_time = time.gmtime(os.path.getmtime(src_file))
# print old_file_last_write_time
# print src_file_last_write_time
if old_file_last_write_time.tm_year == src_file_last_write_time.tm_year \
and old_file_last_write_time.tm_mon == src_file_last_write_time.tm_mon \
and old_file_last_write_time.tm_mday == src_file_last_write_time.tm_mday \
and old_file_last_write_time.tm_hour == src_file_last_write_time.tm_hour \
and old_file_last_write_time.tm_min == src_file_last_write_time.tm_min \
and old_file_last_write_time.tm_sec == src_file_last_write_time.tm_sec:
success = True
try:
os.link(old_file, dest_file)
except IOError:
success = False
if success:
print "HLinked"
hardlinked = True
if not hardlinked:
shutil.copy2(src_file, dest_file)
"""
array < System::String ^ > ^ srcFileItems = System::IO::Directory::GetFiles(srcPath);
for (int i = 0; i < srcFileItems->Length; i + +){
System::String ^ srcItem = srcFileItems[i];
System::String ^ filename = System::IO::Path::GetFileName(srcItem);
System::String ^ destItem = System::IO::Path::Combine(destRoot, relativePath, filename);
System::String ^ oldItem = System::IO::Path::Combine(oldRoot, relativePath, filename);
// It is a file. Check whether a hard link is possible; if not, copy it.
bool hardlinked = false;
if (System: :IO::File::Exists(oldItem)){
if (System: : IO::File::Exists(oldItem)){
// the old dest has (nearly) the same date and the same size
System::IO::FileInfo ^ oldfi = gcnew
System::IO::FileInfo(oldItem);
System::IO::FileInfo ^ srcfi = gcnew
System::IO::FileInfo(srcItem);
if (oldfi->Length == srcfi->Length){
if (oldfi->LastWriteTime.Year == srcfi->LastWriteTime.Year
& & oldfi->LastWriteTime.Month == srcfi->LastWriteTime.Month
& & oldfi->LastWriteTime.Day == srcfi->LastWriteTime.Day
& & oldfi->LastWriteTime.Hour == srcfi->LastWriteTime.Hour
& & oldfi->LastWriteTime.Minute == srcfi->LastWriteTime.Minute
& & oldfi->LastWriteTime.Second == srcfi->LastWriteTime.Secon)
{
ATL::CString
atlOldItem(oldItem);
ATL::CString
atlDestItem(destItem);
BOOL success =::CreateHardLink(atlDestItem, atlOldItem, NULL);
if (success){
hardlinked = true;
// System::Console::WriteLine(L"HLinked");
}
else{
System::Console::WriteLine(L"Error:HLink");
System::Console::WriteLine(destItem);
}
}
}
}
}
if (!hardlinked){
// System::Console::WriteLine(L"Copy");
try{
System::IO::File::Copy(srcItem, destItem, true);
}catch(System::Exception ^ e){
System::Console::WriteLine(L"An error occurred: '{0}'", e);
}
}
}
}
"""
if __name__ == "__main__":
srcRoot = sys.argv[1] # backup source
oldRoot = sys.argv[2] # previous (old) backup
destRoot = sys.argv[3] # backup destination
listup(srcRoot, destRoot, oldRoot, u"")
| [
"[email protected]"
] | |
a0689170300c9ac5c0f3fd4e0774d82d6a4f2a22 | e89d6b85b8e113cdac4a10f42a5ba9f40f807600 | /crud/models.py | 0ed369d27394374729951d046291c4f0063659a4 | [] | no_license | JuanVicenteVazquezG/crud-django-project | 5c951b2a371d4c2b4568c13a610fcde75b211e1e | d0c9ebb8630ad4467c4fccc3a2abcd4ad3cc7111 | refs/heads/main | 2023-03-05T15:15:15.097269 | 2021-02-14T20:13:15 | 2021-02-14T20:13:15 | 338,376,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | from django.db import models
"""
Author (name, email)
Article (authors, title, content)
"""
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length=120)
content = models.TextField()
public = models.BooleanField()
# stores the date when the record is created; set only once, at creation time
created_at = models.DateTimeField(auto_now_add=True)
# stores the date every time the record is updated
update_at = models.DateTimeField(auto_now=True)
class Author(models.Model):
nombre = models.CharField(max_length=50)
email = models.EmailField()
# stores the date when the record is created; set only once, at creation time
created_at = models.DateTimeField(auto_now_add=True)
# stores the date every time the record is updated
update_at = models.DateTimeField(auto_now=True)
| [
"[email protected]"
] | |
29da6b4856f275e645c98bc770853337711e82cc | aa5e5a5e3aa1729d9dc5af610890ec6bc69eb137 | /SunPolitcs/SunPolitcs/startproject.py | 8575e5c94cba0359e445f144661f9f61bcfaf195 | [] | no_license | quqingsong/MyScrapy | 2d3659b09436631fa4f58d65da74eae9cd65eed9 | bc1e5094d0e6a1b9ac596297d27fb810e4f67158 | refs/heads/master | 2022-12-31T18:43:42.952234 | 2020-10-24T05:41:16 | 2020-10-24T05:41:16 | 306,808,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :startproject.py
# @Time :2020/5/17 19:37
# @Author :青松
from scrapy import cmdline
cmdline.execute(["scrapy","crawl","sunpolitics","-o","1.csv"]) | [
"[email protected]"
] | |
8f0a3f1e893a6f042c0b8df346a95d4eb0c54193 | 57a8e6497effdda6be198562a4ba84b1660d9d57 | /first_NN.py | ac2a30c27721d08ea6dd7af4f408d772842727c2 | [] | no_license | frankpennchen/First_Tensorflow | 47605fe5390275fdda55824bcde42765c176053a | 9db1317888930fab8f41b3891960b834f81ba4c6 | refs/heads/master | 2020-03-28T19:43:53.916883 | 2018-09-16T14:35:17 | 2018-09-16T14:35:17 | 149,004,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,885 | py | # -*- coding: utf-8 -*-
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v4 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
from init_utils import load_dataset
np.random.seed(1)
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
### START CODE HERE ### (≈ 1 line of code)
Z = np.dot(W,A)+b
### END CODE HERE ###
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)  # cache must hold (A_prev, W, b) for linear_backward
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)  # cache must hold (A_prev, W, b) for linear_backward
A, activation_cache = relu(Z)
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
### START CODE HERE ### (≈ 2 lines of code)
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "relu")
caches.append(cache)
### END CODE HERE ###
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
### START CODE HERE ### (≈ 2 lines of code)
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
caches.append(cache)
### END CODE HERE ###
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
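# cross-entropy: cost = -(1/m) * sum_i [ y_i*log(a_i) + (1-y_i)*log(1-a_i) ];
# np.inner over the (1, m) row vectors below performs the sum over examples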
cost = -(np.inner(np.log(AL),Y)+np.inner(np.log(1.0-AL),1.0-Y))/float(m)
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
### START CODE HERE ### (≈ 3 lines of code)
dW = np.dot(dZ,A_prev.T)/float(m)
db = np.sum(dZ,axis=1,keepdims=True)/float(m)
dA_prev = np.dot(W.T,dZ)
### END CODE HERE ###
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
### START CODE HERE ### (≈ 2 lines of code)
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
### END CODE HERE ###
elif activation == "sigmoid":
### START CODE HERE ### (≈ 2 lines of code)
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
### END CODE HERE ###
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
### START CODE HERE ### (1 line of code)
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
### END CODE HERE ###
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "dAL, current_cache". Outputs: "grads["dAL-1"], grads["dWL"], grads["dbL"]
### START CODE HERE ### (approx. 2 lines)
current_cache = caches[L-1]
grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid")
### END CODE HERE ###
# Loop from l=L-2 to l=0
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
# Inputs: "grads["dA" + str(l + 1)], current_cache". Outputs: "grads["dA" + str(l)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
### START CODE HERE ### (approx. 5 lines)
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache, "relu")
grads["dA" + str(l)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
### END CODE HERE ###
return grads
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
### START CODE HERE ### (≈ 3 lines of code)
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l + 1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l + 1)]
### END CODE HERE ###
return parameters
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
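Illustrative call (the layer sizes are placeholders and train_x/train_y are assumed
to hold the flattened training set; the first entry of layers_dims must equal the
input dimension and the last must be 1):
layers_dims = [12288, 20, 7, 5, 1]
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations=2500, print_cost=True)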
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
################################################################################
| [
"[email protected]"
] | |
e10916000769e062512e896968a462248e3aaaf8 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/294-tideGauge.py | cd2a90061870ed011239ee4570526d3b677444d3 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 294
y = 295
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
#filter only .csv files
tgNames = []
for file in glob.glob("*.csv"):
tgNames.append(file)
tg_name = sorted(tgNames)[tg]
print(tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
print("this tide gauge is already taken care of")
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
print('Predictors and Surge don''t overlap')
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
kf = KFold(n_splits=10, random_state=29)
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
min_samples_leaf = 1)
rf.fit(X_train, y_train)
#predictions
predictions = rf.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
print()
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#run script
validateRF()
| [
"[email protected]"
] | |
3b29858378e8a84dc8d6061a2f3b26b081be22f2 | 2bbf04bde4585ea4217f65a0037ff824007ada77 | /Python/PetyaAndStrings.py | 45f967d72a3bd2d16c3c1c76a39a1cb0ef78de23 | [] | no_license | knight-byte/Codeforces-Problemset-Solution | 7e0dea73a3906434faf4e59dbc8dd5269b09a688 | cdac3669cc14385ed43f19621ba72a055a52c76c | refs/heads/main | 2023-04-09T00:06:27.062577 | 2021-04-28T05:28:24 | 2021-04-28T05:28:24 | 340,555,681 | 1 | 1 | null | 2021-02-20T03:50:47 | 2021-02-20T03:50:46 | null | UTF-8 | Python | false | false | 225 | py | def main():
a = input().lower()
b = input().lower()
if a == b:
return 0
x = sorted([a, b])
if x[0] == a:
return -1
else:
return 1
if __name__ == '__main__':
print(main())
| [
"[email protected]"
] | |
7c4ffa6c899f19c47416e06fb5253a69e6282d82 | 3d5da3b78d3e18027efb00a07a4a1641865acaf3 | /dataset/hpa_single_dataset.py | e59b485fdaed577d46053a5cbe738a340ff647f4 | [
"Apache-2.0"
] | permissive | marshuang80/cell-segmentation | d79326a3bba77417b98469a609618cb932e705a7 | aef5196b6fd906bff3866156e205d4f787710bde | refs/heads/master | 2022-02-20T15:29:02.716024 | 2019-08-27T22:00:11 | 2019-08-27T22:00:11 | 194,341,336 | 33 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from torch.utils.data import Dataset
from scipy import ndimage
from .augmentation import augmentation
import skimage
import imageio
import numpy as np
import h5py
import os
import random
class HPASingleDataset(Dataset):
def __init__(self, data_path, phase='train', transform=False, percent_hole=0.4):
"""Custom PyTorch Dataset for hpa dataset
Parameters
----------
data_path: str
path to the nuclei dataset hdf5 file
phase: str, optional
phase this dataset is used for (train, val. test)
"""
self.data_path = data_path
self.phase = phase
self.transform = transform
self.percent_hole = percent_hole
self.target_dim = 1
with h5py.File(self.data_path,"r") as h:
self.data_names = list(h.keys())
self.dim = 1 # decision to only use one channel (rgb are the same, a is all 1s)
def __len__(self):
return len(self.data_names)
def __getitem__(self, idx):
with h5py.File(self.data_path,"r") as h:
data = h[self.data_names[idx]][:]
data = data / 255.
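# build a random binary mask that zeroes out `percent_hole` of the pixels in the
# input copy x, while the target y keeps the full image, so the network is
# trained to reconstruct the held-out pixels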
size = len(list(data.reshape(-1)))
rand_holes = np.ones(size)
idx = np.random.choice(size, int(size*self.percent_hole), replace=False)
rand_holes[idx] = 0
rand_holes = rand_holes.reshape(data.shape)
y = data.copy()
x = data.copy()
x = x * rand_holes
x = np.expand_dims(x, 0)
if self.transform:
x,y = augmentation(x,y)
return x, y
| [
"[email protected]"
] | |
76cb72de738a416e8e036c041c95efadb27e14ec | fb8d595d44f5e7ed67942e286db835e1e7f931a5 | /frechetmean/manifolds/hyperboloid.py | a90a0447fb1a4034c94bc1561ed209ca343ad2ba | [
"MIT"
] | permissive | CUAI/Differentiable-Frechet-Mean | 8b112ed02ffdb590697ef1ec66fbf7b121adcf58 | c73f9e8d0969a727043d3d2a07af6392fdc802af | refs/heads/master | 2023-08-31T08:27:04.075283 | 2021-09-28T01:55:55 | 2021-09-28T01:55:55 | 274,028,706 | 38 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | import torch
from frechetmean.utils import (EPS, arcosh, arsinh, artanh, cosh, divsinh,
sinh, sinhdiv, tanh)
from .manifold import Manifold
class Lorentz(Manifold):
def __init__(self, K=-1.0):
super(Lorentz, self).__init__()
assert K < 0
if torch.is_tensor(K):
self.K = K
else:
self.K = torch.tensor(K)
@staticmethod
def _ldot(u, v, keepdim=False, dim=-1):
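# Minkowski (Lorentzian) inner product <u, v>_L = -u_0*v_0 + sum_{i>0} u_i*v_i,
# computed as the Euclidean dot product minus twice the time-like (index 0) term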
m = u * v
if keepdim:
ret = torch.sum(m, dim=dim, keepdim=True) - 2 * m[..., 0:1]
else:
ret = torch.sum(m, dim=dim, keepdim=False) - 2 * m[..., 0]
return ret
def ldot(self, u, v, keepdim=False, dim=-1):
return Lorentz._ldot(u, v, keepdim, dim)
def sh_to_dim(self, sh):
if hasattr(sh, '__iter__'):
return sh[-1] - 1
else:
return sh - 1
def dim_to_sh(self, dim):
if hasattr(dim, '__iter__'):
return dim[-1] + 1
else:
return dim + 1
def zero(self, *shape):
x = torch.zeros(*shape)
x[..., 0] = 1 / (-self.K).sqrt().detach()
return x
def zero_tan(self, *shape):
return torch.zeros(*shape)
def zero_like(self, x):
y = torch.zeros_like(x)
y[..., 0] = 1 / (-self.K).sqrt().detach()
return y
def zero_tan_like(self, x):
return torch.zeros_like(x)
def inner(self, x, u, v, keepdim=False):
return self.ldot(u, v, keepdim=keepdim)
def proju(self, x, u):
return u - self.K * self.ldot(x, u, keepdim=True).expand_as(u) * x.expand_as(u)
@staticmethod
def _proju(x, u, K):
return u - K * Lorentz._ldot(x, u, keepdim=True).expand_as(u) * x.expand_as(u)
def projx(self, x):
x = x.clone()
x.data[..., 0] = (1 / (-self.K) + x[..., 1:].pow(2).sum(dim=-1)).sqrt()
return x
def egrad2rgrad(self, x, u):
scaling = torch.zeros_like(x)
scaling[..., :1] = torch.ones_like(scaling[..., :1])
u = u - 2 * x[..., :1] * scaling
u = self.proju(x, u)
return u
def exp(self, x, u):
un = self.ldot(u, u, keepdim=True)
un = un.clamp(min=EPS[x.dtype]).sqrt() * (-self.K).sqrt()
return x * cosh(un) + sinhdiv(un) * u
def log(self, x, y):
xy = self.K * self.ldot(x, y, keepdim=True)
num = arcosh(xy)
u = divsinh(num) * (y - xy * x)
return self.proju(x, u)
def dist(self, x, y, squared=False, keepdim=False):
d = self.K * self.ldot(x, y)
        d.data.clamp_(min=1)  # in-place clamp keeps the arcosh argument >= 1
        dist = arcosh(d) / (-self.K).sqrt()
        dist.data.clamp_(min=EPS[x.dtype])  # in-place clamp avoids an exactly-zero distance
return dist.pow(2) if squared else dist
def transp(self, x, y, u):
xy = self.ldot(x, y, keepdim=True).expand_as(u)
uy = self.ldot(u, y, keepdim=True).expand_as(u)
return u - (self.K * uy) / (1 + self.K * xy) * (x + y).expand_as(u)
def __str__(self):
return 'Hyperboloid'
def squeeze_tangent(self, x):
return x[..., 1:]
def unsqueeze_tangent(self, x):
return torch.cat((torch.zeros_like(x[..., 0]).unsqueeze(-1), x), dim=-1)
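# Minimal usage sketch (illustrative, not part of the library; the shapes and K=-1.0
# are assumptions): map a tangent vector at the hyperboloid origin onto the manifold
# and check that its distance back to the origin equals its Lorentz norm.
if __name__ == '__main__':
    man = Lorentz(K=-1.0)
    origin = man.zero(3)                                  # (1, 0, 0) for K = -1
    u = man.proju(origin, torch.tensor([0.0, 0.3, 0.4]))  # tangent vector at the origin
    point = man.exp(origin, u)
    print(man.dist(origin, point))                        # ~0.5, the Lorentz norm of u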
| [
"[email protected]"
] | |
f82ba9bc2bc1e77f9d51cf89cba9753ccb178e25 | d0d95c538c4092b4f74f49c71a9306c79859cc97 | /contents/models.py | 5bfa22d9f0f56713a8f9b99805bd89e57e260eb9 | [] | no_license | sjoh0704/Simple-Instagram | 4e7c453e653cfbecb480b4bd283afb3f57bdedec | 45604111120076191ef1c71589d9d4c36c05916c | refs/heads/master | 2023-04-21T04:39:49.955102 | 2021-05-18T01:35:56 | 2021-05-18T01:35:56 | 356,603,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | import os
import uuid
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class BaseModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Content(BaseModel):
user = models.ForeignKey(User, on_delete=models.CASCADE)
text = models.TextField(default="")
    # def __str__(self) -> str:  # not needed since display_list is configured in the admin
# return self.text
# builds the upload path for an uploaded image file
def image_upload_to(instance, filename):
ext = filename.split(".")[-1]
return os.path.join(instance.UPLOAD_PATH, "{}.{}".format(uuid.uuid4(), ext))
class Image(BaseModel):
UPLOAD_PATH = 'user-upload'
content = models.ForeignKey(Content, on_delete=models.CASCADE)
image = models.ImageField(upload_to = image_upload_to)
    order = models.SmallIntegerField()  # stores the display order of the photos attached to each post
class Meta:
ordering = ['order']
def __str__(self) -> str:
return "image" + str(self.order)
| [
"[email protected]"
] | |
13ce8ea93a9f040e69314c5c86987140c95f96c6 | 49fe5157ac5cece1fbd537514ded6b8c0f7a03cb | /training/v0.1.1/danish_augmenter.py | 6cc52be5e2ab18e3ca8ee4bdd5c6c86e9e436128 | [
"Apache-2.0"
] | permissive | scottire/DaCy | 5303f8c47f4c84a8e319c982c2495d03983b7555 | b5a1a34422efd00ccf9e678b1bb9a8e1a150f3c3 | refs/heads/main | 2023-06-28T15:31:32.127470 | 2021-08-03T11:32:41 | 2021-08-03T11:32:41 | 392,293,740 | 0 | 0 | Apache-2.0 | 2021-08-03T11:29:26 | 2021-08-03T11:29:25 | null | UTF-8 | Python | false | false | 1,991 | py | """python -m spacy train config.cfg --code functions.py
"""
import sys
from typing import Callable, Iterable, Iterator
import spacy
sys.path.append("../..")
from dacy.augmenters import (
create_keyboard_augmenter,
create_pers_augmenter,
create_char_swap_augmenter,
create_æøå_augmenter,
)
from dacy.datasets.names import load_names
from spacy.language import Language
from spacy.training import Example
from spacy.training.augment import create_lower_casing_augmenter
def combine_augmenters(
augmenters: Iterable[Callable[[Language, Example], Iterator[Example]]]
) -> Callable[[Language, Example], Iterator[Example]]:
    """Combines a series of spaCy-style augmenters.
Args:
        augmenters (Iterable[Callable[[Language, Example], Iterator[Example]]]): A list of spaCy augmenters.
Returns:
Callable[[Language, Example], Iterator[Example]]: The combined augmenter
"""
def apply_multiple_augmenters(nlp: Language, example: Example):
examples = [example]
for aug in augmenters:
examples = (e for example in examples for e in aug(nlp, example))
for e in examples:
            yield e  # yield each augmented example
return apply_multiple_augmenters
@spacy.registry.augmenters("dacy_augmenter.v1")
def dacy_augmenters():
augmenters = [
create_keyboard_augmenter(doc_level=1, char_level=0.02, distance=1.5),
create_pers_augmenter(
ent_dict=load_names(),
patterns=[
"fn,ln",
"abbpunct,ln",
"fn,ln,ln",
"fn,ln,ln",
"abb,ln",
"ln,abbpunct",
],
force_pattern_size=True,
keep_name=False,
prob=0.1,
),
create_char_swap_augmenter(doc_level=1, char_level=0.02),
create_æøå_augmenter(doc_level=1, char_level=0.1),
create_lower_casing_augmenter(level=0.1),
]
return combine_augmenters(augmenters)
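# In the config.cfg referenced by the module docstring, the registered augmenter can be
# selected by its name; the section names below follow the spaCy v3 defaults and may
# need adjusting to the actual project config:
#   [corpora.train.augmenter]
#   @augmenters = "dacy_augmenter.v1"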
| [
"[email protected]"
] | |
1681c4bfa1b633a467c98beb07c08d18c0ac0c02 | 3ca2fc0098d3f6a876eba9f70d3c891b553b150e | /API/migrations/0005_auto_20200807_1508.py | c753ec0075667d630e165285919fa132d24c2b9a | [] | no_license | shivam-bit/drf-csv-processor | 6c49bfafff0b2b18b77c51329af79f516cfb6eb1 | 4adbc4a7af7304cb2ace0c268685712847104f30 | refs/heads/master | 2022-12-01T13:59:18.726838 | 2020-08-07T19:15:31 | 2020-08-07T19:15:31 | 285,902,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 3.0.8 on 2020-08-07 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('API', '0004_auto_20200807_1501'),
]
operations = [
migrations.AlterField(
model_name='csv_product',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=12),
),
]
| [
"[email protected]"
] | |
6b29f937e438a7dbf2fd0bb68f186740125ae654 | 50027b2559f8e9c728f48bb172128149cc54071d | /src/main.py | 62b5a1750a70cc679c7b659c4b8bc6f57df160f0 | [] | no_license | kristhyanmatos/logica-fuzzy | 396c47cb5769cd25c94b386e4494031fd01b3db2 | 04e442405e6e05ec500a9ad6993f1857d9924cf0 | refs/heads/main | 2023-06-09T16:01:34.167020 | 2021-07-06T12:39:28 | 2021-07-06T12:39:28 | 382,421,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | from db import DEVS
from utils import busca_chave
from fuzzy import calula_aptidao
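# Overall flow: read a task complexity and a comma-separated list of languages, score
# every developer in DEVS who knows each requested language with calula_aptidao(),
# then sort each language's developer list with busca_chave() before printing.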
input_complexidade = int(input("insira a complexidade: "))
input_linguagens = input("insira as linguagens: ")
input_linguagens = input_linguagens.split(",")
aptidao_linguagens = []
for linguagem in input_linguagens:
aptidao_linguagens.append(
{
"nome": linguagem,
"devs_aptidao": [],
}
)
for dev in DEVS:
for linguagen in dev["linguagens"]:
if linguagen["nome"] in (input_linguagens):
for index, aptidao_linguagem in enumerate(aptidao_linguagens):
if aptidao_linguagem["nome"] == linguagen["nome"]:
aptidao_linguagens[index]["devs_aptidao"].append(
{
"nome": dev["nome"],
"aptidao": calula_aptidao(
dev["tempo_carreira"],
input_complexidade,
linguagen["projetos_realizados"],
),
}
)
for linguagem in aptidao_linguagens:
linguagem["devs_aptidao"].sort(key=busca_chave)
print(aptidao_linguagens)
| [
"[email protected]"
] | |
c1dc45806274b3fd28e8afd136f642fdbdefd492 | 0ed9a8eef1d12587d596ec53842540063b58a7ec | /cloudrail/knowledge/utils/policy_utils.py | c6e67eb897180b23d1600163ecbe65f63c194702 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cbc506/cloudrail-knowledge | 8611faa10a3bf195f277b81622e2590dbcc60da4 | 7b5c9030575f512b9c230eed1a93f568d8663708 | refs/heads/main | 2023-08-02T08:36:22.051695 | 2021-09-13T15:23:33 | 2021-09-13T15:24:26 | 390,127,361 | 0 | 0 | MIT | 2021-07-27T21:08:06 | 2021-07-27T21:08:06 | null | UTF-8 | Python | false | false | 3,180 | py | from typing import List, Set
from cloudrail.knowledge.context.connection import PolicyEvaluation
from cloudrail.knowledge.context.aws.resources.iam.policy import Policy
from cloudrail.knowledge.context.aws.resources.iam.policy_statement import StatementCondition, StatementEffect
from cloudrail.knowledge.context.aws.resources.iam.principal import PrincipalType
from cloudrail.knowledge.utils.action_utils import is_action_fully_defined
from cloudrail.knowledge.utils.arn_utils import are_arns_intersected
def build_condition(statement_dict: dict) -> List[StatementCondition]:
condition_dict: dict = statement_dict.get('Condition')
if condition_dict:
condition_list: List[StatementCondition] = []
for operator, key_values in condition_dict.items():
for cond_key, cond_value in key_values.items():
values_list = [cond_value] if isinstance(cond_value, str) else cond_value
condition_list.append(StatementCondition(operator, cond_key, values_list))
return condition_list
else:
return []
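# Illustration: a statement dict such as {'Condition': {'StringEquals': {'aws:SourceVpc': 'vpc-123'}}}
# yields [StatementCondition('StringEquals', 'aws:SourceVpc', ['vpc-123'])].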
def is_any_resource_based_action_allowed(policy_evaluation: PolicyEvaluation) -> bool:
resource_allowed_actions_copy = policy_evaluation.resource_allowed_actions.copy()
resource_denied_actions_copy = policy_evaluation.resource_denied_actions.copy()
remove_allowed_from_denied(resource_allowed_actions_copy, resource_denied_actions_copy)
return len(resource_allowed_actions_copy) > 0
def remove_allowed_from_denied(allowed_actions: Set[str], denied_actions: Set[str]):
allows_to_remove = set()
denieds_to_remove = set()
for allow_action_a in allowed_actions:
for allow_action_b in allowed_actions:
if allow_action_a != allow_action_b and is_action_fully_defined(allow_action_a, allow_action_b):
allows_to_remove.add(allow_action_a)
for denied_action_a in denied_actions:
for denied_action_b in denied_actions:
if denied_action_a != denied_action_b and is_action_fully_defined(denied_action_a, denied_action_b):
denieds_to_remove.add(denied_action_a)
for allow_action in allowed_actions:
for deny_action in denied_actions:
if is_action_fully_defined(allow_action, deny_action):
allows_to_remove.add(allow_action)
for allow_to_remove in allows_to_remove:
allowed_actions.remove(allow_to_remove)
for denied_to_remove in denieds_to_remove:
denied_actions.remove(denied_to_remove)
def is_policy_block_public_access(policy: Policy, aws_resource_arn: str = '*') -> bool:
for statement in policy.statements:
if len(statement.condition_block) == 0:
if statement.principal.principal_type == PrincipalType.PUBLIC or \
(statement.principal.principal_type == PrincipalType.AWS and
any(value == "*" for value in statement.principal.principal_values)) and \
any(are_arns_intersected(res, aws_resource_arn) for res in statement.resources):
return statement.effect == StatementEffect.DENY
else:
return True
return False
| [
"[email protected]"
] | |
7f38ef6ec889c598cc6765fe7b60205fec67bcfb | 43196f3ddb6492867bfcf00c9169c340aad4e7b6 | /examples/Dict2D_examples.py | 6fcd4cd72cf7e0b53cd4730dd2b960362a152b6a | [] | no_license | pycogent/old-cogent | eb24cf9b86e2df17ab32d05586ba0e034ab0dc86 | b49442bd793a743188a43809903dc140512420b7 | refs/heads/master | 2016-09-05T18:06:15.100337 | 2012-12-20T20:04:09 | 2012-12-20T20:04:09 | 7,252,631 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,443 | py | #!/usr/bin/env python
# Dict2D_examples.py
# Owner: Greg Caporaso [email protected]
from old_cogent.base.dict2d import Dict2D, largest
from old_cogent.util.demo import QuickDemo
from old_cogent.base.stats import Freqs
demo = QuickDemo(
[
"Demonstration of the Dict2D class",
"Written by Greg Caporaso [email protected] 5/6/04",
"""
A Dict2D is meant to be a common interface for any 2D mapping types
where the top level is a dict.
\t
This example is meant to illustrate a lot of its functionality
""",
["Several objects can be used to create a Dict2D",None],
["No data creates an empty dict", "d = Dict2D()"],
[None, "print d"],
["A dict of dicts", "d = Dict2D(data={'a':{'a':1,'b':2},'b':{'a':3,'b':4}})"],
[None, "print d"],
["Indices data", "d = Dict2D(data=[('a','a',1),('a','b',2),('b','a',3),('b','b',4)])"],
[None, "print d"],
["List data, note you must also define a RowOrder and ColOrder",\
"d = Dict2D(data=[[1,2,3],[4,5,6]], RowOrder=['a','b'], ColOrder=['d','e','f'])"],
[None, "print d"],
["RowOrder and ColOrder are lists of the 'interesting' row keys and column keys",
"d = Dict2D(data={'a':{'c':5}}, RowOrder=['a','b'], ColOrder=['c','d'])"],
[None, "print d"],
["""If RowOrder and ColOrder are both passed in, along with setting Pad=True,
the Dict2D will be filled with the default value to conatin all rows and
cols.""",
"d = Dict2D(data={'a':{'c':5}}, RowOrder=['a','b'], ColOrder=['c','d'], Pad=True)"],
[None, "print d"],
["""You can change the default value for Pad with self.Default """,
"d = Dict2D(data={'a':{'c':5}}, RowOrder=['a','b'], ColOrder=['c','d'], Pad=True, Default=42)"],
[None, "print d"],
["""The only remaining constructor argument is RowConstructor, this allows
you to optionally pass a different constructor (default==dict()) for the inner
layer of the Dict2D. For example you could make all inner objects into
cogent.base.stats.Freqs objects""",
"d = Dict2D(data={'a':{'a':1,'b':2}}, RowConstructor=Freqs)"],
[None, "print d"],
["Note the data type of d['a']", "print type(d['a'])"],
["""The pad() method can be called after initialization to achieve the same
effect as passing Pad=True to the constructor. pad() optionally a default
parameter, which by default is self.Default""",
"d = Dict2D(data={'a':{'b':1}}, RowOrder=['a','b'], ColOrder=['a','b'])"],
[None, "print d"],
[None, "d.pad(default=42)"],
[None, "print d"],
["""If pad() is called and RowOrder and ColOrder haven't been set, it
pads all rows to contain data for all existing column keys""",
"d = Dict2D(data={'a':{'c':1}, 'b':{'d':3}}, Default=42)"],
[None, "print d"],
[None, "d.pad()"],
[None, "print d"],
["""The purge() method is used to get rid of unwanted elements. Calling
purge will remove any elements whose row or column key is not in RowOrder
or ColOrder respectively""",
"d = Dict2D(data={'a':{'a':0,'b':1,'c':2}, 'b':{'b':5, 'c':9}, 'c':{'b':5}}, RowOrder=['a','b'], ColOrder=['a','b'])"],
[None, "print d"],
[None, "d.purge()"],
[None, "print d"],
["""The fill() method is similar to pad(), it takes parameters to list rows
and cols, and fills them with val. By default, RowOrder and ColOrder are not
touched, but can be overwritten by passing set_orders=True). All values
optional except val""",
"d.fill(val=42, rows=['c'],cols=['y','z'],set_orders=True)"],
[None, "print d"],
[None, "print d.RowOrder"],
[None, "print d.ColOrder"],
["""The square() method will fill a Dict2D with necessary values to make it
square""", "d = Dict2D({'a':{'a':1, 'b':2}, 'b':{'a':0}})"],
[None, "print d"],
[None, "d.square()"],
[None, "print d"],
["""Optionally square() can fill in columns that exist in one row but not in
others by passinging reset_order=True""",
"d = Dict2D({'a':{'a':1, 'b':2}, 'b':{'c':0}})"],
[None, "print d"],
[None, "d.square(default=42, reset_order=True)"],
[None, "print d"],
["""There are several built-in operations for working on matrices:""",None],
["""Setting up a Dict2D for examples""",
"d = Dict2D({'a':{'a':1,'b':2,'c':3}, 'b':{'a':4, 'b':5, 'c':6}}, RowOrder=list('ab'), ColOrder=('abc'))"],
[None, 'print d'],
["""setDiag() allows you to set the diagonal to a certain value""",
"d.setDiag(val=99)"],
[None,"print d"],
["""scale() allows for the application of a function to all elements in the
Dict2D""", """def add_one(x):
\treturn x + 1
d.scale(f=add_one)"""],
[None,"print d"],
["""transpose() swaps all self[r][c] -> self[c][r]""", "d.transpose()"],
[None,"print d"],
["""reflect() reflects items across the diagonal based on a function that is
passed in. Several are defined and available to use, or you may define your
own. In this example, largest is used, which sets self[r][c] and self[c][r]
to the larger value of the two. Reflect only works if
RowOrder and ColOrder exist and are equal. Items that don't lie within the
scope of RowOrder and ColOrder will be ignored.""", "d.RowOrder=list('ab')"],
[None, "d.reflect(method=largest)"],
[None, "print d"],
["""All methods operate on the Dict2D object. If you wish to create a new
object to work on leaving your original untouched you should use the copy()
method which makes a deep copy of the Dict2D""",
"d = Dict2D(data={'a':{'a':1, 'b':2}, 'b':{'a':5, 'b':6}})"],
[None, "c = d.copy()"],
[None, "c.setDiag(42)"],
[None, "print c"],
["...and the original remains untouched", "print d"],
["""A Dict2D can be converted to a list of lists based on specified RowOrder
and ColOrder. The list will be padded as called for in self.Pad, and will
raise an error on missing values if self.Pad=False. Headers describing the
row keys and col keys will be included if specified, but will not be by
default""",
"d = Dict2D(data={'a':{'a':1, 'b':2}, 'b':{'a':5, 'b':6}}, RowOrder=list('ab'), ColOrder=('ba'))"],
[None, "print d.toLists(headers=True)"],
["""There are several ways to learn about the keys present in the Dict2D
object""",
"d = Dict2D(data={'a':{'a':1, 'b':2}, 'b':{'a':5, 'c':6}})"],
["Get the row keys", "print d.rowKeys()"],
["Get all existing column keys", "print d.colKeys()"],
["Get column keys that are shared between all rows", "print d.sharedColKeys()"],
["""Iterators exist for Rows, Cols, and Items. If Dict2D is sparse, an error will
be raised if self.Pad == False, otherwise self.Default will be returned for
missing elements.""",
"d = Dict2D(data={'a':{'a':1, 'b':2}, 'b':{'a':5, 'c':6}}, Pad=True)"],
[None, """for r in d.Rows:
\tprint r
"""],
[None,"""for c in d.Cols:
\tprint c
"""],
[None,"""for i in d.Items:
\tprint i
"""],
["""There are several ways to select for Rows, Cols, and Items based on how
they evaluate in an arbitrary boolean function. Setting up some functions and
objects to analyze for example.""",
"""def is_zero(x):
\treturn x == 0.
"""],
[None,"""def sum_to_zero(l):
\tsum = 0
\tfor i in l:
\t\tsum += i
\treturn sum == 0.
"""],
[None,"""d = Dict2D({'a':{'a':1,'b':0,'c':-1},'b':{'a':0, 'b':3, 'c':42},'c':{'a':-1, 'b':9, 'c':-41}})"""],
["""Return indices of rows whose elements sum to zero""",
"print d.getRowIndices(f=sum_to_zero)"],
["""Return rows whose elements sum to zero""",
"print d.getRowsIf(f=sum_to_zero)"],
["""Return indices of columns whose elements sum to zero""",
"print d.getColIndices(f=sum_to_zero)"],
["""Return columns whose elements sum to zero""",
"print d.getColsIf(f=sum_to_zero)"],
["""Return indices of items which are zero.""",
"print d.getItemIndices(f=is_zero)"],
["""Return items which are zero (silly example, but you get the idea)""",
"print d.getItemsIf(f=is_zero)"],
["""You can also select specific rows, cols, or items, and create a NEW OBJECT
with that data, for example""",
"c = d.getRows(rows=['a','b'])"],
[None,"print c"],
["Note original remains the same", "print d"],
["... same thing exists for cols", "c = d.getCols(cols=['b','c'])"],
[None,"print c"],
["... and for items", "c = d.getItems(items=[('a','b'),('c','a')])"],
[None,"print c"],
["""Additionally all of these selections methods have a negate parameter, which if
passed in as True, returns data corresponding to a result of False when tested
against f, for example:""",
"print d.getRowIndices(f=sum_to_zero)"],
[None,"print d.getRowIndices(f=sum_to_zero, negate=True)"],
["""Dict2D objects also define a method to write them out in delimited form,
which is useful for storing data in a file which can later be read back into
a Dict2D object. (You can additionally define Orders and turn headers off)""",
"print d.toDelimited()"]
])
| [
"[email protected]"
] | |
ba50c9c691ae34220377be54c213cb36dcf65adb | e1cf2d7eb9a475d078d78a398e5007630f0da1a7 | /server.py | 9df1ff32cf6f7bbb585112e599366f5a3d12331e | [] | no_license | saml/crosswodrs | beae5017420f4b365670fdd8c0eb57c6081c7fd1 | 51e3dcea9c2b28d2a0477cb05704cb5f345ee1f1 | refs/heads/master | 2020-04-19T04:04:48.614945 | 2014-01-10T18:52:42 | 2014-01-10T18:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | if __name__ == '__main__':
from crosswords import app
app.run(host='0.0.0.0',port=5001,debug=False)
| [
"[email protected]"
] | |
d5c1b022a9c773143cb7addb3519ae552efc46cf | 6c3e747e8e0d97789f54ad5292dd81fd0757a1b2 | /manage.py | 0705edb05cab36d859f486ab028c07e2df9e61a0 | [] | no_license | tmclean/flask-by-example | 1f21648e41ec30b9ff48a8b2e5f66bf2162d02a0 | 0c0503bd7918b0495023b8df94c52ffc0fa70235 | refs/heads/master | 2023-02-09T15:28:55.586485 | 2021-01-10T02:44:51 | 2021-01-10T02:44:51 | 328,291,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
app.config.from_object( os.environ['APP_SETTINGS'] )
migrate = Migrate( app, db )
manager = Manager( app )
manager.add_command( 'db', MigrateCommand )
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
] | |
d1d43e025b167a92f9bd20f67cea4bea728a7028 | 116f28795a717f9ead7b0758eebccfe072c6894e | /Model/FutureSale.py | 45b045bdcb92dbc38f7d84f94048d3d24e25537f | [] | no_license | KJKimCE/ESPP-Python | 5941bf1a6cfb730866dc2e7b0db84eedaea0b309 | 199136080005f555f4ff575cc7c2820421bbf555 | refs/heads/master | 2023-01-24T10:06:39.653347 | 2020-12-11T04:51:48 | 2020-12-11T04:51:48 | 308,115,136 | 2 | 0 | null | 2020-11-23T07:45:07 | 2020-10-28T19:04:34 | Python | UTF-8 | Python | false | false | 2,774 | py | from Model.Regime import DisqualifyingShort, DisqualifyingLong, Qualifying
from Model.Benefit import Benefit
from Tools import Utils
import heapq
class FutureSale:
regime = None
date = None
expectedReturn = None
predictedPrice = None
benefits = None
regimes = None
decision = None
def __init__(self, model, taxBracket, currentSales, futureDate):
self.benefits = []
self.regimes = []
self.date = futureDate
self.expectedReturn = abs(self.date - model.today).days / 365.0 * .08
self.predictedPrice = model.currentPrice * (1 + self.expectedReturn)
benefitIndex = 0
for regime in currentSales:
if regime.dateFrom <= self.date < regime.dateTo:
if regime.regimeType == 'Disqualifying Short':
self.regime = DisqualifyingShort(model, taxBracket, self.predictedPrice)
break
elif regime.regimeType == 'Disqualifying Long':
self.regime = DisqualifyingLong(model, taxBracket, self.predictedPrice)
break
elif regime.regimeType == 'Qualifying':
self.regime = Qualifying(model, taxBracket, self.predictedPrice)
break
benefitIndex += 1
heapq.heappush(self.regimes, (-1 * self.regime.finalProceeds, self.regime.regimeType))
benefitIndex -= 1
while benefitIndex >= 0:
benefit = Benefit(model, taxBracket, currentSales[benefitIndex], self)
self.benefits.append(benefit)
heapq.heappush(self.regimes, (-1 * benefit.finalProceeds, benefit.regimeType))
# self.regimes.append((benefit.difference, benefit.regimeType))
benefitIndex -= 1
self.decision = heapq.heappop(self.regimes)[1]
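        # Proceeds are pushed negated, so the min-heap behaves as a max-heap and the
        # popped entry is the regime type with the highest final proceeds.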
def print(self):
print(f"Future - {self.regime.regimeType}")
print(f"Date: {self.date}")
print(f"Expected Return: {'{:,.2f}'.format(self.expectedReturn * 100) + '%'}")
print(f"Predicted Price: {Utils.formatCurrency(self.predictedPrice)}")
print(f"Proceeds: {Utils.formatCurrency(self.regime.saleProceeds)}")
print(f"Profit: {Utils.formatCurrency(self.regime.saleProfit)}")
print(f"Income: {Utils.formatCurrency(self.regime.income)}")
print(f"Short Term: {Utils.formatCurrency(self.regime.shortTerm)}")
print(f"Long Term: {Utils.formatCurrency(self.regime.longTerm)}")
print(f"Tax: {Utils.formatCurrency(self.regime.tax)}")
print(f"Final Proceeds: {Utils.formatCurrency(self.regime.finalProceeds)}")
print()
for benefit in self.benefits:
benefit.print()
print(f"DECISION: {self.decision}")
print()
| [
"[email protected]"
] | |
aa866e7ee13cae23c3eba611617856ed3614b598 | 96c05a71c6f50ddbc1eb8b8ad0e83e8169dd3060 | /simple_webapp.py | 36d2e5fd0aeafb24f617fdb8ebe51957beef6ac3 | [] | no_license | apheyhys/FlaskBerry | 563c5d61e23dee713ef2de07e27c79dc7e1841d7 | ceda5510e40d9236d71e7f0e3a72ffd2859898ba | refs/heads/master | 2021-08-15T05:40:02.598312 | 2017-11-17T11:48:16 | 2017-11-17T11:48:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from flask import Flask, session
from checker import check_logged_in
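# check_logged_in (imported from checker.py, which is not shown here) is assumed to be
# a decorator that only lets the wrapped view run when session['logged_in'] is set.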
app = Flask(__name__)
@app.route('/')
def hello() -> str:
return 'Hello from the simple webapp.'
@app.route('/page1')
@check_logged_in
def page1() -> str:
return 'This is page1.'
@app.route('/page2')
@check_logged_in
def page2() -> str:
return 'This is page2.'
@app.route('/page3')
@check_logged_in
def page3() -> str:
return 'This is page3.'
@app.route('/login')
def do_login() -> str:
session['logged_in'] = True
return 'You are now logged in.'
@app.route('/logout')
def do_logout() -> str:
session.pop('logged_in')
return 'You are now logged out.'
app.secret_key = 'YouWillNeverGuessMySecretKey'
if __name__ == '__main__':
app.run(debug=False, port=5001)
| [
"[email protected]"
] | |
b25f971ab860af42f04ab1c38fdedeeab5a7d550 | 5e6781dd3ea77c718a983c34fd8abc4af2b1090e | /bin/generate.py | 0347af207f4c06469bef760f4c45bbfc61f2e5a6 | [] | no_license | ajmirsky/appy | 3ae41791aff3b6ec8b85fbdceb7b6369f5e4ebb8 | 24b2bde14f05d96e13765e3c5c7f3c62db6429c7 | refs/heads/master | 2016-09-05T12:30:10.031331 | 2011-09-02T07:59:49 | 2011-09-02T07:59:49 | 2,315,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,359 | py | '''This script allows to generate a product from a Appy application.'''
# ------------------------------------------------------------------------------
import sys, os.path
from optparse import OptionParser
from appy.gen.generator import GeneratorError
from appy.shared.utils import LinesCounter
# ------------------------------------------------------------------------------
ERROR_CODE = 1
VALID_PRODUCT_TYPES = ('plone25', 'odt')
APP_NOT_FOUND = 'Application not found at %s.'
WRONG_NG_OF_ARGS = 'Wrong number of arguments.'
WRONG_OUTPUT_FOLDER = 'Output folder not found. Please create it first.'
PRODUCT_TYPE_ERROR = 'Wrong product type. Product type may be one of the ' \
'following: %s' % str(VALID_PRODUCT_TYPES)
C_OPTION = 'Removes from i18n files all labels that are not automatically ' \
'generated from your gen-application. It can be useful during ' \
'development, when you do lots of name changes (classes, ' \
'attributes, states, transitions, etc): in this case, the Appy ' \
'i18n label generation machinery produces lots of labels that ' \
'then become obsolete.'
S_OPTION = 'Sorts all i18n labels. If you use this option, among the ' \
'generated i18n files, you will find first all labels ' \
'that are automatically generated by appy.gen, in some logical ' \
'order (ie: field-related labels appear together, in the order ' \
'they are declared in the gen-class). Then, if you have added ' \
'labels manually, they will appear afterwards. Sorting labels ' \
'may not be desired under development. Indeed, when no sorting ' \
'occurs, every time you add or modify a field, class, state, etc, ' \
'newly generated labels will all appear together at the end of ' \
'the file; so it will be easy to translate them all. When sorting ' \
'occurs, those elements may be spread at different places in the ' \
'i18n file. When the development is finished, it may be a good ' \
'idea to sort the labels to get a clean and logically ordered ' \
'set of translation files.'
class GeneratorScript:
'''usage: %prog [options] app productType outputFolder
"app" is the path to your Appy application, which may be a
                Python module (= a file that ends with .py) or a Python
package (= a folder containing a file named __init__.py).
Your app may reside anywhere (but it needs to be
accessible by the underlying application server, ie Zope),
                except within the generated product. Typically, if you
generate a Plone product, it may reside within
<yourZopeInstance>/lib/python, but not within the
generated product (typically stored in
<yourZopeInstance>/Products).
"productType" is the kind of product you want to generate
(currently, only "plone25" and 'odt' are supported;
in the near future, the "plone25" target will also produce
Plone 3-compliant code that will still work with
Plone 2.5).
"outputFolder" is the folder where the product will be generated.
For example, if you specify /my/output/folder for your
application /home/gde/MyApp.py, this script will create
a folder /my/output/folder/MyApp and put in it the
generated product.
Example: generating a Plone product
-----------------------------------
In your Zope instance named myZopeInstance, create a folder
"myZopeInstance/lib/python/MyApp". Create into it your Appy application
(we suppose here that it is a Python package, containing a __init__.py
file and other files). Then, chdir into this folder and type
"python <appyPath>/gen/generator.py . plone25 ../../../Products" and the
product will be generated in myZopeInstance/Products/MyApp.
"python" must refer to a Python interpreter that knows package appy.'''
def generateProduct(self, options, application, productType, outputFolder):
exec 'from appy.gen.%s.generator import Generator' % productType
Generator(application, outputFolder, options).run()
def manageArgs(self, parser, options, args):
# Check number of args
if len(args) != 3:
print WRONG_NG_OF_ARGS
parser.print_help()
sys.exit(ERROR_CODE)
# Check productType
if args[1] not in VALID_PRODUCT_TYPES:
print PRODUCT_TYPE_ERROR
sys.exit(ERROR_CODE)
# Check existence of application
if not os.path.exists(args[0]):
print APP_NOT_FOUND % args[0]
sys.exit(ERROR_CODE)
# Check existence of outputFolder basic type
if not os.path.exists(args[2]):
print WRONG_OUTPUT_FOLDER
sys.exit(ERROR_CODE)
# Convert all paths in absolute paths
for i in (0,2):
args[i] = os.path.abspath(args[i])
def run(self):
optParser = OptionParser(usage=GeneratorScript.__doc__)
optParser.add_option("-c", "--i18n-clean", action='store_true',
dest='i18nClean', default=False, help=C_OPTION)
optParser.add_option("-s", "--i18n-sort", action='store_true',
dest='i18nSort', default=False, help=S_OPTION)
(options, args) = optParser.parse_args()
try:
self.manageArgs(optParser, options, args)
print 'Generating %s product in %s...' % (args[1], args[2])
self.generateProduct(options, *args)
# Give the user some statistics about its code
LinesCounter(args[0]).run()
except GeneratorError, ge:
sys.stderr.write(str(ge))
sys.stderr.write('\n')
optParser.print_help()
sys.exit(ERROR_CODE)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
GeneratorScript().run()
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
801fa55840a488244b220d5d91cce882341e68b7 | a6fe8aeaa30a22b65d98a2bb360b6d761a2e17fc | /venv/lib/python2.7/site-packages/kubernetes/client/models/v1_network_policy_list.py | 710fbf0a942cfbfaac48d8a60676112b85e12b70 | [
"MIT"
] | permissive | 784134748/kubernetes-install | 54a2a8e83e2f47f2064270649725899282b7b244 | 5df59632c2619632e422948b667fb68eab9ff5be | refs/heads/master | 2022-12-15T13:52:43.486633 | 2019-03-27T13:01:06 | 2019-03-27T13:01:06 | 176,937,818 | 0 | 0 | MIT | 2022-05-25T01:56:18 | 2019-03-21T12:13:41 | Python | UTF-8 | Python | false | false | 6,560 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NetworkPolicyList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1NetworkPolicy]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1NetworkPolicyList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1NetworkPolicyList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1NetworkPolicyList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1NetworkPolicyList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1NetworkPolicyList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1NetworkPolicyList.
Items is a list of schema objects.
:return: The items of this V1NetworkPolicyList.
:rtype: list[V1NetworkPolicy]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1NetworkPolicyList.
Items is a list of schema objects.
:param items: The items of this V1NetworkPolicyList.
:type: list[V1NetworkPolicy]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1NetworkPolicyList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1NetworkPolicyList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1NetworkPolicyList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1NetworkPolicyList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1NetworkPolicyList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The metadata of this V1NetworkPolicyList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1NetworkPolicyList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1NetworkPolicyList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NetworkPolicyList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
b371c6adb1b7f2e01b0565c36cd021eda72e8d90 | f0922c2bf70af17fcade0f857c9bf834bd294a9f | /model/model/settings.py | cfcb2d107cea5f9a5812971c42428261b23c1d9f | [] | no_license | Vaibhavraj-nath-chauhan/Django-Basic | f994d1180b789a3728ed4b018408fef8d26c352c | fa74e25aba00546cb19a72cf306a371510b85497 | refs/heads/main | 2023-02-04T04:21:37.261943 | 2020-12-23T05:31:26 | 2020-12-23T05:31:26 | 323,810,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,081 | py | """
Django settings for model project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*l^8(0uuvbjfb89872i6-ka7n1pd(p0q^mah6v&9+5z0p4_p71'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"music",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'model.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'model.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
c4408cc0bd8a882f7da921f174d7c3c1ee877558 | 3e0d6a208afc1d2ee186c553c7c97f261eeee36d | /keras-ensemble.py | b392f966a672a995bda502e60ac910118d801ae0 | [] | no_license | andrewszwec/realestate-v004 | 0cdfc99ee57f4dfeef13d911b447f624bdfc8179 | d5ad8c129b298d93f7bfdd9516909a61591ac01e | refs/heads/master | 2021-06-20T16:17:13.977967 | 2017-06-24T09:48:58 | 2017-06-24T09:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | model1 = sequential()
# Averaging ensemble sketch: build three identical Keras models, train each on the
# same data, and average their predicted probabilities. The layer stack, the training
# arguments and the data (x_train, y_train, x_test) are placeholders, not real values.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
def build_model():
    model = Sequential([Dense(64, activation='relu'), Dense(1, activation='sigmoid')])
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
model1, model2, model3 = build_model(), build_model(), build_model()
model1.fit(x_train, y_train)
model2.fit(x_train, y_train)
model3.fit(x_train, y_train)
prob1 = model1.predict(x_test)
prob2 = model2.predict(x_test)
prob3 = model3.predict(x_test)
final_prob = (prob1 + prob2 + prob3)/3 | [
"[email protected]"
] | |
4b665756cfecba34a72ea4c1c191b5f2a5cf9bef | f4f657b7167b63b38a5d63af1d6495fc3d791f2d | /server.py | 36ea711fb3e0a9b230f0ff63dbdd19d10c65ffea | [] | no_license | harshitsinghai77/platypus-socketio-server | 821d8cd7e1ea072a91aea0e15acb2099f863246a | 02205556ee70acc65d2f8e80a9d2daa3f222a3c7 | refs/heads/master | 2023-08-01T02:01:14.026951 | 2021-09-21T16:04:30 | 2021-09-21T16:04:30 | 408,876,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import logging
import socketio
import uvicorn
from uvicorn.loops.uvloop import uvloop_setup
from socketio.exceptions import ConnectionRefusedError
# Set some basic logging
logging.basicConfig(
level=2,
format="%(asctime)-15s %(levelname)-8s %(message)s"
)
sio = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins='*' )
app = socketio.ASGIApp(sio)
@sio.event
async def connect(sid, environ, auth):
logging.info(f"connect {sid}")
# raise ConnectionRefusedError('authentication failed')
@sio.event
async def disconnect(sid):
logging.info(f'disconnect {sid}')
async def send_message_to_client():
await sio.emit('my message to harshit', {
'data': 'Hello world from server'
})
async def background_task_1():
while True:
await sio.emit('my message to harshit', {
'data': 'Hello world from server'
})
await sio.sleep(2)
@sio.event
async def send_changes(sid, data):
# print("message ", data)
await sio.emit('recieve_changes', data)
@sio.on("message_from_client")
async def message_from_client(sid, data):
print(data)
await sio.sleep(2)
sio.start_background_task(background_task_1)
@sio.event
async def another_event(sid, data):
print('sid: ', sid)
print('data: ', data)
return "Something just like this"
if __name__ == '__main__':
uvicorn.run(app, host='127.0.0.1', port=5000)
| [
"[email protected]"
] | |
abad02fd73eba9b1bae8a6b5e1278161f56a046a | 056a41657db9a6cc00475e2b1339edd834067a70 | /glitter/assets/migrations/0001_initial.py | 74e08286444cbeecd7bb8e28500639e1ead5014e | [
"BSD-3-Clause"
] | permissive | developersociety/django-glitter | 83f17160a19f632572b4364653df3f3816876fe5 | b9b0a3d8b49d5d9b840656f84564ba0a6e016f98 | refs/heads/master | 2023-04-14T16:58:14.765895 | 2020-01-07T14:46:17 | 2020-01-07T14:46:17 | 42,167,612 | 3 | 1 | BSD-3-Clause | 2020-01-07T16:26:56 | 2015-09-09T08:59:19 | Python | UTF-8 | Python | false | false | 3,199 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('title', models.CharField(db_index=True, max_length=100)),
('file', models.FileField(upload_to='assets/file')),
('file_size', models.PositiveIntegerField(default=0, editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
'ordering': ('-created_at', '-modified_at', 'title'),
},
),
migrations.CreateModel(
name='FileCategory',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('title', models.CharField(unique=True, max_length=100)),
],
options={
'verbose_name_plural': 'file categories',
'abstract': False,
'ordering': ('title',),
},
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('title', models.CharField(db_index=True, max_length=100)),
('file', models.ImageField(width_field='image_width', upload_to='assets/image', verbose_name='Image', height_field='image_height')),
('image_height', models.PositiveIntegerField(editable=False)),
('image_width', models.PositiveIntegerField(editable=False)),
('file_size', models.PositiveIntegerField(default=0, editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
'ordering': ('-created_at', '-modified_at', 'title'),
},
),
migrations.CreateModel(
name='ImageCategory',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('title', models.CharField(unique=True, max_length=100)),
],
options={
'verbose_name_plural': 'image categories',
'abstract': False,
'ordering': ('title',),
},
),
migrations.AddField(
model_name='image',
name='category',
field=models.ForeignKey(to='glitter_assets.ImageCategory'),
),
migrations.AddField(
model_name='file',
name='category',
field=models.ForeignKey(to='glitter_assets.FileCategory'),
),
]
| [
"[email protected]"
] | |
ec1c5ba1eb51f7891f4b274f9671d5a2804dc5a7 | bf97e65650967281a3181a107994a9e02e269f6b | /main.py | b61dbe172cc1fa4853f207f84c63c3425c138456 | [] | no_license | ta21cos/luna-lang | 7e8566f73a2b842170c5a2ad65480b00cfa114cd | 975483d7fe9fb961da0ccffa77c84ad9545f4699 | refs/heads/master | 2020-04-11T07:17:31.091780 | 2018-12-13T08:26:35 | 2018-12-13T08:26:35 | 161,606,310 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | import sys
from converter import Inst, convert_bf_to_instruction, convert_luna_to_brainfxck
class Pointer:
def __init__(self):
self.__p = 0
def inc(self):
self.__p += 1
def dec(self):
self.__p -= 1
    # may become negative
def set(self, v):
self.__p = v
def __call__(self):
return self.__p
class Interpreter:
def __init__(self):
self.memory = [0 for _ in range(100)]
def inc(self, p):
self.memory[p()] += 1
def dec(self, p):
self.memory[p()] -= 1
def out(self, p):
return self.memory[p()]
def inp(self, p, v):
self.memory[p()] = v
def jmpifz(self, p, insts, a):
if self.memory[p()] == 0:
_i = self.next_ret_index(insts, a)
a.set(_i)
def retifnz(self, p, insts, a):
if self.memory[p()] != 0:
_i = self.prev_jmp_index(insts, a)
a.set(_i)
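    # jmpifz/retifnz implement Brainfuck-style loop brackets: skip forward past the
    # matching RETIFNZ when the current cell is zero, and jump back to the matching
    # JMPIFZ while it is non-zero.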
def next_ret_index(self, insts, a):
count = 0 # for nest
for i in range(a()+1, len(insts)):
if insts[i] == Inst.JMPIFZ:
count += 1
elif insts[i] == Inst.RETIFNZ:
if count == 0:
return i
else:
count -= 1
print('could not jump')
return len(insts)
def prev_jmp_index(self, insts, a):
count = 0
for i in range(0, a()-1)[::-1]:
if insts[i] == Inst.RETIFNZ:
count += 1
if insts[i] == Inst.JMPIFZ:
if count == 0:
return i
else:
count -= 1
print('could not return')
return len(insts)
execute_dict = {
Inst.INCP: lambda intp, p, insts, a: p.inc(),
Inst.DECP: lambda intp, p, insts, a: p.dec(),
Inst.INCV: lambda intp, p, insts, a: intp.inc(p),
Inst.DECV: lambda intp, p, insts, a: intp.dec(p),
Inst.OUTV: lambda intp, p, insts, a: print(chr(intp.out(p)), end=''),
Inst.INPV: lambda intp, p, insts, a: intp.inp(p, ord(input())),
Inst.JMPIFZ: lambda intp, p, insts, a: intp.jmpifz(p, insts, a),
Inst.RETIFNZ: lambda intp, p, insts, a: intp.retifnz(p, insts, a)
}
def execute(instructions):
    addr = Pointer()  # instruction pointer for the instruction currently being executed (reuses Pointer since the structure is nearly identical)
    p = Pointer()  # data pointer for the memory cell currently being referenced
interpreter = Interpreter()
while addr() < len(instructions):
try:
execution = execute_dict[instructions[addr()]]
execution(interpreter, p, instructions, addr)
addr.inc()
except:
print("Could not execute", sys.exc_info())
break
print()
return
def main(filename):
with open(filename) as f:
input_luna = f.read()
instructions = convert_luna_to_brainfxck(input_luna)
execute(instructions)
# run as: python interpreter.py luna.txt
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Filename is not found.')
exit()
filename = sys.argv[1]
main(filename)
| [
"[email protected]"
] | |
bb7eaa310edf715bfe4281cd05da97a576af80ef | 323f6a9078653e63c6bd9f83177c6e2689b40e0e | /scripts/merge_logs.py | edd05a2c1a44bc50a9e2e6c409463d6da0b0bc10 | [] | no_license | kux/dev-env | 99a43bcb0c91950b258b234516aa61c84206ac8c | 9009fe08763161f3a3ef139c46233e6af60f9db8 | refs/heads/master | 2021-01-25T03:40:31.232930 | 2016-09-15T01:11:20 | 2016-09-15T01:11:20 | 22,931,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,210 | py | import copy
import datetime
import os
import re
import sys
import time
APACHE_TIMESTAMP_REGEX = re.compile(r'\[(\d\d)/(\w\w\w)/(\d\d\d\d):(\d\d):(\d\d):(\d\d) -\d*\]')
MONTHS = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
          'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
def _get_timestamp_from_log_line(line):
match = re.search(APACHE_TIMESTAMP_REGEX, line)
if match:
day, month, year, hour, mn, sec = match.groups()
return datetime.datetime(int(year), MONTHS[month], int(day), int(hour), int(mn), int(sec))
else:
        raise ValueError("Line %s doesn't contain a timestamp in expected format" % line)
def _skip_empty_logs(current_lines, fds):
indexes_to_remove = set()
for index, line in enumerate(current_lines[:]):
if line == '':
indexes_to_remove.add(index)
current_lines[:] = [ln for (i, ln) in enumerate(current_lines)
if i not in indexes_to_remove]
fds[:] = [fd for (i, fd) in enumerate(fds) if i not in indexes_to_remove]
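# _nway_merge performs a k-way merge of the per-host log files, always emitting the line
# with the oldest Apache timestamp; lines matching burst_regex that occur within
# burst_delta seconds of the previous match are also written to "<merged_log>.burst".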
def _nway_merge(logs, merged_log, burst_regex, burst_delta=5):
fds = [open(log) for log in logs]
fds_copy = copy.copy(fds)
merged_fd = open(merged_log, 'w')
burst_log_fd = open(merged_log + '.burst', 'w')
prev_burst = 0
prev_burst_line = ''
prev_is_written = False
try:
current_lines = [fd.readline() for fd in fds]
_skip_empty_logs(current_lines, fds)
timestamps = [_get_timestamp_from_log_line(ln) for ln in current_lines]
while fds:
assert len(timestamps) == len(current_lines) == len(fds)
min_timestamp = min(timestamps)
min_index = timestamps.index(min_timestamp)
line_to_write = current_lines[min_index]
merged_fd.write(line_to_write)
matches_burst_regex = re.search(burst_regex, line_to_write) is not None
if matches_burst_regex:
burst_time = time.mktime(min_timestamp.timetuple())
if burst_time < prev_burst + burst_delta:
if not prev_is_written:
burst_log_fd.write('\n--- new burst ---\n')
burst_log_fd.write(prev_burst_line)
burst_log_fd.write(line_to_write)
prev_is_written = True
else:
prev_is_written = False
prev_burst = burst_time
prev_burst_line = line_to_write
new_line = fds[min_index].readline()
if new_line != '':
current_lines[min_index] = new_line
timestamps[min_index] = _get_timestamp_from_log_line(new_line)
else:
del current_lines[min_index]
del fds[min_index]
del timestamps[min_index]
finally:
for fd in fds_copy:
fd.close()
merged_fd.close()
burst_log_fd.close()
def merge_logs(logs_dir, burst_regex, burst_delta):
log_files = os.listdir(logs_dir)
logs_by_day = {}
log_regex = r'(?:(\w*)_)?(access|error)\.log(?:\.(\d+))?\.(\w[a-zA-Z0-9\-]+)'
for log_file in log_files:
match = re.search(log_regex, log_file)
if match:
vhost, log_type, day, host = match.groups()
if host == 'burst':
continue
logs_by_day.setdefault((vhost, log_type, day), []).append(os.path.join(logs_dir, log_file))
for details, logs in logs_by_day.iteritems():
vhost, log_type, day = details
aggregated_log = '%s.log' % log_type
if vhost:
aggregated_log = '%s_%s' % (vhost, aggregated_log)
if day:
aggregated_log = '%s.%s' % (aggregated_log, day)
aggregated_log = os.path.join(logs_dir, aggregated_log)
_nway_merge(logs, aggregated_log, burst_regex, burst_delta)
if __name__ == '__main__':
if len(sys.argv) != 4:
print 'Usage: python merge_logs.py logs_dir burst_regex burst_delta'
sys.exit(1)
_, logs_dir, burst_regex, burst_delta = sys.argv
burst_regex = re.compile(burst_regex)
merge_logs(logs_dir, burst_regex, int(burst_delta))
| [
"[email protected]"
] | |
986e0eec3c38aa11a451675f610d62ea2f425201 | b651947d36ae5b190f63ea9e7dc05cccd5ba7e83 | /bot/parsing.py | 18cb82cd8876ef36643fffe17f25d930e2b0354c | [
"MIT"
] | permissive | andrewmaximoff/Schedule-Bot | 733a910904dd2ddcba2d74fc87c6af6c5f295e28 | 59dcb3fcf04ef547a44da369021e825c5f75921a | refs/heads/master | 2022-12-10T15:04:13.063306 | 2018-04-20T09:53:30 | 2018-04-20T09:59:16 | 130,342,287 | 1 | 0 | MIT | 2022-12-08T00:57:14 | 2018-04-20T09:41:25 | Python | UTF-8 | Python | false | false | 1,678 | py | import re
import requests
import pendulum
from bs4 import BeautifulSoup
def _exams(group_name):
""""""
r_json = requests.get(
'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()
group_url = r_json['data']['group_url'].rsplit('g=', 1)
html_text = requests.get(
'http://rozklad.kpi.ua/Schedules/ViewSessionSchedule.aspx?g={}'.format(group_url[1])).text
soup = BeautifulSoup(html_text, 'lxml')
exams_list = soup.findAll('tr')
number_exams = len(exams_list)
exams = []
result_l = []
for i in range(number_exams):
soup_2 = BeautifulSoup(str(exams_list[i]), 'lxml')
if not soup_2.findAll('a'):
continue
exam = []
date_list = str(soup_2.find('td').text).split('/')
time_exam = re.findall(r'\d\d:\d\d', str(
soup_2.findAll('td')[1]))[0].split(':')
date_exam = pendulum.create(year=int(date_list[2]), month=int(date_list[0]), day=int(date_list[1]),
hour=int(time_exam[0]),
minute=int(time_exam[1]), tz='Europe/Kiev')
date = date_exam.format(
'dddd D MMMM YYYY о HH:mm', formatter='alternative', locale='uk').capitalize()
for j in soup_2.findAll('a'):
exam.append(str(j.text))
exam.append(date)
exams.append(exam)
for i in exams:
result_l.append(
'*{}* `{}`\n{}\n{}'.format(i[0], i[2], i[1], i[3]))
result = '\n'.join(result_l)
if result:
return result
else:
return 'Дата экзаменов ещё неизвестна, спи спокойно :)'
| [
"[email protected]"
] | |
017f8e8a008c7776d18c53f850cc4000f42f9981 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/080_Send_more_money/smm.py | f810119d76ad911c83aa4df522952226bb7b069d | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | # send+more=money puzzle
# 2021-03-10 PV
# Brute force is acceptable here, only ~10! evals of the calc formula, acceptable
# 9 5 6 7 + 1 0 8 5 = 1 0 6 5 2
for s in range(0, 10):
for e in range(0, 10):
if e != s:
for n in range(0, 10):
if n not in (s, e):
for d in range(0, 10):
if d not in (s, e, n):
for m in range(1, 10):
if m not in (s, e, n, d):
for o in range(0, 10):
if o not in (s, e, n, d, m):
for r in range(0, 10):
if r not in (s, e, n, d, m, o):
for y in range(0, 10):
if y not in (s, e, n, d, m, o, r):
if s*1000+e*100+n*10+d+m*1000+o*100+r*10+e == m*10000+o*1000+n*100+e*10+y:
print(s, e, n, d, ' + ', m, o, r, e, ' = ', m, o, n, e, y)
| [
"[email protected]"
] | |
5877e3f3a02c8ba15ac1068debe5982a735d288d | 881369ae601208e19608067f75527d71174ee9c5 | /CH7P9.py | 259cee70f68b47d50c7c626ab4b7f50c81dc4417 | [] | no_license | ozmaws/Chapter-7 | dc3c958876b4d49e3d2b1805debda1975ebaac2e | 05358bed4cc010cafcaa40e59f6f1d882b67ffe0 | refs/heads/master | 2022-04-25T07:46:23.073206 | 2020-04-27T18:59:37 | 2020-04-27T18:59:37 | 259,424,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | from images import Image
def lighten(image, value):
for y in range(image.getHeight()):
for x in range(image.getWidth()):
(r, g, b) = image.getPixel(x, y)
r = r + value
if r > 255:
r = 255
g = g + value
if g > 255:
g = 255
b = b + value
if b > 255:
b = 255
image.setPixel(x, y, (r, g, b))
def darken(image, value):
for y in range(image.getHeight()):
for x in range(image.getWidth()):
(r, g, b) = image.getPixel(x, y)
r = r - value
if r < 0:
r = 0
g = g - value
if g < 0:
g = 0
b = b - value
if b < 0:
b = 0
image.setPixel(x, y, (r, g, b))
def colorFilter(image, cFilter):
for y in range(image.getHeight()):
for x in range(image.getWidth()):
(r, g, b) = image.getPixel(x, y)
(adjustedRed, adjustedGreen, adjustedBlue) = cFilter
r = r + adjustedRed
if r > 255:
r = 255
elif r < 0:
r = 0
g = g + adjustedGreen
if g > 255:
g = 255
elif g < 0:
g = 0
b = b + adjustedBlue
if b > 255:
b = 255
elif b < 0:
b = 0
image.setPixel(x, y, (r, g, b))
def main(filename = "example.gif"):
image = Image(filename)
print("Close to continue.")
image.draw()
colorFilter(image, (0, -100, 0))
print("Close window to quit.")
image.draw()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
91e061654c76195b0f182c4aca38252914464bcb | 6421489894e389d3d7d32fa276dabafa50772208 | /app.py | 0a30cbb4a977b74fbe7436e7cb943907c53a4f57 | [] | no_license | Jardix/Jareds_Survey1 | ab77448d3a3275a9712cc3719dc3e9b9311a0425 | af5b073b86da0eeb9ebe9ffb0ad51f896f840191 | refs/heads/main | 2023-07-10T03:45:27.405309 | 2021-08-12T20:19:33 | 2021-08-12T20:19:33 | 395,434,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | from flask import Flask, request, render_template, redirect, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from surveys import satisfaction_survey as survey
# key names will use to store some things in the session;
# put here as constants so we're guaranteed to be consistent in
# our spelling of these
RESPONSES_KEY = "responses"
app = Flask(__name__)
app.config['SECRET_KEY'] = "never-tell!"
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
debug = DebugToolbarExtension(app)
@app.route("/")
def show_survey_start():
"""Select a survey."""
return render_template("survey_start.html", survey=survey)
@app.route("/begin", methods=["POST"])
def start_survey():
""" Clear the session of responses."""
session[RESPONSES_KEY] = []
return redirect("/questions/0")
@app.route("/answer", methods=["POST"])
def handle_question():
"""Save responses and redirect to next question."""
#get the response choice
choice = request.form['answer']
# add this response to the question
responses = session[RESPONSES_KEY]
responses.append(choice)
session[RESPONSES_KEY] = responses
if (len(responses) == len(survey.questions)):
# User has answered all questions
return redirect("/complete")
else:
return redirect(f"/questions/{len(responses)}")
@app.route("/questions/<int:qid>")
def show_questions(qid):
"""Display current question"""
responses = session.get(RESPONSES_KEY)
if (responses is None):
# Trying to access question page too soon
return redirect("/")
if (len(responses) == len(survey.questions)):
#They've answered all the questions, thank them.
return redirect("/complete")
if (len(responses) != qid):
# Trying to access questions out of order.
flash(f"Invalid Question Id: {qid}.")
return redirect(f"/questions/{len(responses)}")
question = survey.questions[qid]
return render_template("question.html", question_num=qid, question=question)
@app.route("/complete")
def complete():
"""Survey complete. Show completion page. Award cookie."""
return render_template("completion.html")
| [
"[email protected]"
] | |
c8a135a13369285e7b7c9872324986de82fc5ce7 | 493b19d44a6f7f5e664d59267f4041d7e6370492 | /coir/test_single_image.py | ad567dec1aae9540d0c5c8128306be555cc3f49a | [
"MIT"
] | permissive | OmarHamdoun/Yolo-on-vedai | 41eb2514ed6c189b61a5be64ce9dd58317d7e3bb | 0c055870f29537abed0d5b92f733a7a5f90c8245 | refs/heads/master | 2020-05-20T03:28:52.958217 | 2019-02-10T22:06:23 | 2019-02-10T22:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,930 | py | # coding: utf-8
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import time
from modelcoir import yolov3
import sys
sys.path[:0] = ['../']
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
parser = argparse.ArgumentParser(description="YOLO-V3 test single image test procedure.")
parser.add_argument("--input_image_ir", type=str, default = '../data/ir512/00001270_ir.png', help="The path of the ir input image.")
parser.add_argument("--input_image_co", type=str, default = '../data/visible512/00001270_co.png', help="The path of the co input image.")
parser.add_argument("--anchor_path", type=str, default="../data/yolo_anchors.txt",
help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[512, 512],
help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--class_name_path", type=str, default="../data/vedai.names",
help="The path of the class names.")
parser.add_argument("--checkpoint_dir", type=str, default="./checkpoint/",
help="The path of the weights to restore.")
args = parser.parse_args()
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
img_co = cv2.imread(args.input_image_co)
img_ori = cv2.imread(args.input_image_ir)
height_ori, width_ori = img_ori.shape[:2]
img_ir = cv2.resize(img_ori, tuple(args.new_size))
img_co = cv2.resize(img_co, tuple(args.new_size))
img_ir = cv2.cvtColor(img_ir, cv2.COLOR_BGR2RGB)
img_co = cv2.cvtColor(img_co, cv2.COLOR_BGR2RGB)
img_ir = np.asarray(img_ir, np.float32)
img_co = np.asarray(img_co, np.float32)
img_ir = img_ir[np.newaxis, :] / 255.
img_co = img_co[np.newaxis, :] / 255.
restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
input_data_ir = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data_ir')
input_data_co = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data_co')
yolo_model = yolov3(args.num_class, args.anchors)
with tf.variable_scope('yolov3'):
pred_feature_maps = yolo_model.forward(input_data_ir, input_data_co, False)
pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
pred_scores = pred_confs * pred_probs
boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=30, score_thresh=0.4, iou_thresh=0.5)
saver = tf.train.Saver()
saver.restore(sess, restore_path)
start = time.time()
boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data_ir: img_ir,
input_data_co: img_co})
print(time.time()-start)
# rescale the coordinates to the original image
boxes_[:, 0] *= (width_ori/float(args.new_size[0]))
boxes_[:, 2] *= (width_ori/float(args.new_size[0]))
boxes_[:, 1] *= (height_ori/float(args.new_size[1]))
boxes_[:, 3] *= (height_ori/float(args.new_size[1]))
print("box coords:")
print(boxes_)
print('*' * 30)
print("scores:")
print(scores_)
print('*' * 30)
print("labels:")
print(labels_)
for i in range(len(boxes_)):
x0, y0, x1, y1 = boxes_[i]
plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[i]], color=color_table[labels_[i]])
cv2.imshow('Detection result', img_ori)
cv2.imwrite('detection_result.jpg', img_ori)
cv2.waitKey(10000)
| [
"[email protected]"
] | |
2ffe9bd07b23131856aee4d0358d667d3db33aee | 154b2ee78e764f800de199dda70d4dfde9e1b7e7 | /checkfile.py | 50e592d53b5a5f6fb190a9617cccd50752b3d39b | [] | no_license | AkashPatel1/PUBG_kaggle | bb904b293d9ca9256ecd91238a79762a40ec7663 | 0e8ea89c6ec7b3b0cf7d1bdc5f606d305c947293 | refs/heads/master | 2020-04-07T05:37:05.288138 | 2018-11-18T16:25:02 | 2018-11-18T16:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py |
import numpy as np
import pandas as pd
import os
# Any results you write to the current directory are saved as output.
dataset = pd.read_csv("train_V2.csv")
dataset.drop(['matchType'],axis=1,inplace=True)
dataset.fillna(0, inplace=True)
X_train = dataset.iloc[:1000, 3:27].values
y_train = dataset.iloc[:1000, 27].values
dataset1 = pd.read_csv("test_V2.csv")
dataset1.drop(['matchType'],axis=1,inplace=True)
dataset1.fillna(0, inplace=True)
ID = dataset1.iloc[:, 0:1].values
X_test = dataset1.iloc[:, 3:].values
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=20, random_state=0)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
ID = ID.reshape(-1)
fmt='%s, %.8f'
combined = np.vstack((ID, y_pred)).T
np.savetxt("submission_V2.csv", combined, header="Id,winPlacePerc", delimiter=",",fmt=fmt,comments='')
| [
"[email protected]"
] | |
e074c3726081150a53af76e76343370280b5ed44 | bbb9a0a9e68c6da6aa72ac9ceecdcbf97a392788 | /job4/db2.py | e4061196f7b75d0a9c7482ef9870621c4778ce56 | [] | no_license | sskp9999/testing | 9b4adf242c13b91c99a52c95a220b96260fe41ea | eb0415024fa442950d005617a06d4ba8188f3114 | refs/heads/master | 2023-08-25T20:18:19.137177 | 2021-10-19T07:03:25 | 2021-10-19T07:03:25 | 417,916,413 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,911 | py | import requests
import json
instance_id = '1871036088967188.8.gcp.databricks.com'
api_version = '/api/2.0'
job_name = 'Sample2_schedule'
project_id = "zebra-cicd-327507"
secret_id = "brick-sec-1"
version_id = 1
def access_secret_version(project_id, secret_id, version_id):
"""
Access the payload for the given secret version if one exists. The version
can be a version number as a string (e.g. "5") or an alias (e.g. "latest").
"""
# Import the Secret Manager client library.
from google.cloud import secretmanager
import json
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the secret version.
name = f"projects/{project_id}/secrets/{secret_id}/versions/{version_id}"
# Access the secret version.
response = client.access_secret_version(request={"name": name})
# Print the secret payload.
#
# WARNING: Do not print the secret in a production environment - this
# snippet is showing how to access the secret material.
pd = response.payload.data.decode("UTF-8")
return json.loads(pd)
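# Illustrative use of the helper above (the "latest" alias always resolves to the newest
# secret version, while a numeric string such as "5" pins a specific one):
#   creds = access_secret_version(project_id, secret_id, "latest")
#   creds["login"], creds["password"]   # consumed as basic-auth credentials below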
def create_job():
api_command = '/jobs/create'
url = f"https://{instance_id}{api_version}{api_command}"
acc_token=access_secret_version(project_id, secret_id, version_id)
response = requests.post(url, auth=(acc_token["login"], acc_token['password']), data = open('./create_job.json', 'rb'))
print(json.dumps(json.loads(response.text), indent = 2))
def reset_job():
api_command = '/jobs/reset'
url = f"https://{instance_id}{api_version}{api_command}"
acc_token=access_secret_version(project_id, secret_id, version_id)
response = requests.post(url, auth=(acc_token["login"], acc_token['password']), data = open('./create_job.json', 'rb'))
print(json.dumps(json.loads(response.text), indent = 2))
def list_job():
api_command = '/jobs/list'
url = f"https://{instance_id}{api_version}{api_command}"
acc_token=access_secret_version(project_id, secret_id, version_id)
response = requests.get(url, auth=(acc_token["login"], acc_token['password']))
# response = requests.get(url = url, headers={'Authorization': 'token apia2c2b104501763ff04b111938b261297'})
#a=json.loads(response.text)
#print(type(a))
a=json.dumps(json.loads(response.text), indent = 2)
print(a)
if a == '{}':
return False
elif a != "{}":
b=json.loads(a)
f=[]
for i in b['jobs']:
f.append(i['settings']['name'])
print("these jobs are exist", f)
if job_name in f:
job=True
else:
job=False
return job
else:
return False
def main():
status=list_job()
if status == True:
# reset_job()
pass
else:
create_job()
def get_job():
api_command = '/jobs/get'
url = f"https://{instance_id}{api_version}{api_command}"
acc_token=access_secret_version(project_id, secret_id, version_id)
params = {"job_id":'Sample3_schedule'}
response = requests.get(url, auth=(acc_token["login"], acc_token['password']), params = params)
a=json.dumps(json.loads(response.text), indent = 2)
print(a)
def list_job_test():
api_command = '/jobs/list'
url = f"https://{instance_id}{api_version}{api_command}"
acc_token=access_secret_version(project_id, secret_id, version_id)
response = requests.get(url, auth=(acc_token["login"], acc_token['password']))
a=json.dumps(json.loads(response.text), indent = 2)
# print(a)
if a == '{}':
return False
elif a != "{}":
b=json.loads(a)
f=[]
for i in b['jobs']:
if job_name == i["settings"]["name"]:
print (i['job_id'], i["settings"]["name"] )
break
# f.append(i['settings']['name'])
# print("these jobs are exist", f)
list_job_test()
#create_job()
#list_job()
#main()
#reset_job()
#get_job()
| [
"[email protected]"
] | |
2e3f1a46a36aad0dae7847e0e866a41c456e917b | 7179913b31c30bb3d7cc9f932cf8b068bd7f88a0 | /BFS.py | 9375527d195266e6fe98dabc4d5aac7c8b899c31 | [] | no_license | RinLaNir/homework_discrete_mathematics | 29c4d79d978266541f17427955a42f3a59d9d952 | 1543ba2d66c25ff1e55f35c28342f0f0f4a27528 | refs/heads/master | 2020-05-04T18:42:21.351256 | 2019-04-04T13:29:06 | 2019-04-04T13:29:06 | 179,363,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | import random
import numpy as np
import collections
while True:
n = int(input("Задайте розмір суміжної матриці [5-100]: "))
if 5<=n<=100:
break
# generate a random adjacency matrix
matrix = np.random.randint(0,2,size=(n,n))
# without the NumPy package:
#matrix = [[random.randint(0,1) for j in range(n)] for i in range(n)]
print('\n')
# make the matrix symmetric with a zero diagonal
for i in range(0, n):
for j in range(0, i+1):
if i == j:
matrix[i][j] = 0
else:
matrix[i][j] = matrix[j][i]
print('Adjacency matrix: \n{}\n'.format(matrix))
# without the NumPy package:
#for i in range(len(matrix)):
# print(matrix[i])
# build the graph dictionary: each vertex is a key, its value is the set of vertices it is connected to
graph ={}
for i in range(n):
ls = []
for j in range(n):
if matrix[i][j] == 1:
ls.append(j)
graph[i] = set(ls)
def bfs(graph, start, end):
visited, queue = set(), collections.deque([(start, [start])])
visited.add(start)
while queue:
(vertex, path) = queue.popleft()
for neighbour in graph[vertex] - set(path):
if neighbour not in visited:
if neighbour == end:
yield path + [neighbour]
else:
visited.add(neighbour)
queue.append((neighbour, path + [neighbour]))
# Since the shortest path is the first one yielded by the BFS path generator,
# we can write a helper that simply returns the first (shortest) path found, or None if there is no path.
def shortest_path(graph, start, end):
try:
return next(bfs(graph, start, end))
except StopIteration:
return None
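# Example: for the path graph {0: {1}, 1: {0, 2}, 2: {1}}, shortest_path(graph, 0, 2)
# returns [0, 1, 2]; it returns None when the two vertices are not connected.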
distance = np.random.randint(0,1,size=(n,n))
# without the NumPy package:
#distance = [[0 for j in range(n)] for i in range(n)]
for i in range(0, n):
for j in range(0, i+1):
if i == j:
distance[i][j] = 10**9
else:
if shortest_path(graph, i, j) == None:
distance[i][j] = 10**9
distance[j][i] = distance[i][j]
else:
ls = len(list(shortest_path(graph, i, j)))
if ls == 0:
distance[i][j] = 10**9
distance[j][i] = distance[i][j]
else:
distance[i][j] = ls - 1
distance[j][i] = distance[i][j]
# check the shortest path and its length between two points; convenient for verifying large matrices
print('Distance matrix: \n{}\n'.format(distance))
print('Check the path between two points')
while True:
    i = int(input('Start point [0 - {}]: '.format(n - 1)))
    j = int(input('End point [0 - {}]: '.format(n - 1)))
print('\n')
    print('Shortest path: {}\nPath length: {}\n'.format(list(shortest_path(graph, i, j)),len(list(shortest_path(graph, i, j)))-1))
| [
"[email protected]"
] | |
063b15567e855884541fac9dd792242013f313a0 | 72306656e0a0e7daa59468ce6cde0024db7962ff | /201312/201312-3.py | 5de291a8013fd85ecc7fa38ebcd0327cf6581be2 | [] | no_license | HUSTERGS/CSP | ea584254abb60298a6e6492b80cf23a9ab0a4850 | 1328eaffb54b03f9a97b951366b1160342aae7bf | refs/heads/master | 2022-12-10T07:54:29.531296 | 2020-09-15T14:01:51 | 2020-09-15T14:01:51 | 201,618,751 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | # score 100
"""
Each time a new bar is appended, the new maximum must be one of the following:
1. the previous maximum, or
2. the largest rectangle whose right edge is the newly appended (rightmost) bar, extending to the left.
Case 2 needs a running minimum of the bar heights; updating it manually instead of calling min() on a slice keeps the extra cost down.
"""
from sys import stdin
n = int(stdin.readline())
nums = tuple(int(x) for x in stdin.readline().split())
queue = []
max_rect = nums[0]
def find_max():
length = len(queue)
index = length - 1
max_value = queue[-1]
min_one = queue[-1]
while index >= 0:
if queue[index] < min_one:
min_one = queue[index]
temp_value = min_one * (length - index)
if temp_value > max_value:
max_value = temp_value
index -= 1
return max_value
i = 0
while i < len(nums):
next_num = nums[i]
queue.insert(len(queue), next_num)
temp = find_max()
if temp > max_rect:
max_rect = temp
i += 1
print(max_rect) | [
"[email protected]"
] | |
58ecea86d5b4170dd2c1fbfa9bef55b7c2dedc0f | 9023909d2776e708755f98d5485c4cffb3a56000 | /oneflow/python/nn/modules/acos.py | 64bd494444673f26b34ecc02a3e7fc7db0dd081b | [
"Apache-2.0"
] | permissive | sailfish009/oneflow | f6cf95afe67e284d9f79f1a941e7251dfc58b0f7 | 4780aae50ab389472bd0b76c4333e7e0a1a56ef7 | refs/heads/master | 2023-06-24T02:06:40.957297 | 2021-07-26T09:35:29 | 2021-07-26T09:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.oneflow_export import oneflow_export
from oneflow.python.nn.module import Module
from oneflow.python.framework.tensor import register_tensor_op
class Acos(Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return flow.F.acos(x)
@oneflow_export("acos")
@register_tensor_op("acos")
def acos_op(tensor):
r"""
Returns a new tensor with the inverse cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \arccos(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([0.5, 0.6, 0.7])
>>> input = flow.Tensor(arr, dtype=flow.float32)
>>> output = flow.acos(input)
>>> output
tensor([1.0472, 0.9273, 0.7954], dtype=oneflow.float32)
"""
return Acos()(tensor)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"[email protected]"
] | |
fca4b392599e38d2b6e36e42a928c8a37a5028d4 | 6bcee1f9e345985b461353f99e0ccf93b753a807 | /venv/bin/wheel | b0a15c76ae7bc0e975f84acba3d640a01f8b0f6c | [] | no_license | pipesjr/volumetracker | dcd98eb5c794a2ff0a5e8c21b436f3f89b26af37 | ab68b644466e06d801ebc9736290ecca5ecc0ca8 | refs/heads/master | 2022-09-27T08:13:57.397497 | 2019-11-19T13:40:28 | 2019-11-19T13:40:28 | 222,704,944 | 0 | 0 | null | 2022-09-16T18:13:14 | 2019-11-19T13:39:42 | Python | UTF-8 | Python | false | false | 249 | #!/Users/peterpeluso/Desktop/flask_rp/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
5a122b62eb01f2b1116a7650429bf6fec1b490e1 | af904de02a5949b57648b0393f7827856b2947b9 | /QLFactChecker/code/classification/answers/combine_best_feature_groups.py | 77c325268ec91a450ae5891327668f087b6d54c7 | [] | no_license | EtiChaudhary/FCPLus | f69731fe48bae8cfb1ac7886e697f5a9056e1ea0 | f76a020b1dfe8e86fb4a9e2e36654eca2a398dad | refs/heads/master | 2022-11-12T16:38:20.773773 | 2020-07-02T17:17:08 | 2020-07-02T17:17:08 | 276,702,102 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | import RunCV
import csv, operator
################################
# Combine the best feature groups.
#
################################
# This is the index of the column of the result we are comparing in the results file.
ACCURACY_INDEX = 5
MAP_INDEX = 9
# To change whether runs should be sorted by accuracy or MAP, change the values below:
RESULT_SCORE_INDEX = ACCURACY_INDEX
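# e.g. set RESULT_SCORE_INDEX = MAP_INDEX to rank the runs by MAP instead of accuracy.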
RUN_PREFIX = 'TOP_'
SET_NAME = 'dev+test'
RESULTS_FILE = "../../../data/results/results-answers-cross-validation-"+SET_NAME+".tsv"
TOP_N_FROM = 2
TOP_N_TO = 40
def main():
# 1. Read the results file and sort the scores (only the single feature groups: -incl)
# 2. combine the features from the feature group
best_results = read_best_results()
max_index = min(TOP_N_TO, len(best_results))
# for group_score in best_results:
# group = group_score[0]
# score = group_score[1]
groups_names = []
# groups_names_str = ''
groups_names_str = 'USER_EXPERTISE, ' + 'ANSWER_QUALITY, ' + 'READABILITY, '
for i in range(0, max_index):
n = i+1
index_name = RUN_PREFIX+str(n)
groups_names.append(best_results[i][0])
if groups_names_str != '':
groups_names_str += ', '
groups_names_str += best_results[i][0]
if n >= TOP_N_FROM:
run_id = index_name + ' ('+groups_names_str + ')'
print('!!! Running...', run_id)
RunCV.run(run_id, groups_names)
def read_best_results():
group_scores = {}
with open(RESULTS_FILE,encoding="utf8") as csvfile:
csvreader = csv.reader(csvfile, delimiter='\t')
for row in csvreader:
group = row[0]
score = row[RESULT_SCORE_INDEX]
if (group.endswith('-incl')):
group_scores[group] = score
print('BEFORE SORT:', group_scores)
sorted_group_scores = sorted(group_scores.items(), key=operator.itemgetter(1), reverse=True)
print('AFTER SORT:', sorted_group_scores)
return sorted_group_scores
# Start here:
main() | [
"[email protected]"
] | |
295a1c7eeb4b7fb02b3fe930155e6f4753c5bbf9 | ea1b3613fa137e812e2e8d3a95f3d649bcab7162 | /SportsRegistration/wsgi.py | f01e94cbb86095d1789189fc8482e4dddb3abc43 | [] | no_license | garbagemayor/sports | aa15dd077c53a9f6ef0fab8e792425b00e64d667 | 92580468004b9d1a3488f81f1d319911fbeddcb8 | refs/heads/master | 2021-09-06T06:28:40.488916 | 2018-02-03T06:04:22 | 2018-02-03T06:04:22 | 111,417,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | """
WSGI config for SportsRegistration project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SportsRegistration.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
42b2917a16ba451c8ad5096c5c9b576f076affa0 | 1455db7ea72218bd184e29f4daca3fcddf05a630 | /main.py | 32a395e0a62a3ecc3c94e4c81344197c2040459c | [] | no_license | hiaiproject/hiaiproject | 4dc9300244ddc3914db1a0c893241ca2da27952b | 52b4f29a2601f97f6f3c545397a31c5a41295abf | refs/heads/master | 2023-05-06T22:46:11.366746 | 2021-05-27T06:51:41 | 2021-05-27T06:51:41 | 367,259,234 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | def main():
print("Hello, AI")
main()
| [
"[email protected]"
] | |
a938268e6e7a211edccad018a4b6418271b2bdb2 | bdf7b3ff5b8fd5d18003705f0cb893220788a179 | /tests/verification/test_build_stage.py | 74bb795273dd5e258e567d7d38765bb56ba6a4ce | [
"Apache-2.0"
] | permissive | ianlet/spaceship-ci | a055afecc58456274ae3fdffe53b0978eee49750 | c61df12778b024078a19d9d97e44dc7d219201b9 | refs/heads/master | 2021-03-30T17:41:40.060784 | 2018-09-23T19:55:43 | 2018-09-23T19:55:43 | 122,879,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | from unittest import TestCase
from mockito import mock, verify, when
from challenge.challenge import Challenge
from challenge.submission import Submission
from verification.build_stage import BuildStage
class TestBuildStage(TestCase):
ORG_NAME = 'org-name'
CHALLENGE_NAME = 'challenge-name'
SUBMISSION_NAME = 'submission-01'
SUBMISSION_URL = '[email protected]:org/submission.git'
BASE_PATH = 'tests'
CONTAINER_COMMAND = ['./gradlew', 'build', '-x', 'test']
challenge = Challenge(ORG_NAME, CHALLENGE_NAME)
submission = Submission(SUBMISSION_NAME, SUBMISSION_URL, challenge)
submission_path = f'{BASE_PATH}/{submission.path}'
container_name = f'{submission.slug}--build'
def setUp(self):
self.executor = mock()
self.stage = BuildStage(self.BASE_PATH, self.executor)
def test_run_submission_container(self):
container = mock()
when(self.executor).create_container(self.submission.slug, self.container_name, self.CONTAINER_COMMAND,
self.submission_path).thenReturn(container)
self.stage.execute(self.submission)
verify(container).run()
| [
"[email protected]"
] | |
604c698499ac73b30f20bbc78ebd91207f9538a6 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/astropy-1.1.2-np110py27_0/lib/python2.7/site-packages/astropy/units/tests/py3_test_quantity_annotations.py | e0eceac14e6e744ab5d600b14ee0ae04223bd509 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 6,014 | py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import wraps
from textwrap import dedent
from ... import units as u
from ...extern import six
from ...tests.helper import pytest
def py3only(func):
if not six.PY3:
return pytest.mark.skipif('not six.PY3')(func)
else:
@wraps(func)
def wrapper(*args, **kwargs):
if func.__doc__ is None:
pytest.skip('unable to run this test due to missing '
'docstrings (maybe the module was compiled with '
'optimization flags?)')
code = compile(dedent(func.__doc__), __file__, 'exec')
# This uses an unqualified exec statement illegally in Python 2,
# but perfectly allowed in Python 3 so in fact we eval the exec
# call :)
eval('exec(code)')
return wrapper
@py3only
def test_args3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
"""
@py3only
def test_args_noconvert3():
"""
@u.quantity_input()
def myfunc_args(solarx: u.arcsec, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1*u.deg, 1*u.arcmin)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.deg
assert solary.unit == u.arcmin
"""
@py3only
def test_args_nonquantity3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 100)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, int)
assert solarx.unit == u.arcsec
"""
@py3only
def test_arg_equivalencies3():
"""
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: u.arcsec, solary: u.eV):
return solarx, solary+(10*u.J) # Add an energy to check equiv is working
solarx, solary = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.gram
"""
@py3only
def test_wrong_unit3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary: u.deg):
return solarx, solary
with pytest.raises(u.UnitsError) as e:
solarx, solary = myfunc_args(1*u.arcsec, 100*u.km)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' must be in units convertable to 'deg'."
"""
@py3only
def test_not_quantity3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary: u.deg):
return solarx, solary
with pytest.raises(TypeError) as e:
solarx, solary = myfunc_args(1*u.arcsec, 100)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
"""
@py3only
def test_decorator_override():
"""
@u.quantity_input(solarx=u.arcsec)
def myfunc_args(solarx: u.km, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec, 1*u.arcsec)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, u.Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
"""
@py3only
def test_kwargs3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary, myk: u.arcsec=1*u.arcsec):
return solarx, solary, myk
solarx, solary, myk = myfunc_args(1*u.arcsec, 100, myk=100*u.deg)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, int)
assert isinstance(myk, u.Quantity)
assert myk.unit == u.deg
"""
@py3only
def test_unused_kwargs3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary, myk: u.arcsec=1*u.arcsec, myk2=1000):
return solarx, solary, myk, myk2
solarx, solary, myk, myk2 = myfunc_args(1*u.arcsec, 100, myk=100*u.deg, myk2=10)
assert isinstance(solarx, u.Quantity)
assert isinstance(solary, int)
assert isinstance(myk, u.Quantity)
assert isinstance(myk2, int)
assert myk.unit == u.deg
assert myk2 == 10
"""
@py3only
def test_kwarg_equivalencies3():
"""
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: u.arcsec, energy: u.eV=10*u.eV):
return solarx, energy+(10*u.J) # Add an energy to check equiv is working
solarx, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(solarx, u.Quantity)
assert isinstance(energy, u.Quantity)
assert solarx.unit == u.arcsec
assert energy.unit == u.gram
"""
@py3only
def test_kwarg_wrong_unit3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary: u.deg=10*u.deg):
return solarx, solary
with pytest.raises(u.UnitsError) as e:
solarx, solary = myfunc_args(1*u.arcsec, solary=100*u.km)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' must be in units convertable to 'deg'."
"""
@py3only
def test_kwarg_not_quantity3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary: u.deg=10*u.deg):
return solarx, solary
with pytest.raises(TypeError) as e:
solarx, solary = myfunc_args(1*u.arcsec, solary=100)
assert str(e.value) == "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
"""
@py3only
def test_kwarg_default3():
"""
@u.quantity_input
def myfunc_args(solarx: u.arcsec, solary: u.deg=10*u.deg):
return solarx, solary
solarx, solary = myfunc_args(1*u.arcsec)
"""
| [
"[email protected]"
] | |
bf19842a2b0d50475b0ddf15ecdd803a42097161 | bd3df2f18b1aabda2d157fdec32b1c4c6074b60b | /scripts/get_state_dict.py | 99bc50a49eacd2757f2ef82e68f6432982b527da | [] | no_license | wushuang01/Focal_loss_for_object_detection | dcc0f25da0577bd6bf9a7a2276ac31440bd6de2d | f3c66b975510fa697c46838ec7e548c93959b42d | refs/heads/master | 2020-04-15T17:32:14.239187 | 2019-01-13T14:19:32 | 2019-01-13T14:19:32 | 164,874,626 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | '''Init RestinaNet50 with pretrained ResNet50 model.
Download pretrained ResNet50 params from:
https://download.pytorch.org/models/resnet50-19c8e357.pth
'''
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from fpn import FPN50
from retinanet import RetinaNet
print('Loading pretrained ResNet50 model..')
d = torch.load('/home/wushuanchen/PycharmProjects/pytorch-retinanet-master/model/resnet50-19c8e357.pth')
print('Loading into FPN50..')
fpn = FPN50()
dd = fpn.state_dict()
for k in d.keys():
if not k.startswith('fc'): # skip fc layers
dd[k] = d[k]
print('Saving RetinaNet..')
net = RetinaNet()
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.normal(m.weight, mean=0, std=0.01)
if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
pi = 0.01
init.constant(net.cls_head[-1].bias, -math.log((1-pi)/pi))
net.fpn.load_state_dict(dd)
torch.save(net.state_dict(), 'net.pth')
print('Done!')
| [
"[email protected]"
] | |
5008856705ec2cf86f13825b73dca55b21601e6a | 9b521bf3e768d07c323c681ae9be96ef9e3796a1 | /src/app.py | 0fdd3f56012aea49ddc862d66bbe6baa26668875 | [] | no_license | vineethnnn/Nanba | 2187689f73b88f17f2c7969ffa0a5171afc7d4e8 | 30f4a745a89bc8a2754da9b87deeb33c875bf2c1 | refs/heads/master | 2022-11-10T10:49:25.027976 | 2020-06-22T13:57:42 | 2020-06-22T14:08:07 | 274,066,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | #Developer A : Line 1
#Developer B : Line 1
| [
"[email protected]"
] | |
07c972209a7ff04f110cabe114711e157eeb5729 | 70dd72b7837907f69eef0ee5d877cd54534eff21 | /POC_Scan/information/informationmain.py | 4ea171e6683067a3d20bdf0dc73134cc8ca9bd1e | [] | no_license | scsdev-cyber/JR-scan | 077b12c4fc4d1d1ec7542ee01a22b0d3833ce77b | f4c014e86fc43d2032de8f350b1273d988afa42a | refs/heads/master | 2023-08-09T20:09:45.589994 | 2021-08-27T02:25:32 | 2021-08-27T02:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: information-disclosure vulnerability library
referer: unknown
author: Lucifer
description: collects all information-disclosure vulnerability checks and packages them into a single module
'''
from POC_Scan.information.springboot_api import springboot_api_BaseVerify
from POC_Scan.information.options_method import options_method_BaseVerify
from POC_Scan.information.robots_find import robots_find_BaseVerify
from POC_Scan.information.git_check import git_check_BaseVerify
from POC_Scan.information.jsp_conf_find import jsp_conf_find_BaseVerify
from POC_Scan.information.svn_check import svn_check_BaseVerify
from POC_Scan.information.jetbrains_ide_workspace_disclosure import jetbrains_ide_workspace_disclosure_BaseVerify
from POC_Scan.information.apache_server_status_disclosure import apache_server_status_disclosure_BaseVerify
from POC_Scan.information.crossdomain_find import crossdomain_find_BaseVerify
| [
"[email protected]"
] | |
92792e9874397cc6ff3d79a132acca0cd6f5a226 | 363feefc8c5268e0a7b883a4cfcebc9706b3e0aa | /manage.py | 68989438932d195e8a72ad2b891e595514f62810 | [] | no_license | viniciuspaulo/food | 9438e2ba428dc48d1c0b9ced516d8b33fc472400 | 94436b46af89610aa7b8c82ac82d8aa830142f29 | refs/heads/master | 2022-12-08T21:55:26.176266 | 2020-01-02T08:35:00 | 2020-01-02T08:35:00 | 231,315,718 | 0 | 0 | null | 2022-12-08T03:22:42 | 2020-01-02T05:47:10 | JavaScript | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vinicius.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
7d58a752b08f9e4e6440bc904e81f9ff2b9878b2 | b79dc44cf659d86b3157737084545eae85da98bf | /flasky/app/__init__.py | 0d190f84fb16b10a3a0d31705bcc5ae8538aeb67 | [] | no_license | coral1412/Flask-Blog | fc6fc4af47fb2d8df5dce0a97decdfbe473529b6 | 3603e7f56323b70ee23e481891aa524acfcd775e | refs/heads/master | 2021-01-11T08:57:08.666210 | 2017-01-21T08:31:07 | 2017-01-21T08:31:07 | 77,498,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from flask import Flask,render_template
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config
bootstrap=Bootstrap()
mail=Mail()
moment=Moment()
db=SQLAlchemy()
def create_app(config_name):
app=Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
| [
"[email protected]"
] | |
d54e5d3ce6cc47fbad810f3b6dbb87a511b19a32 | b6b380e6f5353dba2256211033cebec638dffe4a | /packages/pd/tests/db/default_TestCase.py | 41e5dd8ef384968e14126b398fdb1b4a32feedd2 | [] | no_license | danse-inelastic/pyre-all | 0ddf640b68f6089e40345e9a8e20562a8b035b3c | 59cc235b6481586c58415535bbec660470218e31 | refs/heads/master | 2021-01-18T12:31:27.905459 | 2015-04-26T04:57:46 | 2015-04-26T04:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import unittest
class TestCase(unittest.TestCase):
def dbManager(self):
from pd.db import connect
db = connect(db ='postgres:///test')
db.autocommit(True)
return db
def test1(self):
'dsaw.db.ReferenceSet: '
db = self.dbManager()
defaults = {
'boolean': True,
'integer': 10,
'real': 100.,
'char': 'abc',
#'doubleArray': [1,2,3],
}
initkwds = {
'char': {'length': 10},
}
from pd.Table import Table
class Test(Table):
name = "test"
import pd
for type, default in defaults.iteritems():
kwds = initkwds.get(type) or {}
name = '%svar' % type
code = '%s = pd.%s(name="%s", default=%r, **kwds)' % (
name, type, name, default)
exec code
continue
t = Test()
for type, default in defaults.iteritems():
varname = '%svar' % type
self.assertEqual(getattr(t, varname), default)
continue
return
pass # end of TestCase
def pysuite():
suite1 = unittest.makeSuite(TestCase)
return unittest.TestSuite( (suite1,) )
def main():
pytests = pysuite()
alltests = unittest.TestSuite( (pytests, ) )
unittest.TextTestRunner(verbosity=2).run(alltests)
return
if __name__ == '__main__': main()
# version
__id__ = "$Id$"
# End of file
| [
"[email protected]"
] | |
7ba646953f0f1a15f5634f06ac357280c66783ce | fb72aef4db762749f3ac4bc08da36d6accee0697 | /modules/photons_tile_paint/set_64.py | a5dad90fe6b7d8f9d4f9b8643e4dac851f4fb956 | [
"MIT"
] | permissive | xbliss/photons-core | 47698cc44ea80354e0dcabe42d8d370ab0623f4b | 3aca907ff29adffcab4fc22551511c5d25b8c2b7 | refs/heads/master | 2022-11-07T12:33:09.951104 | 2020-05-07T09:10:35 | 2020-05-07T09:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,373 | py | """
Tile animations requires packing many Set64 messages in a short amount of
time. This file exists to make that as efficient as possible by creating a class
that can pack a Set64 for tile animation with as much caching power as
possible
Don't use this directly, always use it via the
photons_tile_paint.animation.canvas_to_msgs function
"""
from photons_messages.fields import hsbk, Color as ProtocolColor
from photons_messages import TileMessages
from photons_protocol.messages import T
from delfick_project.norms import sb
from bitarray import bitarray
from lru import LRU
import functools
import binascii
import operator
import struct
class Set64Maker:
"""
One of these is made as the set_64_maker callable. It contains as much
of the Set64 message packd as we can and then provides a fake Set64
that photons can use to create the messages
"""
class Target:
"""Used to cache the bits for our target"""
def __init__(self, val):
self.bytes = val
self.serial = binascii.hexlify(val[:6]).decode()
self.packd = bitarray(endian="little")
self.packd.frombytes(val)
class Set64:
"""
The actual fake message
We assume this is used via canvas_to_msgs and so we expect kwargs to
contain "ack_required", "tile_index", "width", "duration" and "colors"
And we assume target is set via a setter property and .update() is
called with source and sequence
Because this is for tile animations, the rest of the payload is always
length=1, x=0, y=0; and res_required is always False
We use a real TileMessages.Set64 to get the packed bits for the parts
that do not change, which is stored on the maker (the instance of Set64Maker)
"""
def __init__(self, maker, **kwargs):
self.kwargs = kwargs
self.maker = maker
self.extra = {}
def pack(self):
bits = []
# Frame header
bits.append(self.maker.frame_header_start)
bits.append(self.maker.source_bits(self.extra["source"]))
# Frame Address
bits.append(self.extra["target"].packd)
if self.kwargs["ack_required"]:
bits.append(self.maker.frame_address_with_acks_middle)
else:
bits.append(self.maker.frame_address_without_acks_middle)
bits.append(self.maker.sequence_bits[self.extra["sequence"]])
# Protocol Header
bits.append(self.maker.protocol_header_packd)
# Payload
bits.append(self.maker.tile_index_bits[self.kwargs["tile_index"]])
bits.append(self.maker.payload_middle)
bits.append(self.maker.width_bits[self.kwargs["width"]])
bits.append(self.maker.duration_bits(self.kwargs["duration"]))
bits.append(self.maker.colors_bits(self.kwargs["colors"]))
return functools.reduce(operator.add, bits)
def tobytes(self, serial=None):
return self.pack().tobytes()
def simplify(self):
return self
def clone(self):
res = self.__class__(self.maker, **self.kwargs)
res.update(self.extra)
return res
def update(self, extra):
self.extra.update(extra)
if "target" in extra and not isinstance(extra["target"], self.maker.Target):
self.target = extra["target"]
@property
def target(self):
if "target" in self.extra:
return self.extra["target"].bytes
return sb.NotSpecified
@target.setter
def target(self, val):
self.update({"target": self.maker.make_target(val)})
@property
def serial(self):
if "target" not in self.extra:
return "000000000000"
return self.extra["target"].serial
@property
def res_required(self):
return False
@property
def ack_required(self):
return self.kwargs["ack_required"]
@ack_required.setter
def ack_required(self, value):
self.kwargs["ack_required"] = value
@property
def source(self):
return self.extra["source"]
@source.setter
def source(self, val):
"""Source is set on the clone by the photons_transport writer"""
self.extra["source"] = val
@property
def sequence(self):
return self.extra["sequence"]
@sequence.setter
def sequence(self, val):
self.extra["sequence"] = val
@property
def pkt_type(self):
return TileMessages.Set64.Payload.message_type
@property
def protocol(self):
return 1024
@property
def is_dynamic(self):
return False
@property
def colors(self):
return self.kwargs.get("colors", [])
def actual(self, key):
if key == "source":
return self.extra.get("source", sb.NotSpecified)
return getattr(self, key)
def __init__(self):
self.targets_cache = LRU(1000)
self.source_bits_cache = LRU(10)
self.duration_bits_cache = LRU(10)
self.cache = ProtocolColor.Meta.cache
msg = TileMessages.Set64(
source=0,
sequence=0,
target="d073d5000000",
res_required=False,
ack_required=True,
tile_index=0,
length=1,
x=0,
y=0,
width=8,
duration=0,
colors=[{"hue": 0, "saturation": 0, "brightness": 0, "kelvin": 3500}],
)
self.frame_header_start = msg.frame_header.pack()[:-32]
self.frame_address_with_acks_middle = msg.frame_address.pack()[64:-8]
msg.ack_required = False
self.frame_address_without_acks_middle = msg.frame_address.pack()[64:-8]
self.protocol_header_packd = msg.protocol_header.pack()
# tile_index, width, duration and colors are variable
self.payload_middle = msg.payload.pack()[8 : -8 - 32 - (64 * 64)]
self.uint8_bits = {val: self.bits(T.Uint8, val) for val in range(256)}
self.width_bits = self.uint8_bits
self.sequence_bits = self.uint8_bits
self.tile_index_bits = self.uint8_bits
def bits(self, typ, val):
bits = bitarray(endian="little")
bits.frombytes(struct.pack(typ.struct_format, val))
return bits
def source_bits(self, val):
if val not in self.source_bits_cache:
self.source_bits_cache[val] = self.bits(T.Uint32, val)
return self.source_bits_cache[val]
def duration_bits(self, val):
if val not in self.duration_bits_cache:
self.duration_bits_cache[val] = self.bits(T.Uint32, int(val * 1000))
return self.duration_bits_cache[val]
def colors_bits(self, colors):
res = []
for color in colors:
fields = color.cache_key
if fields not in self.cache:
bits = []
bits.append(self.bits(hsbk[0][1], int(65535 * (color.hue / 360))))
bits.append(self.bits(hsbk[1][1], int(65535 * color.saturation)))
bits.append(self.bits(hsbk[2][1], int(65535 * color.brightness)))
bits.append(self.bits(hsbk[3][1], color.kelvin))
self.cache[fields] = functools.reduce(operator.add, bits)
res.append(self.cache[fields])
return functools.reduce(operator.add, res)
def make_target(self, target):
if target not in self.targets_cache:
val = target
if isinstance(target, str):
val = binascii.unhexlify(target)
if isinstance(val, bytes) and len(val) == 6:
val += b"\x00\x00"
self.targets_cache[target] = self.Target(val)
return self.targets_cache[target]
def __call__(self, **kwargs):
return self.Set64(self, **kwargs)
# Used to create a message that photons thinks is a valid Set64
# It is a callable that requires keyword arguments for
# ack_required, tile_index, width, duration, colors
set_64_maker = Set64Maker()
| [
"[email protected]"
] | |
32f734117fa4c780a440899047f60a411c1659eb | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=37/params.py | 5b58a1e8644383c673b55f63426dd5dad45e4f82 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.104810',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 37,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
7198ffbf88e31f94904be634ca44e9c91dbf2cee | 08df87a0be17699c99c812cfdfe4d5f56d226696 | /python/optkit/api.py.bak | 2ab9d39226427fb8bc6a6f4858e7625b51ac148f | [] | no_license | mengpanqingyun/optkit | e5d48ad4dc9c16143e5fdf3c3cf0ba45d72493ef | bfe44937c701ee8d79cb6b6714e76934412de0cc | refs/heads/master | 2021-06-25T12:58:28.028884 | 2018-12-20T01:59:26 | 2018-12-20T01:59:26 | 162,408,788 | 0 | 0 | null | 2018-12-19T08:46:33 | 2018-12-19T08:46:32 | null | UTF-8 | Python | false | false | 1,302 | bak | from optkit.backends import OKBackend
from optkit.types import PogsTypes, ClusteringTypes
from os import getenv
"""
Version query
"""
OPTKIT_VERSION = None
"""
Backend handle
"""
backend = OKBackend()
"""
C implementations
"""
pogs_types = None
PogsSolver = None
PogsObjective = None
clustering_types = None
ClusteringSettings = None
Clustering = None
"""
Backend switching
"""
def set_backend(gpu=False, double=True):
# Backend
global OPTKIT_VERSION
global backend
## C implementations
global pogs_types
global PogsSolver
global PogsObjective
global clustering_types
global ClusteringSettings
global Clustering
# change backend
backend_name=backend.change(gpu=gpu, double=double)
OPTKIT_VERSION = backend.version
## C implemenetations
pogs_types = PogsTypes(backend)
PogsSolver = pogs_types.Solver
PogsObjective = pogs_types.Objective
clustering_types = ClusteringTypes(backend)
ClusteringSettings = clustering_types.ClusteringSettings
Clustering = clustering_types.Clustering
print "optkit backend set to {}".format(backend.config)
"""
INITIALIZATION BEHAVIOR:
"""
default_device = getenv('OPTKIT_DEFAULT_DEVICE', 'cpu')
default_precision = getenv('OPTKIT_DEFAULT_FLOATBITS', '64')
set_backend(gpu=(default_device == 'gpu'),
double=(default_precision == '64')) | [
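# Example (illustrative): calling set_backend(gpu=True, double=False) at runtime switches to
# the single-precision GPU backend and rebinds PogsSolver/PogsObjective/Clustering above.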
"[email protected]"
] | |
324f3103534545e93ad452e9cbd39a329d543299 | 3248a00b88d1e1f02b608cfb162fabf8d69fc070 | /Python/Functions/recursion.py | 4b6d365122b22dae47325e3861a0990e422aee0b | [
"MIT"
] | permissive | steveachu/Lectures | 74b72aeb0691a05d127ebbe0f89f4d00165bce7e | 41438bec21f6d3525f508f3a1ecf4751d95fa58c | refs/heads/master | 2021-01-18T20:59:14.239293 | 2015-08-21T01:18:42 | 2015-08-21T01:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # A simple recursive implementation of factorial
def factorial(n):
if n <= 1:
return 1
return n * factorial(n-1)
print factorial(5)
# A tail-recursive implementation of factorial
def factorial_tail(n, prod):
if n <= 1:
return prod
    return factorial_tail(n-1, prod * n)  # Why does this work?
# Why is it better?
# Names can be re-bound to new values at any time.
def factorial(n):
print "Tail factorial" # To be sure our new version is running
return factorial_tail(n, 1)
print factorial(5)
| [
"[email protected]"
] | |
cd20aa8d6cd0173f26163f8b63d1921cf755961b | faa93ef63edf80e6276c2c20192e726a0b24ac33 | /docbook-backend/DocBook/urls.py | 9d4adaf0f7739a1342f1197ec03da5f488e6be02 | [] | no_license | Livingstone99/abs_doc_book | ef03a3674363c0f9af05050611b4b8698623d55d | fe83968ae4525d9daad1166aafcee5bf9ce500f5 | refs/heads/master | 2022-12-24T06:53:20.394509 | 2020-10-07T22:50:30 | 2020-10-07T22:50:30 | 302,182,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py |
from django.contrib import admin
from django.urls import path, include
from rest_framework_jwt.views import obtain_jwt_token
urlpatterns = [
path('token-auth', obtain_jwt_token),
# path('api-auth/', include('rest_framework.urls')),
# path('rest-auth/', include('rest_auth.urls')),
# path('rest-auth/registration/', include('rest_auth.registration.urls')),
# path('api/', include('articles.api.urls')),
path('admin/', admin.site.urls),
path('', include('hospital.urls')),
] | [
"[email protected]"
] | |
c39f1ad13f13ac93ca81747b9ddc950c34e3efc6 | 3cbfc79ae539389ff0e31f832107d3dbbd4e311a | /machine_learning_filtering_data.py | 3a5c659fdfe521709e8eb5030b18d7f9cc7ffce7 | [] | no_license | elijahfr27/CSE163-Final-Project | cf08015ebabfd8b3eaf3f61616cc435cec38f176 | c2f4aebdce2e67cb36f414e93674ad160fcb7b4a | refs/heads/master | 2023-05-09T21:19:20.444253 | 2021-06-10T14:07:06 | 2021-06-10T14:07:06 | 373,659,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,966 | py | """
CSE163 Final Project
This file contains the functions for filtering the original datasets in terms
of the state data for the maching learning model.
"""
def state_graduation_rates(data, visualizations=True):
""" This function takes in the Washington State graduation dataset
as a datframe and returns a dataframe containing the graduation
rates for students graduating in 4 years for each included year.
"""
# reducing to relevant columns
if visualizations:
columns = [
"SchoolYear",
"DistrictName",
"StudentGroup",
"Cohort",
"GraduationRate",
]
else:
columns = [
"SchoolCode",
"DistrictCode",
"SchoolYear",
"Cohort",
"StudentGroup",
"GraduationRate",
"DistrictName",
]
cut_data = data[columns]
# masks for data
area = cut_data["DistrictName"] == "State Total"
student_group = cut_data["StudentGroup"] == "All Students"
cohort = cut_data["Cohort"] == "Four Year"
return cut_data[area & student_group & cohort]
def state_enrollment(data, visualization=True):
"""Takes in enrollment data as dataframe. Filters to only
state data for all high school grades"""
# reducing to relevant columns
if visualization:
columns = [
"SchoolYear",
"DistrictName",
"Gradelevel",
"All Students",
"American Indian/ Alaskan Native",
"Asian",
"Black/ African American",
"Hispanic/ Latino of any race(s)",
"Native Hawaiian/ Other Pacific Islander",
"Two or More Races",
"White",
]
else:
columns = [
"SchoolYear",
"DistrictName",
"Gradelevel",
"All Students",
"American Indian/ Alaskan Native",
"Asian",
"Black/ African American",
"Hispanic/ Latino of any race(s)",
"Native Hawaiian/ Other Pacific Islander",
"Two or More Races",
"White",
"SchoolCode",
]
cut_data = data[columns]
# masks for data
if visualization:
area = cut_data["DistrictName"] == "State Total"
grade = (
(cut_data["Gradelevel"] == "9th Grade")
| (cut_data["Gradelevel"] == "10th Grade")
| (cut_data["Gradelevel"] == "11th Grade")
| (cut_data["Gradelevel"] == "12th Grade")
)
else:
area = cut_data["DistrictName"] != "State Total"
grade = cut_data["Gradelevel"] == "12th Grade"
return cut_data[area & grade]
def state_assessment(data, visualization=True):
"""Takes in testing data as dataframe and filters to the
appropriate columns."""
# reducing to relevant columns
if visualization:
columns = [
"SchoolYear",
"DistrictName",
"StudentGroup",
"GradeLevel",
"Test Administration (group)",
"TestSubject",
"PercentMetTestedOnly",
]
else:
columns = [
"DistrictName",
"DistrictCode",
"SchoolCode",
"GradeLevel",
"StudentGroup",
"TestSubject",
"PercentMetStandard",
"PercentMetTestedOnly",
]
cut_data = data[columns]
# masks for data
if visualization:
area = cut_data["DistrictName"] == "State Total"
grade = (
(cut_data["GradeLevel"] == "9th Grade")
| (cut_data["GradeLevel"] == "10th Grade")
| (cut_data["GradeLevel"] == "11th Grade")
| (cut_data["GradeLevel"] == "12th Grade")
)
test = cut_data["Test Administration (group)"] == "General"
area = cut_data["DistrictName"] != "State Total"
grade = cut_data["GradeLevel"] == "12th Grade"
student_group = cut_data["StudentGroup"] == "All Students"
return cut_data[area & student_group & grade & test]
else:
area = cut_data["DistrictName"] != "State Total"
grade = cut_data["GradeLevel"] == "12th Grade"
return cut_data[area & grade]
def state_classes(data, visualization=True):
"""Takes in data about class enrollment and filters down
to the appropriate columns."""
# reducing to relevant columns
if visualization:
columns = [
"SchoolYear",
"DistrictName",
"StudentGroup",
"GradeLevel",
"Measures",
"PercentTakingAP",
"PercentTakingIB",
"PercentTakingCollegeInTheHighSchool",
"PercentTakingCambridge",
"PercentTakingRunningStart",
"PercentTakingCTETechPrep",
]
else:
columns = [
"SchoolYear",
"SchoolCode",
"DistrictName",
"GradeLevel",
"PercentTakingAP",
"PercentTakingIB",
"PercentTakingCollegeInTheHighSchool",
"PercentTakingRunningStart",
"StudentGroup",
]
cut_data = data[columns]
# masks for data
student_group = cut_data["StudentGroup"] == "All Students"
if visualization:
area = cut_data["DistrictName"] == "State Total"
grade = (
(cut_data["GradeLevel"] == "9")
| (cut_data["GradeLevel"] == "10")
| (cut_data["GradeLevel"] == "11")
| (cut_data["GradeLevel"] == "12")
)
measures = cut_data["Measures"] == "Dual Credit"
return cut_data[area & student_group & grade & measures]
else:
area = cut_data["DistrictName"] != "State Total"
grade = cut_data["GradeLevel"] == "12"
student_group = cut_data["StudentGroup"] == "All Students"
return cut_data[area & student_group & grade]
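# Minimal usage sketch (illustrative only; the CSV file name is a placeholder, not part of
# this project):
#
#   import pandas as pd
#   grad = pd.read_csv("graduation_outcomes.csv")
#   grad_rates = state_graduation_rates(grad, visualizations=False)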
| [
"[email protected]"
] | |
5b6f9ab6b7718a4104f546e53ec542a780495dd6 | 154f80fe8ce2ec389407bcd811d74e39ac8d7772 | /model/run.py | d5b48657cdd9807e02d2865e4891261c00063829 | [] | no_license | fagan2888/Kaggle_SpeechRecognition | fbf945b64352d5def904487fa79b169fd56d380c | b84e7731fb11440cde4a659d1248dd9c75f12eec | refs/heads/master | 2021-09-03T17:18:26.373277 | 2018-01-10T17:28:01 | 2018-01-10T17:28:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: Jing Guo
@time : 12/29/17
"""
from __future__ import absolute_import, division, print_function
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
cmd = 'rm ../output/subm*.csv'
os.system(cmd)
cmd = 'python lightWeight_CNN.py'
os.system(cmd)
#cmd = 'python GS_CNN2.py'
#os.system(cmd)
| [
"[email protected]"
] | |
00b3a87b42969e21e1afe115847ae465906bb7a5 | a9d792254cb8e3b45aaa4cffdef79823b4ea969d | /main.py | 4964805787dcd9d71dd76dfa83950b56859b4e79 | [] | no_license | Shauryayamdagni/IITM_python | 20d96e926b35e54d6f6d80c2ec5f488bf27160a7 | 73239d4a6e8c2293ce01ee529c5a4074b12fc472 | refs/heads/master | 2023-06-12T00:49:30.120778 | 2021-06-19T16:31:36 | 2021-06-19T16:31:36 | 377,646,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | n=int(input("enter the dimension"))
l1=[]
zero=[]
s=[]
a=[]
b=[]
for x in range(0,n):
for y in range(0,n):
num=int(input())
l1.append(num)
a.append(l1)
l1=[]
for x in range(0,n):
for y in range(0,n):
num=int(input())
l1.append(num)
zero.append(0)
b.append(l1)
s.append(zero)
zero=[]
l1=[]
#print(s)
for i in range(0,n):
for j in range(0,n):
s[i][j]=a[i][j]+b[i][j]
print(a)
print(b)
print(s) | [
"[email protected]"
] | |
d7e58c3c6ff50fdc934de20808883e8695911613 | 469af60a0fd332ac2e06b5141b7aba19696e8abe | /front/webkitgtk3/srt_wkgtk/window.py | efe040a1d3d2e8295b005dba2de07c7a2dd72adb | [] | no_license | yuyichao/srtctrl | 7091d03c2358a045cac34302d26432347f3bdccf | 55e2eb227940c9bb3bb5bab1b4f7d2e84db2b96f | refs/heads/master | 2021-01-15T17:02:59.838631 | 2012-08-26T00:56:53 | 2012-08-26T00:56:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | # Copyright (C) 2012~2012 by Yichao Yu
# [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
from gi.repository import WebKit, Gtk
from pywkjs import *
class SrtWindow:
def __init__(self):
self.window = Gtk.Window()
self.window.set_default_size(500, 400)
self.webview = WebKit.WebView()
self.webview.connect("new-window-policy-decision-requested",
self._new_window_requested)
self.window.add(self.webview)
self.helper = Helper(self.webview)
settings = self.webview.get_settings()
settings.set_property('enable-universal-access-from-file-uris', True)
settings.set_property('enable-file-access-from-file-uris', True)
settings.set_property('javascript-can-access-clipboard', True)
settings.set_property('enable-default-context-menu', True)
settings.set_property('enable-page-cache', True)
settings.set_property('tab-key-cycles-through-elements', True)
settings.set_property('enable-spell-checking', False)
settings.set_property('enable-caret-browsing', False)
def _new_window_requested(self, view, frame, request, action, decision):
decision.ignore()
uri = request.get_uri()
import subprocess
browser = 'xdg-open'
subprocess.Popen([browser, uri])
return True
def show_all(self):
self.window.show_all()
def load_uri(self, uri):
self.webview.load_uri(uri)
| [
"[email protected]"
] | |
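A minimal, hedged sketch of driving the SrtWindow class above; it assumes the pywkjs/WebKitGTK environment the module imports is available, and the URI is illustrative.

from gi.repository import Gtk

win = SrtWindow()
win.window.connect("destroy", Gtk.main_quit)   # stop the main loop when the window closes
win.load_uri("file:///path/to/ui/index.html")  # hypothetical local page
win.show_all()
Gtk.main()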
9284e1ffe308213cd426453d63e3cac78b1ad2b6 | 177c481a40e88dc0eedf773496561d05be7772d8 | /mod_repair_extended/_build_manual.py | ab85e8e38a9d1c0449daee652f07708a739ceb53 | [
"WTFPL"
] | permissive | vizzz/spoter-mods | f0792caa742b7d5d74eeb20bae65377511d16d86 | e0a275a08532aed1be2b2cdeac3be1c9fa15f2ac | refs/heads/master | 2020-03-11T03:11:01.039580 | 2018-04-07T09:57:17 | 2018-04-07T09:57:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,238 | py | # -*- coding: utf-8 -*-
import codecs
import datetime
import glob
import json
import os
import re
import shutil
import subprocess
import base64
CLIENT_VERSION = '1.0.0'
BUILD = 'manual'
NAME = 'spoter.repair_extended'
ADD_LICENSE = True
class Build(object):
OUT_PATH = '.out'
PYC_PATH = os.path.join(OUT_PATH, 'res', 'scripts', 'client', 'gui', 'mods')
BUILD_PATH = os.path.join('source', BUILD)
VERSION = None
RELEASE = '%s.wotmod' % NAME
DATE = datetime.datetime.now().strftime("%Y-%m-%d")
CONFIG_NAME = None
def __init__(self):
self.clear()
if not os.path.exists('release'): subprocess.check_call(['powershell', 'mkdir', 'release'])
self.readVersion()
self.createFileDict()
self.packWotmod()
self.clear()
print 'created: %s v%s (%s) to %s' % (self.RELEASE, self.VERSION["version"], self.DATE, CLIENT_VERSION)
def clear(self):
try:
shutil.rmtree(self.OUT_PATH, True)
except OSError:
pass
def readVersion(self):
filePath = os.path.join(self.BUILD_PATH, 'VERSION')
with codecs.open(filePath, 'r', encoding='utf-8') as versionFile:
data = versionFile.read().decode('utf-8')
versionFile.close()
self.VERSION = json.loads(data)
def createFileDict(self):
version = '{:.2f}'.format(float(self.VERSION["version"]))
files = []
if self.VERSION["source"]:
files.append((os.path.join(self.BUILD_PATH, self.VERSION["source"]), 'self.version = ', "'v%s (%s)'" % (version, self.DATE)))
files.append((os.path.join(self.BUILD_PATH, self.VERSION["source"]), 'self.version_id = ', re.sub('[.\s]', '', '%s' % version)))
if self.VERSION["meta"]:
files.append((os.path.join(self.BUILD_PATH, self.VERSION["meta"]), '<version>', '%s</version>' % version))
if self.VERSION["config"]:
files.append((os.path.join(self.BUILD_PATH, self.VERSION["config"]), '"version": ', re.sub('[.\s]', '', '%s' % version)))
if self.VERSION["i18n"]:
for path in glob.glob(os.path.join(self.BUILD_PATH, self.VERSION["i18n"], "*.json")):
files.append((path, '"version": ', re.sub('[.\s]', '', '%s' % version)))
for path in files:
self.updateFiles(*path)
def updateFiles(self, path, string, text):
with open(path, 'a+') as xfile:
data = xfile.readlines()
newData = []
for line in data:
if 'self.ids = ' in line:
self.configName = re.split('self.ids = ', line)[1]
if string in line:
newData.append('%s%s%s\n' % (re.split(string, line)[0], string, text))
continue
newData.append(line)
xfile.close()
with open(path, 'w') as xfile:
xfile.writelines(newData)
xfile.close()
def packWotmod(self):
self.RELEASE = '%s_%s.wotmod' % (NAME, '{:.2f}'.format(float(self.VERSION["version"])))
subprocess.check_call(['powershell', 'mkdir', self.PYC_PATH])
py = '%s' % os.path.join(self.BUILD_PATH, self.VERSION["source"])
pyc = '%sc' % self.VERSION["source"]
ps = '%s\%s' % (os.path.realpath(self.OUT_PATH), 'create-7zip.ps1')
metaPath = '%s' % os.path.join(self.BUILD_PATH, os.path.dirname(self.VERSION["meta"]))
metaFile = os.path.basename(self.VERSION["meta"])
subprocess.check_call(['python', '-m', 'compileall', py])
subprocess.call('powershell robocopy %s %s %s /COPYALL /MOV' % (os.path.realpath(self.BUILD_PATH), os.path.realpath(self.PYC_PATH), pyc))
subprocess.call('powershell robocopy %s %s %s /COPYALL' % (os.path.realpath(metaPath), os.path.realpath(self.OUT_PATH), metaFile))
if self.VERSION["resources"]:
for directory in self.VERSION["resources"]:
if os.path.exists(os.path.join(self.BUILD_PATH, directory)):
subprocess.call('powershell robocopy %s %s /COPYALL /E' % (os.path.realpath(os.path.join(self.BUILD_PATH, directory)), os.path.realpath(os.path.join(self.OUT_PATH, 'res', directory))))
with open(ps, 'w') as xfile:
xfile.write('function create-7zip([String] $aDirectory, [String] $aZipfile){ [string]$pathToZipExe = "C:\Program Files\\7-zip\\7z.exe"; [Array]$arguments = "a", "-tzip", "-ssw", "-mx0", "$aZipfile", "$aDirectory"; & $pathToZipExe $arguments; }\n'
'create-7zip "%s" "%s"\n'
'create-7zip "%s" "%s"\n' % (os.path.realpath(os.path.join(self.OUT_PATH, 'res')), os.path.realpath(os.path.join('release', self.RELEASE)),
os.path.realpath(os.path.join(self.OUT_PATH, metaFile)), os.path.realpath(os.path.join('release', self.RELEASE))))
if ADD_LICENSE:
xfile.write('create-7zip "%s" "%s"\n' % (self.createLicense(), os.path.realpath(os.path.join('release', self.RELEASE))))
xfile.close()
subprocess.call('powershell -executionpolicy bypass -command "& {Set-ExecutionPolicy AllSigned; %s; Set-ExecutionPolicy Undefined}"' % ps)
def createLicense(self):
b64 = "DQogICAgICAgIERPIFdIQVQgVEhFIEZVQ0sgWU9VIFdBTlQgVE8gUFVCTElDIExJQ0VOU0UgDQogICAgICAgICAgICAgICAgICAgIFZlcnNpb24gMiwgRGVjZW1iZXIgMjAwNCANCg0KIENvcHlyaWdodCAoQykgMjAwNCBTYW0gSG9jZXZhciA8c2FtQGhvY2V2YXIubmV0PiANCg0KIEV2ZXJ5b25lIGlzIHBlcm1pdHRlZCB0byBjb3B5IGFuZCBkaXN0cmlidXRlIHZlcmJhdGltIG9yIG1vZGlmaWVkIA0KIGNvcGllcyBvZiB0aGlzIGxpY2Vuc2UgZG9jdW1lbnQsIGFuZCBjaGFuZ2luZyBpdCBpcyBhbGxvd2VkIGFzIGxvbmcgDQogYXMgdGhlIG5hbWUgaXMgY2hhbmdlZC4gDQoNCiAgICAgICAgICAgIERPIFdIQVQgVEhFIEZVQ0sgWU9VIFdBTlQgVE8gUFVCTElDIExJQ0VOU0UgDQogICBURVJNUyBBTkQgQ09ORElUSU9OUyBGT1IgQ09QWUlORywgRElTVFJJQlVUSU9OIEFORCBNT0RJRklDQVRJT04gDQoNCiAgMC4gWW91IGp1c3QgRE8gV0hBVCBUSEUgRlVDSyBZT1UgV0FOVCBUTy4NCg=="
output_name = os.path.realpath(os.path.join(self.OUT_PATH, 'LICENSE'))
data = base64.b64decode(b64)
with open(output_name, "wb") as output_file:
output_file.write(data)
output_file.close()
return output_name
build = Build() | [
"[email protected]"
] | |
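A hedged, cross-platform sketch of the packing step: the standard-library zipfile module can build the .wotmod archive without PowerShell or 7-Zip (ZIP_STORED mirrors the -mx0 "store only" flag). The helper name and arguments are assumptions, not part of the original build script.

import os
import zipfile

def pack_wotmod(out_dir, meta_file, release_path):
    with zipfile.ZipFile(release_path, 'w', zipfile.ZIP_STORED) as zf:
        zf.write(meta_file, os.path.basename(meta_file))        # meta.xml at the archive root
        res_root = os.path.join(out_dir, 'res')
        for root, _, files in os.walk(res_root):
            for name in files:
                full = os.path.join(root, name)
                zf.write(full, os.path.relpath(full, out_dir))  # keep the res/... layout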
ab1386762776bda1772c77591ca769769ffd917e | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/93_5.py | 4d0d66a1531f89f026f67385657698d9be34adb7 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,768 | py | Python | Even Front digits Test in List
Sometimes we face a problem in which we need to test whether every number in a
list starts with an even digit. This particular utility has applications in
day-to-day programming. Let's discuss certain ways in which this task can be
achieved.
**Method #1 : Using list comprehension + map()**

We can approach this problem by converting each element to a string with map(),
taking the parity of its leading digit, and collapsing those parities into a set:
if the set has exactly one member, every element starts with a digit of the same
parity. Note that, as written, this only confirms the leading digits share one
parity; pairing it with a check that at least one of them is even makes the test strict.
# Python3 code to demonstrate
# Even Front digits Test in List
# using list comprehension + map()
# initializing list
test_list = [25, 6, 828829, 432]
# printing original list
print("The original list : " + str(test_list))
# using list comprehension + map()
# Even Front digits Test in List
res = len(set((int(sub[0]) % 2) for sub in
map(str, test_list))) == 1
# print result
print("Does each element start with even digit ? " + str(res))
---
**Output :**
The original list : [25, 6, 828829, 432]
Does each element start with even digit ? True
**Method #2 : Using all() + list comprehension**

This is yet another approach to the same problem. Here all() aggregates the check
across every element, while str() converts each number so that its first digit can
be tested for evenness directly.
# Python3 code to demonstrate
# Even Front digits Test in List
# using all() + list comprehension
# initializing list
test_list = [25, 6, 828829, 432]
# printing original list
print("The original list : " + str(test_list))
# using all() + list comprehension
# Even Front digits Test in List
res = all(not int(str(i)[0]) % 2 for i in
test_list)
# print result
print("Does each element start with even digit ? " + str(res))
---
**Output :**
The original list : [25, 6, 828829, 432]
Does each element start with even digit ? True
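As a further variant (not part of the original article), the leading character can be
compared against the even digit characters directly; like the methods above, this
assumes non-negative numbers.

# checking the leading character against the even digits
test_list = [25, 6, 828829, 432]
res = all(str(i)[0] in "02468" for i in test_list)
print("Does each element start with even digit ? " + str(res))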
| [
"[email protected]"
] | |
c96dcad8ec65c2a84f7547eb6003c0caa5b79839 | 16a375a4750241533614065deaef4b3951d4e8bb | /cmdstanpy_attempt1.py | 6ab55b28f72a31e6e457d0fc067f49ac5c830fa9 | [] | no_license | tommylees112/stan_hydro_model | d625b20fec2e67de5addb97fea93538c6adadb55 | 8570638004521e5a80478bc1f42ad62cb5dbb602 | refs/heads/master | 2022-11-22T02:22:25.395025 | 2020-07-07T18:00:38 | 2020-07-07T18:00:38 | 277,887,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | from pathlib import Path
import cmdstanpy
from cmdstanpy import cmdstan_path, CmdStanModel, CmdStanMCMC
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from typing import Dict
import arviz as az
import os
cmdstanpy.CMDSTAN_PATH = "/Users/tommylees/.cmdstanpy/cmdstan-2.21.0"
print(cmdstan_path())
def clear_pre_existing_files():
os.system(
"""
pchs=`find . -name "*.gch"`;
for f in $pchs; do rm $f; done;
"""
)
os.system(
"""
hpps=`find . -name "*.hpp"`;
for f in $hpps; do rm $f; done;
"""
)
print("Deleted *.hpp and *.gch files")
if __name__ == "__main__":
clear_pre_existing_files()
# set the directories
model_dir = Path("stan_code")
data_dir = Path("data")
data = pd.read_csv(data_dir / "cherwell_station.csv")
# ---- data ---- #
Q: np.ndarray = data["discharge_spec"].values
precip: np.ndarray = data["precipitation"].values
initial_state: float = 0.0
    discharge_data: Dict = {"T": len(Q), "Q": Q, "P": precip, "S_t0": initial_state}
# ---- model ---- #
stan_file = model_dir / "bernoulli.stan"
stan_model = CmdStanModel(stan_file=stan_file)
stan_model.compile()
# ---- fit parameters ---- #
abcmodel_fit: CmdStanMCMC = stan_model.sample(
data=discharge_data,
chains=4,
cores=1,
seed=1111,
show_progress=True,
)
# ---- get simulations ---- #
posterior = az.from_cmdstanpy(
        posterior=abcmodel_fit,
posterior_predictive="y",
) | [
"[email protected]"
] | |
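A hedged follow-up the original script stops short of: quick diagnostics and a trace
plot once sampling finishes. It reuses abcmodel_fit, posterior, az and plt from the
script above, so it assumes that block runs without error.

print(abcmodel_fit.diagnose())        # CmdStan's diagnose utility (divergences, E-BFMI, ...)
print(abcmodel_fit.summary().head())  # posterior summary as a pandas DataFrame
az.plot_trace(posterior)
plt.show()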
3b1a146bea831d8de9a8abf40740088fce48b2aa | a3b8082e2d6160545591953d718fe197f2a1e8c2 | /pecobro/consts.py | 53d620c3c00d0a7e54836a10f558d5592fcec200 | [] | no_license | asutherland/pecobro | 887cc49509c4ce012bba133f9e2604b92b736bcc | 6c59619c4b33cfcc232620ab29d8d9da1362cf9c | refs/heads/master | 2021-01-10T21:23:59.238520 | 2008-05-20T11:27:22 | 2008-05-20T11:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the "pecobro" Performance Code Browser.
#
# The Initial Developer of the Original Code is
# Mozilla Messaging, Inc.
# Portions created by the Initial Developer are Copyright (C) 2008
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Andrew Sutherland <[email protected]>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
defines = {'XP_UNIX': True,
'AB_CD': 'en-us',
'APP_UA_NAME': 'Dummy',
'APP_VERSION': '1.0',
'MOZ_UPDATE_CHANNEL': 'http://www.example.com/'}
| [
"[email protected]"
] | |
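A minimal, hypothetical illustration (not pecobro's actual API) of how a defines mapping
like the one above is typically consumed when preprocessing Mozilla-style #ifdef/#endif
blocks.

def preprocess(lines, defines):
    out, keep = [], [True]
    for line in lines:
        if line.startswith('#ifdef '):
            keep.append(keep[-1] and line.split(None, 1)[1].strip() in defines)
        elif line.startswith('#endif'):
            keep.pop()
        elif keep[-1]:
            out.append(line)
    return out

print(preprocess(['#ifdef XP_UNIX', 'unix only', '#endif', 'always kept'], defines))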
df236f657fa4f27802eff0c8f610c5aa3d356661 | 222f2534b9ac7a32bd7eec939f296b23ecb9db8b | /construce_array.py | ab6e55c0b4208a72cb0dcbabc48e943ab3252af5 | [] | no_license | hzphzp/acm | 0900774a4e3ca08873941dc213db0c54736723a6 | 4bd9cf793b3e09e21db93cc3ca51e9c2a3b8b715 | refs/heads/master | 2020-03-27T06:56:06.795492 | 2018-09-22T08:15:36 | 2018-09-22T08:15:36 | 146,148,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Complete the countArray function below.
def countArray(n, k, x):
    # Appears to solve HackerRank's "Construct the Array": count length-n arrays over
    # values 1..k with first element 1, last element x and no two adjacent elements
    # equal, modulo 10**9 + 7. `add` is the contribution of each pair of extra positions.
    if (n % 2) == 1:
        count = (x == 1)
        add = k - 2
    if (n % 2) == 0:
        count = (x != 1)
        add = (k - 2) * (k - 1)
    for i in range((n - 3) // 2 + 1):
        count += add
        add *= (k - 1) * (k - 1)
        add = add % 1000000007
    count = count % 1000000007
    return count % 1000000007
print(countArray(17048, 14319, 1))
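A hedged sanity check, not part of the original submission: brute-force the same count
for small inputs, assuming the intended problem is HackerRank's "Construct the Array"
(length-n arrays over 1..k, first element 1, last element x, no two adjacent equal).

from itertools import product

def count_array_bruteforce(n, k, x):
    total = 0
    for middle in product(range(1, k + 1), repeat=n - 2):   # requires n >= 2
        arr = (1,) + middle + (x,)
        if all(arr[i] != arr[i + 1] for i in range(n - 1)):
            total += 1
    return total % 1000000007

# e.g. assert countArray(4, 3, 2) == count_array_bruteforce(4, 3, 2)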
| [
"[email protected]"
] |