Dataset schema (one row per source file; the rows below follow this column order):

| Column | Type / range |
|---|---|
| blob_id | string, 40–40 chars |
| directory_id | string, 40–40 chars |
| path | string, 3–281 chars |
| content_id | string, 40–40 chars |
| detected_licenses | list, 0–57 items |
| license_type | string, 2 classes |
| repo_name | string, 6–116 chars |
| snapshot_id | string, 40–40 chars |
| revision_id | string, 40–40 chars |
| branch_name | string, 313 classes |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64, 18.2k–668M (nullable ⌀) |
| star_events_count | int64, 0–102k |
| fork_events_count | int64, 0–38.2k |
| gha_license_id | string, 17 classes |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string, 107 classes |
| src_encoding | string, 20 classes |
| language | string, 1 class |
| is_vendor | bool, 2 classes |
| is_generated | bool, 2 classes |
| length_bytes | int64, 4–6.02M |
| extension | string, 78 classes |
| content | string, 2–6.02M chars |
| authors | list, 1–1 items |
| author | string, 0–175 chars |
8529adb3953865be06069280751f2878877bec8b | f5d94d12733c480848ee002a5b4df8d5b5f33a80 | /core/models.py | 910ed8ccdf92f683e091130fa76ce31919e821f8 | []
| no_license | nagkumar91/dj_1_8_test | a19b574b0c1cfe1ad279a38bd5d5d7357d75b114 | 724fc87dbef6519b781b1dcb464ad288b64b6f4d | refs/heads/master | 2021-01-25T10:07:19.643798 | 2015-05-04T08:17:25 | 2015-05-04T08:17:25 | 35,017,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py |
from django.db import models
# Create your models here.
class TempData(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = "Temp Data" | [
"[email protected]"
]
| |
e55e7935aeec78e11c9ffa69cda07b1fe3562bfc | 4d74898756d6e82a04d98b8efd83720dfe374cd5 | /algorithms/sorting_algorithms/merge_sort.py | 520447ea670a251aaa5007b1badb7c7c4553079b | [
"MIT"
]
| permissive | onyonkaclifford/data-structures-and-algorithms | 1d7494882afc6c4190459e8622424e5724f32041 | e0ca4bfa878273d06bf22c303e47762b8ec3870b | refs/heads/main | 2023-08-27T13:27:20.189492 | 2021-10-28T12:56:44 | 2021-10-28T12:56:44 | 394,561,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py |
from typing import List
def merge_sort(x: List) -> List:
"""Merge sort divides a list into two smaller lists, and recursively repeats the process on the two smaller lists
till lists of single elements are obtained. These smaller lists are then combined to form a single sorted list of
the original elements. It has an average time complexity of Θ(nlogn). Time complexity for the worst case is
O(nlogn). Time complexity for the best case is Ω(nlogn).
>>> merge_sort([4, 2, 3, 1, 0, 5])
[0, 1, 2, 3, 4, 5]
:param x: list to be sorted
:return: new sorted list
"""
length = len(x)
if length <= 1:
return x
mid_idx = length // 2
left = merge_sort(x[0:mid_idx])
right = merge_sort(x[mid_idx:length])
result = []
while len(left) > 0 and len(right) > 0:
if left[0] <= right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
result.extend(left)
result.extend(right)
return result
| [
"[email protected]"
]
| |
62e07965d72382918089ca8e09e47d671dfdc6b9 | ac886fcb23c5ee24846ab75de5fa2b0a06332eb4 | /main/train_classifier.py | eb5c7eebc5caef08a56ada5e39a50a60213a7910 | []
| no_license | saibot94/digit-recognition-tutorial | c6d8b0198fffa686b55bd8bbf2ac42105a1abec4 | 79ae96a4d00c7bf1ecc164031c128ba9d8842e4c | refs/heads/master | 2021-01-12T06:57:59.205167 | 2016-12-19T17:52:30 | 2016-12-19T17:52:30 | 76,883,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py |
from sklearn.externals import joblib
from sklearn import datasets
from skimage.feature import hog
from sklearn.svm import LinearSVC
import numpy as np
dataset = datasets.fetch_mldata("MNIST Original")
imagini = np.array(dataset.data, 'int16')
etichete = np.array(dataset.target, 'int')
print('=> Building the feature list from each image...')
list_hog_fd = []
for imagine in imagini:
fd = hog(imagine.reshape(28, 28), orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
list_hog_fd.append(fd)
computed_features = np.array(list_hog_fd, 'float64')
clf = LinearSVC()
print('=> Training the classifier...')
clf.fit(computed_features, etichete)
joblib.dump(clf, "./model/digits_clf.pkl", compress=3)
print('=> Done!')
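# The saved model can later be reloaded for prediction (illustrative sketch,
# not part of the original script):
#
#   clf = joblib.load("./model/digits_clf.pkl")
#   print(clf.predict(computed_features[:1]))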
"[email protected]"
]
| |
f744665e78776ee05b5ab86fb4f3408994c0e079 | 7cc342310bac85d2600d9f3246a3d6b62ae40c3d | /Day52.py | bdde81243f32889151f73a9e52a4258ea208446f | []
| no_license | kalthommusa/100DaysOfCode | 52b1111a81671b68e8ddb8806bb22fec58355f11 | 3b7b1e036aaeaee7243e4dabfbd360d3deb8d6dd | refs/heads/master | 2020-07-07T05:51:28.971917 | 2019-11-12T20:11:13 | 2019-11-12T20:11:13 | 203,270,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py |
#import the datetime module and display the full current date
import datetime
day52=datetime.datetime.now()
print(day52)
#return the year
print(day52.year)
#return the name of the day
day=day52.strftime("%A")
#return the name of the month
month=day52.strftime("%B")
#return the time
time=day52.strftime("%X")
print("Successfully passed Day52 on",day,month,"at",time)
#display the date
print(day52.strftime("%x"))
| [
"[email protected]"
]
| |
8d7fb22a6c6756d44fe42a19ac950cc877acbe97 | aadf51507e9a664729ea42d38e62cd6a08da0f06 | /change.py | c3a407c2c6f913115059195abbfe9277cc5a754c | []
| no_license | tanjinarahm/algorithms2 | 29b2dcbe0b59d0a84aa95b96fe7e49a26f85432e | 61b8022ddf0b78a799a2e88f63fb845925ec127f | refs/heads/master | 2022-04-23T07:29:50.300892 | 2020-04-28T02:08:45 | 2020-04-28T02:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py |
def change(num):
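# Greedy coin change: take as many of each coin as fit, from quarters
# down to pennies.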
change = {"q": 0, "d": 0, "n": 0, "p": 0}
if num >= 25:
change["q"] = int(num/25)
num %= 25
if num >= 10:
change["d"] = int(num/10)
num %= 10
if num >= 5:
change["n"] = int(num/5)
num %= 5
if num >= 1:
change["p"] = int(num/1)
num %= 1
return change
print(change(94))
def change2(num):
change = {"q": 0, "d": 0, "n": 0, "p": 0}
while (num > 0):
if num >= 25:
change["q"] += 1
num -= 25
elif num >= 10:
change["d"] += 1
num -= 10
elif num >= 5:
change["n"] += 1
num -= 5
else:
change["p"] += 1
num -= 1
return change
print(change2(94))
| [
"[email protected]"
]
| |
60565cb0ed87d3af131ea4e8d341b49a2de658e5 | abb7036772165df37dfeb595012443e2fd525b3c | /dsi_calculator.py | 8ac8b91b77af5d1d205f44dbfa5085eeffd4ccdf | []
| no_license | hdaylin/Inventory-Manager | a6cef20a2d1c1d4d59e0a6fda4e7ef9bc0af4ba6 | b26d6e02c5b540e684ced30e2fc0fae8905d92b5 | refs/heads/master | 2021-01-17T10:13:07.509692 | 2017-03-05T21:53:51 | 2017-03-05T21:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py |
import os
from flask import Flask, render_template, request, redirect, url_for, send_from_directory, \
flash
from werkzeug import secure_filename
app = Flask(__name__)
# This is the path to the upload directory
my_dir = os.path.dirname(__file__)
data_path = os.path.join(my_dir, 'uploads/')
app.config['UPLOAD_FOLDER'] = data_path
# These are the extension that we are accepting to be uploaded
app.config['ALLOWED_EXTENSIONS'] = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'csv', 'xlsx', 'xls'}
# For a given file, return whether it's an allowed type or not
def allowed_file(filename):
"""
:rtype: bool
"""
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
@app.route('/')
def home():
return render_template('index.html')
@app.route('/upload')
def upload_file():
files = make_tree(app.config['UPLOAD_FOLDER'])
return render_template('upload.html', files=files)
@app.route('/uploader', methods=['POST'])
def process_file():
# Get the name of the uploaded file
file = request.files['file']
# Check if the file is one of the allowed types/extensions
if file and allowed_file(file.filename):
# Make the filename safe, remove unsupported chars
filename = secure_filename(file.filename)
# Move the file form the temporal folder to
# the upload folder we setup
try:
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
except Exception:
print('File not valid')
# Redirect the user to the upload page
return redirect(url_for('upload_file'))
# This route is expecting a parameter containing the name
# of a file. Then it will locate that file on the upload
# directory and show it on the browser, so if the user uploads
# an image, that image is going to be show after the upload
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
# List the entries directly inside the given directory (top level only; subdirectories are not walked).
def make_tree(path):
try: lst = os.listdir(path)
except OSError:
pass #ignore errors
else:
return lst
return []
if __name__ == '__main__':
app.run()
| [
"[email protected]"
]
| |
3f8696a67045f47329b07878a2c45fb56b764242 | ff75d0c898f2322be8245bf3ab8edcb4e166b765 | /kafka/spacecenter/rocketinfo.py | c354d12a55633b4ed1ce160d6043471212a2deae | []
| no_license | davetrencher/kafka | 52b7d79c8a1c5c9ba901bb9b9040a1ad6986760d | cd918241785e64611cdcf10b8f3e97804bbc2692 | refs/heads/master | 2016-09-10T10:40:11.235120 | 2014-08-19T18:51:06 | 2014-08-19T18:51:06 | 54,588,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py |
from kafka.helper.Logger import Logger
from kafka.helper.krpchelper import KrpcHelper
from kafka.vessels.BaseVessel import BaseVessel
conn = KrpcHelper.conn
vessels = conn.space_center.vessels
vessel = BaseVessel(conn.space_center.active_vessel)
Logger.log(vessel.describe())
Logger.log("Welcome to Control")
conn.space_center.clear_target()
for avail_vessel in conn.space_center.vessels:
Logger.log(avail_vessel.name)
| [
"[email protected]"
]
| |
e70f14eb83da74ee83dd9e8854f5f79da094837c | fb783dda8d0ca34ad95d0c3f7ebbb6794a4b0467 | /ball.py | a2d479dde631ec996cf01de0feb2431d739b6875 | []
| no_license | Loai17/Y--Project | 967ec843ccc033fcdfdb59bd676adcfbea397446 | 155e9820bfa42c13e3dc7a82976146b1b86505ce | refs/heads/master | 2020-04-12T16:29:00.322136 | 2018-12-27T10:34:39 | 2018-12-27T10:34:39 | 162,613,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py |
from turtle import *
class Ball(Turtle):
def __init__(self,x,y,dx,dy,r,color):
Turtle.__init__(self)
self.dx=dx
self.dy=dy
self.r=r
self.pu()
self.goto(x,y)
self.shape("circle")
self.shapesize(r*r)
self.color(color)
print(self.xcor())
print(self.ycor())
def move(self,screen_width,screen_height):
current_x = self.xcor()
new_x = current_x + self.dx
current_y = self.ycor()
new_y = current_y + self.dy
right_side_ball = new_x + self.r
left_side_ball = new_x - self.r
top_side_ball = new_y + self.r
bottom_side_ball = new_y - self.r
self.goto(new_x,new_y)
if right_side_ball >= screen_width/2 or left_side_ball <= -screen_width/2:
self.dx = -self.dx
if top_side_ball >= screen_height/2 or bottom_side_ball <= -screen_height/2:
self.dy = -self.dy
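# Illustrative usage (not part of the original file): a small red ball
# bouncing inside an assumed 800x600 turtle window.
#
#   ball = Ball(0, 0, 3, 2, 1, "red")
#   for _ in range(1000):
#       ball.move(800, 600)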
| [
"[email protected]"
]
| |
cef5644422bd50310697c6c532f786ee108005a0 | 94c870e66c1ebcf8077eda4b192eea5ca4b0ac09 | /test/test_pet.py | ef80a3121bb1cf2bf69a2f8816de4e61851e9ef0 | []
| no_license | georgeerol/pythonPetStoreSDK | 238a02fefeff5da50200bd3e55c70d4e498cdfab | c71df239733502e56d8416be9974d18108bdfcc0 | refs/heads/master | 2023-01-02T14:16:46.378234 | 2020-10-22T22:30:16 | 2020-10-22T22:30:16 | 306,472,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py |
# coding: utf-8
"""
Swagger Petstore
This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters. # noqa: E501
OpenAPI spec version: 1.0.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.pet import Pet # noqa: E501
from swagger_client.rest import ApiException
class TestPet(unittest.TestCase):
"""Pet unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPet(self):
"""Test Pet"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.pet.Pet() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
e554e238196d5315ac5d6c9d3b6c9423e9c2163d | 165ef016932b8b2741004c321e7f2fda225d8793 | /Baidu_spider/Baidu_base_spider.py | e5ddbf5d64f1e29cdb03144dd57b726749fca5cf | []
| no_license | lH-Liu-Hao/Article_abstract | d6ffa6ddcccf9cfb1b5096dc884c6d8886633bb0 | ab6d495406bda7008d953961d89c4b5f3a9ecf2d | refs/heads/master | 2022-11-13T08:26:13.034371 | 2020-07-07T10:21:48 | 2020-07-07T10:21:48 | 277,784,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,037 | py |
import re
import time
import json
import requests
import random
import aiohttp
import html as _html
from lxml import etree
from Utility.Baidu_Message.log import log
from Utility.Baidu_Message.Get_time import get_real_time
from Utility.Baidu_Message.Get_ADSL_Proxy import get_ADSL_proxy
from Utility.Baidu_Message.config import UA_POOL
from Utility.Baidu_Message.Common_code import get_md5
from Utility.Baidu_Message.Baidu_spider.Quanju_and_Pro import Quanju_Pro
requests.packages.urllib3.disable_warnings()
class Baidu_base():
def __init__(self, args, send_type):
self.args = args
self.keyword = self.args['word']
self.sum_content_count = 0
self.sum_error_content_count = 0
self.real_page = 1
self.proxy = 0
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
"Host": "www.baidu.com",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
}
self.headers['User-Agent'] = random.choice(UA_POOL)
self.medium = 1
self.url = f"http://www.baidu.com/s?ie=utf-8&cl=2&rtt=1&bsst=1&rsv_dl=news_b_pn&tn=news&wd={self.keyword}&medium={self.medium}&pn="
# Type of uploaded data -- global or three-in-one format
self.send_type = send_type
self.max_page = 0
self.logger = log
def send_type_dict(self, page_data_dict):
Quanju_and_Pro = Quanju_Pro()
# # Global upload format
# if self.send_type == 1:
# send_type_dict = Quanju_and_Pro.common_dict
# # Three-in-one upload format
# else:
# commit_dict = Quanju_and_Pro.common_dict
# commit_dict['headers']['topic'] = "probaidu"
# send_type_dict = commit_dict
send_type_dict = Quanju_and_Pro.create_baidu_solr_dict(page_data_dict)
return send_type_dict
async def get_cont(self, con_html, xpath_rule):
'''
Use an xpath rule to collect the text under multiple tags and join it together.
:param con_html:
:param xpath_rule:
:return:
'''
doc = etree.HTML(con_html)
con = doc.xpath(xpath_rule)
content = "".join(x.strip() for x in con)
return content
async def exp_code(self, field, url, e, error_count, count): # Shared error-handling code for extraction failures
self.logger.exception(f'Error extracting the {field} field -- current page url --> {url} -- reason --> {e}')
error_count += 1
count += 1
async def get_res(self, url, page_num=0, page_datas=None):
'''Get the etree element object and the page source.'''
# Shared request code
real_page = page_num + 1
if self.proxy:
proxy = get_ADSL_proxy()['http']
else:
proxy = ""
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=self.headers, verify_ssl=False, proxy=proxy) as res:
if res.status != 200:
self.logger.info(f"请求第{real_page}页失败--url-->{url}--状态码-->{res.status}")
return page_datas
real_result = await res.text()
doc = etree.HTML(real_result)
return doc, real_result
except Exception as e:
self.logger.exception(f"在{self.medium_type}请求第{real_page}页获得response失败--url-->{url}--原因-->{str(e)}")
async def comback(self, page_num, num):
# 回调函数
real_page_datas = []
real_page = page_num - num
for page in range(real_page):
pn = page * 10
sub_url = re.sub('medium=(.*?)&', f'medium={self.medium}&', self.url)
real_url = sub_url + str(pn)
page_datas = await self.parse_page(real_url, real_page)
real_page_datas.append(page_datas)
return real_page_datas
async def the_page_exist(self, url, page_num, page_datas):
'''Check whether this keyword has any related news and determine the maximum page number.'''
doc_result = await self.get_res(url, page_num, page_datas)
doc = doc_result[0]
real_result = doc_result[1]
news_total_num = re.search('找到相关资讯约(.*?)篇', real_result)
if news_total_num is None:
self.logger.info(f"No related news found for the keyword {self.keyword}")
return page_datas, 0
the_page_exist = doc.xpath('//p[@id="page"]//span[@class="pc"]/text()')
if the_page_exist:
self.max_page = int(the_page_exist[-1].strip())
else:
self.max_page = 1
async def get_last_page_count(self, url):
'''
获得数据的总数目,最多100条,请求最后一页统计最后一页的数目,加上前面数量即总数目
:param url:
:return:
'''
doc_result = await self.get_res(url)
doc = doc_result[0]
the_page_exist = doc.xpath('//p[@id="page"]//span[@class="pc"]/text()')
if the_page_exist:
max_page = int(the_page_exist[-1].strip())
else:
max_page = 1
pn = (max_page - 1) * 10
url = self.url + str(pn)
doc_result = await self.get_res(url)
doc = doc_result[0]
count = len(doc.xpath('//div[@class="result"]'))
total_count = (max_page - 1) * 10 + count
return total_count
async def parse_page(self, url, page_num):
'''
Fetch the data of a single page.
:param url:
:return:
'''
if self.medium == 1:
self.medium_type = "media sites"
else:
self.medium_type = "Baijiahao"
self.real_page = page_num + 1
# Number of detail items
content_count = 0
# Number of failed detail items
error_content_count = 0
# All data for this page
page_datas = []
doc_result = await self.get_res(url, page_num, page_datas)
doc = doc_result[0]
await self.the_page_exist(url, page_num, page_datas)
# Check whether the page number exceeds the request cap of 10; page_num=9 is actually page 10
if page_num > 9:
self.logger.info(f'The requested page number is already above 10 -- on {self.medium_type}')
if self.medium == 1:
self.medium = 2
else:
return page_datas
num = 9
page_datas = await self.comback(page_num, num)
return page_datas
# Check whether the requested page number exceeds the keyword's actual page count
real_page_num = page_num + 1
if real_page_num > self.max_page:
self.real_page = 1
if self.medium == 2:
self.logger.info(f'All pages for the keyword {self.keyword} have been requested, no more data')
self.logger.info(f"The requested page number exceeds the actual page count on -- {self.medium_type} -- {self.keyword} -- actual pages: {self.max_page}")
# Once any data can be fetched from the media sites, stop requesting Baijiahao
if self.sum_content_count % 10 == 0:
self.medium = 2
self.medium_type = "Baijiahao"
num = self.max_page
page_datas = await self.comback(page_num, num)
else:
self.logger.info(f"Baijiahao data is not provided for now -- {self.keyword}")
return page_datas
try:
every = doc.xpath('//div[@class="result"]')
for every_one in every:
one_html = _html.unescape(etree.tostring(every_one, method="html").decode())
one_doc = etree.HTML(one_html)
try:
title = await self.get_cont(one_html, '//h3[@class="c-title"]/a//text()')
except Exception as e:
await self.exp_code("标题", url, e, self.sum_error_content_count, error_content_count)
continue
try:
url = one_doc.xpath('//h3[@class="c-title"]/a/@href')[0].strip()
except Exception as e:
await self.exp_code("文章详情url", url, e, self.sum_error_content_count, error_content_count)
continue
try:
source_and_time = one_doc.xpath('//p[@class="c-author"]//text()')
source_and_time_con = ''.join(x.strip() for x in source_and_time)
source, false_time = source_and_time_con.split(" ", 1)
From = source.strip()
time_date, timestamp = get_real_time(false_time.strip())
except Exception as e:
await self.exp_code("来源和时间", url, e, self.sum_error_content_count, error_content_count)
continue
try:
content = await self.get_cont(one_html,
'//div[contains(@class,"c-summary")]/text() | //div[contains(@class,"c-summary")]/em/text()')
except Exception as e:
await self.exp_code("内容", url, e, self.sum_error_content_count, error_content_count)
continue
add_timestamp = int(time.time()) * 1000
content_html = re.sub('[\n\r\t]', '', one_html)
page_data_dict = {
"ID": get_md5(url),
'TaskName': From,
'GroupName': From,
'AddOn': add_timestamp,
"Title": title,
"Url": url,
"Time": timestamp,
"Content": content,
"From": From,
# Site language (1. Simplified Chinese 2. Traditional Chinese 3. English 4. Japanese 5. Korean 6. Tibetan 7. Mongolian)
'Language': 2052,
'Keyword': self.keyword,
'ContentSource': content_html,
}
send_type_dict = self.send_type_dict(page_data_dict)
page_datas.append(send_type_dict)
self.sum_content_count += 1
content_count += 1
except Exception as e:
self.logger.exception(f'Match failure or other error while extracting the required fields -- current page url --> {url} -- reason --> {e}')
# print('data ****', page_datas)
return page_datas
async def parse_turn_page(self, page):
'''Page through the results.'''
url = self.url + '0'
total_count = await self.get_last_page_count(url)
try:
for page_num in range(page): # 循环获取全部页数的数据
pn = page_num * 10 # 单获取当前页数的数据
url = self.url + str(pn)
page_datas = await self.parse_page(url, page_num)
yield page_datas, total_count
except Exception as e:
self.logger.exception(f'Looped requests failed -- reason -- {e}')
self.logger.info(f'Finished requesting all {page} pages')
async def run(self, page):
'''
Yield the data of each page.
'''
page_datas_count = self.parse_turn_page(page)
async for i in page_datas_count:
page_datas = i[0]
total_count = i[1]
if page_datas:
for data_dict in page_datas:
yield data_dict, total_count
self.logger.info(f"{page}页最终成功获取到的详情数量--{self.sum_content_count}条")
if __name__ == "__main__":
task = {"keyword": "广州", "GroupID": "325645", "TaskID": "256455"}
keyword = task['keyword']
medium = 1
baidu_base = Baidu_base(task)
page = '10'
collect_type = "百度_媒体网站"
| [
"[email protected]"
]
| |
700eae00d458af1a27cfa44c1a82cf7f2793020d | 39fa481b356c5e8df77c7459330294b30b45b845 | /process_aggregated_data_counters_baseline_2midd_2vms.py | 49036992b16709c60d2c970ba0f57068aa9452cc | []
| no_license | jovan-ioanis/asl-project-2017 | bd32d4220b23163bcb1de7b99bfc5c852311b0b4 | 079b94e0119bd6d71b5ccf8ecf0ee4a085d69722 | refs/heads/master | 2021-04-06T20:46:30.637686 | 2018-06-14T10:25:56 | 2018-06-14T10:25:56 | 125,279,638 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,988 | py |
"""
ASL project - fall 2017
author: Jovan Nikolic
Processes aggregated logs generated by middleware
"""
import numpy as np
import csv
agg_path_base = "aggregated_data/baseline_2midd_2vms/counters/"
plot_path_base = "plots/baseline_2midd_2vms/timers/"
name_base = "timer_aggregated_data_"
client_threads_basename = "clientThreads_"
worker_threads_basename = "_workerThreads_"
counters_basename = "counter_"
timers_basename = "timers_"
number_of_middlewares = 2
virtual_clients_pt = [1, 5, 8, 15, 22, 28, 32, 42, 52, 64]
worker_threads = [8, 16, 32, 64]
command_types = ["_S1-G0"]
repetitions = 3
memtier_vms = 2
memtier_instances_per_vm = 2
memtier_threads_per_inst = 1
def read_csv(cpt, wt, command_type):
rep1 = []
rep2 = []
rep3 = []
for mw in range(number_of_middlewares):
print("READING MW: " + str(mw))
current_mw = mw + 1
path = agg_path_base + "throughput_" + "mw" + str(current_mw) + "_cpt" + str(cpt) + \
"_wt" + str(wt) + \
command_type + ".csv"
counter = 0
with open(path, 'r') as file:
data = file.readlines()
data = [x.strip() for x in data]
for k, line in enumerate(data):
if k == 0:
continue
if k == len(data)-1:
continue
parsed_line = line.split(',')
[x.strip() for x in parsed_line]
if mw == 0:
rep1.append(float(parsed_line[1]))
rep2.append(float(parsed_line[2]))
rep3.append(float(parsed_line[3]))
else:
if counter >= len(rep1):
continue
rep1[counter] += float(parsed_line[1])
rep2[counter] += float(parsed_line[2])
rep3[counter] += float(parsed_line[3])
counter += 1
file.close()
cut_left = 5
cut_right = min([len(rep1), len(rep2), len(rep3)]) - 2
throughput_mean_1 = np.mean(np.asarray(rep1)[cut_left:cut_right])
throughput_mean_2 = np.mean(np.asarray(rep2)[cut_left:cut_right])
throughput_mean_3 = np.mean(np.asarray(rep3)[cut_left:cut_right])
# print("tps are: " + str(throughput_mean_1) + " " + str(throughput_mean_2) + " " + str(throughput_mean_3))
throughput_mean = np.mean(np.asarray([throughput_mean_1, throughput_mean_2, throughput_mean_3]))
throughput_std = np.std(np.asarray([throughput_mean_1, throughput_mean_2, throughput_mean_3]))
# print("final tps = " + str(throughput_mean) + ", final std = " + str(throughput_std))
response_time = memtier_vms * memtier_instances_per_vm * memtier_threads_per_inst * cpt * 1000 / throughput_mean
return [throughput_mean, throughput_std, response_time]
def print_csv(header, path, full_data):
total_clients = memtier_vms * memtier_instances_per_vm * memtier_threads_per_inst * np.asarray(virtual_clients_pt)
print("Number of rows is: " + str(len(total_clients)))
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=header)
writer.writeheader()
for row in range(len(total_clients)):
one_row = {}
i = 0
one_row[header[i]] = total_clients[row]
i += 1
for wt in worker_threads:
one_row[header[i]] = full_data[virtual_clients_pt[row]][wt][0]
i += 1
one_row[header[i]] = full_data[virtual_clients_pt[row]][wt][1]
i += 1
one_row[header[i]] = full_data[virtual_clients_pt[row]][wt][2]
i += 1
writer.writerow(one_row)
csv_file.close()
def main():
for z, command_type in enumerate(command_types):
big_data = {}
if z == 0:
suffix = "write-only"
else:
suffix = "read-only"
print("Command type = " + suffix)
for cpt in virtual_clients_pt:
print(" Virtual clients: " + str(cpt))
data = {}
for wt in worker_threads:
print(" Workers: " + str(wt))
data[wt] = read_csv(cpt, wt, command_type)
big_data[cpt] = data
header = ["#Number of Clients",
"Mean Throughput [req/s] - 8 WORKERS", "Std Dev Throughput - 8 WORKERS", "Response Time [ms] - 8 WORKERS",
"Mean Throughput [req/s] - 16 WORKERS", "Std Dev Throughput - 16 WORKERS", "Response Time [ms] - 16 WORKERS",
"Mean Throughput [req/s] - 32 WORKERS", "Std Dev Throughput - 32 WORKERS", "Response Time [ms] - 32 WORKERS",
"Mean Throughput [req/s] - 64 WORKERS", "Std Dev Throughput - 64 WORKERS", "Response Time [ms] - 64 WORKERS"]
path = plot_path_base + "throughput_" + suffix + ".csv"
print_csv(header, path, big_data)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
7d58d170ccd59d2b30f04e9210067ffec1c01f94 | d4f76aa484cbf1f6026b0c102e5d70012a28512a | /msos_project/dsp_tools/spectral_contrast_feature_max_classifier.py | e8c4ff4004c4f9e5ebfe3c18654201b6ca4a19de | []
| no_license | hbulg96/MSOS-classifier | 4eaea8b434455fc300b25fcd0c6bde52b32e7d23 | aa5b9702f7f39a30ea9b9746244c82fa75b2bbea | refs/heads/main | 2023-05-05T05:19:19.374327 | 2021-05-25T16:39:59 | 2021-05-25T16:39:59 | 370,755,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,046 | py |
import numpy
import matplotlib
from matplotlib import pyplot
import scipy
from scipy import signal
from scipy.io.wavfile import read
from scipy.io.wavfile import write
import os
import timeit
import traceback
import msos_project
from msos_project import *
from msos_project.dsp_tools import *
import msos_project.dsp_tools.peakdetection as peakdetection
import msos_project.dsp_tools.peakflatten as peakflatten
import msos_project.dsp_tools.rhythmdetection as rhythmdetection
import msos_project.dsp_tools.find_similar_magnitude_peaks as find_similar_magnitude_peaks
import msos_project.dsp_tools.find_rhythmic_packets as find_rhythmic_packets
from numpy import random
import msos_project.classification_1_rhythm_time_domain_v0_standalone as classifier1
import msos_project.dsp_tools.spectral_centroid_classifier as spectral_centroid_classifier
import msos_project.dsp_tools.spectral_centroid_assign_weights as spectral_centroid_assign_weights
import msos_project.dsp_tools.zero_crossing_rate_classifier as zero_crossing_rate_classifier
import msos_project.dsp_tools.rms_variation_classifier as rms_variation_classifier
from scipy import stats
from numpy import polyfit
import librosa
from librosa import *
from librosa import display
import scipy.stats
def spectral_contrast_feature_max_classifier(input_path, show_graph=False):
input_file = read(input_path) # read wav file
fs = input_file[0]
input_file = numpy.array(input_file[1], dtype = float) # interpret file as numpy array
print("Fs = ", fs)
feature_1 = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
feature_2 = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
number_of_bands = feature_1.shape[0]
length_of_contrast_values = feature_1.shape[1]
# find most tonal or most noisy band
band_averages = [] #store average spectral contrast value per band
for freq_band in range(number_of_bands):
current_band = feature_1[freq_band]
band_average = sum(current_band)/len(current_band)
band_averages.append(band_average)
for contrast_value in range(len(current_band)):
current_value = current_band[contrast_value]
pass
pass
max_contrast_band = max(band_averages)
max_contrast_band_index = band_averages.index(max_contrast_band)
min_contrast_band = min(band_averages)
min_contrast_band_index = band_averages.index(min_contrast_band)
#print("band_averages = ", band_averages)
#print("largest average contrast band value = ", max_contrast_band)
#print("max contrast band index = ", max_contrast_band_index)
#print("smallest average contrast band value = ", min_contrast_band)
#print("minx contrast band index = ", min_contrast_band_index)
# most important band (feature band)
feature_band_index = max_contrast_band_index
feature_band = feature_1[feature_band_index] # contrast band with the highest average contrast value,
# representing the most interesting/intentional sound?
# "least" important band (noise band)
noise_band_index = min_contrast_band_index
noise_band = feature_1[noise_band_index]
# amount of time spent with max contrast in feature band (should be closest to feature band)
time_spent_at_feature_band = 0
time_spent_at_noise_band = 0
max_contrast_all_bands = [] # location of the max spectral contrast at any time
for value_index in range(length_of_contrast_values):
# find index of current spectral contrast value
contrast_values_per_band = []
for freq_band in range(number_of_bands):
# find max value in all bands
current_band = feature_1[freq_band]
#print("freq band index = ", freq_band)
#print("spectral contrast values = ", current_band)
contrast_values_per_band.append(current_band[value_index])
pass
max_contrast_value_band = max(contrast_values_per_band)
mcvb_index = contrast_values_per_band.index(max_contrast_value_band)
max_contrast_all_bands.append(mcvb_index)
min_contrast_value_band = min(contrast_values_per_band)
mincvb_index = contrast_values_per_band.index(min_contrast_value_band)
if mcvb_index == feature_band_index:
time_spent_at_feature_band += 1
pass
else:
pass
if mincvb_index == noise_band_index:
time_spent_at_noise_band += 1
pass
else:
pass
feature_1 = time_spent_at_noise_band # Average of spectral contrast in all bands condensed into one value
feature_2 = time_spent_at_feature_band # amount of time ticks spent with max spentral contrast in the feature band
if show_graph == True:
print("Noise-min metric = ", feature_1)
print("Feature-max metric = ", feature_2)
pyplot.figure(1)
contrast_bands = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
pyplot.imshow(contrast_bands, aspect='auto', origin="lower", cmap="coolwarm")
pyplot.ylabel('Frequency Band')
pyplot.xlabel('Time (DFT bin)')
pyplot.title("Spectral Contrast")
# add lines for feature band and noise band
contrast_bands = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
feature_band_x_points = [0, (contrast_bands.shape[1] - 1)]
feature_band_y_points = [feature_band_index, feature_band_index]
pyplot.plot(feature_band_x_points, feature_band_y_points, color='r',linewidth=3, label='feature band')
noise_band_x_points = [0, (contrast_bands.shape[1] - 1)]
noise_band_y_points = [noise_band_index, noise_band_index]
pyplot.plot(noise_band_x_points, noise_band_y_points, color='b', linewidth=3, label='noise band')
pyplot.plot(range(len(max_contrast_all_bands)), max_contrast_all_bands, color='g', label='max spectral contrast value')
pyplot.legend()
pyplot.show()
pass
elif show_graph == False:
pass
else:
print("Error in detecting show_graph variable")
pass
return(feature_1, feature_2)
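# Example invocation (illustrative only; the path below is hypothetical):
#
#   noise_metric, feature_metric = spectral_contrast_feature_max_classifier(
#       r"C:\path\to\clip.wav", show_graph=True)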
"""
test_file = read(r"C:\\Users\h_bul\Documents\Acoustics Year 3\Project\Audio Resources\Development\Effects\0M8.wav")
test_file = numpy.array(test_file[1], dtype = int)
matplotlib.pyplot.plot(test_file)
pyplot.xlabel("Time")
pyplot.ylabel("Amplitude")
pyplot.show()
"""
"""
matplotlib.pyplot.plot(gain_boosted_file)
pyplot.xlabel("Time")
pyplot.ylabel("Amplitude")
pyplot.show()
"""
"""
f, t, Sxx = signal.spectrogram(average_effect_file, 44100)
pyplot.pcolormesh(t, f, Sxx, shading='gouraud')
pyplot.ylabel('Frequency [Hz]')
pyplot.xlabel('Time [sec]')
pyplot.show()
"""
| [
"[email protected]"
]
| |
b4e958c4c8b3a6d7b11e0c879811003cbd6781ff | b83cc9e051f969d386c0d7ef40a4e9a5b6faff4c | /promqtt/promexp/promexp.py | fb63f21c0fd2a8a62d55fb0ddb2ff07c75e26288 | []
| no_license | motlib/promqtt | 5309b3178238db76bed3c17d19b8820752637a29 | abb7f50b637d23a2e58d79170a64c5ca755d3e41 | refs/heads/develop | 2023-06-08T07:00:42.333962 | 2023-06-06T15:24:18 | 2023-06-06T15:24:18 | 194,465,311 | 7 | 2 | null | 2023-06-06T15:24:19 | 2019-06-30T02:03:31 | Python | UTF-8 | Python | false | false | 3,812 | py |
"""Prometheus exporter"""
import logging
from threading import Lock
from typing import Iterator
from .exceptions import PrometheusExporterException, UnknownMeasurementException
from .metric import Metric
from .types import MetricTypeEnum
logger = logging.getLogger(__name__)
class PrometheusExporter:
"""Manage all measurements and provide the htp interface for interfacing with
Prometheus."""
def __init__(self, hide_empty_metrics: bool = False) -> None:
self._prom: dict[str, Metric] = {}
self._lock = Lock()
self._hide_empty_metrics = hide_empty_metrics
def register(
self,
name: str,
datatype: MetricTypeEnum,
helpstr: str,
timeout: int = 0,
with_update_counter: bool = False,
): # pylint: disable=too-many-arguments
"""Register a name for exporting. This must be called before calling
`set()`.
:param str name: The name to register.
:param MetricTypeEnum datatype: The metric type, one of gauge or counter.
:param str helpstr: The help information / comment to include in the
output.
:param int timeout: Timeout in seconds for any value. Before rendering,
values whose last update is older than this many seconds are removed."""
with self._lock:
if name in self._prom:
raise PrometheusExporterException(
f"The metric '{name}' is already registered"
)
metric = Metric(
name=name,
datatype=datatype,
helpstr=helpstr,
timeout=timeout,
with_update_counter=with_update_counter,
)
self._prom[name] = metric
if with_update_counter:
self.register(
name=f"{name}_updates",
datatype=MetricTypeEnum.COUNTER,
helpstr=f"Number of updates to {name}",
timeout=0,
)
def set(self, name: str, labels: dict[str, str], value: float | None):
"""Set a value for exporting.
:param str name: The name of the value to set. This name must have been
registered already by calling `register()`.
:param dict labels: The labels to attach to this name.
:param value: The value to set."""
# We raise an exception if we do not know the metric name, i.e. if it
# was not registered
if name not in self._prom:
raise UnknownMeasurementException(
f"Cannot set not registered measurement '{name}'."
)
with self._lock:
metric = self._prom[name]
metric.set(labels, value)
if metric.with_update_counter:
counter = self._prom[f"{name}_updates"]
counter.inc(labels)
def check_timeout(self) -> None:
"""Remove all metric instances which have timed out"""
with self._lock:
for metric in self._prom.values():
metric.check_timeout()
def render_iter(self) -> Iterator[str]:
"""Return an iterator providing each line of Prometheus output."""
for metric in self._prom.values():
if not self._hide_empty_metrics or len(metric):
yield from metric.render_iter()
def render(self) -> str:
"""Render the current data to Prometheus format. See
https://prometheus.io/docs/instrumenting/exposition_formats/ for details.
:returns: String with output suitable for consumption by Prometheus over
HTTP."""
self.check_timeout()
return "\n".join(self.render_iter())
| [
"[email protected]"
]
| |
6ca78af1f834764d06e25d5f327c58698d1cc904 | b464b3602186a237b913377706a79ec7a6c0638f | /Sono.py | b0ea0c6c6e4566b7ffeeab0b037ebe0a13f34f05 | []
| no_license | gauravnv/SonoChrome | f1d2532ce620afc3715f6c37d065fade1ff2d20a | b90278ffffe0cf8b101208cb70a2301ddb605cd5 | refs/heads/master | 2020-09-10T17:10:53.430246 | 2019-11-26T10:30:09 | 2019-11-26T10:30:09 | 221,773,256 | 1 | 1 | null | 2020-05-26T02:37:37 | 2019-11-14T19:39:42 | Python | UTF-8 | Python | false | false | 2,456 | py |
# Importing the libraries
import pandas as pd
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
# def baseline_model():
# Initializing Neural Network
model = Sequential()
# Adding the input layer and the first hidden layer
model.add(Dense(activation="relu", input_dim=54, units=38, kernel_initializer="he_uniform"))
# Adding the second hidden layer
model.add(Dense(activation="relu", units=38, kernel_initializer="he_uniform"))
# # Adding the third hidden layer
# classifier.add(Dense(activation="relu", units=38, kernel_initializer="he_uniform"))
# Adding the output layer
model.add(Dense(activation="softmax", units=19, kernel_initializer="he_uniform"))
# Compiling Neural Network
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
sc = StandardScaler()
# Importing the dataset
dataset = pd.read_csv('Dataset/Audio_features_train.csv')
# Get all the features starting from tempo
features = dataset.loc[:, 'tempo':]
features = features.values
labels = dataset.loc[:, 'label'].dropna().astype(int)
labels = to_categorical(labels)
# Fix naming here
def build_model():
test_size = 0.333
random_seed = 5
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=test_size,
random_state=random_seed)
X_train = sc.fit_transform(X_train)
classifier = model
# Fitting our model
classifier.fit(X_train, y_train, batch_size=20, epochs=100)
# Predicting the Test set results
def predict_emotion():
test_features = pd.read_csv('Dataset/Audio_features.csv')
test_features = test_features.loc[:, 'tempo':]
test_features = test_features.values
test_features = sc.transform(test_features)
emotion_probabilities = model.predict(test_features)
predicted_emotions = []
for i in range(len(emotion_probabilities)):
for j in range(len(emotion_probabilities[0])):
if emotion_probabilities[i][j] > 0.5:
predicted_emotions.append(j)
return predicted_emotions
| [
"[email protected]"
]
| |
d7fbb5416b8806ae276c11411ccc2255ccd912fb | 5af948ca56c80bb5a2fa2eb96cde0b5f21d7cc1c | /py/tweet_clore_rec.py | d71e3f8b54c31c012c108448031441e9df3b0c0f | []
| no_license | shin-kanouchi/mybot | 5c80bbffa7f9617e60d1f3300ee77a8061cc58cb | 915b10ad3df4854b154957f4c73f258708f73380 | refs/heads/master | 2021-01-24T11:22:52.195347 | 2017-01-10T13:06:57 | 2017-01-10T13:06:57 | 70,224,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | #!/usr/bin/env python
# coding: utf-8
import sys
import twitter
import twkeys
import time
maxcount=1000
maxid =0
terms=sys.argv[1].strip().split(",") #["八意永琳","永琳","えーりん"]
search_str=" OR ".join(terms)
CONSUMER_KEY = twkeys.twkey['cons_key']
CONSUMER_SECRET = twkeys.twkey['cons_sec']
ACCESS_TOKEN_KEY = twkeys.twkey['accto_key']
ACCESS_TOKEN_SECRET = twkeys.twkey['accto_sec']
api = twitter.Api(consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
access_token_key=ACCESS_TOKEN_KEY,
access_token_secret=ACCESS_TOKEN_SECRET)
#rate = api.getRateLimitStatus().getLimit()
#print ("Limit %d / %d" % (rate['resources']['search']['/search/tweets']['remaining'],rate['resources']['search']['/search/tweets']['limit']))
#tm = time.localtime(rate['resources']['search']['/search/tweets']['reset'])
#print ("Reset Time %d:%d" % (tm.tm_hour , tm.tm_min))
#print ("-----------------------------------------\n")
found = api.GetSearch(term=search_str, count=100, result_type='recent')
i = 0
while True:
for f in found:
if maxid > f.id or maxid == 0: maxid = f.id
if len(f.text) < 80:
print (" ||| ".join(f.text.split("\n")))
#print (f.text)
i = i + 1
if len(found) == 0: break
if maxcount <= i: break
print (maxid)
found = api.GetSearch(term=search_str, count=100, result_type='recent', max_id=maxid-1)
#print ("-----------------------------------------\n")
#rate = api.GetRateLimitStatus()
#print ("Limit %d / %d" % (rate['resources']['search']['/search/tweets']['remaining'],rate['resources']['search']['/search/tweets']['limit']))
#tm = time.localtime(rate['resources']['search']['/search/tweets']['reset'])
#print ("Reset Time %d:%d" % (tm.tm_hour , tm.tm_min))
| [
"[email protected]"
]
| |
a470b06fc9b78794c067ddce000504747b54e9ee | e9669487133b6ff7771064d4ce7d7a67a6eb8898 | /ecopro-10.0_ZKHNZZ-20170724-02/mysite/api/tests.py | 4ef88d471419c88428d277379e3eaa057f5a7d4a | []
| no_license | sq2012/my-job | 4331bf0443ad58fc1f5eab05aa67628082495ae7 | 9d660026e6aac2e3b3c0ce22d5065868855249ec | refs/heads/master | 2023-01-02T05:32:43.392141 | 2020-11-02T10:39:53 | 2020-11-02T10:39:53 | 108,929,231 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 870 | py |
#coding=utf-8
"""
This module provides information to other systems.
"""
import os,platform
from mysite.utils import *
from mysite.core.mimi import getLicenseInfo
def index(request):
#print request.META
response=head_response()
lic=getLicenseInfo()
q=request.GET.get('q')
if q=='system_info':
d={'processor':platform.processor(),
'sysname':platform.platform(),
'registered':lic['registerTime'],
'license':lic['closeDay'],
'productName':lic['version'],
'clientNo':lic['custom']
}
result='\n'.join('%s=%s'%(k,v) for k,v in d.items())
#result=u"processor=%s\r\n%s\r\n"%(platform.processor(),platform.platform())
#result=dumps2(result)
result=result.encode("gb18030")
response.write(result)
return response
| [
"[email protected]"
]
| |
7ccbc4e48c8a6c4e12216c53585dbeed1a8e79b3 | 41ec83c821f1dbfc90020dad0da0a5c68c76a712 | /P16/main.py | 56783c8c3afabff1de73c66703cfbad7bc5daa36 | []
| no_license | t-sakuma2018/python-99 | 41c4901bc106a071103d4644759b989ab4bafe3a | 207b7fd1d94c14fbb90c5194b462ec4cdd610195 | refs/heads/master | 2020-03-30T18:37:28.276215 | 2018-10-22T08:42:17 | 2018-10-22T08:42:17 | 151,508,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py |
def drop(data, cnt):
# data = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 2
# result = ['a', 'c', 'e', 'g', 'i']
result = []
for data_cnt in range(len(data)):
if (data_cnt + 1) % cnt != 0:
result.append(data[data_cnt])
return result
| [
"[email protected]"
]
| |
7687e8fad295a78d7b2e1c5cfea19739a38862ed | 960abea1f15e82cca91de8736876d366a659d7da | /src/restful_app.py | a9d2817cce2d02f96aa0ec8e1be0e92cb1f5178a | []
| no_license | divyeshnair/video-library | 659b3ba0867d78cf53566aabeb7375e7add460b9 | f65303245910e6e71c880e8e9d4955a03edc7ee5 | refs/heads/master | 2023-09-03T06:44:25.093413 | 2021-11-13T04:13:35 | 2021-11-13T04:13:35 | 427,561,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py |
from flask_cors import CORS
from flask_restful import Api
from src.routes.v1 import urls
def restful_api(app):
CORS(app, resources={r"/*": {"origins": "*"}})
api = Api(app, prefix="/")
for url in urls:
base_decorators = (url.resource.base_decorators
if hasattr(url.resource, "base_decorators") else [])
url.resource.method_decorators = (url.resource.decorators or []) + base_decorators
api.add_resource(
url.resource,
*url.endpoint,
endpoint=url.name,
strict_slashes=False
) | [
"[email protected]"
]
| |
eda7bd6a0bd018f600a0c68863f943f94a273eaa | bfab1736dcbf271a50ab761b459d8d1fa3c21cee | /models/ops/functions/ms_deform_attn_func.py | e15205b629c170d95e549072e3247e5fef82be77 | [
"MIT"
]
| permissive | Abrahamon/TransTrack | fdd7db99ac77c24d650e5dfd8f9d4e221d7b1e15 | 30b6f08f3b6dc6ffab9929189089979390a185d6 | refs/heads/main | 2023-09-02T01:53:44.706620 | 2021-10-30T03:47:42 | 2021-10-30T03:47:42 | 423,220,818 | 0 | 0 | MIT | 2021-10-31T17:52:46 | 2021-10-31T17:52:45 | null | UTF-8 | Python | false | false | 2,791 | py |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import MultiScaleDeformableAttention as MSDA
class MSDeformAttnFunction(Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, value, value_spatial_shapes, sampling_locations, attention_weights, im2col_step):
ctx.im2col_step = im2col_step
output = MSDA.ms_deform_attn_forward(
value, value_spatial_shapes, sampling_locations, attention_weights, ctx.im2col_step)
ctx.save_for_backward(value, value_spatial_shapes, sampling_locations, attention_weights)
return output
@staticmethod
@torch.cuda.amp.custom_bwd
@once_differentiable
def backward(ctx, grad_output):
value, value_spatial_shapes, sampling_locations, attention_weights = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = \
MSDA.ms_deform_attn_backward(
value, value_spatial_shapes, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
return grad_value, None, grad_sampling_loc, grad_attn_weight, None
def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
# for debug and test only,
# need to use cuda version instead
N_, S_, M_, D_ = value.shape
_, Lq_, M_, L_, P_, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for lid_, (H_, W_) in enumerate(value_spatial_shapes):
# N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
# N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
# N_*M_, D_, Lq_, P_
sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
mode='bilinear', padding_mode='zeros', align_corners=False)
sampling_value_list.append(sampling_value_l_)
# (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
return output.transpose(1, 2).contiguous()
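# Shape sanity-check for ms_deform_attn_core_pytorch (illustrative, not part
# of the original file; all sizes below are made up). Note that importing
# this module still requires the MultiScaleDeformableAttention extension.
if __name__ == '__main__':
    N, M, D = 2, 8, 32              # batch, attention heads, channels per head
    Lq, L, P = 100, 2, 4            # queries, feature levels, points per level
    shapes = [(16, 16), (8, 8)]     # (H, W) of each level; S = 256 + 64 = 320
    S = sum(h * w for h, w in shapes)
    value = torch.rand(N, S, M, D)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2)   # normalized to [0, 1]
    attention_weights = torch.rand(N, Lq, M, L * P).softmax(-1).view(N, Lq, M, L, P)
    out = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights)
    print(out.shape)  # expected: torch.Size([2, 100, 256]) == (N, Lq, M * D)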
| [
"[email protected]"
]
| |
2a3d59808ff43327a4acc4464720e3595d3e6003 | e02ce1b168047f7bf1e639342179b7b8e9209e3a | /todo/migrations/0014_remove_userprofile_title.py | b58dd855f6d1abbd4869f0185e1b8b85318ad585 | []
| no_license | Manish12356789/TodoApp | 245133f283dbe93c288cd05c18d89d22eff1ad3d | a763643d7967baa1ea77c1fb9b5115cf4df34bc0 | refs/heads/main | 2023-06-02T21:53:50.394491 | 2021-06-18T03:40:40 | 2021-06-18T03:40:40 | 358,489,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py |
# Generated by Django 3.1.7 on 2021-04-08 05:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('todo', '0013_userprofile_title'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='title',
),
]
| [
"[email protected]"
]
| |
8e5ad8e713ee715f2e8a9c9186d1301779615956 | 2307605c5c23581069b0ac919924adc16dba3bd2 | /stack.py | 2f9d492f81643f5bd692a0b4173a3c9bd1c41473 | []
| no_license | kshitijyadav1/Python_Programming_Stuff | c2bf0c81e09065452c8a7af658303c0abe751563 | 15b476a498e1a1cb3fb4dfa9c651d053a0f2a90f | refs/heads/master | 2020-08-04T08:36:35.409140 | 2019-10-21T03:28:02 | 2019-10-21T03:28:02 | 212,075,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py |
#! python3
# Stack overview program. with built-in method
import sys
lst = []
def push(number):
if len(lst) < 10:
lst.append(number)
else:
print("Stack overflow")
def pops():
remove_element = 0
if len(lst) > 0:
remove_element = lst[len(lst) - 1]
lst.pop()
print("The list element value is ", remove_element, " has been removed.")
else:
print("The list is empty.")
def is_empty():
if len(lst) == 0:
return True
else:
return False
def length():
return len(lst)
def top():
return lst[-1] if lst else None # top = most recently pushed element
def show():
print(lst)
def title():
print("Stack operation")
def body():
print("0 => quit")
print("1 => push")
print("2 => pop")
print("3 => len")
print("4 => is_empty")
print("5 => top")
print("6 => show")
if __name__ == '__main__':
opt = 6
title()
while opt != 0:
try:
body()
opt = int(input("Enter option: "))
print("Ok, your request is processing.")
if opt == 0:
print("Bye bye, thanks for trying.")
sys.exit()
elif opt == 1:
print("Push value in stack")
try:
value = int(input("Enter value in stack "))
push(value)
except:
print("There was just an issue, when push value in stack.")
elif opt == 2:
print("Pop value in stack")
pops()
elif opt == 3:
print("The length of stack is ", length())
elif opt == 4:
if is_empty():
print("Yes, the stack is emtpy.")
else:
print("No, the stack is not empty.")
elif opt == 5:
print("The top stack reference is", top())
elif opt == 6:
show()
else:
print("Incorrect input.")
except ValueError:
print("Please enter a number.")
| [
"[email protected]"
]
| |
ad7379dd14cdc9148ef5d03f5ce9ebaaaac15f8d | 2ad6ba93c6b8b24bd888df8eb6062850d89f3742 | /hal/plugins/image.py | 520af9f29341b99547234678877d28fa433e8056 | []
| no_license | human-analysis/pytorchnet | c1a2200676faf366bd1dc689eb1c09fb1e914c6f | 026bd369cad0ae114d80b22d489ae55eeb48f312 | refs/heads/master | 2021-12-04T06:27:39.690510 | 2021-11-25T04:25:34 | 2021-11-25T04:25:34 | 88,304,856 | 37 | 17 | null | 2021-09-08T03:00:04 | 2017-04-14T21:31:59 | Python | UTF-8 | Python | false | false | 820 | py |
# image.py
import os
import torchvision.utils as vutils
class Image:
def __init__(self, path, ext='png'):
if os.path.isdir(path) is False:
os.makedirs(path)
self.path = path
self.names = []
self.ext = ext
self.iteration = 1
self.num = 0
def register(self, modules):
# here modules is assumed to be a list
self.num = self.num + len(modules)
for tmp in modules:
self.names.append(tmp)
def update(self, modules):
# here modules is assumed to be a list
for i in range(self.num):
name = os.path.join(self.path, '%s_%03d.png' %
(self.names[i], self.iteration))
vutils.save_image(modules[i], name)
self.iteration = self.iteration + 1
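# Illustrative usage (not part of the original file); assumes 4D image
# tensors of shape (batch, channels, height, width), as expected by
# torchvision.utils.save_image:
#
#   import torch
#   logger = Image('./samples')
#   logger.register(['generated'])
#   logger.update([torch.rand(16, 3, 64, 64)])   # writes generated_001.png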
| [
"[email protected]"
]
| |
4148ba0011b8da0c23ac14048f68d96a7d5a144f | ed7f2c5c235d1a3beca2ad78f8ef6eecd9afeea6 | /src/motors.py | d32ecdccd49447815025fb0116e63b984bb1da0e | []
| no_license | mvwicky/roboSim | 8f48bdfa291cfe6abc1c6a7294c7ab59161e3304 | c4d5d5f641ed976c71a591085019fcedc2ec3a5a | refs/heads/master | 2016-09-10T10:40:11.235120 | 2014-08-19T18:51:06 | 2014-08-19T18:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py |
import os
import sys
import random
import math
import utilFunctions as utlF
class motor(object):
"""Generic motor object"""
def __init__(self,port,wheelRad=0,ticks=1000,tolerance=0,sprite=None):
"""port:which port the motor is in
wheelRad:the radius of the attached wheel
if not a drive motor: wheelRad=0
ticks:number of ticks per revolution
tolerance:
sprite:path to the sprite
"""
self.port=port
self.position=0
self.wheelRad=wheelRad
self.ticks=ticks
self.lastSpeed=0
self.currentSpeed=0
#self.distPerTick
self.context=None
if sprite==None:
pass
elif sprite!=None and type(sprite)!=utlF.sprite:
print("Invalid sprite")
elif sprite!=None and type(sprite)==utlF.sprite:
self.sprite=sprite
self.tolerance=tolerance
def update(self):
if self.context==None:
print("Context not defined")
return -1
else:
pass
def draw(self):
pass
def moveAtVelocity(self,velocity):
self.currentSpeed=velocity
return 0
def moveRelativePosition(self,velocity,delta):
pass
def moveToPosition(self,velocity,position):
pass
def moveAngleDeg(self,velocity,theta):
pass
def moveAngleRad(self,velocity,theta):
pass
def getPosition(self):
pass
def forward(self):
pass
def off(self):
pass
def zeroMotor(self):
"""Sets the motor position back to zero"""
pass
def mav(self,velocity):
return self.moveAtVelocity(velocity)
def mrp(self,velocity,position):
return self.moveRelativePosition(velocity,position)
def mtp(self,velocity,position):
return self.moveToPosition(velocity,position)
def mad(self,velocity,theta):
return self.moveAngleDeg(velocity,theta)
def mar(self,velocity,theta):
return self.moveAngleRad(velocity,theta) | [
"[email protected]"
]
| |
8bda58c80727cd6213e5d9dbe03577076096112a | 479436b8581c6b27d9b91c5788d54b083d4d6bce | /111111111.py | 93965472241ced9f5119c2977bdbfbe37582a6b2 | []
| no_license | Khawoat6/Programming-Fundamental1-Python | 4e2821492e8f12c8a64df6ef47c2e8af7cdf6f8e | f40b79f81e58500fc8e7a68f5a97cda796d95d5e | refs/heads/master | 2021-08-31T15:07:58.058202 | 2017-12-21T20:47:01 | 2017-12-21T20:47:01 | 115,045,605 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py |
'''
import pygame as pg
import table as tb
import blocks as bk
import colors as col
GAME_SPEED = 500
SPEED_INC_TICK = 50
LINES_INC_TICK = 10
LEVEL = 1
REMOVED_LINES = 0
MAX_LEVEL = 10
FPS = 100
def delay(ticks):
return (ticks % GAME_SPEED) >= GAME_SPEED-10
def incSpeed(remlines):
global GAME_SPEED
global LEVEL
if LEVEL < MAX_LEVEL:
if remlines / (LEVEL*LINES_INC_TICK) == 1:
LEVEL += 1
GAME_SPEED -= SPEED_INC_TICK
return True
return False
def updateInfo(nb):
global LEVEL_NUM_TEXT
global LINES_NUM_TEXT
global infosurface
LEVEL_NUM_TEXT = font.render(str(LEVEL),True,col.WHITE)
LINES_NUM_TEXT = font.render(str(REMOVED_LINES),True,col.WHITE)
infosurface.fill(col.GREY_DARK)
infosurface.blit(LEVEL_TEXT,LEVEL_TEXT_OFFSET)
infosurface.blit(LEVEL_NUM_TEXT,LEVEL_NUM_TEXT_OFFSET)
infosurface.blit(LINES_TEXT,LINES_TEXT_OFFSET)
infosurface.blit(LINES_NUM_TEXT,LINES_NUM_TEXT_OFFSET)
nb.show(infosurface,NEXT_BLOCK_OFFSET,20,INF_BLOCK_SIZE)
pg.init()
pg.mixer.init()
sndblockplaced = pg.mixer.Sound("sounds/block_placed.wav")
sndblockrotate = pg.mixer.Sound("sounds/block_rotate.wav")
sndremovelines = pg.mixer.Sound("sounds/remove_lines.wav")
sndlevelup = pg.mixer.Sound("sounds/level_up.wav")
sndgameover = pg.mixer.Sound("sounds/game_over.wav")
clock = pg.time.Clock()
pg.display.set_caption("yat - yet another tetris")
pg.key.set_repeat(10,50)
INFO_SURFACE_HEIGHT = 105
FONT_SIZE = 30
FONT_SIZE_GAME_OVER = 60
UPPER_OFFSET = 20
LEFT_OFFSET = 10
INF_BLOCK_SIZE = 20
font = pg.font.SysFont(pg.font.get_default_font(),FONT_SIZE)
font_game_over = pg.font.SysFont(pg.font.get_default_font(),
FONT_SIZE_GAME_OVER)
LEVEL_TEXT = font.render("Level : ",True,col.WHITE)
LINES_TEXT = font.render("Lines : ",True,col.WHITE)
LEVEL_TEXT_OFFSET = (LEFT_OFFSET,UPPER_OFFSET)
LEVEL_NUM_TEXT_OFFSET = (70+LEFT_OFFSET,UPPER_OFFSET)
LINES_TEXT_OFFSET = (LEFT_OFFSET,INFO_SURFACE_HEIGHT-40)
LINES_NUM_TEXT_OFFSET = (70+LEFT_OFFSET,INFO_SURFACE_HEIGHT-40)
NEXT_BLOCK_OFFSET = tb.BLOCK_SIZE*tb.WIDTH - INF_BLOCK_SIZE * 5
GAME_OVER_TEXT = font_game_over.render("GAME OVER",True,col.WHITE)
GAME_OVER_TEXT_OFFSET = ((tb.BLOCK_SIZE*tb.WIDTH/2)-120,(tb.BLOCK_SIZE*tb.HEIGHT/2)-50)
screen = pg.display.set_mode((tb.BLOCK_SIZE*tb.WIDTH,tb.BLOCK_SIZE*tb.HEIGHT+INFO_SURFACE_HEIGHT))
tablesurface = screen.subsurface((0,INFO_SURFACE_HEIGHT,tb.BLOCK_SIZE*tb.WIDTH,tb.BLOCK_SIZE*tb.HEIGHT))
infosurface = screen.subsurface((0,0,tb.BLOCK_SIZE*tb.WIDTH,
INFO_SURFACE_HEIGHT))
BLOCK_SPAWN_POS = (0,(tb.WIDTH/2)-1)
t = tb.table(tablesurface)
b = bk.block(BLOCK_SPAWN_POS)
nextb = bk.block(BLOCK_SPAWN_POS)
updateInfo(nextb)
running = True
while running:
clock.tick_busy_loop(FPS)
t.adBlock(b.getPosList(),b.getType())
t.show()
if delay(pg.time.get_ticks()):
if b.canMovDown(t.getHeight(),t.getOcupPosList(b.getPosList())):
t.adBlock(b.getPosList(),bk.E)
b.movDown()
else:
t.adBlock(b.getPosList(),b.getType())
sndblockplaced.play()
retval = t.delFullLines()
if retval != 0:
sndremovelines.play()
REMOVED_LINES += retval
if incSpeed(REMOVED_LINES):
sndlevelup.play()
b.__init__(BLOCK_SPAWN_POS,nextb.getType())
nextb = bk.block(BLOCK_SPAWN_POS)
updateInfo(nextb)
if t.gameOver(b.getPosList()):
t.adBlock(b.getPosList(),b.getType())
t.show()
running = False
'''
| [
"[email protected]"
]
| |
8227e62538c36e990bf680b8e9b673e8d73582c3 | 167a2385f40df58d47150ce75450644c3a08595f | /average.py | 6efa93354432137d61c8ec4f3f0f52ae87fe002e | []
| no_license | gogo7654321/python_gamer | f0eefbce7333e2aafd0b5c7dab6dd5c45cbe44f6 | ed9d23fc5c06ee4695073175697048b1780f75ce | refs/heads/master | 2023-06-20T09:36:00.720461 | 2021-07-17T03:06:47 | 2021-07-17T03:06:47 | 384,267,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py |
print("enter 5 numbers and I will give you the average. Use zeros to enter less than 5")
a = int(input())
b = int(input())
c = int(input())
d = int(input())
e = int(input())
ans = (a + b + c + d + e)/5
print(ans)
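
# A hedged alternative phrased as an uncalled helper (an addition to illustrate
# the same computation; zeros still count toward the divisor, as above):
def average_of_five():
    nums = [int(input()) for _ in range(5)]
    return sum(nums) / len(nums)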
"[email protected]"
]
| |
88fb1cdd124245e3cc253a2114f9ce2bc39fc21a | f05fb1ea5d03147d50383bb7aa97bc6f347847b0 | /tensorpack/dataflow/dataset/customdataset.py | 1a2ee88f406ea31cfee26d3acf111f89ede67967 | [
"Apache-2.0"
]
| permissive | wuyuebupt/tensorpack | f25b272570fc5fe97c5d81f4f0f503168d910309 | 4caef684b10512a10abbba8d58d25cfdd253a8f6 | refs/heads/master | 2021-06-24T13:17:34.304702 | 2018-08-14T17:36:56 | 2018-08-14T17:36:56 | 95,582,653 | 0 | 0 | null | 2017-06-28T16:28:39 | 2017-06-27T17:14:01 | Python | UTF-8 | Python | false | false | 8,780 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: ilsvrc.py
# Author: Yuxin Wu <[email protected]>
import os
import tarfile
import six
import numpy as np
import tqdm
import xml.etree.ElementTree as ET
from ...utils import logger
# from ...utils.loadcaffe import get_caffe_pb
from ...utils.fs import mkdir_p, download, get_dataset_path
from ...utils.timer import timed_operation
from ..base import RNGDataFlow
__all__ = ['customMeta', 'customData']
class customMeta(object):
"""
Provide methods to access metadata for ILSVRC dataset.
"""
def __init__(self, dir=None):
        assert dir is not None
self.dir = dir
f = os.path.join(self.dir, 'synsets.txt')
# assert os.path.isfile(f)
# def get_synset_words_1000(self):
# """
# Returns:
# dict: {cls_number: cls_name}
# """
# fname = os.path.join(self.dir, 'synset_words.txt')
# assert os.path.isfile(fname)
# lines = [x.strip() for x in open(fname).readlines()]
# return dict(enumerate(lines))
def get_synset_1000(self):
"""
Returns:
dict: {cls_number: synset_id}
"""
fname = os.path.join(self.dir, 'synsets.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
# def _download_caffe_meta(self):
# fpath = download(CAFFE_ILSVRC12_URL, self.dir)
# tarfile.open(fpath, 'r:gz').extractall(self.dir)
def get_image_list(self, name, dir_structure='original'):
"""
Args:
name (str): 'train' or 'val' or 'test'
dir_structure (str): same as in :meth:`ILSVRC12.__init__()`.
Returns:
list: list of (image filename, label)
"""
assert name in ['train', 'val', 'test']
assert dir_structure in ['original', 'train']
# add_label_to_fname = (name != 'train' and dir_structure != 'original')
# if add_label_to_fname:
# synset = self.get_synset_1000()
# synset = self.get_synset_1000()
fname = os.path.join(self.dir, name + '.txt')
assert os.path.isfile(fname), fname
with open(fname) as f:
ret = []
for line in f.readlines():
name, cls = line.strip().split()
cls = int(cls)
# if add_label_to_fname:
# name = os.path.join(synset[cls], name)
ret.append((name.strip(), cls))
assert len(ret), fname
return ret
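    # Each line of {train,val,test}.txt is expected to be
    # "<relative/image/path> <integer label>", for example (illustrative):
    #   n02134418/n02134418_198.JPEG 294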
# def get_per_pixel_mean(self, size=None):
# """
# Args:
# size (tuple): image size in (h, w). Defaults to (256, 256).
# Returns:
# np.ndarray: per-pixel mean of shape (h, w, 3 (BGR)) in range [0, 255].
# """
# obj = self.caffepb.BlobProto()
#
# mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')
# with open(mean_file, 'rb') as f:
# obj.ParseFromString(f.read())
# arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')
# arr = np.transpose(arr, [1, 2, 0])
# if size is not None:
# arr = cv2.resize(arr, size[::-1])
# return arr
class customData(RNGDataFlow):
"""
Produces uint8 ILSVRC12 images of shape [h, w, 3(BGR)], and a label between [0, 999],
and optionally a bounding box of [xmin, ymin, xmax, ymax].
"""
def __init__(self, dir, name, meta_dir, shuffle=None,
dir_structure='original', include_bb=False):
"""
Args:
dir (str): A directory containing a subdir named ``name``, where the
original ``ILSVRC12_img_{name}.tar`` gets decompressed.
name (str): 'train' or 'val' or 'test'.
shuffle (bool): shuffle the dataset.
Defaults to True if name=='train'.
dir_structure (str): The directory structure of 'val' and 'test' directory.
'original' means the original decompressed
directory, which only has list of image files (as below).
If set to 'train', it expects the same two-level
directory structure simlar to 'train/'.
include_bb (bool): Include the bounding box. Maybe useful in training.
Examples:
When `dir_structure=='original'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
test/
ILSVRC2012_test_00000001.JPEG
...
With the downloaded ILSVRC12_img_*.tar, you can use the following
command to build the above structure:
.. code-block:: none
mkdir val && tar xvf ILSVRC12_img_val.tar -C val
mkdir test && tar xvf ILSVRC12_img_test.tar -C test
mkdir train && tar xvf ILSVRC12_img_train.tar -C train && cd train
find -type f -name '*.tar' | parallel -P 10 'echo {} && mkdir -p {/.} && tar xf {} -C {/.}'
"""
assert name in ['train', 'test', 'val'], name
assert os.path.isdir(dir), dir
# self.full_dir = os.path.join(dir, name)
self.full_dir = dir
self.name = name
assert os.path.isdir(self.full_dir), self.full_dir
if shuffle is None:
shuffle = name == 'train'
self.shuffle = shuffle
meta = customMeta(meta_dir)
assert dir_structure == 'train'
self.imglist = meta.get_image_list(name, dir_structure)
# self.synset = meta.get_synset_1000()
assert not include_bb
if include_bb:
bbdir = os.path.join(dir, 'bbox') if not \
isinstance(include_bb, six.string_types) else include_bb
assert name == 'train', 'Bounding box only available for training'
            self.bblist = customData.get_training_bbox(bbdir, self.imglist)
self.include_bb = include_bb
def size(self):
return len(self.imglist)
def get_data(self):
idxs = np.arange(len(self.imglist))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
fname, label = self.imglist[k]
fname = os.path.join(self.full_dir, fname)
im = cv2.imread(fname, cv2.IMREAD_COLOR)
assert im is not None, fname
if im.ndim == 2:
im = np.expand_dims(im, 2).repeat(3, 2)
if self.include_bb:
bb = self.bblist[k]
if bb is None:
bb = [0, 0, im.shape[1] - 1, im.shape[0] - 1]
yield [im, label, bb]
else:
yield [im, label]
@staticmethod
def get_training_bbox(bbox_dir, imglist):
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
size = root.find('size').getchildren()
            size = [int(size[0].text), int(size[1].text)]
            box = root.find('object').find('bndbox').getchildren()
            box = [float(x.text) for x in box]  # a list, so np.asarray below works on Python 3
# box[0] /= size[0]
# box[1] /= size[1]
# box[2] /= size[0]
# box[3] /= size[1]
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except KeyboardInterrupt:
raise
                except Exception:
ret.append(None)
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
try:
import cv2
except ImportError:
from ...utils.develop import create_dummy_class
# ILSVRC12 = create_dummy_class('ILSVRC12', 'cv2') # noqa
customData = create_dummy_class('customData', 'cv2') # noqa
if __name__ == '__main__':
    meta = customMeta(".")  # customMeta requires a metadata directory; "." is a placeholder
    # meta = ILSVRCMeta()
# print(meta.get_synset_words_1000())
# ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', include_bb=True,
# shuffle=False)
ds = customData('/home/yinpen/project/facemodel_train/tensorflow_res18/celeb20k_part1/', 'train', include_bb=False,
shuffle=False)
ds.reset_state()
for k in ds.get_data():
from IPython import embed
embed()
break
| [
"[email protected]"
]
| |
1fe95ef238ced027a00676f503504e0919482eb8 | 7a1f43c3180469c50b02b30a29f38969159be575 | /shop_src/urls.py | 53968521277f6036d2cf78e99adc81ad25607555 | []
| no_license | gitacc908/e-commerse.com | 4e93675845649c5d9da3c30a1f8acb79730a334e | 830207cfe7bd550ee2a7a451f589cb50e5afc8e8 | refs/heads/master | 2023-02-02T22:27:14.111288 | 2020-12-22T11:31:07 | 2020-12-22T11:31:07 | 321,278,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | """shop_src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('shop.urls')),
path('accounts/', include('django.contrib.auth.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"[email protected]"
]
| |
6bc1c39d0bfd966c86046b9b2b34af90fc49a7b8 | f24c8aa0a55709eb660026f2c94c284b314d471e | /app.py | 461a6d4414e7997877b6daf8c7babc3d82ee91af | [
"BSD-3-Clause"
]
| permissive | ocanava/number_guessing_game | 72ee44ecf3169c6c00a05150bc651fd8deb27ba3 | f0ca634301ee0f24fd39b05d6196ac7b490fb00a | refs/heads/master | 2022-12-13T11:54:33.841804 | 2020-08-31T15:43:41 | 2020-08-31T15:43:41 | 278,231,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | """
Python Web Development Techdegree
Project 1 - Number Guessing Game
--------------------------------
import random
number = random.randint(1, 10)
def start_game():
print("Welcome to the Number Guessing Game!!")
input("Press ENTER to continue...")
Tries = 1
while True:
try:
number = int(input("Pick a number between 1 and 10: "))
number = int(number)
guess_value = 3
except ValueError:
print("Oops! Please enter a valid number.")
Tries = Tries + 1
else:
if guess_value > number:
print("It's Higher! ")
Tries = Tries + 1
continue
elif guess_value < number:
print("It's Lower! ")
Tries = Tries + 1
continue
elif guess_value == number:
Tries = str(Tries)
print("Well done! You guessed it in", Tries + " tries. Game has ended! See you next time! ")
break
start_game()
| [
"[email protected]"
]
| |
b6784b3247c97783664af99f76f1ed296789c7e0 | 46b8f310899dd5940b2b5f9099df8c106fbb731d | /test_case/Test_Calc3.py | ec04a9950931efd8a1096b5721cdefd889834ff6 | []
| no_license | ericyishi/HTMLTestRunner_PY3 | 5e70cf571b946a4d1d37a4b01138bb6cb547281a | cb7d62d27c4bcec52b92b3ae145bba4359118d4d | refs/heads/master | 2020-03-18T17:11:59.646279 | 2018-05-27T07:04:11 | 2018-05-27T07:04:11 | 135,012,629 | 3 | 0 | null | 2018-05-27T03:58:15 | 2018-05-27T03:29:25 | Python | UTF-8 | Python | false | false | 1,259 | py | import unittest
from Calc import Calc
class TestCalc(unittest.TestCase):
'''计算器模块3'''
def setUp(self):
print("测试开始")
self.cal = Calc() # 在这里实例化
def test_add(self):
'''计算器加法模块3'''
self.assertEqual(self.cal.add(1, 2), 3, 'test add1 failed')
        self.assertNotEqual(self.cal.add(1, 4), 4, 'test add2 failed')
print("test_add pass")
# @unittest.skip("暂时不测")
def test_minus(self):
'''计算器减法模块3'''
self.assertEqual(self.cal.minus(1, 2), -1, 'test minus1 failed')
self.assertEqual(self.cal.minus(3, 2), 1, 'test minus2 failed')
print("test_minus pass")
def test_divide(self):
"""计算器除法法模块3"""
self.assertEqual(self.cal.divide(6, 2), 3, 'test divide1 failed')
self.assertEqual(self.cal.divide(3, 2), 1.5, 'test divide2 failed')
def test_multiple(self):
"""计算器乘法模块3"""
        self.assertEqual(self.cal.multiple(6, 2), 12, 'test multiple1 failed')
self.assertEqual(self.cal.multiple(3, 2), 6, 'test multiple2 failed')
def tearDown(self):
print("测试结束")
if __name__ == '__main__':
unittest.main()
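
# Calc.py is not shown in this record; a minimal stand-in consistent with the
# assertions above could look like this (an illustrative sketch, not the real
# implementation -- the actual class would live in Calc.py, not here):
class _CalcSketch:
    def add(self, a, b):
        return a + b

    def minus(self, a, b):
        return a - b

    def divide(self, a, b):
        return a / b

    def multiple(self, a, b):
        return a * b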
| [
"[email protected]"
]
| |
0f8bf04cd31f309613105a87df3df2b3b7c2c20d | 187b8c73572c26c28bc356fbe70591754f5ea92f | /userinfo/migrations/0001_initial.py | 672a7e37c5bfb62554782dbd484f8213491a5ca7 | []
| no_license | xingle0/onlybuy | f43e12e78c5dfbb826cc30bc21df1319eb4a0c0d | 1b97f853752905ab2f70e38a0ae2793cd50b3d3d | refs/heads/master | 2020-05-25T04:21:36.773900 | 2019-05-21T10:40:44 | 2019-05-21T10:40:44 | 187,624,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,214 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-03-29 02:25
from __future__ import unicode_literals
import datetime
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('headp', models.ImageField(blank=True, default='/headphoto/touxiang.png', upload_to='headphoto', verbose_name='头像')),
('nickname', models.CharField(blank=True, max_length=30, null=True, verbose_name='昵称')),
('mobile', models.CharField(max_length=13, verbose_name='手机号')),
('email', models.EmailField(max_length=254, null=True, verbose_name='邮箱')),
('sex', models.CharField(blank=True, choices=[('1', '男'), ('0', '女')], default='1', max_length=10, null=True, verbose_name='性别')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'abstract': False,
'verbose_name': 'user',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consignee', models.CharField(default='any', max_length=20, verbose_name='收件人')),
('ads', models.TextField(verbose_name='收货地址')),
('mobile', models.CharField(max_length=13, verbose_name='手机号')),
('defaultads', models.BooleanField(default=False, verbose_name='是否为默认地址')),
('zipcode', models.CharField(default='000000', max_length=30, verbose_name='邮编')),
('alias', models.CharField(max_length=50, verbose_name='别名')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='EmailVerifyRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=20, verbose_name='验证码')),
('email', models.EmailField(max_length=50, verbose_name='邮箱')),
('send_type', models.CharField(choices=[('register', '注册'), ('forget', '忘记密码')], max_length=20, verbose_name='验证码类型')),
('send_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='发送时间')),
],
options={
'verbose_name_plural': '邮箱验证码',
'verbose_name': '邮箱验证码',
},
),
]
| [
"[email protected]"
]
| |
753c1ed00799a2708a1459d586b88d5d75a234ee | 531d35baa4b294cee0a191cae04ce9920f1a3d54 | /python_stack/django/django_full_stack/wish_project/wishApp/views.py | b7ad509bd076aaa8c60eb3a024c5e2e54d4a0d42 | []
| no_license | brianoc707/Coding-Dojo | 3fa584bb86df203212823990545e66eae4ae6fd2 | fe73b7367f6dc9b7d64acef071e0eeb6db8746f6 | refs/heads/master | 2022-12-22T01:36:29.759106 | 2020-01-28T18:38:54 | 2020-01-28T18:38:54 | 226,181,033 | 1 | 0 | null | 2022-12-11T22:21:22 | 2019-12-05T20:12:16 | Python | UTF-8 | Python | false | false | 3,496 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import *
import bcrypt
# Create your views here.
def index(request):
return render(request, 'index.html')
def register(request):
print(request.POST)
resultFromValidator = User.objects.validateUser(request.POST)
if len(resultFromValidator) > 0:
for key, value in resultFromValidator.items():
messages.error(request, value)
return redirect('/')
else:
#if the request.POST info is valid then create a new user w the info from the form
#encrypt the pw
pwFromForm = request.POST['pw']
hash1 = bcrypt.hashpw(pwFromForm.encode(), bcrypt.gensalt())
newUser = User.objects.create(first_name= request.POST['fname'], last_name = request.POST['lname'], email = request.POST['email'], password = hash1.decode())
#save the info of the new user in session
request.session['loggedinID'] = newUser.id
return redirect('/success')
def login(request):
print(request.POST)
resultFromValidator = User.objects.loginValidator(request.POST)
if len(resultFromValidator) > 0:
for key, value in resultFromValidator.items():
messages.error(request, value)
return redirect('/')
else:
user = User.objects.get(email = request.POST['email'])
request.session['loggedinID'] = user.id
return redirect('/success')
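
# The password verification itself happens inside User.objects.loginValidator
# (defined in models.py, not shown here). A minimal sketch of the bcrypt check
# such a validator presumably performs -- an assumption, not the actual model code:
def _bcrypt_check_sketch(raw_password, stored_hash):
    # checkpw re-hashes raw_password with the salt embedded in stored_hash
    return bcrypt.checkpw(raw_password.encode(), stored_hash.encode())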
def success(request):
if 'loggedinID' not in request.session:
return redirect('/')
loggedinUser = User.objects.get(id = request.session['loggedinID'])
wishesNotGranted = Wish.objects.filter(user = loggedinUser, isgranted = False)
allgrantedwishes = Wish.objects.filter(isgranted = True)
print(wishesNotGranted)
context = {
'loggedinUser' : loggedinUser,
'myWishesNotGranted' : wishesNotGranted,
'allGrantedWishes' : allgrantedwishes,
}
return render(request, 'wishes.html', context)
def newWish(request):
return render(request, 'newwish.html')
def createWish(request):
loggedinUser = User.objects.get(id = request.session['loggedinID'])
print(request.POST)
errors = Wish.objects.wishValidator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/wishes/new')
else:
wish = Wish.objects.create(item = request.POST['item'], user = loggedinUser, likes = 0, desc = request.POST['desc'])
print(wish)
return redirect('/success')
def grant(request, wishid):
wish = Wish.objects.get(id = wishid)
wish.isgranted = True
wish.save()
return redirect('/success')
def like(request, wishid):
wish = Wish.objects.get(id = wishid)
wish.likes += 1
wish.save()
return redirect('/success')
def edit(request, wishid):
context = {
'wish' : Wish.objects.get(id = wishid)
}
return render(request, 'edit.html', context)
def update(request, wishid):
wish = Wish.objects.get(id = wishid)
wish.item = request.POST['item']
wish.desc = request.POST['desc']
    wish.save()
    return redirect('/success')  # a view must return a response; mirrors the other handlers
def stats(request):
loggedinUser = User.objects.get(id = request.session['loggedinID'])
allgrantedwishescount = len(Wish.objects.filter(isgranted = True))
mygrantedwishescount = len(Wish.objects.filter(user = loggedinUser, isgranted = True))
myungranted = len(Wish.objects.filter(user = loggedinUser, isgranted = False))
context = {
'loggedinUser' : loggedinUser,
'countofgranted': allgrantedwishescount,
'mycount' : mygrantedwishescount,
'myungranted' : myungranted,
}
return render(request, 'stats.html', context)
def logout(request):
request.session.clear()
return redirect('/')
| [
"[email protected]"
]
| |
aa2a8c8f570a1c0f44928db8d59780469b207993 | 4f97122844fb8cbaccf9ed9fa300a27a290d1a37 | /1/111.py | 0a46d7eb3eec2fe044cfcd027f9ffbf0dbd17e63 | []
| no_license | cq146637/Advanced | 52d97ab0f8e7ec85e6d81692e92bad967af066e6 | 18380e5c51124ef1e6d243ae216280b49edc7001 | refs/heads/master | 2020-03-22T03:05:02.960444 | 2018-07-02T08:30:27 | 2018-07-02T08:30:27 | 139,151,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | __author__ = 'Cq'
from collections import deque
import pickle
from random import randint
import os
result = randint(1,100)
print("result is ",result)
deque1 = deque([],5)
if os.path.isfile("save.data"):
    with open("save.data", "rb") as f_in:  # pickle files must be opened in binary mode
        deque1 = pickle.load(f_in)
while True:
k = input("\nplease input your guess number: ")
if k.isdigit():
k = int(k)
elif k == 'h' or k == 'H':
print("your input history is ",list(deque1))
else:
continue
if k != result:
if k > result:
print("your number is greater than result\n")
else:
print("your number is less than result\n")
deque1.append(k)
else:
print("It was good result...")
deque1.append(k)
break
if k == 100:
break
with open("save.data", "wb") as f:  # binary mode for pickle.dump; also closes the file
    pickle.dump(deque1, f)
"[email protected]"
]
| |
5ff88ef18493eedc1ff2c03369b53bedee882b04 | 0f297fb93f82b55c83817479af2e00bb737dcc93 | /实习小车启动代码/111/merg.py | f44ff12c93da1cff11d9a96f42d51c2890771ce5 | []
| no_license | yejiasheng/raspberry | 55c3dabf13fcff6dfeaddecbc72e2cf8968daaa3 | 27e1a95197a10583ce205bf40c04bcc8b76b2dc7 | refs/heads/main | 2023-07-25T02:57:38.875487 | 2021-09-07T01:45:36 | 2021-09-07T01:45:36 | 403,806,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,776 | py | from flask import Flask, render_template, Response
import sys
sys.path.append("/home/lzk/samples/common")
sys.path.append("../")
import os
import numpy as np
import acl
import time
import socket
import cv2
import traceback
from PIL import Image, ImageDraw, ImageFont
import atlas_utils.constants as const
from atlas_utils.acl_model import Model
from atlas_utils.acl_resource import AclResource
import atlas_utils.utils as utils
from atlas_utils.acl_dvpp import Dvpp
from atlas_utils.acl_image import AclImage
app = Flask(__name__)
camera = cv2.VideoCapture('rtsp://192.168.10.24/test') # use 0 for web camera
# for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera
# for local webcam use cv2.VideoCapture(0)
labels =["hand"]
MODEL_PATH = "/home/YJS/model/yolov3_me.om"
MODEL_WIDTH = 416
MODEL_HEIGHT = 416
class_num = 3
stride_list = [32, 16, 8]
anchors_3 = np.array([[12, 16], [19, 36], [40, 28]]) / stride_list[2]
anchors_2 = np.array([[36, 75], [76, 55], [72, 146]]) / stride_list[1]
anchors_1 = np.array([[142, 110], [192, 243], [459, 401]]) / stride_list[0]
anchor_list = [anchors_1, anchors_2, anchors_3]
conf_threshold = 0.8
iou_threshold = 0.3
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 255, 0)]
# Initialization
acl_resource = AclResource()
acl_resource.init()
model = Model("/home/YJS/model/yolov3_me.om")
def preprocess(image):#cv
image = Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
img_h = image.size[1] #360
img_w = image.size[0] #640
net_h = MODEL_HEIGHT #416
net_w = MODEL_WIDTH #416
scale = min(float(net_w) / float(img_w), float(net_h) / float(img_h)) #416/640
new_w = int(img_w * scale) #416
new_h = int(img_h * scale) #234
#delta = (MODEL_HEIGHT - int(image.size[1] * scale)) // 2
shift_x = (net_w - new_w) // 2 #0
shift_y = (net_h - new_h) // 2 #91
shift_x_ratio = (net_w - new_w) / 2.0 / net_w #0
shift_y_ratio = (net_h - new_h) / 2.0 / net_h #0.21875
image_ = image.resize((new_w, new_h))
new_image = np.zeros((net_h, net_w, 3), np.uint8)
new_image[shift_y: new_h + shift_y, shift_x: new_w + shift_x, :] = np.array(image_)
new_image = new_image.astype(np.float32)
new_image = new_image / 255
#print('new_image.shape', new_image.shape)
new_image = new_image.transpose(2, 0, 1).copy()
return new_image, image
def overlap(x1, x2, x3, x4):
left = max(x1, x3)
right = min(x2, x4)
return right - left
def cal_iou(box, truth):
w = overlap(box[0], box[2], truth[0], truth[2])
h = overlap(box[1], box[3], truth[1], truth[3])
if w <= 0 or h <= 0:
return 0
inter_area = w * h
union_area = (box[2] - box[0]) * (box[3] - box[1]) + (truth[2] - truth[0]) * (truth[3] - truth[1]) - inter_area
return inter_area * 1.0 / union_area
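# Quick sanity check for cal_iou (an added note, not part of the pipeline):
# two 2x2 boxes offset by one pixel overlap in a 1x1 region, so
# IoU = 1 / (4 + 4 - 1) = 1/7:
#   cal_iou([0, 0, 2, 2], [1, 1, 3, 3])  # -> ~0.142857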
def apply_nms(all_boxes, thres):
res = []
for cls in range(class_num):
cls_bboxes = all_boxes[cls]
sorted_boxes = sorted(cls_bboxes, key=lambda d: d[5])[::-1]
p = dict()
for i in range(len(sorted_boxes)):
if i in p:
continue
truth = sorted_boxes[i]
for j in range(i + 1, len(sorted_boxes)):
if j in p:
continue
box = sorted_boxes[j]
iou = cal_iou(box, truth)
if iou >= thres:
p[j] = 1
for i in range(len(sorted_boxes)):
if i not in p:
res.append(sorted_boxes[i])
return res
def _sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def decode_bbox(conv_output, anchors, img_w, img_h, x_scale, y_scale, shift_x_ratio, shift_y_ratio):
print('conv_output.shape', conv_output.shape)
_, _, h, w = conv_output.shape
conv_output = conv_output.transpose(0, 2, 3, 1)
pred = conv_output.reshape((h * w, 3, 5 + class_num))
pred[..., 4:] = _sigmoid(pred[..., 4:])
pred[..., 0] = (_sigmoid(pred[..., 0]) + np.tile(range(w), (3, h)).transpose((1, 0))) / w
pred[..., 1] = (_sigmoid(pred[..., 1]) + np.tile(np.repeat(range(h), w), (3, 1)).transpose((1, 0))) / h
pred[..., 2] = np.exp(pred[..., 2]) * anchors[:, 0:1].transpose((1, 0)) / w
pred[..., 3] = np.exp(pred[..., 3]) * anchors[:, 1:2].transpose((1, 0)) / h
bbox = np.zeros((h * w, 3, 4))
bbox[..., 0] = np.maximum((pred[..., 0] - pred[..., 2] / 2.0 - shift_x_ratio) * x_scale * img_w, 0) # x_min
bbox[..., 1] = np.maximum((pred[..., 1] - pred[..., 3] / 2.0 - shift_y_ratio) * y_scale * img_h, 0) # y_min
bbox[..., 2] = np.minimum((pred[..., 0] + pred[..., 2] / 2.0 - shift_x_ratio) * x_scale * img_w, img_w) # x_max
bbox[..., 3] = np.minimum((pred[..., 1] + pred[..., 3] / 2.0 - shift_y_ratio) * y_scale * img_h, img_h) # y_max
# print('bbox', bbox)
pred[..., :4] = bbox
pred = pred.reshape((-1, 5 + class_num))
# pred[:, 4] = np.max(pred[:, 5:], axis=-1)
pred[:, 4] = pred[:, 4] * pred[:, 5:].max(1)
pred[:, 5] = np.argmax(pred[:, 5:], axis=-1)
pred = pred[pred[:, 4] >= 0.2]
print('pred[:, 5]', pred[:, 5])
print('pred[:, 5] shape', pred[:, 5].shape)
# pred = pred[pred[:, 4] >= conf_threshold]
all_boxes = [[] for ix in range(class_num)]
for ix in range(pred.shape[0]):
box = [int(pred[ix, iy]) for iy in range(4)]
box.append(int(pred[ix, 5]))
box.append(pred[ix, 4])
all_boxes[box[4] - 1].append(box)
# print('all_boxes', all_boxes)
return all_boxes
def convert_labels(label_list):
if isinstance(label_list, np.ndarray):
label_list = label_list.tolist()
label_names = [labels[int(index)] for index in label_list]
return label_names
def construct_image_info():
"""construct image info"""
image_info = np.array([MODEL_WIDTH, MODEL_HEIGHT,
MODEL_WIDTH, MODEL_HEIGHT],
dtype = np.float32)
return image_info
def post_process(infer_output, origin_img):
"""postprocess"""
print("post process")
box_num = infer_output[1][0, 0]
print(infer_output[1][0, 0])
print("box num ", box_num)
box_info = infer_output[0].flatten()
scalex = origin_img.width / MODEL_WIDTH
delta = (MODEL_HEIGHT - int(origin_img.height * 416/640)) // 2 #91
print(delta)
scaley = origin_img.height / MODEL_HEIGHT
# if scalex > scaley:
# scaley = scalex
draw = ImageDraw.Draw(origin_img)
font = ImageFont.load_default()
for n in range(int(box_num)):
ids = int(box_info[5 * int(box_num) + n])
label = labels[ids]
score = box_info[4 * int(box_num)+n]
top_left_x = box_info[0 * int(box_num)+n] * scalex
top_left_y = (box_info[1 * int(box_num)+n]-delta)/234*360
bottom_right_x = box_info[2 * int(box_num) + n] * scalex
bottom_right_y = (box_info[3 * int(box_num) + n]-delta)/234*360
draw.line([(top_left_x, top_left_y), (bottom_right_x, top_left_y), (bottom_right_x, bottom_right_y), \
(top_left_x, bottom_right_y), (top_left_x, top_left_y)], fill=(0, 200, 100), width=5)
draw.text((top_left_x, top_left_y), label, font=font, fill=255)
num=0
if box_num==1:
            xpt=(top_left_x+bottom_right_x)/2  # center x of the detected (green) box
            ypt=(top_left_y+bottom_right_y)/2  # center y of the detected (green) box
            w = origin_img.size[0]  # image width
            h = origin_img.size[1]  # image height
# print(w)
# print(h)
            if 0<=ypt<(1/3)*h and ypt < (h/w)*xpt and ypt < -(h/w)*xpt+h:
                # print("forward!")
                # print(f"digit signal {num}")
                #draw.text((xpt, ypt), "forward", font=font, fill=255)
                num=0
            elif 0 <= xpt < (1/3)*w and (h/w)*xpt <= ypt <= -(h/w)*xpt+h:
                # print("turn right!")
                # print(f"digit signal {num}")
                #draw.text((xpt, ypt), "turn left", font=font, fill=255)
                num=1
            elif ypt > (h/w)*xpt and ypt>-(h/w)*xpt+h and (2/3)*h < ypt <= h:
                # print("backward!")
                # print(f"digit signal {num}")
                #draw.text((xpt, ypt), "backward", font=font, fill=255)
                num=2
            elif (2/3)*w < xpt <= w and -(h/w)*xpt+h <= ypt <= (h/w)*xpt:
                # print("turn left!")
                # print(f"digit signal {num}")
                #draw.text((xpt, ypt), "turn right", font=font, fill=255)
                num=3
            elif (1/3)*w <= xpt <= (2/3)*w and (1/3)*h <= ypt <= (2/3)*h:
                # print("stop!")
                # print(f"digit signal {num}")
                #draw.text((xpt, ypt), "stop", font=font, fill=255)
                num=4
            else:
                print("error")
        else:
            # print("detection failed")
            # print(f"digit signal {num}")
            num=4
return origin_img,num
def frameprocessing(frame):
w=640
h=360
    frame = cv2.flip(frame, 1)  # '==' silently discarded the flip; assign the flipped frame
image_info = construct_image_info()
data, orig = preprocess(frame)
result_list = model.execute([data,image_info])
# ret = acl.rt.synchronize_stream(0)
print(result_list)
image1 = Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
afterframe,num = post_process(result_list,image1)
afterframe = cv2.cvtColor(np.asarray(afterframe),cv2.COLOR_RGB2BGR)
    a = int(w/3)    # one third of the width
    b = int(2*w/3)  # two thirds of the width
    c = int(h/3)    # one third of the height
    d = int(2*h/3)  # two thirds of the height
e = int(w/3)+3
f = int(2*w/3)-3
cv2.line(afterframe, (0,0), (a,c), (0, 0, 255), 2)
cv2.line(afterframe, (a,c), (b,c), (0, 0, 255), 2)
cv2.line(afterframe, (b,c), (w,0), (0, 0, 255), 2)
cv2.line(afterframe, (a,c), (a,d), (0, 0, 255), 2)
cv2.line(afterframe, (a,d), (0,h), (0, 0, 255), 2)
cv2.line(afterframe, (a,d), (b,d), (0, 0, 255), 2)
cv2.line(afterframe, (b,d), (w,h), (0, 0, 255), 2)
    cv2.line(afterframe, (b,c), (b,d), (0, 0, 255), 2)  # the eight lines above draw the region-decision boundaries
cv2.line(afterframe, (e,0), (f,0), (0, 255, 0), 2)
cv2.line(afterframe, (e,h), (f,h), (0, 255, 0), 2)
cv2.line(afterframe, (e,0), (e,h), (0, 255, 0), 2)
cv2.line(afterframe, (f,0), (f,h), (0, 255, 0), 2)
return afterframe
def gen_frames(): # generate frame by frame from camera
while True:
# Capture frame-by-frame
success, frame = camera.read() # read the camera frame
# frame, num=ff.frameprocessing(frame)
if not success:
break
else:
            frame = cv2.imread('/home/YJS/111/1.jpg')  # debug: substitute a static test image for the camera frame
            frame = frameprocessing(frame)
            # the original re-read 1.jpg here, which discarded the processed frame
            ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') # concat frame one by one and show result
@app.route('/video_feed')
def video_feed():
#Video streaming route. Put this in the src attribute of an img tag
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def tttt():
fram = cv2.imread('/home/YJS/111/1.jpg')
frame=frameprocessing(fram) ###############
cv2.imwrite('/home/YJS/111/4.jpg',frame)
if __name__ == '__main__':
# tttt()
app.run(host="0.0.0.0",debug=True)
# fram = cv2.imread('/home/YJS/111/1.jpg')
# frame=frameprocessing(fram) ###############
# cv2.imwrite('/home/YJS/111/3.jpg',frame)
| [
"[email protected]"
]
| |
d29ec6f0c8570c9501336dd74aa36439125e85ec | 2a72e3bc6ef3ede7e5064f96256be27a5cbfa37b | /efforts/migrations/0001_initial.py | 0c063add58476a122db459b9195398d8bc3f2d72 | []
| no_license | muare/django_staffing_demo | d0690a7d89ef4736f245a2badafaf98186fab16d | ff471fff76122aff93d1203ee1fef3ef222d8ae6 | refs/heads/main | 2023-08-18T15:58:26.558492 | 2021-10-05T09:14:55 | 2021-10-05T09:14:55 | 413,739,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,830 | py | # Generated by Django 3.1.7 on 2021-10-02 02:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='客户名称')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
],
options={
'verbose_name': '客户',
'verbose_name_plural': '客户',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='产品名称')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
],
options={
'verbose_name': '产品',
'verbose_name_plural': '产品',
},
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='团队名称')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('parent_team', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='efforts.team', verbose_name='上级团队')),
],
options={
'verbose_name': '团队',
'verbose_name_plural': '团队',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='项目名称')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='efforts.customer', verbose_name='客户')),
('product', models.ManyToManyField(to='efforts.Product', verbose_name='产品')),
],
options={
'verbose_name': '项目',
'verbose_name_plural': '项目',
},
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('leader', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='efforts.employee', verbose_name='上级')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='efforts.team', verbose_name='团队名称')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='员工')),
],
options={
'verbose_name': '员工',
'verbose_name_plural': '员工',
},
),
migrations.CreateModel(
name='Effort',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('interval', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='工时(小时)')),
('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='efforts.employee', verbose_name='员工')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='efforts.project', verbose_name='项目')),
],
options={
'verbose_name': '工时',
'verbose_name_plural': '工时',
},
),
]
| [
"[email protected]"
]
| |
8293d30afaa2a09ca9a5a5b28a23d8ec1b213df5 | febe37fbe04f533778c60ecd669ca7010d6cfa26 | /excursions/models.py | 90f4198bc3561e88c771b8c0b1d27b94053fe5d6 | []
| no_license | landing-russia/i_evpatoria | 27c8785357bead0102b5823d316f51842fe263c1 | e4fedebee79c9ab456ce1664f951f66df610cd6c | refs/heads/main | 2023-04-09T00:40:36.576638 | 2021-04-21T06:00:20 | 2021-04-21T06:00:20 | 358,298,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | from django.db import models
class Exscursion(models.Model):
TYPES = (
('group', 'Групповая'),
('individual', 'Индивидуальная'),
)
name = models.CharField(max_length=255, verbose_name='Название экскурсии')
description = models.TextField(blank=True, verbose_name='Описание')
location = models.CharField(max_length=255, verbose_name='Локация', null=True, blank=True)
duration = models.CharField(max_length=255, verbose_name='Длительность', null=True, blank=True)
max_people_count = models.CharField(max_length=255, verbose_name='Кол-во человек', null=True, blank=True)
price = models.CharField(max_length=255, verbose_name='Стоимость', null=True, blank=True)
photo = models.ImageField(upload_to='excursions_photos', verbose_name='Фото', null=True, blank=True)
type = models.CharField(max_length=10, choices=TYPES, verbose_name='Тип', default='group')
is_published = models.BooleanField(default=True, verbose_name='Опубликовано')
created_at = models.DateTimeField(auto_now_add=True, verbose_name='Добавлено')
updated_at = models.DateTimeField(auto_now=True, verbose_name='Обновлено')
def __str__(self):
return self.name
class Meta:
verbose_name = 'Экскурсию'
verbose_name_plural = 'Экскурсии'
ordering = ['-created_at']
| [
"[email protected]"
]
| |
13447713655c5faee3e7511c439ebac65c7b3ee4 | 8fd09b12c42b9efaf7216d087a4ee586e72a10d4 | /wechat/urls.py | bc5ecf0d746df1b6bc6ae0775b734311764cdac2 | []
| no_license | aweirose/test | 324c4c1a0dba47ecc79115ebbefd851114317509 | 1277c0b8752e3f442583af01cecc459645561cce | refs/heads/master | 2023-01-12T21:57:54.927255 | 2018-09-12T07:34:54 | 2018-09-12T07:34:54 | 148,089,935 | 0 | 0 | null | 2022-12-27T15:35:11 | 2018-09-10T02:47:16 | JavaScript | UTF-8 | Python | false | false | 153 | py | from django.conf.urls import url
from wechat import views
urlpatterns = [
url('link/', views.link),
url('rose/', views.WeixinView.as_view()),
]
| [
"[email protected]"
]
| |
2ea42ed75506284aeaca6832127c5ac1f95139ab | c23b4c6253ca5a0d42822dd0d28ffa752c11ebf5 | /exercises/c3ec2a04-cbca-459a-951f-f17cc34310c7/skeletons/8fd3c5ac-35d2-40cd-9d21-77a4a6671d7c/skeleton4.py3 | e2a36f52de4511c924c13798bc533064cd0477c9 | []
| no_license | josepaiva94/e57d8867-6234-41a6-b239-2cd978ad1e70 | 803e2eb1e2db23c64409bc72ff00c4463875a82f | aa270941dd8cf7b2e1ec8ac89445b1ab3a47f89d | refs/heads/master | 2023-01-07T10:49:56.871378 | 2020-11-16T11:28:14 | 2020-11-16T11:28:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py3 | if largest != root_index:
nums[root_index], nums[largest] = nums[largest], nums[root_index]
heapify(nums, heap_size, largest)
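
# This fragment is the tail of a recursive max-heap sift-down; a complete
# function it could slot into might read as follows (a sketch inferred from
# the variable names, not the exercise's reference solution):
#
# def heapify(nums, heap_size, root_index):
#     largest = root_index
#     left, right = 2 * root_index + 1, 2 * root_index + 2
#     if left < heap_size and nums[left] > nums[largest]:
#         largest = left
#     if right < heap_size and nums[right] > nums[largest]:
#         largest = right
#     if largest != root_index:
#         nums[root_index], nums[largest] = nums[largest], nums[root_index]
#         heapify(nums, heap_size, largest)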
| [
"[email protected]"
]
| |
5d0be96f53a6eb3f0613b8b630d91e8bee7aab61 | 0cb6d07174a03218a67c968cca6bc6ccdded99ea | /CNN.py | 3d25d19f0a51dff4862156178b3cf653880c905c | []
| no_license | RabbitSea/ASVchallenge2019 | 6c4a87a16f1ab4106dbe15224d88b204fd2b8ca9 | 16d0bded6b10afe9d2c088dc1c52c67a63ce1898 | refs/heads/master | 2021-03-23T08:06:30.996110 | 2019-07-15T13:14:53 | 2019-07-15T13:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,574 | py | from __future__ import print_function, division
import warnings
warnings.filterwarnings('ignore')
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
import keras
from keras.utils.training_utils import multi_gpu_model
from keras.callbacks import LearningRateScheduler
import tensorflow as tf
from keras.utils import plot_model
from keras.datasets import mnist
from keras import regularizers
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D, MaxPooling1D, merge, LSTM, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D, Conv1D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import losses
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
import keras.backend as K
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelBinarizer
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, os, h5py, json
import numpy as np
np.set_printoptions(suppress=True)
from collections import defaultdict
####################################################################
# To Run:
# python CNN.py feature_group noise_std l2_val
# python CNN.py mfcc+rfcc 0.001 0.005
#####################################################################
NUM_EPOCHS = 10
frames = 10
task = "PA"
abgroup = sys.argv[1]
std = float(sys.argv[2])
l2_val = float(sys.argv[3]) # third positional arg per the usage banner above; [0.01, 0.03, 0.05, 0.07, 0.1, 0.15, 0.2, 0.25, 0.3]
coefs_ref = {"mfcc":70, "imfcc":60, "rfcc":30, "scmc":40, "mfccD":70, "imfccD":60, "rfccD":30, "scmcD":40, "mfccDD":70, "imfccDD":60, "rfccDD":30, "scmcDD":40, "cqcc":30, "cqccD":30, "cqccDD":30, "lfcc":70, "lfccD":70, "lfccDD":70, "xA":10, "xEA":10, "xE":10, "xEAs":10, "xEs":10,"xAs":10, "xS":10, "xSs":10}
pos = 1
neg = -1
num_feats = len(abgroup.split("+"))
F = abgroup.split("+")
C = []
for f in F:
c = coefs_ref[f]
C.append(c)
activation = "tanh"
loss = "mse"
# load up the x-vector attack embeddings
def load_xvec(feat, data):
infile = "xvecs/"+task+"_"+data+"_"+feat+".npy"
fdict = np.load(infile)[()]
X = np.array(list(fdict.values()))
print(X.shape)
y_data = np.array(list(fdict.keys()))
print(y_data.shape)
new_dict = defaultdict(list)
for i in range(0, y_data.shape[0]):
fname = y_data[i].split("_")[-1]
new_dict[fname] = X[i]
return new_dict
# load up the data
def load_h5data(feat, frame_bin, data):
infile = task+"_"+data+"_resample/"+task+"_"+data+"_"+feat+".h5"
coefs = coefs_ref[feat]
f = h5py.File(infile,'r')
data = f.get("Features")[:,:]
y = json.loads(f.get("Targets")[()])
X1 = data[:,:coefs*frames]
data_dict = defaultdict(list)
for i in range(0, len(y)):
try:
item = y[i]
dat = X1[i]
new = np.array(item.split(","))
fname = new[0].split(".wav")[0][8:]
data_dict[fname] = dat
p = fname
pd = dat
except:
data_dict[p] = pd
return data_dict
# load up the data
def load_h5data_eval(feat, frame_bin, data):
infile = task+"_"+data+"_resample/"+task+"_"+data+"_"+feat+".h5"
coefs = coefs_ref[feat]
f = h5py.File(infile,'r')
data = f.get("Features")[:,:]
y = json.loads(f.get("Targets")[()])
X1 = data[:,:coefs*frames]
data_dict = defaultdict(list)
for i in range(0, len(y)):
try:
item = y[i]
dat = X1[i]
new = np.array(item.split(","))
fname = new[0].split(".h5")[0].split("/")[-1]
data_dict[fname] = dat
p = fname
pd = dat
except:
data_dict[p] = pd
return data_dict
# rescale training data values
def scale_neg_pos(data, domain):
minval = domain[0]
ptpval = domain[1]
X_data = 2*(data - minval)/ptpval-1
return X_data
def scale_zero_one(data, domain):
out_range = (0,1)
X_data = (data - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])
X_data = X_data * (out_range[1] - out_range[0]) + (out_range[1] + out_range[0]) / 2
return X_data
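
# Illustrative numbers for scale_neg_pos (an added note, not original code):
# with domain = (min, ptp) = (0.0, 10.0) it maps 0.0 -> -1.0, 5.0 -> 0.0 and
# 10.0 -> 1.0; dev/eval reuse the training-set (min, ptp) below, so all splits
# share one scale. Note scale_zero_one instead expects domain = (low, high).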
def load_csv(infile):
input = open(infile, "r")
data = input.read().split("\n")[:-1]
input.close()
truth = defaultdict(int)
attacks = defaultdict(str)
spoofy = defaultdict(str)
T, F, A, S = [], [], [], []
for item in data:
fname, envID, attackID, t = item.split(",")
fname = fname.split(".wav")[0].split("_")[-1]
if t == "spoof":
truth[fname] = -1
T.append(-1)
if t == "bonafide":
truth[fname] = 1
T.append(1)
attacks[fname] = attackID
spoofy[fname] = t
F.append(fname)
A.append(attackID)
S.append(t)
return T, F, A, S
def load_csv_eval(infile):
input = open(infile, "r")
data = input.read().split("\n")[:-1]
input.close()
return data
def dataprep(X1_train):
domain1 = np.min(X1_train), np.ptp(X1_train)
X1_train = scale_neg_pos(X1_train, domain1)
X1_train = np.expand_dims(X1_train, axis=3)
return X1_train, domain1
def dataprep_test(X1_test,domain1):
X1_test = scale_neg_pos(X1_test, domain1)
X1_test = np.expand_dims(X1_test, axis=3)
return X1_test
def generate_cm_file_dev(frames, uttids, attackid, true_class, scores, l2_val, abgroup):
l2_val = str(l2_val).replace(".", "_")
scores_output_file = task+"_presubmission/scorethis_dev_adam."+abgroup+"."+l2_val+".txt"
scores_output_file2 = task+"_submission/scorethis_dev_adam."+abgroup+"."+l2_val+".txt"
output = open(scores_output_file, "w")
output2 = open(scores_output_file2, "w")
for i in range(0,len(uttids)):
fname = task+"_D_"+uttids[i]
attack = attackid[i]
true = true_class[i]
score = scores[i]
outstring = fname+" "+attack+" "+true+" "+str(score)+"\n"
output.write(outstring)
output2.write(outstring)
output.close()
output2.close()
def generate_cm_file_eval(uttids, scores, name):
scores_output_file = task+"_submission/Edinburgh-CSTR_"+task+"_primary_"+name+".txt"
output = open(scores_output_file, "w")
for i in range(0,len(uttids)):
if name == "dev":
fname = task+"_D_"+uttids[i]
if name == "eval":
fname = uttids[i]
score = scores[i]
outstring = fname+" "+str(score)+"\n"
output.write(outstring)
output.close()
#########################################################################################
# define this DNN
class DNN():
def __init__(self, total_cols, task):
self.rows = 1
self.cols = total_cols
self.channels = 1 #set to 3 if doing feat, featD, featDD
self.num_classes = 2 # spoof and bonafide, one hotted
self.shape = (self.cols, 1)
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
vec = Input(shape=self.shape)
self.classifier = self.build_classifier_CNN()
labels = self.classifier(vec)
self.classifier.compile(loss="mse",optimizer=optimizer,metrics=['mean_absolute_error'])
def build_classifier_CNN(self):
k = 3
m = 2
model1 = Sequential()
model1.add(GaussianNoise(std, input_shape=self.shape))
model1.add(BatchNormalization(input_shape=self.shape))
model1.add(Conv1D(filters=32, kernel_size=k,input_shape=(self.shape),activation='relu',kernel_regularizer=regularizers.l2(l2_val)))
model1.add(MaxPooling1D(m))
model1.add(Conv1D(filters=32, kernel_size=k,input_shape=(self.shape),activation='relu',kernel_regularizer=regularizers.l2(l2_val)))
model1.add(MaxPooling1D(m))
model1.add(Conv1D(filters=32, kernel_size=k,input_shape=(self.shape),activation='relu',kernel_regularizer=regularizers.l2(l2_val)))
model1.add(MaxPooling1D(m))
model1.add(Flatten())
model1.add(Dense(1, activation=activation))
model1.summary()
vec = Input(shape=(self.shape))
labels = model1(vec)
return Model(vec, labels)
def get_results(self, pred, truth, name):
ref = ["spoof", "bonafide"]
pred[pred>0] = 1
pred[pred<=0] = 0
truth[truth>0] = 1
truth[truth<=0] = 0
print(truth[0])
print(pred[0])
score = accuracy_score(truth, pred)
# save the output
outstring = "*********** "+name+" ***********\n"
outstring += name+" - acc: "+str(100*score)+"\n"
outstring += str(classification_report(truth, pred, target_names=ref))+"\n"
outstring += str(confusion_matrix(truth, pred))+"\n"
return outstring
def plot_history(self, H, abgroup, l2_val):
# grab the history object dictionary
H = H.history
# plot the training loss and accuracy
N = np.arange(0, len(H["loss"]))
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H["loss"], label="train_loss")
plt.plot(N, H["val_loss"], label="val_loss")
plt.plot(N, H["mean_absolute_error"], label="train_mae")
plt.plot(N, H["val_mean_absolute_error"], label="val_mae")
plt.title(task+" CNN Training")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Error")
plt.legend()
# save the figure
l2_val = str(l2_val).replace(".", "_")
plt.savefig(task+"_presubmission/plot_adam."+abgroup+"."+l2_val+".png")
plt.savefig(task+"_submission/plot_adam."+abgroup+"."+l2_val+".png")
plt.close()
#########################################################################################
if __name__ == '__main__':
D = []
Xtr, Xdev, Xeval = [], [], []
Ttrain, Ftrain, Atrain, Strain = load_csv(task+"_reference_train.csv")
Tdev, Fdev, Adev, Sdev = load_csv(task+"_reference_dev.csv")
Feval = load_csv_eval(task+"_reference_eval.csv")
total_cols = 0
for i in range(0, num_feats):
feat = F[i]
if feat[0] == "x":
X1_train_recon, X1_dev_recon, X1_eval_recon = [], [], []
X1_train_dict = load_xvec(feat, "train")
X1_dev_dict = load_xvec(feat, "dev")
X1_eval_dict = load_xvec(feat, "eval")
cols = 10
total_cols += cols
for i in range(0, len(Ftrain)):
f = Ftrain[i]
item = X1_train_dict[f]
X1_train_recon.append(item)
for i in range(0, len(Fdev)):
f = Fdev[i]
item = X1_dev_dict[f]
X1_dev_recon.append(item)
for i in range(0, len(Feval)):
f = Feval[i].split("_")[-1]
item = X1_eval_dict[f]
X1_eval_recon.append(item)
X1_train_recon = np.array(X1_train_recon)
X1_dev_recon = np.array(X1_dev_recon)
X1_eval_recon = np.array(X1_eval_recon)
X1_train = np.expand_dims(np.array(X1_train_recon), axis=3)
X1_dev = np.expand_dims(np.array(X1_dev_recon), axis=3)
X1_eval = np.expand_dims(np.array(X1_eval_recon), axis=3)
print(X1_train_recon.shape)
print(X1_dev_recon.shape)
print(X1_eval_recon.shape)
Xtr.append(X1_train)
Xdev.append(X1_dev)
Xeval.append(X1_eval)
else:
X1_train_recon, X1_dev_recon, X1_eval_recon = [], [], []
X1_train_dict = load_h5data(feat, frames, "train")
X1_dev_dict = load_h5data(feat, frames, "dev")
X1_eval_dict = load_h5data_eval(feat, frames, "eval")
coefs = coefs_ref[feat]
cols = 10*coefs
total_cols += cols
for i in range(0, len(Ftrain)):
f = Ftrain[i]
item = X1_train_dict[f]
X1_train_recon.append(item)
for i in range(0, len(Fdev)):
f = Fdev[i]
item = X1_dev_dict[f]
X1_dev_recon.append(item)
for i in range(0, len(Feval)):
f = Feval[i]
item = X1_eval_dict[f]
X1_eval_recon.append(item)
X1_train_recon = np.array(X1_train_recon)
X1_dev_recon = np.array(X1_dev_recon)
X1_eval_recon = np.array(X1_eval_recon)
X1_train, domain1 = dataprep(X1_train_recon)
X1_dev = dataprep_test(X1_dev_recon, domain1)
X1_eval = dataprep_test(X1_eval_recon, domain1)
print(X1_train_recon.shape)
print(X1_dev_recon.shape)
print(X1_eval_recon.shape)
Xtr.append(X1_train)
Xdev.append(X1_dev)
Xeval.append(X1_eval)
X_train = np.concatenate(Xtr, axis=1)
X_dev = np.concatenate(Xdev, axis=1)
X_eval = np.concatenate(Xeval, axis=1)
# initialize the DNN object
dnn = DNN(total_cols, task)
val_method = "val_loss"
val_mode = "min"
batch_size = 32
early_stopping = EarlyStopping(monitor=val_method,
min_delta=0,
patience=5,
mode=val_mode)
callbacks_list = [early_stopping]
############################# train the DNN on X1
DNN1 = dnn.classifier.fit(X_train, Ttrain,
batch_size=batch_size,
validation_data=[X_dev, Tdev],
epochs=NUM_EPOCHS, shuffle=True,
callbacks=callbacks_list)
dnn.plot_history(DNN1, abgroup,l2_val)
y_dev = dnn.classifier.predict(X_dev).reshape(len(Tdev))
generate_cm_file_dev(frames, Fdev, Adev, Sdev, y_dev, l2_val, abgroup)
generate_cm_file_eval(Fdev, y_dev, "dev")
y_eval = dnn.classifier.predict(X_eval).reshape(len(Feval))
generate_cm_file_eval(Feval, y_eval, "eval")
| [
"[email protected]"
]
| |
6028f8b7cd09174dc3376f24b44ceb20a2253006 | 3e19950b3f7d08a4d3f3b619e9d2be0891337701 | /code/models/py_utils/bbox/bbox_target.py | a4bcbdecd51837edb63e1ff4ce9ee98836a93d14 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | 360iQ/CPNDet | 9e3c9113a1c83f5b0f145e828dc52fb4aca458db | b15f0bf917f52f6a98909472e02680dfb5dcfd0c | refs/heads/master | 2023-04-23T12:33:26.210939 | 2021-05-18T13:02:07 | 2021-05-18T13:02:07 | 349,141,172 | 0 | 0 | MIT | 2021-05-14T11:12:03 | 2021-03-18T16:20:24 | Jupyter Notebook | UTF-8 | Python | false | false | 2,632 | py | import torch
import pdb
from .utils import multi_apply
from .transforms import bbox2delta
def bbox_target(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means, target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def expand_target(bbox_targets, bbox_weights, labels, num_classes):
bbox_targets_expand = bbox_targets.new_zeros(
(bbox_targets.size(0), 4 * num_classes))
bbox_weights_expand = bbox_weights.new_zeros(
(bbox_weights.size(0), 4 * num_classes))
for i in torch.nonzero(labels > 0).squeeze(-1):
start, end = labels[i] * 4, (labels[i] + 1) * 4
bbox_targets_expand[i, start:end] = bbox_targets[i, :]
bbox_weights_expand[i, start:end] = bbox_weights[i, :]
return bbox_targets_expand, bbox_weights_expand
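
# Minimal illustrative call of bbox_target_single (shapes are assumptions for
# a sketch; cfg only needs a .pos_weight attribute here):
#
# import torch
# pos = torch.tensor([[0., 0., 10., 10.]])    # one positive proposal
# neg = torch.tensor([[20., 20., 30., 30.]])  # one negative proposal
# gt  = torch.tensor([[1., 1., 11., 11.]])    # matched ground-truth box
# lbl = torch.tensor([1])
# labels, label_w, tgt, tgt_w = bbox_target_single(pos, neg, gt, lbl, cfg)
# # -> labels = [1, 0]; bbox_weights is 1 for the positive row only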
| [
"[email protected]"
]
| |
7cde6f03db1bf876413bb9b21f5e1abe9ea1d07a | d9d5f3b5c36cbe8daa1c6c1e2f7ebe0af929dc97 | /이런저런/자료구조/1406.py | 0b5b1b664a438340781fd5c6888eec1c13609006 | []
| no_license | raeyoungii/baekjoon | 28122b73309ace73e3f07c138407f7bba1cfe6e0 | 773271a163f23e549531569b7b7710481bd35260 | refs/heads/master | 2023-02-13T09:31:18.615702 | 2021-01-14T19:32:46 | 2021-01-14T19:32:46 | 277,856,368 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import sys
l_stk = list(sys.stdin.readline().strip())
M = int(sys.stdin.readline())
r_stk = []
for _ in range(M):
cmd = list(sys.stdin.readline().split())
if cmd[0] == 'P':
l_stk.append(cmd[1])
if cmd[0] == 'L':
if l_stk:
r_stk.append(l_stk.pop())
if cmd[0] == 'D':
if r_stk:
l_stk.append(r_stk.pop())
if cmd[0] == 'B':
if l_stk:
l_stk.pop()
print(''.join(l_stk) + ''.join(reversed(r_stk)))
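
# How the two stacks give an O(1) cursor (added note): l_stk holds everything
# left of the cursor (its top is the character just before it) and r_stk
# everything to the right, so 'L'/'D' move the cursor by popping one stack onto
# the other, 'B' deletes by popping l_stk, and 'P x' pushes onto l_stk.
# E.g. for "ab|c": l_stk = ['a', 'b'], r_stk = ['c'].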
| [
"[email protected]"
]
| |
fe5afc9879b959e3ea8af568f5faa66bfaa6b37f | 2bfefffbc80dde1ff6996a4c6da28a35a93bcfc1 | /ML_App/prediction.py | 7ff59a89bcbff012827803f43760b7801439b8bc | []
| no_license | Gozdescientist/Machine_Learning_app | 16679b22be56e2c44a54d74b5f1c9aa41584a7dd | 99716f145cb9cac89932d156720791bb89de4d58 | refs/heads/main | 2022-12-20T13:40:00.769806 | 2020-10-05T07:56:34 | 2020-10-05T07:56:34 | 300,936,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,448 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prediction.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1066, 694)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/bars.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("#centralwidget{\n"
"border-image: url(:/icons/icons/main.png);\n"
"}")
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Print")
font.setPointSize(25)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout_4.addWidget(self.label_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
spacerItem1 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.horizontalLayout_2.addItem(spacerItem1)
self.formFrame = QtWidgets.QFrame(self.centralwidget)
self.formFrame.setMinimumSize(QtCore.QSize(400, 220))
self.formFrame.setMaximumSize(QtCore.QSize(16777215, 100))
self.formFrame.setStyleSheet("#formFrame{\n"
"background-color: rgb(255, 255, 255);\n"
"border-radius: 10px\n"
"}")
self.formFrame.setObjectName("formFrame")
self.formLayout = QtWidgets.QFormLayout(self.formFrame)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.formFrame)
self.label.setMaximumSize(QtCore.QSize(16777215, 300))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.label)
self.lineEdit_username = QtWidgets.QLineEdit(self.formFrame)
self.lineEdit_username.setMinimumSize(QtCore.QSize(0, 30))
self.lineEdit_username.setObjectName("lineEdit_username")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEdit_username)
self.label_2 = QtWidgets.QLabel(self.formFrame)
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.label_2)
self.lineEdit_password = QtWidgets.QLineEdit(self.formFrame)
self.lineEdit_password.setMinimumSize(QtCore.QSize(0, 30))
self.lineEdit_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEdit_password.setObjectName("lineEdit_password")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.lineEdit_password)
self.label_4 = QtWidgets.QLabel(self.formFrame)
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.label_4)
self.PushButton_signup = QtWidgets.QPushButton(self.formFrame)
self.PushButton_signup.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.PushButton_signup.setFont(font)
self.PushButton_signup.setStyleSheet("color: rgb(0, 0, 0);\n"
"border-right-color: rgb(0, 0, 0);\n"
"border-color: rgb(85, 0, 255);\n"
"background-color: rgb(174, 229, 183);\n"
"border-radius: 10px\n"
"")
self.PushButton_signup.setObjectName("PushButton_signup")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.PushButton_signup)
self.horizontalLayout_2.addWidget(self.formFrame)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
spacerItem4 = QtWidgets.QSpacerItem(20, 120, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.horizontalLayout.addItem(spacerItem4)
self.PushButton_login = QtWidgets.QPushButton(self.centralwidget)
self.PushButton_login.setMinimumSize(QtCore.QSize(150, 70))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.PushButton_login.setFont(font)
self.PushButton_login.setStyleSheet("color: rgb(0, 0, 0);\n"
"border-right-color: rgb(0, 0, 0);\n"
"border-color: rgb(85, 0, 255);\n"
"background-color: rgb(75, 150, 225);\n"
"border-radius: 10px\n"
"")
self.PushButton_login.setObjectName("PushButton_login")
self.horizontalLayout.addWidget(self.PushButton_login)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem5)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.label_5 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(8)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setStyleSheet("")
self.label_5.setObjectName("label_5")
self.verticalLayout_4.addWidget(self.label_5)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1066, 26))
self.menubar.setObjectName("menubar")
self.menuApplication = QtWidgets.QMenu(self.menubar)
self.menuApplication.setObjectName("menuApplication")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.menuApplication.addAction(self.actionExit)
self.menubar.addAction(self.menuApplication.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Welcome!!"))
self.label_3.setText(_translate("MainWindow", "Machine Learning Predictions"))
self.label.setText(_translate("MainWindow", "Username"))
self.label_2.setText(_translate("MainWindow", "Password"))
self.label_4.setText(_translate("MainWindow", "Don\'t have an account?"))
self.PushButton_signup.setText(_translate("MainWindow", "SignUp"))
self.PushButton_login.setText(_translate("MainWindow", "Login"))
self.label_5.setText(_translate("MainWindow", "This application aims to analyze the business processes of different departments of the Group Company and obtain various predictions. All rights reserved."))
self.menuApplication.setTitle(_translate("MainWindow", "Application"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
import icons_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
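# Hedged sketch of wiring the generated widgets to application logic (the
# handler below is hypothetical and belongs outside this generated file,
# which pyuic overwrites on regeneration):
#
#   def on_login_clicked():
#       user = ui.lineEdit_username.text()
#       pwd = ui.lineEdit_password.text()
#       print(user, len(pwd))   # replace with real authentication
#
#   ui.PushButton_login.clicked.connect(on_login_clicked)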
| [
"[email protected]"
]
| |
a6c505b33f88650962566a521d6430300a80475b | 3c7320a0ccc22c105e8c6f18e84fc9566770c5da | /code_and_map.py | e395332fd4bbac1da5b9b4fd9ab1c36a344b3226 | []
| no_license | MrDenexi/Tagpro-Replay-Render2 | cc6da500164efaf46157662be101bfc30d8c385f | 639c94a829a86b5e2f063b9e952815b9e90ebbb6 | refs/heads/master | 2021-06-10T17:29:17.984665 | 2016-11-30T03:24:11 | 2016-11-30T03:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | map_codes = {
"0" : "black",
"1" : "wall",
"1.2" : "225 tile",
"1.3" : "135 tile",
"1.4" : "45 tile",
"1.1" : "315 tile",
"2" : "floor",
"3" : "red flag",
"3.1" : "red flag away",
"4" : "blue flag",
"4.1" : "blue flag away",
"5" : "boost",
"5.1" : "boost off",
"6" : "powerup off",
"6.3" : "tagpro",
"6.1" : "jukejuice",
"6.2" : "rolling bomb",
"7" : "spike",
"8" : "button",
"9" : "gate off",
"9.1" : "gate neutral",
"9.2" : "gate red",
"9.3" : "gate blue",
"10" : "bomb",
"10.1" : "bomb off",
"11" : "red speed",
"12" : "blue speed",
"13" : "portal",
"13.1" : "portal off",
"14" : "red boost",
"14.1" : "red boost off",
"15" : "blue boost",
"15.1" : "blue boost off",
"16" : "neutral flag",
"16.1" : "neutral flag away",
"17" : "red endzone",
"18" : "blue endzone"
}
tiles_map = {
"spike" : (480, 0),
"red ball" : (560, 0),
"blue ball" : (600, 0),
"bomb" : (480, 40),
"bomb off" : (480, 80),
"neutral flag" : (520, 40),
"neutral flag away" : (520, 80),
"red flag" : (560, 40),
"red flag away" : (560, 80),
"blue flag" : (600, 40),
"blue flag away" : (600, 80),
"gate off" : (480, 120),
"gate neutral" : (520, 120),
"gate red" : (560, 120),
"gate blue" : (600, 120),
"floor" : (520, 160),
"red speed" : (560, 160),
"blue speed" : (600, 160),
"red endzone" : (560, 200),
"blue endzone" : (600, 200),
"button" : (520, 240),
"black" : (560, 360),
"wall" : (600, 240),
"tagpro" : (480, 240),
"jukejuice" : (480, 160),
"rolling bomb" : (480, 200),
"powerup off" : (480, 320),
"315 tile" : (600, 280),
"45 tile" : (600, 320),
"225 tile" : (600, 360),
"135 tile" : (600, 400),
"mars ball" : (480, 360),
}
portal_map = {
"portal" : ((0, 0), (40, 0), (80, 0), (120, 0)),
"portal off" : (160, 0)
}
boost_map = {
"boost" : ((0, 0), (40, 0), (80, 0), (120, 0)),
"boost off" : (160, 0)
}
boost_red_map = {
"red boost" : ((0, 0), (40, 0), (80, 0), (120, 0)),
"red boost off" : (160, 0)
}
boost_blue_map = {
"blue boost" : ((0, 0), (40, 0), (80, 0), (120, 0)),
"blue boost off" : (160, 0)
}
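# Hedged lookup sketch (coordinates above imply a 40x40-pixel spritesheet grid):
#
#   def sprite_coords(code):
#       """Map a raw map code like "9.2" to spritesheet (x, y) pixels."""
#       name = map_codes[code]     # e.g. "gate red"
#       return tiles_map[name]     # e.g. (560, 120)
#
#   assert sprite_coords("7") == (480, 0)   # spike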
| [
"[email protected]"
]
| |
b8b5d53aedd215e4c38db5455b764f4b73bb83b5 | 3420aba3622faf2d4aede984c656f68ad24a1f3c | /backend/personal_care_22730/settings.py | 230da7088fe365290e5935afd842c015a2ea9d7d | []
| no_license | crowdbotics-apps/personal-care-22730 | bb81af122e64cb58f6d52df31df328b6dfa4b25d | 066d2cd5e890057df054ea7c5b3b5f061e872371 | refs/heads/master | 2023-01-11T06:30:05.971088 | 2020-11-18T16:23:30 | 2020-11-18T16:23:30 | 313,990,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,048 | py | """
Django settings for personal_care_22730 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"healthcare",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "personal_care_22730.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "personal_care_22730.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
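# Hedged sketch of a local development environment for the settings above
# (variable names are the ones read via `env`; the values are placeholders):
#
#   DEBUG=True
#   SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:pass@localhost:5432/personal_care
#   SENDGRID_USERNAME=apikey
#   SENDGRID_PASSWORD=replace-with-sendgrid-key
#   FCM_SERVER_KEY=replace-with-fcm-key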
| [
"[email protected]"
]
| |
385da3ba92840752f9a7dafb9d06cf97dd3f612f | 7e59f4738cf73d0fde97a1eec1768fb247ca189a | /command/status.py | 7fca29c44f3ba09e5aa54eea963e5b4979b5c644 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | fossabot/Desert-Fireball-Maintainence-GUI | d48e1ae491ced17deb2b0f1cf1fe5e9d2145f258 | dd9167e85aedd183e5c91370b8be0ce9879beb98 | refs/heads/master | 2021-09-17T13:57:30.596073 | 2018-07-02T10:18:14 | 2018-07-02T10:18:14 | 115,166,948 | 0 | 0 | null | 2017-12-23T03:16:00 | 2017-12-23T03:16:00 | null | UTF-8 | Python | false | false | 1,832 | py | # ADVANCED UTILITIES
import datetime
import os
from backend import constants
from command import exec_console_command
def get_log(directory):
"""
Fetches the file path of a text logfile on the file system.
Args:
directory (str): The directory to get the logfile from. Format::
/data0/ + directory
Returns:
foundFile (str): The file path of the found logfile.
"""
filenames = exec_console_command(constants.getLogfileName.format(directory))
foundfile = filenames.split('\n')[0]
return foundfile
def latest_log():
"""Fetches the latest log file."""
environment = os.getenv('APP_SETTINGS')
if environment is "prod":
path = "/data0/latest/" + get_log("latest")
else:
import basedir
path = os.path.join(basedir.basedir, 'dfn-gui-server.log')
if os.path.exists(path):
logfile = open(path, 'rb').read()
file_state = os.stat(path)
timestamp = datetime.datetime.fromtimestamp(file_state.st_mtime).strftime('%d-%m-%Y %H:%M:%S')
return logfile, timestamp
else:
raise AttributeError("Unable to locate the latest log file: " + path)
def second_latest_log():
"""Fetches the second latest log file."""
environment = os.getenv('APP_SETTINGS')
if environment is "prod":
path = "/data0/latest_prev/" + get_log("latest_prev")
else:
import basedir
path = os.path.join(basedir.basedir, 'dfn-gui-server.log')
if os.path.exists(path):
logfile = open(path, 'rb').read()
file_state = os.stat(path)
timestamp = datetime.datetime.fromtimestamp(file_state.st_mtime).strftime('%d-%m-%Y %H:%M:%S')
return logfile, timestamp
else:
raise AttributeError("Unable to locate the second latest log file: " + path)
| [
"[email protected]"
]
| |
46f75a94f867ab61447a4484ebbc02589363e18f | 332d3cb0dc123541e436ac620477dd4560438d6b | /SoMax_1.5_Max7/mkcorpus/CorpusBuilder.py | c363c688b11dfb9696828deea661bad476a79298 | []
| no_license | DYCI2/Somax | 7c160e842a6224fcb2c1181de4bfeb389834a606 | 14ed895719a336d719e6c5b669144b42bf607ad0 | refs/heads/master | 2021-06-26T02:50:11.697157 | 2020-10-02T10:28:13 | 2020-10-02T10:28:13 | 133,677,823 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,149 | py | from string import split
import sys, os, importlib
import logging, settings
from ops import OpSomaxStandard, OpSomaxHarmonic, OpSomaxMelodic
class CorpusBuilder:
""" Main class to instantiate to achieve corpus construction. """
def __init__(self, input_path, foreground_channels=None, self_bg_channels=None, mel_bg_channels=None,
harm_bg_channels=None, corpus_name=None, uses_legacy_parser=False, **kwargs):
""" Generates a list of files and operations (legacy: based on existing files in corpus path) required for
building the corpus but does not create the actual files."""
self.logger = logging.getLogger(settings.MAIN_LOGGER)
if 'callback_dic' in kwargs.keys():
self.callback_dic = kwargs["callback_dic"]
else:
self.callback_dic = {'': 'OpSomaxStandard', 'h': 'OpSomaxHarmonic', 'm': 'OpSomaxMelodic'}
self.input_path = str(input_path)
if corpus_name is None:
# without explicit corpus name, take the name of the corpus path
self.corpus_name = os.path.splitext(os.path.basename(input_path))[0]
else:
self.corpus_name = corpus_name
self.logger.debug('Corpus name set to {}'.format(self.corpus_name))
# TODO: Clean up! This could be simplified a lot!
# If the very ugly lambda expression in generate_ops can be removed, ops_filepaths does not have
# to be global. then self.generate_ops could return ops, i.e. self.ops = self.generate_ops().
self.ops = dict() # type: {str: (MetaOp, [str])}
self.ops_filepaths = dict() # type: {str: [str]}
self.generate_ops(input_path, foreground_channels, self_bg_channels, mel_bg_channels, harm_bg_channels,
uses_legacy_parser)
# self.debug_print_ops()
def generate_ops(self, input_path, foreground_channels, self_bg_channels, mel_bg_channels, harm_bg_channels,
uses_legacy_parser):
"""Generates the dict containing the corresponding `MetaOp`s.
Always adds OpSomaxStandard, OpSomaxMelodic and OpSomaxHarmonic.
If legacy flag: Will check the folder for separate files with names _h.mid or _m.mid, if either of
those exist, OpSomaxHarmonic and/or OpSomaxMelodic will be generated with these as input
files.
If legacy flag is not set or the above mentioned files do not exist: the default midi file will be used to
generate these
(Old docstring: the CorpusBuilder, at initialization, builds a proposition for the operations to be made.
the operation dictionary is a dictionary labelled by suffix containing the files to be
analyzed. the operation corresponding to a given suffix will be so executed to whole of the
files.)
"""
if os.path.isfile(input_path):
# if input is a file and legacy flag is set, scan the current folder to get the files
if uses_legacy_parser:
self.ops_filepaths = self.get_linked_files_legacy(self.input_path)
# otherwise create dictionary with same formatting as legacy parser, containing only the main item
else:
self.ops_filepaths = {settings.STANDARD_FILE_EXT: [input_path]}
elif os.path.isdir(input_path):
# if a folder, scan the given folder with files in it
os.path.walk(input_path, lambda a, d, n: self.store_filepaths(a, d, n, uses_legacy_parser), input_path)
else:
            # Note! This error has most likely been caught earlier, but will be kept just in case.
self.logger.critical("The corpus file(s) were not found! Terminating script without output.")
sys.exit(1)
# Dynamic Generation of SomaxOp objects
for key, filepaths in self.ops_filepaths.iteritems():
op_class = getattr(importlib.import_module("ops"), self.callback_dic[key])
op_object = op_class(filepaths, self.corpus_name)
self.ops[key] = op_object
self.logger.debug("Added operator {0} related to file(s) {1}".format(self.callback_dic[key], filepaths))
# Adding harmonic and melodic output files if no matching midi files are found
if settings.MELODIC_FILE_EXT not in self.ops.keys():
standard_filepaths = self.ops[settings.STANDARD_FILE_EXT].getFilePaths()
if self.all_files_are_midi(standard_filepaths):
self.ops[settings.MELODIC_FILE_EXT] = OpSomaxMelodic(standard_filepaths, self.corpus_name)
self.logger.debug("No _m file found. Added Melodic operator based on standard file(s) ({0})."
.format(standard_filepaths))
if settings.HARMONIC_FILE_EXT not in self.ops.keys():
standard_filepaths = self.ops[settings.STANDARD_FILE_EXT].getFilePaths()
self.ops[settings.HARMONIC_FILE_EXT] = OpSomaxHarmonic(standard_filepaths, self.corpus_name)
self.logger.debug("No _h file found. Added Harmonic operator based on based on standard file(s) ({0})."
.format(standard_filepaths))
# Setting the channel values for each operator according to input specification
for key, op in self.ops.iteritems():
self.set_channels(op, key, foreground_channels, self_bg_channels, mel_bg_channels, harm_bg_channels)
def set_channels(self, op_object, key, foreground_channels, self_bg_channels, mel_bg_channels, harm_bg_channels):
op_object.setFgChannels(foreground_channels)
if key == settings.STANDARD_FILE_EXT:
op_object.setBgChannels(self_bg_channels)
if key == settings.MELODIC_FILE_EXT:
op_object.setBgChannels(mel_bg_channels)
if key == settings.HARMONIC_FILE_EXT:
op_object.setBgChannels(harm_bg_channels)
def build_corpus(self, output_folder):
output_files = []
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for key, op in self.ops.iteritems():
if key != settings.STANDARD_FILE_EXT:
output_file = output_folder + self.corpus_name + '_' + key + '.json'
else:
output_file = output_folder + self.corpus_name + '.json'
for path in op.getFilePaths():
if not os.path.splitext(path)[-1] in op.admitted_extensions:
self.logger.critical("File {0} is not understood by operation {1}. This should not have occurred."
"Script terminating without output.".format(path, self.callback_dic[key]))
# Run the actual operator
op.process(output_file)
output_files.append(output_file)
return output_files
def store_filepaths(self, corpus_path, dirname, names, uses_legacy_parser):
"""function called to build the operation dictionary on every file of a folder."""
names = filter(lambda x: x[0] != '.', names) # exclude hidden files
file_dict = dict()
Op = getattr(importlib.import_module("ops"), self.callback_dic[''])
if uses_legacy_parser:
main_files = filter(lambda x: len(x.split('_')) == 1 and os.path.splitext(x)[1] in Op.admitted_extensions,
names)
else:
main_files = filter(lambda x: os.path.splitext(x)[1] in Op.admitted_extensions, names)
file_dict[''] = map(lambda x: dirname + '/' + x, main_files)
        # look for companion files whose names match "<main_file>_<suffix>"
potential_files = filter(
lambda x: "".join(x.split('_')[:-1]) in map(lambda x: os.path.splitext(x)[0], main_files), names)
for f in potential_files:
suffix = os.path.splitext(f)[0].split('_')[-1]
try:
file_dict[suffix].append(dirname + '/' + f)
except KeyError:
file_dict[suffix] = [dirname + '/' + f]
        # TODO: handle this properly!
for k, v in file_dict.iteritems():
if k != '':
if len(v) < len(file_dict[""]):
print "missing object"
elif len(v) > len(file_dict[""]):
print "too many object"
self.ops_filepaths = file_dict
def get_linked_files_legacy(self, input_file):
dir_name = os.path.dirname(input_file) + '/'
corpus_name = os.path.splitext(os.path.basename(input_file))[0]
if '_' in corpus_name:
self.logger.critical('Invalid name provided for corpus: the midi file must not contain underscores (_). \n'
+ settings.CRITICAL_INDENT +
'Note that script should never be run on _h.mid or _m.mid files: these will\n'
+ settings.CRITICAL_INDENT +
'automatically be loaded when running the script on the .mid file.\n'
+ settings.CRITICAL_INDENT +
'Terminating the script without output.')
sys.exit(1)
files = os.listdir(dir_name)
file_dict = dict()
for f in files:
name, ext = os.path.splitext(f)
parts = split(name, '_')
if parts[0] == corpus_name:
if len(parts) == 1:
Op = getattr(importlib.import_module("ops"), self.callback_dic[''])
if ext in Op.admitted_extensions:
file_dict[''] = [dir_name + f]
else:
Op = getattr(importlib.import_module("ops"), self.callback_dic[parts[-1]])
if ext in Op.admitted_extensions:
file_dict[parts[-1]] = [dir_name + f]
self.logger.debug("Relevant files found: {}".format(file_dict))
return file_dict
def all_files_are_midi(self, filepaths):
return all([os.path.splitext(p)[-1] in settings.MIDI_EXTENSIONS for p in filepaths])
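# Hedged usage sketch (arguments mirror __init__ above; the paths and channel
# numbers are illustrative, and the output file order may vary):
#
#   builder = CorpusBuilder('corpus/MyTune.mid',
#                           foreground_channels=[1], self_bg_channels=[2],
#                           mel_bg_channels=[2], harm_bg_channels=[2, 3])
#   output_files = builder.build_corpus('corpus_out/')
#   # -> ['corpus_out/MyTune.json', 'corpus_out/MyTune_m.json',
#   #     'corpus_out/MyTune_h.json']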
| [
"[email protected]"
]
| |
6f72d08d18e271030a22d27c0ad5ea5646058ea9 | 8833c1472b9d9fb19c3ba94df83774ade0268beb | /image_download.py | 69c54c5ac586f3c5542859eea588180109a5574a | []
| no_license | vimkaf/Learning-Python | 1e83d6a562dd7aa6eb65e3a9bc23787a10170f03 | 16f1988a2940ece42e2b3081cab476109f0648ce | refs/heads/master | 2020-04-14T09:49:20.060701 | 2019-01-01T22:16:19 | 2019-01-01T22:16:19 | 163,768,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import random
import urllib.request
def download_image(url):
file_name = str(random.randrange(1,1000)) + ".jpg"
urllib.request.urlretrieve(url,file_name)
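# Hedged variant with explicit error handling (assumes `import urllib.error`;
# urlretrieve still works but is documented as a legacy interface):
#
#   def download_image_checked(url):
#       file_name = str(random.randrange(1, 1000)) + ".jpg"
#       try:
#           urllib.request.urlretrieve(url, file_name)
#       except urllib.error.URLError as exc:
#           print("download failed:", exc)
#       return file_name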
download_image("https://proxy.duckduckgo.com/iu/?u=https%3A%2F%2Fguardian.ng%2Fwp-content%2Fuploads%2F2017%2F08%2FOlanrewaju-kayode.jpg&f=1") | [
"[email protected]"
]
| |
89319c9b50d8e57de117d8156c74826de6ff872a | 1c7b9e6d3430b791abd7f821661050a6eb9f0ac0 | /migrations/versions/beec3b9620f4_clean_app_setup.py | ec45ddd74581957ff21fe3b9e566e425091ba258 | []
| no_license | jtemplon/stock-exchange-app | 9e2324a3707c6d472078b1cbcf6e22b798b333e9 | b5af33260b9a545349b04ba7fe54686b627c9d46 | refs/heads/master | 2020-04-06T12:59:56.400675 | 2019-01-21T14:36:02 | 2019-01-21T14:36:02 | 157,479,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | """clean app setup
Revision ID: beec3b9620f4
Revises:
Create Date: 2018-11-13 09:10:09.576304
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'beec3b9620f4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('stock',
sa.Column('name', sa.String(length=120), nullable=False),
sa.Column('price', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('cash', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.create_table('holding',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('stock', sa.String(length=120), nullable=True),
sa.Column('shares', sa.Integer(), nullable=True),
sa.Column('purchase_price', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['stock'], ['stock.name'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('transaction',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('shares', sa.Integer(), nullable=True),
sa.Column('team', sa.String(length=120), nullable=True),
sa.Column('price', sa.Float(), nullable=True),
sa.Column('buy_or_sell', sa.String(length=120), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_transaction_timestamp'), 'transaction', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_transaction_timestamp'), table_name='transaction')
op.drop_table('transaction')
op.drop_table('holding')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_table('stock')
# ### end Alembic commands ###
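# Hedged usage note: with this revision on Alembic's version path, the schema
# is applied or rolled back from the command line:
#
#   alembic upgrade head     # runs upgrade() above
#   alembic downgrade -1     # runs downgrade()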
| [
"[email protected]"
]
| |
509b104aab4713a3c0671c13fcbb53ff0fb05ea2 | 7c9b25a10af05e25e24a574a9e97769ead0a5a39 | /myblog/urls.py | 7d572b019adb43b422f85effcab9b7c5bde93189 | []
| no_license | mnamegaurav/django_personal_blog | 8beef6fe3e3c87de726a9f958edc512689bed8dc | e5189edd0e8afa1534f6bbfd19c49e30760fb342 | refs/heads/master | 2023-04-04T12:18:41.914691 | 2021-04-17T05:35:17 | 2021-04-17T05:35:17 | 340,793,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | """myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from users import views as user_views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('register/', user_views.register,name='register'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'),name='logout'),
path('profile/', user_views.profile,name='profile'),
]
| [
"[email protected]"
]
| |
58b45bd1c481e89aa008bdccbc1ec6900b6e8f90 | 92481be14549fb5ebde77adf64aebe90b7c04b00 | /raspberry_scripts/connect.py | d8cb30fe3126358471c5886b72a9b21cf0aec980 | []
| no_license | piskula/intelligent-house | 71850af149dbe76fe1fc2a97bade49a3b4c65690 | c5bb768ac3cfd94fa3e6b1aa2c3cc33ade3dea6b | refs/heads/master | 2020-04-05T05:51:56.579073 | 2018-12-25T16:14:51 | 2018-12-25T16:14:51 | 156,614,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import yaml
import os
from time import sleep
print('starting cron')
with open("/home/pi/intelligent-house/raspberry_scripts/config.yml", 'r') as ymlfile:
    cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)  # explicit loader; bare yaml.load() is deprecated and unsafe
temperatureSensorDelay = cfg['delays']['temperature']
humiditySensorDelay = cfg['delays']['humidity']
for temperatureSensor in cfg['sensors']['temperature']:
sleep(60)
pathToFile = cfg['sensors']['temperature'][temperatureSensor]['path']
# os.system(f'nohup python3 -u temperature.py {pathToFile} {temperatureSensorDelay} {temperatureSensor} &')
os.system('nohup python3 -u /home/pi/intelligent-house/raspberry_scripts/temperature.py ' + pathToFile + ' ' + str(temperatureSensorDelay) + ' ' + temperatureSensor + ' > /home/pi/out_' + temperatureSensor + '.txt &')
for humiditySensor in cfg['sensors']['humidity']:
sleep(60)
gpioPin = str(cfg['sensors']['humidity'][humiditySensor])
os.system('nohup python3 -u /home/pi/intelligent-house/raspberry_scripts/humidity.py ' + gpioPin + ' ' + str(humiditySensorDelay) + ' ' + humiditySensor + ' > /home/pi/out_' + humiditySensor + '.txt &')
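# Hedged sketch of the config.yml layout this script expects (keys inferred
# from the lookups above; sensor names, path and pin values are placeholders):
#
#   delays:
#     temperature: 300
#     humidity: 300
#   sensors:
#     temperature:
#       livingroom:
#         path: /sys/bus/w1/devices/28-000000000000/w1_slave
#     humidity:
#       bathroom: 4      # GPIO pin number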
| [
"[email protected]"
]
| |
56c15e78ef411bada79abd374bd7d67e36ff9929 | 234c7fb0bdabdd696c8e4c6a449ac2c8e3f14ad5 | /build/PureCloudPlatformClientV2/models/workday_values_trend.py | 2cd0430e7aa02ba9e1bd12ac92f707c225c96002 | [
"Apache-2.0",
"MIT"
]
| permissive | humano7/platform-client-sdk-python | 2a942c43cc2d69e8cb0c4113d998e6e0664fdedb | dd5b693b1fc90c9dcb36885d7227f11221db5980 | refs/heads/master | 2023-04-12T05:05:53.932393 | 2021-04-22T03:41:22 | 2021-04-22T03:41:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,713 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WorkdayValuesTrend(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
WorkdayValuesTrend - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'date_start_workday': 'date',
'date_end_workday': 'date',
'division': 'Division',
'user': 'UserReference',
'timezone': 'str',
'results': 'list[WorkdayValuesMetricItem]'
}
self.attribute_map = {
'date_start_workday': 'dateStartWorkday',
'date_end_workday': 'dateEndWorkday',
'division': 'division',
'user': 'user',
'timezone': 'timezone',
'results': 'results'
}
self._date_start_workday = None
self._date_end_workday = None
self._division = None
self._user = None
self._timezone = None
self._results = None
@property
def date_start_workday(self):
"""
Gets the date_start_workday of this WorkdayValuesTrend.
The start workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:return: The date_start_workday of this WorkdayValuesTrend.
:rtype: date
"""
return self._date_start_workday
@date_start_workday.setter
def date_start_workday(self, date_start_workday):
"""
Sets the date_start_workday of this WorkdayValuesTrend.
The start workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:param date_start_workday: The date_start_workday of this WorkdayValuesTrend.
:type: date
"""
self._date_start_workday = date_start_workday
@property
def date_end_workday(self):
"""
Gets the date_end_workday of this WorkdayValuesTrend.
The end workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:return: The date_end_workday of this WorkdayValuesTrend.
:rtype: date
"""
return self._date_end_workday
@date_end_workday.setter
def date_end_workday(self, date_end_workday):
"""
Sets the date_end_workday of this WorkdayValuesTrend.
The end workday for the query range for the metric value trend. Dates are represented as an ISO-8601 string. For example: yyyy-MM-dd
:param date_end_workday: The date_end_workday of this WorkdayValuesTrend.
:type: date
"""
self._date_end_workday = date_end_workday
@property
def division(self):
"""
Gets the division of this WorkdayValuesTrend.
The targeted division for the query
:return: The division of this WorkdayValuesTrend.
:rtype: Division
"""
return self._division
@division.setter
def division(self, division):
"""
Sets the division of this WorkdayValuesTrend.
The targeted division for the query
:param division: The division of this WorkdayValuesTrend.
:type: Division
"""
self._division = division
@property
def user(self):
"""
Gets the user of this WorkdayValuesTrend.
The targeted user for the query
:return: The user of this WorkdayValuesTrend.
:rtype: UserReference
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this WorkdayValuesTrend.
The targeted user for the query
:param user: The user of this WorkdayValuesTrend.
:type: UserReference
"""
self._user = user
@property
def timezone(self):
"""
Gets the timezone of this WorkdayValuesTrend.
The time zone used for aggregating metric values
:return: The timezone of this WorkdayValuesTrend.
:rtype: str
"""
return self._timezone
@timezone.setter
def timezone(self, timezone):
"""
Sets the timezone of this WorkdayValuesTrend.
The time zone used for aggregating metric values
:param timezone: The timezone of this WorkdayValuesTrend.
:type: str
"""
self._timezone = timezone
@property
def results(self):
"""
Gets the results of this WorkdayValuesTrend.
The metric value trends
:return: The results of this WorkdayValuesTrend.
:rtype: list[WorkdayValuesMetricItem]
"""
return self._results
@results.setter
def results(self, results):
"""
Sets the results of this WorkdayValuesTrend.
The metric value trends
:param results: The results of this WorkdayValuesTrend.
:type: list[WorkdayValuesMetricItem]
"""
self._results = results
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
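# Hedged usage sketch (attribute names come from swagger_types above; assumes
# `import datetime` alongside the imports at the top of this module):
#
#   trend = WorkdayValuesTrend()
#   trend.timezone = 'UTC'
#   trend.date_start_workday = datetime.date(2021, 4, 1)
#   trend.date_end_workday = datetime.date(2021, 4, 30)
#   print(trend.to_json())   # serialized via sanitize_for_serialization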
| [
"[email protected]"
]
| |
21901bac5c3e1042eca32269d0f30c9e6f065094 | f2acfe325ee64b239ba68e8a2474d78f89f274bd | /cloud/infrastructure/capella_aws_8s_1c_c5_2xlarge_m5_2xlarge_sizing_kv_rebalance.spec | 7501796635c519528f57f5e7788828edeb73ee47 | [
"Apache-2.0"
]
| permissive | couchbase/perfrunner | d12596ff3c2a53b08c488cd48f0c72475cc59c49 | e76fd55bf1c733adf9988dee166da36304c36e72 | refs/heads/master | 2023-08-30T18:14:01.731809 | 2023-08-30T12:13:49 | 2023-08-30T12:14:32 | 12,786,971 | 23 | 44 | Apache-2.0 | 2023-08-02T01:56:41 | 2013-09-12T15:17:58 | Python | UTF-8 | Python | false | false | 1,395 | spec | [infrastructure]
provider = capella
backend = aws
[clusters]
couchbase1 =
ec2.ec2_cluster_1.ec2_node_group_1.1:kv
ec2.ec2_cluster_1.ec2_node_group_1.2:kv
ec2.ec2_cluster_1.ec2_node_group_1.3:kv
ec2.ec2_cluster_1.ec2_node_group_2.1:index
ec2.ec2_cluster_1.ec2_node_group_2.2:index
ec2.ec2_cluster_1.ec2_node_group_2.3:n1ql
ec2.ec2_cluster_1.ec2_node_group_2.4:n1ql
ec2.ec2_cluster_1.ec2_node_group_1.4:kv
[clients]
workers1 =
ec2.ec2_cluster_1.ec2_node_group_3.1
[utilities]
brokers1 = ec2.ec2_cluster_1.ec2_node_group_4.1
[ec2]
clusters = ec2_cluster_1
[ec2_cluster_1]
node_groups = ec2_node_group_1,ec2_node_group_2,ec2_node_group_3,ec2_node_group_4
storage_class = gp3
[ec2_node_group_1]
instance_type = m5.2xlarge
instance_capacity = 4
volume_size = 1000
iops = 16000
[ec2_node_group_2]
instance_type = c5.2xlarge
instance_capacity = 4
volume_size = 1000
iops = 16000
[ec2_node_group_3]
instance_type = c5.24xlarge
instance_capacity = 1
volume_size = 300
[ec2_node_group_4]
instance_type = c5.9xlarge
instance_capacity = 1
volume_size = 100
[storage]
data = var/cb/data
[credentials]
rest = Administrator:Password123!
ssh = root:couchbase
[parameters]
OS = Amazon Linux 2
CPU = Data: m5.2xlarge (8 vCPU), Analytics: c5.2xlarge (8 vCPU)
Memory = Data: 32GB, Index/N1ql: 16GB
Disk = EBS gp3, 300GB, 16000 IOPS
| [
"[email protected]"
]
| |
5e9d5cc98dc54a3720199667dab263dc0b42dfb6 | 3fa8c5984304c79e988d32a2ce6eb9c5983e3cba | /Python_assignment_10/olympics_pandas_practice.py | 9497888ed60dd39db1ae790259b2160f8e82a34e | []
| no_license | nlad-gmu/Lad_AIT580 | 8a62ae5abbd5ed6ff11ed06ebb060985d7bd83a0 | e5dc2a6751aacb4eab39f7d64f91bd22ebfe9c7c | refs/heads/master | 2020-07-24T11:29:18.151181 | 2019-12-17T09:36:46 | 2019-12-17T09:36:46 | 207,908,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | import pandas as pd
df = pd.read_csv('olympics_clean.csv')
#Q1: Which country has won the most gold medals in summer games?
def answer_one():
x = max(df['Gold'])
ans = df[df['Gold'] == x].index.tolist()
return df['Country'][ans[0]]
print(answer_one())
#Q2: Which country had the biggest difference between their summer and winter gold medal counts?
def answer_two():
x = max(df['Gold'] - df['Gold.1'])
ans = df[(df['Gold'] - df['Gold.1']) == x].index.tolist()
return df['Country'][ans[0]]
print(answer_two())
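# Hedged aside: answer_one and answer_two can also be written with idxmax,
# mirroring the approach answer_three uses below:
#
#   def answer_one_alt():
#       return df['Country'][df['Gold'].idxmax()]
#
#   def answer_two_alt():
#       return df['Country'][(df['Gold'] - df['Gold.1']).idxmax()]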
#Q3: Which country has the biggest difference between their summer and winter gold medal counts relative to their total gold medal count?
# Only include countries that have won at least 1 gold in both summer and winter.
def answer_three():
df_gold = df[(df['Gold']>0) & (df['Gold.1']>0)]
df_max_diff = (abs(df_gold['Gold']-df_gold['Gold.1'])/df_gold['Gold.2'])
max_diff_index = df_max_diff.idxmax()
return df['Country'][max_diff_index]
print(answer_three())
#Q4: Write a function to update the dataframe to include a new column called "Points" which is a weighted value
# where each gold medal counts for 3 points, silver medals for 2 points, and bronze medals for 1 point.
# The function should return only the column Points which you created.
def answer_four():
Points = 3*df['Gold.2'] + 2*df['Silver.2'] + 1*df['Bronze.2']
return Points
print(answer_four()) | [
"[email protected]"
]
| |
2b05106d3a46f169272d1e3f9813b03703458746 | 842c0fd117d1bf0502988ae960c174a598d72584 | /ecdh.py | 610756bf66b689a00a18c59c923b5f542fd32cb5 | []
| no_license | ThomasB123/ecc | e41ab8314ef38ff11583cdf816b2c059e9e455cf | 45d8585263bf29f922ee94d3d588e71ecf41fbeb | refs/heads/main | 2023-06-14T22:07:36.010246 | 2021-07-13T16:44:31 | 2021-07-13T16:44:31 | 304,315,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py |
def binary(num): # convert denary number to binary
out = []
while num > 0:
if num % 2 == 1:
num -= 1
out.append(1)
else:
out.append(0)
        num //= 2  # integer (floor) division; '/' would produce a float and lose precision for large keys
return out
def move(xa,ya,xb,yb):
if [xa,ya] == [xb,yb]:
# doubling a point
m = ((3*xa**2+a)*pow(2*ya,p-2,p)) % p # (3x^2+a)/(2y) % p
else:
# adding two points
m = ((yb-ya)*pow(xb-xa,p-2,p)) % p # (yb-ya)/(xb-xa) % p
xd = (m**2 -xa-xb) % p
yd = (m*(xa-xd) - ya) % p
return xd,yd
def K(start,k):
points = [start]
bina = binary(k)
    for i in range(len(bina) - 1):  # precompute doublings up to 2^(len-1)*start; the original count broke for even k
points.append(move(points[-1][0],points[-1][1],points[-1][0],points[-1][1])) # double
    index = bina.index(1) # find first occurrence of 1 in the binary representation
out = points[index] # start with smallest multiple of g
for i in range(index+1,len(bina)): # count up from the smallest multiple
if bina[i] == 1:
out = move(out[0],out[1],points[i][0],points[i][1])
return out
def montgomery(a,b): # convert from montgomery to short weierstrass
# a = (3 - a^2)/(3b^2) and b = (2a^3 - 9a)/(27b^3)
return (3-a**2)*pow(3*b**2,p-2,p),(2*a**3-9*a)*pow(27*b**3,p-2,p)
def edwards(d): # convert from edwards to short weierstrass
# a = 2(1 + d)/(1 - d) and b = 4/(1 - d)
return montgomery(2*(1+d)*pow(1-d,p-2,p),4*pow(1-d,p-2,p))
# public parameters: p,a,b,g
# Curve25519
print('You are using Curve25519')
p = 2**255 - 19
a,b = montgomery(486662,1)
g = (9,14781619447589544791020593568409986887264606134616475288964881837755586237401)
print('Equation of curve: y^2 = x^3 + 486662x^2 + x mod 2^255 - 19')
print('Starting point g = {}'.format(g))
print()
# Change private keys here
#####################################
# private keys 2 <= ka,kb <= p-2
ka = 2**200-1 # Alice private key
kb = 2**210-1 # Bob private key
#####################################
print('Alice computes A = (ka)g mod p')
A = K(g,ka) # Alice calculation
print('A = {}\n'.format(A))
print('Alice sends A to Bob\n')
print('Bob computes B = (kb)g mod p')
B = K(g,kb) # Bob calculation
print('B = {}\n'.format(B))
print('Bob sends B to Alice\n')
# Bob sends B to Alice
print('Alice computes K = (ka)B mod p = (ka.kb)g mod p')
k = K(B,ka) # Alice calculation
print('K = {}\n'.format(k))
# Alice sends A to Bob
print('Bob computes K = (kb)A mod p = (kb.ka)g mod p')
k = K(A,kb) # Bob calculation
print('K = {}\n'.format(k))
# Alice and Bob now know the same K
print('Alice and Bob now know the same K\n')
print('x-coordinate used as secret value')
print('Secret value = {}\n'.format(k[0]))
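# Hedged follow-on: in practice the shared x-coordinate is not used raw but is
# fed through a hash/KDF; a minimal sketch with the standard library:
#
#   import hashlib
#   shared_key = hashlib.sha256(k[0].to_bytes(32, 'big')).digest()
#   # both parties derive the same 256-bit symmetric key from K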
| [
"[email protected]"
]
| |
1a519de31d1f8954c5d1274c2611ea3a2b428ed6 | 704608f54e440d20e23f3d4c1519746ecadad3e5 | /python1812alg/urls.py | 4369ec36ec7f98c2d0bc068157108771afac77de | []
| no_license | kenzhanglg/python1812alg | 565af679fd549cabb18e4d644f9ad5db7a8e0fdc | fc655f3dd9bab8e99afd693abd0ff44d1067f948 | refs/heads/master | 2020-04-29T03:44:13.167911 | 2019-03-19T12:03:29 | 2019-03-19T12:03:29 | 175,571,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | """python1812alg URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^',include('app.urls',namespace='alg')),
]
| [
"[email protected]"
]
| |
2d9d1d9d7a5f154131b426c6f8b4abf0ca4fea06 | b7aef4891809c09c2193f281fc88f166149b0c5d | /Scripts/py/FNplus.py | 0fd3e351fe9cdf6197d8e897ecf0ba42d13803bc | []
| no_license | mapengXM/VIP | acfcfdd0c34a68a8ea7a55b6f353bca50b1857a7 | 8895cb1e8a54ed2e28e2ccfe0f50e42358196ce4 | refs/heads/main | 2023-07-17T00:00:17.251628 | 2021-08-16T20:10:01 | 2021-08-16T20:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,755 | py | import os,re,requests,argparse
# login URL
LOGIN_URL = 'https://my.freenom.com/dologin.php'
# domain status URL
DOMAIN_STATUS_URL = 'https://my.freenom.com/domains.php?a=renewals'
# domain renewal URL
RENEW_DOMAIN_URL = 'https://my.freenom.com/domains.php?submitrenewals=true'
# token regex
token_ptn = re.compile('name="token" value="(.*?)"', re.I)
# domain info regex
domain_info_ptn = re.compile(
r'<tr><td>(.*?)</td><td>[^<]+</td><td>[^<]+<span class="[^<]+>(\d+?).Days</span>[^&]+&domain=(\d+?)">.*?</tr>',
re.I)
# login status regex
login_status_ptn = re.compile('<a href="logout.php">Logout</a>', re.I)
# args
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', type=str)
parser.add_argument('-p', '--password', type=str)
args = parser.parse_args()
username = args.username
password = args.password
class FreeNom:
def __init__(self, username: str, password: str):
if "FN_ID" in os.environ:
self._u = os.environ.get('FN_ID')
else:
self._u = username
if "FN_PW" in os.environ:
self._p = os.environ.get('FN_PW')
else:
self._p = password
self._s = requests.Session()
self._s.headers.update({
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/79.0.3945.130 Safari/537.36'
})
def _login(self) -> bool:
self._s.headers.update({
'content-type': 'application/x-www-form-urlencoded',
'referer': 'https://my.freenom.com/clientarea.php'
})
r = self._s.post(LOGIN_URL, data={'username': self._u, 'password': self._p})
return r.status_code == 200
def renew(self):
global msg
msg = ''
# login
ok = self._login()
if not ok:
msg = 'login failed'
print(msg)
return
# check domain status
self._s.headers.update({'referer': 'https://my.freenom.com/clientarea.php'})
r = self._s.get(DOMAIN_STATUS_URL)
# login status check
if not re.search(login_status_ptn, r.text):
msg = 'get login status failed'
print(msg)
return
# page token
match = re.search(token_ptn, r.text)
if not match:
msg = 'get page token failed'
print(msg)
return
token = match.group(1)
# domains
domains = re.findall(domain_info_ptn, r.text)
# renew domains
for domain, days, renewal_id in domains:
days = int(days)
if days < 14:
self._s.headers.update({
'referer': f'https://my.freenom.com/domains.php?a=renewdomain&domain={renewal_id}',
'content-type': 'application/x-www-form-urlencoded'
})
r = self._s.post(RENEW_DOMAIN_URL, data={
'token': token,
'renewalid': renewal_id,
f'renewalperiod[{renewal_id}]': '12M',
'paymentmethod': 'credit'
})
                result = f'{domain} renewed successfully' if r.text.find('Order Confirmation') != -1 else f'{domain} renewal failed'
print(result)
msg += result + '\n'
            result = f'{domain} has {days} days left until renewal'
print(result)
msg += result + '\n'
cur_path = os.path.abspath(os.path.dirname(__file__))
service = 1
if os.path.exists(cur_path + "/notify.py"):
try:
from notify import send
    except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt/SystemExit
print("加载通知服务失败~")
else:
service = 0
instance = FreeNom(username, password)
instance.renew()
if service == 1:
    send('Freenom renewal', msg) | [
"[email protected]"
]
| |
3c8532971330519aab359334a8036f420b97fcd7 | cbc37c553833e6ff4bd0c5b473427aa99dc867d2 | /brokenChains/models.py | 9c64661b6ac72bf69905ccfd82f6f13a81c423ae | [
"MIT"
]
| permissive | bunya017/brokenChains | 970d1c65886b4f68023d4665a34f7fff2932d081 | 3e20c834efd7f0ade8e3abe7acf547c093f76758 | refs/heads/master | 2020-04-05T22:08:56.866300 | 2020-02-10T11:03:47 | 2020-02-10T11:04:20 | 157,246,570 | 1 | 0 | MIT | 2019-10-14T18:16:57 | 2018-11-12T16:56:05 | Python | UTF-8 | Python | false | false | 1,739 | py | from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from rest_framework.authtoken.models import Token
import datetime
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
class Habit(models.Model):
name = models.CharField(max_length=50)
owner = models.ForeignKey(User, related_name='habits', on_delete=models.CASCADE)
goal = models.CharField(max_length=150)
start_date = models.DateField(auto_now_add=True)
    # NOTE: this default is evaluated once at import time, so every Habit gets
    # the same stop_date until the process restarts; a callable default (e.g.
    # a module-level function) would recompute it per row.
    stop_date = models.DateField(default=timezone.now().date() + datetime.timedelta(days=21))
class Meta:
ordering = ('start_date',)
unique_together = ('owner', 'name')
def save(self, *args, **kwargs):
self.name = self.name.title()
self.goal = self.goal.title()
super(Habit, self).save(*args, **kwargs)
def __str__(self):
return self.name.title()
class Session(models.Model):
habit = models.ForeignKey(Habit, related_name='sessions', on_delete=models.CASCADE)
name = models.CharField(max_length=50)
text = models.CharField(max_length=200, blank=True) # For additional notes
date = models.DateField(auto_now_add=True)
is_complete = models.BooleanField(default=False)
class Meta:
ordering = ('date',)
unique_together = ('habit', 'name')
def save(self, *args, **kwargs):
sessions_count = Session.objects.filter(habit=self.habit).count()
self.name = self.habit.name + " - Day " + str(sessions_count + 1)
super(Session, self).save(*args, **kwargs)
def __str__(self):
return self.name.title()
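# Hedged behavior sketch: Session.save() derives each session's name from its
# habit, so the third session saved for a habit "Running" is stored as
# "Running - Day 3"; Habit.save() title-cases name and goal before saving.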
| [
"[email protected]"
]
| |
98f1b78b8810338038167479184830c15e4a2e72 | 01a0c16978068d8dd03f2d8a6ed6ceff48a06e50 | /webapps/secrets/migrations/0001_initial.py | 8e1a5aad5b545cf2350524908d5de686781b008f | []
| no_license | Kiwicai/Secrets | ab3c50872d3ea0da44bc11a80f92123643c14314 | ae10631e44a6165a10278ce272a709cd694b4e85 | refs/heads/master | 2021-01-13T08:20:28.294186 | 2016-10-29T21:13:35 | 2016-10-29T21:13:35 | 71,753,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-24 06:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Secret',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=500)),
('postDate', models.DateTimeField(verbose_name='date posted')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
]
| |
491a1f11b35ab27ff0eb2c2ce7bb95b422862b4a | ed7cd7760c708720f5a847a02b0c3a50cca0175e | /docs/conf.py | c6db3e446649d27013be9c86061f2f9677830789 | [
"MIT"
]
| permissive | jcapriot/aurora | bf98b1236e7dc43e0189df71725f7f862d271984 | 08d5ccc671054a2b646a4effb412a2ed48314646 | refs/heads/main | 2023-09-05T00:07:16.984109 | 2021-10-27T02:49:41 | 2021-10-27T02:49:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import aurora
from sphinx_gallery.sorting import FileNameSortKey
# -- Project information -----------------------------------------------------
project = 'aurora'
copyright = '2021, Karl Kappler, Jared Peacock, Lindsey Heagy, Douglas Oldenburg'
author = 'Karl Kappler, Jared Peacock, Lindsey Heagy, Douglas Oldenburg'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"matplotlib.sphinxext.plot_directive",
"numpydoc",
# "nbsphinx",
"sphinx_gallery.gen_gallery"
]
# Autosummary pages will be generated by sphinx-autogen instead of sphinx-build
autosummary_generate = True
numpydoc_class_members_toctree = False
# API doc options
apidoc_module_dir = "../aurora"
apidoc_output_dir = "api/generated"
apidoc_toc_file = False
apidoc_excluded_paths = []
apidoc_separate_modules = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
try:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except Exception:
    html_theme = "default"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Intersphinx
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"matplotlib": ("https://matplotlib.org/", None),
}
# Sphinx Gallery
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": [
"../examples",
],
"gallery_dirs": [
"examples",
],
"within_subsection_order": FileNameSortKey,
"filename_pattern": "\.py",
"backreferences_dir": "api/generated/backreferences",
"doc_module": "aurora",
# 'reference_url': {'discretize': None},
}
| [
"[email protected]"
]
| |
b4215825da1da85f06424f250276cdaa31fbf895 | 91bcf9bdbe35a9ea450f2a9792ec092dd9146d83 | /loop45.py | 3186c1fb626842967571458dc24712d9880ffbe8 | []
| no_license | degavathmamatha/LOOP_PY | c2c495966dc82750c6ff33276dedd308d97e8ba2 | 354a613513c784860aa6ec5a27e7a3901a91c2fb | refs/heads/main | 2023-05-17T15:19:53.807678 | 2021-06-02T17:06:06 | 2021-06-02T17:06:06 | 373,244,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | i=69
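# prints a shrinking triangle of letters: five "E"s, four "D"s, and so on
# down to a single "A" (chr(69) is "E", chr(65) is "A")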
while i>=65:
j=65
while j<=i:
print(chr(i),end=" ")
j=j+1
i=i-1
print() | [
"[email protected]"
]
| |
8bff23b2e9efc484a7dafa78905b9ed3efde7cac | 04d9640857ee86879bdf10147863c484ac520f06 | /main.py | d909d80047c65aac58b19f9486a3f6bc47986c5b | [
"CC0-1.0"
]
| permissive | fretless1/beard-respite | f3b05c9c558f751160ebfd3ede7d13da919d656b | be18f4231fee31606d4f71e0a12cf6ef04c7e870 | refs/heads/main | 2023-05-10T01:36:33.274741 | 2021-05-31T17:21:01 | 2021-05-31T17:21:01 | 372,528,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,664 | py | import math
print("Mit diesem Rechner kannst du eine beliebige Bedenkzeit vor der Rasur deines Bartes berechnen.")
# total time = growth time + grace period, where grace period = growth time / lead time
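# Worked example with hypothetical numbers: 30 days of growth and a lead time
# of 3 give a grace period of 30 / 3 = 10 days, i.e. a total of 40 days.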
print()
# growth time: number of days since the last shave
g = float(input("Gib hier die Anzahl der Tage ein, die vergangen sind seit du dich das letzte Mal rasiert hast: "))
# lead time: the divisor that turns growth time into grace period
v = float(input("Gib hier die Anzahl der Tage für die Vorlaufzeit ein, die für einen Tag Bedenkzeit nötig ist: "))
# "addtime" berechnet die Bedenkzeit c in Abhängigkeit von der Vorlaufzeit b
def addtime(a, b):
c = a / b
return c
# unterscheidet beim Output zwischen "Jahr" und "Jahre"
if math.floor(g / 360) == 1:
y = " Jahr"
else:
y = " Jahre"
# unterscheidet beim Output zwischen "Monat" und "Monate"
if math.floor(g % 360) == 30:
m = " Monat"
else:
m = " Monate"
# unterscheidet beim Output zwischen "Tag" und "Tage"
if v == 1 or math.floor(g % 30) == 1:
d = " Tag"
else:
d = " Tage"
# unterscheidet beim Output zwischen "Stunde" und "Stunden"
if math.floor((g * 24) % 24) == 1:
h = " Stunde"
else:
h = " Stunden"
# unterscheidet beim Output zwischen "Minute" und "Minuten"
if math.floor((((g * 24) % 24) * 60) % 60) == 1:
mi = " Minute"
else:
mi = " Minuten"
# unterscheidet beim Output zwischen "Sekunde" und "Sekunden"
if math.floor((((((g * 24) % 24) * 60) % 60) * 60) % 60) == 1:
s = " Sekunde"
else:
s = " Sekunden"
print()
print("Wachstumszeit: ")
print(math.floor(g/360), y)
print(math.floor((g % 360)/30), m)
print(math.floor((g % 360) % 30), d)
print(math.floor(((g % 360) % 30) * 24) % 24, h)
print(math.floor((((((g % 360) % 30) * 24) % 24) * 60) % 60), mi)
print(math.floor((((((((g % 360) % 30) * 24) % 24) * 60) % 60) * 60) % 60), s)
# unterscheidet beim Output zwischen "Jahr" und "Jahre"
if math.floor(addtime(g, v) / 360) == 1:
y = " Jahr"
else:
y = " Jahre"
# unterscheidet beim Output zwischen "Monat" und "Monate"
if math.floor(addtime(g, v) % 360) == 30:
m = " Monat"
else:
m = " Monate"
# unterscheidet beim Output zwischen "Tag" und "Tage"
if v == 1 or math.floor(addtime(g, v) % 30) == 1:
d = " Tag"
else:
d = " Tage"
# unterscheidet beim Output zwischen "Stunde" und "Stunden"
if math.floor((addtime(g, v) * 24) % 24) == 1:
h = " Stunde"
else:
h = " Stunden"
# unterscheidet beim Output zwischen "Minute" und "Minuten"
if math.floor((((addtime(g, v) * 24) % 24) * 60) % 60) == 1:
mi = " Minute"
else:
mi = " Minuten"
# unterscheidet beim Output zwischen "Sekunde" und "Sekunden"
if math.floor((((((addtime(g, v) * 24) % 24) * 60) % 60) * 60) % 60) == 1:
s = " Sekunde"
else:
s = " Sekunden"
print()
print("Bedenkzeit: ")
print(math.floor(addtime(g, v)/360), y)
print(math.floor((addtime(g, v) % 360)/30), m)
print(math.floor((addtime(g, v) % 360) % 30), d)
print(math.floor(((addtime(g, v) % 360) % 30) * 24) % 24, h)
print(math.floor((((((addtime(g, v) % 360) % 30) * 24) % 24) * 60) % 60), mi)
print(math.floor((((((((addtime(g, v) % 360) % 30) * 24) % 24) * 60) % 60) * 60) % 60), s)
# unterscheidet beim Output zwischen "Jahr" und "Jahre"
if math.floor(addtime(g, v) / 360) == 1:
y = " Jahr"
else:
y = " Jahre"
# unterscheidet beim Output zwischen "Monat" und "Monate"
if math.floor((addtime(g, v) + g) % 360) == 30:
m = " Monat"
else:
m = " Monate"
# unterscheidet beim Output zwischen "Tag" und "Tage"
if v == 1 or math.floor((addtime(g, v) + g) % 30) == 1:
d = " Tag"
else:
d = " Tage"
# unterscheidet beim Output zwischen "Stunde" und "Stunden"
if math.floor(((addtime(g, v) + g) * 24) % 24) == 1:
h = " Stunde"
else:
h = " Stunden"
# unterscheidet beim Output zwischen "Minute" und "Minuten"
if math.floor(((((addtime(g, v) + g) * 24) % 24) * 60) % 60) == 1:
mi = " Minute"
else:
mi = " Minuten"
# unterscheidet beim Output zwischen "Sekunde" und "Sekunden"
if math.floor(((((((addtime(g, v) + g) * 24) % 24) * 60) % 60) * 60) % 60) == 1:
s = " Sekunde"
else:
s = " Sekunden"
print()
print("Gesamtzeit: ")
print(math.floor((g + addtime(g, v))/360), y)
print(math.floor(((g + addtime(g, v)) % 360)/30), m)
print(math.floor(((g + addtime(g, v)) % 360) % 30), d)
print(math.floor((((g + addtime(g, v)) % 360) % 30) * 24) % 24, h)
print(math.floor(((((((g + addtime(g, v)) % 360) % 30) * 24) % 24) * 60) % 60), mi)
print(math.floor(((((((((g + addtime(g, v)) % 360) % 30) * 24) % 24) * 60) % 60) * 60) % 60), s)
| [
"[email protected]"
]
| |
08bd1ec4dcb4c0a0083b9de9301ff74b1d6fc755 | 99d14f465a7f725e7b9c3377f5e12b76453de0aa | /bin/old/csv2analyse.py | 2c839873fb8d3a42d0329d243cbfde5f6bd64681 | []
| no_license | suzannejin/nf_homoplasty | c4056e2ff3b7ee7270d4690257beac37c9ffddc2 | 2fb5af9dd9f38cfac3b3e81dffa80dbcbb6f0cfe | refs/heads/master | 2020-12-28T05:01:24.059054 | 2020-05-19T11:07:53 | 2020-05-19T11:07:53 | 238,189,231 | 0 | 0 | null | 2020-02-04T11:23:05 | 2020-02-04T11:23:04 | null | UTF-8 | Python | false | false | 5,211 | py | #!/usr/bin/env python3
__description__='''
Python version of Cedric's script for the analysis of homoplasy.
'''
def csv2pandas(fil):
import pandas as pd
df=pd.read_csv(fil,header=[0,1],index_col=0)
return(df)
def pandas2headers(df):
# Get headers: families, aligners, trees
families=list(df.index)
tmp=[x[0] for x in df["nseq"].values.tolist()]
nseqs={}
for i in range(len(families)):
fam=families[i]
n=tmp[i]
nseqs[fam]=n
aligners_rep=[x[0] for x in list(df.columns)][1:] # Note that they are repeated (just as in the csv file)
trees_rep=[x[1] for x in list(df.columns)][1:]
aligners,trees=[],[]
for x in aligners_rep:
if x not in aligners:
aligners.append(x)
for x in trees_rep:
if x not in trees:
trees.append(x)
return(families,nseqs,aligners,trees)
def csv2analyse(metricdf,scoredf,families,nseqs,aligners,trees,minseq,maxseq,mrdelta):
import numpy as np
import pandas as pd
# Initialize numbers
nfam=len(families)
naln=len(aligners)
unused=pd.DataFrame(np.zeros( (naln,nfam)), index=aligners, columns=families )
total=pd.DataFrame(np.zeros( (naln,nfam)) ,index=aligners, columns=families )
positiv=pd.DataFrame(np.zeros( (naln,nfam)) ,index=aligners, columns=families )
negativ=pd.DataFrame(np.zeros( (naln,nfam)) ,index=aligners, columns=families )
pick=pd.DataFrame(np.zeros( (naln,4)) ,index=aligners, columns=["minS","maxS","minN","maxN"] )
# Compute
for aligner in aligners:
for family in families:
for tr1 in trees:
for tr2 in trees:
total.at[aligner,family]+=1 # total +1
use=0
# Metrics
h1=metricdf[aligner][tr1][family]
h2=metricdf[aligner][tr2][family]
# Score
s1=scoredf[aligner][tr1][family]
s2=scoredf[aligner][tr2][family]
if h1=="NA" or h2=="NA" or s1=="NA" or s2=="NA":
unused.at[aligner,family]+=1
elif nseqs[family]<minseq or nseqs[family]>maxseq:
unused.at[aligner,family]+=1
elif h1==h2 and h1==0: # If both metrics are 0
unused.at[aligner,family]+=1
elif abs(h1-h2)/(h1+h2)<mrdelta:
unused.at[aligner,family]+=1
                    elif abs(h1-h2)<=0.001 and abs(s1-s2)<=0.001:
unused.at[aligner,family]+=1
else:
d1=h1-h2
d2=s1-s2
if (d2>=0 and d1>=0) or (d2<=0 and d1<=0):
use=1
positiv.at[aligner,family]+=1
elif (d2>0 and d1<0) or (d2<0 and d1>0):
use=1
negativ.at[aligner,family]+=1
if use>0:
if h1>h2:
pick.at[aligner,"minS"]+=s2
pick.at[aligner,"maxS"]+=s1
else:
pick.at[aligner,"minS"]+=s1
pick.at[aligner,"maxS"]+=s2
pick.at[aligner,"minN"]+=1
pick.at[aligner,"maxN"]+=1
N=negativ.loc[aligner,family]
t=total.loc[aligner,family]
if t!=0:
Nratio=N/t
else:
Nratio=0
u=unused.loc[aligner,family]
if u!=0:
unusedRatio=u/t
else:
unusedRatio=0
sys.stdout.write("FAM::{} [{}]: {:.3f} ({}/{}) -- UNUSED: {:.3f} ({}/{})\n".format(family,aligner,Nratio,N,t,unusedRatio,u,t))
if __name__ == '__main__':
import sys
import argparse
app=argparse.ArgumentParser(description=__description__)
app.add_argument("-dir",type=str,help="Directory where the input (tc,sp,homo,whomo,whomo2,ngap,ngap2).csv files are stored.")
app.add_argument("-score",type=str,choices=["tc","sp"])
app.add_argument("-metrics",type=str,choices=["homo","whomo","whomo2","ngap","ngap2"])
app.add_argument("-norm",type=str,choices=["PerLen","PerSeq","PerLenSeq","ByLen","BySeq","ByLenSeq"],default=None,help="Data normalization. \
Divided by length, number of sequence, or both. Or multiplied by Length, number of sequence, or both.")
app.add_argument("-mrdelta",type=float,default=0)
app.add_argument("-minseq",type=int,default=0)
app.add_argument("-maxseq",type=int,default=100000)
args=app.parse_args()
# Read csv to panda Data Frames
if args.norm:
metricfile=args.dir+"/"+args.metrics+".norm"+args.norm+".csv"
else:
metricfile=args.dir+"/"+args.metrics+".csv"
scorefile=args.dir+"/"+args.score+".csv"
metricdf=csv2pandas(metricfile)
scoredf=csv2pandas(scorefile)
# Headers & index
families,nseqs,aligners,trees=pandas2headers(metricdf)
csv2analyse(metricdf,scoredf,families,nseqs,aligners,trees,args.minseq,args.maxseq,args.mrdelta)
| [
"[email protected]"
]
| |
95bfdd8d4b4b4983e02efdc3723e09cace44a225 | 706d5ff4707793a225f41c469f19a4f1891078da | /mongodb.py | 54b9b0141c20586c5b80ba3c32565d688101a41c | []
| no_license | dcc668/PyDemo1.2 | eb5f13a19343e4d9d82fdd7c54f6f45622c5c00e | f883ca1d9bc04673beb9b40d889da74d2aaa5095 | refs/heads/master | 2020-04-15T02:23:16.000157 | 2019-01-30T23:57:41 | 2019-01-30T23:57:41 | 164,312,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # -*- coding: utf-8 -*-
import pymongo
conn=pymongo.MongoClient()
mydb=conn["mydb"]
users=mydb["t_user"]
user1={"userName":"cc1","sex":"男","password":"111","email":"[email protected]"}
# user2={"userName":"cc2","sex":"男","password":"122","email":"[email protected]"}
# user3={"userName":"cc3","sex":"男","password":"123","email":"[email protected]"}
users.insert_one(user1)  # insert() was deprecated in PyMongo 3 and removed in 4
print("insert finish") | [
"[email protected]"
]
| |
1dea419e68a47fe50ed697d1fa7911da70083a77 | 67f94c361d28c2086c977504f0e2615db7bbe224 | /ski_bible/scripts/initializedb.py | e317354477454a83830e5c8f98b628fe3365b00a | []
| no_license | dyladan/ski_bible | 241c05e198fac914c0b5a981afd19089c65b3e5c | 7a39db18f90f4760278ec608a713251880d1f6ab | refs/heads/master | 2016-09-09T20:58:04.482972 | 2014-09-19T14:21:43 | 2014-09-19T14:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | import os
import sys
import transaction
import datetime
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
Pass,
Base,
Skier,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
model = Pass(uid=1, speed=32, line=22, count=6, division="CM", date=datetime.date.today())
DBSession.add(model)
model = Pass(uid=1, speed=34, line=22, count=4, division="CM", date=datetime.date.today())
DBSession.add(model)
model = Pass(uid=2, speed=30, line=15, count=6, division="CM", date=datetime.date.today())
DBSession.add(model)
model = Pass(uid=2, speed=32, line=15, count=3, division="CM", date=datetime.date.today())
DBSession.add(model)
model = Skier(name="Danny Dyla", age=22, division="CM")
DBSession.add(model)
model = Skier(name="David Huisman", age=24, division="CM")
DBSession.add(model)
| [
"[email protected]"
]
| |
5636c3dd3742788051538088505928e3ef3fe2fb | 2fa6156bcb1935b0a4897d1e6229cd0f73714130 | /gs_runplots.py | 3b59643b8e739ca1d34634703efd923eeb25c836 | [
"MIT"
]
| permissive | briangalindoherbert/gs_mapdemo | 9fdaed1196452d438af3573ecb74632bb9930460 | 86f1791e8a8913335a24ea32ae10d16a86dfa415 | refs/heads/main | 2023-02-04T15:53:41.548014 | 2020-12-15T21:14:04 | 2020-12-15T21:14:04 | 314,715,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # encoding=utf-8
"""
this is a script to separate the running of plots on processed data from the import, wrangling, and calc work done in
gs_mapdemo.py. I do a lot of customization on the plotting end that uses the same DataFrames, I don't need to do
that processing to test out minor plotting changes.
"""
import pandas as pd
from plotly.io import renderers
from gs_dochoro import *
runcounty = True
runnyt = False
runstate = True
renderers.default = 'browser'
pd.options.plotting.backend = 'plotly'
pd.set_option('precision',7)
pd.set_option('display.float_format','{:.2f}'.format)
plotly_token = 'pk.eyJ1IjoiYmdoZXJiZXJ0IiwiYSI6ImNrYXl2MmFhYjBncHEyc3Bpa2ozczQwdGgifQ.glPFF4kjwrhP40bncFSnZA'
if runcounty:
# go_cty = do_countyplot(df, updated)
go_cty = do_casesplot(df, date_jhu)
go_ctymort = do_countyplot(df, date_jhu)
go_cty.show()
go_ctymort.show()
if runnyt:
df_nyt1 = do_countystats(df_nyt)
go_nyt = do_nytcounty(df_nyt1, date_nyt)
go_nyt.show()
if runstate:
go_obj = do_stateplot(df_st, date_jhus)
go_obj.show()
| [
"[email protected]"
]
| |
4406adf90e7e6c19c1ba256d21242b43107fe5be | e2c9c8d5176ecb75df24bad668d769db01a3ce55 | /patterns/creational/factory/__init__.py | 8b822e41a17336b1f0db4515d64eb1ee34a9a8bb | [
"Apache-2.0"
]
| permissive | Vyshnavmt94/Python_Design_Patterns | e172d0cdb77534861bca835684999ff8ad099db3 | 3703b3ee7b16e77de2bad68037e2c8542852900b | refs/heads/main | 2023-08-01T06:18:33.655393 | 2021-09-26T09:48:19 | 2021-09-26T09:48:19 | 404,251,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py |
# ========================================= Factory Method ==========================================
# Factory Method is a Creational Design Pattern that allows an interface or a class to create an object, but lets subclasses decide which class or object to instantiate.
# Here, objects are created without exposing the logic to the client, and for creating the new type of object, the client uses the same common interface.
# ========================================= AbstractFactory Method ==========================================
# Allows you to produce the families of related objects without specifying their concrete classes.
# It provides a way to encapsulate a group of individual factories.
# Basically, here we try to abstract the creation of the objects depending on the logic, business, platform choice, etc. | [
"[email protected]"
]
| |
d8cea985ceebd495a9c38d178fb8abcdf16ea8e7 | 2c176520b850380c8912cea4b053e88d7fe6c3f1 | /qbiaq5/precomputation.py | e31c91fec4a789c95b00acbc1b796ea4493bc27e | [
"MIT"
]
| permissive | czyszczonik/Zuf-ninja | 858d0828ae584a66ffd082bde662b7b3b71a2b6c | b6419839d598aee7854339ce82c7036e15ecc967 | refs/heads/master | 2022-09-21T07:09:58.272300 | 2020-05-31T14:36:37 | 2020-05-31T14:36:37 | 268,044,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | import time
from qbiaq5.decorators import benchmark
from qbiaq5.math_commons import h, v, createPrecomputationArray, getBinaryArray, getEmptyArray
@benchmark
def precompute(G, l, S, a, b):
t0 = time.perf_counter()
hc = h(l, a)
vc = v(a, b)
array = createPrecomputationArray(G, a, b, l)
for row in range(1, 2 ** hc):
binaryArray = getBinaryArray(row)
l = h(l, a) - len(binaryArray)
binary = getEmptyArray(hc)
binary[l:] = binaryArray
array = performSquaring(G, a, hc, array, row, binary)
array = performMultiplying(vc, b, row, array)
t1 = time.perf_counter()
print("Time elapsed for precomputation:", t1 - t0)
return array
def performSquaring(G, a, hc, array, row, binary):
for iterator in range(hc):
exponent = pow(2, iterator * a)
r = G * exponent
array[0][row] += r * binary[-(iterator + 1)]
return array
def performMultiplying(vc, b, row, array):
for iterator in range(1, vc):
array[iterator][row] = array[0][row] * (pow(2, iterator * b))
return array
| [
"[email protected]"
]
| |
810981f159c60e6a44398345ef1be95c3cb2ff95 | ab3c571db150fd8860d762049c4f81ba756e03b2 | /texas_holdem/__init__.py | c6b5c4863a4c5aff87de4acc1c6f8a76c7049816 | []
| no_license | notjuanortiz/texas-holdem | 3e27c3d3a464a1e73fbbc1cdc07201aeabb47351 | 1503710ea4c1977f4afaa0f0017256396521b251 | refs/heads/master | 2022-02-17T12:13:43.418354 | 2019-09-13T14:12:56 | 2019-09-13T14:12:56 | 106,612,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | from texas_holdem.poker import Card, Poker, Player
__all__ = ['Card', 'Poker', 'Player']
| [
"[email protected]"
]
| |
3f14c52577f3848eabf3d6d456b3290629934df8 | 6521b069e778f6e7a5be1aabb282cfddde72f190 | /reproduction/Summarization/Baseline/test/test_dataLoader.py | 987f8778ca51720fb8af4a0eb11b06b43c6b2807 | [
"Apache-2.0"
]
| permissive | choosewhatulike/fastNLP | 12068fc618245d9cbb137729063ee390de26d696 | 14778ee071ace8825acc0f0834a26eccfda70667 | refs/heads/master | 2021-04-06T10:51:49.961158 | 2019-07-09T06:00:40 | 2019-07-09T06:00:40 | 124,500,643 | 0 | 0 | null | 2018-03-09T06:54:25 | 2018-03-09T06:54:25 | null | UTF-8 | Python | false | false | 961 | py |
import unittest
from ..data.dataloader import SummarizationLoader
class TestSummarizationLoader(unittest.TestCase):
def test_case1(self):
sum_loader = SummarizationLoader()
paths = {"train":"testdata/train.jsonl", "valid":"testdata/val.jsonl", "test":"testdata/test.jsonl"}
data = sum_loader.process(paths=paths)
print(data.datasets)
def test_case2(self):
sum_loader = SummarizationLoader()
paths = {"train": "testdata/train.jsonl", "valid": "testdata/val.jsonl", "test": "testdata/test.jsonl"}
data = sum_loader.process(paths=paths, domain=True)
print(data.datasets, data.vocabs)
def test_case3(self):
sum_loader = SummarizationLoader()
paths = {"train": "testdata/train.jsonl", "valid": "testdata/val.jsonl", "test": "testdata/test.jsonl"}
data = sum_loader.process(paths=paths, tag=True)
print(data.datasets, data.vocabs) | [
"[email protected]"
]
| |
13f0735af7afa71669e0b00ec47e9d7d07d8bce0 | d5214b1331c9dae59d95ba5b3aa3e9f449ad6695 | /qPloneDropDownMenu/branches/0.2/skins/qPloneDropDownMenu/qpdm_reorder.py | e0bfe0d169c75bd9dae28edd63c26790aeb59ec2 | []
| no_license | kroman0/products | 1661ee25a224c4b5f172f98110944f56136c77cf | f359bb64db22f468db5d1e411638790e94d535a2 | refs/heads/master | 2021-01-10T07:58:04.579234 | 2014-06-11T12:05:56 | 2014-06-11T12:05:56 | 52,677,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | ## Script (Python) "qpdm_reorder"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters= submenu_path, idxs
##title=
##
from Products.CMFCore.utils import getToolByName
menu_tool = getToolByName(context, 'portal_dropdownmenu')
menuitem = menu_tool.manage_reorderItems(idxs, submenu_path)
return context.getSubmenu(submenu=menu_tool.getSubMenuByPath(submenu_path),submenu_path=submenu_path)
| [
"mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946"
]
| mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946 |
52bc7632cb2fb0f992aefdbbb894875a1607ea42 | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/maya/app/renderSetup/model/collection.py | 6f5c78e3c5ec754621968564b253a3121787e876 | []
| no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,254 | py | """Collection node class and utility functions.
This module provides the collection class, as well as utility
functions to operate on collections.
The collection owns its associated selector node: on collection
delete, the collection is deleted as well.
Conceptually, a collection fulfills four roles in render setup:
1) It is a container of overrides. If enabled, the collection will
apply all its enabled overrides on nodes it selects (see (2)).
2) It selects nodes onto which overrides will be applied. These nodes
can be DAG or DG nodes.
3) It is a container of child collections. Child collections always
select nodes based on their parent's selected nodes (see (2)).
4) It defines render layer membership. Members of a render layer can
only be DAG nodes. These are always a subset of the nodes selected
by the collection (see (2)). The members of the render layer are the
union of the top-level collection members; children collections can
exclude or re-include members. See RenderLayer.getMembers for more
details (including the effect of isolate select mode).
The application of overrides only obeys enabled / disabled status.
Render layer membership is determined from enabled / disabled, in
conjunction with isolate select."""
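# A minimal usage sketch of the four roles above (hypothetical node names;
# assumes an interactive Maya session with the renderSetup plugin loaded):
#
#   import maya.app.renderSetup.model.renderSetup as renderSetup
#   layer = renderSetup.instance().createRenderLayer('beauty')
#   col = layer.createCollection('chairs')                    # role 3: container
#   col.getSelector().setPattern('chair*')                    # role 2: selection
#   col.createAbsoluteOverride('chair1Shape', 'visibility')   # role 1: override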
import maya
maya.utils.loadStringResourcesForModule(__name__)
import re
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
import maya.app.renderSetup.model.nodeList as nodeList
import maya.app.renderSetup.model.utils as utils
import maya.app.renderSetup.model.plug as plug
import maya.app.renderSetup.model.typeIDs as typeIDs
import maya.app.renderSetup.model.selector as selector
import maya.app.renderSetup.model.undo as undo
import maya.app.renderSetup.model.override as override
import maya.app.renderSetup.model.overrideUtils as overrideUtils
import maya.app.renderSetup.model.childNode as childNode
import maya.app.renderSetup.model.enabled as computeEnabled
import maya.app.renderSetup.model.namespace as namespace
import maya.app.renderSetup.model.renderSettings as renderSettings
import maya.app.renderSetup.model.rendererCallbacks as rendererCallbacks
import maya.app.renderSetup.model.traverse as traverse
from maya.app.renderSetup.model.renderLayerSwitchObservable import RenderLayerSwitchObservable
import maya.app.renderSetup.model.clipboardData as clipboardData
import maya.app.renderSetup.common.utils as commonUtils
import maya.app.renderSetup.common.profiler as profiler
import maya.app.renderSetup.common.guard as guard
import maya.app.renderSetup.model.context as context
import maya.app.renderSetup.model.jsonTranslatorUtils as jsonTranslatorUtils
import maya.app.renderSetup.model.jsonTranslatorGlobals as jsonTranslatorGlobals
# List all error messages below
kInvalidChildName = maya.stringTable['y_collection.kInvalidChildName' ]
kUnknownChild = maya.stringTable['y_collection.kUnknownChild' ]
kOverrideCreationFailed = maya.stringTable['y_collection.kOverrideCreationFailed' ]
kCollectionMissingSelector = maya.stringTable['y_collection.kCollectionMissingSelector' ]
kRendererMismatch = maya.stringTable['y_collection.kRendererMismatch' ]
kIncorrectChildType = maya.stringTable['y_collection.kIncorrectChildType' ]
# List of undo messages
kChildAttached = maya.stringTable['y_collection.kChildAttached' ]
kChildDetached = maya.stringTable['y_collection.kChildDetached' ]
kSet = maya.stringTable['y_collection.kSet' ]
def collections(c):
return c.getCollections()
class Collection(nodeList.ListBase, childNode.TreeOrderedItem,
childNode.ChildNode):
"""
Collection node.
A collection has an ordered list of children, and a selector to
determine nodes to which the children apply.
MAYA-59277:
- When we start implementing proper hierarchical collections we
need to decide on the relationship between parent and child
selectors. Do we always consider a parent collection to be the
union of its child collections, and propagate the selector
information upwards when a child collection is added or changed?
Or do we go the opposite direction and restrict the child collection
to use the intersection between its selector and its parent's selector?
- Light child collections always have a single light source member.
We should utilize this and create a specific selector for such
use cases for better performance.
"""
kTypeId = typeIDs.collection
kTypeName = 'collection'
# Attributes for collection as list of children.
#
# Connections to lowest-priority and highest-priority child
# on children linked list. The lowest-priority child
# is considered to be the front of the list, and the highest-priority
# child the back of the list.
childLowest = OpenMaya.MObject()
childHighest = OpenMaya.MObject()
# Connection to all children in the list.
children = OpenMaya.MObject()
# Attribute for message connection to selector node associated with the
# collection. This attribute is a destination, as only one selector
# can be associated with each collection.
aSelector = OpenMaya.MObject()
# Enabled behavior. See enabled module for documentation.
enabled = OpenMaya.MObject()
selfEnabled = OpenMaya.MObject()
parentEnabled = OpenMaya.MObject()
# isolateSelected flag as attribute
isolateSelected = OpenMaya.MObject()
# The number of isolate selected children in a collection's subtree.
numIsolatedChildren = OpenMaya.MObject()
# The number of isolate selected ancestors of this collection.
numIsolatedAncestors = OpenMaya.MObject()
# the SimpleSelector is the default.
kDefaultSelectorTypeName = selector.SimpleSelector.kTypeName
@staticmethod
def creator():
return Collection()
@staticmethod
def initializer():
# A collection is a render layer list element.
# inheritAttributesFrom() must be called before adding any other
# attributes.
Collection.inheritAttributesFrom(nodeList.ListItem.kTypeName)
# A collection is a list of children.
Collection.children = Collection.initListItems()
Collection.childLowest = utils.createDstMsgAttr(
'childLowest', 'cl')
Collection.addAttribute(Collection.childLowest)
Collection.childHighest = utils.createDstMsgAttr(
'childHighest', 'ch')
Collection.addAttribute(Collection.childHighest)
Collection.aSelector = utils.createDstMsgAttr('selector', 'sel')
Collection.addAttribute(Collection.aSelector)
# Set up enabled attribute.
computeEnabled.initializeAttributes(Collection)
# Add isolateSelected attribute
Collection.numIsolatedChildren = computeEnabled.createNumIsolatedChildrenAttribute()
Collection.addAttribute(Collection.numIsolatedChildren)
Collection.numIsolatedAncestors = computeEnabled.createHiddenIntAttribute(
"numIsolatedAncestors", "nia")
Collection.addAttribute(Collection.numIsolatedAncestors)
# Add isolateSelected attribute
numAttrFn = OpenMaya.MFnNumericAttribute()
Collection.isolateSelected = numAttrFn.create("isolateSelected", "is", OpenMaya.MFnNumericData.kBoolean, 0)
numAttrFn.storable = True
numAttrFn.keyable = False
numAttrFn.readable = True
numAttrFn.writable = True
numAttrFn.hidden = True
OpenMaya.MPxNode.addAttribute(Collection.isolateSelected)
Collection.attributeAffects(Collection.numIsolatedChildren, Collection.enabled)
Collection.attributeAffects(Collection.numIsolatedAncestors, Collection.enabled)
Collection.attributeAffects(Collection.isolateSelected, Collection.enabled)
def __init__(self):
super(Collection, self).__init__()
self._enabledDirty = False
self._callbackIds = []
def postConstructor(self):
# Call parent class postConstructor
super(Collection, self).postConstructor()
# Listen to changes in the enabled attribute.
self._callbackIds = computeEnabled.addChangeCallbacks(self)
def typeId(self):
return Collection.kTypeId
def typeName(self):
return Collection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
"""Create a selector node, and attach it to the collection.
parent is an optional parent collection. This method must be
overridden by derived classes."""
self.setSelectorType(parent.getSelector().kTypeName if parent else \
self.kDefaultSelectorTypeName)
if parent:
self.getSelector().minimalClone(parent.getSelector())
def _createAndConnectSelector(self, typeName, selArgs=None):
"""Engine method for _createSelector.
selArgs is an optional dictionary passed to _createSelectorNode."""
newSelector = self._createSelectorNode(
typeName, self.name()+'Selector', selArgs)
cmds.connectAttr(newSelector + '.c', self.name() + '.selector')
def _createSelectorNode(self, typeName, selectorName, selArgs):
"""Create the selector node.
Can be overridden by derived classes."""
return cmds.createNode(typeName, name=selectorName, skipSelect=True)
def getSelectorType(self):
try: return self.getSelector().kTypeName
except: return None
def setSelectorType(self, typeName):
'''Sets the selector type of this collection.'''
if self.getSelectorType() == typeName:
return
with undo.NotifyCtxMgr("Set selector type", self._selectorChanged):
children = [child for child in self.getChildren() if isinstance(child, Collection)]
# need to disconnect all selector children
# otherwise they get deleted along with their parent selector
for child in children:
child.getSelector().setParent(None)
try: self._deleteSelector()
except: pass
self._createAndConnectSelector(typeName)
parent = self.parent()
selector = self.getSelector()
if isinstance(parent, Collection):
selector.setParent(parent.getSelector())
for child in children:
child.getSelector().setParent(selector)
def _deleteSelector(self):
selector = self.getSelector()
cmds.disconnectAttr(selector.name() + '.c', self.name() + '.selector')
utils.deleteNode(selector)
def _getInputAttr(self, attr, dataBlock=None):
return dataBlock.inputValue(attr) if dataBlock else OpenMaya.MPlug(self.thisMObject(), attr)
def _getSelfEnabledPlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.selfEnabled)
def _getIsolatePlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.isolateSelected)
def hasIsolatedAncestors(self, dataBlock=None):
return self._getInputAttr(self.numIsolatedAncestors, dataBlock).asInt() > 0
def hasIsolatedChildren(self, dataBlock=None):
return self._getInputAttr(self.numIsolatedChildren, dataBlock).asInt() > 0
def compute(self, plug, dataBlock):
if plug == self.enabled:
# We are enabled if:
#
# o The normal enabled computation is true (self enabled is true AND
# parent enabled is true).
#
# AND
#
# o We're in batch mode OR
# o No node is isolated OR
# o This node is isolated OR
# o This node has isolate selected children OR
# o This node has isolate selected ancestors.
#
value = computeEnabled.computeEnabled(self, dataBlock) and \
(cmds.about(batch=True) or \
dataBlock.inputValue(self.layerNumIsolatedChildren).asInt()==0 or \
self.isIsolateSelected(dataBlock) or \
self.hasIsolatedAncestors(dataBlock) or \
self.hasIsolatedChildren(dataBlock))
computeEnabled.setEnabledOutput(self, dataBlock, value)
def enabledChanged(self):
layer = self.getRenderLayer()
if layer:
layer._enabledChanged(self)
self.itemChanged()
def isEnabled(self, dataBlock=None):
return self._getInputAttr(self.enabled, dataBlock).asBool()
def isSelfEnabled(self, dataBlock=None):
return self._getInputAttr(self.selfEnabled, dataBlock).asBool()
def setSelfEnabled(self, value):
if value != self.isSelfEnabled():
# pulling isEnabled will trigger enabledChanged
# (no matter if enable output value has changed or not)
with undo.NotifyCtxMgr("Set Override Enabled",self.isEnabled):
cmds.setAttr(self.name()+".selfEnabled", 1 if value else 0)
@guard.state(computeEnabled.isPulling, computeEnabled.setPulling, True)
def pullEnabled(self):
# This will force pulling the enabled plug on overrides. It solves
# the problem of connection overrides not being applied / unapplied
# when not visible in the RenderSetup window; being visible in the
# RenderSetup window causes enabled to be pulled.
#
# Connection overrides are not part of the network; they are a
# procedure that must be run on enable change to modify the
# network. Therefore, the enabled plug is not pulled, contrary to
# value overrides that get inserted in the network, and thus we
# need to force the plug to be pulled.
# Two phase procedure to avoid DG cycle check warnings. First,
# pull on enabled output of connection overrides.
needsUpdate = set()
for n in traverse.depthFirst(self, traverse.nodeListChildren):
if isinstance(n, override.Override) and n.updateOnEnabledChanged():
# Call isEnabled to force computation of the enabled output.
n.isEnabled()
needsUpdate.add(n)
# Second, update the connection override. This will iterate over
# the connection override apply nodes, which query the connection
# override enabled state we've finished computing above. Had we
# done the override enabled computation and the update in the same
# call, we would have gotten a DG evaluation cycle (compute
# enabled, cause update, which queries enabled).
for o in needsUpdate:
o.update()
def getRenderLayer(self):
# For hierarchical collections the parent
# could be another collection, otherwise
# the parent is always the render layer
parent = self.parent()
if isinstance(parent, Collection):
return parent.getRenderLayer()
return parent
def isolateSelectedChanged(self):
layer = self.getRenderLayer()
if layer:
layer._isolateSelectedChanged(self)
def isIsolateSelected(self, dataBlock=None):
""" Get if isolate selected. Will always return False in batch mode """
return False if cmds.about(batch=True) else self._getInputAttr(self.isolateSelected, dataBlock).asBool()
def setIsolateSelected(self, val):
if val!=self.isIsolateSelected() and not cmds.about(batch=True):
with undo.NotifyCtxMgr(kSet % (self.name(), 'isolateSelected', val), self.isolateSelectedChanged):
# Use a command to support the undo mechanism
cmds.setAttr(self._getIsolatePlug().name(), val)
self._updateIsolateSelected(1 if val else -1)
def _findSubcollectionForType(self, typeName):
'''Finds the subcollection of this collection that will handle that typeName
or creates it and returns it if it doesn't exist.'''
filterType, customFilter = selector.Filters.getFiltersFor(typeName)
def predicate(child):
if not isinstance(child, Collection):
return False
sel = child.getSelector()
return sel.kTypeName == selector.SimpleSelector.kTypeName and \
sel.getPattern() == "*" and \
len(sel.staticSelection) == 0 and \
sel.getFilterType() == filterType and \
(filterType != selector.Filters.kCustom or sel.getCustomFilterValue() == customFilter)
def creator():
name = self.name() + "_" + selector.Filters.names.get(filterType, customFilter)
col = create(name)
col.setSelectorType(selector.SimpleSelector.kTypeName)
sel = col.getSelector()
sel.setPattern('*')
sel.setFilterType(filterType)
sel.setCustomFilterValue(customFilter)
return col
return self.findChild(predicate, creator)
@undo.chunk('Create and append an override')
def createOverride(self, overrideName, overrideType):
""" Add an override to the Collection using its node type id or type name."""
# Note: No need to propagate the change notification
# as an empty override does not affect the collection
over = override.create(overrideName, overrideType)
if not over:
raise Exception(kOverrideCreationFailed % overrideName)
# special handle for shader override as they apply to shading engines
# => create subcollection of shading engines if we're in a dag only collection
from maya.app.renderSetup.model.connectionOverride import ShaderOverride
if over.typeId() != typeIDs.shaderOverride or \
self.getSelector().acceptsType('shadingEngine'):
self.appendChild(over)
else:
self._findSubcollectionForType('shadingEngine').appendChild(over)
return over
def _getOverrideType(self, plg, overrideType):
'''Returns the override type that should be created for the given
plg in the given collection (self). Overrides that can't be relative will become absolute.'''
return plg.overrideType(overrideType)
@undo.chunk('Create and append an override')
def _createOverride(self, plg, overrideType):
over = override.create(plg.attributeName, self._getOverrideType(plg, overrideType))
if not over:
            raise Exception(kOverrideCreationFailed % plg.attributeName)
over.finalize(plg.name)
typeName = OpenMaya.MFnDependencyNode(plg.node()).typeName
collection = self if self.getSelector().acceptsType(typeName) else \
self._findSubcollectionForType(typeName)
collection.appendChild(over)
return over
@undo.chunk('Create and append an absolute override')
def createAbsoluteOverride(self, nodeName, attrName):
""" Add an absolute override to a collection """
return self._createOverride(plug.Plug(nodeName,attrName), typeIDs.absOverride)
@undo.chunk('Create and append a relative override')
def createRelativeOverride(self, nodeName, attrName):
""" Add a relative override to a collection """
return self._createOverride(plug.Plug(nodeName,attrName), typeIDs.relOverride)
@undo.chunk('Create and append a child collection')
def _createCollection(self, collectionName, typeName):
col = create(collectionName, typeName, parent=self)
self.appendChild(col)
return col
def createCollection(self, collectionName):
""" Add a child collection to the Collection. """
return self._createCollection(collectionName, Collection.kTypeName)
def _childAttached(self, child):
'''Perform work to attach a child.
The child has already been added to collection's list when this
method is called.'''
with undo.NotifyCtxMgr(kChildAttached % (self.name(), child.name()), self.itemChanged):
# Once inserted, hook up the child's parentEnabled input to our
# enabled output. Use existing command for undo / redo purposes.
cmds.connectAttr(self.name() + '.enabled',
child.name() + '.parentEnabled')
if isinstance(child, Collection):
child.getSelector().setParent(self.getSelector())
child._attach(self.getRenderLayer())
layer = self.getRenderLayer()
if layer:
layer.descendantAdded(child)
def _detachChild(self, child):
'''Perform work to detach a child.
The child has not yet been removed from the collection's list when
this method is called.'''
with undo.NotifyCtxMgr(kChildDetached % (self.name(), child.name()), self.itemChanged):
# Disconnect the child's parentEnabled input from our enabled
# output. Use existing command for undo / redo purposes.
childParentEnabled = child.name() + '.parentEnabled'
cmds.disconnectAttr(self.name() + '.enabled', childParentEnabled)
# Child parentEnabled will retain its last value, so set it
# to True in case the collection gets parented to the render layer.
cmds.setAttr(childParentEnabled, 1)
if isinstance(child, Collection):
child.getSelector().setParent(None)
child._detach(self.getRenderLayer())
def _attach(self, layer):
"""Attach this collection."""
self._connectLayerIsolatedChildren(layer)
# Number of isolated children doesn't change when we attach.
# Update isolated children of our ancestors.
self._updateAncestorsIsolatedChildren(
self.getNumIsolatedChildren(includeSelf=True))
# Update isolated ancestors of ourselves and our children.
self._updateChildrenIsolatedAncestors(
self.getNumIsolatedAncestors(), includeSelf=True)
def _detach(self, layer):
"""Detach this collection."""
self._disconnectLayerIsolatedChildren(layer)
# Number of isolated children doesn't change when we detach.
# Update isolated children of our ancestors.
self._updateAncestorsIsolatedChildren(
-self.getNumIsolatedChildren(includeSelf=True))
# Update isolated ancestors of ourselves and our children.
self._updateChildrenIsolatedAncestors(
-self.getNumIsolatedAncestors(), includeSelf=True)
@undo.chunk('Append to collection')
def appendChild(self, child):
""" Add a child as the highest-priority child."""
if child.typeId()==RenderSettingsCollection.kTypeId \
or child.typeId()==LightsCollection.kTypeId:
raise RuntimeError(kIncorrectChildType % child.typeName())
nodeList.append(self, child)
self._childAttached(child)
@undo.chunk('Attach to collection')
def attachChild(self, pos, child):
""" Attach a child at a specific position. """
if child.typeId()==RenderSettingsCollection.kTypeId \
or child.typeId()==LightsCollection.kTypeId:
raise RuntimeError(kIncorrectChildType % child.typeName())
nodeList.insert(self, pos, child)
self._childAttached(child)
@undo.chunk('Detach from collection')
def detachChild(self, child):
""" Detach a child whatever its position. """
unapply(child) # NoOp if not applied; otherwise commands are used
# Must perform detach operations before removing from list,
# otherwise parenting information is gone.
self._detachChild(child)
nodeList.remove(self, child)
def getChildren(self, cls=childNode.ChildNode):
""" Get the list of all children.
Optionally only the children matching the given class. """
return list(nodeList.forwardListNodeClassGenerator(self, cls))
def hasChildren(self):
return self.findChild(lambda child: True) is not None
def getCollections(self):
return self.getChildren(cls=Collection)
def getCollectionByName(self, collectionName, nested=False):
for collection in nodeList.forwardListNodeClassGenerator(self, cls=Collection):
if collection.name() == collectionName:
return collection
elif nested:
collection2 = collection.getCollectionByName(collectionName, True)
if collection2:
return collection2
return None
def findChild(self, predicate, creator=None):
'''Find the child of this collection satisfying the predicate function or creates it
with the creator function if not found and a creator function is specified.
Function signatures are:
predicate(childNode): returns boolean.
creator(void) : returns the created node.'''
for child in nodeList.forwardListNodeClassGenerator(self, childNode.ChildNode):
if predicate(child):
return child
if not creator:
return None
child = creator()
self.appendChild(child)
return child
def getChild(self, childName, cls=childNode.ChildNode):
""" Look for an existing child by name and optionally class.
@type childName: string
@param childName: Name of child to look for
@type cls: class name
@param cls: Class name for the type of class to look for
@rtype: Child model instance
@return: Found instance or throw an exception
"""
if not childName:
raise Exception(kInvalidChildName)
for child in nodeList.forwardListNodeClassGenerator(self, cls):
if child.name() == childName:
return child
raise Exception(kUnknownChild % (childName, self.name()))
def isAbstractClass(self):
# Override method inherited from base class: not an abstract class.
return False
def getSelector(self):
"""Return the selector user node for this collection."""
selector = utils.getSrcUserNode(
utils.findPlug(self, Collection.aSelector))
if (selector is None):
raise Exception(kCollectionMissingSelector % self.name())
return selector
@context.applyCollection
def apply(self):
""" Apply all children in this collection. """
with profiler.ProfilerMgr('Collection::apply'):
# Apply all our children to the selection
for child in nodeList.forwardListGenerator(self):
child.apply()
# UI Feedback (progressBar)
RenderLayerSwitchObservable.getInstance().notifyRenderLayerSwitchObserver()
@context.applyCollection
def postApply(self):
'''Post applies all children in this collection. This function may be called to apply a collection (with contained overrides)
after the layer was set visible. It allows inserting new overrides in the currently visible layer
without the need to toggle visibility.'''
with profiler.ProfilerMgr('Collection::postApply'):
# Post apply all our children
for child in nodeList.forwardListGenerator(self):
child.postApply()
@context.unapplyCollection
def unapply(self):
"""Unapply all children in this collection."""
with profiler.ProfilerMgr('Collection::unapply'):
for child in nodeList.reverseListGenerator(self):
child.unapply()
# UI Feedback (progressBar)
RenderLayerSwitchObservable.getInstance().notifyRenderLayerSwitchObserver()
def getOverrides(self):
return self.getChildren(cls=override.Override)
# Collection interface as list of children.
# These methods implement the list requirements for the nodeList module.
#
# The list front and back are destination plugs connected to the child
# node's message plug (which is a source).
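    #
    # Wiring sketch (illustrative): the order is encoded in DG connections
    # rather than stored indices, e.g.
    #   lowestChild.message  (src) --> collection.childLowest  (dst)
    #   highestChild.message (src) --> collection.childHighest (dst)
    # and nodeList walks the children by following such connections.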
def _getFrontAttr(self):
return Collection.childLowest
def _getBackAttr(self):
return Collection.childHighest
def _getListItemsAttr(self):
return Collection.children
def _preChildDelete(self, child):
# Private interface for child to inform its parent that it is
# about to be deleted. Remove the child from our list.
self.detachChild(child)
def _selectedNodesChanged(self):
""" Ownership of this collection or one of its children changed """
layer = self.getRenderLayer()
if layer:
layer._selectedNodesChanged(self)
self.itemChanged()
def _selectorChanged(self):
"""Selector of this collection changed.
Identical to _selectedNodesChanged(), except that the itemChanged()
notification is given with selectorChanged=True."""
layer = self.getRenderLayer()
if layer:
layer._selectedNodesChanged(self)
self.itemChanged(selectorChanged=True)
def _refreshRendering(self):
''' Some changes impose to refresh the rendering for the visible layer only. '''
parent = self.parent()
if parent:
parent._refreshRendering()
def getLayerNumIsolatedChildren(self):
return OpenMaya.MPlug(
self.thisMObject(), Collection.layerNumIsolatedChildren).asInt()
def _getNumIsolatedChildrenPlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.numIsolatedChildren)
def getNumIsolatedChildren(self, includeSelf=False):
nic = self._getNumIsolatedChildrenPlug().asInt()
if includeSelf and self.isIsolateSelected():
nic += 1
return nic
def _getNumIsolatedAncestorsPlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.numIsolatedAncestors)
def getNumIsolatedAncestors(self):
return self._getNumIsolatedAncestorsPlug().asInt()
# See comments in RenderLayer._updateIsolateSelected.
def _updateNumIsolatedChildren(self, val):
# Use a command to support the undo mechanism
if val != 0:
newVal = self.getNumIsolatedChildren() + val
cmds.setAttr(self._getNumIsolatedChildrenPlug().name(), newVal)
def _updateNumIsolatedAncestors(self, val):
# Use a command to support the undo mechanism
if val != 0:
newVal = self.getNumIsolatedAncestors() + val
cmds.setAttr(self._getNumIsolatedAncestorsPlug().name(), newVal)
def _updateIsolateSelected(self, val):
self._updateAncestorsIsolatedChildren(val)
self._updateChildrenIsolatedAncestors(val)
def _updateAncestorsIsolatedChildren(self, val):
layer = self.getRenderLayer()
if layer:
layer._updateIsolateSelected(val)
for c in self.ancestorCollections():
c._updateNumIsolatedChildren(val)
def _updateChildrenIsolatedAncestors(self, val, includeSelf=False):
# Tell descendants there has been a change in their ancestors'
# isolate select.
for c in traverse.depthFirst(self, collections):
if c is self and not includeSelf:
continue
c._updateNumIsolatedAncestors(val)
def _connectLayerIsolatedChildren(self, layer):
# Connect subtree to layer's isolated children attribute.
if layer:
for c in traverse.depthFirst(self, collections):
c._connectSelfLayerIsolatedChildren(layer)
def _disconnectLayerIsolatedChildren(self, layer):
# Disconnect subtree from layer's isolated children attribute.
if layer:
for c in traverse.depthFirst(self, collections):
c._disconnectSelfLayerIsolatedChildren(layer)
def _connectSelfLayerIsolatedChildren(self, layer):
if layer:
# Use existing command for undo / redo purposes.
cmds.connectAttr(layer.name() + '.numIsolatedChildren',
self.name() + '.parentNumIsolatedChildren')
def _disconnectSelfLayerIsolatedChildren(self, layer):
if layer:
# Use existing command for undo / redo purposes.
cmds.disconnectAttr(layer.name() + '.numIsolatedChildren',
self.name() + '.parentNumIsolatedChildren')
def _importChild(self, childName, nodeType, selArgs=None):
name = cmds.createNode(nodeType, name=childName, skipSelect=True)
child = utils.nameToUserNode(name)
if isinstance(child, Collection):
child._createSelector(None, selArgs)
self.appendChild(child)
return child
def activate(self):
'''
Called when this list item is inserted into the list.
Override this method to do any scene specific initialization.
'''
if len(self._callbackIds) == 0:
self._callbackIds = computeEnabled.addChangeCallbacks(self)
self.getSelector().activate()
def deactivate(self):
'''
Called when this list item is removed from the list.
Override this method to do any scene specific teardown.
'''
# Remove all callbacks.
OpenMaya.MMessage.removeCallbacks(self._callbackIds)
self._callbackIds = []
self.getSelector().deactivate()
def _encodeProperties(self, dict):
super(Collection, self)._encodeProperties(dict)
dict[self._getSelfEnabledPlug().partialName(useLongNames=True)] = self.isEnabled()
dict[self._getIsolatePlug().partialName(useLongNames=True)] = self.isIsolateSelected()
if self.getSelectorType() == selector.BasicSelector.kTypeName: # backward comp with 2016 R2
selectorDict = dict
else:
selectorDict = {}
dict[jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME] = { self.getSelectorType() : selectorDict }
self.getSelector()._encodeProperties(selectorDict)
dict[jsonTranslatorGlobals.CHILDREN_ATTRIBUTE_NAME] = jsonTranslatorUtils.encodeObjectArray(self.getChildren())
def _decodeChildren(self, children, mergeType, prependToName):
jsonTranslatorUtils.decodeObjectArray(children,
jsonTranslatorUtils.MergePolicy(self.getChild,
self._importChild,
mergeType,
prependToName))
def _decodeProperties(self, dict, mergeType, prependToName):
super(Collection, self)._decodeProperties(dict, mergeType, prependToName)
if self._getSelfEnabledPlug().partialName(useLongNames=True) in dict:
self.setSelfEnabled(dict[self._getSelfEnabledPlug().partialName(useLongNames=True)])
if self._getIsolatePlug().partialName(useLongNames=True) in dict:
self.setIsolateSelected(dict[self._getIsolatePlug().partialName(useLongNames=True)])
if jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME not in dict: # backward comp with 2016 R2
self.setSelectorType(selector.BasicSelector.kTypeName)
selectorProperties = dict
else:
selectorType = dict[jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME].keys()[0]
if self.getSelectorType() != selectorType:
self.setSelectorType(selectorType)
selectorProperties = dict[jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME].values()[0]
self.getSelector()._decodeProperties(selectorProperties)
if jsonTranslatorGlobals.CHILDREN_ATTRIBUTE_NAME in dict:
self._decodeChildren(dict[jsonTranslatorGlobals.CHILDREN_ATTRIBUTE_NAME],
mergeType,
prependToName)
def acceptImport(self):
super(Collection, self).acceptImport()
for child in self.getChildren():
child.acceptImport()
def isSelfAcceptableChild(self):
"""Overridden instances that return False, prevent copy/paste of the collection type to itself."""
return True
def isAcceptableChild(self, modelOrData):
""" Check if the model could be a child"""
if isinstance(modelOrData, clipboardData.ClipboardData):
isOverride = modelOrData.typeName() in _overrideTypes
parentTypeName = modelOrData.parentTypeName
else:
isOverride = isinstance(modelOrData, override.Override)
parentTypeName = modelOrData.parent().typeName()
return isOverride and parentTypeName == self.typeName() or (modelOrData.typeName() == self.typeName() and self.isSelfAcceptableChild())
def isTopLevel(self):
"""Is the collection's parent a render layer?"""
# Don't have access to renderLayer.RenderLayer, type check on
# Collection instead.
return not isinstance(self.parent(), Collection)
def ancestorCollections(self):
"""Return this collection's ancestors.
Neither the collection itself, nor the render layer, are included
in the ancestors. Therefore, a top-level collection has no
ancestors."""
parent = self.parent()
while isinstance(parent, Collection):
yield parent
parent = parent.parent()
class LightsCollection(Collection):
"""
LightsCollection node.
A collection node specific for grouping light sources
and overrides on those light sources.
This collection should have all light sources as member by default. All nodes
matching the light classification should be returned by the selector
on this collection.
"""
kTypeId = typeIDs.lightsCollection
kTypeName = 'lightsCollection'
@staticmethod
def creator():
return LightsCollection()
@staticmethod
def initializer():
# Inherit all attributes from parent class
LightsCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(LightsCollection, self).__init__()
def typeId(self):
return LightsCollection.kTypeId
def typeName(self):
return LightsCollection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
self._createAndConnectSelector(selector.SimpleSelector.kTypeName)
# Make it select all light sources in the scene
self.getSelector().setPattern("*")
self.getSelector().setFilterType(selector.Filters.kLights)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def createCollection(self, collectionName):
""" Add a lights child collection to the Collection. """
return self._createCollection(collectionName, LightsChildCollection.kTypeName)
def isAcceptableChild(self, modelOrData):
"""Check if the argument can be a child of this collection.
We want to prevent copying LightsChildCollections in the same
LightsCollection at the expense of not being able to copy
LightsChildCollections between different LightsCollections.
"""
return False
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
class LightsChildCollection(Collection):
"""
LightsChildCollection node.
A child collection node specific for one single light source
and overrides on this light source.
"""
kTypeId = typeIDs.lightsChildCollection
kTypeName = 'lightsChildCollection'
@staticmethod
def creator():
return LightsChildCollection()
@staticmethod
def initializer():
# Inherit all attributes from parent class
LightsChildCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(LightsChildCollection, self).__init__()
def typeId(self):
return LightsChildCollection.kTypeId
def typeName(self):
return LightsChildCollection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
self._createAndConnectSelector(selector.SimpleSelector.kTypeName)
# Only accepts light sources.
self.getSelector().setFilterType(selector.Filters.kLights)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
def isAcceptableChild(self, modelOrData):
"""Check if the argument can be a child of this collection.
Pasting is prevented because the Light Editor considers only the
        first override in the LightsChildCollection. Additionally, dragging
        between overrides in LightsChildCollections is prevented so that
        overrides cannot move between incompatible LightsChildCollection
        types (i.e. point light vs. spot light).
"""
return False
class RenderSettingsCollection(Collection):
"""
Render Settings Collection node.
This collection has an ordered list of children, and a static & const selector
to determine nodes to which the children apply. The list of nodes is based
on the selected renderer at the time of creation.
MAYA-66757:
- A base collection will be needed to factorize commonalities and segregate differences.
- A static selector is needed which could be the existing static selection or an object set.
- The name is read-only.
- The selector content is read-only
- The render name should be part of the collection so that the settings are clearly linked
to the used renderer, or linked using a plug
"""
kTypeId = typeIDs.renderSettingsCollection
kTypeName = 'renderSettingsCollection'
# Type of selector created by this collection
kSelectorTypeName = selector.SimpleSelector.kTypeName
@staticmethod
def creator():
return RenderSettingsCollection()
@staticmethod
def initializer():
# A render settings collection is a render layer list element.
# inheritAttributesFrom() must be called before adding any other attributes.
RenderSettingsCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(RenderSettingsCollection, self).__init__()
@staticmethod
def containsNodeName(nodeName):
return nodeName in renderSettings.getDefaultNodes()
def _createSelector(self, parent=None, selArgs=None):
self._createAndConnectSelector(self.kSelectorTypeName)
# Set the default nodes as static selection
# Note: Some renderers could return nodes which do not exist yet.
self.getSelector().staticSelection.setWithoutExistenceCheck(renderSettings.getDefaultNodes())
self.getSelector().setFilterType(selector.Filters.kAll)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def typeId(self):
return RenderSettingsCollection.kTypeId
def typeName(self):
return RenderSettingsCollection.kTypeName
def appendChild(self, child):
if isinstance(child, Collection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(RenderSettingsCollection, self).appendChild(child)
def attachChild(self, pos, child):
if isinstance(child, Collection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(RenderSettingsCollection, self).attachChild(pos, child)
def _createCollection(self, collectionName, typeName):
raise RuntimeError(kIncorrectChildType % typeName)
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
def isAcceptableChild(self, modelOrData):
"""Check if the argument can be a child of this collection.
No collection of any kind can be a child of this collection."""
return modelOrData.typeName() not in _collectionTypes and \
super(RenderSettingsCollection, self).isAcceptableChild(modelOrData)
def _getOverrideType(self, plg, overrideType):
overrideType = super(RenderSettingsCollection, self)._getOverrideType(plg, overrideType)
return typeIDs.absUniqueOverride if overrideType == typeIDs.absOverride else typeIDs.relUniqueOverride
class AOVCollection(Collection):
"""
AOV (arbitrary output variable) parent collection node.
"""
kTypeId = typeIDs.aovCollection
kTypeName = 'aovCollection'
@staticmethod
def creator():
return AOVCollection()
@staticmethod
def initializer():
# An AOV collection is a render layer list element.
# inheritAttributesFrom() must be called before adding any other attributes.
AOVCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(AOVCollection, self).__init__()
@staticmethod
def containsNodeName(nodeName):
callbacks = rendererCallbacks.getCallbacks(rendererCallbacks.CALLBACKS_TYPE_AOVS)
try:
callbacks.getAOVName(nodeName)
return True
except:
return False
def _createSelector(self, parent=None, selArgs=None):
# Selector type name argument is ignored.
self._createAndConnectSelector('')
def _createSelectorNode(self, typeName, selectorName, selArgs):
# Ignore the argument selector type name: get the AOV collection
# selector from the AOV renderer callback.
callbacks = rendererCallbacks.getCallbacks(rendererCallbacks.CALLBACKS_TYPE_AOVS)
return callbacks.getCollectionSelector(selectorName)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def typeId(self):
return AOVCollection.kTypeId
def typeName(self):
return AOVCollection.kTypeName
def appendChild(self, child):
if isinstance(child, Collection) and not isinstance(child, AOVChildCollection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(AOVCollection, self).appendChild(child)
def attachChild(self, pos, child):
if isinstance(child, Collection) and not isinstance(child, AOVChildCollection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(AOVCollection, self).attachChild(pos, child)
# This should never be called, as AOVCollections are created in renderLayer.py in aovCollectionInstance()
def _createCollection(self, collectionName, typeName):
raise RuntimeError(kIncorrectChildType % typeName)
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
class AOVChildCollection(Collection):
"""
AOV (arbitrary output variable) Child Collection node.
"""
kTypeId = typeIDs.aovChildCollection
kTypeName = 'aovChildCollection'
@staticmethod
def creator():
return AOVChildCollection()
@staticmethod
def initializer():
# Inherit all attributes from parent class
AOVChildCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(AOVChildCollection, self).__init__()
def containsNodeName(self, nodeName):
return nodeName in self.getSelector().getAbsoluteNames()
def typeId(self):
return AOVChildCollection.kTypeId
def typeName(self):
return AOVChildCollection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
# Selector type name argument is ignored.
self._createAndConnectSelector('', selArgs)
def _createSelectorNode(self, typeName, selectorName, selArgs):
# Ignore the argument selector type name: get the AOV child
# collection selector from the AOV renderer callback.
#
# selArgs is a dictionary for selector argument
# construction. It must contain a value for 'aovName'.
callbacks = rendererCallbacks.getCallbacks(rendererCallbacks.CALLBACKS_TYPE_AOVS)
return callbacks.getChildCollectionSelector(selectorName, selArgs['aovName'])
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
def isSelfAcceptableChild(self):
"""This code prevents copy/paste of AOV child collections to themselves/other AOV child collections."""
return False
@undo.chunk('Create collection')
@namespace.root
def create(name, nodeType=Collection.kTypeName, parent=None, **selArgs):
""" Create a collection.
Returns the MPxNode object corresponding to the created
collection node. A RuntimeError is raised in case of error.
The selArgs keyword arguments are passed along to the selector creation.
This function is undoable.
"""
# collection names should never contain namespace delimiter or other invalid characters
# collections belong to current namespace (i.e. root)
name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
if isinstance(nodeType, basestring):
typeName = nodeType
else:
typeName = cmds.objectType(typeFromTag=nodeType.id())
# To avoid writing a command to implement collection creation,
# re-use existing name-based commands for undo / redo purposes, since
# collection creation is not performance-critical. If the name
# flag is specified, it cannot be an empty string.
returnCollectionName = cmds.createNode(
typeName, name=name, skipSelect=True) if name else \
cmds.createNode(typeName, skipSelect=True)
collection = utils.nameToUserNode(returnCollectionName)
collection._createSelector(parent=parent, selArgs=selArgs)
return collection
@undo.chunk('Delete collection')
def delete(collection):
"""Remove the argument collection from the scene.
All overrides and sub-collections in the collection are removed."""
# Inform our parent (if any) of upcoming delete.
# This will remove the collection from its parent,
# and will trigger deactivation of the collection
# causing it and the selector to stop listening to scene and attribute changes.
# Need to call _preChildDelete before removing children, otherwise we lose the parenting information
# to the children which may be used by the parent (ex: renderLayers use that information
# to determine if they need to be refreshed).
parent = collection.parent()
if parent:
parent._preChildDelete(collection)
# Delete the children.
for child in collection.getChildren():
if isinstance(child, Collection):
delete(child)
else:
override.delete(child)
# Deleting the selector means unhooking the selector node
# from the collection and removing it from the scene.
collection._deleteSelector()
# Deleting the node will remove it from the scene.
utils.deleteNode(collection)
@undo.chunk('Unapply a collection')
def unapply(collection):
''' Command to unapply a collection '''
if isinstance(collection, Collection):
for c in collection.getChildren():
unapply(c)
else:
# End of recursion so unapply the override
# using a command
override.UnapplyCmd.execute(collection)
def getAllCollectionClasses():
""" Returns the list of Collection subclasses """
return commonUtils.getSubClasses(Collection)
_collectionTypes = { c.kTypeName for c in getAllCollectionClasses() }
_overrideTypes = { o.kTypeName for o in overrideUtils.getAllOverrideClasses() }
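# Example usage (a hedged sketch, not part of the original module; assumes a
# Maya session with Render Setup loaded and a SimpleSelector-based collection):
#   col = create('myCollection')           # make a collection node (undoable)
#   col.getSelector().setPattern('*_GEO')  # select members by name pattern
#   unapply(col)                           # pop any applied overrides
#   delete(col)                            # remove col, its overrides and children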
# ===========================================================================
# Copyright 2016 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license
# agreement provided at the time of installation or download, or which
# otherwise accompanies this software in either electronic or hard copy form.
# ===========================================================================
| [
"[email protected]"
]
| |
9f505f5cd70c6413e2aa1448ca220b48048f7e03 | 0e74c5316187766c833742ff45beac1afe80f803 | /9-2/3/3.py | 556e4499f69807030c0397ba88e588c71daa30c4 | []
| no_license | r4pidstart/hyu-isd | 950d3195590ee34244e7cf476abc94c0d462a2ec | 281da38dfa68f9ee27ff216b11ce5f6d7d16e7a5 | refs/heads/main | 2023-06-03T09:44:40.915065 | 2021-06-22T02:58:10 | 2021-06-22T02:58:10 | 345,556,758 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def recursion(n):
    # Naive Fibonacci: F(0) = 0, F(1) = 1, F(n) = F(n-1) + F(n-2).
    if n == 0:
        return 0
    if n == 1:
        return 1
    return recursion(n-1) + recursion(n-2)
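# Hedged aside (not in the original): the same recurrence memoized with
# functools.lru_cache runs in O(n) time instead of O(2^n), e.g.
#   from functools import lru_cache
#   @lru_cache(maxsize=None)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)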
n=int(input())
print(recursion(n)) | [
"[email protected]"
]
| |
4c97d826d472e0652439b84bf0a10fcd553a634a | 3f7fcd4caf0f1caf2b4cdb71f5691ea504358292 | /sampletest3.py | d81d1e4bb717df1f0c61f56b69257a1a18f0657d | []
| no_license | tarak1006/python | 2369d16da5d17c90462068480461f32e3320258a | 1aa1ce6429a3a836073b8a8a359ef8ca91b3bfea | refs/heads/master | 2021-07-10T18:01:30.353637 | 2017-10-14T14:02:34 | 2017-10-14T14:02:34 | 106,932,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
# Length of the longest chain a[i1], a[i2], ... (indices increasing) in which
# each element divides the next one.
a = [2, 11, 16, 12, 36, 60, 71, 17, 29, 144, 288, 129, 432, 993]
n = len(a)
def Best(i):
    """Longest divisible chain ending at index i (chains respect list order)."""
    if i == 0:
        return 1
    m = 1
    for j in range(i):
        if a[i] % a[j] == 0:
            m = max(m, Best(j) + 1)
    return m
res=[]
for j in range(n):
res.append(Best(j))
print(max(res))
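# Hedged note (not in the original): Best(j) can be recomputed many times;
# caching results (e.g. functools.lru_cache) keeps the work at O(n^2). The
# classic "largest divisible subset" variant also sorts the list first, so
# chains need not follow the original element order.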
| [
"[email protected]"
]
| |
9e7c5e1a231a4c91b408a95ff09e2b71dcfa7f70 | 1348bddbb856b592d1d17a96e36447d74d1d5cc5 | /Lab8/Lab.py | f015a506dbb7b712066f2b022dbb877189245180 | []
| no_license | pivaszbs/DS_labs | d7802b5841c2d084695f4c45fc05fafdd943dc7d | d522a95180670776323cb78b335988bad80ab16e | refs/heads/master | 2020-08-05T05:08:31.215941 | 2019-11-01T16:23:46 | 2019-11-01T16:23:46 | 212,407,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,498 | py | from multiprocessing import Process, Pipe
from os import getpid
from datetime import datetime
def local_time(counter):
return ' (LAMPORT_TIME={}, LOCAL_TIME={})'.format(counter,
datetime.now())
def calc_recv_timestamp(recv_time_stamp, counter):
    # Vector-clock merge: take the element-wise maximum of the received
    # timestamp and the local counter.
    for i in range(len(counter)):
        counter[i] = max(recv_time_stamp[i], counter[i])
    return counter
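# Illustration (hedged, not from the original lab): merging a received stamp
# [2, 0, 1] into a local counter [1, 3, 1] yields [2, 3, 1]; the events and
# sends below then increment only this process's own slot.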
def event(pid, counter):
    # Internal event: tick this process's own slot.
    counter[pid] += 1
    return counter
def send_message(pipe, pid, counter):
    # Tick our slot, then ship the payload together with the vector clock.
    counter[pid] += 1
    pipe.send(('Empty shell', counter))
    return counter
def recv_message(pipe, pid, counter):
    # Merge the sender's vector clock into the local one.
    message, timestamp = pipe.recv()
    counter = calc_recv_timestamp(timestamp, counter)
    return counter
def process_one(pipe12):
pid = 0
counter = [0, 0, 0]
counter = send_message(pipe12, pid, counter)
counter = send_message(pipe12, pid, counter)
counter = event(pid, counter)
counter = recv_message(pipe12, pid, counter)
counter = event(pid, counter)
counter = event(pid, counter)
counter = recv_message(pipe12, pid, counter)
print('Process {} has lamport_time={}'.format(pid,counter))
def process_two(pipe21, pipe23):
pid = 1
counter = [0, 0, 0]
counter = recv_message(pipe21, pid, counter)
counter = recv_message(pipe21, pid, counter)
counter = send_message(pipe21, pid, counter)
counter = recv_message(pipe23, pid, counter)
counter = event(pid, counter)
counter = send_message(pipe21, pid, counter)
counter = send_message(pipe23, pid, counter)
counter = send_message(pipe23, pid, counter)
print('Process {} has lamport_time={}'.format(pid,counter))
def process_three(pipe32):
pid = 2
counter = [0, 0, 0]
counter = send_message(pipe32, pid, counter)
counter = recv_message(pipe32, pid, counter)
counter = event(pid, counter)
counter = recv_message(pipe32, pid, counter)
print('Process {} has lamport_time={}'.format(pid,counter))
if __name__ == '__main__':
oneandtwo, twoandone = Pipe()
twoandthree, threeandtwo = Pipe()
process1 = Process(target=process_one,
args=(oneandtwo,))
process2 = Process(target=process_two,
args=(twoandone, twoandthree))
process3 = Process(target=process_three,
args=(threeandtwo,))
process1.start()
process2.start()
process3.start()
process1.join()
process2.join()
process3.join() | [
"[email protected]"
]
| |
4ea7ca6c5ee15075612d8d24d97f7d8d407e1ee1 | 8e427e952b67d0cb70f4ae74107b076db1bd08b9 | /tests_py/test_session.py | 0a11d8aea951bbf8733a96875141a9e17b166b78 | []
| no_license | brandonkrull/edgar-analytics | eab4ebfe55c18ac09419d8fb6c1b644a50377d24 | 6f0fb0d78ae4ec0b149d0602419c7db7cfb4fd2a | refs/heads/master | 2020-03-18T11:22:40.032862 | 2018-05-29T15:57:33 | 2018-05-29T15:57:33 | 134,667,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | from src.processor import Session
from dateutil.parser import parse
record = {'ip': '123.123.123.1', 'date': '0366-12-24', 'time': '23:59:59'}
baddate = {'ip': '123.123.123.1', 'date': '0x66-12-24', 'time': '23:59:59'}
badtime = {'ip': '123.123.123.1', 'date': '0366-12-24', 'time': 'z3:59:59'}
def test_session_init_bad_date():
    # parse() should raise ValueError on the malformed date before a Session
    # is ever constructed.
    try:
        now = parse(baddate['date'] + ' ' + baddate['time'])
        sess = Session(start=now)
except ValueError:
assert True
else:
assert False
def test_session_init_bad_time():
try:
now = parse(badtime['date'] + ' ' + badtime['time'])
sess = Session(start=now)
except ValueError:
assert True
else:
assert False
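# Hedged aside (not in the original tests): pytest.raises expresses the same
# intent more directly, e.g.
#   import pytest
#   with pytest.raises(ValueError):
#       Session(start=parse(badtime['date'] + ' ' + badtime['time']))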
def test_session_init():
now = parse(record['date'] + ' ' + record['time'])
sess = Session(start=now, sleep=0)
assert sess.start == now
assert sess.end == now
assert sess.count == 1
def test_date_output_format():
now = parse(record['date'] + ' ' + record['time'])
expected = record['date'] + ' ' + record['time']
sess = Session(start=now, sleep=0)
output = sess._format_dt_for_output(sess.start)
assert output == expected
| [
"[email protected]"
]
| |
ff96ced9ce7021a3e0768e0e4493dcaaee8df6fd | a6086dcd794ee1419081761e473433081249059f | /app/api/errors.py | 9e92b5acf600ead372909b7faad5a3d73fe777ea | []
| no_license | billy0402/flask-stock-api | f1d6f51d7d67300eccc2d7621eacc41f3a8ec609 | 2d656c80b2a062f8dd4f7f8466ed3060f7d56477 | refs/heads/master | 2023-07-18T15:40:53.869479 | 2021-09-08T18:57:47 | 2021-09-08T18:57:47 | 402,569,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | from flask import jsonify
from . import api
from ..exceptions import ValidationError
def bad_request(message):
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
def unauthorized(message):
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
def forbidden(message):
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
@api.errorhandler(ValidationError)
def validation_error(error):
return bad_request(error.args[0])
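# Usage sketch (an assumption, not from this module): raising
# ValidationError('symbol is required') inside any view of this blueprint
# returns a 400 JSON body {"error": "bad request", "message": ...} via the
# handler above.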
| [
"[email protected]"
]
| |
156d87bc9085a8d0dd814ef54df798ad47e64e6f | b94f318593fa057f6cf00e8a568430c1086df15a | /users/urls.py | 41f707807a5b27207c68572afc366b8e116b2914 | []
| no_license | bitpixdigital/django-next-train | 6b1cfe3ea6f7c313db6d607c87833a91089203a3 | 5bce0974a5f3f0ef8433decec1505bc417ebee13 | refs/heads/master | 2021-01-17T08:12:39.670325 | 2016-07-15T15:17:02 | 2016-07-15T15:17:02 | 62,678,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | """Defines URL patterns for users"""
from django.conf.urls import url
from django.contrib.auth.views import login
from . import views
urlpatterns = [
# Login page
url(r'^login/$', login, {'template_name': 'users/login.html'},
name='login'),
# Logout
url(r'^logout/$', views.logout_view, name='logout'),
# Registration page
url(r'^register/$', views.register, name='register'),
]
| [
"angel@bitpixdigital"
]
| angel@bitpixdigital |
9274e32c22a557cd446ea41224a83a04fd4a48f3 | 9eb9bd999f6502125c1f5aae2abff5d67e322dfc | /apps/csat/migrations/0015_auto_20210723_1858.py | aa8a156316ee36fad91b81939979822b53ae8476 | []
| no_license | lolsecret/csat | 05eb3d208957d81496c9563d0ef0e7364b3462f8 | 551755b26be204cbcb5082901a5ba8a4904c44b5 | refs/heads/master | 2023-08-16T14:25:35.867370 | 2021-10-07T11:23:57 | 2021-10-07T11:23:57 | 386,858,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | # Generated by Django 3.2.5 on 2021-07-23 12:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('csat', '0014_auto_20210723_1835'),
]
operations = [
migrations.CreateModel(
name='UserQuestions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Время создания')),
('changed_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Время последнего изменения')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_forms', to='csat.applicationquestion', verbose_name='Вопросы')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='application_form', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
],
options={
'abstract': False,
},
),
migrations.DeleteModel(
name='UserApplicationForm',
),
]
| [
"[email protected]"
]
| |
f03b3454fdbdf031adc9ad7bf8439e2ad21b408c | 0afbba909a82d4dd610296453c1c7b38c82fc192 | /Amazon_Price_Tracker.py | 63f490c28729c58efd352f9c20530a87b757ac29 | []
| no_license | Addy-Coder/Amazon_Price_Tracker | 1c7cb17d03e7d6d63ce60c093fa3079bb275254f | 1b92e9ccf66903e0ff6d8ef267575868935dab28 | refs/heads/master | 2020-12-14T20:24:30.985057 | 2020-01-19T07:43:41 | 2020-01-19T07:43:41 | 234,858,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,602 | py | import smtplib
from bs4 import BeautifulSoup
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time
import sys
# Make sure that you have enabled security access for the Google account below.
EMAIL_ADDRESS = 'Provide Email'
EMAIL_PASSWORD = 'Provide Password'
price_compare = sys.argv[1]
sending_email = sys.argv[2]
url = sys.argv[3]
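# Usage, inferred from the argv order above:
#   python Amazon_Price_Tracker.py <target_price> <recipient_email> <product_url>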
# Helpers are defined once at module level; the polling loop below calls them
# every hour.
def getPrice(page_source):
    # page_source is the rendered HTML of the product page, not a URL.
    soup = BeautifulSoup(page_source, 'lxml')
    title = soup.find("span", {"id": "productTitle"}).get_text().replace('\n', '').strip()
    # Discounted items expose "priceblock_dealprice"; fall back to the regular
    # "priceblock_ourprice" element otherwise.
    price_tag = soup.find("span", {"id": "priceblock_dealprice"}) \
        or soup.find("span", {"id": "priceblock_ourprice"})
    # Drop the two-character currency prefix and the thousands separators.
    price = float(price_tag.get_text()[2:].replace(',', '').strip())
    print("---------------------------------")
    print(title)
    print(price)
    print("---------------------------------")
    return price
def sendMail(url):
    print("Sending mail......")
    with smtplib.SMTP("smtp.gmail.com", 587) as smtp:
        smtp.ehlo()
        smtp.starttls()
        smtp.ehlo()
        smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
        subject = "CHECK OUT THE NEW PRICE"
        body = "Price Dropped : " + url
        msg = f'Subject: {subject}\n\n{body}'
        smtp.sendmail(EMAIL_ADDRESS, sending_email, msg)
        print("Your Email has been sent successfully to " + sending_email)
#url = 'https://www.amazon.in/Gaming-G3-3579-i5-8300H-Windows-Graphics/dp/B07XZFYQ18/ref=sr_1_1_sspa?crid=1UHDM8R5ZWB9W&keywords=lenovo+laptops&qid=1579367794&smid=A14CZOWI0VEHLG&sprefix=lenovpo%2Caps%2C381&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEyODFKV1kxU05NU0g3JmVuY3J5cHRlZElkPUEwNjIzMTk4Mk9PSjVDNkQ3WkRHQSZlbmNyeXB0ZWRBZElkPUEwMjA4NjY2MzFKN0xZWFc3OVZaMCZ3aWRnZXROYW1lPXNwX2F0ZiZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU='
#url = 'https://www.amazon.in/Skybags-Crew-06-Laptop-Backpack/dp/B07P9JJB2X/ref=sr_1_4_sspa?keywords=skybags&qid=1579411412&sr=8-4-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUFYVTY3WkIyUzBHM00mZW5jcnlwdGVkSWQ9QTA3MzU1MDFISk1COEdaWFo2RTAmZW5jcnlwdGVkQWRJZD1BMDI3NTg2ME9aNVZSSDUwUlZUTSZ3aWRnZXROYW1lPXNwX2F0ZiZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU='
#url = 'https://www.amazon.in/dp/B07TM676N1/ref=s9_acsd_al_bw_c2_x_0_t?pf_rd_m=A1K21FY43GMZF8&pf_rd_s=merchandised-search-3&pf_rd_r=Z7E0858VT26CM6JM87N3&pf_rd_t=101&pf_rd_p=7d57b3de-af2a-486a-86e4-68512d72663d&pf_rd_i=20669076031'
#url = 'https://www.amazon.in/Apple-MacBook-Air-13-3-inch-MQD32HN/dp/B073Q5R6VR/ref=lp_10559548031_1_1?s=computers&ie=UTF8&qid=1579412452&sr=1-1'
while 1:
    # Render the page with Selenium so JavaScript-inserted prices are present.
    driver = webdriver.Chrome(ChromeDriverManager().install())
    driver.get(url)
    res = driver.execute_script("return document.documentElement.outerHTML")
    driver.quit()
    # Parse once and compare against the target price from the command line.
    price_current = getPrice(res)
    if price_current < int(price_compare):
        print("*** Price Dropped ***")
        sendMail(url)
    # Poll again in an hour.
    time.sleep(60 * 60)
| [
"[email protected]"
]
| |
95b9fdca571f3e098ef2c1ff21e6bd48597afc65 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/ModifyHostInfoRequest.py | b5160544bcb63311836cf513c07824b15c12694d | [
"Apache-2.0"
]
| permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,332 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyHostInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'ModifyHostInfo','cms')
self.set_method('POST')
def get_HostName(self):
return self.get_query_params().get('HostName')
def set_HostName(self,HostName):
self.add_query_param('HostName',HostName)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId) | [
"[email protected]"
]
| |
84f632af9d00e78171570751ffcb0138ca9b2fe0 | 21f9c75a4dacc1d3a596296ce2f3896a1b93d9fa | /src/pharmacy_counting.py | b780795f1faed2c6cc9251e055851db9157d9e11 | []
| no_license | richajain44/insightData | 60904c7f9fa1ced65bacc22933c2d0e37e0ff31e | 88046143331555e1089bc3c596667f57fdc8ef1e | refs/heads/master | 2020-04-19T03:35:34.247990 | 2019-01-28T09:56:06 | 2019-01-28T09:56:06 | 167,938,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | """
This program takes in input file de_cc_data and outputs top_cost_drug.
It calculates, for each drug, the number of unique prescribers and the
total drug cost.
run.sh provides the necessary input and output paths
"""
#Importing necessary libraries
#sys is used for input and output reading
#csv is used to read the input file
import sys
import csv
#Function implementing the core functionalities of the program
#Input: input file i.e. de_cc_data
#Returns : two dictionaries - the count of unique prescribers for each drug
# and the total cost of each drug.
#It also returns a list of lines which could not be processed by the program
def costing_counting(input_file):
    prescribers = {}  # dictionary mapping drug -> set of unique prescribers
    cost_dict = {}    # dictionary mapping drug -> total cost
    outlier = []      # list capturing unparseable rows
with open(input_file,'rt') as f:
next(f) #skip the header
        reader = csv.reader(f, delimiter=',')
        for line in reader:
            # Guard each row individually so one malformed record becomes an
            # outlier instead of aborting the whole file.
            try:
                # cost arrives as a string like "123.45"; the output wants an integer
                cost = int(float(line[-1].rstrip()))
                if line[3] not in cost_dict:
                    cost_dict[line[3]] = cost  # first record for this drug
                else:
                    cost_dict[line[3]] += cost  # accumulate the running total
                if line[3] not in prescribers:
                    prescribers[line[3]] = {line[0]}  # set keeps prescriber ids unique
                else:
                    prescribers[line[3]].add(line[0])
            except (IndexError, ValueError):
                outlier.append(line)
return prescribers,cost_dict,outlier
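# Illustrative shapes (a hedged example, not from the source): for input rows
# (id1,...,drugA,100.0) and (id2,...,drugA,50.0) the function returns
# prescribers == {'drugA': {'id1', 'id2'}} and cost_dict == {'drugA': 150}.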
#main function
if __name__=='__main__':
print("inside main")
input_file = sys.argv[1]
output_file =sys.argv[2]
prescribers,cost_dict,outlier=costing_counting(input_file)
print(len(prescribers),len(cost_dict))
prescribers_final = {} #dict to count the number of precribers
for k ,v in prescribers.items():
prescribers_final.update({k:len(v)})
    # sort by total cost descending; break ties alphabetically by drug name
    sorted_cost = sorted(cost_dict.items(), key=lambda kv: (-kv[1], kv[0]))
    # write the header and the rows in one pass; 'w' makes reruns overwrite the
    # file instead of appending duplicate results
    with open(output_file, 'w') as f:
        f.write('drug_name,num_prescriber,total_cost\n')
        for drug, total_cost in sorted_cost:
            if drug in prescribers_final:
                f.write(drug + ',' + str(prescribers_final[drug]) + ',' + str(total_cost) + '\n')
| [
"[email protected]"
]
| |
01b7426b8af93d364a09f009a51635ebc62a467c | 49e0b6094a6841efd74ba57cd01913b465223333 | /tests/test_hash_tables.py | dde5fcf272c310cc928176b9e022f0c7aac4fb07 | []
| no_license | HamzaQahoush/data-structures-and-algorithms--Python | 1c2fdfc8b90efc190108ed139372591741d5acc7 | 81bc4424065bc6b7ef99ab4dbba60524a75058a4 | refs/heads/master | 2023-07-15T04:03:05.158576 | 2021-08-05T17:34:47 | 2021-08-05T17:34:47 | 376,792,369 | 0 | 1 | null | 2021-08-05T17:29:16 | 2021-06-14T11:00:05 | Python | UTF-8 | Python | false | false | 2,263 | py | from data_structures_and_algorithms_python.data_structures.hashtable.hashtable import *
# 1 Adding a key/value to your hashtable results in the value
# being in the data structure
def test_add_value():
hashtable = Hashtable()
hashtable.add('Hamza', 'Qahoush')
actual = ''
for i in hashtable.array:
if i:
actual += i.__str__()
expected = "[['Hamza', 'Qahoush']]"
assert actual == expected
# 2 Retrieving based on a key returns the value stored
def test_get_value():
hashtable = Hashtable()
hashtable.add('Hamza', 'Qahoush')
actual = hashtable.get('Hamza')
expected = 'Qahoush'
assert actual == expected
# 3 Successfully returns null for a key that does not exist in the hashtable
def test_null_for_not_exist_key():
hashtable = Hashtable()
hashtable.add('Hamza', 'Qahoush')
actual = hashtable.get('Hadi')
expected = None
assert actual == expected
# 4 Successfully handle a collision within the hashtable
def test_collision():
hashtable = Hashtable()
hashtable.add(
'ease', ' it\'s verb , mean make something unpleasant, painful, or intense less serious or severe.')
hashtable.add('seas', ' it\'s Noun plural of sea')
actual = []
for i in hashtable.array:
if i:
actual.append(i.__str__())
expected = ['[[\'seas\', " it\'s Noun plural of sea"]]',
'[[\'ease\', " it\'s verb , mean make something unpleasant, painful, or intense less serious or severe."]]']
assert actual == expected
# 5 Successfully retrieve a value from a bucket within the hashtable that has a collision
def test_retrieve_value_from_collision():
hashtable = Hashtable()
hashtable.add(
'GRAINED', 'Having a grain or markings due to a particular arrangement, as in wood')
hashtable.add(
'READING', 'The act of looking at printed words and comprehending them')
actual = hashtable.get('READING')
expected = 'The act of looking at printed words and comprehending them'
assert actual == expected
# 6 Successfully hash a key to an in-range value
def test_hash_key():
hashtable = Hashtable()
actual = f"{hashtable.hash('Name')}"
expected = '401'
assert actual == expected
| [
"[email protected]"
]
| |
e31e3067d04cecd66e2553c00fd595e0672a1503 | a92662f0b14ddb2ae2d49d82584040c328bc38b0 | /_starter/04_while_2.py | 0aeba58fe6e0738380fb5040816589ee9c84bebd | []
| no_license | DeeCoob/hello_python | 34c80e78dece35d7b03bd914bd462390288b336a | 219834031b0655a764de82c68cd47c218be4cc35 | refs/heads/master | 2020-04-29T00:17:41.133690 | 2019-03-20T21:24:00 | 2019-03-20T21:24:00 | 175,688,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | number = 0
count = 0
# Keep asking until a positive integer is entered, allowing three attempts.
while number <= 0:
    number = int(input("Enter positive integer: "))
    count += 1
    print("Try number", count)
    # Give up only if the third attempt also failed.
    if count == 3 and number <= 0:
        print("Try is done, exiting...")
        exit()
print("You entered number", number)
| [
"[email protected]"
]
| |
432aae4837c6d251b61eb69326fd327cebce4c6c | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/resourcecenter/serializers/processing_metrics_serializers.py | bb72d5540d96efd33b60750a04d702611cbf0b03 | [
"MIT"
]
| permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 2,364 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
from django.utils.translation import ugettext as _
from rest_framework import serializers
from common.exceptions import ValidationError
class ProcessingMetricSummarySerializer(serializers.Serializer):
    start_time = serializers.CharField(label=_("Start date"))
    end_time = serializers.CharField(label=_("End date"))
    geog_area_code = serializers.CharField(required=False, label=_("Region"))
def validate_start_time(self, start_time):
try:
datetime.datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
except ValueError:
raise ValidationError(_("开始日期,格式为YYYY-MM-DD HH:mm:SS"))
return start_time
def validate_end_time(self, end_time):
try:
datetime.datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
except ValueError:
raise ValidationError(_("结束日期,格式为YYYY-MM-DD HH:mm:SS"))
return end_time
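# Usage sketch (an assumption, not from this module): in a DRF view,
#   serializer = ProcessingMetricSummarySerializer(data=request.query_params)
#   serializer.is_valid(raise_exception=True)
#   params = serializer.validated_data  # start/end times already validated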
| [
"[email protected]"
]
| |
15d69568efc05a970d5ddadd64603478733f8e55 | 668666cffdb0c0dbfed9af931b39b2e3fcb697d1 | /pycsw/util.py | 5a0361601ceab16916cebf340ff4b88229dac9fc | [
"MIT"
]
| permissive | doclements/pycsw | ee8de72c5a3ed2f7f08b0d5e3f0e482904a43a35 | 07ab48e02152b517e1ec5d1952a2aadfae9db191 | refs/heads/master | 2020-12-25T12:40:41.340666 | 2012-07-01T16:27:31 | 2012-07-01T16:27:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,072 | py | # -*- coding: iso-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2010 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import time
import datetime
from lxml import etree
from shapely.wkt import loads
def get_today_and_now():
''' Get the date, right now, in ISO8601 '''
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())
def datetime2iso8601(value):
''' Return a datetime value as ISO8601 '''
if isinstance(value, datetime.date):
return value.strftime('%Y-%m-%d')
if value.hour == 0 and value.minute == 0 and value.second == 0:
# YYYY-MM-DD only
return value.strftime('%Y-%m-%d')
else:
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_time_iso2unix(isotime):
''' Convert ISO8601 to UNIX timestamp '''
return int(time.mktime(time.strptime(
isotime, '%Y-%m-%dT%H:%M:%SZ'))) - time.timezone
def get_version_integer(version):
''' Get an integer of the OGC version value x.y.z '''
if version is not None: # split and make integer
xyz = version.split('.')
if len(xyz) != 3:
return -1
try:
return int(xyz[0]) * 10000 + int(xyz[1]) * 100 + int(xyz[2])
except Exception, err:
raise RuntimeError('%s' % str(err))
else: # not a valid version string
return -1
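# e.g. get_version_integer('2.0.2') == 20002; None or a string without exactly
# three dot-separated parts yields -1.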
def find_exml(val, attrib=False):
''' Test that the XML value exists, return value, else return None '''
if val is not None:
if attrib: # it's an XML attribute
return val
else: # it's an XML value
return val.text
else:
return None
def nspath_eval(xpath, nsmap):
''' Return an etree friendly xpath '''
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (nsmap[namespace], element))
return '/'.join(out)
def xmltag_split(tag):
''' Return XML element bare tag name (without prefix) '''
try:
return tag.split('}')[1]
except:
return tag
def xmltag_split2(tag, namespaces, colon=False):
''' Return XML namespace prefix of element '''
try:
nsuri = tag.split('}')[0].split('{')[1]
nsprefix = [key for key, value in namespaces.iteritems() \
if value == nsuri]
value = nsprefix[0]
if colon:
return '%s:' % nsprefix[0]
else:
return nsprefix[0]
except:
return ''
def bbox2wktpolygon(bbox):
''' Return OGC WKT Polygon of a simple bbox string '''
tmp = bbox.split(',')
minx = float(tmp[0])
miny = float(tmp[1])
maxx = float(tmp[2])
maxy = float(tmp[3])
return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
% (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
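# e.g. bbox2wktpolygon('-180,-90,180,90') returns the closed ring
# 'POLYGON((-180.00 -90.00, -180.00 90.00, 180.00 90.00, 180.00 -90.00, -180.00 -90.00))'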
def query_spatial(bbox_data_wkt, bbox_input_wkt, predicate, distance):
''' perform spatial query '''
if bbox_data_wkt is None or bbox_input_wkt is None:
return 'false'
if predicate in ['beyond', 'dwithin'] and distance == 'false':
return 'false'
if bbox_data_wkt.find('SRID') != -1: # it's EWKT; chop off 'SRID=\d+;'
bbox1 = loads(bbox_data_wkt.split(';')[-1])
else:
bbox1 = loads(bbox_data_wkt)
bbox2 = loads(bbox_input_wkt)
# map query to Shapely Binary Predicates:
if predicate == 'bbox':
result = bbox1.intersects(bbox2)
elif predicate == 'beyond':
result = bbox1.distance(bbox2) > float(distance)
elif predicate == 'contains':
result = bbox1.contains(bbox2)
elif predicate == 'crosses':
result = bbox1.crosses(bbox2)
elif predicate == 'disjoint':
result = bbox1.disjoint(bbox2)
elif predicate == 'dwithin':
result = bbox1.distance(bbox2) <= float(distance)
elif predicate == 'equals':
result = bbox1.equals(bbox2)
elif predicate == 'intersects':
result = bbox1.intersects(bbox2)
elif predicate == 'overlaps':
if bbox1.intersects(bbox2) and not bbox1.touches(bbox2):
result = True
else:
result = False
elif predicate == 'touches':
result = bbox1.touches(bbox2)
elif predicate == 'within':
result = bbox1.within(bbox2)
else:
raise RuntimeError, ('Invalid spatial query predicate: %s' % predicate)
if result:
return 'true'
else:
return 'false'
def bbox_from_polygons(bboxs):
''' Derive an aggregated bbox from n polygons'''
from shapely.geometry import MultiPolygon
polys = []
for b in bboxs:
polys.append(loads(b))
try:
b = MultiPolygon(polys).bounds
bstr = '%.2f,%.2f,%.2f,%.2f' % (b[0], b[1], b[2], b[3])
return bbox2wktpolygon(bstr)
except Exception, err:
raise RuntimeError, ('Cannot aggregate polygons: %s' % str(err))
def update_xpath(nsmap, xml, recprop):
''' Update XML document XPath values '''
if isinstance(xml, unicode): # not lxml serialized yet
xml = etree.fromstring(xml)
recprop = eval(recprop)
nsmap = eval(nsmap)
try:
nodes = xml.xpath(recprop['rp']['xpath'], namespaces=nsmap)
if len(nodes) > 0: # matches
for node1 in nodes:
if node1.text != recprop['value']: # values differ, update
node1.text = recprop['value']
except Exception, err:
raise RuntimeError, ('ERROR: %s' % str(err))
return etree.tostring(xml)
def transform_mappings(queryables, typename, reverse=False):
''' transform metadata model mappings '''
if reverse: # from csw:Record
for qbl in queryables.keys():
if qbl in typename.values():
tmp = [k for k, v in typename.iteritems() if v == qbl][0]
val = queryables[tmp]
queryables[qbl] = {}
queryables[qbl]['xpath'] = val['xpath']
queryables[qbl]['dbcol'] = val['dbcol']
else: # to csw:Record
for qbl in queryables.keys():
if qbl in typename.keys():
queryables[qbl] = queryables[qbl]
def get_anytext(xml):
''' get all element and attribute data from an XML document '''
if isinstance(xml, unicode) or isinstance(xml, str): # not serialized yet
xml = etree.fromstring(xml)
return '%s %s' % (' '.join([value for value in xml.xpath('//text()')]),
' '.join([value for value in xml.xpath('//attribute::*')]))
def exml2dict(element, namespaces):
''' Convert an lxml object to JSON
From:
https://bitbucket.org/smulloni/pesterfish/src/1578db946d74/pesterfish.py
'''
d=dict(tag='%s%s' % \
(xmltag_split2(element.tag, namespaces, True), xmltag_split(element.tag)))
if element.text:
if element.text.find('\n') == -1:
d['text']=element.text
if element.attrib:
        # 'f' in the original pesterfish snippet was the enclosing function;
        # recurse for mapping-like values (attribute values are normally
        # plain strings, so the plain branch is the common case).
        d['attributes']=dict(('%s%s' %(xmltag_split2(k, namespaces, True), \
        xmltag_split(k)), exml2dict(v, namespaces) if hasattr(v,'keys') else v) \
        for k,v in element.attrib.items())
children=element.getchildren()
if children:
d['children']=map(lambda x: exml2dict(x, namespaces), children)
return d
def getqattr(obj, name):
''' get value of an object, safely '''
try:
value = getattr(obj, name)
if hasattr(value, '__call__'): # function generated value
if name.find('link') != -1: # link tuple triplet
return _linkify(value())
return value()
elif (isinstance(value, datetime.datetime)
or isinstance(value, datetime.date)): # datetime object
return datetime2iso8601(value)
return value
except:
return None
def _linkify(value):
''' create link format '''
out = []
for link in value:
out.append(','.join(list(link)))
return '^'.join(out)
| [
"[email protected]"
]
| |
553fc43f756ad2c72fad6fb64a24798b17c40e4d | c82d051d42ce4fcd32bb08e73f5a37015afb1645 | /app.py | 740d40e89be7d9014fbfa43c579b1825f246bb7b | []
| no_license | nedfrine/ensembl | ae759d1728bc45a7cf2cbfac6a28295330bc427d | 77c5130abec3885aca9a7b134cafdf28f56f2451 | refs/heads/master | 2022-01-11T14:55:23.406075 | 2022-01-01T21:26:08 | 2022-01-01T21:26:08 | 174,574,204 | 0 | 0 | null | 2022-01-01T21:26:09 | 2019-03-08T16:48:00 | null | UTF-8 | Python | false | false | 2,870 | py | import flask
import json
import os
from flask import render_template
from flask import request
from flask import Response
from sqlalchemy import and_
from flask_sqlalchemy import SQLAlchemy
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "mysql://anonymous:@193.62.193.10/ensembl_website_90"
app = flask.Flask(__name__)
app.config["DEBUG"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = database_file
db = SQLAlchemy(app)
@app.route('/', methods=['GET','POST'])
def home():
return render_template("form.html")
@app.route('/v1/gene_suggest', methods=['GET','POST'])
def gene_suggest():
in_species = request.args.get('species')
partial = request.args.get('query')
in_limit = request.args.get('limit') or 10
    # request.args.get returns None for missing parameters, so test for falsy
    # values rather than comparing against the empty string only
    if not in_species or not partial:
        raise InvalidUsage('Values missing', status_code=410)
match_string = partial + '%'
gene = gene_autocomplete.query.filter(and_(gene_autocomplete.species == in_species,gene_autocomplete.display_label.like(match_string))).limit(in_limit).all()
return flask.jsonify(eqtls=[e.serialize() for e in gene])
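# Request sketch (hedged; parameter names come from the code, values are
# illustrative): GET /v1/gene_suggest?species=homo_sapiens&query=BRC&limit=5
# returns up to five genes whose display_label starts with "BRC".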
@app.route('/v1/gene_species', methods=['GET','POST'])
def gene_species():
in_species = request.args.get('species')
gene_spec = gene_autocomplete.query.filter(gene_autocomplete.species == in_species).all()
return flask.jsonify(eqtls=[e.serialize() for e in gene_spec])
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = flask.jsonify(error.to_dict())
response.status_code = error.status_code
return response
class gene_autocomplete(db.Model):
display_label = db.Column(db.String(128), unique=False, nullable=True, primary_key=False)
species = db.Column(db.String(255), unique=False, nullable=True, primary_key=False)
stable_id = db.Column(db.String(128), unique=True, nullable=False, primary_key=True)
location = db.Column(db.String(60), unique=False, nullable=True, primary_key=False)
    def __repr__(self):
        # __repr__ must return a string, not a dict
        return str(self.serialize())
def serialize(self):
return {
'label': self.display_label,
'species': self.species,
'stable_id': self.stable_id
}
    def __iter__(self):
        # iterate over the serialized key/value pairs
        return iter(self.serialize().items())
app.run(host='0.0.0.0')
| [
"[email protected]"
]
| |
eedc1a1a7b87294894b34aefd03488bb442339be | 33e5e4b883671f7f40a48e6e0a4b544b3f8f839a | /imageflow/apps.py | 2b8872cb5e0adfd69a6677056fd89db00b564baa | [
"MIT"
]
| permissive | typpo/astrokit | ad7ee83664e3d920733d7e008aec4801c7aa84f2 | 59cea2e06c027e83dfa70defb4053820c79ccced | refs/heads/master | 2023-04-12T15:44:11.669710 | 2022-06-21T21:21:04 | 2022-06-21T21:21:04 | 47,933,931 | 9 | 7 | MIT | 2023-03-31T14:28:40 | 2015-12-13T19:52:01 | Python | UTF-8 | Python | false | false | 134 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class ImageflowConfig(AppConfig):
name = 'imageflow'
| [
"[email protected]"
]
| |
4685b3d3ff1bc3cfe8898f1d8edcf37a08088731 | ad3cd7c5e05d3a1043cc510b3af10c3ef56430f4 | /lib/bbox.py | 5eb528dc572d9103139a0ef3e35d647785748706 | []
| no_license | kl456123/vis_tools | f1ef504daeccee11438edd4a7b0c4f0349364b35 | 6e29ed34948e89810dbcaf3a8fe18d06f83d39be | refs/heads/master | 2021-06-23T16:14:23.960129 | 2017-08-17T10:44:16 | 2017-08-17T10:44:16 | 100,592,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | #!/usr/bin/env python
# encoding: utf-8
import numpy as np  # used by BBoxTools.bbox_intersection
class BBox(object):
def __init__(self, coords, code_type='xxyy'):
self._bbox_xxyy = coords
self._code_type = code_type
self._bbox_xywh = None
self.convert_bbox_format()
    def convert_bbox_format(self):
        # lazily populate the xywh representation from the stored coordinates
        if self._code_type == 'xywh':
            self._bbox_xywh = BBoxTools().convert_bbox_format(self._bbox_xxyy, self._code_type)
def get_bbox(self,code_type):
if code_type=='xxyy':
return self._bbox_xxyy
elif code_type=='xywh':
return self._bbox_xywh
else:
raise TypeError('code type is unknown')
class BBoxTools(object):
def __init__(self):
pass
def bbox_union(self, bbgt, bb, inters):
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(bbgt[2] - bbgt[0] + 1.) *
(bbgt[3] - bbgt[1] + 1.) - inters)
return uni
# note that it is no matter with the order of arguments
def bbox_intersection(self, bbgt, bb):
# intersection
ixmin = np.maximum(bbgt[0], bb[0])
iymin = np.maximum(bbgt[1], bb[1])
ixmax = np.minimum(bbgt[2], bb[2])
iymax = np.minimum(bbgt[3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
return inters
    def bbox_iou(self, bbgt, bb):
        # intersection over union of two boxes given as [xmin, ymin, xmax, ymax]
        inters = self.bbox_intersection(bbgt, bb)
        uni = self.bbox_union(bbgt, bb, inters)
        return inters / uni
    def convert_bbox_format(self, bbox, code_type, inplace=False):
        # note: the default must be the boolean False; the string 'False' is truthy
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
if code_type == 'xywh':
center_x = (xmin + xmax) / 2.0
center_y = (ymin + ymax) / 2.0
width = xmax - xmin
height = ymax - ymin
if inplace:
bbox[0] = center_x
bbox[1] = center_y
bbox[2] = width
bbox[3] = height
return center_x,center_y,width,height
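# Quick sanity sketch (hedged, not from the original):
#   BBoxTools().bbox_iou([0, 0, 9, 9], [0, 0, 9, 9])  # identical boxes -> 1.0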
| [
"[email protected]"
]
| |
d21c0896c06e1415355d55f1c6aa4eda00358cbc | 46559fa48bb8ae722149b600ecd5e05e558553ac | /RumourEval2019Models/Bert-MFajcik/data_preprocessing/text_preprocessing.py | 185113f8777ca3d00c738e17f3b504dde6cda8ea | [
"MIT"
]
| permissive | isspek/veracity-detection | f84eeba6aceb8b2f3f753c5e856bb46d9581c0c5 | 9368309722bead209e49e52c206758e3d173092a | refs/heads/master | 2022-07-15T10:25:10.327352 | 2019-11-14T13:24:55 | 2019-11-14T13:24:55 | 214,429,773 | 0 | 0 | MIT | 2022-06-21T23:08:54 | 2019-10-11T12:23:39 | Python | UTF-8 | Python | false | false | 7,549 | py | import re
import string
import warnings
import preprocessor as twitter_preprocessor
import spacy
# See spacy tag_map.py for tag explanation
from nltk.corpus import stopwords
from spacy.symbols import PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, VERB, NOUN, PROPN, PART, PRON, ORTH
from utils import DotDict
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
nlp = None
punctuation = list(string.punctuation) + ["``"]
stopWords = set(stopwords.words('english'))
validPOS = [PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, VERB, NOUN, PROPN, PART, PRON]
POS_dict = {x: i + 2 for i, x in enumerate(validPOS)}
POS_dict['UNK'] = 0
POS_dict['EOS'] = 1
validNER = ["UNK",
"PERSON", # People, including fictional.
"NORP", # Nationalities or religious or political groups.
"FAC", # Buildings, airports, highways, bridges, etc.
"ORG", # Companies, agencies, institutions, etc.
"GPE", # Countries, cities, states.
"LOC", # Non-GPE locations, mountain ranges, bodies of water.
"PRODUCT", # Objects, vehicles, foods, etc. (Not services.)
"EVENT", # Named hurricanes, battles, wars, sports events, etc.
"WORK_OF_ART", # Titles of books, songs, etc.
"LAW", # Named documents made into laws.
"LANGUAGE", # Any named language.
"DATE", # Absolute or relative dates or periods.
"TIME", # Times smaller than a day.
"PERCENT", # Percentage, including "%".
"MONEY", # Monetary values, including unit.
"QUANTITY", # Measurements, as of weight or distance.
"ORDINAL", # "first", "second", etc.
"CARDINAL", # Numerals that do not fall under another type.
]
validDEPS = ['UNK',
'acl',
'acomp',
'advcl',
'advmod',
'agent',
'amod',
'appos',
'attr',
'aux',
'auxpass',
'case',
'cc',
'ccomp',
'complm',
'compound',
'conj',
'cop',
'csubj',
'csubjpass',
'dative',
'dep',
'det',
'dobj',
'expl',
'hmod',
'hyph',
'infmod',
'intj',
'iobj',
'mark',
'meta',
'neg',
'nmod',
'nn',
'npadvmod',
'nsubj',
'nsubjpass',
'num',
'number',
'nummod',
'obj',
'obl',
'oprd',
'parataxis',
'partmod',
'pcomp',
'pobj',
'poss',
'possessive',
'preconj',
'predet',
'prep',
'prt',
'punct',
'quantmod',
'rcmod',
'relcl',
'root',
'xcomp']
def preprocess_text(text: str, opts, nlpengine=None, lang='en', special_tags=["<pad>", "<eos>"],
use_tw_preprocessor=True):
if use_tw_preprocessor:
        ## ! There is a bug in the original twitter-preprocessor package:
        # sometimes its link regexp hangs, so we preprocess URLs separately.
text = re.sub(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?", "$URL$",
text.strip())
twitter_preprocessor.set_options('mentions')
text = twitter_preprocessor.tokenize(text)
# processed_chunk = twitter_preprocessor.clean(text)
if nlpengine is None:
global nlp
if nlp is None:
nlp = spacy.load(lang)
nlp.add_pipe(nlp.create_pipe('sentencizer'))
for x in ['URL', 'MENTION', 'HASHTAG', 'RESERVED', 'EMOJI', 'SMILEY', 'NUMBER', ]:
nlp.tokenizer.add_special_case(f'${x}$', [{ORTH: f'${x}$'}])
nlpengine = nlp
BLvec = []
POSvec = []
DEPvec = []
NERvec = []
processed_chunk = ""
doc = nlpengine(text)
doclen = 0
for sentence in doc.sents:
for w in sentence:
# Some phrases are automatically tokenized by Spacy
# i.e. New York, in that case we want New_York in our dictionary
word = "_".join(w.text.split())
if word.isspace() or word == "":
continue
if opts.remove_stop_words and word.lower() in stopWords:
continue
if opts.remove_puncuation and word in punctuation:
continue
# Spacy lemmatized I,He/She/It into artificial
# -PRON- lemma, which is unwanted
if opts.lemmatize_words:
output = w.lemma_ if w.lemma_ != '-PRON-' else w.lower_
else:
output = word
if opts.to_lowercase:
output = output.lower()
if opts.replace_nums and output.replace('.', '', 1).isdigit():
output = opts.num_replacement
output = output.replace("n't", "not")
doclen += 1
processed_chunk += "%s " % (output)
            # Sometimes, when the word contains punctuation and we split it
            # manually, the output can contain multiple tokens. In such a case,
            # just copy the features across; it happens rarely.
if opts.returnbiglettervector:
BLvec.append(int(w.text[0].isupper()))
if opts.returnposvector:
POSvec.append(POS_dict.get(w.pos, POS_dict['UNK']))
if opts.returnDEPvector:
try:
DEPvec.append(validDEPS.index(w.dep_.lower()))
except ValueError:
DEPvec.append(validDEPS.index('UNK'))
if opts.returnNERvector:
try:
NERvec.append(validNER.index(w.ent_type_))
except ValueError:
NERvec.append(validNER.index('UNK'))
if opts.add_eos:
doclen += 1
processed_chunk += opts.eos + "\n"
if opts.returnbiglettervector:
BLvec.append(0)
if opts.returnposvector:
POSvec.append(POS_dict['EOS'])
if opts.returnDEPvector:
DEPvec.append(0)
if opts.returnNERvector:
NERvec.append(0)
else:
processed_chunk += "\n"
processed_chunk = processed_chunk.strip()
assert len(processed_chunk.split()) == len(BLvec) == len(POSvec) == len(DEPvec) == len(NERvec)
return processed_chunk, BLvec, POSvec, DEPvec, NERvec
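# Hedged illustration (not in the original): for a tweet such as "Hello @user",
# preprocess_text returns the cleaned token string plus four per-token feature
# lists (capitalisation flag, POS id, dependency id, NER id) of equal length,
# as enforced by the assert above.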
def initopts():
o = DotDict()
o.stopwords_file = ""
o.remove_puncuation = False
o.remove_stop_words = False
o.lemmatize_words = False
o.num_replacement = "[NUM]"
o.to_lowercase = False
o.replace_nums = False # Nums are important, since rumour may be lying about count
o.eos = "[EOS]"
o.add_eos = True
o.returnNERvector = True
o.returnDEPvector = True
o.returnbiglettervector = True
o.returnposvector = True
return o
if __name__ == "__main__":
print(preprocess_text(
"Appalled by the attack on Charlie Hebdo in Paris, 10 - probably journalists - now confirmed dead. An attack on free speech everywhere.",
initopts()))
| [
"[email protected]"
]
| |
ceb4b01859eec51102efa567136810d52af86ace | 0a986d303056fb20e55adf75f279e059f3cfc280 | /breakfast/views.py | 8dceac42c1aada1440bf0616a6dba1d4347e33b3 | []
| no_license | EleaZhong/my-first-blog | 4301faa59dc5a2fec17318a6c05f34e707f0373b | b37a8056e9594d89946b7afbf762524c9c13573c | refs/heads/master | 2022-04-27T18:03:00.502240 | 2019-03-23T06:52:44 | 2019-03-23T06:52:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | from django.shortcuts import render, get_object_or_404
from django.http import Http404
# Create your views here.
from .models import breakfastho
from django.http import HttpResponse
from .forms import breakfast_form,breakfast_form_new
def breakfast_product_view(request):
breakfast_object = breakfastho.objects.get(id=1)
breakfast_object_1 = {
'breakfast1': breakfast_object
}
return render(request,'breakfast_page1.html',breakfast_object_1)
'''
def create_breakfast_view(request):
b_form = breakfast_form()
if request.method == 'POST':
b_form = breakfast_form(request.POST )
if b_form.is_valid():
print(b_form.cleaned_data)
breakfastho.objects.create(**b_form.cleaned_data)
created = {
'form': b_form
}
return render(request,'breakfast_create.html',created)
'''
def create_breakfast_view(request,b_id):# b id used in models and url
#breakfastobject = breakfastho.objects.get(id=b_id)
breakfastobject = get_object_or_404(breakfastho,id=b_id)
objectset = breakfastho.objects.all()
created = {
'b': breakfastobject,
'b_id_up':'b/'+str(b_id+1),
'b_id_down':'b/'+str(b_id-1),
'all_objects': objectset
}
return render(request, 'bshow.html', created)
def create_new_b_view(request):
bform = breakfast_form_new(request.POST or None)
    # is_valid must be *called*; the bare method reference is always truthy
    if bform.is_valid():
        bform.save()
        # re-bind an empty form so the page is ready for the next entry
        bform = breakfast_form_new()
created = {
'form' : bform
}
return render(request, 'breakfast_create.html', created)
| [
"[email protected]"
]
| |
f61eca67c5c12e16e1a5e52966b33565843168a4 | 4bf7ffbb76f89412340808fbc999a027165e7b8f | /code1.py | 7bcccfdfe79a6ebaa8c76b431dc208b11007fb3b | []
| no_license | mrFred489/AoC2017 | 7e62d1695b7340b95640a4563f4ce611d4399521 | b4cc1609b41104165cd4c5442d6508b3dd3178c5 | refs/heads/master | 2021-09-01T06:01:28.908747 | 2017-12-25T07:53:01 | 2017-12-25T07:53:01 | 115,197,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py |
f = open("data1.txt")
data = f.readline()
data = data.strip()
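# Part 1: sum every digit that equals the next digit, treating the
# sequence as circular.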
num = 0
for i in range(1,len(data)):
if data[i-1] == data[i]:
num += int(data[i])
if data[0] == data[-1]:
num += int(data[0])
print("svar1")
print(num)
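# Part 2: sum every digit that equals the digit halfway around the circle
# (two equivalent implementations follow).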
k = int(len(data)/2)
num2 = 0
for i in range(len(data)):
if k + i < len(data):
ind2 = k+i
else:
ind2 = k+i-len(data)
if data[i] == data[ind2]:
num2 += int(data[i])
print(num2)
num3 = 0
for i in range(len(data)):
if data[i] == data[(i+k)%len(data)]:
num3 += int(data[i])
print(num3)
| [
"[email protected]"
]
| |
c281292e06d3742bd682a5d8766d5862720b284c | 6f63ad6f179c4c5bc4e2c82f741067fc030e1351 | /CSE111/esteem.py | dfdfd26f68658ce800c957c9f573228e2ca89a12 | []
| no_license | Kyle5150/CSE111 | 8e152fdbac93854259de8b7328dd183ccbafe35a | 7f4db03eed69232085323fb5dd80e60a0fa5b618 | refs/heads/main | 2023-06-02T20:00:49.043137 | 2021-06-17T07:26:51 | 2021-06-17T07:26:51 | 377,741,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | NEGATIVE = -1
POSITIVE = 1
def main():
print("This program is an implementaiton of the Rosenberg Self-Esteem Scale.")
print("This program will show you ten statements that you could possibly")
print("apply to yourself. Please rate how much you agree with each of the")
print("statements by responding with one of these four letters:")
print()
print("D means you strongly disagree with the statement.")
print("d means you disagree with the statement.")
print("a means you agree with the statement.")
print("A means you strongly agree with the statement.")
print()
score = 0
score += ask_question("1. I feel that I am a person of worth, at least on an equal plane with others.", POSITIVE)
score += ask_question("2. I feel that I have a number of good qualities.", POSITIVE)
score += ask_question("3. All in all, I am inclined to feel that I am a failure.", NEGATIVE)
score += ask_question("4. I am able to do things as well as most other people.", POSITIVE)
score += ask_question("5. I feel I do not have much to be proud of.", NEGATIVE)
score += ask_question("6. I take a positive attitude toward myself.", POSITIVE)
score += ask_question("7. On the whole, I am satisfied with myself.", POSITIVE)
score += ask_question("8. I wish I could have more respect for myself.", NEGATIVE)
score += ask_question("9. I certainly feel useless at times.", NEGATIVE)
score += ask_question("10. At times I think I am no good at all.", NEGATIVE)
print()
print(f"Your score is {score}.")
print("A score below 15 may indicate problematic low self-esteem.")
def ask_question(statement, pos_or_neg):
"""Display one statement to the user and get the user's response.
Then determine the score for the response and return the score.
Parameters
statement: The statement to show the user.
pos_or_neg: Either the constant POSITIVE or NEGATIVE.
Return: the score from the user's response to the statement.
"""
print(statement)
answer = input("Enter D, d, a, or A: ")
score = 0
if answer == 'D':
score = 0
elif answer == 'd':
score = 1
elif answer == 'a':
score = 2
elif answer == 'A':
score = 3
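    # Negatively worded statements are reverse-scored so that a higher
    # total always indicates higher self-esteem.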
if pos_or_neg == NEGATIVE:
score = 3 - score
return score
# If this file was executed like this:
# > python esteem.py
# then call the main function. However, if this file
# was simply imported, then skip the call to main.
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
51573e5e37c9a202919b9704bc45f94207c17118 | 925216786fd041fcf5eaffaecb323e3e7d46e6fc | /cosc343worldPythonMacOS/cosc343world.py | ee5440b02da2f5c7feebb94f44fdfeaa8355e4ce | []
| no_license | HarryMead/NeuralNetworkWorld | a0f1fe8a2e76868612297242a8e3f66d7e080f74 | 98a8ad721c2673b7b26f3cbed19097a717f29cf5 | refs/heads/master | 2022-12-05T22:47:35.688295 | 2020-08-23T12:53:50 | 2020-08-23T12:53:50 | 283,945,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,296 | py | #!/usr/bin/env python
from cosc343worldcc import _cCreature, _cWorld
import numpy as np
import time
import sys
# This is a creature class that your EvolvingCreature needs to inherit from.
# This class wraps the _cCreature class which was implemented in C.
class Creature(_cCreature):
# Your child class must override this method, where the
# mapping of percepts to actions is implemented
def AgentFunction(self, percepts, nActions):
print("Your EvolvingCreature needs to override the AgentFunction method!")
sys.exit(-1)
# Agent function, which is called from the simulation of the world implemented in C.
# This method translates the percepts to a python list, and translates back
# the list representing the actions into C format.
def internal_AgentFunction(self):
# Get the number of percepts and actions
nPercepts = self.numPercepts()
nActions = self.numActions()
# Create lists of percepts
percepts = np.zeros((nPercepts))
for i in range(nPercepts):
percepts[i] = self.getPercept(i)
# Execute the AgentFunction method that needs to be implemented
# by the EvolvingCreature. Pass in the list of percepts and
# specify the number of actions expected.
actions = self.AgentFunction(percepts, nActions)
        if not isinstance(actions, list) or len(actions) != nActions:
            print("Error! Expecting the actions returned from the AgentFunction to be a list of %d numbers." % nActions)
            sys.exit(-1)
# Translate actions and feed it back to the engine
for i in range(nActions):
self.setAction(i, actions[i])
# Wrapper class for _cWorld which implements the engine for the simulation
class World(_cWorld):
# Initialise the wrapper with some defaults for the world type, grid size
# and the repeatability setting.
def __init__(self, worldType=1, gridSize=24, repeatable=False):
self.ph = None
self.worldType = worldType
super().__init__(worldType, gridSize, repeatable)
# Feed the next generation of creatures to the simulation
#
# Input: population - a list of creatures for the simulation
def setNextGeneration(self, population):
self.resetCreatures()
for i in range(len(population)):
self.addCreature(population[i])
# Animation of the simulation
#
# Input: titleStr - title string of the simulation
# speed - of the visualisation: can be 'slow', 'normal' or 'fast'
def show_simulation(self, titleStr = "", speed='normal'):
import pygame
gridSize = self.gridSize()
left_frame = 100
# Initialise pygame
pygame.init()
        # Specify the size of the window
size = width, height = 720, 480
WHITE = (255, 255, 255)
BLACK = 0, 0, 0
if speed == "normal":
frameTurns = 20
nSteps = 10
elif speed == "fast":
frameTurns = 1
nSteps = 5
elif speed == "slow":
frameTurns = 40
nSteps = 10
# Create pygame screen
screen = pygame.display.set_mode(size)
# Compute the size of the individual square
unit = int(np.min([width-left_frame, height])/gridSize)
# Load images
im_strawbs = [pygame.image.load('images/strawberry-green.png'),
pygame.image.load('images/strawberry-red.png')
]
im_creatures = [pygame.image.load('images/smiley_happy.png'),
pygame.image.load('images/smiley_hungry.png'),
pygame.image.load('images/smiley_sick.png')
]
# Scale the images for the size of the individual square
for i in range(len(im_strawbs)):
im_strawbs[i] = pygame.transform.scale(im_strawbs[i], (unit, unit))
for i in range(len(im_creatures)):
im_creatures[i] = pygame.transform.scale(im_creatures[i], (unit, unit))
im_monster = pygame.transform.scale(pygame.image.load("images/monster.png"), (unit, unit))
# Read the total number of turns from the engine
nTurns = self.vis_numTurns()
# The speed of animation depends on specified speed
stepDiff = 1.0/float(nSteps)
# Read the number food items, creatures and monsters from the engine
nFood = self.vis_num(0)
nCreatures = self.vis_num(1)
nMonsters = self.vis_num(2)
nBodies = [nFood, nCreatures, nMonsters]
halfSteps = int(np.floor(nSteps/2))
        # Show a visualisation of the simulation state at each turn
for t in range(1, nTurns + 1):
# Update the window caption to specify the turn number
pygame.display.set_caption("World %d, %s (turn %d)" % (self.worldType, titleStr, t))
            # nSteps is the number of animation frames drawn per turn (more frames give a slower, smoother animation)
for k in range(nSteps):
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
# Paint the window in white
screen.fill(WHITE)
# Draw the grid lines in black
for i in range(gridSize + 1):
pygame.draw.line(screen, BLACK, [left_frame, i*unit], [left_frame+(gridSize*unit), i*unit])
pygame.draw.line(screen, BLACK, [left_frame+(i*unit), 0], [left_frame+(i*unit), gridSize * unit])
# Iterate over all item types...
for type in range(3):
# For the number of items in each type...
for i in range(nBodies[type]):
# Get the position and state at turn t
x = self.vis(type, 0, i, t)
y = self.vis(type, 1, i, t)
s = self.vis(type, 2, i, t)
# Get the position at turn t-1
xprev = self.vis(type, 0, i, t-1)
yprev = self.vis(type, 1, i, t-1)
# Compute the shift from t-1 to t based on current frame
xshift = xprev-x
if np.abs(xshift)<=1:
xdiff = (x - xprev) * k * stepDiff
elif k <= halfSteps:
xdiff = np.sign(xshift) * k * stepDiff
else:
xdiff = -np.sign(xshift) * k * stepDiff
xprev = x
yshift = yprev - y
if np.abs(yshift) <= 1:
ydiff = (y - yprev) * k * stepDiff
elif k <= halfSteps:
ydiff = np.sign(yshift) * k * stepDiff
else:
ydiff = -np.sign(yshift) * k * stepDiff
yprev = y
# If the item is food...
if type==0:
# ...depending on the state show the green or red strawberry icon
if s >= 0 and s <= 1:
obj_loc = pygame.Rect(left_frame + (x * unit), y * unit, unit, unit)
obj_im = im_strawbs[s]
screen.blit(obj_im, obj_loc)
# If the item is a creature...
elif type==1:
# ...show only if not dead
if s > 0:
# Depending on state show different creature icon
obj_im = im_creatures[s-1]
obj_loc = pygame.Rect(left_frame + (xprev + xdiff) * unit, (yprev + ydiff) * unit, unit,
unit)
screen.blit(obj_im, obj_loc)
# If the item is a monster...
elif type==2:
#...show the monster icon
obj_loc = pygame.Rect(left_frame+(xprev + xdiff) * unit, (yprev + ydiff) * unit, unit, unit)
screen.blit(im_monster, obj_loc)
            # Update the display
pygame.display.flip()
pygame.time.delay(frameTurns)
pygame.display.quit()
pygame.quit()
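# Minimal sketch of the subclass contract (illustrative only): an evolving
# creature inherits from Creature and overrides AgentFunction to map the
# percept list to a list of nActions numbers. The random policy below is an
# assumption used purely to show the expected signature; a real solution
# would evolve its parameters.
if __name__ == "__main__":
    class RandomCreature(Creature):
        def AgentFunction(self, percepts, nActions):
            # One value per action; the engine reads these back through
            # internal_AgentFunction.
            return list(np.random.rand(nActions))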
| [
"[email protected]"
]
| |
1eb0b348f203ec637e1b4118ee9118c894482dd4 | faaeb8aa3565112998556293a6617f7160823910 | /fruitday/userinfo/urls.py | d128a3d28d7f20743322b6f8d9bf06fdf7a883c9 | []
| no_license | meitianjinbu/shoping | 58941f53a91925e001645e51f64b2cd5417bf3c6 | d99210a4238dff59786295e58e0e49858e040750 | refs/heads/master | 2020-03-22T17:43:54.782120 | 2018-07-10T10:03:33 | 2018-07-10T10:03:33 | 140,412,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # _*_ coding:utf-8 _*_
from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^register/', register_in, name='register'),
url(r'^registerin/', register_, name='register_in'),
url(r'^login/', login_in, name='login'),
url(r'^loginin/', login_, name='login_in'),
url(r'^logout/', logout, name='logout'),
url(r'^addads/', add_ads_in, name='addads'),
url(r'^addadsin/', add_ads_, name='addadsin'),
url(r'^adslist/', user_ads, name='adslist'),
url(r'^delads/', delete_ads, name='delads'),
] | [
"[email protected]"
]
| |
cf271b92cae5bee54534c95ab110740da68e58ed | 930262f95cfb17d77da3c3674521a70c3b69d869 | /apps/JazzySnek/views.py | f094e6c274c3808fc3880f3ef77db0e908760b95 | []
| no_license | kevan-wang/JazzySnek | 905e7402bc3448f3305bba3a8665e164c9b5cd46 | dc084e34cad59ab318529860a8d9a62955d338ef | refs/heads/master | 2020-03-13T20:35:16.906966 | 2018-04-28T08:19:09 | 2018-04-28T08:19:09 | 131,277,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,230 | py | from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import User
import bcrypt, random, string
# Create your views here.
def index(request):
context = {}
if "userID" not in request.session: # Security feature: This process will not execute without an authentic login.
context["name"] = "Guest"
else:
context["name"] = User.objects.get(id=request.session["userID"]).userName
return render(request, "JazzySnek/index.html", context)
def loginReg(request):
return render(request, "JazzySnek/regLogin.html")
def quickGame(request):
return render(request, "JazzySnek/game.html")
def howTo(request):
return render(request, "JazzySnek/howToPlay.html")
def storyMode(request):
return render(request, "JazzySnek/storyMode.html")
def highScores(request):
context = {}
if "userID" not in request.session: # Security feature: This process will not execute without an authentic login.
context["name"] = "Guest"
else:
context["name"] = User.objects.get(id=request.session["userID"]).userName
context["users"] = User.objects.order_by("-highScore")[:10]
return render(request, "JazzySnek/highScores.html", context)
def register(request):
# Executed when the registration form is submitted.
userData = retrieveForms(request)
errors = User.objects.validatorReg(userData)
# If any errors are found, store the errors as messages & redirect to root.
if len(errors):
for key, value in errors.items():
messages.error(request, value)
return redirect('/login_reg')
# If user is not in the database & all the forms are valid, create a new user with the hashed password and store on server database.
else:
password = userData['password1']
hashedPW = bcrypt.hashpw(password.encode(), bcrypt.gensalt())
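        # At login the stored hash would be checked with
        # bcrypt.checkpw(candidate.encode(), hashedPW); presumably this
        # happens inside User.objects.validatorLogin (defined in models.py).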
securityKey = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
User.objects.create(userName=userData['userName'], passwordHash=hashedPW, securityKey=securityKey)
messages.info(request, slangPos() + " You have successfully registered!")
return redirect('/login_reg')
def login(request):
if "userID" in request.session:
messages.info(request, slangPos() + " You are already logged in!")
return redirect("/login_reg")
errors = User.objects.validatorLogin(request.POST)
# If any errors are found, store the errors as messages & redirect to root.
if len(errors):
for key, value in errors.items():
messages.error(request, value)
return redirect('/login_reg')
else:
# If passwords match, flash a successful login message and redirect to dashboard.
user = User.objects.filter(userName=request.POST["userName"]).first()
# Confirmation of login is the user's ID number stored in the session.
request.session["userID"] = user.id
request.session["securityKey"] = user.securityKey
messages.info(request, slangPos() + " Welcome back, " + user.userName + "!")
return redirect("/login_reg")
def logout(request):
    # Logging out removes the user's ID from the session.
if "userID" in request.session:
request.session.pop("userID")
request.session.pop("securityKey")
messages.error(request, slangPos() + " You have logged out.")
return redirect('/')
else:
return redirect('/')
def logScore(request):
if "userID" in request.session:
score = request.POST["finalScore"]
if score != "":
scoreInt = int(score)
user = User.objects.get(id=request.session["userID"])
if scoreInt > user.highScore:
user.highScore = scoreInt
user.save()
return redirect('/')
##### HELPER FUNCTIONS
def retrieveForms(request):
# Returns a dictionary of the fields' names as keys and the fields' values (from registration/login page) as values.
data = { }
keys = ['userName', 'password1', 'password2']
for key in keys:
data[key] = request.POST[key]
return data
def slangPos():
randIndex = random.randint(0, len(positiveSlang)-1)
return positiveSlang[randIndex]
positiveSlang = [ "Breakin' it Up!", "Bustin' the Conk!", "Collarin' the Jive!",
"Dicty Dukes!", "Friskin' Whiskers!", "Get Your Boots On!", "In the Groove!",
"Swell Jam!", "Hittin' the Licks!", "Muggin' Heavy!", "Neigho, Pops!", "Ridin' the Riffs!"
]
| [
"[email protected]"
]
| |
0a8c30ddc938aa6edd61a8e58586b053b788471d | 619122375a4e4a65f02d797b0a066de50597a931 | /web/blueprints/school_profile/__init__.py | 072ee14aed015d5784c25cbf0cda5c158ed210ab | []
| no_license | zhangshuo1996/school_profile | e2a50505da626b20ca39ebde459e3046733e0fd1 | 5965015345c1878fbd6860a3cdca344fb088aa1b | refs/heads/master | 2023-02-15T16:42:09.451514 | 2021-01-04T01:16:53 | 2021-01-04T01:16:53 | 319,231,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from web.blueprints.school_profile.profile import school_profile_bp
from web.blueprints.school_profile.search import school_search_bp
| [
"[email protected]"
]
| |
106c1c6b1de661b947c7f6f43927e0c261cc39ac | 186fd3420112162d1f5603941b508b519117f2f6 | /podcast_site/wsgi.py | 0706e23aff9585977a545e314e4242d4ad9db2d6 | []
| no_license | 2016asuri/podcast-search-engine | 0bd3897d4e20c0c48233dfce5cad288eff2f78ef | d0421156c8469389e5ac81e945b2ce7d6d84fce9 | refs/heads/master | 2022-12-09T12:10:51.377942 | 2018-07-10T21:51:26 | 2018-07-10T21:51:26 | 136,760,996 | 0 | 0 | null | 2022-12-07T23:50:44 | 2018-06-09T21:59:05 | Python | UTF-8 | Python | false | false | 492 | py | """
WSGI config for podcast_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "podcast_site.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application) | [
"[email protected]"
]
| |
3941489ec2a7e0de2b1adcec8caab3fafca2f3a0 | 4b4df51041551c9a855468ddf1d5004a988f59a2 | /leetcode_python/Array/corporate-flight-bookings.py | d6486593ea2dc4f37b79869a1f72ef71fc6dc067 | []
| no_license | yennanliu/CS_basics | 99b7ad3ef6817f04881d6a1993ec634f81525596 | 035ef08434fa1ca781a6fb2f9eed3538b7d20c02 | refs/heads/master | 2023-09-03T13:42:26.611712 | 2023-09-03T12:46:08 | 2023-09-03T12:46:08 | 66,194,791 | 64 | 40 | null | 2022-08-20T09:44:48 | 2016-08-21T11:11:35 | Python | UTF-8 | Python | false | false | 5,073 | py | """
1109. Corporate Flight Bookings
Medium
There are n flights that are labeled from 1 to n.
You are given an array of flight bookings bookings, where bookings[i] = [firsti, lasti, seatsi] represents a booking for flights firsti through lasti (inclusive) with seatsi seats reserved for each flight in the range.
Return an array answer of length n, where answer[i] is the total number of seats reserved for flight i.
Example 1:
Input: bookings = [[1,2,10],[2,3,20],[2,5,25]], n = 5
Output: [10,55,45,25,25]
Explanation:
Flight labels: 1 2 3 4 5
Booking 1 reserved: 10 10
Booking 2 reserved: 20 20
Booking 3 reserved: 25 25 25 25
Total seats: 10 55 45 25 25
Hence, answer = [10,55,45,25,25]
Example 2:
Input: bookings = [[1,2,10],[2,2,15]], n = 2
Output: [10,25]
Explanation:
Flight labels: 1 2
Booking 1 reserved: 10 10
Booking 2 reserved: 15
Total seats: 10 25
Hence, answer = [10,25]
Constraints:
1 <= n <= 2 * 104
1 <= bookings.length <= 2 * 104
bookings[i].length == 3
1 <= firsti <= lasti <= n
1 <= seatsi <= 104
"""
# V0
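# IDEA : difference array + prefix sum
# NOTE : this V0 slot was empty; the sketch below is added for completeness
#        and mirrors the V1 reference under it.
class Solution(object):
    def corpFlightBookings(self, bookings, n):
        # res[i] stores the *change* in reserved seats at flight i (0-indexed);
        # one extra slot absorbs decrements from bookings that end at flight n.
        res = [0] * (n + 1)
        for first, last, seats in bookings:
            res[first - 1] += seats   # seats appear from flight `first`
            res[last] -= seats        # and disappear after flight `last`
        # prefix-sum the changes to recover the running totals
        for i in range(1, n):
            res[i] += res[i - 1]
        return res[:-1]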
# V1
# IDEA : ARRAY + prefix sum
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328856/JavaC%2B%2BPython-Sweep-Line
# IDEA :
# Set the change of seats for each day.
# If booking = [i, j, k],
# it needs k more seats on the ith day,
# and those seats are no longer needed on the (j+1)th day.
# We accumulate these changes then we have the result that we want.
# Complexity
# Time O(booking + N) for one pass on bookings
# Space O(N) for the result
class Solution:
def corpFlightBookings(self, bookings, n):
res = [0] * (n + 1)
for i, j, k in bookings:
res[i - 1] += k
res[j] -= k
for i in range(1, n):
res[i] += res[i - 1]
return res[:-1]
# V1'
# IDEA : ARRAY + prefix sum
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328949/Simple-Python-solution
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
answer = n * [0]
lst = []
for i, j, num in bookings:
lst.append((i - 1, num))
lst.append((j, -num))
lst.sort()
curr_num = 0
prev_i = 0
for i, num in lst:
for j in range(prev_i, i):
answer[j] += curr_num
prev_i = i
curr_num += num
return answer
# V1''
# IDEA : ARRAY
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328893/Short-python-solution
# IDEA : Simply use two arrays to keep track of how many bookings are added for every flight.
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
opens = [0]*n
closes = [0]*n
for e in bookings:
opens[e[0]-1] += e[2]
closes[e[1]-1] += e[2]
ret, tmp = [0]*n, 0
for i in range(n):
tmp += opens[i]
ret[i] = tmp
tmp -= closes[i]
return ret
# V1'''
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328986/Python-linear-solution
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
res = [0] * (n + 2)
for booking in bookings:
start, end, seats = booking
res[start] += seats
res[end + 1] -= seats
for i in range(1, len(res)):
res[i] += res[i - 1]
# don't keep first because bookings are 1-based
# don't keep last because it's out of range
return res[1:-1]
# V1''''
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328863/Python-concise-sum
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
res = [0] * n
i = cur = 0
for j, val in sorted([[i - 1, k] for i, j, k in bookings] + [[j, -k] for i, j, k in bookings]):
while i < j:
res[i] = cur
i += 1
cur += val
return res
# V1''''''
# https://zxi.mytechroad.com/blog/math/leetcode-1109-corporate-flight-bookings/
# C++
# class Solution {
# public:
# vector<int> corpFlightBookings(vector<vector<int>>& bookings, int n) {
# vector<int> ans(n + 1);
# for (const auto& b : bookings) {
# ans[b[0] - 1] += b[2];
# ans[b[1]] -= b[2];
# }
# for (int i = 1; i < n; ++i)
# ans[i] += ans[i - 1];
# ans.pop_back();
# return ans;
# }
# };
# V1''''''''
# https://blog.51cto.com/u_15344287/3646723
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
lst = [0] * (n + 1)
for j, k, l in bookings:
lst[j - 1] += l
lst[k] -= l
lst.pop()
ans = []
now = 0
for i in range(len(lst)):
now += lst[i]
ans.append(now)
return ans
# V2 | [
"[email protected]"
]
| |
6fa980d4dd5a9231591dcd0dfff776d63cf6e4d2 | 64530babd4336421ef28feea8e0c69fddf6ca394 | /tuple.py | 5bc07629159ff6e5ae7bbd6e389cd31caa5246fe | []
| no_license | judy1116/pythonDemo | 0e35e51da7bc82354b11e67219bfe1ba742048d7 | 1e8f3e2a48b1804a12f7d93a7477a8d3e5bdf450 | refs/heads/master | 2021-08-31T10:49:22.219034 | 2017-12-21T03:20:23 | 2017-12-21T03:20:23 | 114,954,420 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | #不可变的tuple有什么意义?因为tuple不可变,所以代码更安全。如果可能,能用tuple代替list就尽量用tuple。
#另一种有序列表叫元组:tuple。tuple和list非常类似,但是tuple一旦初始化就不能修改,比如同样是列出同学的名字:
classmates = ('Michael', 'Bob', 'Tracy')
#现在,classmates这个tuple不能变了,它也没有append(),insert()这样的方法。其他获取元素的方法和list是一样的,你可以正常地使用classmates[0],classmates[-1],但不能赋值成另外的元素。
print(classmates[0])
t = (1, 2)
f = ()
#定义的不是tuple,是1这个数!这是因为括号()既可以表示tuple,又可以表示数学公式中的小括号,这就产生了歧义,因此,Python规定,这种情况下,按小括号进行计算,计算结果自然是1。
#所以,只有1个元素的tuple定义时必须加一个逗号,,来消除歧义:
e=(1,)
#"可变的"tuple
#表面上看,tuple的元素确实变了,但其实变的不是tuple的元素,而是list的元素。tuple一开始指向的list并没有改成别的list,所以,tuple所谓的“不变”是说,tuple的每个元素,指向永远不变。即指向'a',就不能改成指向'b',指向一个list,就不能改成指向其他对象,但指向的这个list本身是可变的!
g=('a','b',['A','B'])
g[2][0]='X'
g[2][1]='Y'
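# A quick check of the immutability guarantee (added demonstration):
# assigning to a tuple slot raises TypeError, caught here so the script
# keeps running before the final print below.
try:
    g[0] = 'c'
except TypeError as err:
    print('tuples are immutable:', err)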
print(g) | [
"[email protected]"
]
| |
8da121d649ea828a915d2f8fee0f8d2f41569f13 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/120243/tradeshift-text-classification-master/src/online-model/tk7_solution.py | ffa812f783556c5f81ae943cd1fa4a0497105321 | [
"MIT"
]
| permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,176 | py | '''
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <[email protected]>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
'''
from datetime import datetime
from math import log, exp, sqrt
# TL; DR
# the main learning process start at line 122
# parameters #################################################################
import sys
data_dir=sys.argv[1]
sub_dir=sys.argv[2]
train = data_dir+'train.csv' # path to training file
label = data_dir+'trainLabels.csv' # path to label file of training data
test = data_dir+'test.csv' # path to testing file
D = 2 ** 23 # number of weights use for each model, we have 32 of them
alpha = .1 # learning rate for sgd optimization
# function, generator definitions ############################################
# A. x, y generator
# INPUT:
# path: path to train.csv or test.csv
# label_path: (optional) path to trainLabels.csv
# YIELDS:
# ID: id of the instance (can also acts as instance count)
# x: a list of indices that its value is 1
# y: (if label_path is present) label value of y1 to y33
def data(path, label_path=None):
for t, line in enumerate(open(path)):
# initialize our generator
if t == 0:
# create a static x,
# so we don't have to construct a new x for every instance
            x = [0] * (146 + 13 * 14 // 2 + 1)  # 146 raw columns + 91 pairwise interactions + 1; // keeps the length an int in Python 3
if label_path:
label = open(label_path)
label.readline() # we don't need the headers
continue
# parse x
for m, feat in enumerate(line.rstrip().split(',')):
if m == 0:
ID = int(feat)
else:
# one-hot encode everything with hash trick
# categorical: one-hotted
# boolean: ONE-HOTTED
# numerical: ONE-HOTTED!
                # note, the built-in hash(), although fast, is not stable,
# i.e., same value won't always have the same hash
# on different machines
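                # e.g. column 3 holding the value "foo" becomes index
                # abs(hash("3_foo")) % D, so every occurrence of that
                # (column, value) pair shares one weight slot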
x[m] = abs(hash(str(m) + '_' + feat)) % D
row=line.rstrip().split(',')
hash_cols = [64,65,61,62,91,92,142,3,4,61,34,91,94,95]
t = 146
for i in range(14):
for j in range(i+1,14):
t += 1
x[t] = abs(hash(str(i)+'_'+str(j)+'_'+row[hash_cols[i]]+"_x_"+row[hash_cols[j]])) % D
# parse y, if provided
if label_path:
# use float() to prevent future type casting, [1:] to ignore id
y = [float(y) for y in label.readline().split(',')[1:]]
yield (ID, x, y) if label_path else (ID, x)
# B. Bounded logloss
# INPUT:
# p: our prediction
# y: real answer
# OUTPUT
# bounded logarithmic loss of p given y
def logloss(p, y):
p = max(min(p, 1. - 10e-15), 10e-15)
return -log(p) if y == 1. else -log(1. - p)
# C. Get probability estimation on x
# INPUT:
# x: features
# w: weights
# OUTPUT:
# probability of p(y = 1 | x; w)
def predict(x, w):
wTx = 0.
for i in x: # do wTx
wTx += w[i] * 1. # w[i] * x[i], but if i in x we got x[i] = 1.
return 1. / (1. + exp(-max(min(wTx, 20.), -20.))) # bounded sigmoid
# D. Update given model
# INPUT:
# alpha: learning rate
# w: weights
# n: sum of previous absolute gradients for a given feature
# this is used for adaptive learning rate
# x: feature, a list of indices
# p: prediction of our model
# y: answer
# MODIFIES:
# w: weights
# n: sum of past absolute gradients
def update(alpha, w, n, x, p, y):
for i in x:
# alpha / sqrt(n) is the adaptive learning rate
# (p - y) * x[i] is the current gradient
# note that in our case, if i in x then x[i] = 1.
n[i] += abs(p - y)
w[i] -= (p - y) * 1. * alpha / sqrt(n[i])
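# Illustrative arithmetic for the adaptive step (not executed): if feature i
# has accumulated n[i] = 3.9 from past |p - y| values and the current error is
# p - y = 0.1, then n[i] becomes 4.0 and w[i] moves by
# 0.1 * alpha / sqrt(4.0) = 0.1 * 0.1 / 2 = 0.005; the per-feature rate
# alpha / sqrt(n[i]) therefore decays as a feature accumulates evidence.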
# training and testing #######################################################
start = datetime.now()
# a list for range(0, 33) - 13, no need to learn y14 since it is always 0
K = [k for k in range(33) if k != 13]
# initialize our model, all 32 of them, again ignoring y14
w = [[0.] * D if k != 13 else None for k in range(33)]
n = [[0.] * D if k != 13 else None for k in range(33)]
loss = 0.
loss_y14 = log(1. - 10**-15)
for ID, x, y in data(train, label):
# get predictions and train on all labels
for k in K:
p = predict(x, w[k])
update(alpha, w[k], n[k], x, p, y[k])
loss += logloss(p, y[k]) # for progressive validation
loss += loss_y14 # the loss of y14, logloss is never zero
# print out progress, so that we know everything is working
if ID % 100000 == 0:
        print('%s\tencountered: %d\tcurrent logloss: %f' % (
            datetime.now(), ID, (loss/33.)/ID))
for ID, x, y in data(train, label):
# get predictions and train on all labels
for k in K:
p = predict(x, w[k])
update(alpha, w[k], n[k], x, p, y[k])
loss += logloss(p, y[k]) # for progressive validation
loss += loss_y14 # the loss of y14, logloss is never zero
# print out progress, so that we know everything is working
if ID % 100000 == 0:
        print('%s\tencountered: %d\tcurrent logloss: %f' % (
            datetime.now(), ID, (loss/33.)/ID))
with open(sub_dir+'./submissiontk7.csv', 'w') as outfile:
outfile.write('id_label,pred\n')
for ID, x in data(test):
for k in K:
p = predict(x, w[k])
outfile.write('%s_y%d,%s\n' % (ID, k+1, str(p)))
if k == 12:
outfile.write('%s_y14,0.0\n' % ID)
print('Done, elapsed time: %s' % str(datetime.now() - start))
| [
"[email protected]"
]
| |
f6d7ae62aec0c8ee20db1465a48e9ad487cd7662 | 6430f1d91ca5b7a5d4c19dac26f148e746166f65 | /evaluate.py | eb8fe7db2ce94130d64f14c25b53b9538424d8fd | []
| no_license | cp917/speech_enhancement | e855a3ba82d48f4579367e792b93d81d0faf3f40 | 815c7f3a8f78344d206a58d48ce49d7e4ba657d9 | refs/heads/master | 2020-12-09T10:37:40.157443 | 2019-09-17T03:27:34 | 2019-09-17T03:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,931 | py | """
Summary: Calculate PESQ and overal stats of enhanced speech.
Author: Qiuqiang Kong
Created: 2017.12.22
Modified: -
"""
import argparse
import os
import csv
import numpy as np
import cPickle
import soundfile
from pypesq import pypesq
from pystoi.stoi import stoi
from prepare_data import create_folder, read_audio  # read_audio is assumed to live in prepare_data (needed by calculate_stoi below)
#import matplotlib.pyplot as plt
def plot_training_stat(args):
"""Plot training and testing loss.
Args:
workspace: str, path of workspace.
tr_snr: float, training SNR.
bgn_iter: int, plot from bgn_iter
fin_iter: int, plot finish at fin_iter
interval_iter: int, interval of files.
"""
workspace = args.workspace
tr_snr = args.tr_snr
bgn_iter = args.bgn_iter
fin_iter = args.fin_iter
interval_iter = args.interval_iter
tr_losses, te_losses, iters = [], [], []
# Load stats.
stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
for iter in xrange(bgn_iter, fin_iter, interval_iter):
stats_path = os.path.join(stats_dir, "%diters.p" % iter)
dict = cPickle.load(open(stats_path, 'rb'))
tr_losses.append(dict['tr_loss'])
te_losses.append(dict['te_loss'])
iters.append(dict['iter'])
# Plot
# line_tr, = plt.plot(tr_losses, c='b', label="Train")
# line_te, = plt.plot(te_losses, c='r', label="Test")
# plt.axis([0, len(iters), 0, max(tr_losses)])
# plt.xlabel("Iterations")
# plt.ylabel("Loss")
# plt.legend(handles=[line_tr, line_te])
# plt.xticks(np.arange(len(iters)), iters)
# plt.show()
def calculate_pesq(args):
"""Calculate PESQ of all enhaced speech.
Args:
workspace: str, path of workspace.
speech_dir: str, path of clean speech.
te_snr: float, testing SNR.
"""
# Remove already existed file.
data_type = args.data_type
speech_dir = "mini_data/test_speech"
f = "{0:<16} {1:<16} {2:<16}"
print(f.format("0", "Noise", "PESQ"))
f1 = open(data_type + '_pesq_results.csv', 'w')
f1.write("%s\t%s\n"%("audio_id", "PESQ"))
    # Calculate PESQ of all enhanced speech.
if data_type=="DM":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "mixdb")
elif data_type=="IRM":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "mask_mixdb")
elif data_type=="CRN":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "crn_mixdb")
elif data_type=="PHASE":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "phase_spec_clean_mixdb")
elif data_type=="VOLUME":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "volume_mixdb")
elif data_type=="NOISE":
enh_speech_dir = os.path.join("workspace" ,'mixed_audios','spectrogram','test','mixdb')
names = os.listdir(enh_speech_dir)
for (cnt, na) in enumerate(names):
enh_path = os.path.join(enh_speech_dir, na)
enh_audio, fs = soundfile.read(enh_path)
speech_na = na.split('.')[0]
speech_path = os.path.join(speech_dir, "%s.WAV" % speech_na)
speech_audio, fs = soundfile.read(speech_path)
#alpha = 1. / np.max(np.abs(speech_audio))
#speech_audio *=alpha
pesq_ = pypesq(16000, speech_audio, enh_audio, 'wb')
print(f.format(cnt, na, pesq_))
f1.write("%s\t%f\n"%(na, pesq_))
# Call executable PESQ tool.
#cmd = ' '.join(["./pesq", speech_path, enh_path, "+16000"])
#os.system(cmd)
os.system("mv %s_pesq_results.csv ./pesq_result/%s_pesq_results.csv"%(data_type, data_type))
def get_stats(args):
"""Calculate stats of PESQ.
"""
data_type = args.data_type
pesq_path = "./pesq_result/"+ data_type+ "_pesq_results.csv"
with open(pesq_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
lis = list(reader)
pesq_dict = {}
for i1 in xrange(1, len(lis) - 1):
li = lis[i1]
na = li[0]
pesq = float(li[1])
noise_type = na.split('.')[1]
if noise_type not in pesq_dict.keys():
pesq_dict[noise_type] = [pesq]
else:
pesq_dict[noise_type].append(pesq)
out_csv_path ='./pesq_result/'+ data_type +'_pesq_differentnoise.csv'
csv_file = open(out_csv_path, 'w')
avg_list, std_list = [], []
f = "{0:<16} {1:<16}"
print(f.format("Noise", "PESQ"))
csv_file.write("%s\t%s\n"%("Noise", "PESQ"))
print("---------------------------------")
csv_file.write("----------------\t-----------------\n")
for noise_type in pesq_dict.keys():
pesqs = pesq_dict[noise_type]
avg_pesq = np.mean(pesqs)
std_pesq = np.std(pesqs)
avg_list.append(avg_pesq)
std_list.append(std_pesq)
print(f.format(noise_type, "%.2f +- %.2f" % (avg_pesq, std_pesq)))
csv_file.write("%s\t%s\n"%(noise_type, "%.2f +- %.2f" % (avg_pesq, std_pesq)))
print("---------------------------------")
csv_file.write("----------------\t-----------------\n")
print(f.format("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list))))
csv_file.write("%s\t%s\n"%("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list))))
csv_file.close()
def get_snr_stats(args):
data_type = args.data_type
pesq_path = os.path.join("pesq_result", data_type + "_pesq_results.csv")
with open(pesq_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
pesq_lis = list(reader)
pesq_lis[0].append("SNR")
pesq_title = pesq_lis[0]
pesq_lis = pesq_lis[:-1]
csv_path = os.path.join("workspace", "mixture_csvs", "test_1hour_even.csv")
with open(csv_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
csv_lis = list(reader)
count = 0
for csv_name in csv_lis[1:]:
if data_type=="NOISE":
csv_na = csv_name[0].split(".")[0] + "." + csv_name[1].split(".")[0]+ "."+csv_name[-1] + "db.wav"
else:
csv_na = csv_name[0].split(".")[0] + "." + csv_name[1].split(".")[0]+ "."+csv_name[-1] + "db.enh.wav"
for pesq_name in pesq_lis[1:]:
if csv_na == pesq_name[0]:
count+=1
pesq_name.append(csv_name[-1])
break
pesq_dict = {}
for i1 in xrange(1, len(pesq_lis)):
li = pesq_lis[i1]
na = li[0]
pesq = float(li[1][0:4])
snr = float(li[-1])
snr_key = snr
if snr_key not in pesq_dict.keys():
pesq_dict[snr_key] = [pesq]
else:
pesq_dict[snr_key].append(pesq)
out_csv_path = os.path.join( "pesq_result", data_type + "_snr_results.csv")
create_folder(os.path.dirname(out_csv_path))
csv_file = open(out_csv_path, 'w')
avg_list, std_list = [], []
sample_sum = 0
f = "{0:<16} {1:<16} {2:<16}"
print(f.format("SNR", "PESQ", "SAMPLE_NUM"))
csv_file.write("%s\t%s\t%s\n"%("SNR", "PESQ", "SAMPLE_NUM"))
csv_file.flush()
print("---------------------------------")
for snr_type in sorted(pesq_dict.keys()):
pesqs = pesq_dict[snr_type]
sample_num = len(pesqs)
sample_sum+=sample_num
avg_pesq = np.mean(pesqs)
std_pesq = np.std(pesqs)
avg_list.append(avg_pesq)
std_list.append(std_pesq)
print(f.format(snr_type, "%.2f +- %.2f" % (avg_pesq, std_pesq), sample_num))
csv_file.write("%s\t%s\t%s\n"%(snr_type, "%.2f +- %.2f" % (avg_pesq, std_pesq), sample_num))
csv_file.flush()
print("---------------------------------")
print(f.format("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list)), sample_sum))
csv_file.write("%s\t%s\t%s\n"%("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list)), "%d"%sample_sum))
csv_file.close()
def calculate_stoi(args):
workspace = "workspace"
speech_dir = "mini_data/test_speech"
    # Calculate STOI of all enhanced speech.
enh_speech_dir = os.path.join(workspace, "enh_wavs", "test", "mixdb")
#enh_speech_dir = "/data00/wangjinchao/sednn-master/mixture2clean_dnn/workspace/mixed_audios/spectrogram/test/mixdb"
# enh_speech_dir = os.path.join(workspace ,'mixed_audios','spectrogram','test','mixdb')
names = os.listdir(enh_speech_dir)
f = open("IRM_stoi.txt", "w")
f.write("%s\t%s\n"%("speech_id", "stoi"))
f.flush()
for (cnt, na) in enumerate(names):
print(cnt, na)
enh_path = os.path.join(enh_speech_dir, na)
speech_na = na.split('.')[0]
speech_path = os.path.join(speech_dir, "%s.WAV" % speech_na)
speech_audio, fs = read_audio(speech_path, 16000)
enhance_audio, fs = read_audio(enh_path, 16000)
if len(speech_audio)>len(enhance_audio):
speech_audio = speech_audio[:len(enhance_audio)]
else:
enhance_audio = enhance_audio[:len(speech_audio)]
stoi_value = stoi(speech_audio, enhance_audio, fs, extended = False)
f.write("%s\t%f\n"%(na, stoi_value))
f.flush()
f.close()
def get_stoi_stats(args):
stoi_path = "./stoi_result/IRM_stoi.txt"
with open(stoi_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
lis = list(reader)
stoi_dict = {}
for i1 in xrange(1, len(lis) - 1):
li = lis[i1]
na = li[0]
stoi = float(li[1])
noise_type = na.split('.')[1]
if noise_type not in stoi_dict.keys():
stoi_dict[noise_type] = [stoi]
else:
stoi_dict[noise_type].append(stoi)
#out_csv_path ='./stoi_result/gvdm_enhance.csv'
#csv_file = open(out_csv_path, 'w')
avg_list, std_list = [], []
f = "{0:<16} {1:<16}"
print(f.format("Noise", "STOI"))
#csv_file.write("%s\t%s\n"%("Noise", "stoi"))
print("---------------------------------")
#csv_file.write("----------------\t-----------------\n")
for noise_type in stoi_dict.keys():
stois = stoi_dict[noise_type]
avg_stoi = np.mean(stois)
std_stoi = np.std(stois)
avg_list.append(avg_stoi)
std_list.append(std_stoi)
print(f.format(noise_type, "%.5f +- %.5f" % (avg_stoi, std_stoi)))
#csv_file.write("%s\t%s\n"%(noise_type, "%.2f +- %.2f" % (avg_stoi, std_stoi)))
print("---------------------------------")
#csv_file.write("----------------\t-----------------\n")
print(f.format("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list))))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
parser_plot_training_stat = subparsers.add_parser('plot_training_stat')
parser_plot_training_stat.add_argument('--workspace', type=str, required=True)
parser_plot_training_stat.add_argument('--tr_snr', type=float, required=True)
parser_plot_training_stat.add_argument('--bgn_iter', type=int, required=True)
parser_plot_training_stat.add_argument('--fin_iter', type=int, required=True)
parser_plot_training_stat.add_argument('--interval_iter', type=int, required=True)
parser_calculate_pesq = subparsers.add_parser('calculate_pesq')
parser_calculate_pesq.add_argument('--data_type', type=str, required=True)
parser_get_stats = subparsers.add_parser('get_stats')
parser_get_stats.add_argument('--data_type', type=str, required=True)
    parser_get_snr_stats = subparsers.add_parser('get_snr_stats')
    parser_get_snr_stats.add_argument('--data_type', type=str, required=True)
    parser_calculate_stoi = subparsers.add_parser('calculate_stoi')
    parser_get_stoi_stats = subparsers.add_parser('get_stoi_stats')
args = parser.parse_args()
if args.mode == 'plot_training_stat':
plot_training_stat(args)
elif args.mode == 'calculate_pesq':
calculate_pesq(args)
elif args.mode == 'get_stats':
get_stats(args)
    elif args.mode == 'get_snr_stats':
        get_snr_stats(args)
    elif args.mode == 'calculate_stoi':
        calculate_stoi(args)
    elif args.mode == 'get_stoi_stats':
        get_stoi_stats(args)
    else:
        raise Exception("Error!")
| [
"[email protected]"
]
|