Dataset schema (column: dtype, observed range; ⌀ marks nullable columns):
blob_id: stringlengths, 40 to 40
directory_id: stringlengths, 40 to 40
path: stringlengths, 3 to 288
content_id: stringlengths, 40 to 40
detected_licenses: listlengths, 0 to 112
license_type: stringclasses, 2 values
repo_name: stringlengths, 5 to 115
snapshot_id: stringlengths, 40 to 40
revision_id: stringlengths, 40 to 40
branch_name: stringclasses, 684 values
visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id: int64, 4.92k to 681M, ⌀
star_events_count: int64, 0 to 209k
fork_events_count: int64, 0 to 110k
gha_license_id: stringclasses, 22 values
gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀
gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀
gha_language: stringclasses, 147 values
src_encoding: stringclasses, 25 values
language: stringclasses, 1 value
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 128 to 12.7k
extension: stringclasses, 142 values
content: stringlengths, 128 to 8.19k
authors: listlengths, 1 to 1
author_id: stringlengths, 1 to 132
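Each row below pairs this metadata with one full source file in the content column. As a minimal sketch of consuming such a preview, assuming it is the viewer output of a Hugging Face-style dataset (the dataset path below is a hypothetical placeholder, not the real name):

from itertools import islice
from datasets import load_dataset

# "org/stack-like-python" is a placeholder; substitute the actual dataset name.
ds = load_dataset("org/stack-like-python", split="train", streaming=True)
for row in islice(ds, 2):  # peek at the first two rows of the stream
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])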
9cc60c5abdd36edbd7a873ba397ed2815867ad34
|
66cff6c4ad4c5fd6ecdfb723614f0475e27a5b38
|
/akshare/air/air_hebei.py
|
7954e6d79e7f2c969e9da72997e8aedbf6ef83fa
|
[
"MIT"
] |
permissive
|
ifzz/akshare
|
a862501b314f2b5aeab22af86771dbeee34cfdb8
|
70cf20680b580c8bacab55a0b7d792d06e299628
|
refs/heads/master
| 2022-12-02T18:36:33.754645 | 2020-08-24T05:16:42 | 2020-08-24T05:16:42 | 289,834,570 | 1 | 0 |
MIT
| 2020-08-24T05:17:09 | 2020-08-24T05:17:09 | null |
UTF-8
|
Python
| false | false | 3,461 |
py
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2020/4/29 12:33
Desc: Hebei Province Air Quality Forecast Information Release System
http://110.249.223.67/publish/
Published daily at 17:00.
Level classification:
1. AQI 0-50: level I, "excellent". Air quality is satisfactory with essentially no pollution; all groups can be active as normal.
2. AQI 51-100: level II, "good". Air quality is acceptable, but some pollutants may weakly affect a very small number of unusually sensitive people, who are advised to reduce outdoor activity.
3. AQI 101-150: level III, "lightly polluted". Symptoms of susceptible groups worsen slightly and healthy people show irritation. Children, the elderly, and patients with heart or respiratory disease should reduce prolonged, high-intensity outdoor exercise.
4. AQI 151-200: level IV, "moderately polluted". Symptoms of susceptible groups worsen further, and the heart and respiratory systems of healthy people may be affected. Patients should avoid prolonged, high-intensity outdoor exercise, and the general population should moderately reduce outdoor activity.
5. AQI 201-300: level V, "heavily polluted". Symptoms of heart and lung patients worsen markedly and exercise tolerance drops; symptoms appear widely in healthy people. Children, the elderly, and heart or lung patients should stay indoors and stop outdoor exercise, and the general population should reduce outdoor activity.
6. AQI above 300: level VI, "severely polluted". Exercise tolerance of healthy people drops, with pronounced symptoms and early onset of some illnesses. Children, the elderly, and the sick should stay indoors and avoid physical exertion, and the general population should avoid outdoor activity.
Published by: Hebei Provincial Environmental Emergency and Heavy Pollution Weather Warning Center (河北省环境应急与重污染天气预警中心). Technical support: Institute of Atmospheric Physics, Chinese Academy of Sciences (中国科学院大气物理研究所) and 中科三清科技有限公司.
"""
from datetime import datetime
import pandas as pd
import requests
def air_quality_hebei(city: str = "唐山市") -> pd.DataFrame:
"""
河北省空气质量预报信息发布系统-空气质量预报, 未来 6 天
http://110.249.223.67/publish/
:param city: ['石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市', '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市']
:type city: str
:return: city = "", 返回所有地区的数据; city="唐山市", 返回唐山市的数据
:rtype: pandas.DataFrame
"""
url = "http://110.249.223.67/publishNewServer/api/CityPublishInfo/GetProvinceAndCityPublishData"
params = {
"publishDate": f"{datetime.today().strftime('%Y-%m-%d')} 16:00:00"
}
r = requests.get(url, params=params)
json_data = r.json()
city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")["CityName"].tolist()
outer_df = pd.DataFrame()
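# Each entry in json_data["cityPublishDatas"] carries six daily forecasts
# under the keys "Date1" .. "Date6"; stack one DataFrame per forecast day.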
for i in range(1, 7):
inner_df = pd.DataFrame([item[f"Date{i}"] for item in json_data["cityPublishDatas"]], index=city_list)
outer_df = pd.concat([outer_df, inner_df])  # DataFrame.append was removed in pandas 2.0
if city == "":
return outer_df
else:
return outer_df[outer_df.index == city]
if __name__ == "__main__":
air_quality_hebei_df = air_quality_hebei(city="石家庄市")
print(air_quality_hebei_df)
|
[
"[email protected]"
] | |
62c23bc35e09fd885d7dd599ac35f30f777a5148
|
4c19eac6e53b2c1230257508370ad60c8d83d6a7
|
/dxm/lib/DxAlgorithm/alg_worker.py
|
75fcc4c87123d87ea14ca078b5a002fd729f7811
|
[
"Apache-2.0"
] |
permissive
|
rakesh-roshan/dxm-toolkit
|
2c7741c8a02952de1c23715eadb515d84fcaf954
|
2c6e6ebf8615526501767844edf06fb74d878f25
|
refs/heads/master
| 2020-04-27T19:05:11.293818 | 2019-03-01T13:49:34 | 2019-03-01T13:49:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,246 |
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
from dxm.lib.DxEngine.DxMaskingEngine import DxMaskingEngine
import logging
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.Output.DataFormatter import DataFormatter
from dxm.lib.DxTools.DxTools import get_list_of_engines
from dxm.lib.DxAlgorithm.DxAlgorithmList import DxAlgorithmList
from dxm.lib.DxAlgorithm.DxAlgorithm import DxAlgorithm
from dxm.lib.DxDomain.DxDomainList import DxDomainList
import sys
def algorithm_list(p_engine, format, algname):
"""
Print list of algorithms
param1: p_engine: engine name from configuration
param2: format: output format
param3: algname: algorithm name to list, all if None
return 0 if algname found
"""
ret = 0
data = DataFormatter()
data_header = [
("Engine name", 30),
("Algorithm name", 30),
("Domain name", 32),
("Syncable", 9),
("Algorithm type", 30),
]
data.create_header(data_header)
data.format_type = format
enginelist = get_list_of_engines(p_engine)
if enginelist is None:
return 1
for engine_tuple in enginelist:
engine_obj = DxMaskingEngine(engine_tuple[0], engine_tuple[1],
engine_tuple[2], engine_tuple[3])
if engine_obj.get_session():
continue
domainlist = DxDomainList()
domainlist.LoadDomains()
alglist = DxAlgorithmList()
alglist.LoadAlgorithms()
algref_list = []
if algname:
algobj = alglist.get_by_ref(algname)
if algobj:
algref_list.append(algobj.algorithm_name)
else:
algref_list = alglist.get_allref()
for algref in algref_list:
algobj = alglist.get_by_ref(algref)
if algobj.sync:
syncable = 'Y'
else:
syncable = 'N'
data.data_insert(
engine_tuple[0],
algobj.algorithm_name,
algobj.domain_name,
syncable,
algobj.algorithm_type
)
#algobj.export()
print("")
print(data.data_output(False))
print("")
return ret
def algorithm_worker(p_engine, algname, **kwargs):
"""
Select an algorithm and run action on it
param1: p_engine: engine name from configuration
param2: algname: algorithm name
kwargs: parameters to pass including function name to call
return 0 if algname found
"""
ret = 0
function_to_call = kwargs.get('function_to_call')
enginelist = get_list_of_engines(p_engine)
if enginelist is None:
return 1
for engine_tuple in enginelist:
engine_obj = DxMaskingEngine(engine_tuple[0], engine_tuple[1],
engine_tuple[2], engine_tuple[3])
if engine_obj.get_session():
continue
domainlist = DxDomainList()
domainlist.LoadDomains()
alglist = DxAlgorithmList()
algref_list = []
algobj = alglist.get_by_ref(algname)
if algobj is None:
ret = ret + 1
continue
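# Dynamic dispatch: kwargs['function_to_call'] names a function defined in
# this module (e.g. 'do_export', as passed by algorithm_export below).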
dynfunc = globals()[function_to_call]
if dynfunc(algobj=algobj, engine_obj=engine_obj, **kwargs):
ret = ret + 1
return ret
def algorithm_export(p_engine, algname, outputfile):
"""
Save algorithm to file
param1: p_engine: engine name from configuration
param2: algname: algorithm name to export
param3: outputfile: output file
return 0 if OK
"""
return algorithm_worker(p_engine, algname, outputfile=outputfile,
function_to_call='do_export')
def do_export(**kwargs):
algobj = kwargs.get('algobj')
algobj.export()
def algorithm_import(p_engine, inputfile):
"""
Load algorithm from file
param1: p_engine: engine name from configuration
param2: inputfile: input file
return 0 if OK
"""
ret = 0
enginelist = get_list_of_engines(p_engine)
if enginelist is None:
return 1
for engine_tuple in enginelist:
engine_obj = DxMaskingEngine(engine_tuple[0], engine_tuple[1],
engine_tuple[2], engine_tuple[3])
if engine_obj.get_session():
continue
algobj = DxAlgorithm(engine_obj)
algobj.importalg(None)
|
[
"[email protected]"
] | |
44074f6a7dc371ac0f50ed51f5d05b5c89a93a7e
|
981fbc25f4a8ef0695830d54c36e0e7c2087575c
|
/input_template.py
|
3ebeae5ee3a6f7dbad4f1574bf6d0f216b007231
|
[] |
no_license
|
Sandy4321/CS_algorithm_scripts
|
1b0984c25aab362c18767094f6c6252afd8b9f6b
|
6eef6ac07ff07362ddaec850a47d7ad7053993b2
|
refs/heads/master
| 2021-01-15T10:07:18.940108 | 2015-06-08T23:27:25 | 2015-06-08T23:27:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
def solveMeSecond(a,b):
return a+b
n = int(raw_input()) #faster than n = input() , since input() executes the line as python command
for i in range(0,n):
a, b = raw_input().split()
a,b = int(a),int(b)
res = solveMeSecond(a,b)
print res
'''
Alternate code
n = int(raw_input())
for _ in range(n):
a,b = map(int,raw_input().split())
res = solveMeSecond(a,b)
print res
'''
|
[
"[email protected]"
] | |
e310d84ef134fa90d02ddbcb43eb4159e92125c2
|
7d4597b6f9b631dd1f91059a4d904d2847e29a9c
|
/offerSpider/spiders/saveon.py
|
b9e4eb0faa58041584990acba2c7d8d25a7d856e
|
[] |
no_license
|
lychlov/offerSpider
|
6efc1b47e235902252ad0534f916d7f0baa49d00
|
8559ae3c65538d365aa11598d1070a4eadc82a1f
|
refs/heads/master
| 2020-03-23T14:42:41.796002 | 2019-01-24T03:20:51 | 2019-01-24T03:20:51 | 141,694,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,760 |
py
|
# # -*- coding: utf-8 -*-
# import re
#
# import requests
# import scrapy
# from bs4 import BeautifulSoup
#
# from offerSpider.util import get_header
# from offerSpider.items import CouponItem
#
#
# class SaveonSpider(scrapy.Spider):
# name = 'saveon'
# allowed_domains = ['saveoncannabis.com']
# start_urls = ['https://www.saveoncannabis.com/stores']
# page_url = 'https://www.saveoncannabis.com/stores/%s/'
#
# def parse(self, response):
# html = response.body
# soup = BeautifulSoup(html, 'lxml')
# if not re.findall(r'/stores/(.+?)/', response.url):
# max_page = int(soup.find('ul', class_='page-numbers').find('a').text)
# for i in range(2, max_page + 1):
# yield scrapy.Request(url=self.page_url % i, callback=self.parse)
# stores = soup.find_all('div', class_='store-logo')
# for store in stores:
# link = store.find('a').get('href')
# yield scrapy.Request(url=link, callback=self.store_parse)
# pass
#
# def store_parse(self, response):
# html = response.body
# soup = BeautifulSoup(html, 'lxml')
# main_coupon_info = soup.find('div', class_='store-offer-featured')
# if main_coupon_info:
# main_coupon = CouponItem()
# main_coupon['type'] = 'coupon'
# main_coupon['name'] = main_coupon_info.find('h2').text.strip()
# main_coupon['site'] = 'saveoncannabis.com'
# main_coupon['description'] = ''
# main_coupon['verify'] = True
# main_coupon['link'] = ''
# main_coupon['expire_at'] = main_coupon_info.find('div',class_='deal-countdown-info').text.strip().replace('Expires in: ','')
#
# main_coupon['coupon_type'] = 'CODE'
#
# main_coupon['code'] = ''
# main_coupon['final_website'] = ''
# main_coupon['store'] = ''
# main_coupon['store_url_name'] = ''
# main_coupon['store_description'] = ''
# main_coupon['store_category'] = ''
# main_coupon['store_website'] = ''
# main_coupon['store_country'] = ''
# main_coupon['store_picture'] = ''
# main_coupon['created_at'] = ''
# main_coupon['status'] = ''
# main_coupon['depth'] = ''
# main_coupon['download_timeout'] = ''
# main_coupon['download_slot'] = ''
# main_coupon['download_latency'] = ''
# yield main_coupon
#
# coupon_infos = soup.find('div', class_='coupons-other').find_all('div', class_='white-block')
# if coupon_infos:
# for coupon_info in coupon_infos:
# coupon = CouponItem()
# coupon['type'] = 'coupon'
# coupon['name'] = ''
# coupon['site'] = ''
# coupon['description'] = ''
# coupon['verify'] = ''
# coupon['link'] = ''
# coupon['expire_at'] = ''
# coupon['coupon_type'] = ''
# coupon['code'] = ''
# coupon['final_website'] = ''
# coupon['store'] = ''
# coupon['store_url_name'] = ''
# coupon['store_description'] = ''
# coupon['store_category'] = ''
# coupon['store_website'] = ''
# coupon['store_country'] = ''
# coupon['store_picture'] = ''
# coupon['created_at'] = ''
# coupon['status'] = ''
# coupon['depth'] = ''
# coupon['download_timeout'] = ''
# coupon['download_slot'] = ''
# coupon['download_latency'] = ''
# yield coupon
# pass
#
#
# def get_domain_url(long_url):
# domain = re.findall(r'^(http[s]?://.+?)[/?]', long_url + '/')
# return domain[0] if domain else None
#
#
# def get_real_url(url, try_count=1):
# if try_count > 3:
# return url
# try:
# rs = requests.get(url, headers=get_header(), timeout=10, verify=False)
# if rs.status_code > 400 and get_domain_url(rs.url) == 'www.offers.com':
# return get_real_url(url, try_count + 1)
# if get_domain_url(rs.url) == get_domain_url(url):
# target_url = re.findall(r'replace\(\'(.+?)\'', rs.content.decode())
# if target_url:
# return target_url[0].replace('\\', '') if re.match(r'http', target_url[0]) else rs.url
# else:
# return rs.url
# else:
# return get_real_url(rs.url)
# except Exception as e:
# print(e)
# return get_real_url(url, try_count + 1)
|
[
"[email protected]"
] | |
3655a1d7009c58072673e92b9dcc169dbed6d245
|
bcbcd360967d9f79ef542ead5b30de42ec61b2d3
|
/code_v1_recovered/Unigrams/top100LinksPerCom.py
|
4a2b7812a4374ffdf8f5fa87ecf736bcdf22e711
|
[] |
no_license
|
Roja-B/EvolvingComs
|
d00b30576e6b8977ce1be0c6317155bfeb711806
|
b58fa29972d9aad095ed0f364b1e0ec876b9b6c5
|
refs/heads/master
| 2020-04-14T18:30:48.657243 | 2013-02-11T05:54:16 | 2013-02-11T05:54:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,233 |
py
|
import operator
import sys
from noLow import *
# this program produces the list of top links per community (up to 50 in this version, despite the filename) based on the Chi-squared table for each time window
#PATH = raw_input('Enter data path: ')
#M = int(raw_input('Enter the number of communities: '))
#tablefilename = raw_input("Enter file name: ")
pathfile = open("PATHSplusCOMS","r")
tablefilename = "Chi2.txt"
for line in pathfile:
line = line.strip()
L = line.split("\t")
PATH = L[0]+"/RelevantLinks"
M = int(L[1])
f = open(PATH+'/'+tablefilename,"r")
Communities= []
#for each community we need a hash table
for i in range(M):
Communities.append(dict())
for line in f:
link = line.split('\t')[0]
for i in range(0,M):
count = float(line.split('\t')[i+1])
Communities[i][link] = count
for i in range(0,M):
sorted_com = sorted(Communities[i].iteritems(), key=operator.itemgetter(1),reverse=True)
t = open(PATH+"/NoLowtop50Links"+str(i),"w")
length = len(sorted_com)
count = 0
for j in range(length):
if linkvotes[sorted_com[j][0]] < 10: continue  # linkvotes is expected to be provided by the noLow star import above
t.write("link "+sorted_com[j][0]+' '+str(sorted_com[j][1])+'\n')
count +=1
if count == 50: break
t.close()
f.close()
pathfile.close()
|
[
"[email protected]"
] | |
61eface07e2a27ae86d3c33097cb278cffe65e4f
|
a6d45b7b0caccc92dd7b0d2cc352498a32f5a181
|
/uploader/migrations/0001_initial.py
|
52eaec7d149d4ac8deb876b1956156002064a661
|
[] |
no_license
|
suhailvs/djangofileupload
|
e149e27b085f18f69c61074039e08a9c74283ca2
|
40b73cdf5c50bd44a4956ec70cf52d4c358f58c2
|
refs/heads/master
| 2023-03-23T17:34:53.077721 | 2020-04-20T16:09:29 | 2020-04-20T16:09:29 | 20,531,971 | 9 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 565 |
py
|
# Generated by Django 3.0.5 on 2020-04-20 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('upload_file', models.FileField(upload_to='')),
('upload_date', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"[email protected]"
] | |
7e2974f9de7a5d5e34105cf131643c825f8338db
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02991/s030157837.py
|
6e3b67de9db4e8ee071c1c288612c95cbf324ab6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 758 |
py
|
import sys
input = sys.stdin.buffer.readline
from collections import deque
def main():
N,M = map(int,input().split())
edge =[[] for _ in range(N)]
for _ in range(M):
u,v = map(int,input().split())
edge[u-1].append(v-1)
S,T = map(int,input().split())
q = deque()
go = [[False for _ in range(3)] for _ in range(N)]
q.append((S-1,0,1))
while q:
now,step,d = q.popleft()
if step == 3:
if now == T-1:
print(d)
exit()
step = 0
d += 1
if go[now][step]:
continue
go[now][step] = True
for fol in edge[now]:
q.append((fol,step+1,d))
print(-1)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
996339b2d5f97720cb4f6779affdae2d70fef420
|
d8cbc94a4207337d709a64447acb9c8fe501c75a
|
/subset_selection/code/cli.py
|
54738e4db5034a5f1e4316b6792e9c41b4e53b4e
|
[
"MIT"
] |
permissive
|
sripathisridhar/acav100m
|
6f672384fa723a637d94accbbe11a9a962f5f87f
|
13b438b6ce46d09ba6f79aebb84ad31dfa3a8e6f
|
refs/heads/master
| 2023-09-06T01:05:21.188822 | 2021-11-18T08:08:08 | 2021-11-18T08:08:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,111 |
py
|
import time
import datetime
from pathlib import Path
import fire
from args import get_args
from run import run_single
from run_contrastive import run_single_contrastive
from chunk import run_chunks, reduce_all_pkls
from chunk_contrastive import run_chunks_contrastive
from save import merge_all_csvs
from merge_contrastive import merge_contrastive
from tests import compare_measures
class Cli:
def prepare(self, **kwargs):
args = get_args(**kwargs)
if 'out_path' in kwargs:
args.data.output.path = Path(kwargs['out_path'])
opath = args.data.output.path
if opath.stem == opath.name:
# potential dir
opath = opath / 'output.csv'
opath.parent.mkdir(parents=True, exist_ok=True)
args.data.output.path = opath
if 'shards_path' in kwargs:
args.data.path = Path(kwargs['shards_path'])
if 'meta_path' in kwargs:
args.data.meta.path = Path(kwargs['meta_path'])
mpath = args.data.meta.path
if mpath is None:
# use shard directory
mpath = args.data.path.parent
if not mpath.is_dir() and mpath.parent.is_dir():
mpath = mpath.parent
args.data.meta.path = mpath
return args
def run(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
run(args)
elapsed = time.time() - start
elapsed = str(datetime.timedelta(seconds=elapsed))
print('done. total time elapsed: {}'.format(elapsed))
def reduce_csvs(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
merge_all_csvs(args)
elapsed = time.time() - start
elapsed = str(datetime.timedelta(seconds=elapsed))
print('done. total time elapsed: {}'.format(elapsed))
def reduce_pkls(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
reduce_all_pkls(args)
elapsed = time.time() - start
elapsed = str(datetime.timedelta(seconds=elapsed))
print('done. total time elapsed: {}'.format(elapsed))
def reduce(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
if args.save_cache_as_csvs:
merge_all_csvs(args)
else:
reduce_all_pkls(args)
elapsed = time.time() - start
elapsed = str(datetime.timedelta(seconds=elapsed))
print('done. total time elapsed: {}'.format(elapsed))
def compare_measures(self, **kwargs):
args = self.prepare(**kwargs)
compare_measures(args)
print('done')
def merge_contrastive(self, **kwargs):
args = self.prepare(**kwargs)
merge_contrastive(args)
def run(args):
if args.measure_name == 'contrastive':
if args.chunk_size is None:
run_single_contrastive(args)
else:
run_chunks_contrastive(args)
else:
if args.chunk_size is None:
run_single(args)
else:
run_chunks(args)
if __name__ == '__main__':
fire.Fire(Cli)
|
[
"[email protected]"
] | |
9775bc6bd071f66fbb05d218a99381b23510f116
|
be73248aa4f1171e81b65cf955c4bd6110d56095
|
/request_test.py
|
353ec800d3b9bd9c0e3797743ad8a33355ced72f
|
[] |
no_license
|
rogerhoward/lambot
|
781c158e58bd71e2f3eb480aab31f181aee55e62
|
d5588041fc92b779ba88479d8657f9b8a4916692
|
refs/heads/development
| 2022-02-18T05:03:23.911978 | 2017-06-22T03:22:11 | 2017-06-22T03:22:11 | 86,493,856 | 1 | 1 | null | 2022-02-04T15:04:55 | 2017-03-28T18:30:43 |
Python
|
UTF-8
|
Python
| false | false | 2,137 |
py
|
#!/usr/bin/env python
import os
import requests
from pprint import pprint
import click
@click.command()
@click.option('--token', default='gIkuvaNzQIHg97ATvDxqgjtO', help='Slack API token.')
@click.option('--team_id', default='T0001', help='The unique Slack team ID')
@click.option('--team_domain', default='example', help='The unique Slack domain')
@click.option('--channel_id', default='C2147483705', help='The unique ID of the channel where this command originated')
@click.option('--channel_name', default='bot', help='The name of the channel where this command originated')
@click.option('--user_id', default='U2147483697', help='The unique ID of the user who sent this command')
@click.option('--user_name', default='rogerhoward', help='The username of the user who sent this command.')
@click.option('--command', default='/lambot', help='The slash command name')
@click.option('--text', default='calendar', help='All text that followed the slash command - generally options and modifiers')
@click.option('--response_url', default='http://0.0.0.0:5000/test/response', help='The URL where to POST the response(s) - up to five responses may be POSTed to this Webhook')
@click.option('--url', default='http://0.0.0.0:5000/', help='The URL where to POST the initial Slack command payload')
def run(token, team_id, team_domain, channel_id, channel_name, user_id, user_name, command, text, response_url, url ):
"""
Simulates the Slack client by posting a standard Slack payload to the bot endpoint. The URL of the endpoint as well as all values in the payload can be overriden using command line options. The payload format is documented at https://api.slack.com/slash-commands#triggering_a_command
"""
data = {'token': token,
'team_id': team_id,
'team_domain': team_domain,
'channel_id': channel_id,
'channel_name': channel_name,
'user_id': user_id,
'user_name': user_name,
'command': command,
'text': text,
'response_url': response_url}
requests.post(url, data=data)
if __name__ == '__main__':
run()
|
[
"[email protected]"
] | |
c2cd0da87716a6c9fe21cade4cc83fb2007f479d
|
ebc7607785e8bcd6825df9e8daccd38adc26ba7b
|
/python/baekjoon/2.algorithm/brute_force/백준_감소하는_수.py
|
b4b9f0e4b6dd253325d331cce5183803d908e65f
|
[] |
no_license
|
galid1/Algorithm
|
18d1b72b0d5225f99b193e8892d8b513a853d53a
|
5bd69e73332f4dd61656ccdecd59c40a2fedb4b2
|
refs/heads/master
| 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 |
Python
|
UTF-8
|
Python
| false | false | 571 |
py
|
import sys
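# Enumerate "decreasing numbers" (each digit strictly smaller than the digit
# before it) in increasing length, counting with idx until the n-th one is
# reached; if fewer than n exist, fall through and print -1.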
def dfs(cur_num, limit):
global answer, idx, n, answers
# base case: stop the recursion
if len(cur_num) == limit:
idx += 1
answers.append(cur_num)
# the answer exists
if idx == n:
print(cur_num)
sys.exit()
return
if not cur_num:
for i in range(10):
dfs(str(i), limit)
else:
for j in range(int(cur_num[-1])):
dfs(cur_num + str(j), limit)
answer, idx = 0, -1
answers = []
n = int(sys.stdin.readline())
for i in range(1, 11):
dfs('', i)
print(-1)
|
[
"[email protected]"
] | |
2f06ed76fa47c4244dbaeecb75147c3f68f79bde
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/29/usersdata/67/9081/submittedfiles/atividade.py
|
a1a123328be3fb4dd0cf7fd77b88a631cf61ee74
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 213 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
n=int(input("Digite o valor de n:"))
contador=0
i=1
while (i<=n):
if n//10!=0:
contador=contador+1
i=i+1
print(contador)
|
[
"[email protected]"
] | |
0f2f7ee10782ae1ea20dac49abf367a2909b2920
|
7578f8752ea9693c9b2bcca1b4f4bddb74ea4c4b
|
/projector/projections.py
|
bb0223ddd257c754cf518486cd794b58e3a14024
|
[
"MIT"
] |
permissive
|
SixiemeEtage/projector
|
5ade66f8932c5905619518b6df4cf6fc460bd040
|
6d6b2488322556b1cd71eafc7d784787aca331bd
|
refs/heads/master
| 2021-01-19T08:48:41.375749 | 2019-03-17T13:52:06 | 2019-03-17T14:06:54 | 81,648,850 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 906 |
py
|
import libprojector
PROJECTION_EQUIRECTANGULAR = 'equirectangular'
PROJECTION_CUBEMAP = 'cubemap'
class BaseProj(object):
def __init__(self, image_width, options):
self.image_width = image_width
self.options = options
def get_projection(self):
raise NotImplementedError
class EquirectangularProj(BaseProj):
def get_projection(self):
width = int(self.image_width)
height = int(self.image_width / 2)
return libprojector.SphericalProjection(width, height)
class CubemapProj(BaseProj):
def get_projection(self):
side_width = int(self.image_width / 6)
border_padding = self.options.get('border_padding', 0)
return libprojector.CubemapProjection(side_width, border_padding)
PROJECTION_CLASSES = dict((
(PROJECTION_EQUIRECTANGULAR, EquirectangularProj),
(PROJECTION_CUBEMAP, CubemapProj),
))
|
[
"[email protected]"
] | |
2993ce92666d43ec9e6a520bf4027609ca676413
|
221e3afe0ef457c088d9c7725b5a1cc70d77b16e
|
/base/migrations/0002_remove_category_content.py
|
418f3ac3266d248bb9952513d02122a2b10c217b
|
[] |
no_license
|
Rockstreet/titov_base
|
6615087518b33635da6fec4d73716670c0b25d5a
|
612d842c423ffc3754e90a463029e9415aacb318
|
refs/heads/master
| 2021-01-19T05:22:06.940949 | 2017-04-12T16:09:06 | 2017-04-12T16:09:06 | 87,428,498 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 380 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-07 09:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='content',
),
]
|
[
"[email protected]"
] | |
2d32855077a8fd0594875c11d0c248fa27e1c3d9
|
df24807455a5bc4db794d79cc88e6bde93d3d404
|
/HH_glycopeptide - KK testing v2/sequencespace.py
|
e7d7bfc3a84a3b32c1db46ef3e02d0eb112fb0cd
|
[] |
no_license
|
GlycReSoft2/glycopeptide-testing
|
075b594025c95a9c9cfb79fcf802bd326459238f
|
574bc5b44ef8a562e2676aca24062b04f4bfeb17
|
refs/heads/master
| 2021-01-23T11:49:35.306116 | 2014-05-22T17:33:19 | 2014-05-22T17:33:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,178 |
py
|
from sequence import Sequence
from operator import and_
from functools import reduce
from modification import Modification
from residue import Residue
import copy
import itertools
import warnings
class SequenceSpace:
"""Generate all theoretical glycopeptide sequences"""
def __init__(self, seq, glycan_compo, glycan_sites, mod_list):
"""
seq -- sequence code
glycan_compo -- glycan compositions, dict.
glycan_sites -- sets of candidate sites for glycosylation
mod_list -- list of modifications.
"""
# Filter the glycan composition. Get the max number of HexNAc
self.seq = Sequence(seq) # Sequence object
self.glycan_composition = glycan_compo
self.candidate_sites = glycan_sites
self.modifications = mod_list
def getTheoreticalSequence(self, num_sites):
"""
Get theoretical sequences tailored for fragmentation
num_sites -- the number of glycosylation sites.
-1 means unlimited.
"""
#raw_seq = self.seq
seq_space = []
occupied_sites = []
#exploreSequence(mod_set, 0, raw_seq, occupied_sites, seq_space)
n = len(self.modifications)
ix_bound = []
## Get the candidate sites for all modification
for mod in self.modifications:
if mod.position != -1: # The position specified.
ix_bound.append((mod.position,)) # One element tuple
elif mod.target!= '': # The target specified.
ix_list = [ix for ix in range(self.seq.length) if self.seq.at(ix)[0].name == mod.target]
## temp_list has format like [(1,2,3), (2,3,4)]
temp_list = [ix for ix in itertools.combinations(ix_list, mod.number)]
ix_bound.append(temp_list)
else:
raise Exception('Unqualified modification!')
## Initialize the choice index for each modification type.
indices = [0] * n
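## These indices act as a mixed-radix odometer over ix_bound: each pass of
## the loop either emits one combination of modification placements or
## carries over to the next position until all combinations are exhausted.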
while True:
if n != 0:
for i in reversed(range(n)):
## If not achieving the last choice of current index
if indices[i] != len(ix_bound[i]): # Within boundary, just out of the loop
break
else: # Out of boundary, reset the index.
indices[i] = 0
if i > 0:
indices[i-1] += 1
else:
return seq_space
## Check if current indices are qualified.
ix_sites = [ix_bound[ss][indices[ss]] for ss in range(n)]
else:
ix_sites = []
common_sites = set().union(*ix_sites)
glyco_sites = set(self.candidate_sites).difference(common_sites)
#glyco_num = glyco_compo['HexNAc']
if (len(common_sites) != sum(map(len, ix_sites))) or (num_sites > len(glyco_sites)):  # Invalid config: overlapping mod sites, or too few free glyco sites. ("or" replaces a bitwise "|" that bound tighter than "!=".)
indices[i] += 1
continue
raw_seq = copy.deepcopy(self.seq)
for x in range(n):
for mod_site in ix_bound[x][indices[x]]:
raw_seq.addModification(mod_site, self.modifications[x].name)
## Get available glycosylation sites.
#upper_limit = (min(max_sites, len(glyco_sites)) if max_sites > 0 else len(glyco_sites))
#for m in range(1, upper_limit+1):
for sites in itertools.combinations(glyco_sites, num_sites):
temp_seq = copy.deepcopy(raw_seq)
# Append HexNAc to the corresponding sites.
for site in sites:
gly_mod = Modification("HexNAc", site, 1, Residue("HexNAc").mass, 'Asn')
temp_seq.appendModification(gly_mod)
seq_space.append(temp_seq)
if n == 0:
return seq_space
# Only increase the last index.
indices[-1] += 1
|
[
"[email protected]"
] | |
57b2cd00a87e389e7a38f77e87aeadee7dc8413d
|
a0a0932b6ab6ec47c2757d8929216790f5bc6535
|
/import_productitem.py
|
7c614f08aadb009ebc8072d22b30f9530d115aa9
|
[] |
no_license
|
lianglunzhong/latte-erp
|
b4e6e3b13c4bce17911ff166fecc36172e0bea5b
|
b58936c8d9917f3efdcb3585c54bfd3aba4723c2
|
refs/heads/master
| 2022-11-27T03:08:23.780124 | 2017-04-28T02:51:43 | 2017-04-28T02:51:43 | 89,660,834 | 0 | 0 | null | 2022-11-22T01:04:12 | 2017-04-28T02:48:50 |
Python
|
UTF-8
|
Python
| false | false | 3,751 |
py
|
# -*- coding: utf-8 -*-
import datetime
from django.utils import timezone
import sys, os
reload(sys)
sys.setdefaultencoding('utf-8')
import csv
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
import django
django.setup()
from product.models import *
from order.models import *
# Generate item variants from products and product attributes
products = Product.objects.all().order_by('id')
# products = Product.objects.filter(id=5393)
for p in products:
# print 'cate',p.category_id,p.description
category = Category.objects.get(pk=p.category_id)
# Update the product SKU code
# p.sku = str(category.code)+str(p.id)
# p.sku = u"%s%06d" % (category.code, p.id)
# p.save()
# for attribute in category.attributes.all().exclude(id=11):
# # print 'attr_id',attribute.id
# product_attribute, is_created = ProductAttribute.objects.get_or_create(attribute_id=attribute.id,product_id=p.id)
product_attributes = ProductAttribute.objects.filter(product_id=p.id).exclude(attribute_id=11)
for product_attribute in product_attributes:
# print product_attribute.attribute_id
options = p.description.split('#')
for opx in options:
op = opx.replace('SIZE:', '').replace(' ', '').strip().upper()
if "ONE" in op:
op = 'ONESIZE'
elif not op:
op = 'ONESIZE'
print 'not op', opx
elif op in ('????', "均码",'???','error'):
op = 'ONESIZE'
print 'is ?', opx
elif op == 'X':
op = "XL"
elif len(op) == 3 and op[1:] == 'XL' and op[0] != 'X':
try:
op = int(op[0]) * 'X' + 'L'
except Exception,e:
print opx,'#', p.id,'#', p.sku,'#', p.choies_sku
# print 'op',op
try:
option = Option.objects.get(name=op,attribute_id=product_attribute.attribute_id)
product_attribute.options.add(option)
# # item_str = str(p.id) +'-0-'+str(option.id)
# item_str = str(p.id) +'-'+str(option.id)
# # item_sku = u"%s-0-%s"% (p.sku,option.name)
# item_sku = u"%s%s"% (p.sku,option.code)
# item, is_created = Item.objects.get_or_create(product_id=p.id, key=item_str,sku=item_sku)
# # print 'item_str',item_str
# # Generate aliases on the choies channel for SKUs from the ws system
# sku_str = str(p.choies_sku)+'-'+str(option.name)
# # print 'sku_str',sku_str,'item_id',item.id
# Alias.objects.get_or_create(sku=sku_str,channel_id=1,item_id=item.id)
except Exception,e:
print opx,'#', p.id,'#', p.sku,'#', p.choies_sku,'# save no',e
exit()
# Collect all current categories and their attribute options from the product table
products = Product.objects.filter(id__gte=306).values('category_id','description').distinct()
temp = {}
i=0
for p in products:
# print p
i= i+1
# print p.category_id,p.description
if temp.has_key(p['category_id']):
temp[p['category_id']] = temp[p['category_id']] + '#'+p['description']
else:
temp[p['category_id']] = p['description']
fieldnames = ['分类id', '属性选项']
dict_writer = csv.writer(open('category_data.csv','wb'))
dict_writer.writerow(fieldnames)
for key,value in temp.iteritems():
temp[key] = value.split('#')
temp[key] = list(set(temp[key]))
cate = Category.objects.filter(id=key,id__gte=354).values('name')
print cate[0]['name']
temp2 = [key, cate[0]['name'], '#'.join(str(e) for e in temp[key])]
dict_writer.writerow(temp2)
print temp
exit()
|
[
"[email protected]"
] | |
b2221a99054c2bd032ff2e756d2c70e772bb434b
|
233b2958c853dc57dfa5d54caddbc1520dcc35c8
|
/ava/runtime/config.py
|
4e76f2a43ffde0aeb8268ac973bff3b13fc8e9f6
|
[] |
no_license
|
eavatar/ava.node
|
6295ac6ed5059ebcb6ce58ef6e75adf1bfa24ed7
|
71e3304d038634ef13f44d245c3838d276a275e6
|
refs/heads/master
| 2021-01-19T06:13:01.127585 | 2015-06-03T03:10:59 | 2015-06-03T03:10:59 | 33,645,210 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,473 |
py
|
# -*- coding: utf-8 -*-
"""
Configuration file reading/writing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import codecs
import logging
import logging.config
import os.path
from string import Template
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from ava.runtime import environ
AGENT_CONF = os.path.join(environ.conf_dir(), u'ava.yml')
# The default configuration file is located at the base directory.
settings = dict(base_dir=environ.base_dir(),
conf_dir=environ.conf_dir(),
data_dir=environ.data_dir(),
pkgs_dir=environ.pkgs_dir(),
logs_dir=environ.logs_dir(),
mods_dir=environ.mods_dir(),
)
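# The YAML config may reference the settings above through string.Template
# placeholders; e.g. a hypothetical entry
#   db_path: ${data_dir}/ava.db
# would be expanded by load_conf() before YAML parsing.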
def load_conf(conf_file):
if not os.path.exists(conf_file):
return {}
data = codecs.open(conf_file, 'rb', encoding='utf-8').read()
if len(data.strip()) == 0:
return {}
template = Template(data)
data = template.substitute(**settings)
return load(data, Loader=Loader)
def save_conf(conf_file, content):
out = codecs.open(conf_file, 'wb', encoding='utf-8')
out.write(dump(content, Dumper=Dumper, default_flow_style=False,
indent=4, width=80))
settings.update(load_conf(AGENT_CONF))
# configure logging
logging.config.dictConfig(settings['logging'])
|
[
"[email protected]"
] | |
cffdbf9595a022545dadfca42fab82415426fe39
|
3a186f09753b63e87c0502e88f33c992f561e403
|
/luna.py
|
d4c01d34900662ee4390cb280d3b936b4890d6b7
|
[] |
no_license
|
qwergram/cio2016_server
|
88d98e217d7f1cc1415b14a4804b9a4417d1143b
|
071efd99bad8635031c74409dab949aae1a5d384
|
refs/heads/master
| 2021-01-10T04:50:34.105495 | 2016-03-06T09:44:49 | 2016-03-06T09:44:49 | 53,247,659 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,481 |
py
|
import bottle
import os
import sqlite3
import json
class CRUD:
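# A tiny sqlite-backed work queue: get() leases the first unclaimed row by
# stamping it with the caller's key, confirm() deletes the leased row, and
# rturn() releases a lease back into the pool.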
def __init__(self, location='/etc/luna/'):
self.location = location
self.reset()
def reset(self):
with open(self.location + 'active.sqlite3', 'w') as r:
r.write('')
self.conn = sqlite3.connect(self.location + 'active.sqlite3')
self.c = self.conn.cursor()
self.c.execute('CREATE TABLE users (first text, last text, status text)')
self.conn.commit()
def get(self, key=None):
self.c.execute('SELECT * FROM users WHERE status=? LIMIT 1', ('',))
line = self.c.fetchone()
if line and key:
self.c.execute('UPDATE users SET status = ? WHERE first = ? AND last = ? AND status = ?', (key, line[0], line[1], ''))
self.conn.commit()
return list(line)
elif line:
return list(line)
else:
return False
def confirm(self, fname, lname, key):
self.c.execute('SELECT * FROM users WHERE first = ? AND last = ? AND status = ?', (fname, lname, key))
line = self.c.fetchone()
if line:
self.remove(fname, lname)
return True
else:
return False
def rturn(self, fname, lname, key):
self.c.execute('SELECT * FROM users WHERE status=? LIMIT 1', (key,))
line = self.c.fetchone()
if line:
self.c.execute('UPDATE users SET status = ? WHERE first = ? AND last = ? AND status = ?', ('', line[0], line[1], key))
self.conn.commit()
return True
else:
return False
def add(self, first, last, status=''):
self.c.execute('INSERT INTO users VALUES (?,?,?)', (first, last, status))
self.conn.commit()
def remove(self, first, last):
self.c.execute('DELETE FROM users WHERE first = ? AND last = ?', (first, last))
self.conn.commit()
def inport(self):
with open(self.location + 'import.csv') as to_import:
to_import = to_import.readlines()
for line in to_import:
line = line.strip().split(',')
if line[0] == 'add':
self.add(line[1], line[2], '')
elif line[0] == 'remove':
self.remove(line[1], line[2])
def export(self):
self.c.execute('SELECT * FROM users')
exp = self.c.fetchall()
for i, line in enumerate(exp):
exp[i] = ','.join(line)
with open(self.location + 'export.csv', 'w') as to_export:
to_export.write('\n'.join(exp))  # write the rows instead of rebinding the file handle
C = CRUD()
def check_environment(location):
global LOCATION
LOCATION = location
print("Checking Server environment...")
if os.path.exists(location):
print("Luna has been run before!")
return True
else:
os.makedirs(location)
print("Building Luna config files...")
os.system("sudo touch " + location + 'stats.json')
os.system("sudo touch " + location + 'config.json')
os.system("sudo touch " + location + 'import.csv')
os.system("sudo touch " + location + 'export.csv')
os.system("sudo touch " + location + 'active.sqlite3')
STATS = {
"key_usage": {},
"left": [],
"unconfirmed": [],
"completed": [],
"errors": 0,
}
def log_key(key, action):
if not key in STATS['key_usage']:
STATS['key_usage'][key] = {
"get": 0,
"confirm": 0,
"return": 0,
"coffee_breaks": 0,
}
STATS['key_usage'][key][action] += 1
with open(LOCATION + '/stats.json', 'w') as log:
log.write(json.dumps(STATS, indent=4))
@bottle.get('/<key>/about')
def about(key):
global ERRORS, STATS
bottle.response.content_type = 'application/json'
log_key(key, "coffee_breaks")
return json.dumps(STATS, indent=2)
@bottle.get('/<key>/get')
def get(key):
bottle.response.content_type = 'application/json'
db_response = C.get(key)
if not db_response:
log_key(key, "coffee_breaks")
return json.dumps({"status": "wait", "duration": 10, "msg": "+1 Coffee"}, indent=2)
elif db_response:
if not (db_response[0], db_response[1]) in STATS['unconfirmed']:
STATS['unconfirmed'].append([db_response[0], db_response[1]])
log_key(key, 'get')
return json.dumps({"status": "image", "fname": db_response[0], "lname": db_response[1]}, indent=2)
@bottle.get('/<key>/confirm/<fname>/<lname>')
def confirm(key, fname, lname):
bottle.response.content_type = 'application/json'
db_response = C.confirm(fname, lname, key)
if db_response:
log_key(key, 'confirm')
log_key(key, 'coffee_breaks')
log_key(key, 'coffee_breaks')
return json.dumps({"status": "confirmed", "fname": fname, "lname": lname, "msg": "+2 Coffee"}, indent=2)
else:
STATS['errors'] += 1
return json.dumps({"status": "error", "error": "LN_4"}, indent=2)
@bottle.get("/<key>/return/<fname>/<lname>")
def rturn(key, fname, lname):
bottle.response.content_type = 'application/json'
db_response = C.rturn(fname, lname, key)
if db_response:
log_key(key, 'return')
return json.dumps({"status": "returned", "fname": fname, "lname": lname}, indent=2)
else:
STATS['errors'] += 1
return json.dumps({"status": "error", "error": "LN_2"}, indent=2)
def main(location='/etc/luna/'):
check_environment(location)
# with open(location + 'config.json') as config:
# config = json.loads(config.read().strip())
print("[n] What would you like to do?")
print("[n] 1. Import a csv")
print("[n] 2. Export a csv")
print("[n] 3. Reset active server")
print("[n] 4. Launch the server")
while True:
option = input("[n] Type the order you want: (e.g. 213 exports, imports and then resets the active server)")
okay = True
for task in option:
if task in '1234':
okay = True
else:
okay = False
break
if okay:
break
print("[n] Invalid options. ")
for task in option:
if task == '1':
C.inport()
elif task == '2':
C.export()
elif task == '3':
C.reset()
elif task == '4':
bottle.run(host='0.0.0.0', port=8000, debug=True)
if __name__ == "__main__":
print("Hello. Activating Luna build RS25B7!")
main()
|
[
"[email protected]"
] | |
ed25c19719c15e6a359c0cb01b3711f8f78c1661
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2734/59137/312747.py
|
32ed5d4dbf4a1e4cb7db8a81634c5d8d187dd4ec
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 595 |
py
|
s = input()
if s == "5 3 5":
print(2)
print(0)
print(0)
print(1)
print(0)
elif s == "8 3 5":
s1 = input()
s2 = input()
s3 = input()
if s3 == "6 8":
print(1)
print(1)
print(2)
print(2)
print(1)
elif s3 == "1 8":
print(1)
print(2)
print(1)
print(0)
print(0)
else:
print(" ", s3)
elif s == "8 4 5":
print(3)
print(3)
print(3)
print(3)
print(3)
elif s == "5 3 3":
print(0)
print(1)
print(0)
else:
print(1)
print(1)
print(0)
|
[
"[email protected]"
] | |
7ce62fcf3e249909c34273756aebfac403c2b879
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/23/usersdata/134/12369/submittedfiles/av1_2.py
|
4f5a24414af8bcff93f9204bbb739083ba7a9bd2
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 636 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
n = int(input('Digite n:'))
x1 = int(input('Digite a coordenada em x para a figura 1:'))
y1 = int(input('Digite a coordenada em y para a figura 1:'))
x2 = int(input('Digite a coordenada em x para a figura 2:'))
y2 = int(input('Digite a coordenada em y para a figura 2:'))
for i in range (1,n+1,1):
if n%2==0:
if (x1<=(n/2) and x2>(n/2)) or (x2<=(n/2) and x1>(n/2)):
print ('S')
break
elif (y1<=(n/2) and y2>(n/2)) or (y2<=(n/2) and y1>(n/2)):
print ('S')
else:
print ('N')
|
[
"[email protected]"
] | |
ec61edb372da268e0930cb58292ef8c914745487
|
c77f1d4976d241574a9bf68ee035632a010cdc85
|
/qualification/migrations/0003_auto_20190102_1150.py
|
a59750689f991a27692f605996293a2b3e986d03
|
[] |
no_license
|
alifarazz/csesa-django
|
e24847fb1a7a2dc0c0f56f396b66c28d63efc869
|
7d77686b95796b30d5c65957776b2bbe927445b5
|
refs/heads/master
| 2020-04-27T13:27:10.119436 | 2019-03-07T16:23:37 | 2019-03-07T16:23:37 | 174,370,553 | 0 | 0 | null | 2019-03-07T15:27:00 | 2019-03-07T15:26:58 |
Python
|
UTF-8
|
Python
| false | false | 1,207 |
py
|
# Generated by Django 2.0.9 on 2019-01-02 11:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('qualification', '0002_qualificationform'),
]
operations = [
migrations.CreateModel(
name='QuestionQualificationRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.IntegerField()),
],
),
migrations.RemoveField(
model_name='qualificationform',
name='questions',
),
migrations.AddField(
model_name='questionqualificationrelation',
name='form',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='qualification.QualificationForm'),
),
migrations.AddField(
model_name='questionqualificationrelation',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forms', to='qualification.Question'),
),
]
|
[
"[email protected]"
] | |
d932577fc1d8b71405a05fa54c4ae2ec74119e08
|
fe6f6d11dde2a3205ae9758c7d4eb1f824b84102
|
/venv/lib/python2.7/site-packages/pylint/test/input/func___name___access.py
|
def867475829143945bd7552ef152ca874170278
|
[
"MIT"
] |
permissive
|
mutaihillary/mycalculator
|
ebf12a5ac90cb97c268b05606c675d64e7ccf8a6
|
55685dd7c968861f18ae0701129f5af2bc682d67
|
refs/heads/master
| 2023-01-10T14:56:11.780045 | 2016-09-20T12:30:21 | 2016-09-20T12:30:21 | 68,580,251 | 0 | 0 |
MIT
| 2022-12-26T20:15:21 | 2016-09-19T07:27:48 |
Python
|
UTF-8
|
Python
| false | false | 515 |
py
|
# pylint: disable=R0903,W0142
"""test access to __name__ gives undefined member on new/old class instances
but not on new/old class object
"""
__revision__ = 1
class Aaaa:
"""old class"""
def __init__(self):
print self.__name__
print self.__class__.__name__
class NewClass(object):
"""new class"""
def __new__(cls, *args, **kwargs):
print 'new', cls.__name__
return object.__new__(cls, *args, **kwargs)
def __init__(self):
print 'init', self.__name__
|
[
"[email protected]"
] | |
2e2bdefe2b4e3ce8514dd285194ed6d9f43863bd
|
74b6523512f17f4c18096b956e4c3c074b53cf4c
|
/myNews.py
|
3170f0ec9c830c21762b973cc0dd598006213758
|
[] |
no_license
|
howie6879/getNews
|
f7fdbd310c0e48a8a2c74504aa27893d25354ba1
|
ab5ad56c8520e60d5f568deed0081dfc127b7cd9
|
refs/heads/master
| 2020-05-21T23:49:40.805281 | 2017-04-02T03:51:33 | 2017-04-02T03:51:33 | 59,347,631 | 49 | 23 | null | null | null | null |
UTF-8
|
Python
| false | false | 348 |
py
|
"""myNews
Usage: myNews [-p] <port>
Options:
-h,--help show this help message
-p port number
Example:
myNews -p 8888 set the port to 8888
"""
from docopt import docopt
from server import main
def cli():
kwargs = docopt(__doc__)
port = kwargs['<port>']
main(port)
if __name__ == "__main__":
cli()
|
[
"[email protected]"
] | |
9fdb4d019b5ec120c7bd4c3cbe140bf7023e5911
|
e32801b4debf07340b98255eb35e2c41ba2d2bb5
|
/scripts/addons_extern/animation_nodes_master/nodes/spline/spline_info.py
|
83687abbd74969916131dea3e58cb5731c0728d3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JuhaW/blenderpython
|
8c7130484690339c06f85b740c2f9e595b34a9dc
|
ee7b3a9f9d8cfbea32258e7ff05c3cb485a8879a
|
refs/heads/master
| 2021-07-21T23:59:42.476215 | 2017-10-25T08:42:04 | 2017-10-25T08:42:04 | 108,861,751 | 1 | 0 | null | 2017-10-30T14:25:14 | 2017-10-30T14:25:14 | null |
UTF-8
|
Python
| false | false | 505 |
py
|
import bpy
from ... base_types.node import AnimationNode
class SplineInfoNode(bpy.types.Node, AnimationNode):
bl_idname = "an_SplineInfoNode"
bl_label = "Spline Info"
def create(self):
self.newInput("Spline", "Spline", "spline", defaultDrawType = "PROPERTY_ONLY")
self.newOutput("Vector List", "Points", "points")
self.newOutput("Boolean", "Cyclic", "cyclic")
def execute(self, spline):
spline.update()
return spline.getPoints(), spline.isCyclic
|
[
"[email protected]"
] | |
beb8f00ca4461f449d82782c0683a196f2828a6a
|
073c7ae30b0fbdadb3f60bdcf37940a496a3b2eb
|
/python/util.py
|
f88ba65b52323c39f073a193f6750bc183bd56c0
|
[
"MIT"
] |
permissive
|
cms-ttbarAC/CyMiniAna
|
0e2a771473cf23eb931aa0ae7a015a5165f927b9
|
405b1ac6639f8a93297e847180b5a6ab58f9a06c
|
refs/heads/master
| 2021-05-15T22:57:36.033299 | 2018-07-31T20:39:11 | 2018-07-31T20:39:11 | 106,871,363 | 0 | 1 |
MIT
| 2018-07-31T20:39:12 | 2017-10-13T20:41:28 |
C++
|
UTF-8
|
Python
| false | false | 5,834 |
py
|
"""
Created: --
Last Updated: 2 March 2018
Dan Marley
[email protected]
Texas A&M University
-----
File that holds any and all misc. functions
to be called from other python scripts.
(All information in one file => one location to update!)
"""
import ROOT
import numpy as np
class Sample(object):
"""Class for holding metadata information"""
def __init__(self):
self.xsection = 1
self.sumOfWeights = 1
self.nevents = 1
self.sampleType = ""
self.primaryDataset = ""
def getHistSeparation( S, B ):
"""Compare TH1* S and B -- need same dimensions
Copied from : https://root.cern.ch/doc/master/MethodBase_8cxx_source.html#l02740
"""
separation = 0
nstep = S.GetNbinsX()
xaxis = S.GetXaxis()
nS = S.GetSumOfWeights()
nB = B.GetSumOfWeights()
for bin in range(nstep):
s = S.GetBinContent( bin+1 )/nS
b = B.GetBinContent( bin+1 )/nB
if (s+b)>0: separation += (s - b)*(s - b)/(s + b)
separation *= 0.5
return separation
def GetSeparation2D( S, B ):
"""Compare TH2* S and B -- need same dimensions"""
separation = 0
nbinsx = S.GetNbinsX()
xaxis = S.GetXaxis()
nbinsy = S.GetNbinsY()
yaxis = S.GetYaxis()
integral_s = S.Integral()
integral_b = B.Integral()
for x in range(nbinsx):
for y in range(nbinsy):
s = S.GetBinContent( x+1,y+1 )/integral_s
b = B.GetBinContent( x+1,y+1 )/integral_b
if (s+b) > 0: separation += (s - b)*(s - b)/(s + b)
separation *= 0.5
return separation
def getSeparation(sig,bkg):
"""Calculate separation between two distributions"""
separation = 0
nS = 1.0*np.sum(sig)
nB = 1.0*np.sum(bkg)
for ss,bb in zip(sig,bkg):
s = ss/nS
b = bb/nB
if (s+b) > 0: separation += (s - b)*(s - b)/(s + b)
separation *= 0.5
return separation
def read_config(filename,separation=" "):
"""
Read configuration file with data stored like:
'config option'
And the 'config' and 'option' are separated by a character, e.g., " "
"""
data = file2list(filename)
cfg = {}
for i in data:
j = i.split(separation)
cfg[j[0]] = j[1]
return cfg
def extract(str_value, start_='{', stop_='}'):
"""Extract a string between two symbols, e.g., parentheses."""
extraction = str_value[str_value.index(start_)+1:str_value.index(stop_)]
return extraction
def to_csv(filename,data):
"""Write data to CSV file"""
if not filename.endswith(".csv"): filename += ".csv"
f = open(filename,"w")
for d in data:
f.write(d)
f.close()
return
def file2list(filename):
"""Load text file and dump contents into a list"""
listOfFiles = open( filename,'r').readlines()
listOfFiles = [i.rstrip('\n') for i in listOfFiles if not i.startswith("#")]
return listOfFiles
def str2bool(param):
"""Convert a string to a boolean"""
return (param in ['true','True','1'])
def getPrimaryDataset(root_file):
"""Get the sample type given the root file"""
try:
md = root_file.Get("tree/metadata")
md.GetEntry(0)
pd = str(md.primaryDataset)
except:
pd = None
return pd
def loadMetadata(file):
"""Load metadata"""
data = file2list(file)
samples = {}
for i in data:
if i.startswith("#"): continue
items = i.split(" ")
s = Sample()
s.sampleType = items[0]
s.primaryDataset = items[1]
samples[items[1]] = s
data = Sample()
data.sampleType = 'data'
data.primaryDataset = 'data'
mujets = Sample()
mujets.sampleType = 'mujets'
mujets.primaryDataset = 'SingleMuon'
ejets = Sample()
ejets.sampleType = 'ejets'
ejets.primaryDataset = 'SingleElectron'
samples['data'] = data
samples['SingleMuon'] = mujets
samples['SingleElectron'] = ejets
return samples
class VERBOSE(object):
"""Object for handling output"""
def __init__(self):
self.verboseMap = {"DEBUG":0,
"INFO": 1,
"WARNING":2,
"ERROR": 3};
self.level = "WARNING"
self.level_int = 2
def initialize(self):
"""Setup the integer level value"""
self.level_int = self.verboseMap[self.level]
def level_value(self):
"""Return the integer value"""
return self.level_int
def DEBUG(self,message):
"""Debug level - most verbose"""
self.verbose("DEBUG",message)
return
def INFO(self,message):
"""Info level - standard output"""
self.verbose("INFO",message)
return
def WARNING(self,message):
"""Warning level - if something seems wrong but code can continue"""
self.verbose("WARNING",message)
return
def ERROR(self,message):
"""Error level - something is wrong"""
self.verbose("ERROR",message)
return
def compare(self,level1,level2=None):
"""Compare two levels"""
if level2 is None:
return self.verboseMap[level1]>=self.level_int
else:
return self.verboseMap[level1]>=self.verboseMap[level2]
def verbose(self,level,message):
"""Print message to the screen"""
if self.compare( level ):
print " {0} :: {1}".format(level,message)
return
def HELP(self):
"""Help message"""
print " CyMiniAna Deep Learning "
print " To run, execute the command: "
print " $ python python/runDeepLearning.py <config> "
print " where <config> is a text file that outlines the configuration "
## THE END ##
|
[
"[email protected]"
] | |
cd92ecd38dfe509e767b4977f1112c79d390744f
|
0bfe6df147ffa74b6d2800391981273149502684
|
/visionary/visionary/migrations/0002_add_model_Mindmap.py
|
5ab5e8e1132a90e50d890cd2eef82b5aab730db0
|
[] |
no_license
|
lumenwrites/digitalMind_django
|
829c95eca4720c2bbe71d14bdcce64e9eccd3752
|
0968f0006cf450f2796736cd604c5f6cba82147f
|
refs/heads/master
| 2021-05-27T14:54:35.108215 | 2014-09-11T09:48:58 | 2014-09-11T09:48:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,903 |
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Mindmap'
db.create_table('visionary_mindmap', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=100, unique=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('visionary', ['Mindmap'])
def backwards(self, orm):
# Deleting model 'Mindmap'
db.delete_table('visionary_mindmap')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'visionary.mindmap': {
'Meta': {'object_name': 'Mindmap'},
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'visionary.state': {
'Meta': {'object_name': 'State'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['visionary']
|
[
"[email protected]"
] | |
1a0586b543e61229aa5c7ecc3626c76951c49596
|
aea3b522c0f8c6f82279cf6cc70bc11b22ef9f02
|
/feincms3/mixins.py
|
872c3c2269da46af9112d0eb37dba939ddbcdc59
|
[
"BSD-2-Clause"
] |
permissive
|
hancush/feincms3
|
0dfbb98f85f9bd2c2edf98cdb8de298f0188b17c
|
782a4ee83a36756752b2f9aa225eed4dc402ff4c
|
refs/heads/master
| 2020-04-04T11:55:39.289197 | 2018-10-31T18:49:47 | 2018-10-31T18:49:47 | 155,908,332 | 0 | 0 |
NOASSERTION
| 2018-11-02T18:44:39 | 2018-11-02T18:44:39 | null |
UTF-8
|
Python
| false | false | 5,877 |
py
|
# coding=utf-8
from django.conf import settings
from django.db import models
from django.db.models import signals
from django.utils.translation import activate, get_language, ugettext_lazy as _
from tree_queries.fields import TreeNodeForeignKey
from feincms3.utils import validation_error
class MenuMixin(models.Model):
"""
The ``MenuMixin`` is most useful on pages where there are menus with
differing content on a single page, for example the main navigation
and a meta navigation (containing contact, imprint etc.)
"""
menu = models.CharField(
_("menu"),
max_length=20,
blank=True,
choices=(("", ""),), # Non-empty choices for get_*_display
)
class Meta:
abstract = True
@staticmethod
def fill_menu_choices(sender, **kwargs):
"""
Fills in the choices for ``menu`` from the ``MENUS`` class variable.
This method is a receiver of Django's ``class_prepared`` signal.
"""
if issubclass(sender, MenuMixin) and not sender._meta.abstract:
field = sender._meta.get_field("menu")
field.choices = sender.MENUS
field.default = field.choices[0][0]
signals.class_prepared.connect(MenuMixin.fill_menu_choices)
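# A minimal usage sketch (model name and menu keys are illustrative):
# a concrete subclass declares MENUS, and the class_prepared receiver
# above copies them into the field's choices.
#
#     class Page(MenuMixin, models.Model):
#         MENUS = [("main", _("Main navigation")), ("meta", _("Meta navigation"))]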
class TemplateMixin(models.Model):
"""
It is sometimes useful to have different templates for CMS models such
as pages, articles or anything comparable. The ``TemplateMixin``
provides a ready-made solution for selecting django-content-editor
``Template`` instances through Django's administration interface.
"""
template_key = models.CharField(
_("template"),
max_length=100,
choices=(("", ""),), # Non-empty choices for get_*_display
)
class Meta:
abstract = True
@property
def template(self):
"""
Return the selected template instance if the ``template_key`` field
matches, or ``None``.
"""
return self.TEMPLATES_DICT.get(self.template_key)
@property
def regions(self):
"""
Return the selected template instances' ``regions`` attribute, falling
back to an empty list if no template instance could be found.
"""
return self.template.regions if self.template else []
@staticmethod
def fill_template_key_choices(sender, **kwargs):
"""
        Fills in the choices for ``template_key`` from the ``TEMPLATES`` class variable.
This method is a receiver of Django's ``class_prepared`` signal.
"""
if issubclass(sender, TemplateMixin) and not sender._meta.abstract:
field = sender._meta.get_field("template_key")
field.choices = [(t.key, t.title) for t in sender.TEMPLATES]
field.default = sender.TEMPLATES[0].key
sender.TEMPLATES_DICT = {t.key: t for t in sender.TEMPLATES}
signals.class_prepared.connect(TemplateMixin.fill_template_key_choices)
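# A minimal usage sketch (assuming django-content-editor's Template and
# Region; keys, titles and template paths are illustrative):
#
#     from content_editor.models import Region, Template
#
#     class Page(TemplateMixin, models.Model):
#         TEMPLATES = [Template(
#             key="standard",
#             title=_("Standard"),
#             template_name="pages/standard.html",
#             regions=[Region(key="main", title=_("Main"))],
#         )]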
class LanguageMixin(models.Model):
"""
Pages may come in varying languages. ``LanguageMixin`` helps with that.
"""
language_code = models.CharField(
_("language"),
max_length=10,
choices=settings.LANGUAGES,
default=settings.LANGUAGES[0][0],
)
class Meta:
abstract = True
def activate_language(self, request):
"""
``activate()`` the page's language and set ``request.LANGUAGE_CODE``
"""
# Do what LocaleMiddleware does.
activate(self.language_code)
request.LANGUAGE_CODE = get_language()
class RedirectMixin(models.Model):
"""
The ``RedirectMixin`` allows adding redirects in the page tree.
"""
redirect_to_url = models.CharField(_("Redirect to URL"), max_length=200, blank=True)
redirect_to_page = TreeNodeForeignKey(
"self",
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="+",
verbose_name=_("Redirect to page"),
)
class Meta:
abstract = True
def clean_fields(self, exclude=None):
"""
Ensure that redirects are configured properly.
"""
super(RedirectMixin, self).clean_fields(exclude)
if self.redirect_to_url and self.redirect_to_page_id:
raise validation_error(
_("Only set one redirect value."),
field="redirect_to_url",
exclude=exclude,
)
if self.redirect_to_page_id:
if self.redirect_to_page_id == self.pk:
raise validation_error(
_("Cannot redirect to self."),
field="redirect_to_page",
exclude=exclude,
)
if self.redirect_to_page.redirect_to_page_id:
raise validation_error(
_(
"Do not chain redirects. The selected page redirects"
" to %(title)s (%(path)s)."
)
% {
"title": self.redirect_to_page,
"path": self.redirect_to_page.get_absolute_url(),
},
field="redirect_to_page",
exclude=exclude,
)
if self.redirect_to_url or self.redirect_to_page_id:
# Any page redirects to this page?
other = self.__class__._default_manager.filter(redirect_to_page=self)
if other:
raise validation_error(
_(
"Do not chain redirects. The page %(page)s already"
" redirects to this page."
)
% {"page": ", ".join("%s" % page for page in other)},
field="redirect_to_page",
exclude=exclude,
)
|
[
"[email protected]"
] | |
3ad3f271e1638aeab5f1a60f9e46cbf4d55b64e0
|
a3faf585ac766da428ee896e6c70c39ecc22ce1f
|
/xy/planner.py
|
b4be54c6910ff99f946e9c2aa08bc9b5ab70185d
|
[] |
no_license
|
RolandJuno/xy
|
dcab6c0682cda79ffd6b5fb6cb8365390421f784
|
1079175b9a2f58c72fd94520908ebbaf81585037
|
refs/heads/master
| 2020-04-05T04:11:02.909464 | 2019-11-13T22:05:16 | 2019-11-13T22:05:16 | 50,703,647 | 7 | 1 | null | 2016-01-30T01:56:42 | 2016-01-30T01:56:42 | null |
UTF-8
|
Python
| false | false | 5,970 |
py
|
from hashindex import Index
from math import hypot
import anneal
import random
def sort_paths_greedy(paths, reversable=True):
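    # Greedy nearest-neighbour ordering: start from the path whose first
    # point has the largest y, then repeatedly hop to the closest remaining
    # endpoint (reversing a path when its far end is nearer).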
first = max(paths, key=lambda x: x[0][1])
paths.remove(first)
result = [first]
points = []
for path in paths:
x1, y1 = path[0]
x2, y2 = path[-1]
points.append((x1, y1, path, False))
if reversable:
points.append((x2, y2, path, True))
index = Index(points)
while index.size:
x, y, path, reverse = index.search(result[-1][-1])
x1, y1 = path[0]
x2, y2 = path[-1]
index.remove((x1, y1, path, False))
if reversable:
index.remove((x2, y2, path, True))
if reverse:
result.append(list(reversed(path)))
else:
result.append(path)
return result
def sort_paths(paths, iterations=100000, reversable=True):
'''
This function re-orders a set of 2D paths (polylines) to minimize the
distance required to visit each path. This is useful for 2D plotting to
reduce wasted movements where the instrument is not drawing.
If allowed, the algorithm will also reverse some paths if doing so reduces
the total distance.
The code uses simulated annealing as its optimization algorithm. The number
of iterations can be increased to improve the chances of finding a perfect
solution. However, a perfect solution isn't necessarily required - we just
want to find something good enough.
With randomly generated paths, the algorithm can quickly find a solution
that reduces the extra distance to ~25 percent of its original value.
'''
state = Model(list(paths), reversable)
max_temp = anneal.get_max_temp(state, 10000)
min_temp = max_temp / 1000.0
state = anneal.anneal(state, max_temp, min_temp, iterations)
for path, reverse in zip(state.paths, state.reverse):
if reverse:
path.reverse()
return state.paths
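# A minimal usage sketch (coordinates are illustrative): reorder three
# segments so that pen-up travel between them is short.
#
#     segments = [[(0, 0), (1, 0)], [(5, 5), (5, 6)], [(1, 1), (2, 1)]]
#     ordered = sort_paths(segments, iterations=10000)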
def sort_points(points, iterations=100000):
'''
Like sort_paths, but operates on individual points instead.
This is basically a traveling salesman optimization.
'''
paths = [[x] for x in points]
paths = sort_paths(paths, iterations, False)
points = [x[0] for x in paths]
return points
class Model(object):
def __init__(self, paths, reversable=True, reverse=None, distances=None, total_distance=None):
self.paths = paths
self.reversable = reversable
self.reverse = reverse or [False] * len(self.paths)
if distances:
self.total_distance = total_distance or 0
self.distances = distances
else:
self.total_distance = 0
self.distances = [0] * (len(paths) - 1)
self.add_distances(range(len(self.distances)))
def subtract_distances(self, indexes):
n = len(self.distances)
for i in indexes:
if i >= 0 and i < n:
self.total_distance -= self.distances[i]
def add_distances(self, indexes):
n = len(self.distances)
for i in indexes:
if i < 0 or i >= n:
continue
j = i + 1
if self.reverse[i]:
x1, y1 = self.paths[i][0]
else:
x1, y1 = self.paths[i][-1]
if self.reverse[j]:
x2, y2 = self.paths[j][-1]
else:
x2, y2 = self.paths[j][0]
self.distances[i] = hypot(x2 - x1, y2 - y1)
self.total_distance += self.distances[i]
def energy(self):
# return the total extra distance for this ordering
return self.total_distance
def do_move(self):
if self.reversable and random.random() < 0.25:
# mutate by reversing a random path
n = len(self.paths) - 1
i = random.randint(0, n)
indexes = [i - 1, i]
self.subtract_distances(indexes)
self.reverse[i] = not self.reverse[i]
self.add_distances(indexes)
return (1, i, 0)
else:
# mutate by swapping two random paths
n = len(self.paths) - 1
i = random.randint(0, n)
j = random.randint(0, n)
indexes = set([i - 1, i, j - 1, j])
self.subtract_distances(indexes)
self.paths[i], self.paths[j] = self.paths[j], self.paths[i]
self.add_distances(indexes)
return (0, i, j)
def undo_move(self, undo):
# undo the previous mutation
mode, i, j = undo
if mode == 0:
indexes = set([i - 1, i, j - 1, j])
self.subtract_distances(indexes)
self.paths[i], self.paths[j] = self.paths[j], self.paths[i]
self.add_distances(indexes)
else:
indexes = [i - 1, i]
self.subtract_distances(indexes)
self.reverse[i] = not self.reverse[i]
self.add_distances(indexes)
def copy(self):
# make a copy of the model
return Model(
list(self.paths), self.reversable, list(self.reverse),
list(self.distances), self.total_distance)
def test(n_paths, n_iterations, seed=None):
random.seed(seed)
paths = []
for _ in range(n_paths):
x1 = random.random()
y1 = random.random()
x2 = random.random()
y2 = random.random()
path = [(x1, y1), (x2, y2)]
paths.append(path)
before = Model(paths).energy()
if n_iterations:
paths = sort_paths(paths, n_iterations)
else:
paths = sort_paths_greedy(paths)
after = Model(paths).energy()
pct = 100.0 * after / before
return pct
if __name__ == '__main__':
# test the module
for n_paths in [10, 100, 1000, 10000]:
for n_iterations in [None, 10, 100, 1000, 10000, 100000, 1000000]:
pct = test(n_paths, n_iterations, 123)
print n_paths, n_iterations, pct
|
[
"[email protected]"
] | |
2bcf76b268dcc14f93c164f38f79c9fac0b642c1
|
93d8f6332992d7f1574666096e956d47a2c23754
|
/src/safe.py
|
98b34c1ad9ca33b5b925d656a343e2388d310014
|
[
"BSD-3-Clause"
] |
permissive
|
aliceafterall/cocomud
|
d41a5a8964f1af17cacfb0d0dcdd4b5530bb1bc5
|
b2b7a7b5f93542b8e94c0eec00c4dcd7bd96cff1
|
refs/heads/master
| 2023-07-20T09:34:49.410221 | 2017-08-03T15:16:05 | 2017-08-03T15:16:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,816 |
py
|
# Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file contains the 'safe' system of CocoMUD, ways to crypt/encrypt.
This feature requires:
pbkdf2
Crypto
The module contains a class named 'Safe', which should be instantiated
in order to manipulate the encrypting/decrypting mechanism. This class
requires a passphrase as an argument. You can instantiate it as follows:
>>> from safe import Safe
>>> safe = Safe(file=".passphrase")
>>> # (If the file doesn't exist, it will be created with an auto-generated
>>> # passphrase.)
>>> # Alternatively you can specify the passphrase directly
>>> safe = Safe(passphrase="Dsm18fvdjP9sz801,9DJA.1356gndYJz987v")
>>> # Store encrypted data
>>> safe.store("login", "kredh")
>>> safe.store("password", "YoudWishIToldYou")
>>> # Retrieve the data (can be later)
>>> login = safe.retrieve("login")
>>> password = safe.retrieve("password")
Note that data that is not a string (like a bool or a float) will be
saved as unprotected data. If you want to save it encrypted, you can
convert it to a string first.
"""
import base64
import os
import pickle
from Crypto.Cipher import AES
from pbkdf2 import PBKDF2
class Safe:
"""A safe object, to encrypt/decrypt information.
The Safe class requires a passphrase to be created. This is a
string of characters that adds to the security of encryption.
    Obviously, the same passphrase is needed to decrypt information that
    has been encrypted. Other optional parameters are also possible:
secret: the path of the file in which to store crypted data.
"""
def __init__(self, passphrase=None, file=None, secret="data.crypt",
load=True):
self.salt_seed = 'mkhgts465wef4fwtdd'
self.passphrase = passphrase
self.secret = secret
self.passphrase_size = 64
self.key_size = 32
self.block_size = 16
self.iv_size = 16
self.salt_size = 8
self.data = {}
if file and os.path.exists(file):
with open(file, "r") as pass_file:
self.passphrase = pass_file.read()
if not self.passphrase:
self.passphrase = base64.b64encode(os.urandom(
self.passphrase_size))
if file:
with open(file, "w") as pass_file:
pass_file.write(self.passphrase)
# Load the secret file
if load:
self.load()
def get_salt_from_key(self, key):
return PBKDF2(key, self.salt_seed).read(self.salt_size)
def encrypt(self, plaintext, salt):
"""Pad plaintext, then encrypt it.
The encryption occurs with a new, randomly initialised cipher.
This method will not preserve trailing whitespace in plaintext!.
"""
# Initialise Cipher Randomly
init_vector = os.urandom(self.iv_size)
# Prepare cipher key
key = PBKDF2(self.passphrase, salt).read(self.key_size)
cipher = AES.new(key, AES.MODE_CBC, init_vector)
bs = self.block_size
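        # Pad the plaintext with spaces up to a multiple of the AES block
        # size, and prepend the IV so decrypt() can recover it later.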
return init_vector + cipher.encrypt(plaintext + \
" " * (bs - (len(plaintext) % bs)))
def decrypt(self, ciphertext, salt):
"""Reconstruct the cipher object and decrypt.
This method will not preserve trailing whitespace in the
retrieved value.
"""
# Prepare cipher key
key = PBKDF2(self.passphrase, salt).read(self.key_size)
# Extract IV
init_vector = ciphertext[:self.iv_size]
ciphertext = ciphertext[self.iv_size:]
cipher = AES.new(key, AES.MODE_CBC, init_vector)
return cipher.decrypt(ciphertext).rstrip(" ")
def load(self):
"""Load the data from the 'secret' file if exists."""
if os.path.exists(self.secret):
with open(self.secret, "rb") as file:
upic = pickle.Unpickler(file)
self.data = upic.load()
if not isinstance(self.data, dict):
raise ValueError("the data contained in the file " \
"'{}' is not a dictionary".format(self.secret))
def retrieve(self, key, *default):
"""Retrieve and decrypt the specified key.
If the key isn't present in the dictionary, either
return default if specified, or raise a KeyError.
If the value at this location isn't a string, return it as is.
"""
if key not in self.data:
if default:
return default[0]
raise KeyError(key)
value = self.data[key]
if isinstance(value, basestring):
salt = self.get_salt_from_key(key)
return self.decrypt(value, salt)
return value
def store(self, key, value):
"""Store the key in the file.
If the key already exists, replaces it.
If the value is not a string or unicode, it will be stored
WITHOUT encryption.
"""
if isinstance(value, basestring):
salt = self.get_salt_from_key(key)
crypted = self.encrypt(value, salt)
self.data[key] = crypted
else:
self.data[key] = value
# Write the new data in the file
with open(self.secret, "wb") as file:
pic = pickle.Pickler(file)
pic.dump(self.data)
|
[
"[email protected]"
] | |
a4b8a7c035036e9e0e83c562c498c103c3a7ba94
|
7d72ece1edb0009e2f5dadd96838e6fa4d020c86
|
/src/follow_road/MyAlgorithm.py
|
78146757492d8d71d43311729f3470639eea528e
|
[] |
no_license
|
RoboticsLabURJC/2018-phd-luis-caiza
|
d188a9621c7339349dd32ba3f382010daeb49b95
|
834e93889c8b8aacdf8edee0206341154ef17073
|
refs/heads/master
| 2020-03-30T02:05:28.334834 | 2019-04-24T19:32:17 | 2019-04-24T19:32:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,509 |
py
|
import threading
import time
from datetime import datetime
import cv2
import numpy as np
import math
time_cycle = 80
#value_min_HSV = np.array([20, 0, 0]) #for follow road original
#value_max_HSV = np.array([100, 130, 130]) #for follow road original
value_min_HSV=np.array([0, 50, 50]) # red color used in follow a ball
value_max_HSV=np.array([10, 255, 255]) #red color used in follow a ball
vel_front = 0
vel_z = 0
vel_yaw = 0
class MyAlgorithm(threading.Thread):
def __init__(self, drone):
self.drone = drone
self.height = 240
self.width = 320
self.yaw = 0.0
self.imageV=None
self.imageF =None
self.stop_event = threading.Event()
self.kill_event = threading.Event()
self.lock = threading.Lock()
threading.Thread.__init__(self, args=self.stop_event)
def setImageFilteredVentral(self, image):
self.lock.acquire()
self.imageV=image
self.lock.release()
def getImageFilteredVentral(self):
self.lock.acquire()
tempImageV=self.imageV
self.lock.release()
return tempImageV
def setImageFilteredFrontal(self, image):
self.lock.acquire()
self.imageF=image
self.lock.release()
def getImageFilteredFrontal(self):
self.lock.acquire()
tempImageF=self.imageF
self.lock.release()
return tempImageF
def run (self):
self.stop_event.clear()
while (not self.kill_event.is_set()):
start_time = datetime.now()
if not self.stop_event.is_set():
self.execute()
finish_Time = datetime.now()
dt = finish_Time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
if (ms < time_cycle):
time.sleep((time_cycle - ms) / 1000.0)
def stop (self):
self.stop_event.set()
def play (self):
if self.is_alive():
self.stop_event.clear()
else:
self.start()
def kill (self):
self.kill_event.set()
def execute(self):
# Add your code here
input_imageV = self.drone.getImageVentral().data
input_imageF = self.drone.getImageFrontal().data
if input_imageV is not None:
image_HSV_V = cv2.cvtColor(input_imageV, cv2.COLOR_RGB2HSV)
            #Threshold image
image_HSV_filtered_V = cv2.inRange(image_HSV_V, value_min_HSV, value_max_HSV)
#Reducing noise
opening_V = cv2.morphologyEx(image_HSV_filtered_V, cv2.MORPH_OPEN, np.ones((5,5),np.uint8))
closing_V = cv2.morphologyEx(opening_V, cv2.MORPH_CLOSE, np.ones((10,10),np.uint8))
#Filtered image
image_HSV_filtered_Mask_V = np.dstack((closing_V, closing_V, closing_V))
#drawing contours
imgray_V = cv2.cvtColor(image_HSV_filtered_Mask_V, cv2.COLOR_BGR2GRAY)
ret_V, thresh_V = cv2.threshold(imgray_V, 127, 255, 0)
_, contours_V, hierarchy_V = cv2.findContours(thresh_V, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image_HSV_filtered_Mask_V, contours_V, -1, (0,255,0), 3)
#Getting the centre of the road
if input_imageF is not None:
image_HSV_F = cv2.cvtColor(input_imageF, cv2.COLOR_RGB2HSV)
            #Threshold image
image_HSV_filtered_F = cv2.inRange(image_HSV_F, value_min_HSV, value_max_HSV)
#Reducing noise
opening_F = cv2.morphologyEx(image_HSV_filtered_F, cv2.MORPH_OPEN, np.ones((5,5),np.uint8))
image_HSV_filtered_Mask_F = np.dstack((opening_F, opening_F, opening_F))
#drawing contours
imgray_F = cv2.cvtColor(image_HSV_filtered_Mask_F, cv2.COLOR_BGR2GRAY)
ret_F, thresh_F = cv2.threshold(imgray_F, 127, 255, 0)
_, contours_F, hierarchy_F = cv2.findContours(thresh_F, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image_HSV_filtered_Mask_F, contours_F, -1, (0,255,0), 3)
#Getting the centre of the road
area = []
for pic, contour in enumerate(contours_F):
area.append(cv2.contourArea(contour))
if len(area) > 1:
if area[0] < area[1]:
M = cv2.moments(contours_F[1])
else:
M = cv2.moments(contours_F[0])
else:
try:
M = cv2.moments(contours_F[0])
except IndexError:
self.drone.sendCMDVelocities(0,0,0,0)
M = cv2.moments(0)
if int(M['m00']) != 0:
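                # Centroid of the tracked contour from its image moments:
                # cx = m10/m00, cy = m01/m00.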
#print("Road detected")
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
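                # Simple proportional control: each command is a hand-tuned
                # gain times an error term (setpoint minus measurement);
                # the contour area M['m00'] acts as a proxy for distance.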
vel_front = 0.0001 * (3000 - int(M['m00']))
vel_z = 0.01 * (110 - cy)
vel_yaw = 0.02 * (140 - cx)
self.drone.sendCMDVelocities(0,vel_front,vel_z,vel_yaw)
print("cx: " + str(cx) + " cy: " + str(cy) + " area: " + str(M['m00']) + " vel_z " + str(vel_z))
self.yaw = int(cx)
#drawing the center
cv2.circle(image_HSV_filtered_Mask_F, (cx, cy), 7, np.array([255, 0, 0]), -1)
            #publish the filtered images for display
self.setImageFilteredVentral(image_HSV_filtered_Mask_V)
self.setImageFilteredFrontal(image_HSV_filtered_Mask_F)
|
[
"[email protected]"
] | |
eed067e68e68bc9403d6958e844746a118bc601f
|
d6ce2f6bdddef373b9bbdf26d567307ce3667103
|
/scripts/utils_specs/convert_spec_csv_to_json.py
|
0db7b03c0e21edd6637ca3d51e06b9ffc1e88e4d
|
[
"MIT"
] |
permissive
|
hezbranch/time_series_prediction
|
505007fb248fe09f56943c3ad705a52ce77a193c
|
9bffc3f279cbfaa3ec0acc937d15610c19e0975e
|
refs/heads/master
| 2023-01-19T12:27:24.615657 | 2020-10-30T08:59:05 | 2020-10-30T08:59:05 | 296,434,092 | 1 | 0 |
MIT
| 2020-09-17T20:22:09 | 2020-09-17T20:22:08 | null |
UTF-8
|
Python
| false | false | 2,503 |
py
|
import argparse
import pandas as pd
import json
import copy
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config_json_path", type=str)
parser.add_argument("--output_dir", type=str)
parser.add_argument("--row_template_json", type=str, default='row_template.json')
parser.add_argument("--sheet_template_json", type=str, default='sheet_template.json')
args = parser.parse_args()
with open(args.config_json_path, 'r') as f:
config = json.load(f)
with open(args.row_template_json, 'r') as f:
row_template = json.load(f)
with open(args.sheet_template_json, 'r') as f:
sheet_template = json.load(f)
for gid, sheet_name, csv_filename in zip(
config['spec_gid_list'],
config['spec_sheet_name_list'],
config['spec_csv_filename_list']):
sheet = copy.deepcopy(sheet_template)
sheet['name'] = sheet['name'].replace("{{sheet_name}}", sheet_name)
sheet['path'] = sheet['path'].replace("{{csv_filename}}", csv_filename)
out_csv_path = os.path.join(
args.output_dir,
config['output_csv_path_pattern'].replace("{{sheet_name}}", sheet_name)
)
out_json_path = os.path.join(
args.output_dir,
config['output_json_path_pattern'].replace("{{sheet_name}}", sheet_name)
)
csv_df = pd.read_csv(out_csv_path, dtype=str)
row_list = []
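        # Build one row per CSV line by instantiating the row template:
        # every "{{name}}" placeholder is replaced with the value from the
        # CSV column of the same name.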
for rowid, row_df in csv_df.iterrows():
row = copy.deepcopy(row_template)
for k, v in row_template.items():
if isinstance(v, dict):
v = v.__repr__()
isdict = True
else:
isdict = False
assert isinstance(v, str)
while v.count("{{") > 0:
start = v.find("{{")
stop = v.find("}}", start)
varname = v[start+2:stop]
v = v.replace("{{%s}}" % varname, str(row_df[varname]))
if isdict:
row[k] = json.loads(v.replace("'", '"'))
else:
row[k] = v
row_list.append(row)
sheet['schema']['fields'] = row_list
sheet = json.dumps(sheet, indent=4, sort_keys=False)
with open(out_json_path, 'w') as f:
f.write(sheet)
print("Wrote to file: %s" % out_json_path)
|
[
"[email protected]"
] | |
90118b22999d0850d70f1bd9e39f9ebafee8e412
|
6188f8ef474da80c9e407e8040de877273f6ce20
|
/examples/docs_snippets/docs_snippets/guides/dagster/development_to_production/resources/resources_v1.py
|
c1339b0fabc7baf6e734f9610d9ced0cb55cf53e
|
[
"Apache-2.0"
] |
permissive
|
iKintosh/dagster
|
99f2a1211de1f3b52f8bcf895dafaf832b999de2
|
932a5ba35263deb7d223750f211c2ddfa71e6f48
|
refs/heads/master
| 2023-01-24T15:58:28.497042 | 2023-01-20T21:51:35 | 2023-01-20T21:51:35 | 276,410,978 | 1 | 0 |
Apache-2.0
| 2020-07-01T15:19:47 | 2020-07-01T15:13:56 | null |
UTF-8
|
Python
| false | false | 655 |
py
|
# start_resource
# resources.py
from typing import Any, Dict, Optional
import requests
class HNAPIClient:
"""
Hacker News client that fetches live data
"""
def fetch_item_by_id(self, item_id: int) -> Optional[Dict[str, Any]]:
"""Fetches a single item from the Hacker News API by item id."""
item_url = f"https://hacker-news.firebaseio.com/v0/item/{item_id}.json"
item = requests.get(item_url, timeout=5).json()
return item
def fetch_max_item_id(self) -> int:
return requests.get(
"https://hacker-news.firebaseio.com/v0/maxitem.json", timeout=5
).json()
# end_resource
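# A minimal usage sketch (values come from the live API):
#
#     client = HNAPIClient()
#     max_id = client.fetch_max_item_id()
#     item = client.fetch_item_by_id(max_id)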
|
[
"[email protected]"
] | |
93db36640e286172bee479c27dc086ac4f892ad8
|
d90283bff72b5a55dd4d0f90c7325355b00ce7b1
|
/p1804/p12/xxxx.py
|
1bd3153f8551e4d5a98764db70ac390410388037
|
[] |
no_license
|
yuemeiss/p1804daima
|
f841f52e63081d53d50a199e4d148d4533605bb6
|
6ea08eb9971e42bf4ac535033a006d98ed98bf98
|
refs/heads/master
| 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
tu1 = (1,2,3)
alist=[123,5677,555]
for i in alist:
print(i)
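# enumerate() yields (index, value) pairs, so the loop gets both at once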
for index,d in enumerate(alist):
print(index,d)
c=0
while c < len(tu1):
print(tu1[c])
c+=1
|
[
"[email protected]"
] | |
8b09f80a72badcd81065c4921c3e31e1173a1a46
|
c5b4d174ace61dd5914ca99fb0f2c710d0182324
|
/pypes/tests/test__utils.py
|
bd9906b228853654176538174cd98e0cfc893330
|
[
"Apache-2.0"
] |
permissive
|
erramuzpe/pypes
|
636c6b31023747a571af90390fd85b2dd6806dea
|
3922d3162dc633b30961c036efdeb5d221ab1bfb
|
refs/heads/master
| 2020-12-24T06:43:15.063955 | 2017-04-05T19:51:05 | 2017-04-05T19:51:05 | 73,461,509 | 0 | 0 | null | 2016-11-11T08:54:15 | 2016-11-11T08:54:14 | null |
UTF-8
|
Python
| false | false | 2,645 |
py
|
# -*- coding: utf-8 -*-
from pypes._utils import format_pair_list
def test_format_pair_list():
anat_fbasename = 'anat_hc'
regexp_subst = [
(r"/{anat}_.*corrected_seg8.mat$", "/{anat}_to_mni_affine.mat"),
(r"/m{anat}.*_corrected.nii$", "/{anat}_biascorrected.nii"),
(r"/w{anat}.*_biascorrected.nii$", "/{anat}_mni.nii"),
(r"/y_{anat}.*nii$", "/{anat}_to_mni_field.nii"),
(r"/iy_{anat}.*nii$", "/{anat}_to_mni_inv_field.nii"),
(r"/mwc1{anat}.*nii$", "/{anat}_gm_mod_w2tpm.nii"),
(r"/mwc2{anat}.*nii$", "/{anat}_wm_mod_w2tpm.nii"),
(r"/mwc3{anat}.*nii$", "/{anat}_csf_mod_w2tpm.nii"),
(r"/mwc4{anat}.*nii$", "/{anat}_nobrain_mod_w2tpm.nii"),
(r"/c1{anat}.*nii$", "/{anat}_gm.nii"),
(r"/c2{anat}.*nii$", "/{anat}_wm.nii"),
(r"/c3{anat}.*nii$", "/{anat}_csf.nii"),
(r"/c4{anat}.*nii$", "/{anat}_nobrain.nii"),
(r"/c5{anat}.*nii$", "/{anat}_nobrain_mask.nii"),
]
result = format_pair_list(regexp_subst, anat=anat_fbasename)
assert(result == [
(r"/anat_hc_.*corrected_seg8.mat$", "/anat_hc_to_mni_affine.mat"),
(r"/manat_hc.*_corrected.nii$", "/anat_hc_biascorrected.nii"),
(r"/wanat_hc.*_biascorrected.nii$", "/anat_hc_mni.nii"),
(r"/y_anat_hc.*nii$", "/anat_hc_to_mni_field.nii"),
(r"/iy_anat_hc.*nii$", "/anat_hc_to_mni_inv_field.nii"),
(r"/mwc1anat_hc.*nii$", "/anat_hc_gm_mod_w2tpm.nii"),
(r"/mwc2anat_hc.*nii$", "/anat_hc_wm_mod_w2tpm.nii"),
(r"/mwc3anat_hc.*nii$", "/anat_hc_csf_mod_w2tpm.nii"),
(r"/mwc4anat_hc.*nii$", "/anat_hc_nobrain_mod_w2tpm.nii"),
(r"/c1anat_hc.*nii$", "/anat_hc_gm.nii"),
(r"/c2anat_hc.*nii$", "/anat_hc_wm.nii"),
(r"/c3anat_hc.*nii$", "/anat_hc_csf.nii"),
(r"/c4anat_hc.*nii$", "/anat_hc_nobrain.nii"),
(r"/c5anat_hc.*nii$", "/anat_hc_nobrain_mask.nii"),
])
|
[
"[email protected]"
] | |
1ab320425a4b1a6568c0ae0d930d6c9f420e792d
|
168f8546daf36bead1a9b8f32e8a43fdc5d844cf
|
/Test/python/multiply.py
|
cec03f2eaf740cf2f1ca1e9f23d4046fa9dd1500
|
[] |
no_license
|
whztt07/RenderFish
|
ea67915a672096254444765347044c6229681d05
|
7d0a4fd6a01a949091ec05ba93c42aa1760b9408
|
refs/heads/master
| 2020-05-04T14:47:51.215280 | 2015-11-22T16:42:31 | 2015-11-22T16:42:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
'''py_class.py - Python source designed to demonstrate
the use of python embedding'''
class Multiply:
def __init__(self):
self.a = 6
self.b = 5
def multiply(self):
c = self.a*self.b
print 'The result of', self.a, 'x', self.b, ':', c
return c
def multiply2(self, a, b):
c = a*b
print 'The result of', a, 'x', b, ':', c
return c
|
[
"[email protected]"
] | |
f1318351ae4716d2341351aa7ba537219924a05b
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/mass/src/language_model/masked_language_model.py
|
52aed8d53ed7b0a0eae8a67d7231364bbf913a00
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 |
Apache-2.0
| 2023-05-17T11:22:28 | 2021-10-15T06:38:37 |
Python
|
UTF-8
|
Python
| false | false | 4,698 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Masked language model."""
import numpy as np
from .base import LanguageModel
class MaskedLanguageModel(LanguageModel):
"""
    Apply a mask operation to a sentence.
    If k is assigned, mask a fragment of length k.
    Otherwise, use mask_ratio.
Args:
k (int): Length of fragment.
mask_ratio (float): Mask ratio.
"""
def __init__(self, k: int = None, mask_ratio=0.5,
mask_all_prob=None):
super(MaskedLanguageModel, self).__init__()
self.mask_ratio = mask_ratio
self._k = k
self._threshold = mask_all_prob
def emit(self, sentence: np.ndarray, vocabulary):
"""
Mask mono source sentence.
        A sample used to train the model is processed with the following steps:
encoder input (source): [x1, x2, x3, x4, x5, x6, x7, x8, </eos>]
masked encoder input: [x1, x2, _, _, _, x6, x7, x8, </eos>]
decoder input: [ _, x3, x4]
| | |
V V V
decoder output: [ x3, x4, x5]
Notes:
            A simple rule is applied: the source sentence starts without <BOS>
            but ends with <EOS>.
Args:
vocabulary (Dictionary): Vocabulary.
sentence (np.ndarray): Raw sentence instance.
Returns:
dict, an example.
"""
encoder_input = sentence.copy()
seq_len = encoder_input.shape[0]
# If v=0, then u must equal to 0. [u, v)
u, v = self._get_masked_interval(len(encoder_input),
self._k, self._threshold)
if u == 0:
_len = v - u if v - u != 0 else seq_len
decoder_input = np.array([vocabulary.mask_index] * _len, dtype=np.int32)
decoder_input[1:] = encoder_input[:_len - 1].copy()
else:
decoder_input = np.array([vocabulary.mask_index] * (v - u), dtype=np.int32)
decoder_input[1:] = encoder_input[u:v - 1].copy()
if v == 0:
decoder_output = encoder_input.copy()
encoder_input[:] = vocabulary.mask_index
else:
decoder_output = encoder_input[u:v].copy()
encoder_input[np.arange(start=u, stop=v)] = vocabulary.mask_index
if u != v and u > 0:
padding = np.array([vocabulary.padding_index] * u, dtype=np.int32)
decoder_input = np.concatenate((padding, decoder_input))
decoder_output = np.concatenate((padding, decoder_output))
assert decoder_input.shape[0] == decoder_output.shape[0], "seq len must equal."
return {
"sentence_length": seq_len,
"tgt_sen_length": decoder_output.shape[0],
"encoder_input": encoder_input, # end with </eos>
"decoder_input": decoder_input,
"decoder_output": decoder_output # end with </eos>
}
def _get_masked_interval(self, length, fix_length=None,
threshold_to_mask_all=None):
"""
        Generate a masked interval according to length and mask_ratio.
        Args:
            length (int): Sequence length.
            fix_length (int): If given, use this fixed fragment length.
            threshold_to_mask_all (float): Probability below which the whole
                sequence is masked.
        Returns:
            Tuple[int, int], [start position, end position).
"""
# Can not larger than sequence length.
# Mask_length belongs to [0, length].
if fix_length is not None:
interval_length = min(length, fix_length)
else:
interval_length = min(length, round(self.mask_ratio * length))
_magic = np.random.random()
if threshold_to_mask_all is not None and _magic <= threshold_to_mask_all:
return 0, length
# If not sequence to be masked, then return 0, 0.
if interval_length == 0:
return 0, 0
# Otherwise, return start position and interval length.
start_pos = np.random.randint(low=0, high=length - interval_length + 1)
return start_pos, start_pos + interval_length
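# A rough sketch of the interval logic (numbers are illustrative): with
# length=9 and mask_ratio=0.5, interval_length = min(9, round(4.5)) = 4
# (banker's rounding), so (u, v) could come out as (2, 6) and the tokens
# at positions 2..5 are replaced by the mask index.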
|
[
"[email protected]"
] | |
5c3dda335336b3b644e37fe7f8f4f46f4fd0ee86
|
60ce73bf2f86940438e5b7fecaaccad086888dc5
|
/working_scrapers/Illinois_dekalb.py
|
d04843c1e230207cd3080ec2535d4860593519dd
|
[] |
no_license
|
matthewgomies/jailcrawl
|
22baf5f0e6dc66fec1b1b362c26c8cd2469dcb0d
|
9a9ca7e1328ae549860ebeea9b149a785f152f39
|
refs/heads/master
| 2023-02-16T06:39:42.107493 | 2021-01-15T16:37:57 | 2021-01-15T16:37:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,161 |
py
|
#!/usr/bin/python
'''
This is a template script
MG
'''
from urllib.request import urlopen, Request
import pandas as pd
import os
import time
import numpy as np
from datetime import datetime
import datetime as dt
import sys
from io import StringIO
from joblib import Parallel, delayed
import requests
from jailscrape.common import save_to_s3, get_browser, get_logger, record_error, save_pages_array
from jailscrape import crawlers
# jailscrape.common is a file that is part of the project which keeps
# most common boilerplate code out of this file
from selenium.webdriver.common.keys import Keys
import watchtower
from bs4 import BeautifulSoup
import re
import math
# NOTE: These are imports. They ideally don't change very often.
# It's OK to have a large, maximal set here and to bulk-edit files to add to these.
# MG - Extra imports
import selenium as sm
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
ROW_INDEX = 171 # Change this for each scraper. This references the row
# of the main jailcrawl spreadsheet. This index will be used to look up
# the URL as well as state/county info
THIS_STATE = 'illinois' # Change the current state/county information.
THIS_COUNTY = 'dekalb'
def main(roster_row):
try:
logger = get_logger(roster_row) # Get a standard logger
# Here are standard variable values/how to initialize them.
# These aren't initialized here since in the save_single_page
# case, they can be done in the called function
browser = get_browser() # Get a standard browser
urlAddress = roster_row['Working Link'] # Set the main URL from the spreadsheet
page_index = 0 # Set an initial value of "page_index", which we will use to separate output pages
logger.info('Set working link to _%s_', urlAddress) # Log the chosen URL
####################################
# Begin core specific scraping code
if roster_row['State'].lower() != THIS_STATE or roster_row['County'].lower() != THIS_COUNTY:
raise Exception("Expected county definition info from _%s, %s_, but found info: _%s_" % (THIS_COUNTY, THIS_STATE, roster_row))
#Given the urlAddress passed to the function we will navigate to the page
browser.get(urlAddress)
time.sleep(np.random.uniform(7,10,1))
#Extract the HTML#
store_source = browser.page_source
## Code to save the first page and log appropriately
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
#Finding the last page
soup = BeautifulSoup(store_source, 'lxml')
page=0
for link in soup.findAll("div", {"class":"loca-search-head text-center"}):
page=str(link.text)
page=re.sub(' Results for "_"', "", page)
page=int(page)/10
page=math.ceil(page)
#Crawling through all the pages
        for i in range(2, page + 1):
            if i > 30:
                print("Exceeds 300 inmates")
                continue
            # Pager links sit at li[3] through li[6]; from page 5 onward the
            # li[6] link acts as "next", so the index is clamped there.
            li_index = min(i + 1, 6)
            elem = browser.find_element_by_xpath('/html/body/div/div/div/div[2]/div[3]/div[12]/ul/li[%d]/a' % li_index)
            elem.click()
            time.sleep(np.random.uniform(3,5,1))
            store_source = browser.page_source
            ## Code to save the page and log appropriately
            page_index = i - 1
            save_to_s3(store_source, page_index, roster_row)
            logger.info('Saved page _%s_', page_index)
# End core specific scraping code
####################################
#Close the browser
logger.info('complete!')
except Exception as errorMessage:
try:
browser.close()
record_error(message=str(errorMessage), roster_row=roster_row, browser=browser)
except:
record_error(message=str(errorMessage), roster_row=roster_row)
# Record error in S3 for a general error
logger.error('Error: %s', errorMessage)
# Log error
sys.exit(1)
if __name__ == "__main__":
#This will load in the current jail roster list
#Select the index of the roster this script is for:
#Write the name of the county and state
roster = pd.read_csv('/opt/jail_roster_final_rmDuplicates.csv',encoding = "utf-8")
main(roster[roster['index'] == ROW_INDEX].iloc[0])
|
[
"[email protected]"
] | |
e3421447a8225cc4e8464a1815d43de78d1715f1
|
30a1b285ff4aab39eebe342c5dbca255a69b454c
|
/full-problems/maxDiff.py
|
347a657be99ca517cd6ae0e9e6234e8672f61c47
|
[
"Apache-2.0"
] |
permissive
|
vikas-t/practice-problems
|
cd5852ea112421a2a39db31ae9092c6a148b2af8
|
ea654d1cad5374c824c52da9d3815a9546eb43fa
|
refs/heads/master
| 2021-10-27T14:08:42.724019 | 2019-04-17T18:26:23 | 2019-04-17T18:26:23 | 170,156,225 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
#!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/maximum-difference/0
def sol(arr, n):
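    # Keep the index of the minimum seen so far, but only adopt a new
    # minimum for differences taken at a *later* index, so the smaller
    # element always precedes the larger one in the pair.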
d = -1
min_i = 0
min_till_here = 0
for i in range(1, n):
if arr[i] < arr[min_till_here]:
min_till_here = i
if min_till_here != min_i and min_till_here < i:
min_i = min_till_here
d = max(d, arr[i]-arr[min_i])
return d
arr = [5, 15, 3, 4, 5, 14]
print(sol(arr, len(arr)))
|
[
"[email protected]"
] | |
83f702f40210def83db43b117c01fb32c0afec26
|
f0f4a0f24b3a7cc8bf0366cf329923e9bd5b00c7
|
/activity/activity_DepositDigestIngestAssets.py
|
515d8398361d40ebc252d28d7bed3993d5a6e601
|
[
"MIT"
] |
permissive
|
elifesciences/elife-bot
|
45c79993d13bacb37f59ba57462179dd7c6f1e2e
|
2324e26943f805a0602ea3251ff0f6a5db27f1a0
|
refs/heads/develop
| 2023-08-17T15:25:42.170870 | 2023-08-14T16:47:02 | 2023-08-14T16:47:02 | 7,503,542 | 21 | 10 |
MIT
| 2023-09-07T19:50:30 | 2013-01-08T15:09:54 |
Python
|
UTF-8
|
Python
| false | false | 4,579 |
py
|
import os
import json
from S3utility.s3_notification_info import parse_activity_data
from provider.storage_provider import storage_context
from provider import digest_provider, download_helper
import provider.utils as utils
from activity.objects import Activity
"""
DepositDigestIngestAssets.py activity
"""
class activity_DepositDigestIngestAssets(Activity):
def __init__(self, settings, logger, client=None, token=None, activity_task=None):
super(activity_DepositDigestIngestAssets, self).__init__(
settings, logger, client, token, activity_task
)
self.name = "DepositDigestIngestAssets"
self.pretty_name = "Deposit Digest Ingest Assets"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = "Deposit Assets for a Digest (Pre-Ingest)"
# Track some values
self.input_file = None
self.digest = None
self.dest_resource = None
# Local directory settings
self.directories = {
"TEMP_DIR": os.path.join(self.get_tmp_dir(), "tmp_dir"),
"INPUT_DIR": os.path.join(self.get_tmp_dir(), "input_dir"),
}
# Track the success of some steps
self.build_status = None
def do_activity(self, data=None):
"do the work"
if self.logger:
self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4))
# Create output directories
self.make_activity_directories()
# parse the data with the digest_provider
real_filename, bucket_name, bucket_folder = parse_activity_data(data)
# Download from S3
self.input_file = download_helper.download_file_from_s3(
self.settings,
real_filename,
bucket_name,
bucket_folder,
self.directories.get("INPUT_DIR"),
)
# Parse input and build digest
digest_config = digest_provider.digest_config(
self.settings.digest_config_section, self.settings.digest_config_file
)
self.build_status, self.digest = digest_provider.build_digest(
self.input_file,
self.directories.get("TEMP_DIR"),
self.logger,
digest_config,
)
if not self.build_status:
self.logger.info(
"Failed to build the Digest in Deposit Digest Ingest Assets for %s",
real_filename,
)
return self.ACTIVITY_PERMANENT_FAILURE
# check if there is an image and if not return True
if not digest_provider.has_image(self.digest):
self.logger.info(
"Digest for file %s has no images to deposit", real_filename
)
return self.ACTIVITY_SUCCESS
# bucket name
cdn_bucket_name = (
self.settings.publishing_buckets_prefix + self.settings.digest_cdn_bucket
)
# deposit the image file to S3
self.deposit_digest_image(self.digest, cdn_bucket_name)
return self.ACTIVITY_SUCCESS
def image_dest_resource(self, digest, cdn_bucket_name):
"concatenate the S3 bucket object path we copy the file to"
msid = utils.msid_from_doi(digest.doi)
article_id = utils.pad_msid(msid)
# file name from the digest image file
file_name = digest.image.file.split(os.sep)[-1]
new_file_name = digest_provider.new_file_name(file_name, msid)
storage_provider = self.settings.storage_provider + "://"
dest_resource = (
storage_provider + cdn_bucket_name + "/" + article_id + "/" + new_file_name
)
return dest_resource
def deposit_digest_image(self, digest, cdn_bucket_name):
"deposit the image file from the digest to the bucket"
self.dest_resource = self.image_dest_resource(digest, cdn_bucket_name)
storage = storage_context(self.settings)
self.logger.info("Depositing digest image to S3 key %s", self.dest_resource)
# set the bucket object resource from the local file
metadata = {"ContentType": utils.content_type_from_file_name(digest.image.file)}
storage.set_resource_from_filename(
self.dest_resource, digest.image.file, metadata
)
self.logger.info("Deposited digest image %s to S3", digest.image.file)
return True
|
[
"[email protected]"
] | |
bd6a7d150cf3eb9fac42f5a543f377ad8356ad67
|
27691e5ef8e49fb29189b01dd76a1dc3720e7ae8
|
/AC/ABC-TDD/180/c.py
|
76f7581f37b0ab7ec2b1fda1f0887f7b32dc1463
|
[] |
no_license
|
oshou/procon
|
61e5f5bc819e0fe5ab29749fc2f894fe6f3b1d07
|
3d000c64b5917c65b51ed7da5b90cb79892d5909
|
refs/heads/master
| 2023-05-10T23:56:50.861468 | 2021-09-23T06:07:29 | 2021-09-23T06:07:29 | 116,886,484 | 1 | 0 | null | 2023-05-05T02:28:41 | 2018-01-10T00:21:38 |
Go
|
UTF-8
|
Python
| false | false | 257 |
py
|
n = int(input())
ans = []
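# Trial division up to sqrt(n): each divisor i found below the square root
# pairs with the complementary divisor n // i above it.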
for i in range(1, n+1):
if i*i > n:
break
if n % i == 0:
ans.append(i)
tmp = n//i
if i != tmp:
ans.append(n//i)
ans = sorted(ans)
counts = len(ans)
for num in ans:
print(num)
|
[
"[email protected]"
] | |
315ab7aa2ef9d0579f0d045b6dfb17919ba8530a
|
c741f04141784a2571d2d27d95e0d994e4584ab1
|
/learning/py3/0-1/21-模块-包-4.py
|
d70f489fbe8852df7919744087de49fb955d0899
|
[] |
no_license
|
haodonghui/python
|
bbdece136620bc6f787b4942d6e1760ed808afd4
|
365062ba54297c81093b7f378742e76d438658b7
|
refs/heads/master
| 2022-02-03T23:52:37.288503 | 2022-01-27T05:23:25 | 2022-01-27T05:23:25 | 191,729,797 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,361 |
py
|
from package1 import *
p.prt(4, 'import * from a package')
'''
4.
Imagine what happens if we use `from sound.effects import *`:
Python would go into the file system, find all the submodules inside the
package, and import them one by one.
Windows is a case-insensitive system.
On such a platform, nobody can guarantee whether a file named ECHO.py is
imported as the module echo, Echo, or even ECHO.
To solve this problem, the package author has to supply an explicit index
of the package.
The import statement follows this rule:
if the package definition file __init__.py contains a list variable named
__all__, then `from package import *` imports exactly the names in that
list as the package contents.
As the package author, don't forget to keep __all__ updated whenever the
package changes. You may say "I just won't do that, I simply won't use the
import-* form" - fine, no problem, the boss is always right.
'''
def package_example():
    p.prt(4,
          "learning/py3/0-1/package1/__init__.py defines __all__ = ['p']; with `from package1 import *` at the top, only the p module of package1 is imported")
    p2.prt(4,
           "learning/py3/0-1/package1/__init__.py defines __all__ = ['p', 'p2']; with `from package1 import *` at the top, the p2 module of package1 is imported as well")
package_example()
|
[
"[email protected]"
] | |
a7012e26515d2e214c34f1a948756e9af8cff489
|
5837fd85b18b56d23612de1e36d79b5a06827542
|
/sniterator.py
|
d5e3435d3bd0924383507459b0e3f279464d9c66
|
[
"MIT"
] |
permissive
|
ChristopherWilks/snaptron
|
75e33c4f25a65f3093555a7bf235ab69865f7086
|
75903c30d54708b19d91772142013687c74d88d8
|
refs/heads/master
| 2023-02-19T01:38:57.343293 | 2023-02-11T21:47:52 | 2023-02-11T21:47:52 | 45,953,724 | 26 | 7 |
NOASSERTION
| 2022-06-17T21:10:44 | 2015-11-11T02:03:37 |
Python
|
UTF-8
|
Python
| false | false | 3,341 |
py
|
#!/usr/bin/env python2.7
# This file is part of Snaptron.
#
# Snaptron is free software: you can redistribute it and/or modify
# it under the terms of the
#
# The MIT License
#
# Copyright (c) 2016- by Christopher Wilks <[email protected]>
# and Ben Langmead <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import subprocess
import shlex
class SnaptronServerIterator():
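    """Run a list of shell commands and iterate over their stdout lines in
    command order; with direct_output=True the subprocess output is piped
    straight to this process's stdout instead of through Python."""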
def __init__(self,cmds,stdout=subprocess.PIPE,shell=False,bufsize=-1,direct_output=False):
self.cmds = cmds
self.stdout = stdout
#performance trick, pipe output from subprocess directly to this process's output
#to avoid the cost of python line processing
if direct_output:
self.stdout = sys.stdout
self.shell = shell
self.bufsize = bufsize
#used to run them in parallel, but that's a bad idea because:
#1) results will come back in random order
#2) we need to control the number of potential processes spun up by any given query (so for now we'll keep this at 1)
if direct_output:
for cmd in self.cmds:
extern_proc = subprocess.Popen(cmd, shell=self.shell, bufsize=self.bufsize)
extern_proc.wait()
else:
#TODO: stop this running in parallel for the above cited reasons, but will need to handle
#the sequential nature in the next() method
self.extern_procs = [subprocess.Popen(cmd, stdout=self.stdout, shell=self.shell, bufsize=self.bufsize) for cmd in self.cmds]
self.idx = 0
def __iter__(self):
return self
#this is only used if the self.stdout isn't directed to the current process's sys.stdout
#i.e. direct_output is False
def next(self):
line = self.extern_procs[self.idx].stdout.readline()
if line == '':
exitc=self.extern_procs[self.idx].wait()
if exitc != 0:
raise RuntimeError("%s returned non-0 exit code\n" % (self.cmds[self.idx]))
self.idx+=1
if self.idx >= len(self.extern_procs):
raise StopIteration
line = self.extern_procs[self.idx].stdout.readline()
return line
|
[
"[email protected]"
] | |
dc94995061a88c795f93deb5719820a9c7d233f6
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/exercises/1901020002/1001S02E05_string.py
|
2ef785d1bf8d2423628b42ff569e92038180dac4
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null |
UTF-8
|
Python
| false | false | 1,395 |
py
|
sample_text = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
#1.2 replace better with worse
test = sample_text.replace('better','worse')
print('replace every better with worse', test)
#1.3 drop the words that contain "ea"
words = test.split()
filtered = []
for word in words:
    if word.find('ea') < 0:
        filtered.append(word)
print('words without "ea"', filtered)
#1.4 swap the case of each word
swapcased = [i.swapcase() for i in filtered]
print('case swapped', swapcased)
#1.5 sort in ascending order
print('ascending', sorted(swapcased))
print('descending', sorted(swapcased, reverse=True))
|
[
"[email protected]"
] | |
945755e73c4c8fe1438bc352cd5a0861918ad25a
|
c14d9512c62fc479ba05ea5ed256828e8e1038c5
|
/stripe/models/account.py
|
eaecab1ba97d6ff6408961f48b09a5193aa3c01d
|
[
"MIT"
] |
permissive
|
jayvdb/saaskit-stripe
|
c44e6e387d4dd27f564f6959c134ec6aaff8f3c5
|
bd292182b0bed47dff86a627231bdabafb99bf71
|
refs/heads/master
| 2021-09-07T17:25:14.710472 | 2018-01-24T15:17:41 | 2018-02-26T21:10:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,082 |
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import json
from ..utils import UnixDateTimeField
from .charge import CURRENCY_CHOICES
ACCOUNT_TYPES = (
('custom', _('Custom')),
('standard', _('Standard')),
)
class Account(models.Model):
"""Stripe Account object.
This is an object representing your Stripe account. You can retrieve it to
see properties on the account like its current e-mail address or if the
account is enabled yet to make live charges.
Some properties, marked as 'managed accounts only', are only available to
platforms who want to create and manage Stripe accounts.
"""
id = models.CharField(max_length=255, primary_key=True)
charges_enabled = models.BooleanField(
help_text=_(
'Whether or not the account can create live charges',
),
)
country = models.CharField( # todo: add CHOICES
max_length=255,
help_text=_('The country of the account')
)
currencies_supports = json.JSONField(
help_text=_(
'The currencies this account can submit when creating charges',
),
)
default_currency = models.CharField(
max_length=255, help_text=_(
'The currency this account has chosen to use as the default'),
choices=CURRENCY_CHOICES)
details_submitted = models.BooleanField(
help_text=_(
'Whether or not account details have been submitted yet. '
'Standalone accounts cannot receive transfers before this is true.',
),
)
transfers_enabled = models.BooleanField(
help_text=_(
'Whether or not Stripe will send automatic transfers for this '
'account. This is only false when Stripe is waiting for '
'additional information from the account holder.',
),
default=True,
)
display_name = models.CharField(
max_length=255,
help_text=_(
'The display name for this account. This is used on the Stripe '
'dashboard to help you differentiate between accounts.',
),
)
email = models.EmailField(help_text=_('The primary user’s email address'))
statement_descriptor = models.TextField(
help_text=_(
'The text that will appear on credit card statements',
),
)
timezone = models.CharField(
max_length=255,
help_text=_(
'The timezone used in the Stripe dashboard for this account. A '
'list of possible timezone values is maintained at the IANA '
'Timezone Database.',
),
)
business_name = models.CharField(
max_length=255,
help_text=_(
'The publicly visible name of the business',
),
)
business_logo = models.CharField(max_length=255, null=True)
business_url = models.URLField(
help_text=_('The publicly visible website of the business'),
null=True,
)
created = UnixDateTimeField()
metadata = json.JSONField(
help_text=_(
'A set of key/value pairs that you can attach to a charge object. '
'it can be useful for storing additional information about the '
'charge in a structured format.',
),
)
support_email = models.EmailField(null=True)
support_phone = models.CharField(
max_length=255,
help_text=_(
'The publicly visible support phone number for the business',
),
null=True,
)
payout_schedule = json.JSONField(null=True)
payout_statement_descriptor = models.CharField(max_length=255, null=True)
payouts_enabled = models.BooleanField()
bank_accounts = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Bank accounts currently attached to this account.',
),
)
debit_negative_balances = models.BooleanField(
help_text=_(
'(Managed Accounts Only) '
'Whether or not Stripe will attempt to reclaim negative account '
'balances from this account’s bank account.',
),
)
decline_charge_on = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Account-level settings to automatically decline certain types of '
'charges regardless of the bank’s decision.',
),
)
legal_entity = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Information regarding the owner of this account, including '
'verification status.',
),
)
product_description = models.TextField(
help_text=_(
'(Managed Accounts Only) '
'An internal-only description of the product or service provided. '
'This is used by Stripe in the event the account gets flagged for '
'potential fraud.',
),
null=True,
)
tos_acceptance = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Who accepted the Stripe terms of service, and when they accepted '
'it.',
),
)
transfer_schedule = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'When payments collected will be automatically paid out to the '
'account holder’s bank account',
),
)
type = models.CharField(max_length=255, choices=ACCOUNT_TYPES)
verification = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'That state of the account’s information requests, including what '
'information is needed and by when it must be provided.',
),
)
@classmethod
def from_stripe_object(cls, stripe_object):
_dict = stripe_object.to_dict()
_dict.pop('object')
_dict.pop('external_accounts') # todo: handle this
a = cls(**_dict)
a.save()
return a
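# Usage sketch (added, not part of the original module). `stripe` is the
# official Stripe Python client, and the enclosing model class is assumed
# here to be named `Account`:
#
#   import stripe
#   remote = stripe.Account.retrieve('acct_...')
#   account = Account.from_stripe_object(remote)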
|
[
"[email protected]"
] | |
034d42940af343c1638afe358b2506823e840bf4
|
1be4f95b722397f255e58b21a182171eb24b6fe5
|
/datalad_neuroimaging/extractors/tests/test_dicom.py
|
338f2fa4c994f2dd11ced3bf44f4f0f768516770
|
[
"MIT"
] |
permissive
|
yarikoptic/datalad-neuroimaging
|
5f9a7b0993ac56bbeaba95427541b2c75ed711ea
|
7ee146d6c7c864aafc8b540d0ccd9b3a1b5b7210
|
refs/heads/master
| 2022-11-11T02:57:46.228562 | 2018-04-10T14:05:21 | 2018-04-10T14:05:21 | 128,942,708 | 0 | 0 | null | 2018-04-10T14:04:46 | 2018-04-10T14:04:46 | null |
UTF-8
|
Python
| false | false | 3,032 |
py
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test audio extractor"""
from datalad.tests.utils import SkipTest
try:
from datalad_neuroimaging.extractors.dicom import MetadataExtractor as DicomExtractor
except ImportError:
raise SkipTest
from shutil import copy
from os.path import dirname
from os.path import join as opj
from datalad.api import Dataset
from datalad.tests.utils import with_tempfile
from datalad.tests.utils import ok_clean_git
from datalad.tests.utils import assert_status
from datalad.tests.utils import assert_result_count
from datalad.tests.utils import eq_
from datalad.tests.utils import assert_dict_equal
from datalad.tests.utils import assert_in
from datalad.tests.utils import assert_not_in
@with_tempfile(mkdir=True)
def test_dicom(path):
ds = Dataset(path).create()
ds.config.add('datalad.metadata.nativetype', 'dicom', where='dataset')
copy(
opj(dirname(dirname(dirname(__file__))), 'tests', 'data', 'dicom.dcm'),
path)
ds.add('.')
ok_clean_git(ds.path)
res = ds.aggregate_metadata()
assert_status('ok', res)
# query for the file metadata
res = ds.metadata('dicom.dcm')
assert_result_count(res, 1)
# from this extractor
meta = res[0]['metadata']['dicom']
assert_in('@context', meta)
# no point in testing ALL keys, but we got plenty
assert(len(meta.keys()) > 70)
eq_(meta['SeriesDate'], '20070205')
# now ask for the dataset metadata, which should have both the unique props
# and a list of imageseries (one in this case, but a list)
res = ds.metadata(reporton='datasets')
assert_result_count(res, 1)
dsmeta = res[0]['metadata']['dicom']
# same context
assert_dict_equal(meta['@context'], dsmeta['@context'])
meta.pop('@context')
eq_(dsmeta['Series'], [meta])
# for this artificial case pretty much the same info also comes out as
# unique props, but wrapped in lists
ucp = res[0]['metadata']["datalad_unique_content_properties"]['dicom']
assert_dict_equal(
{k: [v]
for k, v in dsmeta['Series'][0].items()
if k not in DicomExtractor._unique_exclude and k in ucp},
{k: v
for k, v in ucp.items()
if k not in DicomExtractor._unique_exclude})
    # but, if we switch off file-based metadata storage
ds.config.add('datalad.metadata.aggregate-content-dicom', 'false', where='dataset')
ds.aggregate_metadata()
res = ds.metadata(reporton='datasets')
# the auto-uniquified bits are gone but the Series description stays
assert_not_in("datalad_unique_content_properties", res[0]['metadata'])
eq_(dsmeta['Series'], [meta])
|
[
"[email protected]"
] | |
a2eb7128900a56f43e0ece19dedc06e35f192da8
|
c2d3b7855b055cb8b0563a3812fb0dbfc670bc09
|
/lessons_src/03_CFL_Condition.py
|
7ecf7fe0fd959b7819bcdc7829ed929d41253a87
|
[
"CC-BY-3.0",
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
tnakaicode/python-cfd
|
85fab343c4c99f32777e45163b89f4d952d83e96
|
174176bdcb1c31e021fefd8fd54e2b3dd898dc62
|
refs/heads/master
| 2023-08-08T16:53:34.455088 | 2020-05-07T17:14:54 | 2020-05-07T17:14:54 | 261,902,096 | 0 | 0 |
NOASSERTION
| 2023-07-06T21:27:39 | 2020-05-06T23:30:09 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,978 |
py
|
#!/usr/bin/env python
# coding: utf-8
# Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.
# [@LorenaABarba](https://twitter.com/LorenaABarba)
# 12 steps to Navier–Stokes
# =====
# ***
# Did you experiment in Steps [1](./01_Step_1.ipynb) and [2](./02_Step_2.ipynb) using different parameter choices? If you did, you probably ran into some unexpected behavior. Did your solution ever blow up? (In my experience, CFD students *love* to make things blow up.)
#
# You are probably wondering why changing the discretization parameters affects your solution in such a drastic way. This notebook complements our [interactive CFD lessons](https://github.com/barbagroup/CFDPython) by discussing the CFL condition. And learn more by watching Prof. Barba's YouTube lectures (links below).
# Convergence and the CFL Condition
# ----
# ***
# For the first few steps, we've been using the same general initial and boundary conditions. With the parameters we initially suggested, the grid has 41 points and the timestep is 0.025 seconds. Now, we're going to experiment with increasing the size of our grid. The code below is identical to the code we used in [Step 1](./01_Step_1.ipynb), but here it has been bundled up in a function so that we can easily examine what happens as we adjust just one variable: **the grid size**.
# In[1]:
import numpy # numpy is a library for array operations akin to MATLAB
from matplotlib import pyplot # matplotlib is a 2D plotting library
# get_ipython().run_line_magic('matplotlib', 'inline')
def linearconv(nx):
dx = 2 / (nx - 1)
nt = 20 # nt is the number of timesteps we want to calculate
dt = .025 # dt is the amount of time each timestep covers (delta t)
c = 1
# defining a numpy array which is nx elements long with every value equal to 1.
u = numpy.ones(nx)
# setting u = 2 between 0.5 and 1 as per our I.C.s
u[int(.5 / dx):int(1 / dx + 1)] = 2
# initializing our placeholder array, un, to hold the values we calculate for the n+1 timestep
un = numpy.ones(nx)
for n in range(nt): # iterate through time
un = u.copy() # copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1])
pyplot.plot(numpy.linspace(0, 2, nx), u)
pyplot.show()
# Now let's examine the results of our linear convection problem with an increasingly fine mesh.
# In[2]:
linearconv(41) # convection using 41 grid points
# This is the same result as our Step 1 calculation, reproduced here for reference.
# In[3]:
linearconv(61)
# Here, there is still numerical diffusion present, but it is less severe.
# In[4]:
linearconv(71)
# Here the same pattern is present -- the wave is more square than in the previous runs.
# In[5]:
linearconv(85)
# This doesn't look anything like our original hat function.
# ### What happened?
# To answer that question, we have to think a little bit about what we're actually implementing in code.
#
# In each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example.
#
# Each iteration of our time loop covers a time-step of length $\Delta t$, which we have been defining as 0.025
#
# During this iteration, we evaluate the speed of the wave at each of the $x$ points we've created. In the last plot, something has clearly gone wrong.
#
# What has happened is that over the time period $\Delta t$, the wave is travelling a distance which is greater than `dx`. The length `dx` of each grid box is related to the number of total points `nx`, so stability can be enforced if the $\Delta t$ step size is calculated with respect to the size of `dx`.
#
# $$\sigma = \frac{u \Delta t}{\Delta x} \leq \sigma_{\max}$$
#
# where $u$ is the speed of the wave; $\sigma$ is called the **Courant number** and the value of $\sigma_{\max}$ that will ensure stability depends on the discretization used.
#
# In a new version of our code, we'll use the CFL number to calculate the appropriate time-step `dt` depending on the size of `dx`.
#
#
# In[6]:
import numpy
from matplotlib import pyplot
def linearconv(nx):
dx = 2 / (nx - 1)
nt = 20 # nt is the number of timesteps we want to calculate
c = 1
sigma = .5
dt = sigma * dx
u = numpy.ones(nx)
u[int(.5 / dx):int(1 / dx + 1)] = 2
un = numpy.ones(nx)
for n in range(nt): # iterate through time
un = u.copy() # copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1])
pyplot.plot(numpy.linspace(0, 2, nx), u)
# In[7]:
linearconv(41)
# In[8]:
linearconv(61)
# In[9]:
linearconv(81)
# In[10]:
linearconv(101)
# In[11]:
linearconv(121)
# Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance. The number of time iterations we have advanced the solution at is held constant at `nt = 20`, but depending on the value of `nx` and the corresponding values of `dx` and `dt`, a shorter time window is being examined overall.
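# Added sketch (not part of the original lesson): with sigma fixed at 0.5 the
# timestep shrinks together with dx, so the total simulated time nt * dt
# shrinks as nx grows -- which is why the wave travels a shorter distance.
for nx in (41, 61, 81, 101, 121):
    dx = 2 / (nx - 1)
    dt = 0.5 * dx  # sigma = 0.5, c = 1
    print(nx, 'grid points -> total simulated time:', 20 * dt)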
# Learn More
# -----
# ***
# It's possible to do rigorous analysis of the stability of numerical schemes, in some cases. Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on YouTube.
# In[12]:
from IPython.display import YouTubeVideo
YouTubeVideo('Yw1YPBupZxU')
# In[13]:
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
|
[
"[email protected]"
] | |
16d01ee4642643a3fa9a06a6f2fb3e7d14bc6433
|
8eab8ab725c2132bb8d090cdb2d23a5f71945249
|
/virt/Lib/site-packages/jupyter_client/jsonutil.py
|
9903f70ecee4d8e753d94367e32ed64f5e0d57aa
|
[
"MIT"
] |
permissive
|
JoaoSevergnini/metalpy
|
6c88a413a82bc25edd9308b8490a76fae8dd76ca
|
c2d0098a309b6ce8c756ff840bfb53fb291747b6
|
refs/heads/main
| 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 |
MIT
| 2022-11-03T20:07:50 | 2022-03-27T22:21:01 |
Python
|
UTF-8
|
Python
| false | false | 5,944 |
py
|
"""Utilities to manipulate JSON objects."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import math
import numbers
import re
import types
import warnings
from binascii import b2a_base64
from collections.abc import Iterable
from datetime import datetime
from typing import Optional
from typing import Union
from dateutil.parser import parse as _dateutil_parse # type: ignore
from dateutil.tz import tzlocal # type: ignore
next_attr_name = "__next__" # Not sure what downstream library uses this, but left it to be safe
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT = re.compile(
r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?(Z|([\+\-]\d{2}:?\d{2}))?$"
)
# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def _ensure_tzinfo(dt: datetime) -> datetime:
"""Ensure a datetime object has tzinfo
If no tzinfo is present, add tzlocal
"""
if not dt.tzinfo:
# No more naïve datetime objects!
warnings.warn(
"Interpreting naive datetime as local %s. Please add timezone info to timestamps." % dt,
DeprecationWarning,
stacklevel=4,
)
dt = dt.replace(tzinfo=tzlocal())
return dt
def parse_date(s: Optional[str]) -> Optional[Union[str, datetime]]:
"""parse an ISO8601 date string
If it is None or not a valid ISO8601 timestamp,
it will be returned unmodified.
Otherwise, it will return a datetime object.
"""
if s is None:
return s
m = ISO8601_PAT.match(s)
if m:
dt = _dateutil_parse(s)
return _ensure_tzinfo(dt)
return s
def extract_dates(obj):
"""extract ISO8601 dates from unpacked JSON"""
if isinstance(obj, dict):
new_obj = {} # don't clobber
for k, v in obj.items():
new_obj[k] = extract_dates(v)
obj = new_obj
elif isinstance(obj, (list, tuple)):
obj = [extract_dates(o) for o in obj]
elif isinstance(obj, str):
obj = parse_date(obj)
return obj
def squash_dates(obj):
"""squash datetime objects into ISO8601 strings"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k, v in obj.items():
obj[k] = squash_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [squash_dates(o) for o in obj]
elif isinstance(obj, datetime):
obj = obj.isoformat()
return obj
def date_default(obj):
"""DEPRECATED: Use jupyter_client.jsonutil.json_default"""
warnings.warn(
"date_default is deprecated since jupyter_client 7.0.0."
" Use jupyter_client.jsonutil.json_default.",
stacklevel=2,
)
return json_default(obj)
def json_default(obj):
"""default function for packing objects in JSON."""
if isinstance(obj, datetime):
obj = _ensure_tzinfo(obj)
return obj.isoformat().replace('+00:00', 'Z')
if isinstance(obj, bytes):
return b2a_base64(obj).decode('ascii')
if isinstance(obj, Iterable):
return list(obj)
if isinstance(obj, numbers.Integral):
return int(obj)
if isinstance(obj, numbers.Real):
return float(obj)
raise TypeError("%r is not JSON serializable" % obj)
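# Usage sketch (added): json_default plugs straight into the standard json
# module as its `default` hook; datetimes serialize to ISO8601 strings and
# bytes to base64 text:
#
#   import json
#   json.dumps({'when': datetime.now(), 'blob': b'\x00'}, default=json_default)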
# Copy of the old ipykernel's json_clean
# This is temporary, it should be removed when we deprecate support for
# non-valid JSON messages
def json_clean(obj):
# types that are 'atomic' and ok in json as-is.
atomic_ok = (str, type(None))
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
# Since bools are a subtype of Integrals, which are a subtype of Reals,
# we have to check them in that order.
if isinstance(obj, bool):
return obj
if isinstance(obj, numbers.Integral):
# cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
return int(obj)
if isinstance(obj, numbers.Real):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return float(obj)
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
        # unambiguous binary data is base64-encoded
# (this probably should have happened upstream)
return b2a_base64(obj).decode('ascii')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)
):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(str, obj)))
if nkeys != nkeys_collapsed:
raise ValueError(
'dict cannot be safely converted to JSON: '
'key collision would lead to dropped values'
)
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k, v in obj.items():
out[str(k)] = json_clean(v)
return out
if isinstance(obj, datetime):
return obj.strftime(ISO8601)
# we don't understand it, it's probably an unserializable object
raise ValueError("Can't clean for JSON: %r" % obj)
|
[
"[email protected]"
] | |
24c1026d70712dc58f96d6e0a9023fab0f1cdfd6
|
5c531de5e4759c904e608b4fc653b2b041f79a0e
|
/Snap_monte_carlo_simulation.py
|
7cb310205cf32290a10076f203756fb68c14d270
|
[] |
no_license
|
jianhui-ben/leetcode_python
|
133c7e6e5c7316d00607ba2e327239e002de28b2
|
fcc16124cc24a5993e27f5d97e78d8f290e68230
|
refs/heads/master
| 2022-06-05T22:32:18.034581 | 2022-05-17T02:27:11 | 2022-05-17T02:27:11 | 250,683,308 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 965 |
py
|
#N Turkers label the data; it is so ambiguous that they basically guess at random among M options.
#Question: how likely is it that they reach a majority consensus, i.e. that more than half pick the same option?
#First derive the analytic solution, then approximate it in code.
#The code is simple: a Monte Carlo simulation run enough times that the empirical frequency approximates the probability.
## p= (1/M)**(N//2)
print(12//2)
import random
random.randint(1, 2)
import collections
collections.Counter([1,1,1,2, 3,3,3,3]).most_common(1)[0][1]
def prob(M, N):
import random
import collections
major=0
for _ in range(100000):
choices= [None]* N
for i in range(N):
choices[i]= random.randint(1, M)
if collections.Counter(choices).most_common(1)[0][1]> int(N//2):
major+=1
return float(major)/100000.0*100.0
def verify(M, N):
return (1.0/float(M))**int(N//2)*100.0
verify(7, 3)
prob(7, 3)
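# Added sketch (not from the original file): the exact closed form, assuming
# each of the N workers picks one of M options uniformly and independently.
# Two different options cannot both get a strict majority, so the events are
# disjoint and the union probability is M times one binomial tail.
from math import comb
def exact_prob(M, N):
    p = 1.0 / M
    tail = sum(comb(N, k) * p**k * (1 - p)**(N - k)
               for k in range(N // 2 + 1, N + 1))
    return M * tail * 100.0 # percentage, to match prob() above
exact_prob(7, 3) # ~38.78, close to the simulated prob(7, 3)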
|
[
"[email protected]"
] | |
588cb0f08c24dabc182e357f3b5efba012b7b98e
|
bcf42041a64fdefcaec54843900c3d8f833f2215
|
/emc/kb/browser/dataout.py
|
cf3d77f951b9bb4e11b4e53d61cd34ea9ffe24d5
|
[] |
no_license
|
adam139/emc.kb
|
487650837207e0f773c077310f001a524965ee4f
|
ff21383762dad96ac09d414e7d1e8104c51b91f9
|
refs/heads/master
| 2022-01-14T09:42:49.790659 | 2020-09-22T13:16:27 | 2020-09-22T13:16:27 | 49,318,509 | 0 | 3 | null | 2016-12-26T17:37:20 | 2016-01-09T09:34:12 |
Python
|
UTF-8
|
Python
| false | false | 7,602 |
py
|
#-*- coding: UTF-8 -*-
import csv
from cStringIO import StringIO
from zope import event
from zope.component import getMultiAdapter
from five import grok
from zope.interface import implements
from zope.interface import Interface
from Products.Five.browser import BrowserView
from Products.CMFCore.utils import getToolByName
from Products.statusmessages.interfaces import IStatusMessage
import datetime
from plone import api
from emc.policy.events import AddloginEvent,NormalUserloginEvent
from emc.policy import get_ip,fmt,list2str,getfullname_orid
from emc.kb import _
# todo code cp932
# need byte string
data_VALUES = [
u"Subject".encode('utf-8'),
u"Object".encode('utf-8'),
u"Time".encode('utf-8'),
u"ip".encode('utf-8'),
u"Level".encode('utf-8'),
u"Description".encode('utf-8'),
u"Result".encode('utf-8')
]
userlog_header = [
u"User".encode('utf-8'),
u"Time".encode('utf-8'),
u"ip".encode('utf-8'),
u"Level".encode('utf-8'),
u"Description".encode('utf-8'),
u"Result".encode('utf-8')
]
class AdminLogDataOut (grok.View):
"""AdminLog Data export as CSV files.
"""
grok.context(Interface)
grok.name('export_csv')
grok.require('zope2.View')
def searchview(self,viewname="admin_logs"):
searchview = getMultiAdapter((self.context, self.request),name=viewname)
return searchview
def render(self):
method = self.request.get('REQUEST_METHOD', 'GET')
# import pdb
# pdb.set_trace()
if (method != 'POST'):
return self.request.response.redirect(self.context.absolute_url())
if self.request.form.get('form.button.Cancel'):
return self.request.response.redirect(self.context.absolute_url())
searchview = self.searchview()
# datadic receive front ajax post data
datadic = self.request.form
start = int(datadic['start']) # batch search start position
size = int(datadic['size']) # batch search size
sortcolumn = datadic['sortcolumn']
sortdirection = datadic['sortdirection']
keyword = (datadic['searchabletext']).strip()
# origquery = searchview.getPathQuery()
origquery = {}
        # default is reverse, i.e. descending
        origquery['sort_on'] = sortcolumn
        # sql db sort_order: asc, desc
origquery['sort_order'] = sortdirection
        # fuzzy keyword search
if keyword != "":
origquery['SearchableText'] = '%'+keyword+'%'
else:
origquery['SearchableText'] = ""
        # origquery drives the batched search
origquery['size'] = size
origquery['start'] = start
        # totalquery counts all matching records
totalquery = origquery.copy()
totalquery['size'] = 0
# search all size = 0 return numbers of recorders
totalnum = searchview.search_multicondition(totalquery)
origquery.update({"size":totalnum})
resultDicLists = searchview.search_multicondition(origquery)
del origquery
del totalquery
if totalnum == 0: return
#fire a log event
user = api.user.get_current()
ip = get_ip(self.request)
if user is None:
return
des = "从用户日志表导出了%s条日志" % totalnum
loginEvent = NormalUserloginEvent(userid = getfullname_orid(user),
datetime = datetime.datetime.now().strftime(fmt),
ip = ip,
type = 0,
description = des,
result = 1)
if loginEvent.available():
if loginEvent.is_normal_user():
event.notify(loginEvent)
else:
des = "从管理员日志表导出了%s条日志" % totalnum
loginEvent = AddloginEvent(adminid = getfullname_orid(user),
userid = "",
datetime = datetime.datetime.now().strftime(fmt),
ip = ip,
type = 0,
description = des,
result = 1)
event.notify(loginEvent)
return self.exportData(resultDicLists)
def exportData(self,recorders):
"""Export Data within CSV file."""
datafile = self._createCSV(self._getDataInfos(recorders))
return self._createRequest(datafile.getvalue(), "admin_log_export.log")
def _getDataInfos(self,recorders):
"""Generator filled with the recorders."""
from emc.kb.utils import kind
from emc.kb.utils import level as log_level
from emc.kb.utils import result as log_result
for i in recorders:
i = list(i)
i[4] = kind[i[4]]
i[5] = log_level[i[5]]
i[7] = log_result[i[7]]
yield i
def _createCSV(self, lines):
"""Write header and lines within the CSV file."""
datafile = StringIO()
datafile.write(u'\ufeff'.encode('utf-8'))
writor = csv.writer(datafile)
writor.writerow(data_VALUES)
map(writor.writerow, lines)
return datafile
def _createRequest(self, data, filename):
"""Create the request to be returned.
Add the right header and the CSV file.
"""
self.request.response.addHeader('Content-Disposition', "attachment; filename=%s" % filename)
self.request.response.addHeader('Content-Type', "text/csv;charset=utf-8")
self.request.response.addHeader("Content-Transfer-Encoding", "8bit")
self.request.response.addHeader('Content-Length', "%d" % len(data))
self.request.response.addHeader('Pragma', "no-cache")
self.request.response.addHeader('Cache-Control', "must-revalidate, post-check=0, pre-check=0, public")
self.request.response.addHeader('Expires', "0")
return data
class UserLogDataOut (AdminLogDataOut):
"""UserLog Data export as CSV files.
"""
# grok.context(Interface)
grok.name('userlog_export_csv')
# grok.require('zope2.View')
def searchview(self,viewname="user_logs"):
searchview = getMultiAdapter((self.context, self.request),name=viewname)
return searchview
def _createCSV(self, lines):
"""Write header and lines within the CSV file."""
datafile = StringIO()
writor = csv.writer(datafile)
writor.writerow(userlog_header)
map(writor.writerow, lines)
return datafile
def exportData(self,recorders):
"""Export Data within CSV file."""
datafile = self._createCSV(self._getDataInfos(recorders))
return self._createRequest(datafile.getvalue(), "user_log_export.log")
def _getDataInfos(self,recorders):
"""Generator filled with the recorders."""
from emc.kb.utils import kind
from emc.kb.utils import level as log_level
from emc.kb.utils import result as log_result
for i in recorders:
i = list(i)
i[3] = kind[i[3]]
i[4] = log_level[i[4]]
i[6] = log_result[i[6]]
yield i
|
[
"[email protected]"
] | |
2112bbc0bb40eb05b9d150ae386c7817e5840775
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/unprocessing/estimator.py
|
def1f4464ffc7f3d2afab9ff75d80d3992b58c68
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 |
Apache-2.0
| 2020-06-23T01:55:11 | 2020-02-23T07:59:42 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 5,225 |
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unprocessing model function and train and eval specs for Estimator.
Unprocessing Images for Learned Raw Denoising
http://timothybrooks.com/tech/unprocessing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from unprocessing import process
from tensorflow.contrib import layers as contrib_layers
def psnr(labels, predictions):
"""Computes average peak signal-to-noise ratio of `predictions`.
Here PSNR is defined with respect to the maximum value of 1. All image tensors
must be within the range [0, 1].
Args:
labels: Tensor of shape [B, H, W, N].
predictions: Tensor of shape [B, H, W, N].
Returns:
Tuple of (psnr, update_op) as returned by tf.metrics.
"""
predictions.shape.assert_is_compatible_with(labels.shape)
with tf.control_dependencies([tf.assert_greater_equal(labels, 0.0),
tf.assert_less_equal(labels, 1.0)]):
psnrs = tf.image.psnr(labels, predictions, max_val=1.0)
psnrs = tf.boolean_mask(psnrs, tf.logical_not(tf.is_inf(psnrs)))
return tf.metrics.mean(psnrs, name='psnr')
def create_model_fn(inference_fn, hparams):
"""Creates a model function for Estimator.
Args:
inference_fn: Model inference function with specification:
Args -
noisy_img - Tensor of shape [B, H, W, 4].
variance - Tensor of shape [B, H, W, 4].
Returns -
Tensor of shape [B, H, W, 4].
hparams: Hyperparameters for model as a tf.contrib.training.HParams object.
Returns:
`_model_fn`.
"""
def _model_fn(features, labels, mode, params):
"""Constructs the model function.
Args:
features: Dictionary of input features.
labels: Tensor of labels if mode is `TRAIN` or `EVAL`, otherwise `None`.
mode: ModeKey object (`TRAIN` or `EVAL`).
params: Parameter dictionary passed from the Estimator object.
Returns:
An EstimatorSpec object that encapsulates the model and its serving
configurations.
"""
del params # Unused.
def process_images(images):
"""Closure for processing images with fixed metadata."""
return process.process(images, features['red_gain'],
features['blue_gain'], features['cam2rgb'])
denoised_img = inference_fn(features['noisy_img'], features['variance'])
noisy_img = process_images(features['noisy_img'])
denoised_img = process_images(denoised_img)
truth_img = process_images(labels)
if mode in [tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL]:
loss = tf.losses.absolute_difference(truth_img, denoised_img)
else:
loss = None
if mode == tf_estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
train_op = contrib_layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
learning_rate=None,
optimizer=optimizer,
name='') # Prevents scope prefix.
else:
train_op = None
if mode == tf_estimator.ModeKeys.EVAL:
eval_metric_ops = {'PSNR': psnr(truth_img, denoised_img)}
def summary(images, name):
"""As a hack, saves image summaries by adding to `eval_metric_ops`."""
images = tf.saturate_cast(images * 255 + 0.5, tf.uint8)
eval_metric_ops[name] = (tf.summary.image(name, images, max_outputs=2),
tf.no_op())
summary(noisy_img, 'Noisy')
summary(denoised_img, 'Denoised')
summary(truth_img, 'Truth')
diffs = (denoised_img - truth_img + 1.0) / 2.0
summary(diffs, 'Diffs')
else:
eval_metric_ops = None
return tf_estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
return _model_fn
def create_train_and_eval_specs(train_dataset_fn,
eval_dataset_fn,
eval_steps=250):
"""Creates a TrainSpec and EvalSpec.
Args:
train_dataset_fn: Function returning a Dataset of training data.
eval_dataset_fn: Function returning a Dataset of evaluation data.
eval_steps: Number of steps for evaluating model.
Returns:
Tuple of (TrainSpec, EvalSpec).
"""
train_spec = tf_estimator.TrainSpec(input_fn=train_dataset_fn, max_steps=None)
eval_spec = tf_estimator.EvalSpec(
input_fn=eval_dataset_fn, steps=eval_steps, name='')
return train_spec, eval_spec
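# Usage sketch (added; `my_inference_fn`, `train_fn`, `eval_fn` and `hparams`
# are caller-supplied assumptions - hparams only needs a `learning_rate`
# attribute for create_model_fn above):
#
#   model_fn = create_model_fn(my_inference_fn, hparams)
#   estimator = tf_estimator.Estimator(model_fn=model_fn, model_dir='/tmp/unproc')
#   train_spec, eval_spec = create_train_and_eval_specs(train_fn, eval_fn)
#   tf_estimator.train_and_evaluate(estimator, train_spec, eval_spec)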
|
[
"[email protected]"
] | |
0fb5e7964be470bc671a7d6c2fc74cb80dd76bf7
|
07c6d3055eda7b1ddb16ce9444166ed311ce3219
|
/modules/topics.py
|
0821e4b960e83e661ea7519105c0d6cf7682fd6f
|
[] |
no_license
|
IISH/dpe
|
4df9b0576b5419e543c61ce9ef14380ddc4b5c03
|
6509b06aa03242f450766d4cb5d8984f14146b11
|
refs/heads/master
| 2021-01-10T17:52:54.775316 | 2016-05-04T09:50:46 | 2016-05-04T09:50:46 | 42,994,984 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,867 |
py
|
import urllib2
import simplejson
import json
import sys
import pandas as pd
import random
import vincent
from vincent import Axis, AxisProperties, PropertySet, ValueRef
from pandas.io.json import json_normalize
from config import configuration, dataverse2indicators, load_dataverse, findpid, load_metadata
import re
def loadjson(apiurl):
jsondataurl = apiurl
req = urllib2.Request(jsondataurl)
opener = urllib2.build_opener()
f = opener.open(req)
dataframe = simplejson.load(f)
return dataframe
def topics_parser(alltopics):
topics = {}
indicators = {}
topic2inds = {}
indline = []
for item in alltopics:
#print item
name = item['Name']
thisid = int(item['ID'])
pcode = item['parent ID']
if not pcode:
topics[name] = thisid
else:
indicators[thisid] = name
try:
indline = topic2inds[pcode]
except:
indline = []
indline.append(thisid)
topic2inds[int(pcode)] = indline
return (topics, indicators, topic2inds)
def load_alltopics(api, branch):
result = loadjson(api)
(topics, indicators, topic2inds) = topics_parser(result)
datasets = dataverse2indicators(branch)
html = ''
for topic in sorted(topics):
topicID = topics[topic]
html = html + "<optgroup label=\"" + str(topic) + "\">\n"
indlist = topic2inds[topicID]
for ind in indlist:
indicator = indicators[ind]
try:
showind = datasets[indicator]
except:
showind = ind
html = html + "\t<option value=\"" + str(showind) + "\">" + indicator + "</option>" + "\n"
html = html + "</optgroup>\n"
return html
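# Usage sketch (added; the API URL and branch name are placeholders, not
# values from the original file):
#
#   html = load_alltopics('http://example.org/api/topics', 'some_branch')
#   # -> <optgroup>/<option> markup ready to embed in a <select> element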
|
[
"[email protected]"
] | |
cce4258214c9c76a0aa0de00685e225913846b9b
|
a7dc8f76293a2c60478c95c4720cf39b8556c9e8
|
/tests/test_classify.py
|
3dc694dcb8ce4841090ee4d127deb0f3d62de74f
|
[
"MIT"
] |
permissive
|
FarDON/cherry
|
8b67f6587a5c13603dfe5047edece218a382e904
|
28da9a05a0bf09f209829e81b8642e3fd76034e8
|
refs/heads/master
| 2022-11-02T13:13:12.366289 | 2020-06-22T13:56:45 | 2020-06-22T13:56:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,587 |
py
|
import os
import unittest
import cherry
from unittest import mock
from cherry import classify
from sklearn.exceptions import NotFittedError
class ClassifyTest(unittest.TestCase):
def setUp(self):
pass
# __init__()
@mock.patch('cherry.classifyer.Classify._classify')
@mock.patch('cherry.classifyer.Classify._load_cache')
def test_init(self, mock_load, mock_classify):
mock_load.return_value = ('foo', 'bar')
cherry.classifyer.Classify(model='random', text=['random text'])
mock_load.assert_called_once_with('random')
mock_classify.assert_called_once_with(['random text'])
# _load_cache()
@mock.patch('cherry.classifyer.Classify._classify')
@mock.patch('cherry.classifyer.load_cache')
def test_load_cache(self, mock_load, mock_classify):
res = cherry.classifyer.Classify(model='foo', text=['random text'])
mock_load.assert_not_called()
@mock.patch('sklearn.feature_extraction.text.CountVectorizer.transform')
@mock.patch('cherry.classifyer.load_cache')
def test_classify_with_missing_token(self, mock_load, mock_trans):
mock_object = mock.Mock()
mock_object.transform.side_effect = NotFittedError()
mock_load.return_value = mock_object
# with self.assertRaises(cherry.exceptions.TokenNotFoundError) as token_error:
# res = cherry.classifyer.Classify(model='harmful', text=['random text'])
# self.assertEqual(
# str(token_error.exception),
# 'Some of the tokens in text never appear in training data')
|
[
"[email protected]"
] | |
0a27993a6e8351ecb41b9f6181bea19c78bf6000
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/store/StoreTableDataProvider.py
|
047518eda41afe48b100901c3b0b9c35381c591b
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 969 |
py
|
# 2017.02.03 21:50:30 Central Europe (standard time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/store/StoreTableDataProvider.py
from gui.Scaleform.framework.entities.DAAPIDataProvider import DAAPIDataProvider
class StoreTableDataProvider(DAAPIDataProvider):
def __init__(self):
super(StoreTableDataProvider, self).__init__()
self.__list = []
@property
def collection(self):
return self.__list
def buildList(self, dpList):
self.__list = dpList
def emptyItem(self):
return None
def clearList(self):
while len(self.__list):
self.__list.pop()
self.__list = None
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\store\StoreTableDataProvider.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:50:30 Central Europe (standard time)
|
[
"[email protected]"
] | |
52bf11f8be269922508207b1a1e3c7cdd7224b51
|
ab6cfc2aedad3de7a04efae4a6105dc893958b9e
|
/hivwholeseq/patients/get_allele_cocounts.py
|
733f7a025ee4e21175e08d194a24584b733f1f04
|
[
"MIT"
] |
permissive
|
neherlab/hivwholeseq
|
158c0ce590bc67d1d36042c71b8b0afa3e8d8abf
|
978ce4060362e4973f92b122ed5340a5314d7844
|
refs/heads/master
| 2021-01-15T16:48:15.769316 | 2015-09-04T08:33:52 | 2015-09-04T08:33:52 | 49,801,765 | 4 | 3 | null | 2016-01-17T03:43:46 | 2016-01-17T03:43:44 | null |
UTF-8
|
Python
| false | false | 1,994 |
py
|
#!/usr/bin/env python
# vim: fdm=marker
'''
author: Fabio Zanini
date: 20/03/14
content: Get the joint counts at two sites for patient samples, after mapping.
'''
# Modules
import argparse
import numpy as np
import matplotlib.pyplot as plt
from hivwholeseq.patients.samples import load_samples_sequenced as lssp
from hivwholeseq.patients.samples import SamplePat
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Get allele cocounts',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
pats_or_samples = parser.add_mutually_exclusive_group(required=True)
pats_or_samples.add_argument('--patients', nargs='+',
help='Patient to analyze')
pats_or_samples.add_argument('--samples', nargs='+',
help='Samples to map')
parser.add_argument('--regions', nargs='+', required=True,
help='Fragments to analyze (e.g. F1 F6)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-3]')
    parser.add_argument('--qualmin', type=int, default=30,
                        help='Minimal quality of base to call')
    # Added: the original snippet read `args.plot` below without defining the
    # flag, so it is declared here to make the script runnable.
    parser.add_argument('--plot', action='store_true',
                        help='Plot the cocounts')
    args = parser.parse_args()
pnames = args.patients
samplenames = args.samples
regions = args.regions
VERBOSE = args.verbose
qual_min = args.qualmin
use_plot = args.plot
samples = lssp()
if pnames is not None:
samples = samples.loc[samples.patient.isin(pnames)]
elif samplenames is not None:
samples = samples.loc[samples.index.isin(samplenames)]
if VERBOSE >= 2:
print 'samples', samples.index.tolist()
for region in regions:
for samplename, sample in samples.iterrows():
sample = SamplePat(sample)
if VERBOSE >= 1:
print region, samplename
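            # fn_out: path to this sample/region's cocounts file; its
            # construction was elided in the original snippet and must be
            # defined before this line.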
cocount = np.load(fn_out)['cocounts']
|
[
"[email protected]"
] | |
ec461e4efcf3da5428bd688d03a049eaf481b553
|
60b8c5e048be54f49c28b2c224e86cf4d4629164
|
/gluon/setup.py
|
ec8a8656318e076b7715cb3373652d0ac7778656
|
[
"MIT"
] |
permissive
|
kcieslik/imgclsmob
|
b333d2b0f8a04d15cc1c0b0d38845d1d2923ae26
|
d15bc7d4ebc50a31b4ad01cb3ad0e73b8cddbc9a
|
refs/heads/master
| 2020-06-13T06:21:01.744329 | 2019-06-28T16:05:11 | 2019-06-28T16:05:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,515 |
py
|
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gluoncv2',
version='0.0.47',
description='Image classification and segmentation models for Gluon',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/osmr/imgclsmob',
author='Oleg Sémery',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Image Recognition',
],
keywords='machine-learning deep-learning neuralnetwork image-classification mxnet gluon imagenet cifar svhn vgg '
'resnet pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 xdensnet squeezenet '
'squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet pnasnet ror '
'proxylessnas dianet efficientnet image-segmentation voc ade20k cityscapes coco pspnet deeplabv3 fcn',
packages=find_packages(exclude=['others', '*.others', 'others.*', '*.others.*']),
include_package_data=True,
install_requires=['numpy'],
)
|
[
"[email protected]"
] | |
a1514ff0aae5fff6ba6124c662459a1592b7a132
|
55c8fd9ce0c5bb147cbdb69274873b93b35356fc
|
/pathGeneration-v2/code-v2/full_eval.py
|
ca61e1c985c92a33e67e67192299fb8498954df2
|
[] |
no_license
|
WOW5678/pathGeneration
|
b4143bbbc2be686ee011d24af46d57d2cee88f06
|
88f31b4f30750307fa7f5072e7faa2f959a6d0c0
|
refs/heads/master
| 2020-08-06T17:46:22.075128 | 2019-11-15T12:38:07 | 2019-11-15T12:38:07 | 213,097,008 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,528 |
py
|
import numpy as np
from sklearn.metrics import precision_recall_fscore_support, average_precision_score, \
roc_auc_score, precision_score, recall_score
thres = 0.5
def f1_score(preds, labels, thres, average='micro'):
'''Returns (precision, recall, F1 score) from a batch of predictions (thresholded probabilities)
given a batch of labels (for macro-averaging across batches)'''
#preds = (probs >= thres).astype(np.int32)
# print('probs:',probs)
# print('labels:',labels)
# print('preds:',preds)
#preds=probs
# print(preds)
# print(labels)
p, r, f, _ = precision_recall_fscore_support(labels, preds, average=average,
warn_for=())
return p, r, f
def auc_pr(probs, labels, average='micro'):
'''Precision integrated over all thresholds (area under the precision-recall curve)'''
if average == 'macro' or average is None:
sums = labels.sum(0)
nz_indices = np.logical_and(sums != labels.shape[0], sums != 0)
probs = probs[:, nz_indices]
labels = labels[:, nz_indices]
return average_precision_score(labels, probs, average=average)
def auc_roc(probs, labels, average='micro'):
'''Area under the ROC curve'''
if average == 'macro' or average is None:
sums = labels.sum(0)
nz_indices = np.logical_and(sums != labels.shape[0], sums != 0)
probs = probs[:, nz_indices]
labels = labels[:, nz_indices]
# print('labels:',labels)
# print('probs:',probs)
return roc_auc_score(labels, probs, average=average)
def precision_at_k(probs, labels, k, average='micro'):
indices = np.argpartition(-probs, k-1, axis=1)[:, :k]
preds = np.zeros(probs.shape, dtype=np.int)
preds[np.arange(preds.shape[0])[:, np.newaxis], indices] = 1
return precision_score(labels, preds, average=average)
def recall_at_k(probs, labels, k, average='micro'):
indices = np.argpartition(-probs, k-1, axis=1)[:, :k]
preds = np.zeros(probs.shape, dtype=np.int)
preds[np.arange(preds.shape[0])[:, np.newaxis], indices] = 1
return recall_score(labels, preds, average=average)
def full_evaluate(pred,probs, gold, thres=0.5):
# pred = np.array(pred)
# gold = np.array(gold)
#print(pred)
micro_p, micro_r, micro_f1 = f1_score(pred, gold, thres, average='micro')
macro_p,macro_r,macro_f1= f1_score(pred, gold, thres, average='macro')
# micro_auc_pr= auc_pr(pred, gold, average='micro')
# macro_auc_pr= auc_pr(pred, gold, average='macro')
micro_auc_roc= auc_roc(pred, gold, average='micro')
macro_auc_roc= auc_roc(pred, gold, average='macro')
precision_8= precision_at_k(probs, gold, 8, average='micro')
precision_40= precision_at_k(probs, gold, 40, average='micro')
recall_8= recall_at_k(probs, gold, 8, average='micro')
recall_40=recall_at_k(probs, gold, 40, average='micro')
return micro_p,macro_p,micro_r,macro_r,micro_f1,macro_f1,micro_auc_roc,macro_auc_roc,precision_8,precision_40,recall_8,recall_40
def jaccrad(predList, referList): # referList is the reference sentence, predList the candidate
    grams_reference = set(predList) # deduplicate; use a list here if duplicates should count
    grams_model = set(referList)
    temp = 0
    for i in grams_reference:
        if i in grams_model:
            temp = temp + 1
    fenmu = len(grams_model) + len(grams_reference) - temp # size of the union
    jaccard_coefficient = temp*1.0 / fenmu # |intersection| / |union|
return jaccard_coefficient
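# Added sketch (toy arrays, not from the original file): exercising the
# metrics above on a tiny 2 x 4 multi-label example.
if __name__ == '__main__':
    gold = np.array([[1, 0, 1, 0], [0, 1, 0, 0]])
    probs = np.array([[0.9, 0.2, 0.7, 0.1], [0.1, 0.8, 0.3, 0.2]])
    preds = (probs >= thres).astype(np.int32)
    print(f1_score(preds, gold, thres)) # micro precision/recall/F1
    print(precision_at_k(probs, gold, 2)) # precision@2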
|
[
"[email protected]"
] | |
a58740e2a6ef0f1c5c1c2d3373a3d57e3b7311d6
|
e6904315fef720d562727c259fe55edcaaf2f84b
|
/src/orion/core/io/evc_builder.py
|
01094146ed00e9b0623a8a0adf56c0ef4a18b01b
|
[
"BSD-3-Clause"
] |
permissive
|
mnoukhov/orion
|
c93c4655f6b1b6358f8ead78a3adbe9d871785c7
|
7849d77344e84ec805207cf4148aecf6f7d6b3d7
|
refs/heads/master
| 2020-03-25T05:37:54.251082 | 2019-08-19T17:33:15 | 2019-08-19T17:33:15 | 143,457,714 | 0 | 0 |
NOASSERTION
| 2018-10-31T02:37:32 | 2018-08-03T17:55:57 |
Python
|
UTF-8
|
Python
| false | false | 2,275 |
py
|
# -*- coding: utf-8 -*-
# pylint:disable=protected-access
"""
:mod:`orion.core.io.evc_builder` -- Builder of experiment version control tree
==============================================================================
.. module:: experiment
:platform: Unix
:synopsis: Builder of the experiment version control tree
The EVCBuilder takes care of building a main experiment along with an EVC tree and connect them
together.
A user can define a root and some leafs that should be the extremums of the tree. Those can be
different than the actual root and leafs of the global EVC tree, making the trimmed version a small
subset of the global version.
"""
from orion.core.evc.experiment import ExperimentNode
from orion.core.io.experiment_builder import ExperimentBuilder
class EVCBuilder(object):
"""Builder of experiment version control trees using
:class:`orion.core.evc.experiment.ExperimentNode`
.. seealso::
`orion.core.io.experiment_builder` for more information on the process of building
experiments.
:class:`orion.core.evc.experiment`
:class:`orion.core.worker.experiment`
"""
# pylint:disable=no-self-use
def connect_to_version_control_tree(self, experiment):
"""Build the EVC and connect the experiment to it"""
experiment_node = ExperimentNode(experiment.name, experiment=experiment)
experiment.connect_to_version_control_tree(experiment_node)
def build_view_from(self, cmdargs):
"""Build an experiment view based on global config and connect it to the EVC"""
experiment_view = ExperimentBuilder().build_view_from(cmdargs)
self.connect_to_version_control_tree(experiment_view)
return experiment_view
def build_from(self, cmdargs):
"""Build an experiment based on config and connect it to the EVC"""
experiment = ExperimentBuilder().build_from(cmdargs)
self.connect_to_version_control_tree(experiment)
return experiment
def build_from_config(self, config):
"""Build an experiment based on given config and connect it to the EVC"""
experiment = ExperimentBuilder().build_from_config(config)
self.connect_to_version_control_tree(experiment)
return experiment
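# Usage sketch (added; `cmdargs` stands for Orion's parsed CLI arguments, an
# assumption here):
#
#   builder = EVCBuilder()
#   experiment = builder.build_from(cmdargs) # experiment linked to the EVC tree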
|
[
"[email protected]"
] | |
a06e77569bb9fc552a12e6e6f5ee56d5c33ebea1
|
602bdbd1d8ef4d36ccfdcae5756bc8e448d30584
|
/share/basiccms/web/checkout.py
|
86bb792ceeb5be2a1dd97fafe87b116f9d8f365f
|
[] |
no_license
|
timparkin/timparkingallery
|
1136027bf9cfbad31319958f20771a6fdc9f5fc4
|
6e6c02684a701817a2efae27e21b77765daa2c33
|
refs/heads/master
| 2016-09-06T00:28:16.965416 | 2008-11-25T21:15:45 | 2008-11-25T21:15:45 | 12,716 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,952 |
py
|
from pollen.mail import mailutil
from twisted.internet import defer
from twisted.python import log
from nevow import url, accessors, inevow, tags as T, rend
import formal
from crux import skin, icrux
from tub.public.web.common import getStoreSession
from ecommerce.salesorder.manager import SalesOrder, SalesOrderItem
from ecommerce.salesorder.util import createSalesOrderItem
from basiccms import basket as dw_basket
from basiccms.web import common
from basiccms.web.utils import RenderFragmentMixin, RenderInheritMixin
# Assumed import (added): the original file references CommonData.Cards below
# without importing it; its real location may differ from this guess.
from basiccms.web.common import CommonData
class DetailsPage(RenderInheritMixin, RenderFragmentMixin, common.Page):
docFactory = skin.loader('CheckoutDetailsPage.html')
def __init__(self, avatar):
super(DetailsPage, self).__init__()
self.avatar = avatar
def getCountryOptions(self, storeSession):
data = {}
d = self.avatar.getDeliveryCountries(storeSession)
d.addCallback(lambda options: data.update({'delivery': options}))
d.addCallback(lambda ignore: self.avatar.realm.getBillingCountryOptions())
d.addCallback(lambda options: data.update({'billing': options}))
d.addCallback(lambda options: data)
return d
def form_details(self, ctx):
storeSession = getStoreSession(ctx)
d = self.getCountryOptions(storeSession)
d.addCallback(lambda options: self._build_details_form(options['billing'], options['delivery']))
return d
def _build_details_form(self, billingCountryOptions, deliveryCountryOptions):
form = formal.Form()
form.addField('firstName', formal.String(required=True, strip=True))
form.addField('lastName', formal.String(required=True, strip=True))
form.addField('phoneNumber', formal.String(required=True, strip=True))
form.addField('billingAddress1', formal.String(required=True, strip=True))
form.addField('billingAddress2', formal.String(strip=True))
form.addField('billingAddress3', formal.String(strip=True))
form.addField('billingCity', formal.String(required=True, strip=True))
form.addField('billingPostcode', formal.String(required=True, strip=True))
form.addField('billingCountry', formal.String(required=True, strip=True),
widgetFactory=formal.widgetFactory(formal.SelectChoice, options=billingCountryOptions) )
form.addField('cardType', formal.String(required=True),
formal.widgetFactory(formal.SelectChoice, CommonData.Cards))
form.addField('cardNumber', formal.String(required=True, strip=True))
form.addField('cvv2', formal.String(required=True, strip=True),
label='Card Security Code',description='last three numbers on signature strip')
form.addField('expiryDate', formal.Date(required=True),
formal.widgetFactory(formal.MMYYDatePartsInput), description='e.g. 12/05' )
form.addField('issueNumber', formal.String(strip=True),
description='for maestro and switch only')
form.addField('startDate', formal.Date(),
formal.widgetFactory(formal.MMYYDatePartsInput), description='for switch only' )
delivery = formal.Group('delivery', label='Delivery Address', description="Only enter details here if the delivery address is different from the billing address above.")
form.add( delivery )
delivery.add( formal.Field('name', formal.String(strip=True)) )
delivery.add( formal.Field('address1', formal.String(strip=True)))
delivery.add( formal.Field('address2', formal.String(strip=True)))
delivery.add( formal.Field('address3', formal.String(strip=True)))
delivery.add( formal.Field('city', formal.String(strip=True)))
delivery.add( formal.Field('postcode', formal.String(strip=True)) )
delivery.add( formal.Field('country', formal.String(strip=True),
widgetFactory=formal.widgetFactory(formal.SelectChoice, options=deliveryCountryOptions)) )
message = formal.Group('message', label='Gift Message', description="If you have chosen to use our gift wrapping service you can specify a message here")
form.add( message )
message.add( formal.Field('message', formal.String(strip=True), widgetFactory=formal.TextArea) )
form.addAction(self._confirm, label="Confirm Order")
if self.avatar.checkoutDetails:
form.data = self.avatar.checkoutDetails
elif self.avatar.customer:
form.data = {
'firstName': self.avatar.customer.first_name,
'lastName': self.avatar.customer.last_name,
'phoneNumber': self.avatar.customer.phoneNumber,
'billingAddress1': self.avatar.customer.billingAddress1,
'billingAddress2': self.avatar.customer.billingAddress2,
'billingAddress3': self.avatar.customer.billingAddress3,
'billingCity': self.avatar.customer.billingCity,
'billingPostcode': self.avatar.customer.billingPostcode,
'billingCountry': self.avatar.customer.billingCountry,
}
if self.avatar.realm.config['ecommerce']['paymentGateway'].get('use_test_data', False):
from datetime import date
from dateutil.relativedelta import relativedelta
form.data['cardType'] = 'VISA'
form.data['cardNumber'] = '4111111111111111'
form.data['cvv2'] = '432'
form.data['expiryDate'] = date.today()+relativedelta(months=6)
return form
def _confirm(self, ctx, form, data):
deliveryAddressSpecified = data['delivery.address1'] or data['delivery.address2'] or data['delivery.address3']
if data['delivery.name'] or deliveryAddressSpecified or data['delivery.city'] \
or data['delivery.postcode'] or data['delivery.country']:
if not data['delivery.name']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.name')
if not deliveryAddressSpecified:
raise formal.FieldError('All delivery details must be entered.', 'delivery.address1')
if not data['delivery.city']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.city')
if not data['delivery.postcode']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.postcode')
if not data['delivery.country']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.country')
self.avatar.checkoutDetails = data
if data['delivery.country']:
if self.avatar.basket.deliveryOptions.getCurrentCountry() != data['delivery.country'].lower():
raise formal.FieldError('Delivery country does not match basket delivery option.', 'delivery.country')
else:
if self.avatar.basket.deliveryOptions.getCurrentCountry() != data['billingCountry'].lower():
raise formal.FieldError('Delivery country does not match basket delivery option.', 'billingCountry')
return url.URL.fromContext(ctx).sibling('confirm')
class ThankYouPage(common.Page):
docFactory = skin.loader('CheckoutThankYouPage.html')
def __init__(self, avatar):
super(ThankYouPage, self).__init__()
self.avatar = avatar
def render_order_num(self, ctx, data):
order_num = inevow.IRequest(ctx).args.get('order_num', [''])[0]
return order_num
def render_tracking(self, ctx, data):
order_num = inevow.IRequest(ctx).args.get('order_num', [''])[0]
basket_value = inevow.IRequest(ctx).args.get('basket_value', [''])[0]
ctx.tag.fillSlots('order_num', order_num)
ctx.tag.fillSlots('basket_value', basket_value)
return ctx.tag
def debug(r, mess):
print '>>DEBUG', mess, r
return r
|
[
"[email protected]"
] | |
890a0e4832d87c843d5509306210f0da7f740075
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/TSZLMM/YW_TSZLMM_SZXJ_085.py
|
aee9b54b61b3b19aec3adc52e31a8f6ab6a2da24
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,096 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_TSZLMM_SZXJ_085(xtp_test_case):
# YW_TSZLMM_SZXJ_085
def test_YW_TSZLMM_SZXJ_085(self):
        title = 'Default 3: order price beyond the price-limit range - SZ A-share limit sell below the limit-down price (limit-down price - 0.02)'
        # Define the expected values for the current test case
        # Expected states: initial / not filled / partially filled / fully filled /
        # partial-cancel reported / partially cancelled / reported pending cancel /
        # cancelled / rejected / cancel-rejected / internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '废单',
'errorID': 11010122,
'errorMSG': queryOrderErrorMsg(11010122),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter info ------------------------------------------
        # Params: ticker, market, security type, security status, trading status, side (B buy / S sell), expected state, Api
stkparm = QueryStkPriceQty('003154', '2', '0', '10', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stkparm['跌停价']-0.02,
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('Execution result: ' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
48c38008dc8f830780911cc0ffbe98050fe9f2b8
|
337815ff32ebbf6e8dd2606f69d66e8efda4cd03
|
/epi_judge_python_solutions/is_string_palindromic_punctuation.py
|
8a74011a9f894f17696adcf9b67b7a1ac42109d9
|
[] |
no_license
|
federicociner/epi
|
b85eefbf5f5bad77e2e780ffbf4ac4f9ca0809a8
|
32f2a1056353bca55d0d5839be5e0b73809cb45d
|
refs/heads/master
| 2020-12-19T09:22:43.430370 | 2020-02-04T02:34:53 | 2020-02-04T02:34:53 | 235,693,872 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
from test_framework import generic_test
def is_palindrome(s: str) -> bool:
# i moves forward, and j moves backward.
i, j = 0, len(s) - 1
while i < j:
# i and j both skip non-alphanumeric characters.
while not s[i].isalnum() and i < j:
i += 1
while not s[j].isalnum() and i < j:
j -= 1
if s[i].lower() != s[j].lower():
return False
i, j = i + 1, j - 1
return True
def is_palindrome_pythonic(s):
return all(
a == b
for a, b in zip(
map(str.lower, filter(str.isalnum, s)),
map(str.lower, filter(str.isalnum, reversed(s))),
)
)
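# Quick sanity checks (added; not part of the original test harness):
#   is_palindrome('A man, a plan, a canal: Panama') -> True
#   is_palindrome('race a car') -> False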
if __name__ == "__main__":
exit(
generic_test.generic_test_main(
"is_string_palindromic_punctuation.py",
"is_string_palindromic_punctuation.tsv",
is_palindrome,
)
)
|
[
"[email protected]"
] | |
b0f33f7fcb55a25559aa9ec6e4005f66fd5f16e2
|
93e8c89c7d83c00280c32ea9f5330d3d4cf9a6d9
|
/ch_10_oops/03_instance_class_attributes.py
|
15f55e56c13b45b76c5dba6b6df9c1a4364bb31a
|
[] |
no_license
|
vidhisharma1212/oops
|
1d76940d084b3828db6f4bd9093ee18a8e512183
|
fb4252683c652a18c818948dd328c8903f2d04ee
|
refs/heads/main
| 2023-07-01T21:28:46.823861 | 2021-08-09T08:40:54 | 2021-08-09T08:40:54 | 393,379,828 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 367 |
py
|
class Employee:
company= 'Google'
# salary= 900
vid= Employee()
ron= Employee()
shyam= Employee()
# vid.salary= 300
# ron.salary= 500
Employee.salary= 900
print(vid.salary)
print(ron.salary)
print(shyam.salary)
shyam.salary=100000
print(shyam.salary)
print(vid.company)
print(ron.company)
Employee.company= 'Youtube'
print(vid.company)
print(ron.company)
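# Added note: attribute lookup goes instance -> class, so the assignment to
# shyam.salary above created an instance attribute that shadows the class
# attribute, while vid and ron still read the class-level value.
print(Employee.__dict__['company']) # class attribute: 'Youtube'
print(shyam.__dict__) # instance attributes only: {'salary': 100000}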
|
[
"[email protected]"
] | |
eee813bc568246e104455e856e8360de7d16c2cb
|
975fb6bf66c878415a880003c2d635163cb664d8
|
/qoc/standard/functions/convenience.py
|
35ea6b8916c0317e9234a845118aaab8e03ea59a
|
[
"MIT"
] |
permissive
|
SchusterLab/qoc
|
8833628a9b7df3727b982b667310059563dfded7
|
36d615170effc1b705d4543d92f979e511edfec2
|
refs/heads/master
| 2023-06-07T07:49:33.720205 | 2023-03-12T20:19:55 | 2023-03-12T20:19:55 | 198,457,530 | 12 | 14 |
MIT
| 2021-05-10T02:23:11 | 2019-07-23T15:24:41 |
Python
|
UTF-8
|
Python
| false | false | 2,797 |
py
|
"""
convenience.py - definitions of common computations
All functions in this module that are exported,
i.e. those that don't begin with '_', are autograd compatible.
"""
from functools import reduce
from autograd.extend import defvjp, primitive
import autograd.numpy as anp
import numpy as np
import scipy.linalg as la
### COMPUTATIONS ###
def commutator(a, b):
"""
Compute the commutator of two matrices.
Arguments:
a :: numpy.ndarray - the left matrix
b :: numpy.ndarray - the right matrix
Returns:
_commutator :: numpy.ndarray - the commutator of a and b
"""
commutator_ = anp.matmul(a, b) - anp.matmul(b, a)
return commutator_
def conjugate_transpose(matrix):
"""
Compute the conjugate transpose of a matrix.
Args:
matrix :: numpy.ndarray - the matrix to compute
the conjugate transpose of
operation_policy :: qoc.OperationPolicy - what data type is
used to perform the operation and with which method
Returns:
_conjugate_tranpose :: numpy.ndarray the conjugate transpose
of matrix
"""
conjugate_transpose_ = anp.conjugate(anp.swapaxes(matrix, -1, -2))
return conjugate_transpose_
def krons(*matrices):
"""
Compute the kronecker product of a list of matrices.
Args:
matrices :: numpy.ndarray - the list of matrices to
compute the kronecker product of
operation_policy :: qoc.OperationPolicy - what data type is
used to perform the operation and with which method
"""
krons_ = reduce(anp.kron, matrices)
return krons_
def matmuls(*matrices):
"""
Compute the kronecker product of a list of matrices.
Args:
matrices :: numpy.ndarray - the list of matrices to
compute the kronecker product of
operation_policy :: qoc.OperationPolicy - what data type is
used to perform the operation and with which method
"""
matmuls_ = reduce(anp.matmul, matrices)
return matmuls_
def rms_norm(array):
"""
Compute the rms norm of the array.
Arguments:
array :: ndarray (N) - The array to compute the norm of.
Returns:
norm :: float - The rms norm of the array.
"""
square_norm = anp.sum(array * anp.conjugate(array))
size = anp.prod(anp.shape(array))
rms_norm_ = anp.sqrt(square_norm / size)
return rms_norm_
### ISOMORPHISMS ###
# A row vector is np.array([[0, 1, 2]])
# A column vector is np.array([[0], [1], [2]])
column_vector_list_to_matrix = (lambda column_vector_list:
anp.hstack(column_vector_list))
matrix_to_column_vector_list = (lambda matrix:
anp.stack([anp.vstack(matrix[:, i])
for i in range(matrix.shape[1])]))
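# Minimal usage sketch (added for illustration; the 2x2 Pauli matrices below
# are assumptions, not part of the original module):
if __name__ == "__main__":
    _sx = np.array([[0., 1.], [1., 0.]])
    _sz = np.array([[1., 0.], [0., -1.]])
    print(commutator(_sx, _sz))          # [X, Z] = XZ - ZX
    print(krons(_sx, _sz).shape)         # (4, 4)
    print(rms_norm(np.array([3., 4.])))  # sqrt((9 + 16) / 2) ~= 3.5355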
|
[
"[email protected]"
] | |
2b112ccb194f9ad783c20cb17572fb0072f813b1
|
14804b282e567bf45c974b9a55cbdfa1907c5958
|
/16_Standard_Library/A_Modules/_turtle_04.py
|
cd5ff64ebe782eaa5fac54f84e57482e0cd772a7
|
[
"MIT"
] |
permissive
|
Oscar-Oliveira/Python-3
|
cfdcbcf4548144fb2488625f53f76b20e4d8c5b0
|
fa791225a6810b75890d24407b73c5e1b514acbe
|
refs/heads/master
| 2021-09-26T06:27:16.367956 | 2018-10-27T10:42:21 | 2018-10-27T10:42:21 | 101,991,657 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 513 |
py
|
"""
turtle
"""
import turtle
import random
colors = ["blue", "black", "brown", "red", "orange", "green",
"yellow", "beige", "turquoise", "pink"]
wn = turtle.Screen()
turtles = [turtle.Turtle() for _ in range(10)]
for i, t in enumerate(turtles):
t.shape("turtle")
t.color(colors[i])
t.penup()
t.goto(-260, i * 30)
t.pendown()
for _ in range(100):
    for t in turtles:
t.forward(random.randint(0, 10))
wn.listen()
wn.mainloop()
|
[
"[email protected]"
] | |
2d023ce13d42aeef06d982be4b8609792e5496ca
|
23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9
|
/app/admin/__init__.py
|
b72f72aeec303c88ed6a8f146eb448e50be15bcf
|
[] |
no_license
|
Cuick/traversing
|
210fcfb1c780037de59343fffeb4fa4d3f2eae32
|
c78982580af7f63c8bff4dcb37005b7f7c682b5b
|
refs/heads/master
| 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 156 |
py
|
#-*- coding:utf-8 -*-
"""
created by server on 2014-05-26 at 11:59 AM.
"""
import action
def doWhenStop():
"""服务器关闭前的处理
"""
pass
|
[
"[email protected]"
] | |
eb905dd46d64599308b58106219fd94e874c27af
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations_async/_vpn_server_configurations_associated_with_virtual_wan_operations_async.py
|
f5c8445e1d311bde344a60cba25058ed763eeb77
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 |
MIT
| 2020-06-16T16:38:15 | 2019-08-30T21:08:55 |
Python
|
UTF-8
|
Python
| false | false | 7,435 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnServerConfigurationsAssociatedWithVirtualWanOperations:
"""VpnServerConfigurationsAssociatedWithVirtualWanOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _list_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs
) -> "models.VpnServerConfigurationsResponse":
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnServerConfigurationsResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._list_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnServerConfigurationsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnServerConfigurations'} # type: ignore
async def begin_list(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs
) -> "models.VpnServerConfigurationsResponse":
"""Gives the list of VpnServerConfigurations associated with Virtual Wan in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN whose associated VpnServerConfigurations is
needed.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: VpnServerConfigurationsResponse, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.VpnServerConfigurationsResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnServerConfigurationsResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnServerConfigurationsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnServerConfigurations'} # type: ignore
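# Hedged usage sketch (added; the client class and operation-group attribute
# names below are assumptions based on azure-mgmt-network conventions, not
# confirmed by this file):
#     client = NetworkManagementClient(credential, subscription_id)
#     poller = await client.vpn_server_configurations_associated_with_virtual_wan.begin_list(
#         resource_group_name="my-rg", virtual_wan_name="my-wan")
#     configs = await poller.result()  # models.VpnServerConfigurationsResponse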
|
[
"[email protected]"
] | |
4c36661bdaf9f097617a68f74fd7e9c443e2b16d
|
b95fa99bb1ba2210b73251614d2613363c37f932
|
/deploy/ngram-train/scripts/main-70.py
|
ce7b6772851d4bcf8747f79598571b1f41cf57ca
|
[] |
no_license
|
lingxiao/learn-adj-relation
|
d1a8894fefc776ec0bd414b5f038361ed4b79d16
|
dc4285af19e53d7e2d015eb6394f6c601c707da0
|
refs/heads/master
| 2020-12-30T16:27:51.531268 | 2017-06-07T18:59:48 | 2017-06-07T18:59:48 | 87,714,049 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,128 |
py
|
############################################################
# Module : A series of measures on the graph for experiments
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################
import os
import re
import networkx as nx
from utils import *
from scripts import *
from app.config import PATH
############################################################
'''
paths
'''
_root = os.path.join(PATH['directories']['deploy'], 'ngram-train')
_word_pair_dir = os.path.join(_root, 'pairs')
_output_dir = os.path.join(_root, 'outputs')
_script_dir = os.path.join(_root ,'scripts')
'''
@Use: collect ngram counts
'''
batch = 70
word_pair_path = os.path.join(_word_pair_dir , 'batch-' + str(batch) + '.txt')
pattern_path = PATH['assets']['patterns']
ngram_dir = PATH['ngrams']['full']
out_dir = _output_dir
log_dir = PATH['directories']['log']
collect_ngram_patterns( word_pair_path
, pattern_path
, ngram_dir
, out_dir
, log_dir
, debug = False)
|
[
"[email protected]"
] | |
73687bc070d5f0a867ecaa764f11fb3fba7ed95d
|
28be2173e5590cc5b03119e9b83c57980e6a7e8a
|
/studygroups/migrations/0064_split63.py
|
e49732264ba24d1eaa1b270237861d4c1c7c8b63
|
[
"MIT"
] |
permissive
|
EdgarOrnelas/learning-circles
|
cd164f123885ed2079b34ad394c9849b370563b9
|
293c849321d735aebbdcb6c65b7c92f751f9fd89
|
refs/heads/master
| 2021-01-21T20:56:35.429589 | 2017-06-16T09:20:46 | 2017-06-16T09:20:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 771 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
def set_meeting_time(apps, schema_editor):
StudyGroupMeeting = apps.get_model('studygroups', 'StudyGroupMeeting')
for meeting in StudyGroupMeeting.objects.all():
meeting.meeting_time = meeting.study_group.meeting_time
meeting.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0063_auto_20160309_1301'),
]
operations = [
migrations.AlterField(
model_name='studygroupmeeting',
name='meeting_time',
field=models.TimeField(),
),
migrations.RunPython(set_meeting_time),
]
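# Illustrative note (added): RunPython without a reverse function makes this
# migration irreversible; a no-op reverse could be supplied, e.g.
#     migrations.RunPython(set_meeting_time, migrations.RunPython.noop)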
|
[
"[email protected]"
] | |
1fa016ff9f3a7768af348bd01c7db7b60543d4de
|
1afe3895ae8969ccba6d45e531ab5d59b8a41696
|
/confession/user_app/migrations/0012_auto_20190121_1659.py
|
673e55aaaef40def26992ea3a95286503b101c0f
|
[] |
no_license
|
FZTeam/confession
|
72e3ca0b2ab6016055b4ad6791f5a69aa3732368
|
7808d2810c65d0be956270f15d8ca489e1a9defe
|
refs/heads/master
| 2022-12-12T08:30:37.603455 | 2019-02-25T15:56:31 | 2019-02-25T15:56:31 | 167,647,099 | 0 | 1 | null | 2022-07-06T19:59:26 | 2019-01-26T03:52:59 |
Python
|
UTF-8
|
Python
| false | false | 616 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-21 16:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_app', '0011_auto_20190121_1658'),
]
operations = [
migrations.AlterField(
model_name='user',
name='action_time',
field=models.TimeField(auto_now=True),
),
migrations.AlterField(
model_name='user',
name='create_time',
field=models.TimeField(auto_now_add=True),
),
]
|
[
"[email protected]"
] | |
c6d924a273405d47e1ca7228439de237c16e8109
|
4904acd900496b4883c2f5b4aa6b45d1ef6654c0
|
/graphgallery/utils/__init__.py
|
b41031c08476cf8f7aff25809ca677c95d3ae196
|
[
"MIT"
] |
permissive
|
blindSpoter01/GraphGallery
|
aee039edd759be9272d123463b0ad73a57e561c7
|
e41caeb32a07da95364f15b85cad527a67763255
|
refs/heads/master
| 2023-06-17T11:42:27.169751 | 2021-07-15T03:07:39 | 2021-07-15T03:07:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
from .context_manager import nullcontext
from .raise_error import raise_if_kwargs
from .tqdm import tqdm
from .progbar import Progbar
from .misc import *
from .logger import setup_logger, get_logger
from .timeout import TimeOut
|
[
"[email protected]"
] | |
7eabe48f91e014ea0b88c898557d4b21f62f256b
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/maximum_value_of_k_coins_from_piles.py
|
61093ea2df9249ea3435755ccc5a997eed57f8bd
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 |
Python
|
UTF-8
|
Python
| false | false | 1,536 |
py
|
# https://leetcode.com/problems/maximum-value-of-k-coins-from-piles/description/
"""
There are n piles of coins on a table. Each pile consists of a positive number of coins of assorted denominations.
In one move, you can choose any coin on top of any pile, remove it, and add it to your wallet.
Given a list piles, where piles[i] is a list of integers denoting the composition of the ith pile from top to bottom, and a positive integer k, return the maximum total value of coins you can have in your wallet if you choose exactly k coins optimally.
Example 1:
Input: piles = [[1,100,3],[7,8,9]], k = 2
Output: 101
Explanation:
The above diagram shows the different ways we can choose k coins.
The maximum total we can obtain is 101.
Example 2:
Input: piles = [[100],[100],[100],[100],[100],[100],[1,1,1,1,1,1,700]], k = 7
Output: 706
Explanation:
The maximum total can be obtained if we choose all coins from the last pile.
Constraints:
n == piles.length
1 <= n <= 1000
1 <= piles[i][j] <= 105
1 <= k <= sum(piles[i].length) <= 2000
"""
def max_value_of_coins(piles: list[list[int]], k: int) -> int:
n = len(piles)
dp = [[0] * (k + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
for j in range(1, k + 1):
pile_sum = 0
for x in range(len(piles[i - 1])):
if j >= x + 1:
pile_sum += piles[i - 1][x]
dp[i][j] = max(dp[i][j], dp[i - 1][j - x - 1] + pile_sum)
dp[i][j] = max(dp[i][j], dp[i - 1][j])
return dp[n][k]
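# Illustrative self-check against the examples above (added; not in the
# original file):
if __name__ == "__main__":
    assert max_value_of_coins([[1, 100, 3], [7, 8, 9]], 2) == 101
    assert max_value_of_coins(
        [[100], [100], [100], [100], [100], [100], [1, 1, 1, 1, 1, 1, 700]], 7
    ) == 706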
|
[
"[email protected]"
] | |
f6cfbb1b55ec14f10d8504e6f9cfc2d5e037a025
|
8efe56ee34c455a6b1336897f6d457acbc9c10f9
|
/examples/tf/trpo_cartpole_batch_sampler.py
|
0123ededecc2bc226c82064afb576a0c3b154b04
|
[
"MIT"
] |
permissive
|
neurips2020submission11699/metarl
|
ab18d11e708bf569d76cb2fab2bcce089badd111
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
refs/heads/master
| 2022-10-15T22:03:09.948673 | 2020-06-11T19:22:55 | 2020-06-11T19:30:58 | 268,410,657 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,140 |
py
|
#!/usr/bin/env python3
"""This is an example to train a task with parallel sampling."""
import click
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.policies import CategoricalMLPPolicy
from metarl.tf.samplers import BatchSampler
@click.command()
@click.option('--batch_size', type=int, default=4000)
@click.option('--max_path_length', type=int, default=100)
@wrap_experiment
def trpo_cartpole_batch_sampler(ctxt=None,
seed=1,
batch_size=4000,
max_path_length=100):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_path_length (int): Number of timesteps to truncate paths to.
"""
set_seed(seed)
n_envs = batch_size // max_path_length
with LocalTFRunner(ctxt, max_cpus=n_envs) as runner:
env = MetaRLEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo=algo,
env=env,
sampler_cls=BatchSampler,
sampler_args={'n_envs': n_envs})
        runner.train(n_epochs=100, batch_size=batch_size, plot=False)
trpo_cartpole_batch_sampler()
|
[
"[email protected]"
] | |
b2b4971f6f115b35ab3d38e85042808deb3e4102
|
5182897b2f107f4fd919af59c6762d66c9be5f1d
|
/.history/src/Simulador_20200708111002.py
|
e4355d8041f85f51b628df0e95950022e25c13c9
|
[
"MIT"
] |
permissive
|
eduardodut/Trabalho_final_estatistica_cd
|
422b7e702f96291f522bcc68d2e961d80d328c14
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
refs/heads/master
| 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,326 |
py
|
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo
import random
from itertools import permutations
class Simulador():
def __init__(
self,
            tamanho_matriz,            # number of rows/columns of the spherical matrix
            percentual_inicial_tipo1,  # initial share of the population infected as type 1
            percentual_inicial_tipo2,  # initial share of the population infected as type 2
            chance_infeccao,           # chance a type-2 infected has of infecting a healthy individual
            chance_infeccao_tipo2,     # chance an infected individual becomes contagious
            chance_morte,              # chance a type-2 individual dies at the end of an update
            atualizacoes_cura):        # number of updates needed to cure a type-1 or type-2 individual
self.num_atualizacoes = 0
self.individuos_infectados_tipo_2 = []
self.individuos_infectados_tipo_1 = []
self.individuos_infectados_curados = []
self.individuos_infectados_mortos = []
self.matriz_individuos = np.zeros([tamanho_matriz,tamanho_matriz])
self.fabrica_individuo = Fabrica_individuo(
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,
atualizacoes_cura)
        # object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        # dataframe that stores the results of each update
self.dataframe = pd.DataFrame(dict, index = [0])
self.popular(tamanho_matriz)
    def popular(self, tamanho_matriz):
        # list of possible index combinations in the data matrix
        permutacoes = permutations(list(range(tamanho_matriz)), 2)
        lista_indices = list(permutacoes)
        random.shuffle(lista_indices)
        # create the first type-1 infected individual:
        indiv = self.fabrica_individuo.criar_individuo(
            Individuo.INFECTADO_TIPO_1,
            (lista_indices[0][0], lista_indices[0][1]))
        self.individuos_infectados_tipo_1.append(indiv)
        # assumes the matrix stores each cell's numeric status code
        self.matriz_individuos[lista_indices[0][0], lista_indices[0][1]] = Individuo.INFECTADO_TIPO_1
        # create the remaining type-1 infected (left unfinished in this snapshot):
        for i in range(1, self.num_inicial_tipo1):
            pass
        # create the remaining type-2 infected:
        for indice in lista_indices[1:self.num_inicial_tipo2 - 2]:
            print(indice)
        # create the healthy population:
        for i in lista_indices[0:]:
            print(i)
class Fabrica_individuo():
def __init__(
self,
        chance_infeccao,        # chance a type-2 infected has of infecting a healthy individual
        chance_infeccao_tipo2,  # chance an infected individual becomes contagious
        chance_morte,           # chance a type-2 individual dies at the end of an update
        atualizacoes_cura):     # number of updates needed to cure a type-1 or type-2 individual
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
def criar_individuo(self, status_inicial, posicao):
return Individuo(
status_inicial,
self.chance_infeccao,
self.chance_infeccao_tipo2,
self.chance_morte,
self.atualizacoes_cura,
posicao)
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.05
percentual_inicial_tipo2 = 0.01
sim = Simulador(
    1000,
    percentual_inicial_tipo1,
    percentual_inicial_tipo2,
    chance_infeccao,
    chance_infeccao_tipo2,
    chance_morte,
    atualizacoes_cura)
ind = sim.fabrica_individuo.criar_individuo(Individuo.MORTO, (0,0))
dict = {'num_sadios':1,
'num_infect_t1':2,
'num_infect_t2':3,
'num_curados':4,
'num_mortos':5}
s = pd.Series(dict)
sim.dataframe = sim.dataframe.append(s, ignore_index=True)
print(sim.dataframe)
#print(sim.num_inicial_tipo2)
|
[
"[email protected]"
] | |
00939f6d74f528a23ab0e586d52057ea17789070
|
ffadf9541d01cf9af20c419759d48b1eb01bfd35
|
/pachong/PCdemo1/day15/股市行情定点爬取.py
|
842ff30d11cf43f28fc448cc022cde009269ba8e
|
[] |
no_license
|
1987617587/lsh_py
|
b1bb1016eaafcba03bbc4a5310c1db04ae227af4
|
80eb5175cd0e5b3c6c5e2ebb906bb78d9a8f9e0d
|
refs/heads/master
| 2021-01-02T05:14:31.330287 | 2020-06-20T05:18:23 | 2020-06-20T05:18:23 | 239,498,994 | 2 | 1 | null | 2020-06-07T23:09:56 | 2020-02-10T11:46:47 |
Python
|
UTF-8
|
Python
| false | false | 5,975 |
py
|
# author:lsh
# datetime:2020/4/13 19:56
'''
.::::. _oo0oo_
.::::::::. o8888888o
::::::::::: 88" . "88
..:::::::::::' (| -_- |)
'::::::::::::' 0\ = /0
.:::::::::: ___/`---'\___
'::::::::::::::.. .' \\| |# '.
..::::::::::::. / \\||| : |||# \
``:::::::::::::::: / _||||| -:- |||||- \
::::``:::::::::' .:::. | | \\\ - #/ | |
::::' ':::::' .::::::::. | \_| ''\---/'' |_/ |
.::::' :::: .:::::::'::::. \ .-\__ '-' ___/-. /
.:::' ::::: .:::::::::' ':::::. ___'. .' /--.--\ `. .'___
.::' :::::.:::::::::' ':::::. ."" '< `.___\_<|>_/___.' >' "".
.::' ::::::::::::::' ``::::. | | : `- \`.;`\ _ /`;.`/ - ` : | |
...::: ::::::::::::' ``::. \ \ `_. \_ __\ /__ _/ .-` / /
```` ':. ':::::::::' ::::.. `-.____`.___ \_____/___.-`___.-'
'.:::::' ':'````.. `=---='
女神保佑 永无BUG 佛祖保佑 永无BUG
'''
from celery import Celery
from celery.schedules import crontab
import requests
import demjson
import pymysql
import time
import random
import math
import re
uri = 'redis://@127.0.0.1:6379/7'
app = Celery('tasks', broker=uri)
# run every day at 15:30
c1 = crontab(minute=30, hour=15)
@app.task
def goto_request(count_url):
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='py1911')
cur = conn.cursor()
# count_url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount'
data_url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData'
type_ls = ['sh_a', 'sh_b', 'sz_a', 'sz_b', 'sh_z', 'sz_z']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
}
pat_1 = re.compile(r'(\d+)')
size = 40
for type in type_ls:
        # request the stock count for the given category
param1 = {
'data': type
}
html = requests.get(count_url, params=param1, headers=headers).text
count = int(pat_1.search(html).group(1))
page_count = math.ceil(count / size)
print('count:', count, 'page_count:', page_count)
        # request stock info for each page of each category
for page in range(1, page_count + 1):
param2 = {
'page': page,
'num': 40,
'sort': 'symbol',
'asc': 1,
'data': type,
'symbol': '',
'_s_r_a': 'init',
}
print('type:', type, 'page:', page)
html = requests.get(data_url, params=param2, headers=headers).text
# print(html)
ls = demjson.decode(html)
for each in ls:
symbol = each['symbol']
print('symbol:', symbol)
code = each['code']
print(f'code:{code}')
name = each['name']
print('name:', name)
trade = each['trade']
print('trade:', trade)
pricechange = each['pricechange']
print('pricechange:', pricechange)
changepercent = each['changepercent']
print('changepercent:', changepercent)
buy = each['buy']
print('buy:', buy)
sell = each['sell']
print('sell:', sell)
settlement = each['settlement']
print(f'settlement:{settlement}')
open = each['open']
print('open:', open)
high = each['high']
print('high:', high)
low = each['low']
print('low:', low)
volume = each['volume']
print('volume:', volume)
amount = each['amount']
print('amount:', amount)
ticktime = each['ticktime']
print('ticktime:', ticktime)
print('=' * 200)
strsql = 'insert into finance VALUES(0,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
params = [symbol, code, name, trade, pricechange, changepercent, buy, sell, settlement, open, high, low]
cur.execute(strsql, params)
conn.commit()
time.sleep(random.random())
cur.close()
conn.close()
    return 'Crawl succeeded'
app.conf.beat_schedule = {
    'send-every-15-hours': {
        # name of the registered task
        'task': 'tasks.goto_request',
        # timing: every 30 seconds here; use c1 for the daily 15:30 run
        'schedule': 30.0,
        # 'schedule': c1,
        # arguments required by the task function
'args': ('http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount',)
},
}
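# Hedged usage note (added; '<module>' is a placeholder for whatever name this
# file is imported under -- not confirmed here). Run a worker plus the beat
# scheduler so the schedule above actually fires:
#     celery -A <module> worker --loglevel=info
#     celery -A <module> beat --loglevel=info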
|
[
"[email protected]"
] | |
59fecb3751deccbc1893af6d99c785ffb06deb69
|
7fae887c73cc6df8de886897114eb07c28674fa7
|
/cs101/unit27/27_4.py
|
7f5cedd2fc201fd3745d03708f744ca64942ee95
|
[] |
no_license
|
thinkreed/pyf
|
24960e3523be3c2e1661608f60f866a5cd7e747f
|
04f5e7f46aa95719f848bb64a512458895147da3
|
refs/heads/master
| 2022-12-10T17:43:49.709946 | 2019-06-13T01:13:00 | 2019-06-13T01:13:00 | 109,550,040 | 2 | 0 | null | 2022-12-08T05:14:52 | 2017-11-05T03:53:51 |
Python
|
UTF-8
|
Python
| false | false | 972 |
py
|
english = {1: "January", 2: "February", 3: "March", 4: "April", 5: "May",
6: "June", 7: "July", 8: "August", 9: "September", 10: "October",
11: "November", 12: "December"}
swedish = {1: "januari", 2: "februari", 3: "mars", 4: "april", 5: "maj",
6: "juni", 7: "juli", 8: "augusti", 9: "september", 10: "oktober",
11: "november", 12: "december"}
def date_converter(month_dictionary, date):
start = date.find('/')
month = month_dictionary[int(date[:start])]
end = date.find('/', start + 1)
day = date[start + 1:end]
year = date[end + 1:]
return day + ' ' + month + ' ' + year
def date_converter2(month_dictionary, date):
month, day, year = date.split('/')
return day + ' ' + month_dictionary[int(month)] + ' ' + year
print(date_converter(english, '5/11/2012'))
print(date_converter(english, '5/11/12'))
print(date_converter(swedish, '5/11/2012'))
print(date_converter2(swedish, '12/5/1791'))
|
[
"[email protected]"
] | |
251f4b94234d2cacbf822e32f19aae0edea50f0f
|
090fd16451ef226f0660d4be797c8a5fbf309f97
|
/training_data/whole_image/to_tensors/metadata_to_grid_tensor.py
|
4ca081192a85d619c3672f048fe0bd5599b745bb
|
[] |
no_license
|
philip-brohan/Robot_Rainfall_Rescue
|
2cdd20131a1ceae4e4af54059381b815f1cc138b
|
4121d69aba6c8d180b57d92a0da11d09cd6843b4
|
refs/heads/master
| 2023-04-13T10:07:08.160001 | 2021-04-21T10:24:17 | 2021-04-21T10:24:17 | 267,624,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,783 |
py
|
#!/usr/bin/env python
# Make a tensor containing grid-cell corner locations from the image metadata
import os
import sys
import math
import tensorflow as tf
import numpy
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--rootd", help="root directory", type=str, required=True)
parser.add_argument("--docn", help="Document name", type=str, required=True)
args = parser.parse_args()
# Load the metadata
with open(
"%s/meta/%s.pkl" % (args.rootd, args.docn),
"rb",
) as pkf:
mdata = pickle.load(pkf)
# mdata is a dictionary - convert it to a class so contents are attributes
# and we can share code with tyrImage.
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
mdata = AttrDict(mdata)
# From the metadata, find the centres of the data grid
# (120*2 floats on the range 0-1)
# Functions copied from the tyrimage class - should reuse that class instead
# Rotate by angle degrees clockwise
def gRotate(self, point, angle=None, origin=None):
if angle is None:
angle = self.rotate
if angle == 0:
return point
if origin is None:
origin = gCentre(self)
ox, oy = origin[0] * self.pageWidth, origin[1] * self.pageHeight
px, py = point[0] * self.pageWidth, point[1] * self.pageHeight
angle = math.radians(angle) * -1
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx / self.pageWidth, qy / self.pageHeight
def gCentre(self):
return (
0.5 + self.xshift / self.pageWidth + (self.xscale - 1) * 0.43,
0.525 + self.yshift / self.pageHeight - (self.yscale - 1) * 0.2,
)
# Corners of grid
def topLeft(self):
return (
0.1 + self.xshift / self.pageWidth,
0.725 + self.yshift / self.pageHeight,
)
def topRight(self):
return (
0.96 + self.xshift / self.pageWidth + (self.xscale - 1) * 0.86,
0.725 + self.yshift / self.pageHeight,
)
def bottomLeft(self):
return (
0.1 + self.xshift / self.pageWidth,
0.325 + self.yshift / self.pageHeight - (self.yscale - 1) * 0.4,
)
def bottomRight(self):
return (
0.96 + self.xshift / self.pageWidth + (self.xscale - 1) * 0.86,
0.325 + self.yshift / self.pageHeight - (self.yscale - 1) * 0.4,
)
def topAt(self, x):
return (
topRight(self)[0] * x + topLeft(self)[0] * (1 - x),
topRight(self)[1] * x + topLeft(self)[1] * (1 - x),
)
def bottomAt(self, x):
return (
bottomRight(self)[0] * x + bottomLeft(self)[0] * (1 - x),
bottomRight(self)[1] * x + bottomLeft(self)[1] * (1 - x),
)
def leftAt(self, y):
return (
topLeft(self)[0] * y + bottomLeft(self)[0] * (1 - y),
topLeft(self)[1] * y + bottomLeft(self)[1] * (1 - y),
)
target = []
for yri in range(10):
x = (
mdata.monthsWidth
+ (yri + 0.5) * (1.0 - mdata.meansWidth - mdata.monthsWidth) / 10
)
tp = topAt(mdata, x)
for mni in range(12):
lft = leftAt(
mdata,
1.0
- mdata.yearHeight
- (mni + 1) * (1.0 - mdata.yearHeight - mdata.totalsHeight) / (12 + 1),
)
txp = gRotate(mdata, [tp[0], lft[1]])
target.extend(txp)
ict = tf.convert_to_tensor(target, numpy.float32)
# Output the tensor
opdir = "%s/tensors/cell-centres/" % args.rootd
if not os.path.isdir(opdir):
try: # These calls sometimes collide
os.makedirs(opdir)
except FileExistsError:
pass
# Write to file
sict = tf.io.serialize_tensor(ict)
tf.io.write_file("%s/%s.tfd" % (opdir, args.docn), sict)
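# Illustrative round-trip check (added): the serialized tensor can be read back
# with tf.io.parse_tensor; 120 cell centres x 2 coordinates -> shape (240,).
#     raw = tf.io.read_file("%s/%s.tfd" % (opdir, args.docn))
#     restored = tf.io.parse_tensor(raw, numpy.float32)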
|
[
"[email protected]"
] | |
d38333c18d45679b62018269967f8c0ac35bdc26
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/ml/azure-ai-ml/tests/automl_job/e2etests/test_automl_image_segmentation.py
|
ffccf9eebf789cc84db05ad6812bd7fd5eab6066
|
[
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 |
MIT
| 2019-07-25T22:28:52 | 2019-04-19T20:59:15 |
Python
|
UTF-8
|
Python
| false | false | 4,934 |
py
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import platform
from typing import Tuple
import pytest
from test_utilities.utils import assert_final_job_status, get_automl_job_properties
from azure.ai.ml import MLClient, automl
from azure.ai.ml.constants._common import AssetTypes
from azure.ai.ml.entities import Data
from azure.ai.ml.entities._inputs_outputs import Input
from azure.ai.ml.entities._job.automl import SearchSpace
from azure.ai.ml.entities._job.automl.image import ImageInstanceSegmentationJob, ImageObjectDetectionSearchSpace
from azure.ai.ml.operations._run_history_constants import JobStatus
from azure.ai.ml.sweep import BanditPolicy, Choice, Uniform
from devtools_testutils import AzureRecordedTestCase, is_live
@pytest.mark.automle2etest
@pytest.mark.usefixtures("recorded_test")
@pytest.mark.skipif(
condition=not is_live() or platform.python_implementation() == "PyPy",
reason="Datasets downloaded by test are too large to record reliably"
)
class TestAutoMLImageSegmentation(AzureRecordedTestCase):
def _create_jsonl_segmentation(self, client, train_path, val_path):
fridge_data = Data(
path="./odFridgeObjectsMask",
type=AssetTypes.URI_FOLDER,
)
data_path_uri = client.data.create_or_update(fridge_data)
data_path = "./odFridgeObjectsMask/"
from automl_job.jsonl_converter import convert_mask_in_VOC_to_jsonl
convert_mask_in_VOC_to_jsonl(data_path, data_path_uri.path, train_path, val_path)
def test_image_segmentation_run(self, image_segmentation_dataset: Tuple[Input, Input], client: MLClient) -> None:
# Note: this test launches two jobs in order to avoid calling the dataset fixture more than once. Ideally, it
# would have sufficed to mark the fixture with session scope, but pytest-xdist breaks this functionality:
# https://github.com/pytest-dev/pytest-xdist/issues/271.
# Get training and validation data
train_path, val_path = image_segmentation_dataset
# Create jsonl file
self._create_jsonl_segmentation(client=client, train_path=train_path, val_path=val_path)
training_data = Input(type=AssetTypes.MLTABLE, path=train_path)
validation_data = Input(type=AssetTypes.MLTABLE, path=val_path)
# Make generic segmentation job
image_instance_segmentation_job = automl.image_instance_segmentation(
compute="gpu-cluster",
experiment_name="image-e2e-tests",
training_data=training_data,
validation_data=validation_data,
target_column_name="label",
primary_metric="MeanAveragePrecision",
properties=get_automl_job_properties(),
)
# Configure regular sweep job
image_instance_segmentation_job_sweep = copy.deepcopy(image_instance_segmentation_job)
image_instance_segmentation_job_sweep.set_training_parameters(early_stopping=True, evaluation_frequency=1)
image_instance_segmentation_job_sweep.extend_search_space(
[
SearchSpace(
model_name=Choice(["maskrcnn_resnet50_fpn"]),
learning_rate=Uniform(0.0001, 0.001),
optimizer=Choice(["sgd", "adam", "adamw"]),
min_size=Choice([600, 800]),
),
]
)
image_instance_segmentation_job_sweep.set_limits(max_trials=1, max_concurrent_trials=1)
image_instance_segmentation_job_sweep.set_sweep(
sampling_algorithm="Random",
early_termination=BanditPolicy(evaluation_interval=2, slack_factor=0.2, delay_evaluation=6),
)
# Configure AutoMode job
image_instance_segmentation_job_automode = copy.deepcopy(image_instance_segmentation_job)
# TODO: after shipping the AutoMode feature, do not set flag and call `set_limits()` instead of changing
# the limits object directly.
image_instance_segmentation_job_automode.properties["enable_automode"] = True
image_instance_segmentation_job_automode.limits.max_trials = 2
image_instance_segmentation_job_automode.limits.max_concurrent_trials = 2
# Trigger regular sweep and then AutoMode job
submitted_job_sweep = client.jobs.create_or_update(image_instance_segmentation_job_sweep)
submitted_job_automode = client.jobs.create_or_update(image_instance_segmentation_job_automode)
# Assert completion of regular sweep job
assert_final_job_status(submitted_job_sweep, client, ImageInstanceSegmentationJob, JobStatus.COMPLETED)
# Assert completion of Automode job
assert_final_job_status(submitted_job_automode, client, ImageInstanceSegmentationJob, JobStatus.COMPLETED)
|
[
"[email protected]"
] | |
2f3ff636fb34d745ffaa5ea9b022823502f5d43c
|
665455c521cc7cf76c5436337ed545de90976af4
|
/cohesity_management_sdk/models/subscription_type_azure_credentials_enum.py
|
3089b273d3faa936de0e61a8f028a28e860b1215
|
[
"Apache-2.0"
] |
permissive
|
hsantoyo2/management-sdk-python
|
d226273bc8eedcf9220ea4999a6f0b9a1a30d99c
|
0093194d125fc6746f55b8499da1270c64f473fc
|
refs/heads/master
| 2023-03-01T06:09:39.644085 | 2021-01-15T08:23:16 | 2021-01-15T08:23:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 683 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
class SubscriptionTypeAzureCredentialsEnum(object):
"""Implementation of the 'SubscriptionType_AzureCredentials' enum.
Specifies the subscription type of Azure such as 'kAzureCommercial' or
'kAzureGovCloud'.
Specifies the subscription type of an Azure source entity.
'kAzureCommercial' indicates a standard Azure subscription.
'kAzureGovCloud' indicates a govt Azure subscription.
Attributes:
KAZURECOMMERCIAL: TODO: type description here.
KAZUREGOVCLOUD: TODO: type description here.
"""
KAZURECOMMERCIAL = 'kAzureCommercial'
K_AZURE_GO_VCLOUD = 'kAzureGovCloud'
|
[
"[email protected]"
] | |
48f0854884ecd2500df38b72140371a1a933f8b9
|
a80295f0d7d6c6c8ea5c0dce3929206eef2dfa66
|
/server.py
|
ddc0415ff3af2352c792b0766c11232e6c28b67a
|
[] |
no_license
|
bkodakari/flask2
|
38425ea04d12fcefeb538761ebeb66decc104852
|
f17793af004bdd7602b6d7032ab84556a49a1317
|
refs/heads/master
| 2020-12-31T00:29:37.991325 | 2017-03-16T04:15:12 | 2017-03-16T04:15:12 | 85,152,138 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,561 |
py
|
from flask import Flask, request
from random import choice, randint
from flask import render_template
COMPLIMENTS = ["smart", "clever", "tenacious",
"awesome", "Pythonic"]
# "__name__" is a special Python variable for the name of the current module
# Flask wants to know this to know what any imported things are relative to.
app = Flask(__name__)
@app.route('/')
def index():
"""Home page."""
return "<html><body><h1>I am the landing page</h1></body></html>"
@app.route('/hello')
def say_hello():
html = """
<html>
<body>
Say hello
</body>
</html>
"""
return html
@app.route('/lucky')
def lucky_number():
    """Provides a random lucky number."""
    lucky_num = randint(1, 100)
    lucky_message = "Your lucky number is %s " % lucky_num
    return "<html><body><h1>" + lucky_message + "</h1></body></html>"
@app.route('/puppies_or_kittens')
def puppies_or_kittens():
buttons = """
<html>
<body>
<a href=/puppy> <button type='button'> Click here to see a PUPPY!</button> </a></br>
<a href=/kitten> <button type='button'> click me for a kitten! </button></a>
</body>
</html>
"""
return buttons
@app.route('/puppy')
def show_puppy():
#html with the puppy image
puppy = """
<html>
<body>
<img src = "https://ipetcompanion.com/feedapuppy/styles/media/puppy.jpg"><br>
<h3><a href =/puppies_or_kittens>Take me back!</a></h3>
</body>
</html>
"""
return puppy
#link to the route puppies or kittens
@app.route('/kitten')
def show_kitten():
kitten = """
<html>
<body>
<img src = http://s3.amazonaws.com/assets.prod.vetstreet.com/2a/cd/ee484be546418f40cc3cbc194b52/kitten-in-arms-thinkstockphotos-106397271-335lc070915jpg.jpg>
<h3><a href = /puppies_or_kittens>Take me back!</a><h3>
</body>
</html>
"""
return kitten
@app.route('/form')
def show_form():
return render_template("form.html")
@app.route('/greet')
def greet():
player = request.args.get("person")
nice_thing = choice(COMPLIMENTS)
return render_template("compliments.html",
name=player,
compliment=nice_thing)
if __name__ == '__main__':
# debug=True gives us error messages in the browser and also "reloads"
# our web app if we change the code.
app.run(debug=True)
|
[
"[email protected]"
] | |
10f794876bff9222640ce92abb3e52e1013e1415
|
a9fe1b5c320cdef138ac4a942a8b741c7f27de7c
|
/LC1302-Deepest-Leaves-Sum.py
|
c6d72946df16cfe5c4125b05fb11adb0f535d37c
|
[] |
no_license
|
kate-melnykova/LeetCode-solutions
|
a6bbb5845310ce082770bcb92ef6f6877962a8ee
|
ee8237b66975fb5584a3d68b311e762c0462c8aa
|
refs/heads/master
| 2023-06-28T06:35:33.342025 | 2021-07-30T06:59:31 | 2021-07-30T06:59:31 | 325,106,033 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,508 |
py
|
"""
Given the root of a binary tree, return the sum of values
of its deepest leaves.
Example 1:
Input: root = [1,2,3,4,5,null,6,7,null,null,null,null,8]
Output: 15
Example 2:
Input: root = [6,7,8,2,7,1,3,9,null,1,4,null,null,null,5]
Output: 19
Constraints:
(*) The number of nodes in the tree is in the range [1, 10^4].
(*) 1 <= Node.val <= 100
"""
from TreeNode import TreeNode
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def deepestLeavesSum(self, root: TreeNode) -> int:
if root is None:
return 0
level_new = [root, ]
while level_new:
level = list(level_new)
level_new = list()
for node in level:
if node.left is not None:
level_new.append(node.left)
if node.right is not None:
level_new.append(node.right)
return sum(node.val for node in level)
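# Illustrative alternative (added; not part of the original solution): the same
# deepest-leaves sum via an explicit-stack DFS that tracks each node's depth
# instead of materializing whole levels.
def deepest_leaves_sum_dfs(root: TreeNode) -> int:
    best_depth, total = -1, 0
    stack = [(root, 0)] if root else []
    while stack:
        node, depth = stack.pop()
        if node.left is None and node.right is None:  # leaf
            if depth > best_depth:
                best_depth, total = depth, node.val
            elif depth == best_depth:
                total += node.val
        if node.left:
            stack.append((node.left, depth + 1))
        if node.right:
            stack.append((node.right, depth + 1))
    return total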
if __name__ == "__main__":
import run_tests
correct_answers = [
[[1,2,3,4,5,None,6,7,None,None,None,None,8], 15],
[[6,7,8,2,7,1,3,9,None,1,4,None,None,None,5], 19]
]
for i in range(len(correct_answers)):
correct_answers[i][0] = TreeNode.to_treenode(correct_answers[i][0])
print(f"Running tests for deepestLeavesSum")
run_tests.run_tests(Solution().deepestLeavesSum, correct_answers)
|
[
"[email protected]"
] | |
e0e296b4db975b48fac91b4a435db315d68bccc5
|
86932f8c69708ebdf534c6604e5322a5496596a1
|
/tests/helpers.py
|
feef1302d75b434d46e530e499daa0eabc790c3b
|
[
"BSD-2-Clause"
] |
permissive
|
ress/flask-assets
|
3a8cd77c315840cb80528cbbf139f804ae335920
|
747aa9c0d1b036bd3cc65b5cd278e48a97ac8af2
|
refs/heads/master
| 2021-01-18T05:52:34.242630 | 2013-01-04T15:43:58 | 2013-01-04T15:43:58 | 7,623,019 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,555 |
py
|
from nose import SkipTest
from nose.tools import assert_raises
from flask.app import Flask
try:
from flask import __version__ as FLASK_VERSION
except ImportError:
FLASK_VERSION = '0.6'
from webassets.test import TempEnvironmentHelper as BaseTempEnvironmentHelper
from flask.ext.assets import Environment
try:
from flask import Blueprint
Module = None
except ImportError:
# Blueprints only available starting with 0.7,
# fall back to old Modules otherwise.
Blueprint = None
from flask import Module
__all__ = ('TempEnvironmentHelper', 'Module', 'Blueprint')
class TempEnvironmentHelper(BaseTempEnvironmentHelper):
def _create_environment(self, **kwargs):
if FLASK_VERSION < '0.7':
# Older Flask versions do not support the
# static_folder argument, which we need to use
# a temporary folder for static files, without
# having to do sys.path hacking.
raise SkipTest()
if not hasattr(self, 'app'):
self.app = Flask(__name__, static_folder=self.tempdir, **kwargs)
self.env = Environment(self.app)
return self.env
try:
from test.test_support import check_warnings
except ImportError:
# Python < 2.6
import contextlib
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
# We cannot reasonably support this, we'd have to copy to much code.
# (or write our own). Since this is only testing warnings output,
# we might slide by ignoring it.
yield
|
[
"[email protected]"
] | |
e1700a52bf7fc6f3fef9ba3960aaf57f768bad57
|
0c659d16b796e6220e93b08693e557a698c5977e
|
/setup.py
|
0e4dae03af83c6567586b12a43992f9873753b41
|
[
"MIT"
] |
permissive
|
marchon/citas
|
f4eb405d9efd9831df21b892945327bc86f8b7fe
|
73a51c0a5dff29bd2728973457d378aec40ce1cf
|
refs/heads/master
| 2021-04-15T08:01:43.520071 | 2017-05-04T22:19:00 | 2017-05-04T22:19:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 790 |
py
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in citas/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('citas/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='citas',
version=version,
description='Citas a Clientes',
author='César DiMartino',
author_email='[email protected]',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
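# Note (added, illustrative): 'pip.req.parse_requirements' was removed in
# pip 10+; a dependency-free fallback reads requirements.txt directly, e.g.
#     with open("requirements.txt") as f:
#         install_requires = [l.strip() for l in f
#                             if l.strip() and not l.startswith("#")]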
|
[
"[email protected]"
] | |
513e529b4e9e4115777f44df6972ef72a2e0e317
|
e6a4af814f41e0a6b661d8685a54ba5bd41a4baf
|
/film/dao/calc_dao.py
|
063d7d0053c70a79fc8d3343bc0e50eacd00efa5
|
[] |
no_license
|
oaifaye/film_wx
|
cdb39116748af180719bec4d2cf5af02e88235d3
|
63519b430001216f22d14a1ee6f7d528e0ce655e
|
refs/heads/master
| 2021-05-09T12:41:24.361486 | 2018-01-26T06:57:48 | 2018-01-26T06:57:48 | 119,016,941 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,281 |
py
|
# -*- coding: UTF-8 -*-
'''
Created on 2017-12-10
@author: Administrator
'''
from film.dao.base_dao import BaseDao
import datetime
class CalcItem():
id = -1
dateNo = 0
calcType=''
websiteId = 0
mergeCinemaId=None
mergeFilmId=None
initDate = None
class CalcDao(BaseDao):
def insert(self,CalcItem):
baseDao = BaseDao()
db = baseDao.getDB()
        # use the cursor() method to get an operation cursor
cursor = db.cursor()
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sql = "INSERT INTO tf_calc (date_no, calc_type, website_id, merge_cinema_id, merge_film_id, init_date) VALUES ('%s', '%s', '%s', '%s', '%s', '%s')" % \
(str(CalcItem.dateNo),str(CalcItem.calcType),str(CalcItem.websiteId),str(CalcItem.mergeCinemaId),str(CalcItem.mergeFilmId),now)
print(sql)
num = cursor.execute(sql)
baseDao.commitCloseDb(db)
# def getNoMergeCinema(self):
# cinemaItems = self.doSelect("select * from tf_cinema where merge_id is null and state=1")
# return cinemaItems
def doSelect(self,sql):
items = []
baseDao = BaseDao()
db = baseDao.getDB()
        # use the cursor() method to get an operation cursor
cursor = baseDao.getDictCursor(db)
print(sql)
num = cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
item = CalcItem()
item.id = row['id']
item.dateNo = row['date_no']
item.calcType=row['calc_type']
item.websiteId = row['website_id']
item.websiteCinemaId=row['website_cinema_id']
item.websiteFilmId=row['website_film_id']
item.initDate = row['init_date']
items.append(item)
baseDao.commitCloseDb(db)
return items
def deleteByDateNo(self,dateNo,calcType):
baseDao = BaseDao()
db = baseDao.getDB()
        # use the cursor() method to get an operation cursor
cursor = db.cursor()
sql = "delete from tf_calc where date_no='%s' and calc_type='%s' " % (dateNo,calcType)
print(sql)
num = cursor.execute(sql)
baseDao.commitCloseDb(db)
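    # Note (added, illustrative): string-formatted SQL like the statement above
    # is open to SQL injection; the parameterized equivalent would be
    #     cursor.execute("delete from tf_calc where date_no=%s and calc_type=%s",
    #                    (dateNo, calcType))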
    '''Find the top three highest-rated films across all films'''
def getGradeHiFilm(self,dateNo):
mergeFilmIds = []
baseDao = BaseDao()
db = baseDao.getDB()
        # use the cursor() method to get an operation cursor
cursor = baseDao.getDictCursor(db)
sql = "select distinct(b.merge_id) merge_film_id from \
tf_daily_film_cinema a left join tf_film b on a.website_film_id=b.website_film_id \
left join tf_merge_film c on b.merge_id=c.id \
where a.date_no='%s' \
order by a.grade desc limit 3" % (dateNo)
print(sql)
num = cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
mergeFilmIds.append(row['merge_film_id'])
baseDao.commitCloseDb(db)
return mergeFilmIds
    '''For one film, find at which cinemas and on which website it is cheapest to buy tickets'''
def getCheapCimane(self,filmMergeId,dateNo):
calcItems = []
baseDao = BaseDao()
db = baseDao.getDB()
        # use the cursor() method to get an operation cursor
cursor = baseDao.getDictCursor(db)
sql = "select a.website_id website_id, b.merge_id cinema_merge_id,d.merge_id film_merge_id \
from tf_daily_film_cinema a left join tf_cinema b on a.website_cinema_id = b.website_cinema_id \
left join tf_film d on a.website_film_id = d.website_film_id left join tf_merge_cinema f on b.merge_id=f.id \
where d.merge_id='%s' and a.date_no='%s' and f.area in ('红桥区','南开区') \
order by a.price limit 1" % (str(filmMergeId),str(dateNo))
print(sql)
num = cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
calcItem= CalcItem()
calcItem.dateNo = dateNo
calcItem.websiteId = row['website_id']
calcItem.mergeCinemaId=row['cinema_merge_id']
calcItem.mergeFilmId=row['film_merge_id']
calcItems.append(calcItem)
baseDao.commitCloseDb(db)
return calcItems
    '''Find the top three films with the most screenings across all cinemas'''
def getMostRoundFilms(self,dateNo):
mergerFilmIds = []
baseDao = BaseDao()
db = baseDao.getDB()
        # use the cursor() method to get an operation cursor
cursor = baseDao.getDictCursor(db)
sql = "select b.merge_id merge_id\
from tf_daily_film_round a left join tf_film b on a.website_film_id=b.website_film_id \
left join tf_merge_film c on b.merge_id = c.id \
where a.date_no='%s' \
group by b.merge_id \
order by (a.show_round_num) desc limit 3" % (dateNo)
print(sql)
num = cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
mergerFilmIds.append(row['merge_id'])
baseDao.commitCloseDb(db)
return mergerFilmIds
|
[
"[email protected]"
] | |
979f59f65eddf8d3e63c273c305af8eb83b1ee6f
|
0ebc1fddbdcc9ea6fca53e1d8fb77c6a892c8a19
|
/Ground-Truth-Skeletons/dataManagement/organise_val_data.py
|
eff83d74db189b84cf72649ff141e0cce0805a0a
|
[
"MIT"
] |
permissive
|
amit2014/Action-Recognition
|
4ea569bc3a82966340b97a70ba14675fb5dadf12
|
b648f4cd8e479872c0cd9488120ada18bc64e5ad
|
refs/heads/master
| 2020-05-16T05:04:01.846725 | 2018-05-05T09:00:36 | 2018-05-05T09:00:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,971 |
py
|
import numpy as np
import pickle
#####################################################################################################################
stupid_videos = [ 111, 111, 111, 747, 747, 747, 981, 981, 981,
1145, 1145, 1145, 1252, 1252, 1252, 1281, 1281, 1281,
1282, 1282, 1282, 1485, 1485, 1485, 1504, 1504, 1504,
1840, 1840, 1840, 1865, 1865, 1865, 1916, 1916, 1916,
2071, 2071, 2071, 2220, 2220, 2220, 3108, 3108, 3108,
4133, 4133, 4133, 4507, 4507, 4507, 4882, 4882, 4882,
5081, 5081, 5081, 5293, 5293, 5293, 5315, 5315, 5315,
5643, 5643, 5643, 5816, 5816, 5816, 6082, 6082, 6082,
6648, 6648, 6648, 6695, 6695, 6695, 6773, 6773, 6773,
6873, 6873, 6873, 7137, 7137, 7137, 7616, 7616, 7616,
7680, 7680, 7680, 9472, 9472, 9472, 9533, 9533, 9533,
10120, 10120, 10120, 10588, 10588, 10588, 11693, 11693, 11693,
12150, 12150, 12150, 12218, 12218, 12218, 13542, 13542, 13542,
13860, 13860, 13860, 14701, 14701, 14701, 14935, 14935, 14935,
16026, 16026, 16026, 16298, 16298, 16298]
#non_stupid = np.setdiff1d(range(len(val_labes[1])),stupid_videos)
val_data = np.load(open('val_data.npy','rb'))
val_labes = pickle.load(open('val_label.pkl','rb'))
print(val_data.shape)
print(len(val_labes[1]))
non_stupid = np.setdiff1d(range(len(val_labes[1])),stupid_videos)
val_labes = np.asarray(val_labes[1])
val_labes = val_labes[non_stupid]
print(len(val_labes))
val_data = val_data[non_stupid,:,:,:,:]
val_data = val_data[np.asarray(val_labes)<49,:,:,:,0]
print(val_data.shape)
val_labes = val_labes[val_labes<49]
val_data = val_data - (val_data[:,:,0,0])[:,:,None,None]
val_data = val_data / np.linalg.norm(val_data[:,:,0,1]-val_data[:,:,0,0],axis=1)[:,None,None,None]
np.save('Final-Data/val_data.npy',val_data)
np.save('Final-Data/val_labes.npy',val_labes)
|
[
"[email protected]"
] | |
4490c6ee769209f3751f16f7c940b75c7095bdc1
|
2d28d7d23ffee8c19c5b0b5f12c2ef34a0e0eac2
|
/py_kafk/tar/pykafka-2.8.1-dev.1/tests/pykafka/test_ssl.py
|
97ac4378b0c1e61c7dc9b8fdaa981abf92fccac8
|
[
"Apache-2.0"
] |
permissive
|
liuansen/python-utils-class
|
bd8e9bb100a8b7c97ca9022ecf44cde0e79a8edd
|
c7054bd05b127385b8c6f56a4e2241d92ff42ab4
|
refs/heads/master
| 2023-05-27T22:40:00.715765 | 2022-08-08T07:33:41 | 2022-08-08T07:33:41 | 185,551,657 | 3 | 0 |
Apache-2.0
| 2023-05-22T21:37:24 | 2019-05-08T07:08:23 |
Python
|
UTF-8
|
Python
| false | false | 2,701 |
py
|
import os
import unittest
from uuid import uuid4
import pytest
from pykafka import KafkaClient, SslConfig
from pykafka.test.utils import get_cluster, stop_cluster
kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')
class SslIntegrationTests(unittest.TestCase):
USE_RDKAFKA = False
@classmethod
def setUpClass(cls):
cls.kafka = get_cluster()
if cls.kafka.brokers_ssl is None:
pytest.skip("Test-cluster doesn't advertise ssl ports.")
@classmethod
def tearDownClass(cls):
stop_cluster(cls.kafka)
def roundtrip_test(self, client):
"""Test producing then consuming
This is mostly important to test the pykafka.rdkafka classes, which
should be passed SSL settings during producer/consumer init.
"""
topic_name = uuid4().hex.encode()
payload = uuid4().hex.encode()
topic = client.topics[topic_name]
producer = topic.get_producer(use_rdkafka=self.USE_RDKAFKA, sync=True)
producer.produce(payload)
consumer = topic.get_simple_consumer(use_rdkafka=self.USE_RDKAFKA,
consumer_timeout_ms=5000)
self.assertEqual(consumer.consume().value, payload)
def test_ca_only(self):
"""Connect with CA cert only (ie no client cert)"""
config = SslConfig(cafile=self.kafka.certs.root_cert)
client = KafkaClient(self.kafka.brokers_ssl, ssl_config=config,
broker_version=kafka_version)
self.roundtrip_test(client)
def test_client_cert(self):
"""Connect with client certificate"""
# This would be a more potent test if we could on-the-fly reconfigure
# the test cluster to refuse connections without client certs, but
# that's not easy to achieve with our current setup
certs = self.kafka.certs
config = SslConfig(cafile=certs.root_cert,
certfile=certs.client_cert,
keyfile=certs.client_key,
password=certs.client_pass)
client = KafkaClient(self.kafka.brokers_ssl, ssl_config=config,
broker_version=kafka_version)
self.roundtrip_test(client)
@pytest.mark.skip(reason="Unresolved crashes")
def test_legacy_wrap_socket(self):
"""Test socket-wrapping without SSLContext"""
config = SslConfig(cafile=self.kafka.certs.root_cert)
config._wrap_socket = config._legacy_wrap_socket()
client = KafkaClient(self.kafka.brokers_ssl, ssl_config=config,
broker_version=kafka_version)
self.roundtrip_test(client)
|
[
"[email protected]"
] | |
3e0884b6637fa46e47bf58684f8b728f67568073
|
b0b5df878333beada1fa4d862be098b80ced7641
|
/AUTO_ADB/DCcore/dc_daily.py
|
31cdf81394ca668e57a9ba70585ca50c7a0cf420
|
[] |
no_license
|
hygnic/boomboost
|
054beac114f3407757a5423ed4766d36c2278911
|
dcbff7abe3300c1a4c668cf6a96370a53be99ac5
|
refs/heads/master
| 2023-06-06T00:47:48.734632 | 2021-05-28T09:58:24 | 2021-05-28T09:58:24 | 281,379,773 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 804 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# ---------------------------------------------------------------------------
# Author: LiaoChenchen
# Created on: 2020/8/13 23:00
# Reference:
"""
Description: DC daily tasks
Usage:
"""
# ---------------------------------------------------------------------------
import os
import dcutility as dc
from AUTO_ADB.conf.DClocation import General
from AUTO_ADB.conf.pathfile import ImageDaily
ims = dc.ImageMatchSet()
lt_gl = General()
image = ImageDaily("daily")
def ug():
    # enter the night world screen
    dc.humanbeing_click(lt_gl.nightworldX, lt_gl.nightworldY)
    ims.whileset(image.ug)
    dc.humanbeing_click_point(ims.point(zoom=0.1))


def re():
    ims.backhome(5)


if __name__ == '__main__':
    os.chdir("../adb")
    os.system("adb connect 127.0.0.1:21503")
    ug()
|
[
"[email protected]"
] | |
f69e6cb58e32915c9091cbd7e6e8ac7b79d70306
|
0c4103afaa218697ad6ec514ad02a6e154278d70
|
/python_data_structures/dictionaries/view_obj.py
|
35a5acede55a7861d48a95e7eee052a269e59388
|
[] |
no_license
|
jgartsu12/my_python_learning
|
e721f3fa646d2ca46c888d0fbc55ac03a48b4889
|
f971b7168e97e52fe9fd66f4b4d7c098110ca701
|
refs/heads/master
| 2020-09-07T17:45:08.747614 | 2019-11-19T00:49:23 | 2019-11-19T00:49:23 | 220,865,262 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,201 |
py
|
# Guide to Python Dictionary View Objects
# nested collections in a dictionary view object
# traversal to grab nested items in a view obj
teams = {
    "astros": ["Altuve", "Correa", "Bregman"],
    "angels": ["Trout", "Pujols"],
    "yankees": ["Judge", "Stanton"],
    "red sox": ["Price", "Betts"],
}
team_groupings = teams.items()
print(list(team_groupings)[1][1][0])  # chained lookups into the nested lists
# returned: Trout

""" tuples = (....)
[
    ('astros', ['Altuve', 'Correa', 'Bregman']),
    ('angels', ['Trout', 'Pujols']),
    ('yankees', ['Judge', 'Stanton']),
    ('red sox', ['Price', 'Betts'])
] --> convert view obj into a list
"""

# previous notes
# # view obj --> thread safety
# players = {
#     "ss" : "Correa",
#     "2b" : "Altuve",
#     "3b" : "Bregman",
#     "DH" : "Gattis",
#     "OF" : "Springer",
# }
# # thread safety: a thread-safe pattern (a senior-level habit) is to make a quick
# # copy and then perform our actions on the copy
# # using copy() lets you perform any actions you want without the shared data changing
# player_names = list(players.copy().values())
# print(player_names)  # prints the players dict values, i.e. the names of the players
# # only we can access these stored players
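# note: view objects are dynamic -- mutating teams (adding a key or editing a
# roster list) is reflected in team_groupings without calling .items() again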
|
[
"[email protected]"
] | |
960173d66646bb039683ffef01a651b16866f98a
|
f39528e9bad8cfa78b38fcbb7a5b430ac0c7a942
|
/Displaced_Dijets/DisplacedVerticesGeneratorFragments-CP2-2017/StopStopbarTo2Dbar2D_M_200_CTau_300um_TuneCP2_13TeV_pythia8_cff.py
|
6ab0832145f8e68ef40c4dbd40c98435fce13338
|
[] |
no_license
|
isildakbora/EXO-MC-REQUESTS
|
c0e3eb3a49b516476d37aa464c47304df14bed1e
|
8771e32bbec079de787f7e5f11407e9e7ebe35d8
|
refs/heads/master
| 2021-04-12T11:11:03.982564 | 2019-04-29T15:12:34 | 2019-04-29T15:12:34 | 126,622,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,652 |
py
|
M = 200
CTAU = 0.3
WIDTH = 0.0197e-11 / CTAU
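# Gamma = hbar*c / ctau: with hbar*c ~ 1.973e-13 GeV*mm, the 0.0197e-11 constant
# implies CTAU is expressed in mm (0.3 mm = 300 um, matching the sample name)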
SLHA_TABLE = '''
BLOCK SPINFO  # Spectrum calculator information
     1   Minimal    # spectrum calculator
     2   1.0.0      # version number
BLOCK MODSEL  # Model selection
     1     1   #
BLOCK MASS  # Mass Spectrum
# PDG code     mass   particle
   1000006     %E     # ~t_1
DECAY   1000006     %E   # ~t_1 decays (~t_1bar is automatically handled)
#           BR         NDA      ID1       ID2
      1.00E+00          2        -1        -1   # ~t_1 -> dbar dbar
''' % (M, WIDTH)

import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi import *

generator = cms.EDFilter('Pythia8GeneratorFilter',
    comEnergy = cms.double(13000.0),
    filterEfficiency = cms.untracked.double(1.0),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    SLHATableForPythia8 = cms.string(SLHA_TABLE),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP2SettingsBlock,
        processParameters = cms.vstring(
            'SUSY:all = off',
            'SUSY:gg2squarkantisquark = on',
            'SUSY:qqbar2squarkantisquark = on',
            'SUSY:idA = 1000006',
            'SUSY:idB = 1000006',
            'RHadrons:allow = on',
            '1000006:tau0 = %f' % CTAU,
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CP2Settings',
            'processParameters',
        ),
    ),
)
|
[
"[email protected]"
] | |
c2a197ea72a0a7ecc9e3d5d77c97281427c01913
|
98f0b4edce2cc250aa5a544b7736d0287ad339a2
|
/manage.py
|
b9a292afd158c8ae58166c5ac0081d667f32a2f0
|
[] |
no_license
|
DasomJung24/Brandi_project_with_Django
|
86e0493a5ff314ae9bddaeabf1058acd81079282
|
7e7513c20b9051aa242759c8ba69894a2bdc2fcb
|
refs/heads/master
| 2023-01-21T11:59:37.941347 | 2020-12-06T13:50:36 | 2020-12-06T13:50:36 | 314,373,085 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 677 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'brandi_project_django.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
1179594e202410df5cc796d5a05f2b4b7c7863ee
|
72a9252951271373f66c5684344f0d5dff9565be
|
/知乎/zhi_Spider/BINSHAN/db.py
|
c8ca73059fc24c3671a323b3d1ffa82a336761e2
|
[] |
no_license
|
wudangqibujie/Spider_Project
|
7b43c9e3ca8d9f73d6a6e25d5574658838152acc
|
08beae178f269743afca0e5d91e1ad6a79464047
|
refs/heads/master
| 2020-03-21T22:11:41.635169 | 2018-07-03T03:12:47 | 2018-07-03T03:12:47 | 139,110,189 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 431 |
py
|
from settings import *
import pymongo


class Mon(object):
    def __init__(self):
        self.client = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
        self.db = self.client[MONGO_DB_NAME]

    def insert(self, coll_name, data):
        self.db[coll_name].insert(data)

    def data_find(self, coll_name):
        for i in self.db[coll_name].find():
            yield i

    def close(self):
        self.client.close()
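# Minimal usage sketch (hypothetical collection name, assumes the MONGO_* settings
# exist in settings.py); note Collection.insert is deprecated in modern PyMongo in
# favour of insert_one/insert_many:
#
#   mon = Mon()
#   mon.insert('zhihu_users', {'name': 'example'})
#   for doc in mon.data_find('zhihu_users'):
#       print(doc)
#   mon.close()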
|
[
"[email protected]"
] | |
14d0a3eb36ede4f57355b53e8675474ff624c2b8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02663/s270893737.py
|
0e407da5c73554e2845ac3ce57620af88e88d312
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 304 |
py
|
def Int():
    return int(input())
def Ints():
    return map(int, input().split())
def IntList():
    return list(Ints())
def IntMat(N):
    return [IntList() for i in range(N)]
import sys
sys.setrecursionlimit(4100000)
rl = sys.stdin.readline
H1, M1, H2, M2, K = Ints()
S = (H2 - H1) * 60 + (M2 - M1)
print(S - K)
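# S is the whole awake window from H1:M1 to H2:M2 in minutes; studying takes K
# consecutive minutes, so the latest possible start leaves S - K minutes of choices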
|
[
"[email protected]"
] | |
0bf3f8859c7782f22606b4bf3c58ab6de8a17c1d
|
1903aa0028dd91a128f1630c6eb9a1f3467ed951
|
/update_constraints.py
|
21084617ca0f2cda487b66613cfe2f5e3bac850a
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause"
] |
permissive
|
icemac/icemac.addressbook
|
a5ae04feb2b2fb1f0ecc7bf3e60b1666f5bfedd8
|
6197e6e01da922feb100dd0943576523050cd703
|
refs/heads/master
| 2021-01-03T12:44:08.365040 | 2020-08-06T06:50:51 | 2020-08-06T06:51:05 | 242,137,124 | 2 | 0 |
BSD-2-Clause
| 2020-04-12T07:41:02 | 2020-02-21T12:43:45 |
Python
|
UTF-8
|
Python
| false | false | 1,003 |
py
|
"""Adapted from https://github.com/zopefoundation/Zope/blob/4aadecc/util.py."""
import os
try:
from configparser import RawConfigParser
except ImportError:
from ConfigParser import RawConfigParser
HERE = os.path.abspath(os.path.dirname(__file__))
class CaseSensitiveParser(RawConfigParser):
def optionxform(self, value):
return value
def generate(in_, constraints_file):
in_file = os.path.join(HERE, in_)
out_file_constraints = os.path.join(HERE, constraints_file)
parser = CaseSensitiveParser()
parser.read(in_file)
constraints = []
versions = parser.items('versions')
for name, pin in versions:
if not pin:
continue
spec = '%s==%s' % (name, pin)
constraints.append(spec + '\n')
with open(out_file_constraints, 'w') as fcon:
for con in sorted(constraints):
fcon.write(con)
def main():
generate('profiles/versions.cfg', 'constraints.txt')
if __name__ == '__main__':
main()
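# Illustration with hypothetical pins (not taken from the repo): a versions.cfg
# containing
#
#   [versions]
#   requests = 2.24.0
#   zc.buildout =
#
# produces a constraints.txt with the single line "requests==2.24.0";
# the empty pin is skipped by the `if not pin` guard.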
|
[
"[email protected]"
] | |
9a16597fd1d52b913e1b8ac76edee8bfcc2ffdb0
|
9758fa6d66df1121ff9e0b4a7da511653bc53cf1
|
/Store/migrations/0015_auto_20190831_1807.py
|
6eb4ef39296d595ca30aac8e88963aa2b68bb4ce
|
[] |
no_license
|
hdforoozan/Restaurant-project
|
179fb4138cb92bfd7716671c3b1e8b1949bfbaff
|
2ab096cbc3ee20557b57ed97bd0d5556c5965e87
|
refs/heads/master
| 2020-06-12T08:50:03.067740 | 2019-09-17T18:48:43 | 2019-09-17T18:48:43 | 194,250,030 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 780 |
py
|
# Generated by Django 2.2.2 on 2019-08-31 13:37
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('Store', '0014_auto_20190831_1803'),
    ]

    operations = [
        migrations.AlterField(
            model_name='employee',
            name='education_degree',
            field=models.CharField(blank=True, choices=[('0', 'UnderGraduated'), ('1', 'Graduated'), ('2', 'Master'), ('3', 'PH.D')], max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='manager',
            name='education_degree',
            field=models.CharField(blank=True, choices=[('0', 'UnderGraduated'), ('1', 'Graduated'), ('2', 'Master'), ('3', 'PH.D')], max_length=50, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
8652007449a4a6d6723587c87267379f02cbf808
|
ed4123102ac4a96d7d649a0c8871a69ecafde1d7
|
/lib/bitwise.py
|
566773ba9ab47ccc551cd469ce2abc0f70b3da0e
|
[] |
no_license
|
aleksejs-fomins/bio-machine-learning
|
07f88052e5cdd04943aad459667713ce261a98cd
|
0dda7c6681ab5f6b9b2b17c5944cad3664b6bf3f
|
refs/heads/master
| 2023-03-04T12:29:30.425887 | 2023-02-07T08:34:53 | 2023-02-07T08:34:53 | 211,850,531 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 341 |
py
|
import numpy as np

# Flip the i-th bit of integer a
def bitflip(a, i):
    pos = 2**i
    return a - pos if a & pos else a + pos

# Convert an integer to a list of bits
def bitlist(num, nbit):
    return [int(bool(num & (1 << idx))) for idx in range(nbit)]

def bitlist2activation(lst):
    return np.array([1 if bit == 1 else -1 for bit in lst])
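# Quick sanity checks (example values, not part of the original module):
#   bitflip(0b0101, 1) == 0b0111  (bit 1 was clear, so it gets set)
#   bitlist(6, 4) == [0, 1, 1, 0]  (least-significant bit first)
#   bitlist2activation([0, 1, 1, 0]) -> array([-1,  1,  1, -1])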
|
[
"[email protected]"
] | |
e3d5e2d6bd853c4f74d2acc6be9c45639d9428c8
|
effdcfa3a98477226aa02de7a2e589bff8adfa56
|
/3_STL/222score.py
|
511c1d7da9487a38c89d3019058e68aed75f48d6
|
[] |
no_license
|
WUT-IDEA/Y2019_WWJ_Graduation_Design_Code
|
5817ceee8919355cc63262a629087c323b970c17
|
23e15f6156d5cf8552a9d913bb48eb675ef8e3c5
|
refs/heads/master
| 2020-05-27T22:28:48.021213 | 2019-05-27T09:18:04 | 2019-05-27T09:18:04 | 188,805,964 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,347 |
py
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math


def plot(pre, actual):
    plt.title('arima')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # step 1: use a font that can render CJK labels
    plt.rcParams['axes.unicode_minus'] = False  # step 2: fix minus-sign rendering on the axes with this font
    plt.plot(actual, linewidth=1, label='actual')
    plt.plot(pre, linewidth=1, label='pre')
    plt.legend(loc='upper right')  # show the legend in the upper-right corner
    plt.show()


def score_action(fileName):
    dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')  # timestamp format
    table = pd.read_csv(fileName + '.csv', parse_dates=True, index_col='timestamp', date_parser=dateparse)
    table = (table.resample('D').mean().interpolate('linear'))
    actual = np.array(table['actual'])
    purchase_pre = np.array(table['purchase_pre'])
    plot(purchase_pre, actual)
    sum = 0
    for i in range(len(actual)):
        # sum += abs(purchase_pre[i] - actual[i])
        sum += (purchase_pre[i] - actual[i]) * (purchase_pre[i] - actual[i])
    sum = math.sqrt(sum / 31.0)  # RMSE with a hard-coded 31-day denominator
    # sum = (sum / 31.0)
    return sum


print("STL_7", score_action("STL_7"))
print("STL_30", score_action("STL_30"))
print("STL_35", score_action("STL_35"))
print("STL_365", score_action("STL_365"))
|
[
"[email protected]"
] | |
3fe76dba2406707715ea71443aa5c68084b6427c
|
a97db7d2f2e6de010db9bb70e4f85b76637ccfe6
|
/leetcode/143-Reorder-List.py
|
76dc35a930512a20ad60fd1ba66c72c733a1b227
|
[] |
no_license
|
dongxiaohe/Algorithm-DataStructure
|
34547ea0d474464676ffffadda26a92c50bff29f
|
a9881ac5b35642760ae78233973b1608686730d0
|
refs/heads/master
| 2020-05-24T20:53:45.689748 | 2019-07-19T03:46:35 | 2019-07-19T03:46:35 | 187,463,938 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 792 |
py
|
class Solution:
    def reorderList(self, head):
        if not head or not head.next: return
        slow, fast = head, head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        first, second = slow, slow.next
        while second.next:  # 1->2->3->4->5->6 to 1->2->3->6->5->4
            third = second.next
            second.next = third.next
            third.next = first.next
            first.next = third
        first, second, third = head, slow, slow.next
        while first != second:  # 1->2->3->6->5->4 to 1->6->2->5->3->4
            second.next = third.next
            first_1 = first.next
            first.next = third
            third.next = first_1
            first = first_1
            third = second.next
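# Runs in O(n) time and O(1) extra space: find the middle with slow/fast pointers,
# reverse the second half in place, then interleave the two halves.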
|
[
"[email protected]"
] | |
931896263bebf84e1b742fc4256243bdbe10d638
|
395f974e62eafed74572efebcd91d62966e61639
|
/deprecated/obsolete/src/testavl.py
|
10d7a4e08ece3e0ff7e6d4fe13926d33defb668e
|
[
"Apache-2.0"
] |
permissive
|
agroce/tstl
|
ad386d027f0f5ff750eab19a722a4b119ed39211
|
8d43ef7fa49534868e6cdf1697863748260405c7
|
refs/heads/master
| 2023-08-08T19:14:52.020314 | 2023-07-26T17:51:36 | 2023-07-26T17:51:36 | 32,408,285 | 106 | 33 |
NOASSERTION
| 2021-01-26T19:05:17 | 2015-03-17T17:14:04 |
Python
|
UTF-8
|
Python
| false | false | 1,506 |
py
|
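# NB: this harness targets Python 2 (xrange, print statements, dict.iteritems)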
import avl
import random
import sys
import coverage
import time
import numpy

start = time.time()
branchesHit = set()

maxval = int(sys.argv[1])
testlen = int(sys.argv[2])
numtests = int(sys.argv[3])

cov = coverage.coverage(branch=True, source=["avl.py"])
cov.start()

for t in xrange(0, numtests):
    a = avl.AVLTree()
    test = []
    ref = set()
    for s in xrange(0, testlen):
        h = a.height
        n = len(ref)
        if (n > 0):
            if not (h <= (numpy.log2(n) + 1)):
                print h
                print n
                print (numpy.log2(n))
                sys.exit(0)
        op = random.choice(["add", "del", "find"])
        val = random.randrange(0, maxval)
        test.append((op, val))
        if op == "add":
            a.insert(val)
            ref.add(val)
        elif op == "del":
            a.delete(val)
            ref.discard(val)
        elif op == "find":
            assert (a.find(val) == (val in ref))
        currBranches = cov.collector.get_arc_data()
        for src_file, arcs in currBranches.iteritems():
            for arc in arcs:
                branch = (src_file, arc)
                if branch not in branchesHit:
                    branchesHit.add(branch)
                    elapsed = time.time() - start
                    print elapsed, len(branchesHit), branch
    avlitems = a.inorder_traverse()
    setitems = []
    for item in ref:
        setitems.append(item)
    setitems = sorted(setitems)
    assert (avlitems == setitems)
|
[
"[email protected]"
] | |
0d90a0e294b9f072e189229811680e4b4e3babb1
|
a777170c979214015df511999f5f08fc2e0533d8
|
/train.py
|
e6fbe847bc160c06cca04e2c8f32707f3e1cf0ac
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
srlee-ai/claf
|
210b2d51918cf210683e7489ccb8347cb8b1f146
|
89b3e5c5ec0486886876ea3bac381508c6a6bf58
|
refs/heads/master
| 2021-02-13T04:38:36.198288 | 2020-03-03T15:01:01 | 2020-03-03T15:01:01 | 244,661,892 | 0 | 0 |
MIT
| 2020-03-03T14:45:52 | 2020-03-03T14:45:52 | null |
UTF-8
|
Python
| false | false | 248 |
py
|
# -*- coding: utf-8 -*-
from claf.config import args
from claf.learn.experiment import Experiment
from claf.learn.mode import Mode
if __name__ == "__main__":
    experiment = Experiment(Mode.TRAIN, args.config(mode=Mode.TRAIN))
    experiment()
|
[
"[email protected]"
] | |
34ac7a3126ac16fa1d6c38c6a98abcbeeac04fa3
|
cc18ad6df3249b891a8fb6491a940ac2a33d284a
|
/tests/test_l.py
|
8f31bdd414ae7368ace95e1ffa2f21be89d241f8
|
[] |
no_license
|
ASU-CompMethodsPhysics-PHY494/activity-03-python_calculator
|
39ee8d654a3376a51a432179efd4c7a7e1de82d8
|
60acaaf07338294180e9c804d2343b4f4d41304d
|
refs/heads/main
| 2023-02-24T20:33:55.020537 | 2021-01-28T16:23:31 | 2021-01-28T16:23:31 | 333,042,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 131 |
py
|
import pytest
from .tst import _test_variable
def test_l(name='l', reference=-1+3j):
    return _test_variable(name, reference)
|
[
"[email protected]"
] | |
23a2097a7cc61138e387807676a9e26a1c578749
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/web/list_web_app_site_backups_slot.py
|
d462ca9222f2a90ee8600578a233a9c59f113a18
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,748 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs

__all__ = [
    'ListWebAppSiteBackupsSlotResult',
    'AwaitableListWebAppSiteBackupsSlotResult',
    'list_web_app_site_backups_slot',
    'list_web_app_site_backups_slot_output',
]

@pulumi.output_type
class ListWebAppSiteBackupsSlotResult:
    """
    Collection of backup items.
    """
    def __init__(__self__, next_link=None, value=None):
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> str:
        """
        Link to next page of resources.
        """
        return pulumi.get(self, "next_link")

    @property
    @pulumi.getter
    def value(self) -> Sequence['outputs.BackupItemResponse']:
        """
        Collection of resources.
        """
        return pulumi.get(self, "value")


class AwaitableListWebAppSiteBackupsSlotResult(ListWebAppSiteBackupsSlotResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListWebAppSiteBackupsSlotResult(
            next_link=self.next_link,
            value=self.value)


def list_web_app_site_backups_slot(name: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   slot: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppSiteBackupsSlotResult:
    """
    Collection of backup items.
    API Version: 2020-12-01.

    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get backups of the production slot.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['slot'] = slot
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:web:listWebAppSiteBackupsSlot', __args__, opts=opts, typ=ListWebAppSiteBackupsSlotResult).value

    return AwaitableListWebAppSiteBackupsSlotResult(
        next_link=__ret__.next_link,
        value=__ret__.value)


@_utilities.lift_output_func(list_web_app_site_backups_slot)
def list_web_app_site_backups_slot_output(name: Optional[pulumi.Input[str]] = None,
                                          resource_group_name: Optional[pulumi.Input[str]] = None,
                                          slot: Optional[pulumi.Input[str]] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWebAppSiteBackupsSlotResult]:
    """
    Collection of backup items.
    API Version: 2020-12-01.

    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get backups of the production slot.
    """
    ...
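# Hypothetical usage sketch (the resource names are illustrative only):
#   result = list_web_app_site_backups_slot(name="my-app",
#                                           resource_group_name="my-rg",
#                                           slot="staging")
#   backups = result.value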
|
[
"[email protected]"
] | |
47253660053b62e3a3400992df6e8e5c92705a2f
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_2336+264/sdB_PG_2336+264_lc.py
|
5a89a24209124435ed571d1c422e1be8d34c29e3
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
from gPhoton.gAperture import gAperture

def main():
    gAperture(band="NUV", skypos=[354.680542, 26.667064], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_2336+264/sdB_PG_2336+264_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227, 0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
ac01b0964e31f632558c44346006f03235d2cbaf
|
5021cd17ce5fb52f7859d69ffa660c1393820fea
|
/34.py
|
273b15775856ae587f4dd416663870f2c79d0f4c
|
[] |
no_license
|
slavo3dev/python_100_exercises
|
720e4f76de670fa969c9d62bddee1db20caf24f1
|
2983131a2a3ec40bbf3460a2e1baed57c6514e6a
|
refs/heads/master
| 2021-08-23T05:22:21.673477 | 2017-12-03T16:14:48 | 2017-12-03T16:14:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 407 |
py
|
# Question: The following script throws a NameError in the last line saying that c is not defined. Please fix the function so that there is no error and the last line is able to print out the value of c (i.e. 1).
def foo():
    global c
    c = 1
    return c

foo()
print(c)
# c was not defined because it was a local variable inside the function foo;
# declaring c as global inside foo makes the assignment create the module-level name
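# an alternative that avoids global state entirely: have foo() just return 1
# and bind the result at module level with c = foo()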
|
[
"[email protected]"
] |