| column | dtype | range / values |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |

Each record below is one row of this table, in column order: the metadata cells through `extension` on a single pipe-separated line, the multi-line `content` cell as a code block, and the closing `authors` / `author_id` cells on the line after the code.
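If the dump needs to be inspected programmatically rather than read, a minimal sketch along these lines should work, assuming the dump originated as a Hugging Face `datasets` export with the schema above; the dataset identifier below is a placeholder, not the actual source of this dump.

    # Minimal sketch, assuming a Hugging Face `datasets` export with this schema.
    # "org/code-files-dump" is a placeholder id, not the real source of this dump.
    from datasets import load_dataset

    ds = load_dataset("org/code-files-dump", split="train")
    print(ds.features)  # column schema; should mirror the table above

    row = ds[0]
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file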
b07068e53d5ceac86d2431c09b775cdc9a8e872a | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_insecthill_small_fog_gray.py | 7b3c9de5103bfa24a894b44ac8e8c59d5f7349ac | ["MIT"] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 469 | py |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    result = Tangible()

    result.template = "object/tangible/lair/base/shared_poi_all_lair_insecthill_small_fog_gray.iff"
    result.attribute_template_id = -1
    result.stfName("lair_n", "insecthill")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
| ["[email protected]"] | |

123bd91eada7ece3d6f864c35413bb2c53b6a044 | 156d054848b211fd4ca75057b9b448c9260fdd7d | /python-data-analysis/python_data_analysis/ch06/ch06-6.py | 9c509a860d71e83883edf980f1ddaa56f8617c1d | [] | no_license | wwxFromTju/Python-datascience | adfc06030dc785901b5fd33824529f86fcf41c54 | 7c58526ef54a6f10cbe1d4c7e5e024ddc423908a | refs/heads/master | 2021-01-20T17:36:51.701638 | 2016-09-04T11:21:56 | 2016-09-04T11:21:56 | 58,730,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py |
#!/usr/bin/env python
# encoding=utf-8
from urllib2 import urlopen
from lxml.html import parse
import pandas as pd
import numpy as np
from pandas.io.parsers import TextParser
from pandas import Series, DataFrame

# XML and HTML

# Use the `kind` argument to get either column names or data
def _unpack(row, kind='td'):
    elts = row.findall('.//%s' % kind)
    return [val.text_content() for val in elts]

# Get the column names and data from one table
def parse_options_data(table):
    rows = table.findall('.//tr')
    header = _unpack(rows[0], kind='th')
    data = [_unpack(r) for r in rows[1:]]
    return TextParser(data, names=header).get_chunk()

# Open the page with urlopen, then parse the resulting stream with lxml
parsed = parse(urlopen('http://finance.yahoo.com/q/op?s=APPL+Options'))
print parsed
doc = parsed.getroot()
print doc

# Use XPath to access the tags
# Get all URL links
links = doc.findall('.//a')
# These are HTML element objects; to get the URL and the link text you must
# use each object's get('href') and text_content() (for the displayed text)
print links[15:20]
lnk = links[28]
print lnk
print lnk.get('href')
print lnk.text_content()

# Use a list comprehension to collect all the URLs
urls = [lnk.get('href') for lnk in doc.findall('.//a')]
print urls[-10:]

# tables = doc.findall('.//table')
# calls = tables[0]
# puts = tables[1]
# rows = calls.findall('.//tr')
# header row
# print _unpack(rows[0], kind='th')
# data
# print _unpack(rows[1], kind='td')
# call_data = parse_options_data(calls)
# put_data = parse_options_data(puts)
# print call_data[:10]
| ["[email protected]"] | |

0243fca320209c2522051b1b89d64c9a349e4937 | 7160f0637ba4fdd85feeb43aca2125c3479c474c | /config/spec.py | b1b021a751d98f14b75db187ffcdf0c648646468 | ["MIT"] | permissive | RENCI/pdspi-mapper-parallex-example | 86a39e513f1e07f73be1281c81b2b143ed7e5d80 | 1c99fa42b7b9bc2c09e9cad2f1c55ea10549814a | refs/heads/master | 2023-05-11T04:29:58.354329 | 2021-03-03T23:14:21 | 2021-03-03T23:14:21 | 260,721,734 | 0 | 2 | MIT | 2023-05-01T21:42:44 | 2020-05-02T15:54:12 | Python | UTF-8 | Python | false | false | 5,561 | py |
from pdsphenotypemapping.clinical_feature import *
from tx.dateutils.utils import strtodate
from dateutil.relativedelta import relativedelta

requested_patient_variable_ids = get_patient_variable_ids(patientVariables)
timestamp_datetime = strtodate(timestamp)

for patient_id in patientIds:
    patient_data = deref(data, patient_id)
    patient = get_patient_patient(patient_data)
    pid = patient["id"]
    yield {
        "patientId": pid
    }
    condition = get_condition_patient(fhir=patient_data)
    observation = get_observation_patient(fhir=patient_data)
    if "LOINC:2160-0" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:2160-0",
                **serum_creatinine(observation, "mg/dL", timestamp_datetime)
            }]
        }
    if "LOINC:82810-3" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:82810-3",
                **pregnancy(condition, None, timestamp_datetime)
            }]
        }
    if "HP:0001892" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "HP:0001892",
                **bleeding(condition, None, timestamp_datetime)
            }]
        }
    if "HP:0000077" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "HP:0000077",
                **kidney_dysfunction(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:30525-0" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:30525-0",
                **age(patient, "year", timestamp_datetime)
            }]
        }
    if "LOINC:54134-2" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:54134-2",
                **race(patient, None, timestamp_datetime)
            }]
        }
    if "LOINC:54120-1" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:54120-1",
                **ethnicity(patient, None, timestamp_datetime)
            }]
        }
    if "LOINC:21840-4" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:21840-4",
                **sex(patient, None, timestamp_datetime)
            }]
        }
    if "LOINC:8302-2" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:8302-2",
                **height(observation, "m", timestamp_datetime)
            }]
        }
    if "LOINC:29463-7" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:29463-7",
                **weight(observation, "kg", timestamp_datetime)
            }]
        }
    if "LOINC:39156-5" in requested_patient_variable_ids:
        # local names chosen so the imported height/weight helpers are not
        # shadowed on later loop iterations
        height_value = height(observation, "m", timestamp_datetime)
        weight_value = weight(observation, "kg", timestamp_datetime)
        yield {
            "values": [{
                "id": "LOINC:39156-5",
                **bmi(height_value, weight_value, observation, "kg/m^2", timestamp_datetime)
            }]
        }
    if "LOINC:45701-0" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:45701-0",
                **fever(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:LP212175-6" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:LP212175-6",
                **date_of_fever_onset(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:64145-6" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:64145-6",
                **cough(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:85932-2" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:85932-2",
                **date_of_cough_onset(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:54564-0" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:54564-0",
                **shortness_of_breath(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:LP128504-0" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:LP128504-0",
                **autoimmune_disease(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:54542-6" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:54542-6",
                **pulmonary_disease(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:LP172921-1" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:LP172921-1",
                **cardiovascular_disease(condition, None, timestamp_datetime)
            }]
        }
    if "LOINC:56799-0" in requested_patient_variable_ids:
        yield {
            "values": [{
                "id": "LOINC:56799-0",
                **address(patient, None, timestamp_datetime)
            }]
        }
    if "LOINC:LP21258-6" in requested_patient_variable_ids:
        yield {
            "values": [{
                # id corrected to match the requested variable (the original
                # file repeated "LOINC:54542-6" here, an apparent copy-paste slip)
                "id": "LOINC:LP21258-6",
                **oxygen_saturation(observation, None, timestamp_datetime)
            }]
        }
| ["[email protected]"] | |

33df19f351ae1e38a5fef7a942b3eaaee767871b | 6e46a850cc4ece73476a350e676ea55ce72b200a | /aliyun-python-sdk-reid/aliyunsdkreid/request/v20190928/ImportSpecialPersonnelRequest.py | fb397d0415577135ad8be89532374fbb0d1edd62 | ["Apache-2.0"] | permissive | zhxfei/aliyun-openapi-python-sdk | fb3f22ca149988d91f07ba7ca3f6a7a4edf46c82 | 15890bf2b81ce852983f807e21b78a97bcc26c36 | refs/heads/master | 2022-07-31T06:31:24.471357 | 2020-05-22T17:00:17 | 2020-05-22T17:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkreid.endpoint import endpoint_data


class ImportSpecialPersonnelRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'reid', '2019-09-28', 'ImportSpecialPersonnel')
        self.set_method('POST')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_UkId(self):
        return self.get_body_params().get('UkId')

    def set_UkId(self, UkId):
        self.add_body_params('UkId', UkId)

    def get_Description(self):
        return self.get_body_params().get('Description')

    def set_Description(self, Description):
        self.add_body_params('Description', Description)

    def get_ExternalId(self):
        return self.get_body_params().get('ExternalId')

    def set_ExternalId(self, ExternalId):
        self.add_body_params('ExternalId', ExternalId)

    def get_PersonType(self):
        return self.get_body_params().get('PersonType')

    def set_PersonType(self, PersonType):
        self.add_body_params('PersonType', PersonType)

    def get_Urls(self):
        return self.get_body_params().get('Urls')

    def set_Urls(self, Urls):
        self.add_body_params('Urls', Urls)

    def get_PersonName(self):
        return self.get_body_params().get('PersonName')

    def set_PersonName(self, PersonName):
        self.add_body_params('PersonName', PersonName)

    def get_StoreIds(self):
        return self.get_body_params().get('StoreIds')

    def set_StoreIds(self, StoreIds):
        self.add_body_params('StoreIds', StoreIds)

    def get_Status(self):
        return self.get_body_params().get('Status')

    def set_Status(self, Status):
        self.add_body_params('Status', Status)
| ["[email protected]"] | |

0565407cdfc58de77957fbefb50611a9c24c4748 | d9720a7b4bfe713426f766547062aaeacdfa2566 | /models/city.py | 44ade30d3614b334699e4c1bc26318d31b39b2b7 | ["MIT"] | permissive | AlisonQuinter17/AirBnB_clone | b90a96bc2256e32f648bb2b9a8e1dbdba90ca4eb | c890e3b4f9eb7a3ded96ac756387109351e6b13f | refs/heads/main | 2023-01-19T05:10:39.635975 | 2020-11-18T17:36:59 | 2020-11-18T17:36:59 | 308,370,255 | 1 | 2 | MIT | 2020-11-03T16:03:36 | 2020-10-29T15:27:01 | Python | UTF-8 | Python | false | false | 144 | py |
#!/usr/bin/python3
from models.base_model import BaseModel


class City(BaseModel):
    """ city attributes """
    state_id = ""
    name = ""
| ["[email protected]"] | |

2b117cb43b2993dc5748ae809156750eb0e3a3f7 | 6bf005128fb95ea21994325ace59cf0664d0159e | /U3DAutomatorClient/script/windows/PPT3DTestCase/FirstStageTestCase/InsertWordArtTestCase.py | 101fe499da4ad6957383cee6f9715c1d14d63a4c | [] | no_license | Bigfishisbig/U3DAutomatorTest | 5ab4214fc6cda678a5f266fb013f7dd7c52fcaf8 | 93a73d8995f526f998ff50b51a77ef0bbf1b4ff8 | refs/heads/master | 2023-01-07T11:59:19.025497 | 2019-09-20T06:06:55 | 2019-09-20T06:06:55 | 209,458,914 | 0 | 0 | null | 2022-12-27T15:35:30 | 2019-09-19T03:58:43 | Python | UTF-8 | Python | false | false | 1,796 | py |
#!/usr/bin/env python
# coding=utf-8
"""
File name: InsertWordArtTestCase.py
Author: ycy
Version: PPTPro
Created: 2019/1/18 15:51
Modified:
Software: PyCharm
"""
from script.windows.Operation import *
from script.windows.SystemDialog import SystemDiaglog
from script.windows.PPT3DTestCase.Action import Action
from script.windows.PPT3DSetting.SourcePath import SourcePath

reload(sys)
sys.setdefaultencoding('UTF-8')  # convert the script's encoding to the specified encoding


class InsertWordArtTestCase(Action, Operation, SystemDiaglog):
    '''插入艺术字'''

    def test_main(self):
        '''Insert WordArt (the class docstring above is used as the scene tag, so it is kept as-is)'''
        self.OperationSetting()
        self.Init3DPPT()
        self.SetTag("插入艺术字", time.time())
        tag = (self.__class__.__doc__ or u"测试") + "_" + self.__class__.__name__
        self.startScene(tag)
        self.InputPara()
        self.InputStr(u"黑夜给了你黑色的眼睛,你却用它来寻找光明。")
        wordart = [SourcePath.File_Img_WordArt_Text_1, SourcePath.File_Img_WordArt_Text_2, SourcePath.File_Img_WordArt_Text_3]
        for i in range(3):
            self.OneClick("BtnFormat")
            path = self.getText()
            # self.OneClickL(path, 50)
            self.ListClick("RotateByZAxisNor")
            self.ListClick("WordArtStyle", i)
            self.s_witForImg(wordart[i], 10, "艺术字插入失败", None, 0.4)
            self.OneClick("BtnStart")
            self.OneClick("BtnRevert")
            self.s_waitForImgVanish(wordart[i], 10, "撤销艺术字失败", 0.4)
            self.OneClick("BtnStart")
            self.OneClick("BtnRecover")
            self.s_witForImg(wordart[i], 10, "艺术字插入失败")
        self.endScene(tag)
        time.sleep(1)
        self.EndTag()
| ["[email protected]"] | |

6c3521f7f8735e45cd7fa0cd7ff651fbf0bf0d51 | 717171ed7a14ad60dd42d62fe0dd217a0c0c50fd | /19年7月/7.02/base64处理图形验证码.py | bd54377e9b458358c19bb62f61375ac74e346fcc | [] | no_license | friedlich/python | 6e9513193227e4e9ee3e30429f173b55b9cdb85d | 1654ef4f616fe7cb9fffe79d1e6e7d7721c861ac | refs/heads/master | 2020-09-04T14:34:48.237404 | 2019-11-18T14:54:44 | 2019-11-18T14:54:44 | 219,756,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,693 | py |
import requests,base64,sys,csv
from PIL import Image

address_url = 'https://www.ele.me/restapi/bgs/poi/search_poi_nearby?'
place = input('请输入你的收货地址:')
params = {
    'geohash': 'wtw3sjq6n6um',
    'keyword': place,
    'latitude': '31.23037',
    'limit': '20',
    'longitude': '121.473701',
    'type': 'nearby'
}
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
address_res = requests.get(address_url,headers=headers,params=params)
address_json = address_res.json()
print('以下,是与'+place+'相关的位置信息:\n')
n=0
for address in address_json:
    print(str(n)+'. '+address['name']+':'+address['short_address']+'\n')
    n = n+1
address_num = int(input('请输入您选择位置的序号:'))
final_address = address_json[address_num]

session = requests.session()
url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'
tel = input('请输入手机号:')
data_1 = {
    'captcha_hash':'',
    'captcha_value':'',
    'mobile': tel,
    'scf': "ms"
}
login = session.post(url_1,headers=headers,data=data_1)
code = login.status_code
print(type(login))
print(login.text)
print('status code of login:' + str(code))
if code == 200:  # the first three logins have no image-captcha step
    token = login.json()['validate_token']
    url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'
    code = input('请输入手机验证码:')
    data_2 = {
        'mobile': tel,
        'scf': 'ms',
        'validate_code': code,
        'validate_token': token
    }
    session.post(url_2,headers=headers,data=data_2)
elif code == 400:  # after more than 3 logins the site requires an image captcha
    print('有图形验证码')
    url_3 = 'https://h5.ele.me/restapi/eus/v3/captchas'
    data_3 = {'captcha_str': tel}
    # fetch the captcha
    cap = session.post(url_3,headers=headers,data=data_3)
    hash = cap.json()['captcha_hash']
    value = cap.json()['captcha_image'].replace('data:image/jpeg;base64,','')
    # decode the base64 captcha string into an image file saved locally
    x = base64.b64decode(value)
    file = open(sys.path[0]+'\\captcha.jpg','wb')
    file.write(x)
    file.close()
    im = Image.open(sys.path[0]+'\\captcha.jpg')
    im.show()  # display the captcha image
    captche_value = input('请输入验证码:')
    # post the image captcha back to the ele.me server to log in
    url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'
    data_4 = {
        'captcha_hash': hash,
        'captcha_value': captche_value,
        'mobile': tel,
        'scf': "ms"
    }
    # send the captcha to the server
    login = session.post(url_1,headers=headers,data=data_4)
    print(login.json())
    token = login.json()['validate_token']
    url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'
    code = input('请输入手机验证码:')
    data_2 = {
        'mobile': tel,
        'scf': 'ms',
        'validate_code': code,
        'validate_token': token
    }
    session.post(url_2,headers=headers,data=data_2)

restaurants_url = 'https://www.ele.me/restapi/shopping/restaurants'
params={
    'extras[]': 'activities',
    'geohash': final_address['geohash'],
    'latitude': final_address['latitude'],
    'limit': '24',
    'longitude': final_address['longitude'],
    'offset': '0',
    'terminal': 'web'
}
restaurants_res = session.get(restaurants_url,headers=headers,params=params)
restaurants_json = restaurants_res.json()
with open(sys.path[0]+'\\restaurants.csv','w',newline='',encoding='utf_8_sig') as f:
    writer = csv.writer(f)
    for restaurant in restaurants_json:
        writer.writerow([restaurant['name']])  # wrap in a list so the name is one cell, not one cell per character
| ["[email protected]"] | |

05be3d589bb0eef2a4cd064c43dcf7e93a68c7a2 | e8b38b8dfa348ff006eb197a7906ca8e491a23dc | /tests/conftest.py | 79144b69789b7a96597410997a9f0eea0252414d | ["MIT"] | permissive | pyccel/pyccel | d79a81dbdff1172839a6a1227abfcc1f97e6c97b | 1896b761ba662c90b14c195bbb6eb5cddc57cbfc | refs/heads/devel | 2023-08-30T12:15:25.244401 | 2023-08-28T09:31:32 | 2023-08-28T09:31:32 | 100,463,736 | 307 | 39 | MIT | 2023-09-14T19:29:26 | 2017-08-16T07:59:14 | Python | UTF-8 | Python | false | false | 2,423 | py |
# pylint: disable=missing-function-docstring, missing-module-docstring
import logging
import os
import shutil
import pytest
from mpi4py import MPI
from pyccel.commands.pyccel_clean import pyccel_clean

github_debugging = 'DEBUG' in os.environ
if github_debugging:
    import sys
    sys.stdout = sys.stderr


@pytest.fixture( params=[
        pytest.param("fortran", marks = pytest.mark.fortran),
        pytest.param("c", marks = pytest.mark.c),
        pytest.param("python", marks = pytest.mark.python)
    ],
    scope = "session"
)
def language(request):
    return request.param


def move_coverage(path_dir):
    for root, _, files in os.walk(path_dir):
        for name in files:
            if name.startswith(".coverage"):
                shutil.copyfile(os.path.join(root,name), os.path.join(os.getcwd(),name))


def pytest_runtest_teardown(item, nextitem):
    path_dir = os.path.dirname(os.path.realpath(item.fspath))
    move_coverage(path_dir)

    config = item.config
    xdist_plugin = config.pluginmanager.getplugin("xdist")
    if xdist_plugin is None or "PYTEST_XDIST_WORKER_COUNT" not in os.environ \
            or os.getenv('PYTEST_XDIST_WORKER_COUNT') == 1:
        print("Tearing down!")
        marks = [m.name for m in item.own_markers]
        if 'parallel' not in marks:
            pyccel_clean(path_dir, remove_shared_libs = True)
        else:
            comm = MPI.COMM_WORLD
            comm.Barrier()
            if comm.rank == 0:
                pyccel_clean(path_dir, remove_shared_libs = True)
            comm.Barrier()


def pytest_addoption(parser):
    parser.addoption("--developer-mode", action="store_true", default=github_debugging, help="Show tracebacks when pyccel errors are raised")


def pytest_sessionstart(session):
    # setup_stuff
    if session.config.option.developer_mode:
        from pyccel.errors.errors import ErrorsMode
        ErrorsMode().set_mode('developer')
    if github_debugging:
        logging.basicConfig()
        logging.getLogger("filelock").setLevel(logging.DEBUG)

    # Clean path before beginning but never delete anything in parallel mode
    path_dir = os.path.dirname(os.path.realpath(__file__))

    config = session.config
    xdist_plugin = config.pluginmanager.getplugin("xdist")
    if xdist_plugin is None:
        marks = [m.name for m in session.own_markers]
        if 'parallel' not in marks:
            pyccel_clean(path_dir)
| ["[email protected]"] | |

41a3917f248cec7eca19c81329335ccd0bd32c96 | 696799b824503429a3ac65ebdc28890bfbcaebe0 | /plugins/com.astra.ses.spell.gui.cots_4.0.2.201806070922/win32/spell/spell/lib/adapter/value.py | 4d84f6d6e65da20cf8515b2f88d5d692597a2fe7 | [] | no_license | CalypsoCubesat/SPELL_GUI_4.0.2_win32_x86 | a176886b48873b090ab270c189113a8b2c261a06 | 9275ecfff2195ca4d4c297f894d80c1bcfa609e3 | refs/heads/master | 2021-08-03T08:04:25.821703 | 2019-10-28T04:53:50 | 2019-10-28T04:53:50 | 217,968,357 | 0 | 0 | null | 2021-08-02T17:03:44 | 2019-10-28T04:50:59 | Python | UTF-8 | Python | false | false | 3,989 | py |
###############################################################################
"""
(c) SES-ASTRA 2008

PACKAGE
    spell.lib.adapter.value

FILE
    user.py

DESCRIPTION
    Variant value helper class

COPYRIGHT
    This software is the copyrighted work of SES ASTRA S.A.
    All rights reserved.

PROJECT
    UGCS/USL

AUTHOR
    Rafael Chinchilla Camara (GMV)

DATE
    02/10/2007

REVISION HISTORY
    02/10/2007    10:30    Creation
"""
###############################################################################

from spell.lang.constants import *
from spell.lang.modifiers import *

###############################################################################
class ValueClass:
    """
    This class implements a variant value with the following characteristics:
    - value
    - vtype (long, double...)
    - radix (hex, dec, oct..)
    - format (eng, raw)
    - units (whatsoever)
    """

    #==========================================================================
    def __init__(self, value, format = ENG, radix = DEC, vtype = LONG, units = '', defCal = True):
        self._value = value
        self._vtype = vtype
        if type(value)==int:
            self._vtype = LONG
        elif type(value)==float:
            self._vtype = FLOAT
        elif type(value)==str:
            self._vtype = STRING
        self._format = format
        self._radix = radix
        self._units = units
        self._defCal = defCal

    #==========================================================================
    def set(self, value):
        self._value = value

    #==========================================================================
    def get(self):
        return self._value

    #==========================================================================
    def format(self, fmt = None):
        if fmt is None:
            return self._format
        else:
            self._format = fmt

    #==========================================================================
    def vtype(self, vt = None):
        if vt is None:
            return self._vtype
        else:
            self._vtype = vt

    #==========================================================================
    def radix(self, rd = None):
        if rd is None:
            return self._radix
        else:
            self._radix = rd

    #==========================================================================
    def units(self, u = None):
        if u is None:
            return self._units
        else:
            self._units = u

    #==========================================================================
    def __repr__(self):
        return "[" + repr(self._value) + ",VType: " + self._vtype + ",Format: " +\
               self._format + ", Radix: " + self._radix + ", Units: " + self._units + "]"

    #==========================================================================
    def evaluate(self, radix = DEC):
        cnv = { DEC: '', HEX: '0x', OCT: '0' }
        trns = { HEX: hex, OCT: oct }
        res = None
        try:
            if isinstance(self._value, str):
                if self._radix == BIN:
                    res = 0
                    for c in self._value:
                        res = res * 2 + eval(c)
                elif self._radix in cnv:
                    res = eval(cnv[self._radix] + self._value)
            elif isinstance(self._value, long) or isinstance(self._value, int) or isinstance(self._value, float):
                res = self._value
        except:
            res = None
        if res is None:
            return None
        if radix in trns:
            res = trns[radix](res)
        elif radix == BIN:
            v = ''
            while res > 0:
                if res % 2 == 1: v = '1' + v
                if res % 2 == 0: v = '0' + v
                res >>= 1
            res = '0b' + v
        return res
| ["[email protected]"] | |

7969080b1179beb14ddaf543f8a32366a6d882ae | 253cd5d6074d322a233bda37da4b1c663b6027b3 | /cooking/timestamp/broadcast_utils/user_utils.py | 13c1fc1ea72b1fe54f12287338a2f130e94404fa | [] | no_license | ZandTree/idea_project | 85321156149f9365c6380537d34f05f98e8885ae | e48ea39ef05b54c197b635313fb7b5304bd5691c | refs/heads/main | 2023-08-29T11:12:48.561578 | 2021-11-15T16:18:18 | 2021-11-15T16:18:18 | 417,647,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py |
def get_ip(req):
    """
    if x_forward present return it;
    otherwise remote_addr or empty string
    """
    try:
        forward = req.META.get('HTTP_X_FORWARDED_FOR')
        if forward:
            return req.META.get('HTTP_X_FORWARDED_FOR', req.META.get('REMOTE_ADDR', '')).split(',')[0].strip()
        else:
            return req.META.get('REMOTE_ADDR')
    except:
        return ''
| ["[email protected]"] | |

6d73d4509904137b281d8d1e94290549eded70ac | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/LabanLib/dumpLabanRecognizer.py | 23f896b8fb16ae97cc5714311bc2eb2e59973fba | [] | no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 382 | py |
import pickle
import LabanUtils.util as labanUtil
X = pickle.load( open( "X", "r" ) )
Y_laban = pickle.load( open( "Y_Laban", "r" ) )
labanClf, selectedIndices = labanUtil.getMultiTaskclassifier(X, Y_laban)
f = open('labanClf', 'w')
f.flush()
pickle.dump(labanClf, f)
f.close()
f = open('selectedIndices', 'w')
f.flush()
pickle.dump(selectedIndices, f)
f.close()
| ["[email protected]"] | |

eb03fe561672b829d8ba86e36d4ee415da5ad41c | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_15000~/B_15652.py | cc6422a80ae1ddbd9b06ca5b3cf7a4710db163d2 | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 620 | py |
# duplicates allowed
# non-descending order
N,M=list(map(int,input().split()))

result=[]

def is_promising():
    base=int(result[0])
    for i in range(1,len(result)):
        if base>int(result[i]):  # not non-descending
            return False
        else:
            base=int(result[i])  # update the comparison value to the latest
            continue
    return True

def BruteForce():
    global result
    if len(result)==M:
        print(' '.join(result))
        return
    for i in range(1,N+1):
        result.append(str(i))
        if is_promising():
            BruteForce()
        result.pop()

BruteForce()
| ["[email protected]"] | |

0307636f3350b41783f6bc369c9b7562faa04092 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1154.py | d57c56b69272d0a44af0fad344cc5e916a3e8b59 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py |
infile = open('D:\study\codejam\codejam2014\B-large.in','r')
outfile = open('D:\study\codejam\codejam2014\B-large.out','w')

def main():
    T = int(infile.readline())
    for case in range(1,T+1):
        doCase(case)
    infile.close()
    outfile.close()

def doCase(case):
    c,f,x = [float(x) for x in infile.readline().split()]
    outfile.write('Case #'+str(case)+': '+str(check(c,f,x))+'\n')
    #print('case #'+str(case)+' '+str(check(c,f,x)))

def check(c,f,x):
    rate = 2
    time1 = 0
    while x/(rate+f)+c/rate < x/rate:
        time1 += c/rate
        rate += f
    time = time1+x/rate
    return round(time,7)
| ["[email protected]"] | |

ab39ec8dc7ed3dc0a971ff1d720fcf1da8835483 | 5a01497e7c29e2488b6a4cb0478405239375eb66 | /apetools/commons/broadcaster.py | c2cb2070a7ee15ecdd67b7b8e8a1da9bc821e7bf | ["Apache-2.0"] | permissive | russell-n/oldape | 8b4d9e996181dc1c7175f72d75c6193443da591b | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | refs/heads/master | 2021-05-30T20:02:18.895922 | 2016-03-27T04:38:18 | 2016-03-27T04:38:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py |
from apetools.baseclass import BaseClass


class Broadcaster(BaseClass):
    """
    A broadcaster sends a single datum to multiple targets
    """
    def __init__(self, receivers):
        """
        :param:

         - `receivers`: an iterable of callable receivers
        """
        super(Broadcaster, self).__init__()
        self._receivers = None
        self.receivers = receivers
        self._temp_receivers = None
        return

    @property
    def receivers(self):
        """
        :return: receivers of broadcast
        """
        return self._receivers

    @receivers.setter
    def receivers(self, new_receivers):
        """
        :param:

         - `new_receivers`: iterable of callable receivers (or single receiver)
        """
        try:
            self._receivers = [receiver for receiver in new_receivers]
        except TypeError as error:
            self._receivers = [new_receivers]
            self.logger.debug(error)
        return

    @property
    def temp_receivers(self):
        """
        :return: iterable of receivers to remove at next set-up
        """
        if self._temp_receivers is None:
            self._temp_receivers = []
        return self._temp_receivers

    @temp_receivers.setter
    def temp_receivers(self, new_receivers):
        """
        :param:

         - `new_receivers`: iterable of callable receivers (or single receiver)
        """
        try:
            self._temp_receivers = [receiver for receiver in new_receivers]
        except TypeError as error:
            self._temp_receivers = [new_receivers]
            self.logger.debug(error)
        return

    def subscribe(self, receiver):
        """
        Adds a new receiver to the receivers (if it isn't already there)
        """
        if receiver not in self.receivers:
            self.logger.debug("subscribing {0}".format(receiver))
            self.receivers.append(receiver)
        return

    def unsubscribe(self, receiver):
        """
        :param:

         - `receiver`: a receiver object to remove
        """
        self._receivers = [r for r in self._receivers if r is not receiver]
        return

    def set_up(self, targets=None):
        """
        The targets are removed the next time this is called.

        :param:

         - `targets`: a set of temporary targets

        :postcondition: reset method for each permanent receiver called
        """
        self._temp_receivers = None
        if targets is not None:
            self.temp_receivers = targets
        for receiver in self.receivers:
            try:
                receiver.reset()
            except AttributeError as error:
                self.logger.debug(error)
                self.logger.debug("Unable to reset {0}".format(receiver))
        return

    def reset(self):
        """
        :postcondition: self.receivers is None
        """
        self._receivers = None
        return

    def __contains__(self, receiver):
        """
        :param:

         - `receiver`: an object

        :rtype: Boolean
        :return: True if item in receivers
        """
        return receiver in self.receivers

    def __iter__(self):
        """
        :return: iterator over self.receivers
        """
        return iter(self.receivers)

    def __call__(self, datum):
        """
        Calls each receiver with the `datum`

        :param:

         - `datum`: A single data item
        """
        for receiver in self.receivers:
            receiver(datum)
        return
# end class Broadcaster
| ["[email protected]"] | |

30119e16f12f09d9fa55d967a0bb62f049303183 | 2f5ab43956b947b836e8377370d786e5ee16e4b0 | /sklearn2code/sym/test/test_function.py | c0ae2740f50a0419fbe09bbe835b40e8516be96a | ["MIT"] | permissive | modusdatascience/sklearn2code | b175fb268fa2871c95f0e319f3cd35dd54561de9 | 3ab82d82aa89b18b18ff77a49d0a524f069d24b9 | refs/heads/master | 2022-09-11T06:16:37.604407 | 2022-08-24T04:43:59 | 2022-08-24T04:43:59 | 115,747,326 | 4 | 2 | MIT | 2018-05-01T00:11:51 | 2017-12-29T19:05:03 | Python | UTF-8 | Python | false | false | 3,495 | py |
from sklearn2code.sym.function import Function
from nose.tools import assert_list_equal, assert_equal
from operator import __add__, __mul__, __sub__
from six import PY3
from sklearn2code.sym.expression import RealVariable, RealNumber


def test_map_symbols():
    fun0 = Function(('x', 'y'), tuple(), (RealVariable('x') + RealVariable('y'),))
    fun = Function(('x', 'y'), (((('z',), (fun0, ('x','y')))),), (RealVariable('x') / RealVariable('z'),))
    mapped_fun = fun.map_symbols({'x': 'q'})
    assert_list_equal(list(mapped_fun.inputs), list(map(RealVariable, ('q', 'y'))))
    assert_equal(set(mapped_fun.calls[0][1][1]), set(map(RealVariable, ('q', 'y'))))
    assert_equal(mapped_fun.outputs[0], RealVariable('q') / RealVariable('z'))


def test_compose():
    fun0 = Function('x', tuple(), (RealVariable('x'), RealNumber(1) - RealVariable('x')))
    fun = Function(('x', 'y'), tuple(), (RealVariable('x') / RealVariable('y'),))
    composed_fun = fun.compose(fun0)
    assert_equal(composed_fun.calls[0][1][0], fun0)
    assert_equal(composed_fun.inputs, fun0.inputs)
    assert_equal(fun.outputs, composed_fun.map_output_symbols(dict(zip(composed_fun.calls[0][0], fun.inputs))))


def test_from_expressions():
    fun = Function.from_expressions((RealVariable('x'), RealVariable('x') + RealVariable('y')))
    assert_equal(fun, Function(('x', 'y'), tuple(), (RealVariable('x'), RealVariable('x') + RealVariable('y'))))


def test_trim():
    fun0 = Function('x', ((('u',), (Function.from_expression(RealVariable('x0') + RealVariable('x1')), ('x', 'x'))),),
                    (RealVariable('u'), RealNumber(1) - RealVariable('x')))
    fun = Function(('x', 'y'), ((('z','w'), (fun0, ('y',))),), (RealVariable('x') / RealVariable('w'),)).trim()
    assert_equal(fun.inputs, (RealVariable('x'), RealVariable('y')))
    assert_equal(fun.outputs, (RealVariable('x') / RealVariable('w'),))
    assert_equal(fun.calls, (((RealVariable('w'),), (Function(('x', ), tuple(), (RealNumber(1)-RealVariable('x'),)), (RealVariable('y'),))),))


class TestOps(object):
    pass


def add_op(op):
    def test_op(self):
        fun0 = Function(('x', 'y'), tuple(), (RealVariable('x') + RealVariable('y'),))
        fun = Function(('x', 'y'), (((('z',), (fun0, ('x','y')))),), (RealVariable('x') / RealVariable('z'),))
        fun_op_two = op(fun, RealNumber(2))
        assert_equal(fun_op_two.outputs[0], op(RealVariable('x') / RealVariable('z'), RealNumber(2)))
        two_op_fun = op(RealNumber(2), fun)
        assert_equal(two_op_fun.outputs[0], op(RealNumber(2), RealVariable('x') / RealVariable('z')))
        fun_op_fun = op(fun, fun)
        assert_equal(fun_op_fun.outputs[0], op(RealVariable('x') / RealVariable('z'), RealVariable('x') / RealVariable('z')))
        assert_equal(fun_op_fun.inputs, fun.inputs)
        assert_equal(fun_op_fun.calls, fun.calls)
    test_name = 'test_%s' % op.__name__.strip('__')
    test_op.__name__ = test_name
    setattr(TestOps, test_name, test_op)

add_op(__add__)
add_op(__mul__)
add_op(__sub__)
if PY3:
    from operator import __truediv__  # @UnresolvedImport
    add_op(__truediv__)
else:
    from operator import __div__  # @UnresolvedImport
    add_op(__div__)

if __name__ == '__main__':
    # This code will run the test in this file.'
    import sys
    import nose
    module_name = sys.modules[__name__].__file__

    result = nose.run(argv=[sys.argv[0],
                            module_name,
                            '-s', '-v'])
| ["[email protected]"] | |

f0ddcfc1386615bfe664efdcc8da103a73ee296d | 05cde6f12d23eb67258b5a21d4fb0c783bcafbe5 | /almebic/models/engine/db_engine.py | 1078b54006ced768772dd3efb6b54c9b7762b300 | [] | no_license | alejolo311/DataInMotion | f5aff692bcaf9a795969951146f6ab7dc6557b08 | 75014600785f9d7f8a4771a9bb24e322e812d08f | refs/heads/master | 2023-05-13T00:57:41.407175 | 2020-07-26T00:51:49 | 2020-07-26T00:51:49 | 267,895,607 | 3 | 2 | null | 2023-05-01T21:26:16 | 2020-05-29T15:46:04 | CSS | UTF-8 | Python | false | false | 2,083 | py |
#!/usr/bin/python3
"""
Controls the ORM transactions using postgres db
"""
from models.base import BaseNode, Base
from models.user import User
from models.board import Board
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker


class DBEngine:
    __engine = None
    __session = None

    def __init__(self):
        """
        Creates the engine object using environment variables
        """
        user = 'data_im_dev'
        password = 'dim_passwd'
        host = '172.21.0.2'
        db = 'data_im_dev_db'
        self.__engine = create_engine('postgres://{}:{}@{}:5432/{}'.format(
            user, password, host, db
        ))

    def reload(self):
        """
        Creates the models based on metadata
        """
        try:
            Base.metadata.create_all(self.__engine)
            sess_factory = sessionmaker(bind=self.__engine,
                                        expire_on_commit=False)
            Session = scoped_session(sess_factory)
            self.__session = Session
        except Exception as e:
            print(e)

    def all(self, cls=None):
        """
        Returns all records, or all records of a class
        """
        newdict = {}
        objs = self.__session.query(cls).all()
        for obj in objs:
            key = obj.__class__.__name__ + '.' + obj.id
            newdict[key] = obj
        return (newdict)

    def new(self, obj):
        """
        Creates a new object
        """
        self.__session.add(obj)

    def save(self):
        """
        Saves changes in session
        """
        self.__session.commit()

    def close(self):
        """
        Remove the private session
        """
        self.__session.remove()

    def get(self, cls, id):
        """
        Return a record by class and id
        """
        objs = self.all(cls)
        for obj in objs.values():
            if obj.id == id:
                return obj
        return None

    def delete(self, obj):
        """
        Deletes a record
        """
        self.__session.delete(obj)
| ["[email protected]"] | |

35fe9e8d12cff46a0e0ea7b51843e2426507bb4a | 59e87634c67508bf7eba8c8b9845354aefa57bc7 | /ML/naiveBayes/bayes-titanic.py | b9caec4be2a8acf3fb164902e7017e85f90efa1c | [] | no_license | Caohengrui/MLAndDL | 48729b94b2232e628b699cf8d0d4a6c6e81a36f5 | d0637f58f45e9c091cd90bbfe9c207223d0994f3 | refs/heads/master | 2023-03-16T01:06:03.316463 | 2020-04-14T07:44:15 | 2020-04-14T07:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,446 | py |
"""
Author:wucng
Time: 20200110
Summary: 朴素贝叶斯对titanic数据分类
源代码: https://github.com/wucng/MLAndDL
参考:https://cuijiahua.com/blog/2017/11/ml_4_bayes_1.html
"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,MinMaxScaler
# from sklearn.neighbors import KNeighborsRegressor,KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score,auc
import pandas as pd
import numpy as np
from functools import reduce
from collections import Counter
import pickle,os,time
# 1.加载数据集(并做预处理)
def loadData(dataPath: str) -> tuple:
# 如果有标题可以省略header,names ;sep 为数据分割符
df = pd.read_csv(dataPath, sep=",")
# 填充缺失值
df["Age"] = df["Age"].fillna(df["Age"].median())
df['Embarked'] = df['Embarked'].fillna('S')
# df = df.fillna(0)
# 数据量化
# 文本量化
df.replace("male", 0, inplace=True)
df.replace("female", 1, inplace=True)
df.loc[df["Embarked"] == "S", "Embarked"] = 0
df.loc[df["Embarked"] == "C", "Embarked"] = 1
df.loc[df["Embarked"] == "Q", "Embarked"] = 2
# 划分出特征数据与标签数据
X = df.drop(["PassengerId","Survived","Name","Ticket","Cabin"], axis=1) # 特征数据
y = df.Survived # or df["Survived"] # 标签数据
# 数据归一化
X = (X - np.min(X, axis=0)) / (np.max(X, axis=0) - np.min(X, axis=0))
# 使用sklearn方式
# X = MinMaxScaler().transform(X)
# 查看df信息
# df.info()
# df.describe()
return (X.to_numpy(), y.to_numpy())
class NaiveBayesClassifier(object):
def __init__(self,save_file="model.ckpt"):
self.save_file = save_file
def fit(self,X:np.array,y:np.array):
if not os.path.exists(self.save_file):
# 计算分成每个类别的概率值
dict_y = dict(Counter(y))
dict_y = {k:v/len(y) for k,v in dict_y.items()}
# 计算每维特征每个特征值发生概率值
unique_label = list(set(y))
dict_feature_value={} # 每个特征每个值对应的概率
for col in range(len(X[0])):
data = X[...,col] # 每列特征
unique_val = list(set(data))
for val in unique_val:
dict_feature_value[str(col)+"_"+str(val)] = np.sum(data==val)/len(data)
dict_feature_value_label = {} # 每个类别发生对应的每个特征每个值的概率
for label in unique_label:
datas = X[y==label]
for col in range(len(datas[0])):
data = datas[..., col] # 每列特征
unique_val = list(set(data))
for val in unique_val:
dict_feature_value_label[str(label)+"_"+str(col)+"_"+str(val)]=np.sum(data==val)/len(data)
# save
result={"dict_y":dict_y,"dict_feature_value":dict_feature_value,
"dict_feature_value_label":dict_feature_value_label}
pickle.dump(result,open(self.save_file,"wb"))
# return dict_y,dict_feature_value,dict_feature_value_label
def __predict(self,X:np.array):
data = pickle.load(open(self.save_file,"rb"))
dict_y, dict_feature_value, dict_feature_value_label = data["dict_y"],data["dict_feature_value"],\
data["dict_feature_value_label"]
labels = sorted(list(dict_y.keys()))
# 计算每条数据分成每个类别的概率值
preds = np.zeros([len(X),len(labels)])
for i,x in enumerate(X):
for j,label in enumerate(labels):
p1 = 1
p2 = 1
for col,val in enumerate(x):
p1*= dict_feature_value_label[str(label)+"_"+str(col)+"_"+str(val)] if str(label)+"_"+str(col)+"_"+str(val) \
in dict_feature_value_label else self.__weighted_average(str(label)+"_"+str(col)+"_"+str(val),dict_feature_value_label) # self.__fixed_value()
p2*= dict_feature_value[str(col)+"_"+str(val)] if str(col)+"_"+str(val) in dict_feature_value else \
self.__weighted_average(str(col)+"_"+str(val),dict_feature_value) # self.__fixed_value()
preds[i,j] = p1*dict_y[label]/p2
return preds
def __fixed_value(self):
return 1e-3
def __weighted_average(self,key:str,data_dict:dict):
"""插值方式找到离该key对应的最近的data_dict中的key做距离加权平均"""
tmp = key.split("_")
value = float(tmp[-1])
if len(tmp)==3:
tmp_key = tmp[0]+"_"+tmp[1]+"_"
else:
tmp_key = tmp[0] + "_"
# 找到相关的key
# related_keys = []
values = [value]
for k in list(data_dict.keys()):
if tmp_key in k:
# related_keys.append(k)
values.append(float(k.split("_")[-1]))
# 做距离加权
values = sorted(values)
index = values.index(value)
# 取其前一个和后一个做插值
last = max(0,index-1)
next = min(index+1,len(values)-1)
if index==last or index==next:
return self.__fixed_value()
else:
d1=abs(values[last] - value)
d2=abs(values[next] - value)
v1 = data_dict[tmp_key+str(values[last])]
v2 = data_dict[tmp_key+str(values[next])]
# 距离加权 y=e^(-x)
return (np.log(d1)*v1+np.log(d2)*v2)/(np.log(d1)+np.log(d2))
def predict_proba(self,X:np.array):
return self.__predict(X)
def predict(self,X:np.array):
return np.argmax(self.__predict(X),-1)
def accuracy(self,y_true:np.array,y_pred:np.array)->float:
return round(np.sum(y_pred==y_true)/len(y_pred),5)
if __name__=="__main__":
dataPath = "../../dataset/titannic/train.csv"
X, y = loadData(dataPath)
# 划分训练集与测试集
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=40)
start = time.time()
clf = NaiveBayesClassifier()
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f"%(time.time()-start,clf.accuracy(y_test,y_pred)))
# cost time:0.089734(s) acc:0.771
# 使用sklearn 的GaussianNB
start = time.time()
clf = GaussianNB()
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.001023(s) acc:0.810
# 使用sklearn 的DecisionTreeClassifier
start = time.time()
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.008215(s) acc:0.816
# 使用sklearn 的RandomForestClassifier
start = time.time()
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.018951(s) acc:0.782
| ["[email protected]"] | |

38a90854558605e5a014f7e6272b4f3c11060c65 | 265a07a2becd232b292872d1d7136789463874be | /lianxi代码/erchashu.py | 5543da1004e52bdcd18148677402156b24dcc306 | [] | no_license | Lz0224/Python-exercise | f4918b8cd5f7911f0c35c0458c2269959937d07d | 3d09f54aebc653f4a5b36765b25c7241e3960764 | refs/heads/master | 2020-12-24T22:20:55.573019 | 2017-08-11T07:18:16 | 2017-08-11T07:18:16 | 100,005,776 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py |
#!/usr/bin/python
#coding=utf-8
'''
created by zwg in 2017-7-8
'''
import copy

class node(object):
    def __init__(self, name, data):
        self.data = data
        self.name = name
        self.Rchild = None
        self.Lchild = None
        self.child_number = 0
        self.parent = None

    def add_Rchild(self, node):
        if self.Rchild is not None:
            self.Rchild = node
        else:
            self.Rchild = node
            self.child_number += 1
        node.set_parent(self)

    def drop_Rchild(self):
        self.Rchild = None
        self.child_number -= 1

    def set_parent(self, node):
        self.parent = node

    def add_Lchild(self, node):
        if self.Lchild is not None:
            self.Lchild = node
        else:
            self.Lchild = node
            self.child_number += 1
        node.set_parent(self)

    def drop_Lchild(self):
        self.Lchild = None
        self.child_number -= 1

class tree(object):
    def __init__(self, node):
        self.parent = node
        self.depth = 1
        self.all_node = {node.name: node}  # visit child nodes recursively
        self.enable_node = {node.name: node}
        c1 = node.Rchild
        c2 = node.Lchild
        C = [c1, c2]
        B = [i for i in C if i is not None]
        if len(B) == 2:
            del self.enable_node[node.name]
        while len(B) != 0:
            self.depth += 1
            C = copy.copy(B)
            for i in B:
                C.remove(i)
                if i.Rchild is not None:
                    C.append(i.Rchild)
                if i.Lchild is not None:
                    C.append(i.Lchild)
| ["[email protected]"] | |

b8e02a80dd4ae30959b434085ed27933a2f54964 | ae3d0e3c2fb614d96f6c787583c6e2e4cb654ad4 | /leetcode/89. 格雷编码.py | 6efb740a93fba7e0c11adf3290a8e415330f35cf | [] | no_license | Cjz-Y/shuati | 877c3f162ff75f764aa514076caccad1b6b43638 | 9ab35dbffed7865e41b437b026f2268d133357be | refs/heads/master | 2023-02-02T10:34:05.705945 | 2020-12-14T01:41:39 | 2020-12-14T01:41:39 | 276,884,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py |
from typing import List


class Solution:
    def grayCode(self, n: int) -> List[int]:
        if n == 0:
            return [0]

        current = '0' * n
        ans = [current]
        use = set()
        use.add(current)

        while current:
            next = None
            sl = list(current)
            for i in range(len(current)):
                if sl[i] == '0':
                    sl[i] = '1'
                else:
                    sl[i] = '0'
                temp = ''.join(sl)
                if temp not in use:
                    use.add(temp)
                    next = temp
                    ans.append(temp)
                    break
                else:
                    if sl[i] == '0':
                        sl[i] = '1'
                    else:
                        sl[i] = '0'
            current = next

        ans = [int(item, 2) for item in ans]
        return ans
| ["[email protected]"] | |

0b573c7d0218cd57688f0d50721997333fe6315d | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/CreateTransitRouteTableAggregationRequest.py | 33fc8ff3c8ce5418023c4de179e733b508294cf5 | ["Apache-2.0"] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 4,095 | py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data


class CreateTransitRouteTableAggregationRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTransitRouteTableAggregation')
        self.set_method('POST')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):  # Long
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):  # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ClientToken(self):  # String
        return self.get_query_params().get('ClientToken')

    def set_ClientToken(self, ClientToken):  # String
        self.add_query_param('ClientToken', ClientToken)

    def get_TransitRouteTableAggregationDescription(self):  # String
        return self.get_query_params().get('TransitRouteTableAggregationDescription')

    def set_TransitRouteTableAggregationDescription(self, TransitRouteTableAggregationDescription):  # String
        self.add_query_param('TransitRouteTableAggregationDescription', TransitRouteTableAggregationDescription)

    def get_TransitRouteTableAggregationName(self):  # String
        return self.get_query_params().get('TransitRouteTableAggregationName')

    def set_TransitRouteTableAggregationName(self, TransitRouteTableAggregationName):  # String
        self.add_query_param('TransitRouteTableAggregationName', TransitRouteTableAggregationName)

    def get_DryRun(self):  # Boolean
        return self.get_query_params().get('DryRun')

    def set_DryRun(self, DryRun):  # Boolean
        self.add_query_param('DryRun', DryRun)

    def get_ResourceOwnerAccount(self):  # String
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):  # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_TransitRouteTableAggregationScope(self):  # String
        return self.get_query_params().get('TransitRouteTableAggregationScope')

    def set_TransitRouteTableAggregationScope(self, TransitRouteTableAggregationScope):  # String
        self.add_query_param('TransitRouteTableAggregationScope', TransitRouteTableAggregationScope)

    def get_OwnerAccount(self):  # String
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):  # String
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):  # Long
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):  # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_TransitRouteTableId(self):  # String
        return self.get_query_params().get('TransitRouteTableId')

    def set_TransitRouteTableId(self, TransitRouteTableId):  # String
        self.add_query_param('TransitRouteTableId', TransitRouteTableId)

    def get_TransitRouteTableAggregationCidr(self):  # String
        return self.get_query_params().get('TransitRouteTableAggregationCidr')

    def set_TransitRouteTableAggregationCidr(self, TransitRouteTableAggregationCidr):  # String
        self.add_query_param('TransitRouteTableAggregationCidr', TransitRouteTableAggregationCidr)
| ["[email protected]"] | |

6c6dace090ac4698a71aa96258aa378ca9e059f0 | aec9a1f3d1d36f19724e745ca4d09a20f67208dc | /matching/migrations/0006_auto_20210114_2030.py | 799634392703d79e913d7c68e61a37828e2927c9 | [] | no_license | endlessor/open-united-backend | b1b1c3411d0d48bc79b35895c70f24d773ac7344 | 86f6905cce14b834b6bf059fd33157249978bd14 | refs/heads/main | 2023-04-29T13:35:28.529360 | 2021-05-17T14:16:39 | 2021-05-17T14:16:39 | 368,211,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py |
# Generated by Django 3.1 on 2021-01-15 20:30

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('matching', '0005_auto_20210113_1839'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='taskclaimrequest',
            name='status',
        ),
        migrations.AddField(
            model_name='taskclaimrequest',
            name='kind',
            field=models.IntegerField(choices=[(0, 'New'), (1, 'Approved'), (2, 'Rejected')], default=0),
        ),
        migrations.AlterField(
            model_name='taskclaim',
            name='kind',
            field=models.IntegerField(choices=[(0, 'Done'), (1, 'Active'), (2, 'Failed')], default=0),
        ),
    ]
| ["[email protected]"] | |

c737c2c9df7e4e431e045cdd97aecd4aa4483742 | dcbb4a526f6cf6f490063a6e4b5f1353fda48a1f | /tf_agents/drivers/tf_driver.py | de16194c74cf80ce5e092c4607046a47a02b73ac | ["Apache-2.0"] | permissive | Bhaney44/agents | 91baf121188f35024c09435276d108600ba6f07e | 792d7c6e769d708f8b08d71926ccb9e8a880efef | refs/heads/master | 2023-08-09T03:51:16.188708 | 2023-07-21T17:50:18 | 2023-07-21T17:50:18 | 177,231,436 | 0 | 0 | Apache-2.0 | 2019-03-23T01:46:03 | 2019-03-23T01:46:02 | null | UTF-8 | Python | false | false | 5,527 | py |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Driver that steps a TF environment using a TF policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import driver
from tf_agents.environments import tf_environment
from tf_agents.policies import tf_policy
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
from tf_agents.utils import common
class TFDriver(driver.Driver):
"""A driver that runs a TF policy in a TF environment."""
def __init__(
self,
env: tf_environment.TFEnvironment,
policy: tf_policy.TFPolicy,
observers: Sequence[Callable[[trajectory.Trajectory], Any]],
transition_observers: Optional[Sequence[Callable[[trajectory.Transition],
Any]]] = None,
max_steps: Optional[types.Int] = None,
max_episodes: Optional[types.Int] = None,
disable_tf_function: bool = False):
"""A driver that runs a TF policy in a TF environment.
**Note** about bias when using batched environments with `max_episodes`:
When using `max_episodes != None`, a `run` step "finishes" when
`max_episodes` have been completely collected (hit a boundary).
When used in conjunction with environments that have variable-length
episodes, this skews the distribution of collected episodes' lengths:
short episodes are seen more frequently than long ones.
As a result, running an `env` of `N > 1` batched environments
with `max_episodes >= 1` is not the same as running an env with `1`
environment with `max_episodes >= 1`.
Args:
env: A tf_environment.Base environment.
policy: A tf_policy.TFPolicy policy.
observers: A list of observers that are notified after every step
in the environment. Each observer is a callable(trajectory.Trajectory).
transition_observers: A list of observers that are updated after every
step in the environment. Each observer is a callable((TimeStep,
PolicyStep, NextTimeStep)). The transition is shaped just as
trajectories are for regular observers.
max_steps: Optional maximum number of steps for each run() call. For
batched or parallel environments, this is the maximum total number of
steps summed across all environments. Also see below. Default: 0.
max_episodes: Optional maximum number of episodes for each run() call. For
batched or parallel environments, this is the maximum total number of
episodes summed across all environments. At least one of max_steps or
max_episodes must be provided. If both are set, run() terminates when at
least one of the conditions is
satisfied. Default: 0.
disable_tf_function: If True the use of tf.function for the run method is
disabled.
Raises:
ValueError: If both max_steps and max_episodes are None.
"""
common.check_tf1_allowed()
max_steps = max_steps or 0
max_episodes = max_episodes or 0
if max_steps < 1 and max_episodes < 1:
raise ValueError(
'Either `max_steps` or `max_episodes` should be greater than 0.')
super(TFDriver, self).__init__(env, policy, observers, transition_observers)
self._max_steps = max_steps or np.inf
self._max_episodes = max_episodes or np.inf
if not disable_tf_function:
self.run = common.function(self.run, autograph=True)
def run( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self, time_step: ts.TimeStep,
policy_state: types.NestedTensor = ()
) -> Tuple[ts.TimeStep, types.NestedTensor]:
"""Run policy in environment given initial time_step and policy_state.
Args:
time_step: The initial time_step.
policy_state: The initial policy_state.
Returns:
A tuple (final time_step, final policy_state).
"""
num_steps = tf.constant(0.0)
num_episodes = tf.constant(0.0)
while num_steps < self._max_steps and num_episodes < self._max_episodes:
action_step = self.policy.action(time_step, policy_state)
next_time_step = self.env.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
for observer in self._transition_observers:
observer((time_step, action_step, next_time_step))
for observer in self.observers:
observer(traj)
num_episodes += tf.math.reduce_sum(
tf.cast(traj.is_boundary(), tf.float32))
num_steps += tf.math.reduce_sum(tf.cast(~traj.is_boundary(), tf.float32))
time_step = next_time_step
policy_state = action_step.state
return time_step, policy_state
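# Usage sketch (illustrative, not part of this module): `env` is assumed to
# be a TF environment and `policy` a TFPolicy; any step-consuming callable
# (for example a replay buffer's `add_batch`) can serve as an observer.
#
#   driver = TFDriver(env, policy, observers=[replay_buffer.add_batch],
#                     max_steps=100)
#   time_step = env.reset()
#   policy_state = policy.get_initial_state(env.batch_size)
#   final_time_step, _ = driver.run(time_step, policy_state)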
|
[
"[email protected]"
] | |
9e25a76b082548ee94432dc821353a29a8e5f423
|
107973063f26b791ccd6deca0026acb338eb4d6b
|
/harvest.py
|
8631b158987a039be018791b790f53b2a123623b
|
[] |
no_license
|
sonya-sa/melon-objects
|
322b46138ee9287b74cf8eb50bae64f56eb50e23
|
a035db0be16e749a0654cc8518315f408efc72bc
|
refs/heads/master
| 2020-03-10T10:15:07.606336 | 2018-04-13T01:09:39 | 2018-04-13T01:09:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,986 |
py
|
############
# Part 1 #
############
class MelonType(object):
"""A species of melon at a melon farm."""
def __init__(self, code, first_harvest, color, is_seedless, is_bestseller, name):
"""Initialize a melon."""
self.code = code
self.first_harvest = first_harvest
self.color = color
self.is_seedless = is_seedless
self.is_bestseller = is_bestseller
self.name = name
self.pairings = []
# Fill in the rest
def add_pairing(self, pairing):
"""Add a food pairing to the instance's pairings list."""
self.pairings.extend(pairing)
# Fill in the rest
def update_code(self, new_code):
"""Replace the reporting code with the new_code."""
self.code = new_code
# Fill in the rest
def make_melon_types():
"""Returns a listmy of current melon types."""
all_melon_types = []
musk = MelonType('musk', 1998, 'green',
True, True, 'Muskmelon')
musk.add_pairing(['mint'])
all_melon_types.append(musk)
casaba = MelonType('cas', 2003, 'orange',
True, False, 'Casaba')
casaba.add_pairing(['mint', 'strawberries'])
all_melon_types.append(casaba)
crenshaw = MelonType('cren', 1996, 'green', True, False, 'Crenshaw')
crenshaw.add_pairing(['proscuitto'])
all_melon_types.append(crenshaw)
yellow_watermelon = MelonType('yw', 2013, 'yellow', True, True, 'Yellow Watermelon')
yellow_watermelon.add_pairing(['ice cream'])
all_melon_types.append(yellow_watermelon)
return all_melon_types
def print_pairing_info(melon_types):
"""Prints information about each melon type's pairings."""
# Fill in the rest
for melon_type in melon_types:
print "{} pairs well with".format(melon_type.name)
pairings = melon_type.pairings
for pairing in pairings:
print "- {}".format(pairing)
print ""
def make_melon_type_lookup(melon_types):
"""Takes a list of MelonTypes and returns a dictionary of melon type by code."""
codes = {}
for melon_type in melon_types:
codes[melon_type.code] = melon_type
# Fill in the rest
return codes
############
# Part 2 #
############
# all_melon_types = make_melon_types()
# make_melon
class Melon(object):
"""A melon in a melon harvest."""
    # Class-level lookup shared by every Melon instance.
    all_melon_types = make_melon_type_lookup(make_melon_types())
def __init__ (self, melon_code, shape_rating, color_rating, from_field, harvested_by):
self.melon_type = self.all_melon_types[melon_code]
self.shape_rating = shape_rating
self.color_rating = color_rating
self.from_field = from_field
self.harvested_by = harvested_by
    def is_sellable(self):
if (self.from_field != 3) and (self.shape_rating >= 5) and (self.color_rating >= 5):
return True
return False
# Fill in the rest
# Needs __init__ and is_sellable methods
def make_melons(melon_types):
"""Returns a list of Melon objects."""
# Fill in the rest
melon_objects = []
melon1 = Melon('yw', 8, 7, 2, 'Sheila')
melon_objects.append(melon1)
melon2 = Melon('yw', 3, 4, 2, 'Shei1a')
melon_objects.append(melon2)
melon3 = Melon('yw', 9, 8, 3, 'Sheila')
melon_objects.append(melon3)
melon4 = Melon('cas', 10, 6, 35, 'Sheila')
melon_objects.append(melon4)
melon5 = Melon('cren',8,9,35,'Michael')
melon_objects.append(melon5)
melon6 = Melon('cren', 8, 2, 35, 'Michael')
melon_objects.append(melon6)
melon7 = Melon('cren', 6,7,4, 'Michael')
melon_objects.append(melon7)
melon8 = Melon('musk', 6,7,4, 'Michael')
melon_objects.append(melon8)
melon9 = Melon('yw',7,10,3,'Sheila')
melon_objects.append(melon9)
return melon_objects
def get_sellability_report(melons):
    """Given a list of melon objects, prints whether each one is sellable."""
    # Fill in the rest
    for melon in melons:
        status = "(CAN BE SOLD)" if melon.is_sellable() else "(NOT SELLABLE)"
        print "Harvested by {} from Field {} {}".format(melon.harvested_by, melon.from_field, status)
|
[
"[email protected]"
] | |
d5d7bc6f783064bdf9f3c5a83dec9a899defc356
|
060967fa3e6e390ac0504172e6dea8421ffb9d98
|
/2022/python2022/aoc/day01.py
|
f8899599170d8fd6ebfed8fd5aa9f6cefed79066
|
[] |
no_license
|
mreishus/aoc
|
677afd18521b62c9fd141a45fec4b7bc844be259
|
e89db235837d2d05848210a18c9c2a4456085570
|
refs/heads/master
| 2023-02-22T12:00:52.508701 | 2023-02-09T04:37:50 | 2023-02-09T04:39:44 | 159,991,022 | 16 | 3 | null | 2023-01-05T10:00:46 | 2018-12-01T22:00:22 |
Python
|
UTF-8
|
Python
| false | false | 901 |
py
|
#!/usr/bin/env python
"""
Advent Of Code 2022 Day 1
https://adventofcode.com/2022/day/1
"""
from typing import List
import heapq
def parse(filename: str) -> List[int]:
"""
Parse the input file into a list of integers.
Each integer is the sum of the numbers in a block.
"""
with open(filename) as file:
lines = file.read().strip()
blocks = lines.split("\n\n")
return [parse_block(block) for block in blocks]
def parse_block(block: str) -> int:
"""
param block: '1000\n2000\n3000'
return: 6000
"""
return sum(int(line) for line in block.splitlines())
class Day01:
"""AoC 2022 Day 01"""
@staticmethod
def part1(filename: str) -> int:
data = parse(filename)
return max(data)
@staticmethod
def part2(filename: str) -> int:
data = parse(filename)
return sum(heapq.nlargest(3, data))
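
if __name__ == "__main__":
    # Example invocation (sketch): "input.txt" is a placeholder path for the
    # puzzle input, not part of the original module.
    print(Day01.part1("input.txt"))
    print(Day01.part2("input.txt"))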
|
[
"[email protected]"
] | |
9e011f833190c003c501b34093c98fea67323259
|
6bf492920985e3741440ba53e1c7f8426b66ac1f
|
/snakemake_rules/rules/gatk/gatk_combine_variants.smk
|
4aeb72ab60e819d714f462e05f027c1fd761730a
|
[
"MIT"
] |
permissive
|
ukaraoz/snakemake-rules
|
5b2ba7c9ec19d88b56067a46f66fd0c72e48c368
|
07e96afeb39307cdf35ecc8482dc1f8b62c120b9
|
refs/heads/master
| 2020-03-31T15:20:44.444006 | 2018-09-07T08:53:47 | 2018-09-07T08:53:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,300 |
smk
|
# -*- snakemake -*-
include: 'gatk.settings.smk'
include: 'gatk_variant_snp_JEXL_filtration.smk'
include: 'gatk_variant_indel_JEXL_filtration.smk'
config_default = {'gatk': {'combine_variants': _gatk_config_rule_default.copy()}}
update_config(config_default, config)
config = config_default
cmd = re.sub("-Xmx[0-9a-zA-Z]+", "-Xmx{mem}".format(mem=config['gatk']['combine_variants']['java_mem']), config['gatk']['cmd'])
rule gatk_combine_variants:
"""Run GATK CombineVariants to combine variant files.
The default rule combines files with suffixes filteredSNP.vcf and
filteredINDEL.vcf.
"""
wildcard_constraints:
suffix = "(.vcf|.vcf.gz)"
params: cmd = cmd + " -T " + COMBINE_VARIANTS,
options = " ".join(["-R", config['gatk']['combine_variants']['ref'],
config['gatk']['combine_variants']['options']]),
runtime = config['gatk']['combine_variants']['runtime']
input: "{prefix}.snp.filteredSNP{suffix}", "{prefix}.indel.filteredINDEL{suffix}"
output: "{prefix}.variants{suffix}"
threads: config['gatk']['combine_variants']['threads']
conda: "env.yaml"
shell: "command=\"{params.cmd} {params.options} $(echo {input} | sed -e 's/[^ ][^ ]*/-V &/g') -o {output}\"; eval \"${{command}}\""
|
[
"[email protected]"
] | |
bcdd0abe6750285e7fa6b8a7a95cdf85baaf302a
|
3bb1cf4309e0e6488aeb3e5ae8b78138cfdaa002
|
/kyopro_tenkei/90_54.py
|
8de332b75aa23b0227743cdd237feacaa92f0a7a
|
[] |
no_license
|
show2214/atcoder
|
18a2dd0c2167fadeda2725a67d2d68d593b0bef9
|
7aae17b41b07bece746b34258b9514e145186327
|
refs/heads/master
| 2022-06-27T19:17:46.514876 | 2022-06-19T23:21:48 | 2022-06-19T23:21:48 | 249,148,332 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
N, M = map(int, input().split())
# Bipartite graph: nodes 0..N-1 are the original vertices and nodes
# N..N+M-1 stand for the M groups; each member is linked to its group node.
g = [[] for _ in range(N + M)]
for i in range(M):
    input()  # group-size line (unused)
    for j in map(int, input().split()):
        g[N + i] += j - 1,
        g[j - 1] += N + i,
from collections import *
# BFS from vertex 0 over the expanded graph.
q = deque([0])
v = [0] + [-1] * (N + M)
while q:
    c = q.popleft()
    for b in g[c]:
        if v[b] < 0:
            v[b] = v[c] + 1
            q += b,
# Every real hop passes through a group node, so halve the distances
# (unreachable vertices stay -1, since -1 // 2 == -1).
print(*[i // 2 for i in v[:N]])
|
[
"[email protected]"
] | |
f1c5e69189bc8a90462b021c01db2e9eb96a1b0a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03239/s478967614.py
|
7b473b8df981d413d6bb9ee6fe7d2eb9b2bdec4c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
n, t = map(int, input().split())
ans = 100000
for i in range(n):
c, tt = map(int, input().split())
if tt <= t:
ans = min(ans, c)
if ans == 100000:
print("TLE")
else:
print(ans)
|
[
"[email protected]"
] | |
a18b89fb83c54798265c1232a5612a39c65e53ff
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/ke4FSMdG2XYxbGQny_5.py
|
3eb2cfb5e413340d184121753557a8220852eae5
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
def even_odd_transform(lst, n):
l=lst
if len(l)==0:
return l
for i in range(n):
for j in range(len(l)):
if l[j]%2==0:
l[j]=l[j]-2
else:
l[j]=l[j]+2
return l
|
[
"[email protected]"
] | |
9fce20f8fc036410b74c53272e3f3ba7e0bbea05
|
9468507c1beeb2cb69591889605ea155d2cb7a63
|
/mysite/urls.py
|
3c3cb29f215257dcd4b0b3f45a2b59dd078c5b1b
|
[] |
no_license
|
nimal54/drf-polls
|
2375e2f5b78670de40c72b51eb616a69e7f49a65
|
9b29230998146eb225e0cffa0703d6bed1cc876a
|
refs/heads/master
| 2020-04-25T00:21:14.952917 | 2018-03-16T11:54:53 | 2018-03-16T11:54:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 167 |
py
|
from django.urls import include, path
from django.contrib import admin
urlpatterns = [
path('api/', include('polls.urls')),
path('admin/', admin.site.urls),
]
|
[
"[email protected]"
] | |
dc07e4c5023b62bbac3b5ed25bf1cbde99182341
|
54516826a15e4588decd4a040c3f3ae73b1f49df
|
/supplier/admin.py
|
d4cd50319ed56dfbd6c7cc180afdbbb36f403d02
|
[] |
no_license
|
boyombo/shylock
|
9454b53ef285af692675be4fe7a176d1aa29ced1
|
c63ac02b3ee18160ec94c9e8462165eaf7e0f3b5
|
refs/heads/master
| 2021-05-05T11:10:13.523616 | 2018-02-06T08:10:47 | 2018-02-06T08:10:47 | 118,116,949 | 0 | 1 | null | 2018-02-06T08:10:48 | 2018-01-19T11:24:14 |
JavaScript
|
UTF-8
|
Python
| false | false | 147 |
py
|
from django.contrib import admin
from supplier.models import Supplier
@admin.register(Supplier)
class SupplierAdmin(admin.ModelAdmin):
pass
|
[
"[email protected]"
] | |
39ab273dae34141056fb99b2a557a0c095a9ee09
|
8cd90c5b92fe85158226de32b1fbb4c34ebd658b
|
/oscar_docdata/models.py
|
f3295ad74437b13549e68019e34d3e7aedc771ad
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
mvantellingen/django-oscar-docdata
|
772ec3db372f9571cf62932ad2fe945c65fd2d7f
|
983d3f8144e1feb67d4a2c5bb98b499e69e4ad44
|
refs/heads/master
| 2023-08-25T06:33:59.105290 | 2016-06-14T12:41:37 | 2016-06-14T12:41:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,043 |
py
|
from decimal import Decimal as D
from django.db import models
from django.utils.translation import ugettext_lazy as _
from oscar_docdata.managers import DocdataOrderManager
from . import appsettings
try:
from polymorphic.models import PolymorphicModel # django-polymorphic 0.8
except ImportError:
from polymorphic import PolymorphicModel
class DocdataOrder(models.Model):
"""
Tracking of the order which is sent to docdata.
"""
# Simplified internal status codes.
# Lowercased on purpose to avoid mixing the statuses together.
STATUS_NEW = 'new' # Initial state
STATUS_IN_PROGRESS = 'in_progress' # In the redirect phase
STATUS_PENDING = 'pending' # Waiting for user to complete payment (e.g. credit cards)
STATUS_PAID = 'paid' # End of story, paid!
STATUS_PAID_REFUNDED = 'paid_refunded' # Paid, and performed a partial refund
STATUS_CANCELLED = 'cancelled' # End of story, cancelled
STATUS_CHARGED_BACK = 'charged_back' # End of story, consumer asked for charge back
STATUS_REFUNDED = 'refunded' # End of story, refunded, merchant refunded
STATUS_EXPIRED = 'expired' # No results of customer, order was closed.
STATUS_UNKNOWN = 'unknown' # Help!
STATUS_CHOICES = (
(STATUS_NEW, _("New")),
(STATUS_IN_PROGRESS, _("In Progress")),
(STATUS_PENDING, _("Pending")),
(STATUS_PAID, _("Paid")),
(STATUS_PAID_REFUNDED, _("Paid, part refunded")),
(STATUS_CANCELLED, _("Cancelled")),
(STATUS_CHARGED_BACK, _("Charged back")),
(STATUS_REFUNDED, _("Refunded")),
(STATUS_EXPIRED, _("Expired")),
(STATUS_UNKNOWN, _("Unknown")),
)
merchant_name = models.CharField(_("Docdata account"), max_length=100, default=appsettings.DOCDATA_MERCHANT_NAME)
merchant_order_id = models.CharField(_("Order ID"), max_length=100, default='')
order_key = models.CharField(_("Payment cluster ID"), max_length=200, default='', unique=True)
status = models.CharField(_("Status"), max_length=50, choices=STATUS_CHOICES, default=STATUS_NEW)
language = models.CharField(_("Language"), max_length=5, blank=True, default='en')
# Track sent information
total_gross_amount = models.DecimalField(_("Total gross amount"), max_digits=15, decimal_places=2)
currency = models.CharField(_("Currency"), max_length=10)
country = models.CharField(_("Country_code"), max_length=2, null=True, blank=True)
# Track received information
total_registered = models.DecimalField(_("Total registered"), max_digits=15, decimal_places=2, default=D('0.00'))
total_shopper_pending = models.DecimalField(_("Total shopper pending"), max_digits=15, decimal_places=2, default=D('0.00'))
total_acquirer_pending = models.DecimalField(_("Total acquirer pending"), max_digits=15, decimal_places=2, default=D('0.00'))
total_acquirer_approved = models.DecimalField(_("Total acquirer approved"), max_digits=15, decimal_places=2, default=D('0.00'))
total_captured = models.DecimalField(_("Total captured"), max_digits=15, decimal_places=2, default=D('0.00'))
total_refunded = models.DecimalField(_("Total refunded"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_charged_back = models.DecimalField(_("Total charged back"), max_digits=15, decimal_places=2, default=D('0.00'))
# Internal info.
created = models.DateTimeField(_("created"), auto_now_add=True)
updated = models.DateTimeField(_("updated"), auto_now=True)
objects = DocdataOrderManager()
class Meta:
ordering = ('-created', '-updated')
verbose_name = _("Docdata Order")
verbose_name_plural = _("Docdata Orders")
def __unicode__(self):
return self.order_key
def __repr__(self):
return "<DocdataOrder: {0}, {1} status={2}>".format(self.order_key, self.merchant_order_id, self.status)
@property
def latest_payment(self):
try:
return self.payments.order_by('-payment_id').all()[0]
except IndexError:
return None
def cancel(self):
"""
Cancel an order in Docdata.
"""
from .facade import get_facade
facade = get_facade()
facade.cancel_order(self)
cancel.alters_data = True
class DocdataPayment(PolymorphicModel):
"""
A reported Docdata payment.
This is a summarized version of a Docdata payment transaction,
as returned by the status API call.
Some payment types have additional fields, which are stored as subclass.
"""
docdata_order = models.ForeignKey(DocdataOrder, related_name='payments')
payment_id = models.CharField(_("Payment id"), max_length=100, default='', blank=True, primary_key=True)
# Note: We're not using choices here so that we can write unknown statuses if they are presented by Docdata.
status = models.CharField(_("status"), max_length=30, default='NEW')
# The payment method id from Docdata (e.g. IDEAL, MASTERCARD, etc)
payment_method = models.CharField(max_length=60, default='', blank=True)
# Track the various amounts associated with this source
confidence_level = models.CharField(_("Confidence level"), max_length=30, default='', editable=False)
amount_allocated = models.DecimalField(_("Amount Allocated"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
amount_debited = models.DecimalField(_("Amount Debited"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
amount_refunded = models.DecimalField(_("Amount Refunded"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
    amount_chargeback = models.DecimalField(_("Amount Charged back"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
# Internal info.
created = models.DateTimeField(_("created"), auto_now_add=True)
updated = models.DateTimeField(_("updated"), auto_now=True)
def __unicode__(self):
return self.payment_id
class Meta:
ordering = ('payment_id',)
verbose_name = _("Payment")
verbose_name_plural = _("Payments")
# NOTE: currently unused.
# DirectDebit is used for periodic transfers (e.g. "Automatische incasso" in The Netherlands)
class DocdataDirectDebitPayment(DocdataPayment):
"""
Web direct debit direct payment.
"""
holder_name = models.CharField(max_length=35) # max_length from Docdata
holder_city = models.CharField(max_length=35) # max_length from Docdata
holder_country_code = models.CharField(_("Country_code"), max_length=2, null=True, blank=True)
# Note: there is django-iban for validated versions of these fields.
# Not needed here.
iban = models.CharField(max_length=34)
bic = models.CharField(max_length=11)
class Meta:
ordering = ('-created', '-updated')
verbose_name = _("Direct Debit Payment")
verbose_name_plural = _("Derect Debit Payments")
|
[
"[email protected]"
] | |
264aa98cdced1e3a3b21e731910d92a4f81a7489
|
5db3d51ff9a0bd7647c2315a358cb4ec9299d9d5
|
/analyzeBusReportFnv2.py
|
f24d495ec4b04167e7b50dce7763a807fe53f163
|
[] |
no_license
|
bikiranguha/Thesis_project
|
866385f51bd476448730c8169eb0b3c1dacba84e
|
1a52ba0fed86afb522bda067b8011b6940b4088d
|
refs/heads/master
| 2020-03-31T06:52:16.627848 | 2018-12-28T02:59:33 | 2018-12-28T02:59:33 | 151,997,984 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,684 |
py
|
"""
Function which generates a bus flow report of comed buses
"""
def BusReport(flowReportFile,Raw):
from getBusDataFn import getBusData
BusDataDict = getBusData(Raw)
ComedPlusBoundarySet = set()
flowDict = {}
#FromBusLines = []
#ToBusLines = []
class flowReport(object):
def __init__(self):
self.toBusList = []
self.MWList = []
self.MVARList = []
self.MVAList = []
self.cktID = []
"""
with open(Raw,'r') as f:
filecontent = f.read()
fileLines = filecontent.split('\n')
branchStartIndex = fileLines.index('0 / END OF GENERATOR DATA, BEGIN BRANCH DATA') + 1
branchEndIndex = fileLines.index('0 / END OF BRANCH DATA, BEGIN TRANSFORMER DATA')
for i in range(branchStartIndex, branchEndIndex):
line = fileLines[i]
words = line.split(',')
Bus1 = words[0].strip()
Bus2 = words[1].strip()
try:
Bus1Area = BusDataDict[Bus1].area
Bus2Area = BusDataDict[Bus2].area
except: # for buses '243083' and '638082'
continue
if Bus1Area == '222' and Bus2Area == '222':
ComedPlusBoundarySet.add(Bus1)
ComedPlusBoundarySet.add(Bus2)
if Bus1Area == '222' and Bus2Area != '222':
ComedPlusBoundarySet.add(Bus1)
ComedPlusBoundarySet.add(Bus2)
if Bus1Area != '222' and Bus2Area == '222':
ComedPlusBoundarySet.add(Bus1)
ComedPlusBoundarySet.add(Bus2)
for Bus in BusDataDict:
area = BusDataDict[Bus].area
if area == '222':
ComedPlusBoundarySet.add(Bus)
"""
with open(flowReportFile,'r') as f:
filecontent = f.read()
fileLines = filecontent.split('\n')
indices = [i for i, line in enumerate(fileLines) if line.startswith('BUS')]
for i in indices:
#print i
line = fileLines[i]
FromBus = line[4:10].strip()
"""
if FromBus not in ComedPlusBoundarySet:
continue
"""
flowDict[FromBus] = flowReport()
i+=2
line = fileLines[i]
while not 'M I S M A T C H' in line:
if 'RATING' in line:
break
if 'GENERATION' in line or 'LOAD' in line or 'SHUNT' in line:
i+=1
line = fileLines[i]
continue
toBus = line[4:10].strip()
MW=float(line[34:42].strip())
MVAR=float(line[42:50].strip())
cktID = line[31:34]
#print toBus
flowDict[FromBus].toBusList.append(toBus)
flowDict[FromBus].MWList.append(MW)
flowDict[FromBus].MVARList.append(MVAR)
flowDict[FromBus].cktID.append(cktID)
#ToBusLines.append(toBus)
i+=1
if i >=len(fileLines):
break
line = fileLines[i]
return flowDict
"""
with open('tmp.txt','w') as f:
for Bus in ToBusLines:
f.write(Bus)
f.write('\n')
"""
if __name__ == '__main__':
flowReportFile = 'BusReportsRawCropped_0723.txt'
Raw = 'RawCropped_0723v2.raw'
flowDict = BusReport(flowReportFile,Raw)
|
[
"[email protected]"
] | |
a6fb2197fbf80b1c53e59f37f84370f5749ed5e1
|
b5dd8d1b798c94731a84c02d98aafb9147200a85
|
/sequence_labeling/SLBaselineSYNLinear/data/Instance.py
|
6ed47e34116c6f3ff8176de9230c270b70bc070a
|
[] |
no_license
|
zhangmeishan/DepSAWR
|
1ae348dd04ec5e46bc5a75c8972b4bc4008528fe
|
104f44fd962a42fdee9b1a9332997d35e8461ff4
|
refs/heads/master
| 2021-07-09T20:56:56.897774 | 2020-10-27T05:41:08 | 2020-10-27T05:41:08 | 206,974,879 | 15 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,126 |
py
|
class Word:
def __init__(self, id, form, label):
self.id = id
self.org_form = form
self.form = form.lower()
self.label = label
# 1 indicates word, 0 indicates syn
self.wtype = 0 if label == "###" else 1
def __str__(self):
values = [str(self.id), self.org_form, self.label]
return '\t'.join(values)
class Sentence:
def __init__(self, words):
self.words = list(words)
self.length = len(self.words)
self.key_head = -1
self.key_start = -1
self.key_end = -1
self.key_label = ""
self.span = False
self.wkey_head = -1
self.wkey_start = -1
self.wkey_end = -1
self.wlength, self.forms, self.labels = 0, [], []
self.wposis, self.r_wposis = [], []
for idx in range(self.length):
if words[idx].wtype == 1:
self.wlength = self.wlength + 1
self.forms.append(words[idx].org_form)
self.labels.append(words[idx].label)
num_words = len(self.wposis)
self.r_wposis.append(num_words)
self.wposis.append(idx)
else:
self.r_wposis.append(-1)
self.sentence = ' '.join(self.forms)
for idx in range(self.length):
if words[idx].label.endswith("-*"):
self.key_head = idx
self.wkey_head = self.r_wposis[idx]
self.key_label = words[idx].label[2:-2]
break
if self.key_head != -1:
self.span = True
for idx in range(self.length):
cur_label = words[idx].label
if cur_label.startswith("B-"+self.key_label) \
or cur_label.startswith("S-"+self.key_label):
self.key_start = idx
self.wkey_start = self.r_wposis[idx]
if cur_label.startswith("E-"+self.key_label) \
or cur_label.startswith("S-"+self.key_label):
self.key_end = idx
self.wkey_end = self.r_wposis[idx]
else:
self.key_start, self.wkey_start = self.length, self.wlength
self.key_end, self.wkey_end = -1, -1
def label_to_entity(labels):
length = len(labels)
entities = set()
idx = 0
while idx < length:
if labels[idx] == "O":
idx = idx + 1
elif labels[idx].startswith("B-"):
label = labels[idx][2:]
predict = False
if label.endswith("-*"):
label = label[0:-2]
predict = True
next_idx = idx + 1
end_idx = idx
while next_idx < length:
if labels[next_idx] == "O" or labels[next_idx].startswith("B-") \
or labels[next_idx].startswith("S-"):
break
next_label = labels[next_idx][2:]
if next_label.endswith("-*"):
next_label = next_label[0:-2]
predict = True
if next_label != label:
break
end_idx = next_idx
next_idx = next_idx + 1
if end_idx == idx:
new_label = "S-" + labels[idx][2:]
print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
if not predict:
entities.add("[%d,%d]%s"%(idx, end_idx, label))
idx = end_idx + 1
elif labels[idx].startswith("S-"):
label = labels[idx][2:]
predict = False
if label.endswith("-*"):
label = label[0:-2]
predict = True
if not predict:
entities.add("[%d,%d]%s"%(idx, idx, label))
idx = idx + 1
elif labels[idx].startswith("M-"):
new_label = "B-" + labels[idx][2:]
print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
else:
new_label = "S-" + labels[idx][2:]
print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
return entities
def normalize_labels(labels):
length = len(labels)
change = 0
normed_labels = []
for idx in range(length):
normed_labels.append(labels[idx])
idx = 0
while idx < length:
if labels[idx] == "O":
idx = idx + 1
elif labels[idx].startswith("B-"):
label = labels[idx][2:]
if label.endswith("-*"):
label = label[0:-2]
next_idx = idx + 1
end_idx = idx
while next_idx < length:
if labels[next_idx] == "O" or labels[next_idx].startswith("B-") \
or labels[next_idx].startswith("S-"):
break
next_label = labels[next_idx][2:]
if next_label.endswith("-*"):
next_label = next_label[0:-2]
if next_label != label:
break
end_idx = next_idx
next_idx = next_idx + 1
if end_idx == idx:
new_label = "S-" + labels[idx][2:]
# print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
normed_labels[idx] = new_label
change = change + 1
idx = end_idx + 1
elif labels[idx].startswith("S-"):
idx = idx + 1
elif labels[idx].startswith("M-"):
new_label = "B-" + labels[idx][2:]
# print("Change %s to %s" % (labels[idx], new_label))
normed_labels[idx] = new_label
labels[idx] = new_label
change = change + 1
else:
new_label = "S-" + labels[idx][2:]
# print("Change %s to %s" % (labels[idx], new_label))
normed_labels[idx] = new_label
labels[idx] = new_label
change = change + 1
return normed_labels, change
def evalInstance(gold, predict):
glength, plength = gold.length, predict.length
if glength != plength:
raise Exception('gold length does not match predict length.')
gold_entity_num, predict_entity_num, correct_entity_num = 0, 0, 0
goldlabels, predictlabels = gold.labels, predict.labels
if gold.span:
gold_entities = label_to_entity(goldlabels)
predict_entities = label_to_entity(predictlabels)
gold_entity_num, predict_entity_num = len(gold_entities), len(predict_entities)
for one_entity in gold_entities:
if one_entity in predict_entities:
correct_entity_num = correct_entity_num + 1
else:
gold_entity_num, predict_entity_num = len(goldlabels), len(predictlabels)
for idx in range(glength):
if goldlabels[idx] == predictlabels[idx]:
correct_entity_num = correct_entity_num + 1
return gold_entity_num, predict_entity_num, correct_entity_num
def readInstance(file):
min_count = 1
total = 0
words = []
for line in file:
tok = line.strip().split('\t')
if not tok or line.strip() == '' or line.strip().startswith('#'):
if len(words) > min_count:
total += 1
yield Sentence(words)
words = []
elif len(tok) == 3:
try:
words.append(Word(int(tok[0]), tok[1], tok[2]))
except Exception:
pass
else:
pass
if len(words) > min_count:
total += 1
yield Sentence(words)
print("Total num: ", total)
def writeInstance(filename, sentences):
with open(filename, 'w') as file:
for sentence in sentences:
for entry in sentence.words:
file.write(str(entry) + '\n')
file.write('\n')
def printInstance(output, sentence):
for entry in sentence.words:
output.write(str(entry) + '\n')
output.write('\n')
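
if __name__ == '__main__':
    # Small illustration (not part of the original file): a B-/E- span is
    # collapsed into a single entity encoded as "[start,end]label", and a
    # lone S- tag becomes a one-token entity.
    print(label_to_entity(['B-PER', 'E-PER', 'O', 'S-LOC']))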
|
[
"[email protected]"
] | |
4d59b6d7525d2424cccd3c6215409bdfb7e78f33
|
171a89102edf10901e18a2c0f41c3313608d2324
|
/src/rogerthat/bizz/job/unschedule_service_api_callback_records.py
|
0a04dd74317213aea6716a58732b45ec57e5498c
|
[
"Apache-2.0"
] |
permissive
|
gitter-badger/rogerthat-backend
|
7e9c12cdd236ef59c76a62ac644fcd0a7a712baf
|
ab92dc9334c24d1b166972b55f1c3a88abe2f00b
|
refs/heads/master
| 2021-01-18T06:08:11.435313 | 2016-05-11T08:50:20 | 2016-05-11T08:50:20 | 58,615,985 | 0 | 0 | null | 2016-05-12T06:54:07 | 2016-05-12T06:54:07 | null |
UTF-8
|
Python
| false | false | 1,170 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.1@@
from rogerthat.dal.service import get_service_api_callback_records_query
from google.appengine.ext import db, deferred
def run(service_user, cursor=None):
query = get_service_api_callback_records_query(service_user)
query.with_cursor(cursor)
records = query.fetch(100)
put = list()
for rec in records:
rec.timestamp = 0 - abs(rec.timestamp)
put.append(rec)
db.put(put)
if len(records) > 0:
return deferred.defer(run, service_user, query.cursor(), _transactional=db.is_in_transaction())
|
[
"[email protected]"
] | |
7e72fb11137d1cc82500a43c590445b6d4222f54
|
11334e46d3575968de5062c7b0e8578af228265b
|
/Projects/subsumption_lewis/test_escape_behavior.py
|
4a60ca86e27a79e1aadad1e7cc150c9a55c47a09
|
[] |
no_license
|
slowrunner/Carl
|
99262f16eaf6d53423778448dee5e5186c2aaa1e
|
1a3cfb16701b9a3798cd950e653506774c2df25e
|
refs/heads/master
| 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 145,750,624 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 |
Roff
|
UTF-8
|
Python
| false | false | 1,653 |
py
|
#!/usr/bin/env python3
"""
FILE: test_escape_behavior.py
PURPOSE: Test an subsumption architecture escape behavior
REFERENCES:
"Mobile Robots: Inspiration To Implementation", Jones, Flynn, Seiger p318
"""
import subsumption
import time
import logging
subsumption.inhibit_scan = False
subsumption.inhibit_drive = False
subsumption.TALK = False
def stop():
subsumption.mot_trans = 0
subsumption.mot_rot = 0
time.sleep(3)
def test_escape_behavior():
logging.info("==== TEST ESCAPE BEHAVIOR ====")
subsumption.say("Escape Behavior Test Will Begin In 5 seconds")
time.sleep(5)
try:
while True:
time.sleep(1.0)
except KeyboardInterrupt:
logging.info("==== ESCAPE BEHAVIOR TEST COMPLETE ====")
subsumption.say("Escape Behavior Test Complete")
# MAIN
def main():
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(funcName)s: %(message)s')
logging.info("==== TEST SUBSUMPTION ====")
subsumption.say("Test subsumption.")
try:
subsumption.setup()
# while True:
# do main things
test_escape_behavior()
except KeyboardInterrupt:
print("")
msg="Ctrl-C Detected in Main"
logging.info(msg)
subsumption.say(msg)
except Exception as e:
logging.info("Handling main exception: %s",e)
finally:
subsumption.teardown()
logging.info("==== Subsumption Test Done ====")
subsumption.say("Subsumption test done")
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
c42ee65059fd84127b788c9f61f22f6091572c64
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week01/hoework01/gettop10frommaoyam01_20200625172155.py
|
6673c7bd655b35c14f885d7566123eee9d12b9b9
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null |
UTF-8
|
Python
| false | false | 1,024 |
py
|
# Use the requests and bs4 libraries to scrape the title, genre, and release date of each of Maoyan's top-10 movies, and save them to a CSV file in the UTF-8 character set
import requests
maoyanUrl = "https://maoyan.com/films?showType=3"
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'Accept': "*/*",
          'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-AU,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,la;q=0.6',
'Content-Type': 'text/plain',
'Connection': 'keep-alive',
# 'Host': 'wreport1.meituan.net',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/films?showType=3',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'cross-site',
}
response = requests.get(maoyanUrl,headers=header)
response.encoding = 'utf-8'
print(response.text)
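
# Next step sketched from the header comment: parse the page with bs4 and
# save the top-10 titles to a UTF-8 CSV. The 'div.movie-item-title a'
# selector is an assumption about Maoyan's markup, not verified.
from bs4 import BeautifulSoup
import csv

soup = BeautifulSoup(response.text, 'html.parser')
titles = [a.get_text(strip=True) for a in soup.select('div.movie-item-title a')][:10]
with open('maoyan_top10.csv', 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['title'])
    for t in titles:
        writer.writerow([t])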
|
[
"[email protected]"
] | |
7b95fcc33b3aa2249ed1f27138745f475927c2d6
|
cf14b6ee602bff94d3fc2d7e712b06458540eed7
|
/gs82/gs82/urls.py
|
0aecc6d4eeb66d7fa733fff9c8bcaddef8e0841a
|
[] |
no_license
|
ManishShah120/Learning-Django
|
8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27
|
8fe70723d18884e103359c745fb0de5498b8d594
|
refs/heads/master
| 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 307 |
py
|
from django.contrib import admin
from django.urls import path
from enroll import views
from django.views.decorators.cache import cache_page
urlpatterns = [
path('admin/', admin.site.urls),
path('', cache_page(30)(views.home)),
path('home/', views.home),
path('contact/', views.contact),
]
|
[
"[email protected]"
] | |
22a5082162b8e3e3900e02a08ce7e3931b946ac7
|
f6faeb43b394bebb2c66b270ece4a5422cece0f6
|
/Input.py
|
0a45338c9ddf096ffbf6f1a13214ef459aedce03
|
[] |
no_license
|
Khun-Cho-Lwin/Programming-Basic-with-Python
|
a57b6445d0fdfca23017aa691208899935fcf5e7
|
1e8cc924143771b7737bb54ad8f04ae5b88c1e81
|
refs/heads/master
| 2022-11-13T05:56:11.881552 | 2020-06-29T21:58:29 | 2020-06-29T21:58:29 | 267,246,983 | 0 | 4 | null | 2020-06-29T08:00:57 | 2020-05-27T07:10:11 |
Python
|
UTF-8
|
Python
| false | false | 166 |
py
|
input1 = int(input("Please enter first number:"))
input2 = int(input("Please enter second number:"))
result = input1 + input2
print(input1,"+",input2,"=",result)
|
[
"[email protected]"
] | |
fbcf4dff0606fafa97cc778c0778a49cc9e1a8e6
|
8830831a87f35ff2628f379d8230928ec6b5641a
|
/Homedepot/code/stem2.py
|
3a3c13edf6434f0161556c5b49e294bd64829972
|
[] |
no_license
|
nickmcadden/Kaggle
|
e5882c9d68a81700d8d969328d91c059a0643868
|
cbc5347dec90e4bf64d4dbaf28b8ffb362efc64f
|
refs/heads/master
| 2019-07-18T08:09:40.683168 | 2018-01-26T14:35:38 | 2018-01-26T14:35:38 | 40,735,982 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,710 |
py
|
import pandas as pd
import numpy as np
from nltk.stem.snowball import EnglishStemmer
from nltk.tokenize import wordpunct_tokenize
import sys
import csv
reload(sys)
sys.setdefaultencoding('ISO-8859-1')
stemmer = EnglishStemmer()
print("Reading data\n")
train = pd.read_csv('./input/train.csv', encoding="ISO-8859-1")
test = pd.read_csv('./input/test.csv', encoding="ISO-8859-1")
desc = pd.read_csv('./input/product_descriptions.csv', encoding="ISO-8859-1")
print("Stemming train file\n")
for index, row in train.iterrows():
train.ix[index,'product_title'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['product_title'])])
train.ix[index,'search_term'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['search_term'])])
if index % 1000 == 0:
print(index)
train.to_csv('./input/train_stemmed_snowball.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)
print("\nStemming test file\n")
for index, row in test.iterrows():
test.ix[index,'product_title'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['product_title'])])
test.ix[index,'search_term'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['search_term'])])
if index % 1000 == 0:
print(index)
test.to_csv('./input/test_stemmed_snowball.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)
'''
print("\nStemming description file\n")
for index, row in desc.iterrows():
desc.ix[index,'product_description'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['product_description'])])
if index % 1000 == 0:
print(index)
desc.to_csv('./input/desc_stemmed_snowball.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)
'''
|
[
"[email protected]"
] | |
b217ba63eaddc9616214a06e614c6246f5c30edf
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/restore_request.py
|
e662315f9bdfdbec34fe2249cdb69996c797c338
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 |
MIT
| 2019-10-25T15:56:00 | 2014-06-27T19:40:56 |
Python
|
UTF-8
|
Python
| false | false | 1,055 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RestoreRequest(Model):
"""Base class for restore request. Workload-specific restore requests are
derived from this class.
:param object_type: Polymorphic Discriminator
:type object_type: str
"""
_validation = {
'object_type': {'required': True},
}
_attribute_map = {
'object_type': {'key': 'objectType', 'type': 'str'},
}
_subtype_map = {
'object_type': {'IaasVMRestoreRequest': 'IaasVMRestoreRequest'}
}
def __init__(self):
self.object_type = None
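
# Deserialization sketch (illustrative, not part of the generated code):
# msrest consults `_subtype_map` to pick the concrete class from the wire
# payload's discriminator field, so e.g.
#   RestoreRequest.deserialize({'objectType': 'IaasVMRestoreRequest', ...})
# would yield an IaasVMRestoreRequest instance.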
|
[
"[email protected]"
] | |
4b7ad1257588f9d861614a07ee2bc059ad96ebde
|
b34f07d217cdda9f59e7f58f89dad17fae1ee132
|
/malaya_speech/model/frame.py
|
95fde8af773361726a61fb74e10e57b9e3e60f0e
|
[
"MIT"
] |
permissive
|
Ariffleng/malaya-speech
|
965cea504e364c77ca513d43bf340fc122b97672
|
4343c409340c608a426cc6f0926fbe2c1661783e
|
refs/heads/master
| 2023-08-12T23:23:39.983006 | 2021-10-02T09:14:52 | 2021-10-02T09:14:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,386 |
py
|
import numpy as np
from dataclasses import dataclass
SEGMENT_PRECISION = 1e-6
class Frame:
def __init__(self, array, timestamp, duration):
if not isinstance(array, np.ndarray):
array = np.array(array)
self.array = array
self.timestamp = timestamp
self.duration = duration
@dataclass(frozen=True, order=True)
class Segment:
start: float = 0.0
end: float = 0.0
def __bool__(self):
return bool((self.end - self.start) > SEGMENT_PRECISION)
@property
def duration(self) -> float:
"""
Segment duration (read-only)
"""
return self.end - self.start if self else 0.0
@property
def middle(self) -> float:
"""Segment mid-time (read-only)"""
return 0.5 * (self.start + self.end)
def __contains__(self, other: 'Segment'):
"""Inclusion
>>> segment = Segment(start=0, end=10)
        >>> Segment(start=3, end=10) in segment
        True
        >>> Segment(start=5, end=15) in segment
        False
"""
return (self.start <= other.start) and (self.end >= other.end)
def __and__(self, other):
"""
Intersection
>>> segment = Segment(0, 10)
>>> other_segment = Segment(5, 15)
>>> segment & other_segment
<Segment(5, 10)>
Note
----
When the intersection is empty, an empty segment is returned:
>>> segment = Segment(0, 10)
>>> other_segment = Segment(15, 20)
>>> intersection = segment & other_segment
>>> if not intersection:
... # intersection is empty.
"""
start = max(self.start, other.start)
end = min(self.end, other.end)
return Segment(start=start, end=end)
def intersects(self, other: 'Segment') -> bool:
"""
Check whether two segments intersect each other
Parameters
----------
other : Segment
Other segment
Returns
-------
intersect : bool
True if segments intersect, False otherwise
"""
return (
(
self.start < other.start
and other.start < self.end - SEGMENT_PRECISION
)
or (
self.start > other.start
and self.start < other.end - SEGMENT_PRECISION
)
or (self.start == other.start)
)
def overlaps(self, t: float):
"""
Check if segment overlaps a given time
Parameters
----------
t : float
Time, in seconds.
Returns
-------
overlap: bool
True if segment overlaps time t, False otherwise.
"""
return self.start <= t and self.end >= t
def __or__(self, other):
"""
Union
>>> segment = Segment(0, 10)
>>> other_segment = Segment(5, 15)
>>> segment | other_segment
<Segment(0, 15)>
Note
----
        When a gap exists between the segments, their union covers the gap as well:
        >>> segment = Segment(0, 10)
        >>> other_segment = Segment(15, 20)
        >>> segment | other_segment
        <Segment(0, 20)>
"""
if not self:
return other
if not other:
return self
start = min(self.start, other.start)
end = max(self.end, other.end)
return Segment(start=start, end=end)
def __xor__(self, other):
"""
Gap
>>> segment = Segment(0, 10)
>>> other_segment = Segment(15, 20)
>>> segment ^ other_segment
        <Segment(10, 15)>
Note
----
The gap between a segment and an empty segment is not defined.
>>> segment = Segment(0, 10)
>>> empty_segment = Segment(11, 11)
>>> segment ^ empty_segment
ValueError: The gap between a segment and an empty segment is not defined.
"""
if (not self) or (not other):
raise ValueError(
'The gap between a segment and an empty segment '
'is not defined.'
)
start = min(self.end, other.end)
end = max(self.start, other.start)
return Segment(start=start, end=end)
def _str_helper(self, seconds: float):
from datetime import timedelta
negative = seconds < 0
seconds = abs(seconds)
td = timedelta(seconds=seconds)
seconds = td.seconds + 86400 * td.days
microseconds = td.microseconds
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return '%s%02d:%02d:%02d.%03d' % (
'-' if negative else ' ',
hours,
minutes,
seconds,
microseconds / 1000,
)
def __str__(self):
"""
Human-readable representation
        >>> print(Segment(1337, 1337 + 0.42))
        <Segment(1337, 1337.42)>
"""
return '<Segment(%g, %g)>' % (self.start, self.end)
def __repr__(self):
"""
Computer-readable representation
>>> Segment(1337, 1337 + 0.42)
<Segment(1337, 1337.42)>
"""
return '<Segment(%g, %g)>' % (self.start, self.end)
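
if __name__ == '__main__':
    # Minimal self-check (not part of the original module) exercising the
    # set-like operators documented above.
    a, b = Segment(0, 10), Segment(5, 15)
    assert (a & b) == Segment(5, 10)      # intersection
    assert (a | b) == Segment(0, 15)      # union
    assert a.intersects(b) and a.overlaps(7.5)
    print(a ^ Segment(15, 20))            # gap -> <Segment(10, 15)>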
|
[
"[email protected]"
] | |
56d61b52a986db759e27b224e6f9af02a912baf9
|
b2605c93db0c5b3dd0ac7f7cfa80674e82ff9439
|
/sandbox/filter-max255.py
|
ee9f129f2aff3855cfde263f3b5c214ef661e5e1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
adnbsr/khmer
|
76728708b60a5662e93b83c6559502d31b92445d
|
64612c1140d17c0988fa01f3c6c627913b509700
|
refs/heads/master
| 2021-01-18T13:20:23.385284 | 2013-08-01T21:13:42 | 2013-08-01T21:13:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,254 |
py
|
import sys, screed.fasta, os
import khmer
from khmer.thread_utils import ThreadedSequenceProcessor, verbose_fastq_iter
K = 32
HT_SIZE=4e9
N_HT=4
WORKER_THREADS=8
GROUPSIZE=100
###
def main():
repfile = sys.argv[1]
infile = sys.argv[2]
outfile = os.path.basename(infile) + '.fno255'
if len(sys.argv) >= 4:
outfile = sys.argv[3]
print 'file to count from: %s' % repfile
print 'input file to filter: %s' % infile
print 'filtering to output:', outfile
print '-- settings:'
print 'K', K
print 'N THREADS', WORKER_THREADS
print '--'
print 'making hashtable'
ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)
print 'consuming input', repfile
ht.consume_fasta(repfile)
outfp = open(outfile, 'w')
def process_fn(record, ht=ht):
name = record['name']
seq = record['sequence']
if 'N' in seq:
return None, None
if len(seq) < K:
return None, None
if ht.get_max_count(seq) >= 255:
return None, None
return name, seq
tsp = ThreadedSequenceProcessor(process_fn, WORKER_THREADS, GROUPSIZE)
###
tsp.start(verbose_fastq_iter(infile), outfp)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
97d23fdb9293035257f2b63f7223884d29f25b32
|
3034e86347c71bf7e7af9e5f7aa44ab5ad61e14b
|
/mongodb/day04/grid.py
|
6a756f9c08f808f46ec135295e3d86b64827d34a
|
[] |
no_license
|
jason12360/AID1803
|
bda039b82f43d6609aa8028b0d9598f2037c23d5
|
f0c54a3a2f06881b3523fba7501ab085cceae75d
|
refs/heads/master
| 2020-03-17T00:43:42.541761 | 2018-06-29T10:07:44 | 2018-06-29T10:07:44 | 133,127,628 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 495 |
py
|
# Fetch files stored via GridFS from the database
from pymongo import MongoClient
# gridfs is used together with pymongo
import gridfs
conn = MongoClient('localhost',27017)
db = conn.get_database('grid')
# Get a GridFS object
fs = gridfs.GridFS(db)
files = fs.find()
for file in files:
if file.filename =='./生日快乐歌.mp3':
with open(file.filename,'wb') as f:
while True:
                # file.read() fetches the file contents (here, 64 bytes at a time)
data = file.read(64)
if not data:
break
f.write(data)
conn.close()
|
[
"[email protected]"
] | |
a783bdb2cbac71f57900c83b05288050df71ca1a
|
a161999b8a9009b6bf961288b68d651541882f2d
|
/process_news.py
|
e653f8d7622888988beeeccb4c26faee2e2b6d09
|
[] |
no_license
|
kkb-Projects/P1-news-summarization
|
788896460aa11712812a86eaf7c7c066c5028d0b
|
85122968d92b84741fd2fa8dbb81410e807c7eac
|
refs/heads/master
| 2021-01-09T14:39:09.941508 | 2020-03-19T02:44:17 | 2020-03-19T02:44:17 | 242,340,799 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,912 |
py
|
# -*- coding:utf8 -*-
# author:yaolinxia
# datetime:2020/3/11
# software: PyCharm
import random
import re
import pandas as pd
from collections import Counter
import jieba
from functools import reduce
"""
Chinese news corpus processing
"""
def token(string):
# we will learn the regular expression next course.
return re.findall('\w+', string)
# Save the processed text to a file
def to_txt(articles_clean,outpath='news_articles.txt'):
with open(outpath, 'w') as f:
for a in articles_clean:
f.write(a + '\n')
# Word segmentation
def cut(string):
return list(jieba.cut(string))
# Store the tokens in a dict and write it out
def to_dict(Token, out_path='news_articles_dict.txt'):
line_dict = {}
with open(out_path, 'w') as f:
for i, line in enumerate(Token):
line_dict[i] = line
f.write(str(line_dict))
print(line_dict[2])
def seg2txt(Token, out_path='news_articles_cut.txt'):
with open(out_path, 'w') as f:
for line in Token:
f.write(line+' ')
# Count word frequencies
def seg2num(cut_txt):
    c = Counter()
    with open(cut_txt, 'r') as f:
        for lines in f.readlines():
            # The cut file is space-separated, so split into tokens
            # (iterating the raw string would count characters instead).
            for l in lines.strip().split():
                c[l] += 1
    for (k, v) in c.most_common(2):  # print the two most frequent words
        print("%s:%d" % (k, v))
if __name__ == '__main__':
filename = 'data/sqlResult_1558435.csv'
wiki_file = "data/wiki_00"
wiki_out = "data/output/wiki_less.txt"
"""
outpath = 'news_articles.txt'
content = pd.read_csv(filename, encoding='gb18030')
articles = content['content'].tolist()
articles_clean = [''.join(token(str(a))) for a in articles]
Token = []
Token = cut(open(outpath).read())
print("Token", Token)
# to_dict(Token)
seg2txt(Token)
"""
seg2num("data/output/wiki_cut.txt")
|
[
"[email protected]"
] | |
46abac533c1ec9a572a565d59cc930bd692ad94d
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/L66/66-77_MD_NVT_rerun/set_7.py
|
9c5ad84b39e109861815092ca2f3a6a6735a91e4
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 741 |
py
|
import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L66/MD_NVT_rerun/ti_one-step/66_77/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_7.in'
temp_pbs = filesdir + 'temp_7.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_7.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_7.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"[email protected]"
] | |
ac242bd9428a8e8c909b8ceebdba6c1129a468c2
|
f2673cd07770dca1bc5017341e8293aebbfd66c7
|
/models/attention/encoders/pyramidal_blstm_encoder.py
|
fc78e88348e5a7cb29ead0d799c4e24a19c25a9f
|
[
"MIT"
] |
permissive
|
xiao2mo/tensorflow_end2end_speech_recognition
|
52d2c8d32b2f6e9f9f11dfaf8ddf434da16ff2ea
|
9b4bdcacd9d73c3db19205b74f4d48419584834d
|
refs/heads/master
| 2020-06-03T04:54:34.127500 | 2017-06-12T02:47:51 | 2017-06-12T02:47:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,634 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Pyramidal Bidirectional LSTM Encoder class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .encoder_base import EncoderOutput, EncoderBase
class PyramidalBLSTMEncoder(EncoderBase):
"""Pyramidal Bidirectional LSTM Encoder.
Args:
num_units:
num_layer:
keep_prob_input:
keep_prob_hidden:
parameter_init:
clip_activation:
num_proj:
"""
def __init__(self,
num_units,
num_layer,
keep_prob_input=1.0,
keep_prob_hidden=1.0,
parameter_init=0.1,
clip_activation=50,
num_proj=None,
name='pblstm_encoder'):
EncoderBase.__init__(self, num_units, num_layer, keep_prob_input,
keep_prob_hidden, parameter_init, clip_activation,
num_proj, name)
def _build(self, inputs, inputs_seq_len):
"""Construct Pyramidal Bidirectional LSTM encoder.
Args:
inputs:
inputs_seq_len:
Returns:
EncoderOutput: A tuple of
`(outputs, final_state,
attention_values, attention_values_length)`
outputs:
final_state:
attention_values:
attention_values_length:
"""
self.inputs = inputs
self.inputs_seq_len = inputs_seq_len
raise NotImplementedError
|
[
"[email protected]"
] | |
152553eda650901c21d5c57c5c78ebcc75106dfa
|
0b16b44e4fc8c98c9ea3f9d4b8b470f4f62f918d
|
/Core/migrations/0002_auto_20201101_2120.py
|
26e849d4a31082849e63d44fac1fcb8360cb5f66
|
[] |
no_license
|
AthifSaheer/DipakNiroula-Django-Ecom
|
342eece90211fe80c41ba72bf69a50e63c5ea901
|
94ead608919c5bb076387e26f396e6c38319433e
|
refs/heads/main
| 2023-02-05T06:52:24.204206 | 2020-12-24T13:19:13 | 2020-12-24T13:19:13 | 324,160,212 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,039 |
py
|
# Generated by Django 2.2.14 on 2020-11-01 15:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Core', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='return_POlicy',
new_name='return_Policy',
),
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='admins')),
('mobile', models.CharField(max_length=20)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
009e53c59746e5e95ef1681b709b7a2b28c2339c
|
267aafa3826d216f70a0197369c334bc542aee40
|
/setup.py
|
a4b523deefdb6153c1331d6b30709c5c10b95b35
|
[] |
no_license
|
research-core/core-orders
|
7ccc199e6b89e6cd86affd4d8e5bab4fe845589b
|
37566b742b1423d30f9dc8e67641d828dc22e4a6
|
refs/heads/master
| 2020-06-29T02:37:00.250110 | 2019-08-26T17:10:48 | 2019-08-26T17:10:48 | 200,413,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
version, license = None, None
with open('orders/__init__.py', 'r') as fd:
content = fd.read()
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
license = re.search(r'^__license__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
if version is None: raise RuntimeError('Cannot find version information')
if license is None: raise RuntimeError('Cannot find license information')
with open('README.md', 'r') as fd:
long_description = fd.read()
setup(
name='core-orders',
version=version,
description='Research CORE ERM - orders module',
author='Ricardo Ribeiro, Hugo Cachitas',
author_email='[email protected], [email protected]',
url='https://github.com/research-core/core-orders',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
license=license,
)
|
[
"[email protected]"
] | |
a9682c31beb5aa6a6e2cacc7e42da087c161cd63
|
7ec04fc867d0a48fffc05c65bff9217cfe211fe7
|
/HW/统计字符串/teachers.py
|
f3e81a089bc6a999b09cf50c7dafa2466777ca3b
|
[] |
no_license
|
Cherry93/pythonPractic
|
3b9d1f99803503073bbb2f3a58009665338bd278
|
2889183af6c9a01ab47895b23e2d6ce8c288fd4d
|
refs/heads/master
| 2021-08-31T16:41:56.655989 | 2017-12-22T03:53:18 | 2017-12-22T03:53:18 | 115,008,198 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 892 |
py
|
'''
Define a Teachers class with name, title, and salary attributes; create 1000 instances, write them to teachers.dat with pickle, and read them back.
'''
import pickle
from tkinter import filedialog
class Teachers:
def __init__(self,name,call,rmb):
self.name =name
self.call = call
self.rmb = rmb
def __str__(self):
return "name:"+str(self.name)+"call:"+str(self.call)+"rmb:"+str(self.rmb)
c = Teachers("王小星","高级",1000)
#print(c)
def writeDemo():
global file
#print(c)
savePath = filedialog.asksaveasfilename()
file = open(savePath, mode="ab")
for i in range(10):
data = c
pickle.dump(data, file)
file.close()
writeDemo()
def readMode():
    global file
    with open(filedialog.askopenfilename(), mode="rb") as file:
        for i in range(10):
            data = pickle.load(file)
            print(data)
readMode()
|
[
"[email protected]"
] | |
eb3d54dc1db886b98008f3a576109aa33e101d6d
|
5e734cd4e071272688ab635243290936c5c2db40
|
/lib/paths.py
|
26971a871946a307647c399e9c700320a62ab114
|
[
"MIT"
] |
permissive
|
jwilk/i18nspector
|
a2a4aecee00de9cfb8d9a0354614f7413e19f1b9
|
d9762416937399b81abaedc9ddcdc36dbda1c318
|
refs/heads/master
| 2023-09-04T12:32:35.255101 | 2023-08-22T08:41:50 | 2023-08-22T08:41:50 | 29,258,684 | 2 | 3 |
MIT
| 2022-06-27T19:04:57 | 2015-01-14T18:22:23 |
Python
|
UTF-8
|
Python
| false | false | 1,388 |
py
|
# Copyright © 2013 Jakub Wilk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
paths to code and data
'''
import os
basedir = os.path.normpath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'',
))
datadir = os.path.join(basedir, 'data', '')
def check():
os.stat(basedir)
os.stat(datadir)
# vim:ts=4 sts=4 sw=4 et
|
[
"[email protected]"
] | |
9b3d001951b24200fcdb3bd49fa67280cf2503c4
|
6659f860ddbb7550f66ea712753d3d2aab1cc6ff
|
/Note_3/Example_36.py
|
2671dcc8e106d4ba64273a5f63c1cda83dfc50f5
|
[] |
no_license
|
ianhom/Python-Noob
|
adf077bee78727eac43da2804a90528ace6c38a6
|
e12f0159d68d7c4962cafa3cb8b68a8761037f21
|
refs/heads/master
| 2020-12-08T12:06:01.909463 | 2018-07-03T00:42:41 | 2018-07-03T00:42:41 | 67,806,200 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Problem: find the prime numbers within 100.
'''
lower = int(input("Enter the lower bound of the range: "))
upper = int(input("Enter the upper bound of the range: "))
for num in range(lower,upper + 1):
    # Primes are greater than 1
if num > 1:
for i in range(2,num):
if (num % i) == 0:
break
else:
print(num)
# result
'''
Enter the lower bound of the range: 2
Enter the upper bound of the range: 78
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
'''
|
[
"[email protected]"
] | |
4581172461ca6e272ba66f94b453f7e3627ebeb2
|
e617affbb9292944465969a7f7a6a02b1c88f10a
|
/offer_algri/数组中出现次数超过一半的数字/p.py
|
2be851787656e28518166bb8ce3645d671b6563e
|
[] |
no_license
|
darr/offer_algri
|
92904d02c7bbd721aa47b4836f2190c3e9407f24
|
724fd689cfe7bd2f8aaed19ef912eecbf00a2df3
|
refs/heads/master
| 2020-03-25T04:18:40.491916 | 2018-09-07T08:52:39 | 2018-09-07T08:52:39 | 143,388,188 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 934 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#####################################
# File name : p.py
# Create date : 2018-07-23 08:49
# Modified date : 2018-07-23 13:04
# Author : DARREN
# Describe : not set
# Email : [email protected]
#####################################
class Solution:
    # run: 32ms  memory: 5624k
    def MoreThanHalfNum_Solution(self, numbers):
        if numbers is None or len(numbers) == 0:
            return 0
        lenN = len(numbers)
        # Boyer-Moore majority vote: keep a candidate and a counter.
        num = numbers[0]
        times = 1
        for i in range(1, lenN):
            if times == 0:
                # Counter exhausted: adopt the current element as the new
                # candidate and restart its count at 1.
                num = numbers[i]
                times = 1
            elif numbers[i] == num:
                times += 1
            else:
                times -= 1
        # Verify the candidate really occurs more than n/2 times.
        count = 0
        for i in range(lenN):
            if numbers[i] == num:
                count += 1
        if count > lenN / 2:
            return num
        return 0
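
if __name__ == '__main__':
    # Illustrative check (not part of the original file): 2 occupies 3 of
    # the 5 positions, so it is the majority element.
    print(Solution().MoreThanHalfNum_Solution([1, 2, 2, 2, 3]))  # -> 2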
|
[
"[email protected]"
] | |
6ee3ad7ed2666cd3c2c2e7bb9947e9d2975cadf8
|
ef243d91a1826b490e935fa3f3e6c29c3cc547d0
|
/PyQt5/QtSensors/QAltimeterFilter.py
|
7f6d16487b33ff0384829272d015bff8aad4003c
|
[] |
no_license
|
VentiFang/Python_local_module
|
6b3d0b22399e817057dfd15d647a14bb1e41980e
|
c44f55379eca2818b29732c2815480ee755ae3fb
|
refs/heads/master
| 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 668 |
py
|
# encoding: utf-8
# module PyQt5.QtSensors
# from F:\Python\Python36\lib\site-packages\PyQt5\QtSensors.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
from .QSensorFilter import QSensorFilter
class QAltimeterFilter(QSensorFilter):
"""
QAltimeterFilter()
QAltimeterFilter(QAltimeterFilter)
"""
def filter(self, QAltimeterReading): # real signature unknown; restored from __doc__
""" filter(self, QAltimeterReading) -> bool """
return False
def __init__(self, QAltimeterFilter=None): # real signature unknown; restored from __doc__ with multiple overloads
pass
|
[
"[email protected]"
] | |
76a9acaf06ed647f5329818ed4650ab73952cbb8
|
7246faf9a222269ce2612613f58dc5ff19091f10
|
/leetcode/1662.py
|
d793883d41ed3cb54e390d971c41a4c5ca4f7ffd
|
[] |
no_license
|
gusdn3477/Algorithm_Study
|
87a2eb72a8488d9263a86db70dadc7944434d41d
|
3fefe1dcb40122157845ffc542f41cb097711cc8
|
refs/heads/main
| 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
class Solution:
def arrayStringsAreEqual(self, word1: List[str], word2: List[str]) -> bool:
a = ''
b = ''
for i in word1:
a += i
for i in word2:
b += i
return a == b
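        # equivalent, more idiomatic one-liner:
        #     return ''.join(word1) == ''.join(word2)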
|
[
"[email protected]"
] | |
1f0ba2eb90839c85462d5f63334dbc88a90db375
|
1d672c52ada009c6aeeafec6caeae0adf064060d
|
/docs/source/conf.py
|
a97a5551e3ae8bef36af79a0972f0eb8404b6190
|
[
"BSD-3-Clause"
] |
permissive
|
sakshiseth/fury
|
9927487aaf5dd1b2dc0db5cd31facdb4743f86dd
|
5799e445a5a306852a674396803bbefa922f0ae6
|
refs/heads/master
| 2021-01-13T20:18:49.848717 | 2020-02-22T20:54:59 | 2020-02-22T20:54:59 | 242,483,253 | 0 | 1 |
NOASSERTION
| 2020-02-23T08:39:05 | 2020-02-23T08:39:04 | null |
UTF-8
|
Python
| false | false | 7,705 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# FURY documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import sys
from datetime import datetime
# Add current path
sys.path.insert(0, os.path.abspath('.'))
# Add doc in path for finding tutorial and examples
sys.path.insert(0, os.path.abspath('../..'))
# Add custom extensions
sys.path.insert(0, os.path.abspath('./ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '2.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'sphinx_copybutton',
'sphinx_gallery.gen_gallery',
'ext.build_modref_templates',
'ext.github',
'ext.github_tools',
'ext.rstjinja'
]
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autosummary_generate = []
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'FURY'
copyright = '2010-{0}, FURY'.format(datetime.now().year)
author = 'FURY'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import fury
# The short X.Y version.
version = fury.__version__
# The full version, including alpha/beta/rc tags.
release = fury.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'versions.html',
]
}
# ghissue config
github_project_url = "https://github.com/fury-gl/fury"
import github_tools as ght
all_versions = ght.get_all_versions(ignore='micro')
html_context = {'all_versions': all_versions,
'versions_list': ['dev', 'latest'] + all_versions,
'basic_stats': ght.fetch_basic_stats(),
'contributors': ght.fetch_contributor_stats(),
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'fury'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fury.tex', 'FURY Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fury', 'FURY Documentation',
[author], 1)
]
# -- Options for sphinx gallery -------------------------------------------
from scrap import ImageFileScraper
sc = ImageFileScraper()
sphinx_gallery_conf = {
'doc_module': ('fury',),
# path to your examples scripts
'examples_dirs': ['../examples', '../tutorials'],
# path where to save gallery generated examples
'gallery_dirs': ['auto_examples', 'auto_tutorials'],
'image_scrapers': (sc),
'backreferences_dir': 'api',
'reference_url': {'fury': None, },
'filename_pattern': re.escape(os.sep)
}
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fury', 'FURY Documentation',
author, 'fury', 'Free Unified Rendering in Python',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib': ('https://matplotlib.org', None),
'dipy': ('https://dipy.org/documentation/latest',
'https://dipy.org/documentation/1.0.0./objects.inv/'),
}
|
[
"[email protected]"
] | |
4513165496d6f2e83579ac9cf0684f88a705068e
|
d020606f5e9174aa669e4b6b316bdb0fcb05ce02
|
/run_test.py
|
dc79693a586c6b2f47af9c3cd513684781ca785c
|
[] |
no_license
|
Hanlen520/AutomationProject
|
4c1270fba570b256493cd6681d715e0b5136a4f5
|
95a7cb61d8b339a6409483d738de5a0d9d85b321
|
refs/heads/master
| 2023-04-02T20:23:07.696753 | 2021-04-07T07:57:04 | 2021-04-07T07:57:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,342 |
py
|
# coding = utf8
import logging
import multiprocessing
import subprocess
import pytest
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
from config import install_app_necessary, SERIAL_NUMBER
from page.fota.fota_page import Fota_Page
from page.main_page import Main_Page
from page.system.system import System
from toolsbar.common import test_device
from toolsbar.permissionGrant import grant_permission
os.path.abspath(".")
# filter the airtest logger so that only ERROR-level messages are printed
logger_airtest = logging.getLogger("airtest")
logger_airtest.setLevel(logging.ERROR)
cur_time = time.strftime("%Y%m%d_%H%M%S")
"""
@File:run_test.py
@Author:Bruce
@Date:2020/12/15
@Description: project runner; holds the test and debug entry functions
"""
"""
With a single device, the poco/device objects need no extra initialisation.
With multiple devices, create a fresh poco_item per device and pass it in;
airtest APIs are then called through the matching device_item.
Test cases do not need to be written twice.
Keep the UI process and the low-level worker process separate: putting them
in the same process is error-prone.
"""
# process pool for multi-device testing; also handles the single-device case
"""
@description: spawn one process per device for multi-device testing
@tip:
PyCharm invokes adb poorly; start PyCharm from a terminal (via the `charm`
command) so it inherits the shell's permissions.
Before running the cases, install pocoservice.apk manually, tap "continue"
and dismiss the permission dialogs to prevent later failures.
"""
def start_test():
print("当前设备数量:" + str(len(SERIAL_NUMBER)))
if len(SERIAL_NUMBER) > 1:
for i in test_device:
install_app_necessary(i)
grant_permission(i)
else:
install_app_necessary(test_device)
grant_permission(test_device)
test_pool = multiprocessing.Pool(len(SERIAL_NUMBER))
for device_ in SERIAL_NUMBER:
test_pool.apply_async(func=fota_test_area, args=(device_,))
sleep(10)
test_pool.close()
test_pool.join()
"""
@description: runs the Fota checklist test functions
@param:
    device_: device serial number
"""
def fota_test_area(device_):
pytest.main(["-v", "-s", "--cmdopt={}".format(device_), "{}".format("./test_case/test_before_fota.py"),
"--reruns={}".format(1),
"--alluredir={}".format("./temp/need_data[{}_{}]/".format(cur_time, device_))])
    # generate a separate allure report per device
subprocess.Popen(
args=["allure", "generate", "./temp/need_data[{}_{}]/".format(cur_time, device_), "-o",
"./report/test_report[{}_{}]/".format(cur_time, device_),
"--clean"],
shell=False).communicate()[0]
updatesw(device_)
# subprocess.Popen(
# "allure generate ./temp/need_data[{}_{}] -o ./report/test_report[{}_{}]/ --clean".format(cur_time, device_,
# cur_time, device_),
# shell=True).communicate()[0]
"""
@description: runs the software-upgrade step of the Fota checklist test
@param:
    device_: device serial number
"""
def updatesw(device_):
print("开始新版本升级")
try:
device_c = connect_device("Android:///{}".format(device_))
poco = AndroidUiautomationPoco(device=device_c, use_airtest_input=False,
screenshot_each_action=False)
main_page = Main_Page(device_c, poco)
system = System(main_page)
system.unlock_screen()
fota_page = Fota_Page(main_page)
fota_page.start_fota_page()
fota_page.skip_guide()
fota_page.updatesw()
print("升级结果:" + str(fota_page.check_update_result(device_)))
print("Fota升级测试结束")
except Exception as ex:
print(str(ex))
"""
@description: Fota checklist test area
"""
def fota_checklist_test_module():
start_test()
"""
@description: main entry point
"""
if __name__ == '__main__':
print("脚本开始测试,Fota checklist模块测试正在运行中……")
for i in range(5):
print("这是第{}次测试该脚本".format(i))
fota_checklist_test_module()
print("This is {} times running and time is {}".format(str(i), time.strftime("%Y%m%d_%H%M%S")))
print("脚本测试结束,请检查测试结果")
|
[
"[email protected]"
] | |
eb525e2ac4b98dac4261e2d6857bca7619fda42c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_disgraced.py
|
e19865eec3d6c3da4393171f3fa501464fae3db9
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
from xai.brain.wordbase.verbs._disgrace import _DISGRACE
# class header
class _DISGRACED(_DISGRACE):
def __init__(self,):
_DISGRACE.__init__(self)
self.name = "DISGRACED"
self.specie = 'verbs'
self.basic = "disgrace"
self.jsondata = {}
|
[
"[email protected]"
] | |
03cf906edb96cb427cd37f0de2a53228c70ea321
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/zpt/trunk/site-packages/zpt/_pytz/zoneinfo/Africa/Asmera.py
|
9ccd9c3141892ef8d1d76acc48c773f5c5c4c4cf
|
[
"MIT",
"ZPL-2.1"
] |
permissive
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
'''tzinfo timezone information for Africa/Asmera.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Asmera(DstTzInfo):
'''Africa/Asmera timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Asmera'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1936,5,4,21,24,40),
]
_transition_info = [
i(9300,0,'ADMT'),
i(10800,0,'EAT'),
]
Asmera = Asmera()
|
[
"[email protected]"
] | |
2736cd03881b87e222ecd21b6b92c7e5445f98f5
|
31d5bebb9667b17a17fe98a5c3064cac5a0fd4dd
|
/calisanProfil/urls.py
|
219a0406652a00ff45e0ff330c77ec07ab045d24
|
[] |
no_license
|
refik/audio
|
d6b8829fafcfa2c54e6f477ceede210a2f5d4f41
|
011b7b0f01d14529b91bf6f4d3c5919823e19e6b
|
refs/heads/master
| 2021-01-17T17:06:06.202561 | 2015-09-17T05:38:22 | 2015-09-17T05:38:22 | 1,948,617 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
from django.conf.urls.defaults import *
urlpatterns = patterns('audio.calisanProfil.views',
(r'^musteri-temsilcisi/', 'temsilci'),
)
|
[
"[email protected]"
] | |
0d74495bd1cc1679a451768d66fda5ef8194d179
|
ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3
|
/code-festival-2016-qualc/b/main.py
|
987ac90bc403547651d6d2456180210a150a8701
|
[] |
no_license
|
kussy-tessy/atcoder
|
5604919747242ee9740b9131bb6e168e96af0151
|
ee917fa5a5218d4a9e72f710d0d844e7c203f13b
|
refs/heads/master
| 2023-07-21T09:25:15.464881 | 2021-09-04T14:06:02 | 2021-09-04T14:06:02 | 311,221,203 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
py
|
#!/usr/bin/env python3
K, T = map(int,(input().split()))
As = list(map(int,(input().split())))
# if len(As) == 1:
# print(As[0] - 1)
As.sort()
As_max = As[-1]
As_other = sum(As[:-1])
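# if the most numerous type exceeds all the others combined by more than one,
# the surplus pieces are forced to sit next to each other; otherwise the
# types can always be interleaved, so the answer is 0 (assumed reading of the task)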
print(max(0, As_max - As_other - 1))
|
[
"[email protected]"
] | |
7c562bd59160cfb964891453b9d4a28be9ae4772
|
c6b74df572dc2dcf7034c25860d18cb3c9143d4f
|
/linearizer/generative.py
|
ec00a10615d362bf6ab18a3138b457f78f83330b
|
[] |
no_license
|
kadarakos/LinearAMR
|
55f2dfedb5d100988be5511004be690e2808ad17
|
d8408924171960e84c92cfe46bb531b3b3ee17e0
|
refs/heads/master
| 2021-01-19T23:19:41.226183 | 2017-04-21T11:29:02 | 2017-04-21T11:29:02 | 88,961,862 | 0 | 0 | null | 2017-04-21T08:37:38 | 2017-04-21T08:37:38 | null |
UTF-8
|
Python
| false | false | 3,595 |
py
|
__author__ = 'thiagocastroferreira'
from sys import path
path.append('/home/tcastrof/amr/scp_repo')
path.append('/home/tcastrof/amr/Grammar')
path.append('../')
from compression_tree.compressor import Compressor
from ERG import AMR
import kenlm
import os
import utils
import itertools
class Generative(object):
    def __init__(self, lm_path):
        self.model = kenlm.Model(lm_path)
        # `compressor` is the module-level instance built in __main__; it must
        # exist before Generative() is instantiated
        self.compressor = compressor
def process(self, amr):
self.amr = amr
return self.linearize(self.amr.root)
def ranking(self, base):
candidates = []
for candidate in itertools.permutations(base):
snt = []
for e in candidate:
for span in e.split():
snt.extend(span.split('~'))
snt = ' '.join(snt)
score = self.model.score(snt)
candidates.append((' '.join(candidate), score))
return sorted(candidates, key=lambda x: x[1], reverse=True)
def linearize(self, root):
linear = []
for edge in self.amr.edges[root]:
linear_child = self.linearize(edge.node_id)
if linear_child.strip() != '':
if edge.status == '+':
linear_child = edge.name + '~' + linear_child
linear.append(linear_child)
status = self.amr.nodes[root].status
name = self.amr.nodes[root].name
if 0 < len(linear) <= 9:
if status == '+':
linear.append(name)
rank = self.ranking(linear)
return rank[0][0]
elif len(linear) > 9:
if status == '+':
linear.insert(len(linear)-1, name)
return ' '.join(linear)
else:
if status == '+':
return name
else:
return ''
if __name__ == '__main__':
CLF_NODE_PATH = '../compression/results/clf_node.cPickle'
CLF_EDGE_PATH = '../compression/results/clf_edge.cPickle'
EDGE_PATH = '../compression/validation/edge_feat.cPickle'
EDGE_PARENT_PATH = '../compression/validation/edge_parent_feat.cPickle'
EDGE_CHILD_PATH = '../compression/validation/edge_child_feat.cPickle'
NODE_PATH = '../compression/validation/node_feat.cPickle'
NODE_PARENT_PATH = '../compression/validation/node_parent_feat.cPickle'
LM_PATH = 'lm/6gram.arpa'
compressor = Compressor(clf_node_path=CLF_NODE_PATH,
clf_edge_path=CLF_EDGE_PATH,
edge_path=EDGE_PATH,
edge_parent_path=EDGE_PARENT_PATH,
edge_child_path=EDGE_CHILD_PATH,
node_path=NODE_PATH,
node_parent_path=NODE_PARENT_PATH)
linearizer = Generative(lm_path=LM_PATH)
amrs_path = '../data/LDC2016E25/data/amrs/split/test'
amrs = []
for fname in os.listdir(amrs_path):
f = os.path.join(amrs_path, fname)
amrs.extend(utils.parse_corpus(f, False))
linears = []
for amr in amrs:
print amr['sentence']
linear = linearizer.process(amr['amr'].lower())
final = []
for l in linear.split():
final.extend(l.split('~'))
linears.append(' '.join(final))
de = open('../data/LDC2016E25/corpus/test.gen', 'w')
# en = open('../data/LDC2016E25/corpus/dev.lex', 'w')
for i, linear in enumerate(linears):
de.write(linear)
de.write('\n')
# en.write(amrs[i]['sentence'].lower())
# en.write('\n')
de.close()
# en.close()
|
[
"[email protected]"
] | |
4a0570c65c81d3d58ef799132c1206c6d01be707
|
bcf88b912b9443c3326466c226f68a7e7ad5aa9d
|
/bdbag/__init__.py
|
ab5519ea26b97ecb75b741254c95bea69f7adaf3
|
[
"Apache-2.0"
] |
permissive
|
mvdbeek/bdbag
|
33bc7e0275c720104af77654b0016024cb6ab012
|
fe67b5bffc68b7dac823ce03d450ede3affccbef
|
refs/heads/master
| 2020-03-25T05:17:09.646537 | 2018-07-12T03:58:06 | 2018-07-12T03:58:06 | 143,438,809 | 0 | 0 | null | 2018-08-03T14:42:27 | 2018-08-03T14:42:27 | null |
UTF-8
|
Python
| false | false | 6,188 |
py
|
import os
import re
import sys
import json
import logging
import mimetypes
from requests.utils import requote_uri
from pkg_resources import get_distribution, DistributionNotFound
__version__ = "1.4.2"
if sys.version_info > (3,):
from urllib.parse import quote as urlquote, unquote as urlunquote, urlsplit, urlunsplit
from urllib.request import urlretrieve, urlopen
else:
from urllib import quote as urlquote, unquote as urlunquote, urlretrieve, urlopen
from urlparse import urlsplit, urlunsplit
try:
VERSION = get_distribution("bdbag").version
except DistributionNotFound:
VERSION = __version__ + '-dev'
PROJECT_URL = 'https://github.com/fair-research/bdbag'
try:
BAGIT_VERSION = get_distribution("bagit").version
except DistributionNotFound:
BAGIT_VERSION = 'unknown'
BAG_PROFILE_TAG = 'BagIt-Profile-Identifier'
BDBAG_PROFILE_ID = 'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-profile.json'
BDBAG_RO_PROFILE_ID = 'https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-ro-profile.json'
ID_RESOLVER_TAG = 'identifier_resolvers'
DEFAULT_ID_RESOLVERS = ['n2t.net', 'identifiers.org']
DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.bdbag')
DEFAULT_CONFIG_FILE = os.path.join(DEFAULT_CONFIG_PATH, 'bdbag.json')
DEFAULT_CONFIG = {
'bag_config':
{
'bag_algorithms': ['md5', 'sha256'],
'bag_processes': 1,
'bag_metadata':
{
BAG_PROFILE_TAG: BDBAG_PROFILE_ID
}
},
ID_RESOLVER_TAG: DEFAULT_ID_RESOLVERS
}
CONTENT_DISP_REGEX = re.compile(r"^filename[*]=UTF-8''(?P<name>[-_.~A-Za-z0-9%]+)$")
FILTER_REGEX = re.compile(r"(?P<column>^.*)(?P<operator>==|!=|=\*|!\*|\^\*|\$\*|>=|>|<=|<)(?P<value>.*$)")
FILTER_DOCSTRING = "\"==\" (equal), " \
"\"!=\" (not equal), " \
"\"=*\" (wildcard substring equal), " \
"\"!*\" (wildcard substring not equal), " \
"\"^*\" (wildcard starts with), " \
"\"$*\" (wildcard ends with), " \
"or \">\", \">=\", \"<\", \"<=\""
if not mimetypes.inited:
mimetypes.init()
def get_typed_exception(e):
exc = "".join(("[", type(e).__name__, "] "))
return "".join((exc, str(e)))
def add_mime_types(types):
if not types:
return
for t in types.keys():
for e in types[t]:
mimetypes.add_type(type=t, ext=e if e.startswith(".") else "".join([".", e]))
def guess_mime_type(file_path):
mtype = mimetypes.guess_type(file_path)
content_type = 'application/octet-stream'
if mtype[0] is not None and mtype[1] is not None:
content_type = "+".join([mtype[0], mtype[1]])
elif mtype[0] is not None:
content_type = mtype[0]
elif mtype[1] is not None:
content_type = mtype[1]
return content_type
def parse_content_disposition(value):
m = CONTENT_DISP_REGEX.match(value)
if not m:
raise ValueError('Cannot parse content-disposition "%s".' % value)
n = m.groupdict()['name']
try:
n = urlunquote(str(n))
except Exception as e:
raise ValueError('Invalid URL encoding of content-disposition filename component. %s.' % e)
try:
if sys.version_info < (3,):
n = n.decode('utf8')
except Exception as e:
raise ValueError('Invalid UTF-8 encoding of content-disposition filename component. %s.' % e)
return n
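# e.g. parse_content_disposition("filename*=UTF-8''r%C3%A9sum%C3%A9.txt")
# returns u'résumé.txt' (illustrative header value, not from a real response)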
def escape_uri(uri, illegal_only=True, safe="/"):
if not uri:
return uri
if illegal_only:
return requote_uri(uri)
else:
urlparts = urlsplit(uri)
path = urlquote(urlunquote(urlparts.path), safe=safe)
query = urlquote(urlunquote(urlparts.query), safe=safe)
fragment = urlquote(urlunquote(urlparts.fragment), safe=safe)
return urlunsplit((urlparts.scheme, urlparts.netloc, path, query, fragment))
def filter_dict(expr, entry):
if not expr:
return True
match = FILTER_REGEX.search(expr)
if not match:
raise ValueError("Unable to parse expression: %s" % expr)
expr_dict = match.groupdict()
filter_col = expr_dict["column"]
filter_val = expr_dict["value"]
operator = expr_dict["operator"]
filter_neg = filter_substring = filter_relation = filter_startswith = filter_endswith = False
if "==" == operator:
pass
elif "!=" == operator:
filter_neg = True
elif "=*" == operator:
filter_substring = True
elif "^*" == operator:
filter_startswith = True
elif "$*" == operator:
filter_endswith = True
elif "!*" == operator:
filter_substring = True
filter_neg = True
elif (">" == operator) or (">=" == operator) or ("<" == operator) or ("<=" == operator):
filter_relation = True
else:
raise ValueError("Unsupported operator type in filter expression: %s" % expr)
result = False
filter_val = filter_val.strip()
filter_col = filter_col.strip()
if filter_col in set(entry.keys()):
value = entry[filter_col]
if filter_neg:
if filter_substring:
result = filter_val not in str(value)
else:
result = filter_val != value
else:
if filter_substring:
result = filter_val in str(value)
elif filter_startswith:
result = str(value).startswith(filter_val)
elif filter_endswith:
result = str(value).endswith(filter_val)
elif filter_relation:
try:
statement = "%d%s%d" % (int(value), operator, int(filter_val))
result = eval(statement)
except Exception as e:
logging.warning("Unable to evaluate filter expression [%s]: %s" %
(expr, get_typed_exception(e)))
else:
result = filter_val == value
if not result:
logging.debug(
"Excluding %s because it does not match the filter expression: [%s]." %
(json.dumps(entry), expr))
return result
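# Illustrative uses of the filter grammar implemented by filter_dict() above
# (the entry dicts are invented for the example):
#
#     filter_dict("length>=100", {"length": 250})                   -> True
#     filter_dict("filename=*data", {"filename": "data/file.txt"})  -> True
#     filter_dict("filename$*.csv", {"filename": "data/file.txt"})  -> False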
|
[
"[email protected]"
] | |
ca893e5aeee0c7456739c4457ae664105c5c96c6
|
46c3fd904e7b1c45541ffe0518afe50dfdafb089
|
/movie/migrations/0003_movielink_link.py
|
84dc7889b0f52b99ff6733291f6811344b4d8db2
|
[] |
no_license
|
Shirhussain/Movies
|
6ab10b27748bc1cdd3a904861092e5246ce01190
|
4f6639491a86708a5d04a8de7f928500ecba3fdc
|
refs/heads/master
| 2023-01-01T07:52:25.639564 | 2020-10-26T02:15:23 | 2020-10-26T02:15:23 | 306,643,126 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
# Generated by Django 3.1 on 2020-10-24 17:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movie', '0002_auto_20201024_1659'),
]
operations = [
migrations.AddField(
model_name='movielink',
name='link',
field=models.URLField(default=''),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
47a724810b4e9c7cfd2870858a2472067fe6ff19
|
1cceef957954ec516cd8bcd9e9d51e8d9120200b
|
/test_retring_async.py
|
1c13088f03af4ed39ea5ab6e8ea213792c02dbe7
|
[
"MIT"
] |
permissive
|
coneagoe/retrying-async
|
3b8c4a51a7adcbaa2149b110199e6d0b6b5a1f7e
|
54eec24e4183b4ea31c0e133ed11ec0f0535a194
|
refs/heads/master
| 2022-12-21T05:12:17.930689 | 2020-09-21T02:38:42 | 2020-09-21T02:38:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
# coding: utf-8
import asyncio
import requests
from retrying_async import retry
def request_api_sync():
    print('fetching...')
response = requests.get(url="http://www.baidu.com")
print(response.status_code, response.content)
raise Exception("异常")
@retry(attempts=3, delay=3)
async def request_api_async():
    print('fetching...')
response = requests.get(url="http://www.baidu.com")
print(response.status_code, response.content)
raise Exception("异常")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(request_api_async())
|
[
"[email protected]"
] | |
d751ba839e41585536769b62bfa2c50a150fb12d
|
6559d2c69ddcd73df844f9e26470c8ea06d92a6c
|
/xnr_0429/xnr/_facebook/feedback_comment.py
|
550d853c6fbd9b7769168390aeafe3c05e801dbe
|
[] |
no_license
|
yuanhuiru/xnr2
|
cc4199fbb136fa5bdf18d879bb77ceb5155627f3
|
b37ec9beccf7332efcda9bdff0c34fa3198b816c
|
refs/heads/master
| 2020-03-21T12:22:17.392966 | 2020-01-14T06:40:55 | 2020-01-14T06:40:55 | 138,549,389 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,505 |
py
|
#!/usr/bin/env python
#encoding: utf-8
from launcher import Launcher
import re
import time
from es import Es_fb
class Comment():
def __init__(self):
self.list = []
    def get_comment(self):
        # `comment_list` is set up in __main__; `driver` is assumed to be a
        # module-level selenium WebDriver, but it is never created in this file
        for url in comment_list:
            driver.get(url)
root_content = driver.find_element_by_xpath('//div[@class="_58jw"]/p').text
root_time = driver.find_element_by_xpath('//abbr[@class="_5ptz"]').get_attribute('data-utime')
            for each in driver.find_elements_by_xpath('//div[@aria-label="评论"]'):  # aria-label "评论" = "comment"
author_name = each.find_element_by_xpath('./div/div/div/div[2]/div/div/div/span/span[1]/a').text
author_id = ''.join(re.findall(re.compile('id=(\d+)'),each.find_element_by_xpath('./div/div/div/div[2]/div/div/div/span/span[1]/a').get_attribute('data-hovercard')))
pic_url = each.find_element_by_xpath('./div/div/div/div[1]/a/img').get_attribute('src')
content = each.find_element_by_xpath('./div/div/div/div[2]/div/div/div/span/span[2]/span/span/span/span').text
time = each.find_element_by_xpath('./div/div/div/div[2]/div/div/div[2]/span[4]/a/abbr').get_attribute('data-utime')
self.list.append({'author_name':author_name,'author_id':author_id,'pic_url':pic_url,'content':content,'time':time})
return self.list
def save(self,indexName,typeName,item):
es.executeES(indexName,typeName,item)
if __name__ == '__main__':
fb = Launcher('18538728360','zyxing,0513')
    es = Es_fb()
comment_list = fb.get_comment_list()
comment = Comment()
    comments = comment.get_comment()
    comment.save('facebook', 'comment', comments)  # index/type names are assumed placeholders
|
[
"[email protected]"
] | |
1aaafa9b5403e7331b1d730439c5a8e67fa3debb
|
d1e4f29e583ee964d63bc48554eaa73d67d58eb2
|
/zerver/migrations/0264_migrate_is_announcement_only.py
|
073eb22a23670741fdc4d7155701549b168dfc77
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
hygolei/zulip
|
299f636f9238f50b0d2746f1c371748f182f1f4e
|
39fe66ab0824bc439929debeb9883c3046c6ed70
|
refs/heads/master
| 2023-07-11T22:50:27.434398 | 2021-08-09T10:07:35 | 2021-08-09T10:07:35 | 375,401,165 | 1 | 1 |
Apache-2.0
| 2021-08-09T10:07:36 | 2021-06-09T15:20:09 |
Python
|
UTF-8
|
Python
| false | false | 972 |
py
|
# Generated by Django 1.11.26 on 2020-01-25 23:47
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def upgrade_stream_post_policy(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Stream.STREAM_POST_POLICY_EVERYONE = 1
Stream.STREAM_POST_POLICY_ADMINS = 2
Stream.objects.filter(is_announcement_only=False).update(
stream_post_policy=Stream.STREAM_POST_POLICY_EVERYONE
)
Stream.objects.filter(is_announcement_only=True).update(
stream_post_policy=Stream.STREAM_POST_POLICY_ADMINS
)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0263_stream_stream_post_policy"),
]
operations = [
migrations.RunPython(
upgrade_stream_post_policy, reverse_code=migrations.RunPython.noop, elidable=True
),
]
|
[
"[email protected]"
] | |
9f0fe44398ecdc7bda9c8cb213e2256c43819598
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/334/90931/submittedfiles/testes.py
|
12f71f0bb7b483e5f7d1e3b92c7403e72b738f64
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 145 |
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
def maximo(a, b):
    if a > b:
        return a
    else:
        return b

x = int(input())
y = int(input())
print(maximo(x, y))
|
[
"[email protected]"
] | |
1fd6f92f79cd265af470244e0460ad9853def643
|
e18a353582609732c795401f1a01bc762bd939f2
|
/top/python/MuonTracking.RunII.py
|
9f1a8ca4dcada005ae643bc5e39eb41edab8c6d8
|
[] |
no_license
|
s-farry/workspaces
|
06741807bb464bb0712d52108c2d1b7ae62b1353
|
0dcf3868dcbe110206ea88ff5c9e04a3b44b1ca1
|
refs/heads/master
| 2020-04-03T00:45:39.152227 | 2017-06-15T16:33:33 | 2017-06-15T16:33:33 | 64,213,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,463 |
py
|
from Jawa import EfficiencyClass
from ROOT import TFile, TCut, TTree, TMath
phicut= TCut("(abs(tag_PHI-probe_PHI)<TMath::Pi() ? abs(tag_PHI-probe_PHI) : 2*TMath::Pi()-abs(tag_PHI-probe_PHI))>0.1")
ptcut = TCut("tag_PT > 20000 && probe_PT > 20000")
triggercut = TCut("tag_Hlt2EWSingleMuonVHighPtDecision_TOS==1 && tag_Hlt1SingleMuonHighPTDecision_TOS == 1 && tag_L0MuonEWDecision_TOS ==1")
run1triggercut = TCut("tag_Hlt2SingleMuonHighPTDecision_TOS==1 && tag_Hlt1SingleMuonHighPTDecision_TOS == 1 && tag_L0MuonDecision_TOS ==1")
trkqual = TCut("(sqrt(tag_PERR2)/tag_P) < 0.1")
eta = TCut("tag_ETA > 2 && tag_ETA < 4.5 && probe_ETA > 2 && probe_ETA < 4.5")
vtxcut = TCut("boson_ENDVERTEX_CHI2/boson_ENDVERTEX_NDOF < 5")
isocut = TCut("tag_cpt_0.50 < 2000")
pt25 = TCut("probe_PT > 25000")
pt30 = TCut("probe_PT > 30000")
passcut = TCut("probe_AssocZM == 1")
passcutW = TCut("probe_AssocWM == 1")
passcutStd = TCut("probe_AssocStdM == 1")
mass = TCut("boson_M > 70000 && boson_M < 110000")
selcut = ptcut + phicut + triggercut + vtxcut + eta + mass
f = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MD.2016.root')
g = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MU.2016.root')
h = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MD.2015.root')
i = TFile.Open('root://hepgrid11.ph.liv.ac.uk///dpm/ph.liv.ac.uk/home/lhcb/Run2Effs/MuonTracking_WLine.MU.2015.root')
t = f.Get("PlusTag/DecayTree")
u = f.Get("MinusTag/DecayTree")
v = g.Get("PlusTag/DecayTree")
w = g.Get("MinusTag/DecayTree")
tt = h.Get("PlusTag/DecayTree")
uu = h.Get("MinusTag/DecayTree")
vv = i.Get("PlusTag/DecayTree")
ww = i.Get("MinusTag/DecayTree")
magup = TCut("Polarity == 1")
magdown = TCut("Polarity == -1")
selcutMU = selcut + magup
selcutMD = selcut + magdown
'''
etabins = [2.0 , 2.25 , 2.5 , 2.75 , 3.00 , 3.25 , 3.5 , 4.0 , 4.5]
etabins2 = [2.0 , 2.25 , 2.5 , 2.75 , 2.875, 3.00 , 3.1225, 3.25 , 3.375, 3.5 , 4.0 , 4.5]
tckbins = [3500000.0, 4600000.0, 4800000.0, 5700000.0, 5900000.0, 6000000.0, 7100000.0, 7300000.0, 7400000.0,
7500000.0, 7600000.0, 7700000.0, 7900000.0, 7929912.0, 8000000.0]
effvars = [
["ETA", "probe_ETA", 10 , 2 , 4.5 ],
["ETA5", "probe_ETA", 5 , 2 , 4.5 ],
["ETA8", "probe_ETA", etabins ],
["PT", "probe_PT", 10 , 20000 , 70000],
["PT5", "probe_PT", 5 , 20000 , 70000],
["P", "probe_P", 8 , 100000 , 500000],
["PHI", "probe_PHI", 10 , -TMath.Pi() , TMath.Pi()],
["PHI5", "probe_PHI", 5 , -TMath.Pi() , TMath.Pi()],
["VeloClusters", "nVeloClusters", 8 , 0 , 4000 , "I"],
["ITClusters", "nITClusters", 8 , 0 , 2000 , "I"],
["PVs", "nPVs", 6 , -0.5 , 5.5 , "I"],
["TCK", "OdinTCK", tckbins, "I"],
["SPDHits", "nSPDHits", 20 , 0 , 1000, "I"]
]
eff2dvars = [
["ETA_PHI", "ETA5","PHI5"],
["ETA_PT" , "ETA5","PT5"]
]
'''
from effbins_config import *
def makeMuonTrackingRunII(name, selcut, passcut):
MuonTrackingRunIIMagUpMuPlus = EfficiencyClass("Muon"+name+"TrackingRunIIMagUpMuPlus")
MuonTrackingRunIIMagDownMuPlus = EfficiencyClass("Muon"+name+"TrackingRunIIMagDownMuPlus")
MuonTrackingRunIIMagUpMuMinus = EfficiencyClass("Muon"+name+"TrackingRunIIMagUpMuMinus")
MuonTrackingRunIIMagDownMuMinus = EfficiencyClass("Muon"+name+"TrackingRunIIMagDownMuMinus")
MuonTrackingRunIIMagUpMuMinus.AddTree(v)
MuonTrackingRunIIMagUpMuMinus.AddTree(vv)
MuonTrackingRunIIMagUpMuMinus.SetSelectionCut(selcut + magup)
MuonTrackingRunIIMagUpMuMinus.SetPassCut(passcut)
MuonTrackingRunIIMagUpMuMinus.AddVars(effvars + trkeffvars)
MuonTrackingRunIIMagUpMuMinus.Add2DVars(trk2dvars)
MuonTrackingRunIIMagUpMuMinus.Run()
MuonTrackingRunIIMagUpMuMinus.SaveToFile()
MuonTrackingRunIIMagUpMuPlus.AddTree(w)
MuonTrackingRunIIMagUpMuPlus.AddTree(ww)
MuonTrackingRunIIMagUpMuPlus.SetSelectionCut(selcut + magup)
MuonTrackingRunIIMagUpMuPlus.SetPassCut(passcut)
MuonTrackingRunIIMagUpMuPlus.AddVars(effvars + trkeffvars)
MuonTrackingRunIIMagUpMuPlus.Add2DVars(trk2dvars)
MuonTrackingRunIIMagUpMuPlus.Run()
MuonTrackingRunIIMagUpMuPlus.SaveToFile()
MuonTrackingRunIIMagDownMuMinus.AddTree(t)
MuonTrackingRunIIMagDownMuMinus.AddTree(tt)
MuonTrackingRunIIMagDownMuMinus.SetSelectionCut(selcut + magdown)
MuonTrackingRunIIMagDownMuMinus.SetPassCut(passcut)
MuonTrackingRunIIMagDownMuMinus.AddVars(effvars + trkeffvars)
MuonTrackingRunIIMagDownMuMinus.Add2DVars(trk2dvars)
MuonTrackingRunIIMagDownMuMinus.Run()
MuonTrackingRunIIMagDownMuMinus.SaveToFile()
MuonTrackingRunIIMagDownMuPlus.AddTree(u)
MuonTrackingRunIIMagDownMuPlus.AddTree(uu)
MuonTrackingRunIIMagDownMuPlus.SetSelectionCut(selcut + magdown)
MuonTrackingRunIIMagDownMuPlus.SetPassCut(passcut)
MuonTrackingRunIIMagDownMuPlus.AddVars(effvars + trkeffvars)
MuonTrackingRunIIMagDownMuPlus.Add2DVars(trk2dvars)
MuonTrackingRunIIMagDownMuPlus.Run()
MuonTrackingRunIIMagDownMuPlus.SaveToFile()
MuonTrackingRunIIMagDown = EfficiencyClass("Muon"+name+"TrackingRunIIMagDown", MuonTrackingRunIIMagDownMuPlus, MuonTrackingRunIIMagDownMuMinus)
MuonTrackingRunIIMagDown.MakeEfficiencyGraph()
MuonTrackingRunIIMagDown.SaveToFile()
MuonTrackingRunIIMagUp = EfficiencyClass("Muon"+name+"TrackingRunIIMagUp", MuonTrackingRunIIMagUpMuPlus, MuonTrackingRunIIMagUpMuMinus)
MuonTrackingRunIIMagUp.MakeEfficiencyGraph()
MuonTrackingRunIIMagUp.SaveToFile()
MuonTrackingRunIIMuPlus = EfficiencyClass("Muon"+name+"TrackingRunIIMuPlus", MuonTrackingRunIIMagDownMuPlus, MuonTrackingRunIIMagUpMuPlus)
MuonTrackingRunIIMuPlus.MakeEfficiencyGraph()
MuonTrackingRunIIMuPlus.SaveToFile()
MuonTrackingRunIIMuMinus = EfficiencyClass("Muon"+name+"TrackingRunIIMuMinus", MuonTrackingRunIIMagDownMuMinus, MuonTrackingRunIIMagUpMuMinus)
MuonTrackingRunIIMuMinus.MakeEfficiencyGraph()
MuonTrackingRunIIMuMinus.PrintEfficiencies("ETA")
MuonTrackingRunIIMuMinus.SaveToFile()
MuonTrackingRunII = EfficiencyClass("Muon"+name+"TrackingRunII", MuonTrackingRunIIMagDown, MuonTrackingRunIIMagUp)
MuonTrackingRunII.MakeEfficiencyGraph()
MuonTrackingRunII.SaveToFile()
makeMuonTrackingRunII("",selcut,passcut)
#makeMuonTrackingRunII("W",selcut,passcutW)
|
[
"[email protected]"
] | |
17a4c3efc94fc1e6caad8a5a7ade5f392c075824
|
5c7db30d59cd28fe1923bb5fdb9280ffe2070b70
|
/django-polls/polls/migrations/0001_initial.py
|
cca72afb3465cec2f3f673e3e259b8a64609593e
|
[] |
no_license
|
golkedj/django_test
|
6816b640e675aabd311de98907ff38fc8034b7d5
|
d1ab4b5bf6984aee78163a94638460f187ca12a9
|
refs/heads/master
| 2021-01-22T16:44:30.569480 | 2017-09-06T16:56:23 | 2017-09-06T16:56:23 | 100,724,483 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,230 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 14:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"="
] |
=
|
778373ee38e2b8e500a508492b5c81d519f80a09
|
f8671d120f8f32b0febe94f4dc84570603e34fac
|
/utils_driver.py
|
c9b9a0185636c8784dadc34512484fe9360420ca
|
[] |
no_license
|
ahashisyuu/OpenSpider
|
f35772a53c4de4217df9dc1ee8f2078e1c2eb281
|
31da122dc2ab658142c34089f3cc0fe71a5016ca
|
refs/heads/master
| 2022-03-19T01:37:58.965682 | 2019-12-10T12:40:02 | 2019-12-10T12:40:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 418 |
py
|
from selenium import webdriver
import platform
#print platform.system()
def get_driver():
system = platform.system()
if system == "Linux":
return webdriver.PhantomJS(executable_path='/home/ubuntu/phantomjs-2.1.1-linux-x86_64/bin/phantomjs')
else:
return webdriver.Chrome()
#return webdriver.PhantomJS()
#driver = get_driver()
#driver.get("http://www.baidu.com")
#driver.close()
|
[
"[email protected]"
] | |
d5706657c7a3d28103d085bb0dbf7d12e11bac82
|
173b7e08d9fdbfeda8349570f7ccd93cbd6c02d4
|
/example_model/model_node_label.py
|
84ea201452534e2e144905c11f081a4272f8ac42
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
embeddedsamurai/kGCN-1
|
ef647d539fb79d6b5ebe090a3b27b349933d6ca4
|
7bc4dc32afd7a76e31b3bd37e2cb71611ba1fc5f
|
refs/heads/master
| 2020-08-04T16:51:36.430607 | 2019-10-01T05:02:31 | 2019-10-01T05:02:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,427 |
py
|
import tensorflow as tf
import tensorflow.contrib.keras as K
import kgcn.layers
from kgcn.default_model import DefaultModel
class GCN(DefaultModel):
def build_placeholders(self,info,config,batch_size):
# input data types (placeholders) of this neural network
return self.get_placeholders(info,config,batch_size,
['adjs','nodes','mask','dropout_rate',
'node_label','mask_node_label',
'enabled_node_nums','is_train','features'])
def build_model(self,placeholders,info,config,batch_size):
adj_channel_num=info.adj_channel_num
embedding_dim=config["embedding_dim"]
in_adjs=placeholders["adjs"]
features=placeholders["features"]
in_nodes=placeholders["nodes"]
labels=placeholders["node_label"]
mask_labels=placeholders["mask_node_label"]
mask=placeholders["mask"]
enabled_node_nums=placeholders["enabled_node_nums"]
is_train=placeholders["is_train"]
layer=features
input_dim=info.feature_dim
if features is None:
layer=K.layers.Embedding(info.all_node_num,embedding_dim)(in_nodes)
input_dim=embedding_dim
# layer: batch_size x graph_node_num x dim
layer=kgcn.layers.GraphConv(64,adj_channel_num)(layer,adj=in_adjs)
layer=kgcn.layers.GraphBatchNormalization()(layer,
max_node_num=info.graph_node_num,enabled_node_nums=enabled_node_nums)
layer=tf.nn.relu(layer)
layer=kgcn.layers.GraphConv(64,adj_channel_num)(layer,adj=in_adjs)
layer=kgcn.layers.GraphBatchNormalization()(layer,
max_node_num=info.graph_node_num,enabled_node_nums=enabled_node_nums)
layer=tf.nn.relu(layer)
layer=kgcn.layers.GraphConv(2,adj_channel_num)(layer,adj=in_adjs)
prediction=tf.nn.softmax(layer)
# computing cost and metrics
cost=tf.nn.softmax_cross_entropy_with_logits(labels=labels,logits=layer)
cost=mask*tf.reduce_mean(cost,axis=1)
cost_opt=tf.reduce_mean(cost)
metrics={}
cost_sum=tf.reduce_sum(cost)
pre_count=tf.cast(tf.equal(tf.argmax(prediction,2), tf.argmax(labels,2)),tf.float32)
correct_count=mask*tf.reduce_mean(pre_count,axis=1)
metrics["correct_count"]=tf.reduce_sum(correct_count)
return layer,prediction,cost_opt,cost_sum,metrics
|
[
"[email protected]"
] | |
725223f8d060081f839ffe104c2a1a8f0c49e687
|
920f81d8f5fbd45eb15f2970d0bd528b921a3d46
|
/pyplot/plot_loss.py
|
81bb50f95b08e5d8fafdc78fc8d47652605f5877
|
[] |
no_license
|
minhnd3796/RIVF2019_Minh
|
740a4015b7741bea9d2503088e99bc1a97a1f18f
|
c2439421efcbae3bad09f459a3d582b7fcf735c4
|
refs/heads/master
| 2020-03-25T03:49:20.533009 | 2018-08-03T01:27:14 | 2018-08-03T01:27:14 | 143,361,843 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,254 |
py
|
from pandas import read_csv
import pylab
from sys import argv
train_data_8s = read_csv('FCN-8s-ResNet101_Vaihingen/run_train-tag-entropy_1.csv')
train_step_8s = train_data_8s.iloc[:, 1].values
train_acc_8s = train_data_8s.iloc[:, 2].values
validation_data_8s = read_csv('FCN-8s-ResNet101_Vaihingen/run_validation-tag-entropy_1.csv')
validation_step_8s = validation_data_8s.iloc[:, 1].values
validation_acc_8s = validation_data_8s.iloc[:, 2].values
pylab.plot(train_step_8s, train_acc_8s, 'green', label='Training with 2 skips')
pylab.plot(validation_step_8s, validation_acc_8s, 'purple', label='Validation with 2 skips')
train_data_4s = read_csv('FCN-4s-ResNet101_Vaihingen/run_train-tag-entropy_1.csv')
train_step_4s = train_data_4s.iloc[:, 1].values
train_acc_4s = train_data_4s.iloc[:, 2].values
validation_data_4s = read_csv('FCN-4s-ResNet101_Vaihingen/run_validation-tag-entropy_1.csv')
validation_step_4s = validation_data_4s.iloc[:, 1].values
validation_acc_4s = validation_data_4s.iloc[:, 2].values
pylab.plot(train_step_4s, train_acc_4s, 'r', label='Training with 3 skips')
pylab.plot(validation_step_4s, validation_acc_4s, 'b', label='Validation with 3 skips')
pylab.legend(loc='upper left')
pylab.xlabel('Step')
pylab.ylabel('Loss')
pylab.show()
|
[
"[email protected]"
] | |
f1b816434823e5ff322719c6e792a034ea4f4c35
|
177bb6567b9564b1feb1d6e25ab1e0d61adf8770
|
/ResidualLoss/CNN_l2_prob_far_dist.py
|
dc834cb205256111664a4feebdedd1accd470493
|
[] |
no_license
|
fzdy1914/NUS-FYP
|
4ae9b299cf1cb72a01b371998781b9cec333d3f0
|
cb7195a8b025eb8ab2becd26886551479796f930
|
refs/heads/master
| 2023-04-16T05:08:12.529777 | 2021-04-05T06:56:15 | 2021-04-05T06:56:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,487 |
py
|
import random
import sys
from torch.autograd import Variable
from torch import optim
import numpy as np
from torch.backends import cudnn
import torch.nn.functional as F
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler
from ResidualLoss.dataset import cifar10_data_loader_test, cifar10_data_loader_train, cifar10_dataset_train
from ResidualLoss.model import CIFAR_17
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
log_loc = "./log/%s.txt" % sys.argv[0].split("/")[-1].split(".")[0]
self.log = open(log_loc, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
self.log.flush()
def flush(self):
pass
sys.stdout = Logger()
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.deterministic = True
setup_seed(1914)
num_epochs = 200
batch_size = 100
evaluation_batch_size = 2500
learning_rate = 0.0001
ref_model = CIFAR_17().cuda()
model = CIFAR_17().cuda()
state_dict = torch.load('./CIFAR-17-1.pt')
ref_model.eval()
model.train()
# optimizer = optim.Adam([
# {'params': model.conv1.parameters()},
# {'params': model.conv2.parameters()},
# {'params': model.conv3.parameters()}
# ], lr=learning_rate, weight_decay=1e-5)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
train_dataset = cifar10_dataset_train()
train_data_length = len(train_dataset)
sampler = WeightedRandomSampler([1] * train_data_length, num_samples=train_data_length, replacement=True)
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
evaluation_data_loader = cifar10_data_loader_train(batch_size=evaluation_batch_size, shuffle=False)
test_data_loader = cifar10_data_loader_test(batch_size)
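# Every example starts with sampling weight 1; the indices in ignore_idx_lst
# are zeroed out, and WeightedRandomSampler never draws zero-weight samples.
# prob.sum() therefore reports how many examples remain eligible.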
prob = torch.ones(len(train_dataset), dtype=torch.float64)
ignore_idx_lst = torch.load('CD/ignore_idx_lst.pt')
for idx in ignore_idx_lst:
prob[idx] = 0
sampler.weights = prob
print(prob.sum())
def residual_train():
total_correct_sum = 0
total_classification_loss = 0
for epoch in range(num_epochs):
total_correct = 0
model.eval()
with torch.no_grad():
for data, target in evaluation_data_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
total_correct += pred.eq(target.view_as(pred)).sum().item()
model.train()
total_train_loss = 0
for data, target in train_data_loader:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output, features = model.features(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
total_train_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
total_train_loss /= train_data_length
total_correct_sum += total_correct
total_classification_loss += total_train_loss
print('epoch [{}/{}], loss:{:.4f} Accuracy: {}/{}'.format(epoch + 1, num_epochs, total_train_loss, total_correct, train_data_length))
print("average correct:", total_correct_sum / num_epochs)
print("average loss:", total_classification_loss / num_epochs)
def test():
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_data_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1) # get the index of the max log-probability
correct += pred.eq(target).sum().item()
test_loss /= len(test_data_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_data_loader.dataset),
100. * correct / len(test_data_loader.dataset)))
# 1000, 500, 200, 100, 75, 50, 25, 10, 5, 1, 0.5,
if __name__ == '__main__':
ref_model.load_state_dict(state_dict)
model.load_state_dict(state_dict)
residual_train()
loc = "./CNN-l2-far-dist/non-freeze.pt"
torch.save(model.state_dict(), loc)
|
[
"[email protected]"
] | |
17f147002517ca6e9ce3f90605cfde55fb9f8c21
|
8f736b5cc28cc1d46506abf1b001eb41cc1f9423
|
/apps/trade/migrations/0021_auto_20210322_2247.py
|
9d43bf7c166f0d2b8fdeb3cf75abab92377b96c8
|
[] |
no_license
|
tang1323/MxShop
|
6ac68502f59ae07b483b6145e1b557399192e3dd
|
831b5bdd8abdf7d6e547b0bd3fff9341261e4afa
|
refs/heads/master
| 2023-04-04T07:09:32.759476 | 2021-04-14T14:36:00 | 2021-04-14T14:36:00 | 357,937,706 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 629 |
py
|
# Generated by Django 2.2 on 2021-03-22 22:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trade', '0020_auto_20210322_1137'),
]
operations = [
migrations.AlterField(
model_name='orderinfo',
name='pay_status',
field=models.CharField(blank=True, choices=[('TRADE_SUCCESS', '成功'), ('paying', '待支付'), ('TRADE_FINISHED', '交易结束'), ('WAIT_BUYER_PAY', '交易创建'), ('TRADE_CLOSED', '超时关闭')], default='paying', max_length=30, null=True, verbose_name='订单状态'),
),
]
|
[
"[email protected]"
] | |
ccec29fd6ea83bdc111cb217e95734492d2579ad
|
42348c0ff9785bbab18d87f277df791331bbc121
|
/tests/test_pitches.py
|
2d1420380d1934aca34db60e8b9154860a4255cc
|
[
"MIT"
] |
permissive
|
vincentouma/thinkout
|
783449834bd856d17e5273d9d3a50ecb6d79f6ef
|
85306ccec7924ad4fd6fe7ffb75aa537d9fe97c0
|
refs/heads/master
| 2020-06-29T11:48:09.283309 | 2019-08-06T05:59:14 | 2019-08-06T05:59:14 | 200,524,144 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 271 |
py
|
import unittest
from app.models import User

class UserModelTest(unittest.TestCase):
    def setUp(self):
        # assumed constructor: a User whose password is hashed on assignment
        self.new_user = User(password='banana')

    def test_no_access_password(self):
        with self.assertRaises(AttributeError):
            self.new_user.password

    def test_password_verification(self):
        self.assertTrue(self.new_user.verify_password('banana'))
|
[
"[email protected]"
] | |
6aad1e54b8786ecb8e264520db3f9ee24f1bfb49
|
9ac99a99dc8f79f52fbbe3e8a5b311b518fe45d9
|
/apps/hrm/models/employee_types.py
|
35bc9fbf61826d7c5683608a6038c4e0d7ac01e7
|
[] |
no_license
|
nttlong/quicky-01
|
eb61620e01f04909d564244c46a03ca2b69dfecc
|
0f5610aa7027429bdd9ca9b45899a472c372c6cc
|
refs/heads/master
| 2020-03-25T17:45:31.633347 | 2018-11-27T15:02:30 | 2018-11-27T15:02:30 | 143,994,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
from qmongo import extends, extends_dict,define
from . commons import base
model_name = "employee_types"
extends(
model_name,
base.model_name,
[],
formular = ("text")
)
|
[
"[email protected]"
] | |
9d20f3f16753593924c66dad24694eb7c72e00c0
|
795435350d2e4fe415acd1fb846abb1c0cf94911
|
/client/code/do some researches on crawler/douban/douban.py
|
8cac1d9078cbb947968dfe3d23aeff118fa4b940
|
[] |
no_license
|
gaoxinge/network
|
c3575c7f0d95f7458a4ec74880ca5b8a0bff773e
|
68d307ec0756abff60914225fd38d69fa4b2a37c
|
refs/heads/master
| 2021-06-06T05:49:36.521243 | 2021-06-02T15:42:39 | 2021-06-02T15:42:39 | 84,033,128 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,133 |
py
|
import requests
from lxml import etree
from Item import Item
import time
def http(url):
response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
return response
def parse(response):
Movie = Item('Movie', ['title', 'rating', 'vote'])
root = etree.HTML(response.text)
results = root.xpath('//div[@class=\'pl2\']')
for result in results:
movie = Movie()
movie['title'] = result.xpath('a/text()')[0][:-2].strip()
movie['rating'] = float(result.xpath('.//span[@class=\'rating_nums\']/text()')[0])
movie['vote'] = int(result.xpath('.//span[@class=\'pl\']/text()')[0][1:][:-4])
yield movie
def store(item):
f.write(str(item) + '\n')
def http_parse_store(url):
response = http(url)
items = parse(response)
for item in items:
store(item)
urls = ['https://movie.douban.com/tag/2016?start=' + str((i-1)*20) for i in range(1, 10)]
f = open('douban.txt', 'w')
start = time.time()
while urls:
    http_parse_store(urls.pop(0))
print time.time() - start
f.close()
|
[
"[email protected]"
] | |
04ad4d19924cc49f42a7f6ac77847e9bb33362eb
|
9f8fa29bb5a93f896862806157b10b55e9f26825
|
/message_media_conversations/models/message_dto.py
|
f07f49fac18257112666287a40d5f2c106d9e2f8
|
[
"Apache-2.0"
] |
permissive
|
messagemedia/conversations-python-sdk
|
1b245ca7f63ca0c6fdbcd17a9bd11565d421e2a0
|
b53046540bd5c826de784228f838468c22b863cf
|
refs/heads/master
| 2020-03-19T05:52:05.735297 | 2018-10-16T23:29:31 | 2018-10-16T23:29:31 | 135,969,313 | 0 | 0 | null | 2018-06-04T04:12:18 | 2018-06-04T04:09:34 | null |
UTF-8
|
Python
| false | false | 2,057 |
py
|
# -*- coding: utf-8 -*-
"""
message_media_conversations.models.message_dto
This file was automatically generated for MessageMedia by APIMATIC v2.0 ( https://apimatic.io )
"""
class MessageDto(object):
"""Implementation of the 'MessageDto' model.
TODO: type model description here.
Attributes:
channel (string): TODO: type description here.
id (string): TODO: type description here.
text (string): TODO: type description here.
timestamp (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"channel":'channel',
"id":'id',
"text":'text',
"timestamp":'timestamp'
}
def __init__(self,
channel=None,
id=None,
text=None,
timestamp=None):
"""Constructor for the MessageDto class"""
# Initialize members of the class
self.channel = channel
self.id = id
self.text = text
self.timestamp = timestamp
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
channel = dictionary.get('channel')
id = dictionary.get('id')
text = dictionary.get('text')
timestamp = dictionary.get('timestamp')
# Return an object of this model
return cls(channel,
id,
text,
timestamp)
|
[
"[email protected]"
] | |
1227e2067f8c114470a88b026b4a6e6c16ee45bd
|
4ea43f3f79ad483d83238d88572feb822f451372
|
/philo/models/fields/__init__.py
|
efd315f9c3c5e11afe2ba9802508200ca1a0905c
|
[
"ISC"
] |
permissive
|
kgodey/philo
|
c8c433d44b2f31121f13bd0ee101605be11fe9da
|
c19bf577d44606d2b284e6058d633f4a174b61cc
|
refs/heads/master
| 2020-12-29T02:54:11.746966 | 2011-05-24T21:57:47 | 2011-05-24T21:57:47 | 686,009 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,547 |
py
|
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.utils import simplejson as json
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from philo.forms.fields import JSONFormField
from philo.validators import TemplateValidator, json_validator
#from philo.models.fields.entities import *
class TemplateField(models.TextField):
"""A :class:`TextField` which is validated with a :class:`.TemplateValidator`. ``allow``, ``disallow``, and ``secure`` will be passed into the validator's construction."""
def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure))
class JSONDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ?
if self.field.name not in instance.__dict__:
json_string = getattr(instance, self.field.attname)
instance.__dict__[self.field.name] = json.loads(json_string)
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value))
def __delete__(self, instance):
del(instance.__dict__[self.field.name])
setattr(instance, self.field.attname, json.dumps(None))
class JSONField(models.TextField):
"""A :class:`TextField` which stores its value on the model instance as a python object and stores its value in the database as JSON. Validated with :func:`.json_validator`."""
default_validators = [json_validator]
def get_attname(self):
return "%s_json" % self.name
def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls)
def fix_init_kwarg(self, sender, args, kwargs, **signal_kwargs):
# Anything passed in as self.name is assumed to come from a serializer and
# will be treated as a json string.
if self.name in kwargs:
value = kwargs.pop(self.name)
# Hack to handle the xml serializer's handling of "null"
if value is None:
value = 'null'
kwargs[self.attname] = value
def formfield(self, *args, **kwargs):
kwargs["form_class"] = JSONFormField
return super(JSONField, self).formfield(*args, **kwargs)
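# Hypothetical usage sketch (model name is illustrative): assignment passes
# through JSONDescriptor, keeping the python object and its JSON string in
# sync on the instance.
#     class Page(models.Model):
#         data = JSONField(blank=True)
#
#     page = Page(data={'a': 1})
#     page.data        # {'a': 1} -- python object held on the instance
#     page.data_json   # '{"a": 1}' -- serialized value stored in the DB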
class SlugMultipleChoiceField(models.Field):
"""Stores a selection of multiple items with unique slugs in the form of a comma-separated list."""
__metaclass__ = models.SubfieldBase
description = _("Comma-separated slug field")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if not value:
return []
if isinstance(value, list):
return value
return value.split(',')
def get_prep_value(self, value):
return ','.join(value)
def formfield(self, **kwargs):
# This is necessary because django hard-codes TypedChoiceField for things with choices.
defaults = {
'widget': forms.CheckboxSelectMultiple,
'choices': self.get_choices(include_blank=False),
'label': capfirst(self.verbose_name),
'required': not self.blank,
'help_text': self.help_text
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
form_class = forms.TypedMultipleChoiceField
return form_class(**defaults)
def validate(self, value, model_instance):
invalid_values = []
for val in value:
try:
validate_slug(val)
except ValidationError:
invalid_values.append(val)
if invalid_values:
# should really make a custom message.
raise ValidationError(self.error_messages['invalid_choice'] % invalid_values)
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^philo\.models\.fields\.SlugMultipleChoiceField"])
add_introspection_rules([], ["^philo\.models\.fields\.TemplateField"])
add_introspection_rules([], ["^philo\.models\.fields\.JSONField"])
|
[
"[email protected]"
] | |
b12c9631dbd2f75d27a5ac4754fee8e016fc58c0
|
60acb606318869410d7437bf6c1a16fd6762b6b4
|
/app/__init__.py
|
871f9dabd9b8272d79ccffa706296dcf48f3ee49
|
[
"Apache-2.0"
] |
permissive
|
heraclitusj/mgek_imgbed
|
8fb0c69599fab3fce06684f659dfd5c0b4c5f866
|
d8a77ba1401f42237adda1b3ea8611f6464a704e
|
refs/heads/master
| 2022-07-28T01:48:51.314094 | 2020-05-20T05:35:52 | 2020-05-20T05:35:52 | 265,461,338 | 0 | 0 | null | 2020-05-20T05:31:37 | 2020-05-20T05:31:37 | null |
UTF-8
|
Python
| false | false | 1,231 |
py
|
# @Author: Landers1037
# @Github: github.com/landers1037
# @File: __init__.py
# @Date: 2020-05-12
from flask import Flask
from app.config import *
from flask_sqlalchemy import SQLAlchemy
from flask_pymongo import PyMongo
# On startup a database connection is initialized by default; the engine setting selects which configured database is used
db = SQLAlchemy()
mongo = PyMongo()
global_config = None
def create_app(mode=None):
application = Flask(__name__, static_url_path='/images', static_folder='../images')
check_config()
global global_config
global_config = read_config()
if mode == 'dev' or global_config.debug:
application.debug = True
application.config.from_object(flask_config())
    # Add error handling around the database connection selection
if global_config.engine == 'sqlite':
db.init_app(application)
elif global_config.engine == 'mongo':
mongo.init_app(application)
else:
db.init_app(application)
from .api.img import img
from .api.auth import auth
from .api.sys import sys
application.register_blueprint(img)
application.register_blueprint(auth)
application.register_blueprint(sys)
return application
|
[
"[email protected]"
] | |
ca55231bed72276df46a7e9b1d23e67ae3171425
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_fade.py
|
48b2ce0cd12e383a309567bd8721b04aafb27dd9
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 337 |
py
|
# class header
class _FADE():
def __init__(self,):
self.name = "FADE"
self.definitions = [u'to (cause to) lose colour, brightness, or strength gradually: ']
self.parents = []
  self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
fa0b0e8390377d9b563df54e6bfa61219bfd4b70
|
ad0857eaba945c75e705594a53c40dbdd40467fe
|
/baekjoon/python/buying_cards_11052.py
|
b29c863cc46b83674f4b81cdf48a7cffc84bb63f
|
[
"MIT"
] |
permissive
|
yskang/AlgorithmPractice
|
c9964d463fbd0d61edce5ba8b45767785b0b5e17
|
3efa96710e97c8740d6fef69e4afe7a23bfca05f
|
refs/heads/master
| 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 |
Python
|
UTF-8
|
Python
| false | false | 630 |
py
|
# Title: Buying Cards (카드 구매하기)
# Link: https://www.acmicpc.net/problem/11052
import sys
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(num_card: int, packs: list):
d = [0 for _ in range(num_card+1)]
for i in range(1, num_card+1):
d[i] = max([d[i-j] + packs[j-1] for j in range(1, i+1)])
return d[num_card]
def main():
N = read_single_int()
P = read_list_int()
print(solution(N, P))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
159e01e7c2fe4f3943abf29f49cebe1232f215b3
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/meta/VehiclePreviewMeta.py
|
3b324dcf456ad95c0dd3471a225ee52f30dbbf10
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 2,046 |
py
|
# 2017.02.03 21:51:10 Central Europe (standard time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/VehiclePreviewMeta.py
from gui.Scaleform.framework.entities.View import View
class VehiclePreviewMeta(View):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends View
"""
def closeView(self):
self._printOverrideError('closeView')
def onBackClick(self):
self._printOverrideError('onBackClick')
def onBuyOrResearchClick(self):
self._printOverrideError('onBuyOrResearchClick')
def onOpenInfoTab(self, index):
self._printOverrideError('onOpenInfoTab')
def onCompareClick(self):
self._printOverrideError('onCompareClick')
def as_setStaticDataS(self, data):
"""
:param data: Represented by VehPreviewStaticDataVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_setStaticData(data)
def as_updateInfoDataS(self, data):
"""
:param data: Represented by VehPreviewInfoPanelVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_updateInfoData(data)
def as_updateVehicleStatusS(self, status):
if self._isDAAPIInited():
return self.flashObject.as_updateVehicleStatus(status)
def as_updatePriceS(self, data):
"""
:param data: Represented by VehPreviewPriceDataVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_updatePrice(data)
def as_updateBuyButtonS(self, data):
"""
:param data: Represented by VehPreviewBuyButtonVO (AS)
"""
if self._isDAAPIInited():
return self.flashObject.as_updateBuyButton(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\meta\VehiclePreviewMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:51:10 Central Europe (standard time)
|
[
"[email protected]"
] | |
0ac14a8d24cb5d63875837fa4d9df2a7b1f8b5c6
|
86741e9f531f2aa63af682cc974ebbcc3b202e90
|
/allhub/users/ssh_keys.py
|
5829881dbdcb9d0302a4e6deb02739cf3e27ca79
|
[
"Apache-2.0"
] |
permissive
|
srinivasreddy/allhub
|
ccebea96a106e266743d180410ab5b16d08946fe
|
ff20858c9984da5c4edd5043c39eed3b6d5d693d
|
refs/heads/master
| 2022-12-27T01:24:30.759553 | 2021-06-04T11:38:16 | 2021-06-04T11:38:16 | 204,402,796 | 2 | 2 |
Apache-2.0
| 2022-12-08T07:44:11 | 2019-08-26T05:33:37 |
Python
|
UTF-8
|
Python
| false | false | 1,817 |
py
|
from allhub.response import Response
class SSHKeysMixin:
def list_public_ssh_keys(self, username):
url = "/users/{username}/keys".format(username=username)
self.response = Response(
self.get(
url,
**{"Accept": "application/vnd.github.giant-sentry-fist-preview+json"},
),
"SSHKeys",
)
return self.response.transform()
def ssh_keys(self):
url = "/user/keys"
self.response = Response(
self.get(
url,
**{"Accept": "application/vnd.github.giant-sentry-fist-preview+json"},
),
"SSHKeys",
)
return self.response.transform()
def ssh_key(self, key_id):
url = "/user/keys/{key_id}".format(key_id=key_id)
self.response = Response(
self.get(
url,
**{"Accept": "application/vnd.github.giant-sentry-fist-preview+json"},
),
"SSHKey",
)
return self.response.transform()
def create_public_ssh_key(self, title, key):
url = "/user/keys"
self.response = Response(
self.post(
url,
params=[("title", title), ("key", key)],
**{"Accept": "application/vnd.github.giant-sentry-fist-preview+json"},
),
"SSHKey",
)
return self.response.transform()
def delete_public_ssh_key(self, key_id):
url = "/user/keys/{key_id}".format(key_id=key_id)
self.response = Response(
self.delete(
url,
**{"Accept": "application/vnd.github.giant-sentry-fist-preview+json"},
),
"",
)
return self.response.status_code == 204
|
[
"[email protected]"
] | |
e0159a0bc43cebe51ee88486e5e5cacadec5a5a7
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/netex/models/railway_link_ref_structure.py
|
00ba98f8df1e4547deb46e2b051c3da101b80055
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 |
Python
|
UTF-8
|
Python
| false | false | 246 |
py
|
from dataclasses import dataclass
from .infrastructure_link_ref_structure import InfrastructureLinkRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class RailwayLinkRefStructure(InfrastructureLinkRefStructure):
pass
|
[
"[email protected]"
] | |
6d608921210b60fa6848d362d756532953b5c228
|
2b770588db83bc2a61b52f430248414395cc1b1f
|
/django_/citysearch_project/cities/models.py
|
fa4d0dc5e09ad2f0c02245c2ca01b266a2024625
|
[] |
no_license
|
luka319/portfelio_chemodanio
|
595afb1d2e1fb3564bf94b204aa8e63dddd4cf0c
|
dd37f8c4af9d043ace9b5438b5a7680cfab26ab2
|
refs/heads/master
| 2021-06-26T19:05:33.170977 | 2020-01-08T23:24:10 | 2020-01-08T23:24:10 | 231,444,932 | 0 | 0 | null | 2021-06-10T22:27:34 | 2020-01-02T19:19:49 |
Python
|
UTF-8
|
Python
| false | false | 296 |
py
|
from django.db import models
# Create your models here.
class City(models.Model):
name = models.CharField(max_length = 255)
state = models.CharField(max_length = 255)
class Meta:
verbose_name_plural = "cities_города"
def __str__(self):
return self.name
|
[
"[email protected]"
] | |
5644e5c0b6aa0dab0c7749c8574c9a70eebc075c
|
400b0cb1f25cc2fbe80a3037c06102f40c4d2d89
|
/string33.py
|
575f57fac37b82880d7965f8c50047498875f63b
|
[] |
no_license
|
Prithamprince/Python-programming
|
4c747d306829de552e3b0c6af67cfe534a2dc2e1
|
79a0953084a01978e75d2be4db0d35ba1cf29259
|
refs/heads/master
| 2020-05-30T06:29:26.134906 | 2019-12-13T06:33:49 | 2019-12-13T06:33:49 | 189,580,341 | 0 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 226 |
py
|
from itertools import combinations
# Print YES if any length len(p)-1 subsequence of the input (the string
# with one character removed, order preserved) is a palindrome, else NO.
p = input()
q = 0
l = list(combinations(p, len(p)-1))
for i in range(len(l)):
    if l[i] == l[i][::-1]:
        print("YES")
        q = 1
        break
if q == 0:
    print("NO")
|
[
"[email protected]"
] | |
015fd34248887879e5b092b00ab71bd4a61b4d02
|
8a9ba5e9e8c9f3d8e05b6840f1c17d526344e6d2
|
/src/reia.py
|
7ada168207031f9a5c85d73b90325662fbde0a50
|
[
"MIT"
] |
permissive
|
maanavshah/remote-intelligent-assistant
|
fe1e2bcb6d43345553194c442d4676b3137e0348
|
65ea7287d0ca2dd98a376bbadc81a5093b9b6046
|
refs/heads/master
| 2021-06-13T20:20:05.622634 | 2019-12-20T12:54:00 | 2019-12-20T12:54:00 | 142,580,543 | 4 | 0 |
MIT
| 2021-03-25T21:58:30 | 2018-07-27T13:27:56 |
Python
|
UTF-8
|
Python
| false | false | 5,369 |
py
|
import yaml
import sys
import random
import nltk
import operator
import jellyfish as jf
import json
import requests
import os
import time
import signal
import subprocess
from nltk.tag import StanfordPOSTagger
from textblob.classifiers import NaiveBayesClassifier
from execute import construct_command
from feedback import get_user_feedback
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
from sklearn import preprocessing
def signal_handler(signal, frame):
print ('Thank You!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
my_path = os.path.abspath(os.path.dirname(__file__))
CONFIG_PATH = os.path.join(my_path, "../config/config.yml")
MAPPING_PATH = os.path.join(my_path, "../data/mapping.json")
TRAINDATA_PATH = os.path.join(my_path, "../data/traindata.txt")
LABEL_PATH = os.path.join(my_path, "../data/")
sys.path.insert(0, LABEL_PATH)
import trainlabel
with open(CONFIG_PATH,"r") as config_file:
    config = yaml.load(config_file, Loader=yaml.SafeLoader)
os.environ['STANFORD_MODELS'] = config['tagger']['path_to_models']
exec_command = config['preferences']['execute']
def get_username(user_id):
payload = {'token': config['slack']['slack_token'], 'user': user_id}
r = requests.post(config['slack']['user_info'], params=payload)
return r.json()['user']['name']
def read_message():
payload = {'token': config['slack']['slack_token'], 'channel': config['slack']['channel'] , 'count': '1'}
r = requests.get(config['slack']['get_url'], params=payload)
message = r.json()['messages'][0]['text']
ts = r.json()['messages'][0]['ts']
data = r.json()['messages'][0]
if 'user' not in data:
user = r.json()['messages'][0]['username']
else:
user = r.json()['messages'][0]['user']
return(message,ts,user)
def post_message(message):
payload = {'token': config['slack']['slack_token'], 'channel': config['slack']['channel'] , 'text': message, 'username':config['slack']['username']}
r = requests.post(config['slack']['post_url'], params=payload)
return r
def classify(text):
X_train = np.array([line.rstrip('\n') for line in open(TRAINDATA_PATH)])
y_train_text = trainlabel.y_train_text
X_test = np.array([text])
target_names = ['file', 'folder', 'network', 'system', 'general']
lb = preprocessing.MultiLabelBinarizer()
Y = lb.fit_transform(y_train_text)
classifier = Pipeline([
('vectorizer', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', OneVsRestClassifier(LinearSVC()))])
classifier.fit(X_train, Y)
predicted = classifier.predict(X_test)
all_labels = lb.inverse_transform(predicted)
for item, labels in zip(X_test, all_labels):
return (', '.join(labels))
def suggestions(suggest_list):
suggest = (sorted(suggest_list,reverse=True)[:5])
return suggest
def consume_message():
cmd = "sed -i -e \"1d\" /home/maanav/REIA/mqueue.txt"
proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def call_reia():
while(True):
max_score = 0.1
map_val = ""
with open('/home/maanav/REIA/mqueue.txt', 'r') as f:
first_line = f.readline()
while first_line == "":
time.sleep(1)
call_reia()
print('-----------------------')
user_input = first_line.split(' ', 1)[1]
user_name = get_username(first_line.split(' ', 1)[0])
suggest_list = []
suggest_message = ""
#prev_ts = ts
print("\nINPUT = ")
print(user_input)
label = classify(user_input)
if label == "":
# post_message("Sorry, I could not understand. Please rephrase and try again.")
with open("/home/maanav/REIA/src/user.txt", "a") as output_file:
output_file.write("Incorrectly mapped ::User-Input = " + user_input)
consume_message()
continue
print("Classified as : "+str(label))
tokens = nltk.word_tokenize(user_input)
print(tokens)
st = StanfordPOSTagger(config['tagger']['model'],path_to_jar=config['tagger']['path'])
stanford_tag = st.tag(user_input.split())
print("Tags")
print(stanford_tag)
with open(MAPPING_PATH,'r') as data_file:
data = json.load(data_file)
for i in data[label]:
dist = jf.jaro_distance(str(user_input),str(i))
suggest_list.append(tuple((dist,i)))
print(dist)
if(dist > max_score):
max_score = dist
map_val = i
if max_score < config['preferences']['similarity_threshold']:
# post_message("Sorry, I could not understand. Please rephrase and try again.")
with open("/home/maanav/REIA/src/user.txt", "a") as output_file:
output_file.write("Incorrectly mapped ::User-Input = " + user_input)
consume_message()
continue
if config['preferences']['suggestions'] == True:
suggest = suggestions(suggest_list)
post_message("Did you mean :")
for i in suggest:
suggest_message += (str(i[1])+"\n")
post_message(suggest_message)
continue
print("\nMapped to : "+map_val)
with open("/home/maanav/REIA/src/user.txt", "a") as output_file:
output_file.write("correctly mapped to : " + map_val + " User-Input = " + user_input)
#post_message(map_val)
construct_command(user_input,label,tokens,map_val,stanford_tag,exec_command,user_name)
#call('sed -i -e "1d " REIA/mqueue.txt')
consume_message()
#print(response)
print("Starting...")
call_reia()
|
[
"[email protected]"
] | |
3a068e2a6864d85f641af5e0ebd662ca44331292
|
07a1088bcec25cdf7e4027abc5a8dc83eb37ffb4
|
/fabrik/ext/nginx.py
|
ccc6e5e8f0774d418cd35226796dadcf056ebc96
|
[
"MIT"
] |
permissive
|
Frojd/Fabrik
|
7e00bb66761c552da9d70cc36f3ff0108bf7a481
|
9f2edbba97a7fd236b72a9b3010f6e912ab5c001
|
refs/heads/master
| 2020-04-06T04:39:31.445843 | 2018-04-16T06:54:21 | 2018-04-16T06:54:21 | 25,035,502 | 12 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
# -*- coding: utf-8 -*-
"""
fabrik.ext.nginx
----------------------
Methods for handling nginx
"""
from fabric.state import env
def restart():
env.run("service nginx restart")
def reload():
env.run("nginx -s reload")
|
[
"[email protected]"
] | |
1c89e34f2a701a441c1be1d145087c705e02ff86
|
f2171e2f2c78d616a381b3308d13a600d687587f
|
/x.Machine Learning Foundation/NumPy and Pandas Part 1/numpy_index_array.py
|
fce265f622df7db4d6f5e57be7428a2167fd3916
|
[] |
no_license
|
vinkrish/ml-jupyter-notebook
|
bda01343118869bd2bfb44f3c3122853834d314a
|
ef5d05512b8387d7a3e494f024416f6ca7336827
|
refs/heads/master
| 2021-06-09T00:53:51.638551 | 2021-05-08T15:13:51 | 2021-05-08T15:13:51 | 168,104,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,921 |
py
|
import numpy as np
# Change False to True for each block of code to see what it does
# Using index arrays
if False:
a = np.array([1, 2, 3, 4])
b = np.array([True, True, False, False])
print a[b]
print a[np.array([True, False, True, False])]
# Creating the index array using vectorized operations
if False:
a = np.array([1, 2, 3, 2, 1])
b = (a >= 2)
print a[b]
print a[a >= 2]
# Creating the index array using vectorized operations on another array
if False:
a = np.array([1, 2, 3, 4, 5])
b = np.array([1, 2, 3, 2, 1])
print b == 2
print a[b == 2]
def mean_time_for_paid_students(time_spent, days_to_cancel):
'''
Fill in this function to calculate the mean time spent in the classroom
for students who stayed enrolled at least (greater than or equal to) 7 days.
Unlike in Lesson 1, you can assume that days_to_cancel will contain only
integers (there are no students who have not canceled yet).
The arguments are NumPy arrays. time_spent contains the amount of time spent
in the classroom for each student, and days_to_cancel contains the number
of days until each student cancel. The data is given in the same order
in both arrays.
'''
is_continued = days_to_cancel >= 7
paid_time = time_spent[is_continued]
return paid_time.mean()
# Time spent in the classroom in the first week for 20 students
time_spent = np.array([
12.89697233, 0. , 64.55043217, 0. ,
24.2315615 , 39.991625 , 0. , 0. ,
147.20683783, 0. , 0. , 0. ,
45.18261617, 157.60454283, 133.2434615 , 52.85000767,
0. , 54.9204785 , 26.78142417, 0.
])
# Days to cancel for 20 students
days_to_cancel = np.array([
4, 5, 37, 3, 12, 4, 35, 38, 5, 37, 3, 3, 68,
38, 98, 2, 249, 2, 127, 35
])
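# Usage sketch (not part of the original exercise): eleven of the twenty
# students above stayed enrolled at least 7 days, so this prints the mean
# of their time_spent values (roughly 41.05).
print mean_time_for_paid_students(time_spent, days_to_cancel)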
|
[
"[email protected]"
] | |
1f4bd449aba35de17062609461614b820c3a18f9
|
eddbf9518e7384f0e9a1d9e19cbe74855c3f24bd
|
/2017011066LiShaoFei/First.py
|
7f3f5ef75bfa8561246cc72cba9cfb0ca45f5650
|
[] |
no_license
|
wanghan79/2019_Python
|
9d2391d799efd9545b2afb3565bc5c6d542d1d86
|
f856409af92af3990773966d937d58d9d1cade04
|
refs/heads/master
| 2020-05-05T12:54:30.921361 | 2019-07-20T09:50:03 | 2019-07-20T09:50:03 | 180,050,522 | 11 | 14 | null | 2019-07-15T15:00:03 | 2019-04-08T01:59:24 |
Python
|
UTF-8
|
Python
| false | false | 1,119 |
py
|
import numpy as np
import random
import string
def random_list( start, stop, length):
if length >= 0:
length = int(length)
start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))
random_list = []
for i in range(length):
random_list.append(random.randint(start, stop))
return random_list
class dataGenerate:
def dGen(self, size=100000):
for i in range(size):
keys = random_list(0, 100, 10)
values = random_list(0, 100, 10)
dictionary = dict(zip(keys, values))
numx = np.random.randint(0, 1000)
numy = np.random.randint(0, 1000)
salt = ''.join(random.sample(string.ascii_letters + string.digits, 8)) # Generate a random string
data = {'string': salt, 'intX': numx, 'intY': numy, 'float': np.random.uniform(0, 1000000), 'keys':keys, 'values':values}
yield data
if __name__ == '__main__':
f = open("output.txt", "w")
for i in dataGenerate().dGen():
s=str(i)
f.write(s+'\n')
f.close()
|
[
"[email protected]"
] | |
7d69b0a585408e145f7c50fc555cfe9dfb7cb57f
|
35cb7a8a22fdd3932b63c89b17f587205bd00fec
|
/apps/excursao/migrations/0002_excursao_is_internacional.py
|
3f9637243e6f3af5d03ec7553d613c3b439ba4a1
|
[] |
no_license
|
rcoutelo/viajecomdarcy-web
|
debc24ec44e733c12257f3e89f3424ab7b3ee1f4
|
2ab2db407523299a58423f058c1f74231b15d617
|
refs/heads/master
| 2021-03-27T14:41:34.303463 | 2017-06-19T15:14:54 | 2017-06-19T15:14:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2017-05-19 19:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('excursao', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='excursao',
name='is_internacional',
field=models.BooleanField(default=False),
),
]
|
[
"[email protected]"
] | |
a64f0f99c0ebcacedc4e8efb592d1f75480fcd7c
|
0e25329bb101eb7280a34f650f9bd66ed002bfc8
|
/tests/functional/test_misc.py
|
5da0c776cf8bde4c5a1a3dc58331fff08885b9f3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
enthought/enstaller
|
2a2d433a3b83bcf9b4e3eaad59d952c531f36566
|
9c9f1a7ce58358b89352f4d82b15f51fbbdffe82
|
refs/heads/master
| 2023-08-08T02:30:26.990190 | 2016-01-22T17:51:35 | 2016-01-22T17:51:35 | 17,997,072 | 3 | 4 | null | 2017-01-13T19:22:10 | 2014-03-21T23:03:58 |
Python
|
UTF-8
|
Python
| false | false | 7,200 |
py
|
import json
import os.path
import platform
import shutil
import sys
import tempfile
import textwrap
import mock
import responses
from enstaller import __version__
from enstaller.config import Configuration
from enstaller.history import History
from enstaller.main import main_noexc
from enstaller.utils import PY_VER
from enstaller.tests.common import authenticated_config, mock_index, mock_print, R_JSON_AUTH_RESP
if sys.version_info[0] == 2:
import unittest2 as unittest
else:
import unittest
class TestMisc(unittest.TestCase):
@authenticated_config
@responses.activate
def test_print_config(self):
self.maxDiff = None
# Given
config = Configuration()
config.update(prefix=sys.prefix)
template = textwrap.dedent("""\
Python version: {pyver}
enstaller version: {version}
sys.prefix: {sys_prefix}
platform: {platform}
architecture: {arch}
use_webservice: True
settings:
prefix = {prefix}
repository_cache = {repository_cache}
noapp = False
proxy = None
You are logged in as 'dummy' (David Cournapeau).
Subscription level: Canopy / EPD Basic or above
""")
r_output = template.format(pyver=PY_VER,
sys_prefix=os.path.normpath(sys.prefix),
version=__version__,
platform=platform.platform(),
arch=platform.architecture()[0],
prefix=os.path.normpath(config.prefix),
repository_cache=config.repository_cache)
responses.add(responses.GET,
"https://api.enthought.com/accounts/user/info/",
body=json.dumps(R_JSON_AUTH_RESP))
# When
with self.assertRaises(SystemExit) as e:
with mock_print() as m:
main_noexc(["--config"])
# Then
self.assertEqual(e.exception.code, 0)
self.assertMultiLineEqual(m.value, r_output)
@authenticated_config
def test_list_bare(self):
# Given
sys_prefix = os.path.normpath(sys.prefix)
# When
with mock.patch("enstaller.cli.commands.print_installed"):
with self.assertRaises(SystemExit) as e:
with mock_print() as m:
main_noexc(["--list"])
# Then
self.assertEqual(e.exception.code, 0)
self.assertMultiLineEqual(m.value, "prefix: {0}\n\n".format(sys_prefix))
@authenticated_config
def test_log(self):
with mock.patch("enstaller.cli.commands.History",
spec=History) as mocked_history:
with self.assertRaises(SystemExit) as e:
with mock_print() as m:
main_noexc(["--log"])
self.assertEqual(e.exception.code, 0)
self.assertTrue(mocked_history.return_value.print_log.called)
self.assertMultiLineEqual(m.value, "")
@authenticated_config
def test_freeze(self):
installed_requirements = ["dummy 1.0.0-1", "another_dummy 1.0.1-1"]
with mock.patch("enstaller.cli.commands.get_freeze_list",
return_value=installed_requirements):
with self.assertRaises(SystemExit) as e:
with mock_print() as m:
main_noexc(["--freeze"])
self.assertEqual(e.exception.code, 0)
self.assertMultiLineEqual(m.value,
"dummy 1.0.0-1\nanother_dummy 1.0.1-1\n")
@mock_index({
"fubar-1.0.0-1.egg": {
"available": True,
"build": 1,
"md5": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"mtime": 0.0,
"name": "fubar",
"packages": [],
"product": "nono",
"python": PY_VER,
"size": 0,
"type": "egg",
"version": "1.0.0"
}}, "https://acme.com")
def test_insecure_flag(self):
# Given
responses.add(responses.GET,
"https://acme.com/accounts/user/info/",
body=json.dumps(R_JSON_AUTH_RESP))
config = Configuration()
config.update(store_url="https://acme.com")
config.update(auth=("nono", "le gros robot"))
# When
with self.assertRaises(SystemExit) as e:
with mock.patch("enstaller.main._ensure_config_or_die",
return_value=config):
with mock.patch(
"enstaller.main.ensure_authenticated_config"
):
main_noexc(["-s", "fubar"])
# Then
self.assertEqual(e.exception.code, 0)
# When
with self.assertRaises(SystemExit) as e:
with mock.patch("enstaller.main._ensure_config_or_die",
return_value=config):
with mock.patch(
"enstaller.main.ensure_authenticated_config"
):
main_noexc(["-ks", "fubar"])
# Then
self.assertEqual(e.exception.code, 0)
class TestPrefix(unittest.TestCase):
def setUp(self):
self.prefix = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.prefix)
@authenticated_config
@mock_index({
"fubar-1.0.0-1.egg": {
"available": True,
"build": 1,
"md5": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"mtime": 0.0,
"name": "fubar",
"packages": [],
"product": "nono",
"python": PY_VER,
"size": 0,
"type": "egg",
"version": "1.0.0"
}}, "https://api.enthought.com")
def test_simple(self):
self.maxDiff = None
# Given
responses.add(responses.GET,
"https://api.enthought.com/accounts/user/info/",
body=json.dumps(R_JSON_AUTH_RESP))
template = textwrap.dedent("""\
Python version: {pyver}
enstaller version: {version}
sys.prefix: {sys_prefix}
platform: {platform}
architecture: {arch}
use_webservice: True
settings:
prefix = {prefix}
repository_cache = {repository_cache}
noapp = False
proxy = None
You are logged in as 'dummy' (David Cournapeau).
Subscription level: Canopy / EPD Basic or above
""")
r_output = template.format(pyver=PY_VER,
sys_prefix=os.path.normpath(sys.prefix),
version=__version__,
platform=platform.platform(),
arch=platform.architecture()[0],
prefix=os.path.normpath(self.prefix),
repository_cache=os.path.join(self.prefix,
"LOCAL-REPO"))
# When
with self.assertRaises(SystemExit):
with mock_print() as m:
main_noexc(["--config", "--prefix={0}".format(self.prefix)])
# Then
self.assertEqual(m.value, r_output)
|
[
"[email protected]"
] | |
6ffbc1fdd0bb94c69f961871e05b86e073a589d5
|
e0ed932fc2e4edb953cc4e423362dabc19083008
|
/python/sanic_learn/docs/learn_conf.py
|
3b279281ff746c4e709cfdd6e544322a6b2da803
|
[] |
no_license
|
glfAdd/note
|
90baee45003ac3998d898dcfbc618caa28f33b74
|
19a9aff61450be25904bff0fe672f660d49d90ff
|
refs/heads/main
| 2023-05-27T13:28:36.092352 | 2023-05-24T03:35:58 | 2023-05-24T03:35:58 | 240,066,208 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 348 |
py
|
""" ============================ config
The config object implements both __getattr__ and __setattr__.
Method 1: set attributes directly
app = Sanic('myapp')
app.config.DB_NAME = 'appdb'
app.config.DB_USER = 'appuser'
Method 2: use update()
db_settings = {
'DB_HOST': 'localhost',
'DB_NAME': 'appdb',
'DB_USER': 'appuser'
}
app.config.update(db_settings)
"""
|
[
"[email protected]"
] | |
2d34fe0d4f1b224a9e161de674ff2f540eaf6f3f
|
d3f448d238b435b48d8f27f17a34b3e39a70dc29
|
/python-client/test/test_kyc_user_validation_share_holder_list_item_response_natural.py
|
5639c0032162e82c676318d5d1ff7f90707312d0
|
[] |
no_license
|
pedroguirao/swagger
|
1fc29b6d9bcc193bf8ce85f6d8a6074f4c37150d
|
5ffea6203b5fcd3f201c2ede76d354302a6fb0ee
|
refs/heads/master
| 2020-06-07T16:15:08.659567 | 2019-06-21T07:51:49 | 2019-06-21T07:51:49 | 193,055,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,180 |
py
|
# coding: utf-8
"""
MarketPay API
API for Smart Contracts and Payments # noqa: E501
OpenAPI spec version: v2.01
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.kyc_user_validation_share_holder_list_item_response_natural import KycUserValidationShareHolderListItemResponseNatural # noqa: E501
from swagger_client.rest import ApiException
class TestKycUserValidationShareHolderListItemResponseNatural(unittest.TestCase):
"""KycUserValidationShareHolderListItemResponseNatural unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testKycUserValidationShareHolderListItemResponseNatural(self):
"""Test KycUserValidationShareHolderListItemResponseNatural"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.kyc_user_validation_share_holder_list_item_response_natural.KycUserValidationShareHolderListItemResponseNatural() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
1de1b2caa5a46a524e310c70cb4922b59d81d69c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03106/s261285927.py
|
acddd8336d17f956526b10a5358983a3ae205bef
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
import sys
a,b,k=map(int,input().split())
for i in range(1,101):
if a%(101-i)==0 and b%(101-i)==0:
k-=1
if k==0:
print(101-i)
sys.exit()
|
[
"[email protected]"
] | |
71c821509417c94ee842caec376a6a4c2803b333
|
d9a22d4dcdfc0c28176c0e8afd784b30d275597e
|
/test_suite/shared_data/dispersion/Fyn_SH3_R1rho/relax_results/solution_tp02.py
|
6e2250c6b7fdf8ea287e0c2e8ad080017c2505a3
|
[] |
no_license
|
jlec/relax
|
fda1b3ff77be0afc21c2e6cc52348ae7635cd07a
|
c317326ddeacd1a1c608128769676899daeae531
|
refs/heads/master
| 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,039 |
py
|
"""Compare the synthetic cpmg_fit data to the relax solution.
To run this, type:
$ rm -f solution_tp02.log; ../../../../../relax --tee solution_tp02.log solution_tp02.py
"""
# Python module imports.
from os import remove
from shutil import move
# relax module imports.
from lib.dispersion.variables import EXP_TYPE_R1RHO
from lib.nmr import frequency_to_ppm
from specific_analyses.relax_disp.data import generate_r20_key
# Create a data pipe.
pipe.create('R2eff', 'relax_disp')
# Create the spin system.
spin.create(res_name='X', res_num=14, spin_name='N')
spin.element('N', spin_id='@N')
spin.isotope('15N', spin_id='@N')
# The spectral data - experiment ID, R2eff file name, experiment type, spin ID string, spectrometer frequency in Hertz, relaxation time.
data = [
['600_MHz_nu1_50_Hz', 'T14_600_50.dsp', ':14@N', 600e6, 50, 0.04],
['600_MHz_nu1_75_Hz', 'T14_600_75.dsp', ':14@N', 600e6, 75, 0.04],
['600_MHz_nu1_100_Hz', 'T14_600_100.dsp', ':14@N', 600e6, 100, 0.04],
['600_MHz_nu1_150_Hz', 'T14_600_150.dsp', ':14@N', 600e6, 150, 0.04],
['600_MHz_nu1_200_Hz', 'T14_600_200.dsp', ':14@N', 600e6, 200, 0.04],
['800_MHz_nu1_100_Hz', 'T14_800_100.dsp', ':14@N', 800e6, 100, 0.04],
['800_MHz_nu1_200_Hz', 'T14_800_200.dsp', ':14@N', 800e6, 200, 0.04],
['800_MHz_nu1_400_Hz', 'T14_800_400.dsp', ':14@N', 800e6, 400, 0.04]
]
spin_lock_offset = {}
spin_lock_offset['600_MHz_nu1_50_Hz'] = [ 340.0, 330.0, 320.0, 310.0, 300.0, 290.0, 280.0, 270.0, 260.0, 250.0, 240.0, 230.0, 220.0, 210.0, 200.0, 190.0, 180.0, 170.0, 160.0, 150.0, 140.0, 130.0, 120.0, 110.0, 100.0, 90.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 0.0, -10.0, -20.0, -30.0, -40.0, -50.0, -60.0, -70.0, -80.0, -90.0]
spin_lock_offset['600_MHz_nu1_75_Hz'] = [ 340.0, 330.0, 320.0, 310.0, 300.0, 290.0, 280.0, 270.0, 260.0, 250.0, 240.0, 230.0, 220.0, 210.0, 200.0, 190.0, 180.0, 170.0, 160.0, 150.0, 140.0, 130.0, 120.0, 110.0, 100.0, 90.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 0.0, -10.0, -20.0, -30.0, -40.0, -50.0, -60.0, -70.0, -80.0, -90.0]
spin_lock_offset['600_MHz_nu1_100_Hz'] = [ 340.0, 330.0, 320.0, 310.0, 300.0, 290.0, 280.0, 270.0, 260.0, 250.0, 240.0, 230.0, 220.0, 210.0, 200.0, 190.0, 180.0, 170.0, 160.0, 150.0, 140.0, 130.0, 120.0, 110.0, 100.0, 90.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0, 0.0, -10.0, -20.0, -30.0, -40.0, -50.0, -60.0, -70.0, -80.0, -90.0]
spin_lock_offset['600_MHz_nu1_150_Hz'] = [ 385.0, 370.0, 355.0, 340.0, 325.0, 310.0, 295.0, 280.0, 265.0, 250.0, 235.0, 220.0, 205.0, 190.0, 175.0, 160.0, 145.0, 130.0, 115.0, 100.0, 85.0, 70.0, 55.0, 40.0, 25.0, 10.0, -5.0, -20.0, -35.0, -50.0, -65.0, -80.0, -95.0, -110.0, -125.0, -140.0, -155.0, -170.0, -185.0]
spin_lock_offset['600_MHz_nu1_200_Hz'] = [ 385.0, 370.0, 355.0, 340.0, 325.0, 310.0, 295.0, 280.0, 265.0, 250.0, 235.0, 220.0, 205.0, 190.0, 175.0, 160.0, 145.0, 130.0, 115.0, 100.0, 85.0, 70.0, 55.0, 40.0, 25.0, 10.0, -5.0, -20.0, -35.0, -50.0, -65.0, -80.0, -95.0, -110.0, -125.0, -140.0, -155.0, -170.0, -185.0]
spin_lock_offset['800_MHz_nu1_100_Hz'] = [ 780.0, 750.0, 720.0, 690.0, 660.0, 630.0, 600.0, 570.0, 540.0, 510.0, 480.0, 450.0, 420.0, 390.0, 360.0, 330.0, 300.0, 270.0, 240.0, 210.0, 180.0, 150.0, 120.0, 90.0, 60.0, 30.0, 0.0, -30.0, -60.0, -90.0, -120.0, -150.0, -180.0, -210.0, -240.0, -270.0, -300.0, -330.0, -360.0]
spin_lock_offset['800_MHz_nu1_200_Hz'] = [ 960.0, 920.0, 880.0, 840.0, 800.0, 760.0, 720.0, 680.0, 640.0, 600.0, 560.0, 520.0, 480.0, 440.0, 400.0, 360.0, 320.0, 280.0, 240.0, 200.0, 160.0, 120.0, 80.0, 40.0, 0.0, -40.0, -80.0, -120.0, -160.0, -200.0, -240.0, -280.0, -320.0, -360.0, -400.0, -440.0, -480.0, -520.0, -560.0]
spin_lock_offset['800_MHz_nu1_400_Hz'] = [ 1150.0, 1100.0, 1050.0, 1000.0, 950.0, 900.0, 850.0, 800.0, 750.0, 700.0, 650.0, 600.0, 550.0, 500.0, 450.0, 400.0, 350.0, 300.0, 250.0, 200.0, 150.0, 100.0, 50.0, 0.0, -50.0, -100.0, -150.0, -200.0, -250.0, -300.0, -350.0, -400.0, -450.0, -500.0, -550.0, -600.0, -650.0, -700.0, -750.0]
# Loop over the files, reading in the data.
for id, file, spin_id, H_frq, field, relax_time in data:
# Loop over each CPMG frequency.
for offset in spin_lock_offset[id]:
# The id.
new_id = "%s_%.3f" % (id, offset)
# Set the NMR field strength.
spectrometer.frequency(id=new_id, frq=H_frq)
# Set the relaxation dispersion experiment type.
relax_disp.exp_type(spectrum_id=new_id, exp_type=EXP_TYPE_R1RHO)
# Relaxation dispersion CPMG constant time delay T (in s).
relax_disp.relax_time(spectrum_id=new_id, time=relax_time)
# Set the relaxation dispersion spin-lock field strength (nu1).
relax_disp.spin_lock_field(spectrum_id=new_id, field=field)
# Set the spin-lock offset, converting back to ppm.
relax_disp.spin_lock_offset(spectrum_id=new_id, offset=-frequency_to_ppm(frq=offset, B0=H_frq, isotope='15N'))
# Read the R2eff data.
relax_disp.r2eff_read_spin(id=id, file=file, dir='..', spin_id=spin_id, offset_col=1, data_col=2, error_col=3)
# Load the R1 data.
relax_data.read(ri_id='600MHz', ri_type='R1', frq=600e6, file='R1_600MHz.out', dir='..', mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5, data_col=6, error_col=7)
relax_data.read(ri_id='800MHz', ri_type='R1', frq=800e6, file='R1_800MHz.out', dir='..', mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5, data_col=6, error_col=7)
# Change the model.
relax_disp.select_model('TP02')
# The R20 keys.
r20_600_key = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=600e6)
r20_800_key = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=800e6)
# Manually set the parameter values.
spin_N = cdp.mol[0].res[0].spin[0]
spin_N.r2 = {
r20_600_key: 9.108060397660111,
r20_800_key: 13.793213528551924,
}
spin_N.pA = 0.945912353996981
spin_N.pB = 0.054087646003019
spin_N.kex = 367.981715073974556
spin_N.dw = 4.305697497613982
spin_N.ri_data['600MHz'] = 3.179051390898238
spin_N.ri_data['800MHz'] = 4.452840879991469
# Calculate.
minimise.calculate()
print("%-40s %20.15f" % ("relax chi2:", spin_N.chi2))
print("%-40s %20.15f" % ("cpmg_fit chi2 (corrections turned off):", 472.400507470708874))
# Minimisation.
minimise.grid_search(inc=7)
minimise.execute('simplex', constraints=True)
# Plot the dispersion curves.
relax_disp.plot_disp_curves(dir='.', num_points=100, extend=0, force=True)
# Save the results.
state.save('solution_tp02', dir='.', compress_type=1, force=True)
# Cleanup.
print("\n\nMoving 'disp_14_N.agr' to 'solution_tp02.agr'.")
move('disp_14_N.agr', 'solution_tp02.agr')
print("Deleting 'grace2images.py'.")
remove('grace2images.py')
|
[
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] |
bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5
|
400c4825be91859d206dbc84ac0bef043e1582b7
|
66a05459831aef06fc86316ecb782848c116b226
|
/collective/etherpad/Extensions/Install.py
|
be56c049b8b686cc2c19c1f049088f75de18d462
|
[] |
no_license
|
toutpt/collective.etherpad
|
8d01323b3e31ff0872afa1fd8e4bc85b14a3f123
|
590414ddd3ed7437cefea91c853d291feb9b328f
|
refs/heads/master
| 2020-05-18T05:31:36.603712 | 2013-09-26T12:45:35 | 2013-09-26T12:45:35 | 8,142,351 | 2 | 0 | null | 2013-09-18T16:42:41 | 2013-02-11T17:13:59 |
Python
|
UTF-8
|
Python
| false | false | 709 |
py
|
def uninstall(portal, reinstall=False):
"""We uninstall things that are not handles by quickinstaller"""
if not reinstall:
# lets remove action on content types
types = portal.portal_types
for _type in ('Document', 'News Item', 'Event', 'Topic'):
_typeinfo = getattr(types, _type, None)
if _typeinfo:
action_info = _typeinfo.getActionObject('object/etherpad')
if action_info:
actions = _typeinfo.listActions()
indexes = [(a.category, a.id) for a in actions]
index = indexes.index(('object', 'etherpad'))
_typeinfo.deleteActions((index, ))
|
[
"[email protected]"
] | |
01e31b5def65ba66a0b5b8c58dd666c03742a49f
|
00ed1eb9f4875be9c116eae90c850b4c5f0ebd4d
|
/tests/funcat/utils/test_yahoo.py
|
8cf228d677cb84b693c54063b84d932589854b5c
|
[
"Apache-2.0"
] |
permissive
|
pchaos/funcat2
|
a64fbcfc5c1d7b6ed1356cd9558a2efabae90c0e
|
ff554cc134906a5a182fc31774488d62a839b314
|
refs/heads/master
| 2023-09-02T19:56:16.017728 | 2021-09-03T01:57:15 | 2021-09-03T01:57:15 | 356,155,099 | 12 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
# -*- coding: utf-8 -*-
import unittest
import warnings
from funcat.utils import save_sp500_tickers, get_data_from_yahoo
__updated__ = "2021-08-10"
class TestYahoo(unittest.TestCase):
"""Test case docstring."""
@classmethod
def setUpClass(cls):
super(TestYahoo, cls).setUpClass()
        # Suppress warning: "ResourceWarning: Enable tracemalloc to get the object
        # allocation traceback"
warnings.simplefilter('ignore', ResourceWarning)
def setUp(self):
pass
def tearDown(self):
pass
def test_save_sp500_tickers(self):
sp500 = save_sp500_tickers()
        self.assertTrue(len(sp500) >= 500,
                        f"returned list is too short: {len(sp500)=}\n{sp500=}")
print(f"{len(sp500)=}, {sp500=}")
def test_get_data_from_yahoo(self):
get_data_from_yahoo()
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
815555a0b9ba8d3eef9e459b9d19cd9f6e6e9305
|
824f831ce0921b3e364060710c9e531f53e52227
|
/Leetcode/Python_Basics/02_C_Collection_OrderedDict.py
|
4c7578f24feadc8a52aabb12cbb8fd63c8f4f69d
|
[] |
no_license
|
adityakverma/Interview_Prepration
|
e854ff92c10d05bc2c82566ea797d2ce088de00a
|
d08a7f728c53943e9a27c33f8e4249633a69d1a6
|
refs/heads/master
| 2020-04-19T19:36:06.527353 | 2019-06-15T23:02:30 | 2019-06-15T23:02:30 | 168,392,921 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 948 |
py
|
# 8.3.6.1. OrderedDict Examples and Recipes
#
# Since an ordered dictionary remembers its insertion order, it can be used in
# conjunction with sorting to make a sorted dictionary:
# >>>
#
# >>> # regular unsorted dictionary
# >>> d = {'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}
#
# >>> # dictionary sorted by key
# >>> OrderedDict(sorted(d.items(), key=lambda t: t[0]))
# OrderedDict([('apple', 4), ('banana', 3), ('orange', 2), ('pear', 1)])
#
# >>> # dictionary sorted by value
# >>> OrderedDict(sorted(d.items(), key=lambda t: t[1]))
# OrderedDict([('pear', 1), ('orange', 2), ('banana', 3), ('apple', 4)])
#
# >>> # dictionary sorted by length of the key string
# >>> OrderedDict(sorted(d.items(), key=lambda t: len(t[0])))
# OrderedDict([('pear', 1), ('apple', 4), ('orange', 2), ('banana', 3)])
# ------------------------------------------------------------------------------
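# A minimal runnable sketch of the recipes above (added for illustration;
# output order for equal-length keys may vary on Python < 3.7):
from collections import OrderedDict
d = {'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}
print(OrderedDict(sorted(d.items(), key=lambda t: t[0])))       # by key
print(OrderedDict(sorted(d.items(), key=lambda t: t[1])))       # by value
print(OrderedDict(sorted(d.items(), key=lambda t: len(t[0]))))  # by key length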
|
[
"[email protected]"
] |